Compare commits

...

60 Commits

Author SHA1 Message Date
vlado
d274b7730d test 2025-06-10 15:44:10 +02:00
Gabriel Cruz
83de0c0abd feat(peeridauth): add peeridauth (#1445) 2025-06-10 10:25:34 -03:00
AkshayaMani
c501adc9ab feat(gossipsub): Add support for custom connection handling (Mix protocol integration) (#1420)
Co-authored-by: Ben-PH <benphawke@gmail.com>
2025-06-09 13:36:06 -04:00
Radosław Kamiński
f9fc24cc08 test(gossipsub): flaky tests (#1451) 2025-06-09 17:20:49 +01:00
richΛrd
cd26244ccc chore(quic): add libp2p_network_bytes metric (#1439)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-06-09 09:42:52 -03:00
vladopajic
cabab6aafe chore(gossipsub): add consts (#1447)
Co-authored-by: Radoslaw Kaminski <radoslaw@status.im>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-06-06 14:33:38 +00:00
Radosław Kamiński
fb42a9b4aa test(gossipsub): parameters (#1442)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-06-06 14:09:55 +00:00
Radosław Kamiński
141f4d9116 fix(GossipSub): save sent iHave in first element (#1405) 2025-06-06 10:27:59 +00:00
Gabriel Cruz
cb31152b53 feat(autotls): add acme client (#1436) 2025-06-05 17:47:02 +00:00
Radosław Kamiński
3a7745f920 test(gossipsub): message cache (#1431) 2025-06-03 15:18:29 +01:00
Radosław Kamiński
a89916fb1a test: checkUntilTimeout refactor (#1437) 2025-06-03 13:31:34 +01:00
vladopajic
c6cf46c904 fix(ci-daily): delete cache action will continue on error (#1435) 2025-06-02 17:08:31 +02:00
Gabriel Cruz
b28a71ab13 chore(readme): improve README's development section (#1427) 2025-05-29 17:51:29 +00:00
vladopajic
95b9859bcd chore(interop): move interop code to separate folder (#1413) 2025-05-29 16:14:12 +00:00
vladopajic
9e599753af ci(daily): add pinned dependencies variant (#1418) 2025-05-29 15:27:06 +00:00
richΛrd
2e924906bb chore: bump quic (#1428) 2025-05-29 14:25:02 +00:00
Radosław Kamiński
e811c1ad32 fix(gossipsub): save iDontWants messages in the first element of history (#1393) 2025-05-29 13:33:51 +01:00
Radosław Kamiński
86695b55bb test(gossipsub): include missing test files and handle flaky tests (#1416)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-05-29 12:44:21 +01:00
vladopajic
8c3a4d882a ci(dependencies): fix access to tokens (#1421) 2025-05-29 00:27:36 +00:00
richΛrd
4bad343ddc fix: limit chronicles version to < 0.11.0 (#1423) 2025-05-28 21:00:41 -03:00
vladopajic
47b8a05c32 ci(daily): improvements (#1404) 2025-05-27 14:41:53 +00:00
Radosław Kamiński
4e6f4af601 test(gossipsub): heartbeat tests (#1391) 2025-05-27 10:28:12 +01:00
Miran
7275f6f9c3 chore: unused imports are now errors (#1399) 2025-05-26 21:36:08 +02:00
richΛrd
c3dae6a7d4 fix(quic): reset and mm for interop tests (#1397) 2025-05-26 12:16:17 -04:00
vladopajic
bb404eda4a fix(ci-daily): remove --solver flag (#1400) 2025-05-26 16:48:51 +02:00
richΛrd
584710bd80 chore: move -d:libp2p_quic_support flag to .nimble (#1392) 2025-05-26 08:57:26 -04:00
Radosław Kamiński
ad5eae9adf test(gossipsub): move and refactor control messages tests (#1380) 2025-05-22 15:10:37 +00:00
richΛrd
26fae7cd2d chore: bump quic (#1387) 2025-05-21 22:30:35 +00:00
Miran
87d6655368 chore: update more dependencies (#1374) 2025-05-21 21:46:09 +00:00
richΛrd
cd60b254a0 chore(version): update libp2p.nimble to 1.10.1 (#1390) 2025-05-21 07:40:11 -04:00
richΛrd
b88cdcdd4b chore: make quic optional (#1389) 2025-05-20 21:04:30 -04:00
vladopajic
4a5e06cb45 revert: disable transport interop with zig-v0.0.1 (#1372) (#1383) 2025-05-20 14:20:42 +02:00
vladopajic
fff3a7ad1f chore(hp): add timeout on dial (#1378) 2025-05-20 11:10:01 +00:00
Miran
05c894d487 fix(ci): test Nim 2.2 (#1385) 2025-05-19 15:51:56 -03:00
vladopajic
8850e9ccd9 ci(test): reduce timeout (#1376) 2025-05-19 15:34:16 +00:00
Ivan FB
2746531851 chore(dialer): capture possible exception (#1381) 2025-05-19 10:57:04 -04:00
vladopajic
2856db5490 ci(interop): disable transport interop with zig-v0.0.1 (#1372) 2025-05-15 20:04:41 +00:00
AYAHASSAN287
b29e78ccae test(gossipsub): block5 protobuf test cases (#1204)
Co-authored-by: Radoslaw Kaminski <radoslaw@status.im>
2025-05-15 16:32:03 +01:00
Gabriel Cruz
c9761c3588 chore: improve README.md text (#1373) 2025-05-15 12:35:01 +00:00
richΛrd
e4ef21e07c chore: bump quic (#1371)
Co-authored-by: Gabriel Cruz <8129788+gmelodie@users.noreply.github.com>
2025-05-14 21:06:38 +00:00
Miran
61429aa0d6 chore: fix import warnings (#1370) 2025-05-14 19:08:46 +00:00
Radosław Kamiński
c1ef011556 test(gossipsub): refactor testgossipinternal (#1366) 2025-05-14 17:15:31 +01:00
vladopajic
cd1424c09f chore(interop): use the same redis dependency (#1364) 2025-05-14 15:49:51 +00:00
Miran
878d627f93 chore: update dependencies (#1368) 2025-05-14 10:51:08 -03:00
richΛrd
1d6385ddc5 chore: bump quic (#1361)
Co-authored-by: Gabriel Cruz <8129788+gmelodie@users.noreply.github.com>
2025-05-14 11:40:13 +00:00
Gabriel Cruz
873f730b4e chore: change nim-stew dep tagging (#1362) 2025-05-13 21:46:07 -04:00
Radosław Kamiński
1c1547b137 test(gossipsub): Topic Membership Tests - updated (#1363) 2025-05-13 16:22:49 +01:00
Álex
9997f3e3d3 test(gossipsub): control message (#1191)
Co-authored-by: Radoslaw Kaminski <radoslaw@status.im>
2025-05-13 10:54:07 -04:00
richΛrd
4d0b4ecc22 feat: interop (#1303) 2025-05-06 19:27:33 -04:00
Gabriel Cruz
ccb24b5f1f feat(cert): add certificate signing request (CSR) generation (#1355) 2025-05-06 18:56:51 +00:00
Marko Burčul
5cb493439d fix(ci): secrets token typo (#1357) 2025-05-05 09:49:42 -03:00
Ivan FB
24b284240a chore: add gcsafe pragma to removeValidator (#1356) 2025-05-02 18:39:00 +02:00
richΛrd
b0f77d24f9 chore(version): update libp2p.nimble to 1.10.0 (#1351) 2025-05-01 05:39:58 -04:00
richΛrd
e32ac492d3 chore: set @vacp2p/p2p team as codeowners of repo (#1352) 2025-05-01 05:03:54 -03:00
Gabriel Cruz
470a7f8cc5 chore: add libp2p CID codec (#1348) 2025-04-27 09:45:40 +00:00
Radosław Kamiński
b269fce289 test(gossipsub): reorganize tests by feature category (#1350) 2025-04-25 16:48:50 +01:00
vladopajic
bc4febe92c fix: git ignore for tests (#1349) 2025-04-24 15:36:46 +02:00
Radosław Kamiński
b5f9bfe0f4 test(gossipsub): optimise heartbeat interval and sleepAsync (#1342) 2025-04-23 18:10:16 +01:00
Gabriel Cruz
4ce1e8119b chore(readme): add gabe as a maintainer (#1346) 2025-04-23 15:57:32 +02:00
Miran
65136b38e2 chore: fix warnings (#1341)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-04-22 19:45:53 +00:00
122 changed files with 6264 additions and 3500 deletions

1
.github/CODEOWNERS vendored Normal file
View File

@@ -0,0 +1 @@
* @vacp2p/p2p

View File

@@ -14,7 +14,7 @@ concurrency:
jobs:
test:
timeout-minutes: 90
timeout-minutes: 40
strategy:
fail-fast: false
matrix:
@@ -36,6 +36,8 @@ jobs:
memory_management: refc
- ref: version-2-0
memory_management: refc
- ref: version-2-2
memory_management: refc
include:
- platform:
os: linux
@@ -116,5 +118,5 @@ jobs:
nimble --version
gcc --version
NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble test

View File

@@ -6,9 +6,26 @@ on:
workflow_dispatch:
jobs:
test_amd64:
name: Daily amd64
test_amd64_latest:
name: Daily amd64 (latest dependencies)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': 'version-2-0', 'memory_management': 'refc'}]"
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['amd64']"
test_amd64_pinned:
name: Daily amd64 (pinned dependencies)
uses: ./.github/workflows/daily_common.yml
with:
pinned_deps: true
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['amd64']"

View File

@@ -4,6 +4,11 @@ name: Daily Common
on:
workflow_call:
inputs:
pinned_deps:
description: 'Whether dependencies should be installed from the pinned file (true) or resolved to their latest versions (false)'
required: false
type: boolean
default: false
nim:
description: 'Nim Configuration'
required: true
@@ -17,26 +22,18 @@ on:
required: false
type: string
default: "[]"
use_sat_solver:
description: 'Install dependencies with SAT Solver'
required: false
type: boolean
default: false
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
delete_cache:
name: Delete github action's branch cache
runs-on: ubuntu-latest
continue-on-error: true
steps:
- uses: snnaplab/delete-branch-cache-action@v1
test:
needs: delete_cache
timeout-minutes: 90
timeout-minutes: 40
strategy:
fail-fast: false
matrix:
@@ -81,8 +78,14 @@ jobs:
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Install dependencies
- name: Install dependencies (pinned)
if: ${{ inputs.pinned_deps }}
run: |
nimble install_pinned
- name: Install dependencies (latest)
if: ${{ !inputs.pinned_deps }}
run: |
nimble install -y --depsOnly
@@ -91,11 +94,6 @@ jobs:
nim --version
nimble --version
if [[ "${{ inputs.use_sat_solver }}" == "true" ]]; then
dependency_solver="sat"
else
dependency_solver="legacy"
fi
NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }} --solver:${dependency_solver}"
export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble test
nimble testintegration

View File

@@ -1,14 +0,0 @@
name: Daily Nim Devel
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
test_nim_devel:
name: Daily Nim Devel
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': 'devel', 'memory_management': 'orc'}]"
cpu: "['amd64']"

View File

@@ -10,6 +10,14 @@ jobs:
name: Daily i386 (Linux)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': 'version-2-0', 'memory_management': 'refc'}, {'ref': 'devel', 'memory_management': 'orc'}]"
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['i386']"
exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"
exclude: "[
{'platform': {'os':'macos'}},
{'platform': {'os':'windows'}},
]"

View File

@@ -1,15 +0,0 @@
name: Daily SAT
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
test_amd64:
name: Daily SAT
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': 'version-2-0', 'memory_management': 'refc'}]"
cpu: "['amd64']"
use_sat_solver: true

View File

@@ -17,13 +17,13 @@ jobs:
target:
- repository: status-im/nimbus-eth2
ref: unstable
token: ${{ secrets.ACTIONS_GITHUB_NIMBUS_ETH2 }}
secret: ACTIONS_GITHUB_TOKEN_NIMBUS_ETH2
- repository: waku-org/nwaku
ref: master
token: ${{ secrets.ACTIONS_GITHUB_NWAKU }}
secret: ACTIONS_GITHUB_TOKEN_NWAKU
- repository: codex-storage/nim-codex
ref: master
token: ${{ secrets.ACTIONS_GITHUB_NIM_CODEX }}
secret: ACTIONS_GITHUB_TOKEN_NIM_CODEX
steps:
- name: Clone target repository
uses: actions/checkout@v4
@@ -32,7 +32,7 @@ jobs:
ref: ${{ matrix.target.ref}}
path: nbc
fetch-depth: 0
token: ${{ matrix.target.token }}
token: ${{ secrets[matrix.target.secret] }}
- name: Checkout this ref in target repository
run: |

View File

@@ -27,7 +27,7 @@ jobs:
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f tests/transport-interop/Dockerfile .
run: docker buildx build --load -t nim-libp2p-head -f interop/transport/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
with:
@@ -35,7 +35,7 @@ jobs:
# without suffix action fails because "hole-punching-interop" artifacts have
# the same name as "transport-interop" artifacts
test-results-suffix: transport-interop
extra-versions: ${{ github.workspace }}/tests/transport-interop/version.json
extra-versions: ${{ github.workspace }}/interop/transport/version.json
s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
@@ -48,12 +48,12 @@ jobs:
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
run: docker buildx build --load -t nim-libp2p-head -f interop/hole-punching/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json
extra-versions: ${{ github.workspace }}/interop/hole-punching/version.json
s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}

View File

@@ -22,6 +22,6 @@ jobs:
uses: arnetheduck/nph-action@v1
with:
version: 0.6.1
options: "examples libp2p tests tools *.nim*"
options: "examples libp2p tests interop tools *.nim*"
fail: true
suggest: true

13
.gitignore vendored
View File

@@ -18,9 +18,10 @@ nimble.develop
nimble.paths
go-libp2p-daemon/
# Ignore all test build files in tests folder (auto generated when running tests),
# by ignoring anything that does not have following file name scheme:
# has extension or is Dockerfile...
/tests/*
!/tests/*.*
!/tests/Dockerfile
# Ignore all test build files in tests folder (auto generated when running tests).
# First rule (`tests/**/test*[^.]*`) will ignore all binaries: has prefix test + does not have dot in name.
# Second and third rules are here to un-ignore all files with an extension and Dockerfiles,
# because it appears that VS Code skips text search in some test files without these rules.
tests/**/test*[^.]*
!tests/**/*.*
!tests/**/Dockerfile

35
.pinned
View File

@@ -1,19 +1,22 @@
bearssl;https://github.com/status-im/nim-bearssl@#667b40440a53a58e9f922e29e20818720c62d9ac
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#c04576d829b8a0a1b12baaa8bc92037501b3a4a0
bearssl;https://github.com/status-im/nim-bearssl@#34d712933a4e0f91f5e66bc848594a581504a215
chronicles;https://github.com/status-im/nim-chronicles@#81a4a7a360c78be9c80c8f735c76b6d4a1517304
chronos;https://github.com/status-im/nim-chronos@#b55e2816eb45f698ddaca8d8473e401502562db2
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
json_serialization;https://github.com/status-im/nim-json-serialization@#85b7ea093cb85ee4f433a617b97571bd709d30df
faststreams;https://github.com/status-im/nim-faststreams@#c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d
httputils;https://github.com/status-im/nim-http-utils@#79cbab1460f4c0cdde2084589d017c43a3d7b4f1
json_serialization;https://github.com/status-im/nim-json-serialization@#2b1c5eb11df3647a2cee107cd4cce3593cbb8bcf
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
nimcrypto;https://github.com/cheatfate/nimcrypto@#1c8d6e3caf3abc572136ae9a1da81730c4eb4288
quic;https://github.com/status-im/nim-quic.git@#d54e8f0f2e454604b767fadeae243d95c30c383f
results;https://github.com/arnetheduck/nim-results@#f3c666a272c69d70cb41e7245e7f6844797303ad
secp256k1;https://github.com/status-im/nim-secp256k1@#7246d91c667f4cc3759fdd50339caa45a2ecd8be
serialization;https://github.com/status-im/nim-serialization@#4bdbc29e54fe54049950e352bb969aab97173b35
stew;https://github.com/status-im/nim-stew@#3159137d9a3110edb4024145ce0ba778975de40e
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
unittest2;https://github.com/status-im/nim-unittest2@#2300fa9924a76e6c96bc4ea79d043e3a0f27120c
websock;https://github.com/status-im/nim-websock@#f8ed9b40a5ff27ad02a3c237c4905b0924e3f982
zlib;https://github.com/status-im/nim-zlib@#38b72eda9d70067df4a953f56b5ed59630f2a17b
nimcrypto;https://github.com/cheatfate/nimcrypto@#19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
quic;https://github.com/status-im/nim-quic.git@#ca3eda53bee9cef7379be195738ca1490877432f
results;https://github.com/arnetheduck/nim-results@#df8113dda4c2d74d460a8fa98252b0b771bf1f27
secp256k1;https://github.com/status-im/nim-secp256k1@#f808ed5e7a7bfc42204ec7830f14b7a42b63c284
serialization;https://github.com/status-im/nim-serialization@#548d0adc9797a10b2db7f788b804330306293088
stew;https://github.com/status-im/nim-stew@#0db179256cf98eb9ce9ee7b9bc939f219e621f77
testutils;https://github.com/status-im/nim-testutils@#9e842bd58420d23044bc55e16088e8abbe93ce51
unittest2;https://github.com/status-im/nim-unittest2@#8b51e99b4a57fcfb31689230e75595f024543024
websock;https://github.com/status-im/nim-websock@#d5cd89062cd2d168ef35193c7d29d2102921d97e
zlib;https://github.com/status-im/nim-zlib@#daa8723fd32299d4ca621c837430c29a5a11e19a
jwt;https://github.com/vacp2p/nim-jwt@#18f8378de52b241f321c1f9ea905456e89b95c6f
bearssl_pkey_decoder;https://github.com/vacp2p/bearssl_pkey_decoder@#21dd3710df9345ed2ad8bf8f882761e07863b8e0
bio;https://github.com/xzeshen/bio@#0f5ed58b31c678920b6b4f7c1783984e6660be97

172
README.md
View File

@@ -16,43 +16,126 @@
<img src="https://img.shields.io/badge/nim-%3E%3D1.2.0-orange.svg?style=flat-square" />
</p>
tests
# Table of Contents
- [Background](#background)
- [Install](#install)
- [Getting Started](#getting-started)
- [Go-libp2p-daemon](#go-libp2p-daemon)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
- [Development](#development)
- [Contribute](#contribute)
- [Contributors](#contributors)
- [Core Maintainers](#core-maintainers)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
- [License](#license)
## Background
libp2p is a [Peer-to-Peer](https://en.wikipedia.org/wiki/Peer-to-peer) networking stack, with [implementations](https://github.com/libp2p/libp2p#implementations) in multiple languages derived from the same [specifications.](https://github.com/libp2p/specs)
Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It's striving to be a modular stack, with sane and secure defaults, useful protocols, while remain open and extensible.
This implementation in native Nim, relying on [chronos](https://github.com/status-im/nim-chronos) for async. It's used in production by a few [projects](#users)
Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It strives to be a modular stack with secure defaults and useful protocols, while remaining open and extensible.
This is a native Nim implementation, using [chronos](https://github.com/status-im/nim-chronos) for asynchronous execution. It's used in production by a few [projects](#users)
Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p's documentation [**docs.libp2p.io**](https://docs.libp2p.io).
## Install
**Prerequisite**
- [Nim](https://nim-lang.org/install.html)
> The currently supported Nim version is 1.6.18.
> The currently supported Nim versions are 1.6, 2.0 and 2.2.
```
nimble install libp2p
```
You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/). See [examples](./examples) for simple usage patterns.
## Getting Started
You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/).
Try out the chat example. For this you'll need to have [`go-libp2p-daemon`](examples/go-daemon/daemonapi.md) running. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
```bash
nim c -r --threads:on examples/directchat.nim
```
This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.
```bash
./examples/directchat
/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu # change this hash by the hash you were given
```
You can now chat between the instances!
![Chat example](https://imgur.com/caYRu8K.gif)
## Development
Clone the repository and install the dependencies:
```sh
git clone https://github.com/vacp2p/nim-libp2p
cd nim-libp2p
nimble install -dy
```
### Testing
Remember you'll need to build the `go-libp2p-daemon` binary to run the `nim-libp2p` tests.
To do so, please follow the installation instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
Run unit tests:
```sh
# run all the unit tests
nimble test
```
**Note:** Running all tests requires the [`go-libp2p-daemon` to be installed and running](examples/go-daemon/daemonapi.md).
If you only want to run tests that don't require `go-libp2p-daemon`, use:
```
nimble testnative
```
For a list of all available test suites, use:
```
nimble tasks
```
### Contribute
The libp2p implementation in Nim is a work in progress. We welcome contributors to help out! Specifically, you can:
- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
- **Code format**. Code should be formatted with [nph](https://github.com/arnetheduck/nph) and follow the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
### Contributors
<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
### Core Maintainers
<table>
<tbody>
<tr>
<td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
<td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
<td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
</tr>
</tbody>
</table>
### Compile time flags
Enable quic transport support
```bash
nim c -d:libp2p_quic_support some_file.nim
```
Enable expensive metrics (ie, metrics with per-peer cardinality):
```bash
nim c -d:libp2p_expensive_metrics some_file.nim
```
Set list of known libp2p agents for metrics:
```bash
nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku some_file.nim
```
Specify gossipsub specific topics to measure in the metrics:
```bash
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
```
## Modules
List of packages modules implemented in nim-libp2p:
@@ -80,10 +163,10 @@ List of packages modules implemented in nim-libp2p:
| [libp2p-yamux](libp2p/muxers/yamux/yamux.nim) | [Yamux](https://docs.libp2p.io/concepts/multiplex/yamux/) multiplexer |
| **Data Types** | |
| [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id) |
| [peer-store](libp2p/peerstore.nim) | ["Address book" of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
| [peer-store](libp2p/peerstore.nim) | [Address book of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
| [multiaddress](libp2p/multiaddress.nim) | [Composable network addresses](https://github.com/multiformats/multiaddr) |
| [signed envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
| [routing record](libp2p/routing_record.nim) | [Signed peer dialing informations](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
| [signed-envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
| [routing-record](libp2p/routing_record.nim) | [Signed peer dialing information](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
| [discovery manager](libp2p/discovery/discoverymngr.nim) | Discovery Manager |
| **Utilities** | |
| [libp2p-crypto](libp2p/crypto) | Cryptographic backend |
@@ -111,65 +194,6 @@ The versioning follows [semver](https://semver.org/), with some additions:
We aim to be compatible at all time with at least 2 Nim `MINOR` versions, currently `1.6 & 2.0`
## Development
Clone and Install dependencies:
```sh
git clone https://github.com/vacp2p/nim-libp2p
cd nim-libp2p
# to use dependencies computed by nimble
nimble install -dy
# OR to install the dependencies versions used in CI
nimble install_pinned
```
Run unit tests:
```sh
# run all the unit tests
nimble test
```
This requires the go daemon to be available. To only run native tests, use `nimble testnative`.
Or use `nimble tasks` to show all available tasks.
### Contribute
The libp2p implementation in Nim is a work in progress. We welcome contributors to help out! Specifically, you can:
- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
- **Code format**. Please format code using [nph](https://github.com/arnetheduck/nph) v0.5.1. This will ensure a consistent codebase and make PRs easier to review. A CI rule has been added to ensure that future commits are all formatted using the same nph version.
The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
### Contributors
<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
### Core Maintainers
<table>
<tbody>
<tr>
<td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
<td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
</tr>
</tbody>
</table>
### Compile time flags
Enable expensive metrics (ie, metrics with per-peer cardinality):
```bash
nim c -d:libp2p_expensive_metrics some_file.nim
```
Set list of known libp2p agents for metrics:
```bash
nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku some_file.nim
```
Specify gossipsub specific topics to measure in the metrics:
```bash
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
```
## License
Licensed and distributed under either of

View File

@@ -4,6 +4,7 @@ if dirExists("nimbledeps/pkgs"):
if dirExists("nimbledeps/pkgs2"):
switch("NimblePath", "nimbledeps/pkgs2")
switch("warningAsError", "UnusedImport:on")
switch("warning", "CaseTransition:off")
switch("warning", "ObservableStores:off")
switch("warning", "LockLevel:off")

View File

@@ -3,9 +3,7 @@
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Script](#script)
- [Usage](#usage)
- [Example](#example)
- [Getting Started](#getting-started)
- [Examples](#examples)
# Introduction
This is a libp2p-backed daemon wrapping the functionalities of go-libp2p for use in Nim. <br>
@@ -13,20 +11,25 @@ For more information about the go daemon, check out [this repository](https://gi
> **Required only** for running the tests.
# Prerequisites
Go with version `1.16.0`.
Go with version `1.16.0`
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.
# Installation
Follow one of the methods below:
## Script
Run the build script while having the `go` command pointing to the correct Go version.
We recommend using `1.16.0`, as previously stated.
```sh
./scripts/build_p2pd.sh
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the correct directory.
If you find any issues, please head into our discord and ask for our assistance.
`build_p2pd.sh` will not rebuild unless needed. If you already have the newest binary and you want to force the rebuild, use:
```sh
./scripts/build_p2pd.sh -f
```
Or:
```sh
./scripts/build_p2pd.sh --force
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the `$GOPATH/bin` directory.
If you're having issues, head into [our discord](https://discord.com/channels/864066763682218004/1115526869769535629) and ask for assistance.
After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
```sh
@@ -34,28 +37,7 @@ export PATH="$PATH:$HOME/go/bin"
```
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.
# Usage
## Example
# Examples
Examples can be found in the [examples folder](https://github.com/status-im/nim-libp2p/tree/readme/examples/go-daemon)
## Getting Started
Try out the chat example. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
```bash
nim c -r --threads:on examples/directchat.nim
```
This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.
```bash
./examples/directchat
/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu
```
You can now chat between the instances!
![Chat example](https://imgur.com/caYRu8K.gif)

View File

@@ -7,11 +7,11 @@ COPY .pinned libp2p.nimble nim-libp2p/
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev
RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240dbf11c544011dd0e033d3c3acca56af" -y
COPY . nim-libp2p/
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./tests/hole-punching-interop/hole_punching.nim
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs --mm:refc -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./interop/hole-punching/hole_punching.nim
FROM --platform=linux/amd64 debian:bullseye-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2 libssl-dev

View File

@@ -0,0 +1,138 @@
import std/[os, options, strformat, sequtils]
import redis
import chronos, chronicles
import
../../libp2p/[
builders,
switch,
multicodec,
observedaddrmanager,
services/hpservice,
services/autorelayservice,
protocols/connectivity/autonat/client as aclient,
protocols/connectivity/relay/client as rclient,
protocols/connectivity/relay/relay,
protocols/connectivity/autonat/service,
protocols/ping,
]
import ../../tests/[stubs/autonatclientstub, errorhelpers]
logScope:
topics = "hp interop node"
proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
  ## Builds the Switch used by this hole-punching interop node:
  ## TCP (NoDelay) + Yamux + Noise, with AutoNAT mounted and an
  ## ObservedAddrManager of size 1 so a single observation suffices.
  ##
  ## `r`         — optional relay client; when given, circuit relay is enabled.
  ## `hpService` — optional hole-punching service attached to the switch.
  let rng = newRng()
  var builder = SwitchBuilder
    .new()
    .withRng(rng)
    .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
    .withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
    .withTcpTransport({ServerFlags.TcpNoDelay})
    .withYamux()
    .withAutonat()
    .withNoise()
  if hpService != nil:
    builder = builder.withServices(@[hpService])
  if r != nil:
    builder = builder.withCircuitRelay(r)
  let s = builder.build()
  # Ping is mounted so the dialer can measure RTT over the punched connection.
  s.mount(Ping.new(rng = rng))
  return s
proc main() {.async.} =
  ## Entry point of the hole-punching interop node. Depending on the MODE
  ## env var it acts as the listener (publishes its peer id to redis and
  ## idles) or as the dialer (connects through the relay, waits for the
  ## punch, then pings the peer and prints the RTT as JSON).
  let relayClient = RelayClient.new()
  let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
  # Stub autonat client that always answers NotReachable, forcing relay usage.
  let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
  autonatClientStub.answer = NotReachable
  let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
  let hpservice = HPService.new(autonatService, autoRelayService)
  let
    isListener = getEnv("MODE") == "listen"
    switch = createSwitch(relayClient, hpservice)
    auxSwitch = createSwitch() # helper peer used only to trigger autonat below
    redisClient = open("redis", 6379.Port)
  debug "Connected to redis"
  await switch.start()
  await auxSwitch.start()
  # Block until the test orchestrator publishes the relay's TCP address.
  let relayAddr =
    try:
      redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
    except Exception as e:
      raise newException(CatchableError, e.msg)
  debug "All relay addresses", relayAddr
  # This is necessary to make the autonat service work. It will ask this peer for our reachability which the autonat
  # client stub will answer NotReachable.
  await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)
  # Wait for autonat to be NotReachable
  while autonatService.networkReachability != NetworkReachability.NotReachable:
    await sleepAsync(100.milliseconds)
  # This will trigger the autonat relay service to make a reservation.
  # bLPop returns (key, value); element 1 is the address string.
  let relayMA = MultiAddress.init(relayAddr[1]).tryGet()
  try:
    debug "Dialing relay...", relayMA
    let relayId = await switch.connect(relayMA).wait(30.seconds)
    debug "Connected to relay", relayId
  except AsyncTimeoutError:
    raise newException(CatchableError, "Connection to relay timed out")
  # Wait for our relay address to be published
  while not switch.peerInfo.addrs.anyIt(it.contains(multiCodec("p2p-circuit")).tryGet()):
    await sleepAsync(100.milliseconds)
  if isListener:
    let listenerPeerId = switch.peerInfo.peerId
    discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
    debug "Pushed listener client peer id to redis", listenerPeerId
    # Nothing to do anymore, wait to be killed
    await sleepAsync(2.minutes)
  else:
    let listenerId =
      try:
        PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
      except Exception as e:
        raise newException(CatchableError, e.msg)
    debug "Got listener peer id", listenerId
    let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
    debug "Dialing listener relay address", listenerRelayAddr
    await switch.connect(listenerId, @[listenerRelayAddr])
    # wait for hole-punching to complete in the background
    await sleepAsync(5000.milliseconds)
    let conn = switch.connManager.selectMuxer(listenerId).connection
    let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
    let delay = await Ping.new().ping(channel)
    await allFuturesThrowing(
      channel.close(), conn.close(), switch.stop(), auxSwitch.stop()
    )
    # The interop harness parses this single JSON line from stdout.
    echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
try:
  proc mainAsync(): Future[string] {.async.} =
    # mainAsync wraps main and returns some value, as otherwise
    # 'waitFor(fut)' has no type (or is ambiguous)
    await main()
    return "done"
  # Hard 4-minute cap on the whole test run; exit non-zero on any failure
  # so the interop harness reports the node as failed.
  discard waitFor(mainAsync().wait(4.minutes))
except AsyncTimeoutError:
  error "Program execution timed out."
  quit(-1)
except CatchableError as e:
  error "Unexpected error", description = e.msg
  quit(-1)

View File

@@ -13,6 +13,6 @@ COPY . nim-libp2p/
RUN \
cd nim-libp2p && \
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs -p:nim-libp2p -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./tests/transport-interop/main.nim
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs -p:nim-libp2p --mm:refc -d:libp2p_quic_support -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./interop/transport/main.nim
ENTRYPOINT ["/app/nim-libp2p/tests/transport-interop/main"]
ENTRYPOINT ["/app/nim-libp2p/interop/transport/main"]

View File

@@ -42,6 +42,10 @@ proc main() {.async.} =
discard switchBuilder.withTcpTransport().withAddress(
MultiAddress.init("/ip4/" & ip & "/tcp/0").tryGet()
)
of "quic-v1":
discard switchBuilder.withQuicTransport().withAddress(
MultiAddress.init("/ip4/" & ip & "/udp/0/quic-v1").tryGet()
)
of "ws":
discard switchBuilder
.withTransport(
@@ -55,16 +59,12 @@ proc main() {.async.} =
case secureChannel
of "noise":
discard switchBuilder.withNoise()
else:
doAssert false
case muxer
of "yamux":
discard switchBuilder.withYamux()
of "mplex":
discard switchBuilder.withMplex()
else:
doAssert false
let
rng = newRng()
@@ -99,7 +99,18 @@ proc main() {.async.} =
pingRTTMilllis: float(pingDelay.milliseconds),
)
)
quit(0)
discard waitFor(main().withTimeout(testTimeout))
quit(1)
try:
proc mainAsync(): Future[string] {.async.} =
# mainAsync wraps main and returns some value, as otherwise
# 'waitFor(fut)' has no type (or is ambiguous)
await main()
return "done"
discard waitFor(mainAsync().wait(testTimeout))
except AsyncTimeoutError:
error "Program execution timed out."
quit(-1)
except CatchableError as e:
error "Unexpected error", description = e.msg
quit(-1)

View File

@@ -3,7 +3,8 @@
"containerImageID": "nim-libp2p-head",
"transports": [
"tcp",
"ws"
"ws",
"quic-v1"
],
"secureChannels": [
"noise"

View File

@@ -52,7 +52,6 @@ else:
stream/connection,
transports/transport,
transports/tcptransport,
transports/quictransport,
protocols/secure/noise,
cid,
multihash,
@@ -71,3 +70,7 @@ else:
minprotobuf, switch, peerid, peerinfo, connection, multiaddress, crypto, lpstream,
bufferstream, muxer, mplex, transport, tcptransport, noise, errors, cid, multihash,
multicodec, builders, pubsub
when defined(libp2p_quic_support):
import libp2p/transports/quictransport
export quictransport

View File

@@ -1,7 +1,7 @@
mode = ScriptMode.Verbose
packageName = "libp2p"
version = "1.9.0"
version = "1.10.1"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
@@ -9,9 +9,9 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
requires "nim >= 1.6.0",
"nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
"chronicles >= 0.10.2", "chronos >= 4.0.3", "metrics", "secp256k1", "stew#head",
"websock", "unittest2",
"https://github.com/status-im/nim-quic.git#d54e8f0f2e454604b767fadeae243d95c30c383f"
"chronicles >= 0.10.3 & < 0.11.0", "chronos >= 4.0.4", "metrics", "secp256k1",
"stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.7", "bio",
"https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -30,7 +30,7 @@ proc runTest(filename: string, moreoptions: string = "") =
excstr.add(" " & moreoptions & " ")
if getEnv("CICOV").len > 0:
excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
exec excstr & " -r " & " tests/" & filename
exec excstr & " -r -d:libp2p_quic_support tests/" & filename
rmFile "tests/" & filename.toExe
proc buildSample(filename: string, run = false, extraFlags = "") =
@@ -56,13 +56,15 @@ task testinterop, "Runs interop tests":
runTest("testinterop")
task testpubsub, "Runs pubsub tests":
runTest("pubsub/testgossipinternal")
runTest("pubsub/testpubsub")
task testfilter, "Run PKI filter test":
runTest("testpkifilter")
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=")
task testintegration, "Runs integraion tests":
runTest("testintegration")
task test, "Runs the test suite":
runTest("testall")
exec "nimble testfilter"

441
libp2p/autotls/acme/api.nim Normal file
View File

@@ -0,0 +1,441 @@
import options, base64, sequtils, strutils, json
from times import DateTime, parse
import chronos/apps/http/httpclient, jwt, results, bearssl/pem
import ./utils
import ../../crypto/crypto
import ../../crypto/rsa
export ACMEError
const
LetsEncryptURL* = "https://acme-v02.api.letsencrypt.org"
LetsEncryptURLStaging* = "https://acme-staging-v02.api.letsencrypt.org"
Alg = "RS256"
DefaultChalCompletedRetries = 10
DefaultChalCompletedRetryTime = 1.seconds
DefaultFinalizeRetries = 10
DefaultFinalizeRetryTime = 1.seconds
DefaultRandStringSize = 256
ACMEHttpHeaders = [("Content-Type", "application/jose+json")]
type Nonce = string
type Kid = string
type ACMEDirectory = object
newNonce: string
newOrder: string
newAccount: string
type ACMEApi* = object
directory: ACMEDirectory
session: HttpSessionRef
acmeServerURL: string
type JWK = object
kty: string
n: string
e: string
# whether the request uses Kid or not
type ACMERequestType = enum
ACMEJwkRequest
ACMEKidRequest
type ACMERequestHeader = object
alg: string
typ: string
nonce: string
url: string
case kind: ACMERequestType
of ACMEJwkRequest:
jwk: JWK
of ACMEKidRequest:
kid: Kid
type ACMERegisterRequest* = object
termsOfServiceAgreed: bool
contact: seq[string]
type ACMEAccountStatus = enum
valid
deactivated
revoked
type ACMERegisterResponseBody = object
status*: ACMEAccountStatus
type ACMERegisterResponse* = object
kid*: Kid
status*: ACMEAccountStatus
type ACMEChallengeStatus* {.pure.} = enum
pending = "pending"
processing = "processing"
valid = "valid"
invalid = "invalid"
type ACMEChallenge = object
url*: string
`type`*: string
status*: ACMEChallengeStatus
token*: string
type ACMEChallengeIdentifier = object
`type`: string
value: string
type ACMEChallengeRequest = object
identifiers: seq[ACMEChallengeIdentifier]
type ACMEChallengeResponseBody = object
status: ACMEChallengeStatus
authorizations: seq[string]
finalize: string
type ACMEChallengeResponse* = object
status: ACMEChallengeStatus
authorizations: seq[string]
finalize: string
orderURL: string
type ACMEChallengeResponseWrapper* = object
finalizeURL*: string
orderURL*: string
dns01*: ACMEChallenge
type ACMEAuthorizationsResponse* = object
challenges: seq[ACMEChallenge]
type ACMECompletedResponse* = object
checkURL: string
type ACMEOrderStatus* {.pure.} = enum
pending = "pending"
ready = "ready"
processing = "processing"
valid = "valid"
invalid = "invalid"
type ACMECheckKind = enum
ACMEOrderCheck
ACMEChallengeCheck
type ACMECheckResponse* = object
case kind: ACMECheckKind
of ACMEOrderCheck:
orderStatus: ACMEOrderStatus
of ACMEChallengeCheck:
chalStatus: ACMEChallengeStatus
retryAfter: Duration
type ACMEFinalizedResponse* = object
type ACMEOrderResponse* = object
certificate: string
expires: string
type ACMECertificateResponse* = object
rawCertificate: string
certificateExpiry: DateTime
template handleError(msg: string, body: untyped): untyped =
  ## Runs `body` and normalizes every failure into ACMEError with `msg` as
  ## context; ACMEError and CancelledError propagate untouched so callers'
  ## {.raises.} lists stay accurate.
  try:
    body
  except ACMEError as exc:
    raise exc
  except CancelledError as exc:
    raise exc
  except JsonKindError as exc:
    raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
  except ValueError as exc:
    raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
  except HttpError as exc:
    raise newException(ACMEError, msg & ": Failed to connect to ACME server", exc)
  except CatchableError as exc:
    raise newException(ACMEError, msg & ": Unexpected error", exc)
proc new*(
T: typedesc[ACMEApi], acmeServerURL: string = LetsEncryptURL
): Future[ACMEApi] {.async: (raises: [ACMEError, CancelledError]).} =
let session = HttpSessionRef.new()
let directory = handleError("new API"):
let rawResponse =
await HttpClientRequestRef.get(session, acmeServerURL & "/directory").get().send()
let body = await rawResponse.getResponseBody()
body.to(ACMEDirectory)
ACMEApi(session: session, directory: directory, acmeServerURL: acmeServerURL)
proc requestNonce(
    self: ACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
  ## Fetches a fresh anti-replay nonce from the directory's newNonce
  ## endpoint; the nonce is carried in the Replay-Nonce response header.
  try:
    let resp =
      await HttpClientRequestRef.get(self.session, self.directory.newNonce).get().send()
    return resp.headers.getString("Replay-Nonce")
  except HttpError as exc:
    raise newException(ACMEError, "Failed to request new nonce from ACME server", exc)
# TODO: save n and e in account so we don't have to recalculate every time
proc acmeHeader(
self: ACMEApi, url: string, key: KeyPair, needsJwk: bool, kid: Opt[Kid]
): Future[ACMERequestHeader] {.async: (raises: [ACMEError, CancelledError]).} =
if not needsJwk and kid.isNone:
raise newException(ACMEError, "kid not set")
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
raise newException(ACMEError, "Unsupported signing key type")
let newNonce = await self.requestNonce()
if needsJwk:
let pubkey = key.pubkey.rsakey
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
ACMERequestHeader(
kind: ACMEJwkRequest,
alg: Alg,
typ: "JWT",
nonce: newNonce,
url: url,
jwk: JWK(kty: "RSA", n: base64UrlEncode(nArray), e: base64UrlEncode(eArray)),
)
else:
ACMERequestHeader(
kind: ACMEKidRequest,
alg: Alg,
typ: "JWT",
nonce: newNonce,
url: url,
kid: kid.get(),
)
proc post(
self: ACMEApi, url: string, payload: string
): Future[HttpClientResponseRef] {.
async: (raises: [ACMEError, HttpError, CancelledError])
.} =
await HttpClientRequestRef
.post(self.session, url, body = payload, headers = ACMEHttpHeaders)
.get()
.send()
proc createSignedAcmeRequest(
self: ACMEApi,
url: string,
payload: auto,
key: KeyPair,
needsJwk: bool = false,
kid: Opt[Kid] = Opt.none(Kid),
): Future[string] {.async: (raises: [ACMEError, CancelledError]).} =
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
raise newException(ACMEError, "Unsupported signing key type")
let acmeHeader = await self.acmeHeader(url, key, needsJwk, kid)
handleError("createSignedAcmeRequest"):
var token = toJWT(%*{"header": acmeHeader, "claims": payload})
let derPrivKey = key.seckey.rsakey.getBytes.get
let pemPrivKey: string = pemEncode(derPrivKey, "PRIVATE KEY")
token.sign(pemPrivKey)
$token.toFlattenedJson()
proc requestRegister*(
self: ACMEApi, key: KeyPair
): Future[ACMERegisterResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let registerRequest = ACMERegisterRequest(termsOfServiceAgreed: true)
handleError("acmeRegister"):
let payload = await self.createSignedAcmeRequest(
self.directory.newAccount, registerRequest, key, needsJwk = true
)
let rawResponse = await self.post(self.directory.newAccount, payload)
let body = await rawResponse.getResponseBody()
let headers = rawResponse.headers
let acmeResponseBody = body.to(ACMERegisterResponseBody)
ACMERegisterResponse(
status: acmeResponseBody.status, kid: headers.getString("location")
)
proc requestNewOrder(
self: ACMEApi, domains: seq[string], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
# request challenge from ACME server
let orderRequest = ACMEChallengeRequest(
identifiers: domains.mapIt(ACMEChallengeIdentifier(`type`: "dns", value: it))
)
handleError("requestNewOrder"):
let payload = await self.createSignedAcmeRequest(
self.directory.newOrder, orderRequest, key, kid = Opt.some(kid)
)
let rawResponse = await self.post(self.directory.newOrder, payload)
let body = await rawResponse.getResponseBody()
let headers = rawResponse.headers
let challengeResponseBody = body.to(ACMEChallengeResponseBody)
if challengeResponseBody.authorizations.len() == 0:
raise newException(ACMEError, "Authorizations field is empty")
ACMEChallengeResponse(
status: challengeResponseBody.status,
authorizations: challengeResponseBody.authorizations,
finalize: challengeResponseBody.finalize,
orderURL: headers.getString("location"),
)
proc requestAuthorizations(
self: ACMEApi, authorizations: seq[string], key: KeyPair, kid: Kid
): Future[ACMEAuthorizationsResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestAuthorizations"):
doAssert authorizations.len > 0
let rawResponse =
await HttpClientRequestRef.get(self.session, authorizations[0]).get().send()
let body = await rawResponse.getResponseBody()
body.to(ACMEAuthorizationsResponse)
proc requestChallenge*(
    self: ACMEApi, domains: seq[string], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponseWrapper] {.async: (raises: [ACMEError, CancelledError]).} =
  ## Creates a new order for `domains` and fetches its authorizations,
  ## returning the finalize/order URLs together with the dns-01 challenge.
  ## NOTE(review): `filterIt(...)[0]` raises an index error if the server
  ## offers no dns-01 challenge — confirm wrapping that in ACMEError.
  let challengeResponse = await self.requestNewOrder(domains, key, kid)
  let authorizationsResponse =
    await self.requestAuthorizations(challengeResponse.authorizations, key, kid)
  return ACMEChallengeResponseWrapper(
    finalizeURL: challengeResponse.finalize,
    orderURL: challengeResponse.orderURL,
    dns01: authorizationsResponse.challenges.filterIt(it.`type` == "dns-01")[0],
  )
proc requestCheck(
    self: ACMEApi, checkURL: string, checkKind: ACMECheckKind, key: KeyPair, kid: Kid
): Future[ACMECheckResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  ## Polls `checkURL` and decodes the JSON "status" field as either an
  ## order status or a challenge status (per `checkKind`), together with
  ## the server-suggested Retry-After delay.
  ##
  ## Fixes: the challenge branch raised "Invalid order status" (copy-paste
  ## from the order branch); the unused `headers` local is removed.
  handleError("requestCheck"):
    let rawResponse =
      await HttpClientRequestRef.get(self.session, checkURL).get().send()
    let body = await rawResponse.getResponseBody()
    # Fall back to the default delay when Retry-After is absent or malformed.
    let retryAfter =
      try:
        parseInt(rawResponse.headers.getString("Retry-After")).seconds
      except ValueError:
        DefaultChalCompletedRetryTime
    case checkKind
    of ACMEOrderCheck:
      try:
        ACMECheckResponse(
          kind: checkKind,
          orderStatus: parseEnum[ACMEOrderStatus](body["status"].getStr),
          retryAfter: retryAfter,
        )
      except ValueError:
        raise newException(ACMEError, "Invalid order status: " & body["status"].getStr)
    of ACMEChallengeCheck:
      try:
        ACMECheckResponse(
          kind: checkKind,
          chalStatus: parseEnum[ACMEChallengeStatus](body["status"].getStr),
          retryAfter: retryAfter,
        )
      except ValueError:
        raise newException(
          ACMEError, "Invalid challenge status: " & body["status"].getStr
        )
proc requestCompleted(
self: ACMEApi, chalURL: string, key: KeyPair, kid: Kid
): Future[ACMECompletedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("challengeCompleted (send notify)"):
let payload =
await self.createSignedAcmeRequest(chalURL, %*{}, key, kid = Opt.some(kid))
let rawResponse = await self.post(chalURL, payload)
let body = await rawResponse.getResponseBody()
body.to(ACMECompletedResponse)
proc challengeCompleted*(
    self: ACMEApi,
    chalURL: string,
    key: KeyPair,
    kid: Kid,
    retries: int = DefaultChalCompletedRetries,
): Future[void] {.async: (raises: [ACMEError, CancelledError]).} =
  ## Notifies the ACME server that the challenge at `chalURL` is ready and
  ## polls its status until it becomes `valid`.
  ##
  ## Raises ACMEError on a terminal non-valid status, and — fixing a silent
  ## failure in the original, which simply returned — also when the status
  ## is still `pending` after `retries` polls.
  let completedResponse = await self.requestCompleted(chalURL, key, kid)
  # check until acme server is done (poll validation)
  for i in 0 .. retries:
    let checkResponse =
      await self.requestCheck(completedResponse.checkURL, ACMEChallengeCheck, key, kid)
    case checkResponse.chalStatus
    of ACMEChallengeStatus.pending:
      await sleepAsync(checkResponse.retryAfter) # try again after some delay
    of ACMEChallengeStatus.valid:
      return
    else:
      raise newException(
        ACMEError,
        "Failed challenge completion: expected 'valid', got '" &
          $checkResponse.chalStatus & "'",
      )
  raise newException(
    ACMEError, "Failed challenge completion: status still 'pending' after retries"
  )
proc requestFinalize*(
self: ACMEApi, domain: string, finalizeURL: string, key: KeyPair, kid: Kid
): Future[ACMEFinalizedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let derCSR = createCSR(domain)
let b64CSR = base64.encode(derCSR.toSeq, safe = true)
handleError("requestFinalize"):
let payload = await self.createSignedAcmeRequest(
finalizeURL, %*{"csr": b64CSR}, key, kid = Opt.some(kid)
)
let rawResponse = await self.post(finalizeURL, payload)
let body = await rawResponse.getResponseBody()
body.to(ACMEFinalizedResponse)
proc finalizeCertificate*(
    self: ACMEApi,
    domain: string,
    finalizeURL: string,
    orderURL: string,
    key: KeyPair,
    kid: Kid,
    retries: int = DefaultFinalizeRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
  ## Submits the CSR for `domain` via `finalizeURL`, then polls `orderURL`
  ## until the order is `valid` (returns true) or reaches a terminal
  ## failure / runs out of retries (returns false).
  ##
  ## Fixes: the outer `var checkResponse: ACMECheckResponse` was dead code,
  ## immediately shadowed by the loop-local `let`; the unused
  ## `finalizeResponse` binding is discarded explicitly.
  # call finalize and keep checking order until cert is valid (done)
  discard await self.requestFinalize(domain, finalizeURL, key, kid)
  handleError("finalizeCertificate (check finalized)"):
    for i in 0 .. retries:
      let checkResponse = await self.requestCheck(orderURL, ACMEOrderCheck, key, kid)
      case checkResponse.orderStatus
      of ACMEOrderStatus.valid:
        return true
      of ACMEOrderStatus.processing:
        await sleepAsync(checkResponse.retryAfter) # try again after some delay
      else:
        return false
    return false
proc requestGetOrder*(
self: ACMEApi, orderURL: string
): Future[ACMEOrderResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestGetOrder"):
let rawResponse =
await HttpClientRequestRef.get(self.session, orderURL).get().send()
let body = await rawResponse.getResponseBody()
body.to(ACMEOrderResponse)
proc downloadCertificate*(
self: ACMEApi, orderURL: string
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let orderResponse = await self.requestGetOrder(orderURL)
handleError("downloadCertificate"):
let rawResponse = await HttpClientRequestRef
.get(self.session, orderResponse.certificate)
.get()
.send()
ACMECertificateResponse(
rawCertificate: bytesToString(await rawResponse.getBodyBytes()),
certificateExpiry: parse(orderResponse.expires, "yyyy-MM-dd'T'HH:mm:ss'Z'"),
)
proc close*(self: ACMEApi): Future[void] {.async: (raises: [CancelledError]).} =
  ## Shuts down the underlying HTTP session; the api must not be used after.
  await self.session.closeWait()

View File

@@ -0,0 +1,43 @@
import base64, strutils, chronos/apps/http/httpclient, json
import ../../errors
import ../../transports/tls/certificate_ffi
type ACMEError* = object of LPError
proc base64UrlEncode*(data: seq[byte]): string =
  ## Encodes data using base64url (RFC 4648 §5) — no padding, URL-safe
  var encoded = base64.encode(data, safe = true)
  # base64 output carries at most two '=' padding chars; strip both.
  encoded.removeSuffix("=")
  encoded.removeSuffix("=")
  return encoded
proc getResponseBody*(
    response: HttpClientResponseRef
): Future[JsonNode] {.async: (raises: [ACMEError, CancelledError]).} =
  ## Reads the full response body and parses it as JSON, mapping any
  ## read/parse failure (other than cancellation) into ACMEError.
  try:
    let responseBody = bytesToString(await response.getBodyBytes()).parseJson()
    return responseBody
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    raise
      newException(ACMEError, "Unexpected error occurred while getting body bytes", exc)
  except Exception as exc: # this is required for nim 1.6
    raise
      newException(ACMEError, "Unexpected error occurred while getting body bytes", exc)
proc createCSR*(domain: string): string {.raises: [ACMEError].} =
  ## Generates an RSA key and a DER-encoded certificate signing request for
  ## `domain` via the certificate FFI, raising ACMEError on any FFI failure.
  ## NOTE(review): as visible here the proc never assigns `result` or
  ## returns `derCSR`, so it would yield an empty string — verify whether
  ## the conversion of `derCSR` into the returned value was truncated;
  ## callers feed this result into the ACME finalize step.
  var certKey: cert_key_t
  var certCtx: cert_context_t
  var derCSR: ptr cert_buffer = nil
  let personalizationStr = "libp2p_autotls"
  if cert_init_drbg(
    personalizationStr.cstring, personalizationStr.len.csize_t, certCtx.addr
  ) != CERT_SUCCESS:
    raise newException(ACMEError, "Failed to initialize certCtx")
  if cert_generate_key(certCtx, certKey.addr) != CERT_SUCCESS:
    raise newException(ACMEError, "Failed to generate cert key")
  if cert_signing_req(domain.cstring, certKey, derCSR.addr) != CERT_SUCCESS:
    raise newException(ACMEError, "Failed to create CSR")

View File

@@ -23,7 +23,7 @@ import
stream/connection,
multiaddress,
crypto/crypto,
transports/[transport, tcptransport, quictransport, memorytransport],
transports/[transport, tcptransport, memorytransport],
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
@@ -169,11 +169,14 @@ proc withTcpTransport*(
TcpTransport.new(flags, upgr)
)
proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
QuicTransport.new(upgr, privateKey)
)
when defined(libp2p_quic_support):
import transports/quictransport
proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
QuicTransport.new(upgr, privateKey)
)
proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
@@ -262,6 +265,10 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
let pkRes = PrivateKey.random(b.rng[])
let seckey = b.privKey.get(otherwise = pkRes.expect("Expected default Private Key"))
if b.secureManagers.len == 0:
debug "no secure managers defined. Adding noise by default"
b.secureManagers.add(SecureProtocol.Noise)
var secureManagerInstances: seq[Secure]
if SecureProtocol.Noise in b.secureManagers:
secureManagerInstances.add(Noise.new(b.rng, seckey).Secure)

View File

@@ -10,10 +10,11 @@
## This module implements CID (Content IDentifier).
{.push raises: [].}
{.used.}
import tables, hashes
import multibase, multicodec, multihash, vbuffer, varint
import stew/[base58, results]
import multibase, multicodec, multihash, vbuffer, varint, results
import stew/base58
export results
@@ -41,6 +42,7 @@ const ContentIdsList = [
multiCodec("dag-pb"),
multiCodec("dag-cbor"),
multiCodec("dag-json"),
multiCodec("libp2p-key"),
multiCodec("git-raw"),
multiCodec("eth-block"),
multiCodec("eth-block-list"),

View File

@@ -76,7 +76,7 @@ import nimcrypto/[rijndael, twofish, sha2, hash, hmac]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import ../utility
import stew/results
import results
export results, utility
# This is workaround for Nim's `import` bug

View File

@@ -18,7 +18,7 @@
{.push raises: [].}
import bearssl/[ec, rand]
import stew/results
import results
from stew/assign2 import assign
export results

View File

@@ -21,7 +21,8 @@ import bearssl/[ec, rand, hash]
import nimcrypto/utils as ncrutils
import minasn1
export minasn1.Asn1Error
import stew/[results, ctops]
import stew/ctops
import results
import ../utility

View File

@@ -18,7 +18,8 @@ import constants
import nimcrypto/[hash, sha2]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import stew/[results, ctops]
import results
import stew/ctops
import ../../utility

View File

@@ -11,7 +11,8 @@
{.push raises: [].}
import stew/[endians2, results, ctops]
import stew/[endians2, ctops]
import results
export results
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
@@ -291,28 +292,6 @@ proc asn1EncodeBitString*(
dest[2 + lenlen + bytelen - 1] = lastbyte and mask
res
proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte], value: T): int =
var v = value
if value <= cast[T](0x7F):
if len(dest) >= 1:
dest[0] = cast[byte](value)
1
else:
var s = 0
var res = 0
while v != 0:
v = v shr 7
s += 7
inc(res)
if len(dest) >= res:
var k = 0
while s != 0:
s -= 7
dest[k] = cast[byte](((value shr s) and cast[T](0x7F)) or cast[T](0x80))
inc(k)
dest[k - 1] = dest[k - 1] and 0x7F'u8
res
proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
## Encode array of bytes ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and return
## number of bytes (octets) used.
@@ -665,9 +644,6 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
return ok(field)
else:
return err(Asn1Error.NoSupport)
inclass = false
ttag = 0
else:
return err(Asn1Error.NoSupport)

View File

@@ -17,7 +17,8 @@
import bearssl/[rsa, rand, hash]
import minasn1
import stew/[results, ctops]
import results
import stew/ctops
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import bearssl/rand
import secp256k1, stew/[byteutils, results], nimcrypto/[hash, sha2]
import secp256k1, results, stew/byteutils, nimcrypto/[hash, sha2]
export sha2, results, rand

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import chronos
import stew/results
import results
import peerid, stream/connection, transports/transport
export results

View File

@@ -9,8 +9,7 @@
import std/tables
import stew/results
import pkg/[chronos, chronicles, metrics]
import pkg/[chronos, chronicles, metrics, results]
import
dial,
@@ -125,9 +124,13 @@ proc expandDnsAddr(
for resolvedAddress in resolved:
let lastPart = resolvedAddress[^1].tryGet()
if lastPart.protoCode == Result[MultiCodec, string].ok(multiCodec("p2p")):
let
var peerIdBytes: seq[byte]
try:
peerIdBytes = lastPart.protoArgument().tryGet()
addrPeerId = PeerId.init(peerIdBytes).tryGet()
except ResultError[string]:
raiseAssert "expandDnsAddr failed in protoArgument: " & getCurrentExceptionMsg()
let addrPeerId = PeerId.init(peerIdBytes).tryGet()
result.add((resolvedAddress[0 ..^ 2].tryGet(), Opt.some(addrPeerId)))
else:
result.add((resolvedAddress, peerId))

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import std/sequtils
import chronos, chronicles, stew/results
import chronos, chronicles, results
import ../errors
type

View File

@@ -16,7 +16,8 @@
{.push raises: [].}
import tables
import stew/[base32, base58, base64, results]
import results
import stew/[base32, base58, base64]
type
MultiBaseStatus* {.pure.} = enum

View File

@@ -10,10 +10,11 @@
## This module implements MultiCodec.
{.push raises: [].}
{.used.}
import tables, hashes
import vbuffer
import stew/results
import results
export results
## List of officially supported codecs can BE found here
@@ -404,6 +405,7 @@ const MultiCodecList = [
# IPLD formats
("dag-pb", 0x70),
("dag-cbor", 0x71),
("libp2p-key", 0x72),
("dag-json", 0x129),
("git-raw", 0x78),
("eth-block", 0x90),

View File

@@ -22,12 +22,13 @@
## 2. MURMUR
{.push raises: [].}
{.used.}
import tables
import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
import varint, vbuffer, multicodec, multibase
import stew/base58
import stew/results
import results
export results
# This is workaround for Nim `import` bug.
export sha, sha2, keccak, blake2, hash, utils

View File

@@ -11,10 +11,12 @@
{.push raises: [].}
{.push public.}
{.used.}
import
std/[hashes, strutils],
stew/[base58, results],
stew/base58,
results,
chronicles,
nimcrypto/utils,
utility,

312
libp2p/peeridauth.nim Normal file
View File

@@ -0,0 +1,312 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import base64, json, strutils, uri, times
import chronos/apps/http/httpclient, results, chronicles, bio
import ./peerinfo, ./crypto/crypto, ./varint.nim
logScope:
topics = "libp2p peeridauth"
const
NimLibp2pUserAgent = "nim-libp2p"
PeerIDAuthPrefix = "libp2p-PeerID"
ChallengeCharset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
ChallengeDefaultLen = 48
type PeerIDAuthError* = object of LPError
type BearerToken* = object
token*: string
expires*: Opt[DateTime]
type PeerIDAuthOpaque = string
type PeerIDAuthSignature = string
type PeerIDAuthChallenge = string
type PeerIDAuthApi* = object
session: HttpSessionRef
rng: ref HmacDrbgContext
type PeerIDAuthAuthenticationResponse* = object
challengeClient: PeerIDAuthChallenge
opaque: PeerIDAuthOpaque
serverPubkey: PublicKey
type PeerIDAuthAuthorizationResponse* = object
sig: PeerIDAuthSignature
bearer: BearerToken
rawResponse: HttpClientResponseRef
type SigParam = object
k: string
v: seq[byte]
proc new*(T: typedesc[PeerIDAuthApi], rng: ref HmacDrbgContext): PeerIDAuthApi =
PeerIDAuthApi(session: HttpSessionRef.new(), rng: rng)
proc sampleChar(
    ctx: var HmacDrbgContext, choices: string
): char {.raises: [ValueError].} =
  ## Samples a random character from the input string using the DRBG context
  ## NOTE(review): `idx mod len` has a small modulo bias whenever
  ## `choices.len` does not divide 2^32 — confirm this is acceptable for
  ## challenge generation.
  if choices.len == 0:
    raise newException(ValueError, "Cannot sample from an empty string")
  var idx: uint32
  ctx.generate(idx)
  return choices[uint32(idx mod uint32(choices.len))]
proc randomChallenge(
    rng: ref HmacDrbgContext, challengeLen: int = ChallengeDefaultLen
): PeerIDAuthChallenge {.raises: [PeerIDAuthError].} =
  ## Generates a random challenge of `challengeLen` characters drawn from
  ## ChallengeCharset.
  ##
  ## Raises PeerIDAuthError when sampling fails.
  # Sample through the shared context directly: dereferencing into a local
  # copy (`var rng = rng[]`) would advance only the copy, leaving the
  # caller's DRBG state unchanged — every call would then produce the
  # same challenge.
  var challenge = ""
  try:
    for _ in 0 ..< challengeLen:
      challenge.add(rng[].sampleChar(ChallengeCharset))
  except ValueError as exc:
    raise newException(PeerIDAuthError, "Failed to generate challenge", exc)
  PeerIDAuthChallenge(challenge)
proc extractField(data, key: string): string {.raises: [PeerIDAuthError].} =
  ## Extracts the (possibly quoted) value of `key` from a comma-separated
  ## `key="value"` header such as WWW-Authenticate or Authentication-Info.
  ##
  ## Raises PeerIDAuthError when `key` is not present.
  for segment in data.split(","):
    let parts = segment.split("=", 1)
    if parts.len < 2:
      # No '=' in this segment (indexing [1] unguarded would be a Defect).
      continue
    # Match the exact key name: a plain `key in segment` substring test
    # would e.g. let "public-key" match a "server-public-key" field.
    # The first field may be preceded by the auth scheme name and a space.
    let name = parts[0].strip()
    if name == key or name.endsWith(" " & key):
      return parts[1].strip(chars = {' ', '"'})
  raise newException(PeerIDAuthError, "Failed to find " & key & " in " & data)
proc genDataToSign(
    parts: seq[SigParam], prefix: string = PeerIDAuthPrefix
): seq[byte] {.raises: [PeerIDAuthError].} =
  ## Builds the byte string to be signed: `prefix` followed by each param
  ## serialized as varint(len("k=" & v)) & "k" & "=" & v.
  ##
  ## Raises PeerIDAuthError when a length cannot be varint-encoded.
  var buf: seq[byte] = prefix.toByteSeq()
  for p in parts:
    # +1 accounts for the '=' separator between key and value.
    let varintLen = PB.encodeVarint(hint(p.k.len + p.v.len + 1)).valueOr:
      raise newException(PeerIDAuthError, "could not encode fields length to varint")
    buf.add varintLen
    buf.add (p.k & "=").toByteSeq()
    buf.add p.v
  return buf
proc getSigParams(
    clientSender: bool, hostname: string, challenge: string, publicKey: PublicKey
): seq[SigParam] =
  ## Returns the ordered parameter list to be signed per the PeerID Auth
  ## spec: the client signs (challenge-client, hostname, server-public-key);
  ## the server signs (challenge-server, client-public-key, hostname).
  # NOTE(review): publicKey.getBytes().get() assumes key serialization
  # cannot fail here — confirm for all supported key types.
  if clientSender:
    @[
      SigParam(k: "challenge-client", v: challenge.toByteSeq()),
      SigParam(k: "hostname", v: hostname.toByteSeq()),
      SigParam(k: "server-public-key", v: publicKey.getBytes().get()),
    ]
  else:
    @[
      SigParam(k: "challenge-server", v: challenge.toByteSeq()),
      SigParam(k: "client-public-key", v: publicKey.getBytes().get()),
      SigParam(k: "hostname", v: hostname.toByteSeq()),
    ]
proc sign(
    privateKey: PrivateKey,
    challenge: PeerIDAuthChallenge,
    publicKey: PublicKey,
    hostname: string,
    clientSender: bool = true,
): PeerIDAuthSignature {.raises: [PeerIDAuthError].} =
  ## Signs the spec-mandated parameter set with `privateKey` and returns
  ## the signature base64url-encoded (`safe = true`).
  # NOTE(review): .get() on the sign Result assumes signing cannot fail —
  # confirm for all supported key types.
  let bytesToSign =
    getSigParams(clientSender, hostname, challenge, publicKey).genDataToSign()
  PeerIDAuthSignature(
    base64.encode(privateKey.sign(bytesToSign).get().getBytes(), safe = true)
  )
proc checkSignature(
    serverSig: PeerIDAuthSignature,
    serverPublicKey: PublicKey,
    challengeServer: PeerIDAuthChallenge,
    clientPublicKey: PublicKey,
    hostname: string,
): bool {.raises: [PeerIDAuthError].} =
  ## Verifies the server's base64-encoded signature over the server-side
  ## parameter set (challenge-server, client-public-key, hostname).
  ## Returns true when the signature verifies.
  ##
  ## Raises PeerIDAuthError when the signature cannot be decoded or parsed.
  let bytesToSign =
    getSigParams(false, hostname, challengeServer, clientPublicKey).genDataToSign()
  var serverSignature: Signature
  try:
    # decode raises ValueError on malformed base64.
    if not serverSignature.init(base64.decode(serverSig).toByteSeq()):
      raise newException(
        PeerIDAuthError, "Failed to initialize Signature from base64 encoded sig"
      )
  except ValueError as exc:
    raise newException(PeerIDAuthError, "Failed to decode server's signature", exc)
  serverSignature.verify(
    bytesToSign.toOpenArray(0, bytesToSign.len - 1), serverPublicKey
  )
proc post(
    self: PeerIDAuthApi, uri: string, payload: string, authHeader: string
): Future[HttpClientResponseRef] {.async: (raises: [HttpError, CancelledError]).} =
  ## POSTs `payload` as JSON to `uri` with the given Authorization header
  ## and this module's User-Agent, returning the raw HTTP response.
  await HttpClientRequestRef
    .post(
      self.session,
      uri,
      body = payload,
      headers = [
        ("Content-Type", "application/json"),
        ("User-Agent", NimLibp2pUserAgent),
        ("Authorization", authHeader),
      ],
    )
    .get()
    .send()
proc requestAuthentication(
    self: PeerIDAuthApi, uri: Uri
): Future[PeerIDAuthAuthenticationResponse] {.
    async: (raises: [PeerIDAuthError, CancelledError])
.} =
  ## Step 1 of the handshake: GETs `uri` and parses the server's
  ## WWW-Authenticate header into challenge-client, opaque and the
  ## server's public key.
  ##
  ## Raises PeerIDAuthError when the request fails or the header is
  ## missing or malformed.
  let rawResponse =
    try:
      await HttpClientRequestRef.get(self.session, $uri).get().send()
    except HttpError as exc:
      raise newException(PeerIDAuthError, "Failed to start PeerID Auth", exc)
  let wwwAuthenticate = rawResponse.headers.getString("WWW-Authenticate")
  if wwwAuthenticate == "":
    raise newException(PeerIDAuthError, "WWW-authenticate not present in response")
  let serverPubkey: PublicKey =
    try:
      # The public-key field is base64-encoded; decode raises ValueError.
      PublicKey.init(decode(extractField(wwwAuthenticate, "public-key")).toByteSeq()).valueOr:
        raise newException(PeerIDAuthError, "Failed to initialize server public-key")
    except ValueError as exc:
      raise newException(PeerIDAuthError, "Failed to decode server public-key", exc)
  PeerIDAuthAuthenticationResponse(
    challengeClient: extractField(wwwAuthenticate, "challenge-client"),
    opaque: extractField(wwwAuthenticate, "opaque"),
    serverPubkey: serverPubkey,
  )
proc pubkeyBytes(pubkey: PublicKey): seq[byte] {.raises: [PeerIDAuthError].} =
  ## Serializes a public key to bytes, mapping any failure to
  ## PeerIDAuthError.
  try:
    pubkey.getBytes().valueOr:
      raise
        newException(PeerIDAuthError, "Failed to get bytes from PeerInfo's publicKey")
  except ValueError as exc:
    # NOTE(review): getBytes is Result-based; it is unclear which path can
    # raise ValueError here — confirm whether this handler is reachable.
    raise newException(
      PeerIDAuthError, "Failed to get bytes from PeerInfo's publicKey", exc
    )
proc parse3339DateTime(
    timeStr: string
): DateTime {.raises: [ValueError, TimeParseError].} =
  ## Parses an RFC 3339 timestamp of the form
  ## "yyyy-MM-dd'T'HH:mm:ss[.fff]Z" into a DateTime.
  ##
  ## The fractional part, when present, is interpreted as milliseconds
  ## (the wire format uses three digits).
  ## Raises ValueError or TimeParseError on malformed input.
  let parts = timeStr.strip(leading = false, chars = {'Z'}).split('.')
  result = parse(parts[0], "yyyy-MM-dd'T'HH:mm:ss")
  if parts.len > 1:
    # Guard the fractional component: indexing parts[1] unconditionally
    # raises an uncatchable IndexDefect for timestamps without '.'.
    result = result + initDuration(milliseconds = parseInt(parts[1]))
proc requestAuthorization(
    self: PeerIDAuthApi,
    peerInfo: PeerInfo,
    uri: Uri,
    challengeClient: PeerIDAuthChallenge,
    challengeServer: PeerIDAuthChallenge,
    serverPubkey: PublicKey,
    opaque: PeerIDAuthOpaque,
    payload: auto,
): Future[PeerIDAuthAuthorizationResponse] {.
    async: (raises: [PeerIDAuthError, CancelledError])
.} =
  ## Step 2 of the handshake: answers `challengeClient` with our
  ## signature, presents our own `challengeServer`, POSTs `payload`, and
  ## parses the Authentication-Info header (server signature + bearer).
  let clientPubkeyB64 = peerInfo.publicKey.pubkeyBytes().encode(safe = true)
  let sig = peerInfo.privateKey.sign(challengeClient, serverPubkey, uri.hostname)
  let authHeader =
    PeerIDAuthPrefix & " public-key=\"" & clientPubkeyB64 & "\"" & ", opaque=\"" & opaque &
    "\"" & ", challenge-server=\"" & challengeServer & "\"" & ", sig=\"" & sig & "\""
  let rawResponse =
    try:
      await self.post($uri, $payload, authHeader)
    except HttpError as exc:
      raise newException(
        PeerIDAuthError, "Failed to send Authorization for PeerID Auth", exc
      )
  let authenticationInfo = rawResponse.headers.getString("authentication-info")
  # Expiry is best-effort: any parsing failure yields a token without one.
  let bearerExpires =
    try:
      Opt.some(parse3339DateTime(extractField(authenticationInfo, "expires")))
    except ValueError, PeerIDAuthError, TimeParseError:
      Opt.none(DateTime)
  PeerIDAuthAuthorizationResponse(
    sig: PeerIDAuthSignature(extractField(authenticationInfo, "sig")),
    bearer: BearerToken(
      token: extractField(authenticationInfo, "bearer"), expires: bearerExpires
    ),
    rawResponse: rawResponse,
  )
proc sendWithoutBearer(
    self: PeerIDAuthApi, uri: Uri, peerInfo: PeerInfo, payload: auto
): Future[(BearerToken, HttpClientResponseRef)] {.
    async: (raises: [PeerIDAuthError, CancelledError])
.} =
  ## Performs the full PeerID Auth handshake and sends `payload`:
  ## fetches the server's challenge, answers it alongside our own
  ## challenge-server, verifies the server's signature, and returns the
  ## obtained bearer token together with the HTTP response.
  # Authenticate in three ways as per the PeerID Auth spec
  # https://github.com/libp2p/specs/blob/master/http/peer-id-auth.md
  let authenticationResponse = await self.requestAuthentication(uri)
  let challengeServer = self.rng.randomChallenge()
  let authorizationResponse = await self.requestAuthorization(
    peerInfo, uri, authenticationResponse.challengeClient, challengeServer,
    authenticationResponse.serverPubkey, authenticationResponse.opaque, payload,
  )
  # Reject servers whose signature over our challenge-server fails to verify.
  if not checkSignature(
    authorizationResponse.sig, authenticationResponse.serverPubkey, challengeServer,
    peerInfo.publicKey, uri.hostname,
  ):
    raise newException(PeerIDAuthError, "Failed to validate server's signature")
  return (authorizationResponse.bearer, authorizationResponse.rawResponse)
proc sendWithBearer(
    self: PeerIDAuthApi,
    uri: Uri,
    peerInfo: PeerInfo,
    payload: auto,
    bearer: BearerToken,
): Future[(BearerToken, HttpClientResponseRef)] {.
    async: (raises: [PeerIDAuthError, CancelledError])
.} =
  ## Sends `payload` reusing an already-obtained bearer token.
  ## Raises PeerIDAuthError when the token has expired or the POST fails.
  let expiry = bearer.expires
  if expiry.isSome and now() >= DateTime(expiry.get):
    raise newException(PeerIDAuthError, "Bearer expired")
  let header = PeerIDAuthPrefix & " bearer=\"" & bearer.token & "\""
  try:
    let response = await self.post($uri, $payload, header)
    return (bearer, response)
  except HttpError as exc:
    raise newException(
      PeerIDAuthError, "Failed to send request with bearer token for PeerID Auth", exc
    )
proc send*(
    self: PeerIDAuthApi,
    uri: Uri,
    peerInfo: PeerInfo,
    payload: auto,
    bearer: BearerToken = BearerToken(),
): Future[(BearerToken, HttpClientResponseRef)] {.
    async: (raises: [PeerIDAuthError, CancelledError])
.} =
  ## Sends `payload` to `uri`, authenticating via PeerID Auth. Performs
  ## the full handshake when no bearer token is supplied; otherwise
  ## reuses (and returns) the given token.
  let haveBearer = bearer.token.len > 0
  if haveBearer:
    return await self.sendWithBearer(uri, peerInfo, payload, bearer)
  return await self.sendWithoutBearer(uri, peerInfo, payload)
proc close*(self: PeerIDAuthApi): Future[void] {.async: (raises: [CancelledError]).} =
  ## Closes the underlying HTTP session and releases its resources.
  await self.session.closeWait()

View File

@@ -11,7 +11,7 @@
{.push public.}
import std/sequtils
import pkg/[chronos, chronicles, stew/results]
import pkg/[chronos, chronicles, results]
import peerid, multiaddress, multicodec, crypto/crypto, routing_record, errors, utility
export peerid, multiaddress, crypto, routing_record, errors, results

View File

@@ -160,10 +160,10 @@ proc updatePeerInfo*(
peerStore[KeyBook][info.peerId] = pubkey
info.agentVersion.withValue(agentVersion):
peerStore[AgentBook][info.peerId] = agentVersion.string
peerStore[AgentBook][info.peerId] = agentVersion
info.protoVersion.withValue(protoVersion):
peerStore[ProtoVersionBook][info.peerId] = protoVersion.string
peerStore[ProtoVersionBook][info.peerId] = protoVersion
if info.protos.len > 0:
peerStore[ProtoBook][info.peerId] = info.protos

View File

@@ -11,7 +11,7 @@
{.push raises: [].}
import ../varint, ../utility, stew/[endians2, results]
import ../varint, ../utility, stew/endians2, results
export results, utility
{.push public.}

View File

@@ -9,7 +9,7 @@
{.push raises: [].}
import stew/results
import results
import chronos, chronicles
import ../../../switch, ../../../multiaddress, ../../../peerid
import core

View File

@@ -9,8 +9,8 @@
{.push raises: [].}
import stew/[results, objects]
import chronos, chronicles
import stew/objects
import results, chronos, chronicles
import ../../../multiaddress, ../../../peerid, ../../../errors
import ../../../protobuf/minprotobuf

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import std/[sets, sequtils]
import stew/results
import results
import chronos, chronicles
import
../../protocol,

View File

@@ -11,7 +11,7 @@
import std/sequtils
import stew/results
import results
import chronos, chronicles
import core

View File

@@ -10,8 +10,8 @@
{.push raises: [].}
import std/[sets, sequtils]
import stew/[results, objects]
import chronos, chronicles
import stew/objects
import results, chronos, chronicles
import core
import

View File

@@ -10,7 +10,8 @@
{.push raises: [].}
import macros
import stew/[objects, results]
import stew/objects
import results
import ../../../peerinfo, ../../../signed_envelope
import ../../../protobuf/minprotobuf

View File

@@ -13,8 +13,7 @@
{.push raises: [].}
import std/[sequtils, options, strutils, sugar]
import stew/results
import chronos, chronicles
import results, chronos, chronicles
import
../protobuf/minprotobuf,
../peerinfo,

View File

@@ -9,7 +9,7 @@
{.push raises: [].}
import chronos, stew/results
import chronos, results
import ../stream/connection
export results

View File

@@ -192,7 +192,7 @@ method init*(f: FloodSub) =
f.codec = FloodSubCodec
method publish*(
f: FloodSub, topic: string, data: seq[byte]
f: FloodSub, topic: string, data: seq[byte], useCustomConn: bool = false
): Future[int] {.async: (raises: []).} =
# base returns always 0
discard await procCall PubSub(f).publish(topic, data)

View File

@@ -29,7 +29,7 @@ import
../../utility,
../../switch
import stew/results
import results
export results
import ./gossipsub/[types, scoring, behavior], ../../utils/heartbeat
@@ -702,24 +702,27 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
# Send unsubscribe (in reverse order to sub/graft)
procCall PubSub(g).onTopicSubscription(topic, subscribed)
method publish*(
proc makePeersForPublishUsingCustomConn(
g: GossipSub, topic: string
): HashSet[PubSubPeer] =
assert g.customConnCallbacks.isSome,
"GossipSub misconfiguration: useCustomConn was true, but no customConnCallbacks provided"
trace "Selecting peers via custom connection callback"
return g.customConnCallbacks.get().customPeerSelectionCB(
g.gossipsub.getOrDefault(topic),
g.subscribedDirectPeers.getOrDefault(topic),
g.mesh.getOrDefault(topic),
g.fanout.getOrDefault(topic),
)
proc makePeersForPublishDefault(
g: GossipSub, topic: string, data: seq[byte]
): Future[int] {.async: (raises: []).} =
logScope:
topic
if topic.len <= 0: # data could be 0/empty
debug "Empty topic, skipping publish"
return 0
# base returns always 0
discard await procCall PubSub(g).publish(topic, data)
trace "Publishing message on topic", data = data.shortLog
): HashSet[PubSubPeer] =
var peers: HashSet[PubSubPeer]
# add always direct peers
# Always include direct peers
peers.incl(g.subscribedDirectPeers.getOrDefault(topic))
if topic in g.topics: # if we're subscribed use the mesh
@@ -769,6 +772,29 @@ method publish*(
# ultimately is not sent)
g.lastFanoutPubSub[topic] = Moment.fromNow(g.parameters.fanoutTTL)
return peers
method publish*(
g: GossipSub, topic: string, data: seq[byte], useCustomConn: bool = false
): Future[int] {.async: (raises: []).} =
logScope:
topic
if topic.len <= 0: # data could be 0/empty
debug "Empty topic, skipping publish"
return 0
# base returns always 0
discard await procCall PubSub(g).publish(topic, data)
trace "Publishing message on topic", data = data.shortLog
let peers =
if useCustomConn:
g.makePeersForPublishUsingCustomConn(topic)
else:
g.makePeersForPublishDefault(topic, data)
if peers.len == 0:
let topicPeers = g.gossipsub.getOrDefault(topic).toSeq()
debug "No peers for topic, skipping publish",
@@ -807,7 +833,12 @@ method publish*(
if g.parameters.sendIDontWantOnPublish and isLargeMessage(msg, msgId):
g.sendIDontWant(msg, msgId, peers)
g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
g.broadcast(
peers,
RPCMsg(messages: @[msg]),
isHighPriority = true,
useCustomConn = useCustomConn,
)
if g.knownTopics.contains(topic):
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = [topic])

View File

@@ -305,9 +305,9 @@ proc handleIHave*(
proc handleIDontWant*(g: GossipSub, peer: PubSubPeer, iDontWants: seq[ControlIWant]) =
for dontWant in iDontWants:
for messageId in dontWant.messageIDs:
if peer.iDontWants[^1].len > 1000:
if peer.iDontWants[0].len >= IDontWantMaxCount:
break
peer.iDontWants[^1].incl(g.salt(messageId))
peer.iDontWants[0].incl(g.salt(messageId))
proc handleIWant*(
g: GossipSub, peer: PubSubPeer, iwants: seq[ControlIWant]
@@ -538,8 +538,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
it.peerId notin backingOff:
avail.add(it)
# by spec, grab only 2
if avail.len > 1:
# by spec, grab only up to MaxOpportunisticGraftPeers
if avail.len >= MaxOpportunisticGraftPeers:
break
for peer in avail:
@@ -690,7 +690,7 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] =
for peer in allPeers:
control.mgetOrPut(peer, ControlMessage()).ihave.add(ihave)
for msgId in ihave.messageIDs:
peer.sentIHaves[^1].incl(msgId)
peer.sentIHaves[0].incl(msgId)
libp2p_gossipsub_cache_window_size.set(cacheWindowSize.int64)

View File

@@ -50,6 +50,9 @@ const
# rust sigp: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/config.rs#L572
# go: https://github.com/libp2p/go-libp2p-pubsub/blob/08c17398fb11b2ab06ca141dddc8ec97272eb772/gossipsub.go#L155
IHaveMaxLength* = 5000
IDontWantMaxCount* = 1000
# maximum number of IDontWant messages in one slot of the history
MaxOpportunisticGraftPeers* = 2
type
TopicInfo* = object # gossip 1.1 related

View File

@@ -31,7 +31,7 @@ import
../../errors,
../../utility
import stew/results
import results
export results
export tables, sets
@@ -176,6 +176,7 @@ type
rng*: ref HmacDrbgContext
knownTopics*: HashSet[string]
customConnCallbacks*: Option[CustomConnectionCallbacks]
method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
## handle peer disconnects
@@ -187,7 +188,11 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
libp2p_pubsub_peers.set(p.peers.len.int64)
proc send*(
p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool
p: PubSub,
peer: PubSubPeer,
msg: RPCMsg,
isHighPriority: bool,
useCustomConn: bool = false,
) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to the specified remote peer in the PubSub network.
##
@@ -200,13 +205,14 @@ proc send*(
## priority messages have been sent.
trace "sending pubsub message to peer", peer, payload = shortLog(msg)
peer.send(msg, p.anonymize, isHighPriority)
peer.send(msg, p.anonymize, isHighPriority, useCustomConn)
proc broadcast*(
p: PubSub,
sendPeers: auto, # Iteratble[PubSubPeer]
msg: RPCMsg,
isHighPriority: bool,
useCustomConn: bool = false,
) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to a specified group of peers in the PubSub network.
##
@@ -261,12 +267,12 @@ proc broadcast*(
if anyIt(sendPeers, it.hasObservers):
for peer in sendPeers:
p.send(peer, msg, isHighPriority)
p.send(peer, msg, isHighPriority, useCustomConn)
else:
# Fast path that only encodes message once
let encoded = encodeRpcMsg(msg, p.anonymize)
for peer in sendPeers:
asyncSpawn peer.sendEncoded(encoded, isHighPriority)
asyncSpawn peer.sendEncoded(encoded, isHighPriority, useCustomConn)
proc sendSubs*(
p: PubSub, peer: PubSubPeer, topics: openArray[string], subscribe: bool
@@ -373,8 +379,14 @@ method getOrCreatePeer*(
p.onPubSubPeerEvent(peer, event)
# create new pubsub peer
let pubSubPeer =
PubSubPeer.new(peerId, getConn, onEvent, protoNegotiated, p.maxMessageSize)
let pubSubPeer = PubSubPeer.new(
peerId,
getConn,
onEvent,
protoNegotiated,
p.maxMessageSize,
customConnCallbacks = p.customConnCallbacks,
)
debug "created new pubsub peer", peerId
p.peers[peerId] = pubSubPeer
@@ -558,7 +570,7 @@ proc subscribe*(p: PubSub, topic: string, handler: TopicHandler) {.public.} =
p.updateTopicMetrics(topic)
method publish*(
p: PubSub, topic: string, data: seq[byte]
p: PubSub, topic: string, data: seq[byte], useCustomConn: bool = false
): Future[int] {.base, async: (raises: []), public.} =
## publish to a ``topic``
##
@@ -589,7 +601,7 @@ method addValidator*(
method removeValidator*(
p: PubSub, topic: varargs[string], hook: ValidatorHandler
) {.base, public.} =
) {.base, public, gcsafe.} =
for t in topic:
p.validators.withValue(t, validators):
validators[].excl(hook)
@@ -648,6 +660,8 @@ proc init*[PubParams: object | bool](
maxMessageSize: int = 1024 * 1024,
rng: ref HmacDrbgContext = newRng(),
parameters: PubParams = false,
customConnCallbacks: Option[CustomConnectionCallbacks] =
none(CustomConnectionCallbacks),
): P {.raises: [InitializationError], public.} =
let pubsub =
when PubParams is bool:
@@ -663,6 +677,7 @@ proc init*[PubParams: object | bool](
maxMessageSize: maxMessageSize,
rng: rng,
topicsHigh: int.high,
customConnCallbacks: customConnCallbacks,
)
else:
P(
@@ -678,6 +693,7 @@ proc init*[PubParams: object | bool](
maxMessageSize: maxMessageSize,
rng: rng,
topicsHigh: int.high,
customConnCallbacks: customConnCallbacks,
)
proc peerEventHandler(

View File

@@ -9,8 +9,8 @@
{.push raises: [].}
import std/[sequtils, strutils, tables, hashes, options, sets, deques]
import stew/results
import std/[sequtils, tables, hashes, options, sets, deques]
import results
import chronos, chronicles, nimcrypto/sha2, metrics
import chronos/ratelimit
import
@@ -95,6 +95,21 @@ type
# Task for processing non-priority message queue.
sendNonPriorityTask: Future[void]
CustomConnCreationProc* = proc(
destAddr: Option[MultiAddress], destPeerId: PeerId, codec: string
): Connection {.gcsafe, raises: [].}
CustomPeerSelectionProc* = proc(
allPeers: HashSet[PubSubPeer],
directPeers: HashSet[PubSubPeer],
meshPeers: HashSet[PubSubPeer],
fanoutPeers: HashSet[PubSubPeer],
): HashSet[PubSubPeer] {.gcsafe, raises: [].}
CustomConnectionCallbacks* = object
customConnCreationCB*: CustomConnCreationProc
customPeerSelectionCB*: CustomPeerSelectionProc
PubSubPeer* = ref object of RootObj
getConn*: GetConn # callback to establish a new send connection
onEvent*: OnEvent # Connectivity updates for peer
@@ -123,6 +138,7 @@ type
maxNumElementsInNonPriorityQueue*: int
# The max number of elements allowed in the non-priority queue.
disconnected: bool
customConnCallbacks*: Option[CustomConnectionCallbacks]
RPCHandler* =
proc(peer: PubSubPeer, data: seq[byte]): Future[void] {.async: (raises: []).}
@@ -356,21 +372,43 @@ proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async: (raises: [CancelledErro
trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
await sendMsgContinue(conn, conn.writeLp(msg))
proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] {.async: (raises: []).} =
if p.sendConn != nil and not p.sendConn.closed():
# Fast path that avoids copying msg (which happens for {.async.})
let conn = p.sendConn
proc sendMsg(
p: PubSubPeer, msg: seq[byte], useCustomConn: bool = false
): Future[void] {.async: (raises: []).} =
type ConnectionType = enum
ctCustom
ctSend
ctSlow
trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
var slowPath = false
let (conn, connType) =
if useCustomConn and p.customConnCallbacks.isSome:
let address = p.address
(
p.customConnCallbacks.get().customConnCreationCB(address, p.peerId, p.codec),
ctCustom,
)
elif p.sendConn != nil and not p.sendConn.closed():
(p.sendConn, ctSend)
else:
slowPath = true
(nil, ctSlow)
if not slowPath:
trace "sending encoded msg to peer",
conntype = $connType, conn = conn, encoded = shortLog(msg)
let f = conn.writeLp(msg)
if not f.completed():
sendMsgContinue(conn, f)
else:
f
else:
trace "sending encoded msg to peer via slow path"
sendMsgSlow(p, msg)
proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[void] =
proc sendEncoded*(
p: PubSubPeer, msg: seq[byte], isHighPriority: bool, useCustomConn: bool = false
): Future[void] =
## Asynchronously sends an encoded message to a specified `PubSubPeer`.
##
## Parameters:
@@ -399,7 +437,7 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[v
maxSize = p.maxMessageSize, msgSize = msg.len
Future[void].completed()
elif isHighPriority or emptyQueues:
let f = p.sendMsg(msg)
let f = p.sendMsg(msg, useCustomConn)
if not f.finished:
p.rpcmessagequeue.sendPriorityQueue.addLast(f)
when defined(pubsubpeer_queue_metrics):
@@ -458,7 +496,11 @@ iterator splitRPCMsg(
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
proc send*(
p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool
p: PubSubPeer,
msg: RPCMsg,
anonymize: bool,
isHighPriority: bool,
useCustomConn: bool = false,
) {.raises: [].} =
## Asynchronously sends an `RPCMsg` to a specified `PubSubPeer` with an option for anonymization.
##
@@ -489,11 +531,11 @@ proc send*(
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority)
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority, useCustomConn)
else:
# If the message size is within limits, send it as is
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
asyncSpawn p.sendEncoded(encoded, isHighPriority)
asyncSpawn p.sendEncoded(encoded, isHighPriority, useCustomConn)
proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
for sentIHave in p.sentIHaves.mitems():
@@ -552,6 +594,8 @@ proc new*(
maxMessageSize: int,
maxNumElementsInNonPriorityQueue: int = DefaultMaxNumElementsInNonPriorityQueue,
overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket),
customConnCallbacks: Option[CustomConnectionCallbacks] =
none(CustomConnectionCallbacks),
): T =
result = T(
getConn: getConn,
@@ -563,6 +607,7 @@ proc new*(
overheadRateLimitOpt: overheadRateLimitOpt,
rpcmessagequeue: RpcMessageQueue.new(),
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue,
customConnCallbacks: customConnCallbacks,
)
result.sentIHaves.addFirst(default(HashSet[MessageId]))
result.iDontWants.addFirst(default(HashSet[SaltedId]))

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import std/[hashes, sets]
import chronos/timer, stew/results
import chronos/timer, results
import ../../utility

View File

@@ -20,7 +20,6 @@ import ../../peerid
import ../../peerinfo
import ../../protobuf/minprotobuf
import ../../utility
import ../../errors
import secure, ../../crypto/[crypto, chacha20poly1305, curve25519, hkdf]

View File

@@ -11,15 +11,14 @@
{.push raises: [].}
import std/[strformat]
import stew/results
import results
import chronos, chronicles
import
../protocol,
../../stream/streamseq,
../../stream/connection,
../../multiaddress,
../../peerinfo,
../../errors
../../peerinfo
export protocol, results

View File

@@ -12,7 +12,7 @@
{.push raises: [].}
import std/[sequtils, times]
import pkg/stew/results
import pkg/results
import multiaddress, multicodec, peerid, protobuf/minprotobuf, signed_envelope
export peerid, multiaddress, signed_envelope

View File

@@ -10,8 +10,8 @@
{.push raises: [].}
import std/sequtils
import stew/[byteutils, results, endians2]
import chronos, chronos/transports/[osnet, ipnet], chronicles
import stew/endians2
import chronos, chronos/transports/[osnet, ipnet], chronicles, results
import ../[multiaddress, multicodec]
import ../switch
@@ -73,7 +73,6 @@ proc new*(
return T(networkInterfaceProvider: networkInterfaceProvider)
proc getProtocolArgument*(ma: MultiAddress, codec: MultiCodec): MaResult[seq[byte]] =
var buffer: seq[byte]
for item in ma:
let
ritem = ?item

View File

@@ -12,7 +12,7 @@
{.push raises: [].}
import std/sugar
import pkg/stew/[results, byteutils]
import pkg/stew/byteutils, pkg/results
import multicodec, crypto/crypto, protobuf/minprotobuf, vbuffer
export crypto

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import std/[strformat]
import stew/results
import results
import chronos, chronicles, metrics
import connection
import ../utility
@@ -34,8 +34,6 @@ when defined(libp2p_agents_metrics):
declareCounter libp2p_peers_traffic_read, "incoming traffic", labels = ["agent"]
declareCounter libp2p_peers_traffic_write, "outgoing traffic", labels = ["agent"]
declareCounter libp2p_network_bytes, "total traffic", labels = ["direction"]
func shortLog*(conn: ChronosStream): auto =
try:
if conn == nil:

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import std/[hashes, oids, strformat]
import stew/results
import results
import chronicles, chronos, metrics
import lpstream, ../multiaddress, ../peerinfo, ../errors
@@ -52,6 +52,8 @@ func shortLog*(conn: Connection): string =
chronicles.formatIt(Connection):
shortLog(it)
declarePublicCounter libp2p_network_bytes, "total traffic", labels = ["direction"]
method initStream*(s: Connection) =
if s.objName.len == 0:
s.objName = ConnectionTrackerName

View File

@@ -113,9 +113,9 @@ method initStream*(s: LPStream) {.base.} =
trackCounter(s.objName)
trace "Stream created", s, objName = s.objName, dir = $s.dir
proc join*(
method join*(
s: LPStream
): Future[void] {.async: (raises: [CancelledError], raw: true), public.} =
): Future[void] {.base, async: (raises: [CancelledError], raw: true), public.} =
## Wait for the stream to be closed
s.closeEvent.wait()
@@ -135,9 +135,9 @@ method readOnce*(
## available
raiseAssert("[LPStream.readOnce] abstract method not implemented!")
proc readExactly*(
method readExactly*(
s: LPStream, pbytes: pointer, nbytes: int
): Future[void] {.async: (raises: [CancelledError, LPStreamError]), public.} =
): Future[void] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
## Waits for `nbytes` to be available, then read
## them and return them
if s.atEof:
@@ -171,9 +171,9 @@ proc readExactly*(
trace "couldn't read all bytes, incomplete data", s, nbytes, read
raise newLPStreamIncompleteError()
proc readLine*(
method readLine*(
s: LPStream, limit = 0, sep = "\r\n"
): Future[string] {.async: (raises: [CancelledError, LPStreamError]), public.} =
): Future[string] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
## Reads up to `limit` bytes are read, or a `sep` is found
# TODO replace with something that exploits buffering better
var lim = if limit <= 0: -1 else: limit
@@ -199,9 +199,9 @@ proc readLine*(
if len(result) == lim:
break
proc readVarint*(
method readVarint*(
conn: LPStream
): Future[uint64] {.async: (raises: [CancelledError, LPStreamError]), public.} =
): Future[uint64] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
var buffer: array[10, byte]
for i in 0 ..< len(buffer):
@@ -218,9 +218,9 @@ proc readVarint*(
if true: # can't end with a raise apparently
raise (ref InvalidVarintError)(msg: "Cannot parse varint")
proc readLp*(
method readLp*(
s: LPStream, maxSize: int
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]), public.} =
): Future[seq[byte]] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
## read length prefixed msg, with the length encoded as a varint
let
length = await s.readVarint()
@@ -244,9 +244,11 @@ method write*(
# Write `msg` to stream, waiting for the write to be finished
raiseAssert("[LPStream.write] abstract method not implemented!")
proc writeLp*(
method writeLp*(
s: LPStream, msg: openArray[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
): Future[void] {.
base, async: (raises: [CancelledError, LPStreamError], raw: true), public
.} =
## Write `msg` with a varint-encoded length prefix
let vbytes = PB.toBytes(msg.len().uint64)
var buf = newSeqUninitialized[byte](msg.len() + vbytes.len)
@@ -254,9 +256,11 @@ proc writeLp*(
buf[vbytes.len ..< buf.len] = msg
s.write(buf)
proc writeLp*(
method writeLp*(
s: LPStream, msg: string
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
): Future[void] {.
base, async: (raises: [CancelledError, LPStreamError], raw: true), public
.} =
writeLp(s, msg.toOpenArrayByte(0, msg.high))
proc write*(

View File

@@ -9,15 +9,12 @@
import locks
import tables
import std/sequtils
import stew/byteutils
import pkg/chronos
import pkg/chronicles
import ./transport
import ../multiaddress
import ../stream/connection
import ../stream/bridgestream
import ../muxers/muxer
type
MemoryTransportError* = object of transport.TransportError

View File

@@ -17,7 +17,6 @@ import ../multiaddress
import ../stream/connection
import ../crypto/crypto
import ../upgrademngrs/upgrade
import ../muxers/muxer
import ./memorymanager
export connection

View File

@@ -1,7 +1,8 @@
import std/sequtils
import pkg/chronos
import pkg/chronicles
import pkg/quic
import chronos
import chronicles
import metrics
import quic
import results
import ../multiaddress
import ../multicodec
@@ -58,6 +59,7 @@ method readOnce*(
result = min(nbytes, stream.cached.len)
copyMem(pbytes, addr stream.cached[0], result)
stream.cached = stream.cached[result ..^ 1]
libp2p_network_bytes.inc(result.int64, labelValues = ["in"])
except CatchableError as exc:
raise newLPStreamEOFError()
@@ -66,6 +68,7 @@ method write*(
stream: QuicStream, bytes: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
mapExceptions(await stream.stream.write(bytes))
libp2p_network_bytes.inc(bytes.len.int64, labelValues = ["out"])
{.pop.}

View File

@@ -959,4 +959,133 @@ void cert_free_key(cert_key_t key) {
struct cert_key_s *k = (struct cert_key_s *)key;
EVP_PKEY_free(k->pkey);
free(k);
}
// Validates a certificate Common Name:
//  - must be non-empty and at most 253 characters,
//  - each dot-separated label must be 1..63 characters,
//  - one trailing root dot is ignored ("example.com." is valid),
//  - empty labels (leading, consecutive, or trailing dots) are rejected.
// Returns CERT_SUCCESS or a CERT_ERROR_CN_* code.
cert_error_t check_cn(const char *cn) {
  if (!cn || cn[0] == '\0') {
    return CERT_ERROR_CN_EMPTY;
  }
  size_t len = strlen(cn);
  if (len > 253) {
    return CERT_ERROR_CN_TOO_LONG;
  }
  // Trim one trailing root dot before scanning.
  if (cn[len - 1] == '.') {
    len--;
  }
  // Scan labels in place — no allocation needed. The previous strtok-based
  // version collapsed consecutive delimiters, so it missed a leading empty
  // label (".example.com") and an empty label before the root dot
  // ("example.com.." after the trim); it also left strdup unchecked.
  size_t label_len = 0;
  for (size_t i = 0; i < len; i++) {
    if (cn[i] == '.') {
      if (label_len == 0) {
        return CERT_ERROR_CN_EMPTY_LABEL; // e.g. ".foo" or "a..b"
      }
      label_len = 0;
    } else if (++label_len > 63) {
      return CERT_ERROR_CN_LABEL_TOO_LONG;
    }
  }
  if (len > 0 && label_len == 0) {
    return CERT_ERROR_CN_EMPTY_LABEL; // dot immediately before the root dot
  }
  return CERT_SUCCESS;
}
// Builds a PKCS#10 certificate signing request for `cn`, signed with `key`,
// and returns it DER-encoded in `*csr_buffer`.
// The CN is validated (check_cn) and placed in a SAN dNSName extension.
// Returns CERT_SUCCESS or a negative CERT_ERROR_* code; on failure
// *csr_buffer is freed and set to NULL.
cert_error_t cert_signing_req(const char *cn, cert_key_t key, cert_buffer **csr_buffer) {
  cert_error_t ret_code = CERT_SUCCESS;
  X509_REQ *x509_req = NULL;
  X509_EXTENSION *ext = NULL;
  X509V3_CTX ctx;
  STACK_OF(X509_EXTENSION) *exts = NULL;
  unsigned char *der = NULL;
  // i2d_X509_REQ returns int; storing it in a size_t made the `< 0`
  // error check below dead code (size_t is unsigned).
  int der_len = 0;

  ret_code = check_cn(cn);
  if (ret_code != CERT_SUCCESS) {
    goto cleanup;
  }

  if (!key || !(key->pkey)) {
    ret_code = CERT_ERROR_NO_PUBKEY;
    goto cleanup;
  }
  EVP_PKEY *pkey = key->pkey;

  x509_req = X509_REQ_new();
  if (!x509_req) {
    ret_code = CERT_ERROR_X509_REQ_GEN;
    goto cleanup;
  }

  if (!X509_REQ_set_pubkey(x509_req, pkey)) {
    ret_code = CERT_ERROR_PUBKEY_SET;
    goto cleanup;
  }

  // Build the SAN extension covering the CN.
  X509V3_set_ctx(&ctx, NULL, NULL, x509_req, NULL, 0);
  char san_str[258]; // max of 253 from cn + 4 "DNS:" + \0
  snprintf(san_str, sizeof(san_str), "DNS:%s", cn);
  ext = X509V3_EXT_conf_nid(NULL, &ctx, NID_subject_alt_name, san_str);
  if (!ext) {
    ret_code = CERT_ERROR_X509_SAN;
    goto cleanup;
  }

  exts = sk_X509_EXTENSION_new_null();
  if (!exts || !sk_X509_EXTENSION_push(exts, ext)) {
    // NOTE(review): if the push itself fails, `ext` is not owned by `exts`
    // and is leaked — consider freeing it explicitly on that path.
    ret_code = CERT_ERROR_X509_SAN;
    goto cleanup;
  }
  if (!X509_REQ_add_extensions(x509_req, exts)) {
    ret_code = CERT_ERROR_X509_SAN;
    goto cleanup;
  }

  if (!X509_REQ_sign(x509_req, pkey, EVP_sha256())) {
    ret_code = CERT_ERROR_SIGN;
    goto cleanup;
  }

  // DER-encode: returns the encoded length, or a negative value on error.
  der_len = i2d_X509_REQ(x509_req, &der);
  if (der_len < 0) {
    ret_code = CERT_ERROR_X509_REQ_DER;
    goto cleanup;
  }

  ret_code = init_cert_buffer(csr_buffer, der, der_len);
  if (ret_code < 0) {
    goto cleanup;
  }

cleanup:
  if (exts)
    sk_X509_EXTENSION_pop_free(exts, X509_EXTENSION_free);
  if (x509_req)
    X509_REQ_free(x509_req);
  if (der)
    OPENSSL_free(der);
  if (ret_code != CERT_SUCCESS && csr_buffer) {
    // NOTE(review): this assumes *csr_buffer is NULL or valid on entry;
    // a caller passing an uninitialized pointer would cause a bad free.
    cert_free_buffer(*csr_buffer);
    *csr_buffer = NULL;
  }
  return ret_code;
}

View File

@@ -54,6 +54,14 @@ typedef int32_t cert_error_t;
#define CERT_ERROR_PUBKEY_DER_CONV -41
#define CERT_ERROR_INIT_KEYGEN -42
#define CERT_ERROR_SET_CURVE -43
#define CERT_ERROR_X509_REQ_GEN -44
#define CERT_ERROR_X509_REQ_DER -45
#define CERT_ERROR_NO_PUBKEY -46
#define CERT_ERROR_X509_SAN -47
#define CERT_ERROR_CN_TOO_LONG -48
#define CERT_ERROR_CN_LABEL_TOO_LONG -49
#define CERT_ERROR_CN_EMPTY_LABEL -50
#define CERT_ERROR_CN_EMPTY -51
typedef enum { CERT_FORMAT_DER = 0, CERT_FORMAT_PEM = 1 } cert_format_t;
@@ -184,4 +192,15 @@ void cert_free_key(cert_key_t key);
*/
void cert_free_buffer(cert_buffer *buffer);
/**
* Create a X.509 certificate request
*
* @param cn Domain for which we're requesting the certificate
* @param key Public key of the requesting client
* @param csr_buffer Pointer to the buffer that will be set to the CSR in DER format
*
* @return CERT_SUCCESS on successful execution, an error code otherwise
*/
cert_error_t cert_signing_req(const char *cn, cert_key_t key, cert_buffer **csr_buffer);
#endif /* LIBP2P_CERT_H */

View File

@@ -55,10 +55,10 @@ type EncodingFormat* = enum
proc cert_format_t(self: EncodingFormat): cert_format_t =
if self == EncodingFormat.DER: CERT_FORMAT_DER else: CERT_FORMAT_PEM
proc toCertBuffer*(self: seq[uint8]): cert_buffer =
  ## Wrap a byte sequence in a non-owning `cert_buffer` view for the C API.
  ## NOTE(review): the buffer aliases `self`'s storage — `self` must outlive
  ## the returned buffer; an empty seq raises on the `self[0]` access.
  # The duplicated non-exported signature line (diff-extraction residue) was
  # removed; the exported form is kept.
  cert_buffer(data: self[0].unsafeAddr, length: self.len.csize_t)
proc toSeq*(self: ptr cert_buffer): seq[byte] =
  ## Copy the `length` bytes referenced by a C `cert_buffer` into a fresh
  ## Nim `seq[byte]` (the result owns its data, unlike the input view).
  # The duplicated non-exported signature line (diff-extraction residue) was
  # removed; the exported form is kept.
  toOpenArray(cast[ptr UncheckedArray[byte]](self.data), 0, self.length.int - 1).toSeq()
# Initialize entropy and DRBG contexts at the module level

View File

@@ -79,3 +79,7 @@ proc cert_free_buffer*(
proc cert_free_parsed*(
cert: ptr cert_parsed
): void {.cdecl, importc: "cert_free_parsed".}
# FFI binding for the C `cert_signing_req`: builds a DER-encoded CSR for
# domain `cn` signed with `key`, storing the result in `csr_buffer[]`.
# Returns CERT_SUCCESS (0) on success, a negative CERT_ERROR_* code otherwise.
proc cert_signing_req*(
cn: cstring, key: cert_key_t, csr_buffer: ptr ptr cert_buffer
): cert_error_t {.cdecl, importc: "cert_signing_req".}

View File

@@ -11,9 +11,8 @@
{.push raises: [].}
import std/strformat
import chronos, chronicles, strutils
import stew/[byteutils, endians2, results, objects]
import chronos, chronicles, strutils, results
import stew/[byteutils, endians2, objects]
import ../multicodec
import
transport,

View File

@@ -12,7 +12,7 @@
{.push raises: [].}
import std/[sequtils]
import stew/results
import results
import chronos, chronicles
import
transport,

View File

@@ -10,7 +10,8 @@
{.push raises: [].}
import std/[sets, options, macros]
import stew/[byteutils, results]
import stew/byteutils
import results
export results

View File

@@ -18,7 +18,8 @@
{.push raises: [].}
import stew/[byteutils, leb128, results]
import stew/[byteutils, leb128]
import results
export leb128, results
type

View File

@@ -9,91 +9,115 @@
set -e

# Build the go-libp2p-daemon (p2pd) binary, with optional CI caching.
# The scraped diff interleaved the pre- and post-change lines; this is the
# deduplicated post-change version (flag parsing, quoted checkout, verbose
# retry output, forced-rebuild support).
force=false
verbose=false
CACHE_DIR=""               # optional CI cache dir
LIBP2P_COMMIT="124530a3"   # tags may be used as well

while [[ "$#" -gt 0 ]]; do
  case "$1" in
    -f|--force) force=true ;;
    -v|--verbose) verbose=true ;;
    -h|--help)
      echo "Usage: $0 [-f|--force] [-v|--verbose] [CACHE_DIR] [COMMIT]"
      exit 0
      ;;
    *)
      # First non-option is CACHE_DIR, second is LIBP2P_COMMIT
      if [[ -z "$CACHE_DIR" ]]; then
        CACHE_DIR="$1"
      elif [[ "$LIBP2P_COMMIT" == "124530a3" ]]; then
        LIBP2P_COMMIT="$1"
      else
        echo "Unknown argument: $1"
        exit 1
      fi
      ;;
  esac
  shift
done

SUBREPO_DIR="vendor/go/src/github.com/libp2p/go-libp2p-daemon"
if [[ ! -e "$SUBREPO_DIR" ]]; then
  # we're probably in nim-libp2p's CI
  SUBREPO_DIR="go-libp2p-daemon"
  rm -rf "$SUBREPO_DIR"
  git clone -q https://github.com/libp2p/go-libp2p-daemon
  cd "$SUBREPO_DIR"
  git checkout -q "$LIBP2P_COMMIT"
  cd ..
fi

## env vars
# verbosity level
[[ -z "$V" ]] && V=0
[[ -z "$BUILD_MSG" ]] && BUILD_MSG="Building p2pd ${LIBP2P_COMMIT}"

# Windows detection
if uname | grep -qiE "mingw|msys"; then
  EXE_SUFFIX=".exe"
  # otherwise it fails in AppVeyor due to https://github.com/git-for-windows/git/issues/2495
  GIT_TIMESTAMP_ARG="--date=unix" # available since Git 2.9.4
else
  EXE_SUFFIX=""
  GIT_TIMESTAMP_ARG="--date=format-local:%s" # available since Git 2.7.0
fi

TARGET_DIR="$(go env GOPATH)/bin"
TARGET_BINARY="${TARGET_DIR}/p2pd${EXE_SUFFIX}"

# Returns 0 (rebuild) or 1 (up to date). Also restores a cached binary
# into TARGET_DIR when the CI cache has one.
target_needs_rebuilding() {
  REBUILD=0
  NO_REBUILD=1

  if [[ -n "$CACHE_DIR" && -e "${CACHE_DIR}/p2pd${EXE_SUFFIX}" ]]; then
    mkdir -p "${TARGET_DIR}"
    cp -a "$CACHE_DIR"/* "${TARGET_DIR}/"
  fi

  # compare the built commit's timestamp to the date of the last commit (keep in mind that Git doesn't preserve file timestamps)
  if [[ -e "${TARGET_DIR}/timestamp" && $(cat "${TARGET_DIR}/timestamp") -eq $(cd "$SUBREPO_DIR"; git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG}) ]]; then
    return $NO_REBUILD
  else
    return $REBUILD
  fi
}

# Fetches dependencies (with retries), installs p2pd, records the built
# commit's timestamp, and refreshes the CI cache.
build_target() {
  echo -e "$BUILD_MSG"

  pushd "$SUBREPO_DIR"
  # Go module downloads can fail randomly in CI VMs, so retry them a few times
  MAX_RETRIES=5
  CURR=0
  while [[ $CURR -lt $MAX_RETRIES ]]; do
    FAILED=0
    go get ./... && break || FAILED=1
    CURR=$(( CURR + 1 ))
    if $verbose; then
      echo "retry #${CURR}"
    fi
  done
  if [[ $FAILED == 1 ]]; then
    echo "Error: still fails after retrying ${MAX_RETRIES} times."
    exit 1
  fi
  go install ./...

  # record the last commit's timestamp
  git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG} > "${TARGET_DIR}/timestamp"
  popd

  # update the CI cache
  if [[ -n "$CACHE_DIR" ]]; then
    rm -rf "$CACHE_DIR"
    mkdir "$CACHE_DIR"
    cp -a "$TARGET_DIR"/* "$CACHE_DIR"/
  fi
  echo "Binary built successfully: $TARGET_BINARY"
}

if $force || target_needs_rebuilding; then
  build_target
else
  echo "No rebuild needed."
fi

View File

@@ -1,4 +1,3 @@
import options, tables
import chronos, chronicles, stew/byteutils
import helpers
import ../libp2p

View File

@@ -1,15 +1,9 @@
{.used.}
import chronos, stew/[byteutils, results]
import chronos, results, stew/byteutils
import
../libp2p/
[
stream/connection,
transports/transport,
upgrademngrs/upgrade,
multiaddress,
errors,
]
[stream/connection, transports/transport, upgrademngrs/upgrade, multiaddress]
import ./helpers

View File

@@ -12,7 +12,7 @@ import ../libp2p/stream/chronosstream
import ../libp2p/muxers/mplex/lpchannel
import ../libp2p/protocols/secure/secure
import ../libp2p/switch
import ../libp2p/nameresolving/[nameresolver, mockresolver]
import ../libp2p/nameresolving/mockresolver
import errorhelpers
import utils/async_tests
@@ -49,8 +49,10 @@ template checkTrackers*() =
{.push warning[BareExcept]: off.}
try:
GC_fullCollect()
except CatchableError:
discard
except Defect as exc:
raise exc # Reraise to maintain call stack
except Exception:
raiseAssert "Unexpected exception during GC collection"
when defined(nimHasWarnBareExcept):
{.pop.}
@@ -92,7 +94,9 @@ proc new*(T: typedesc[TestBufferStream], writeHandler: WriteHandler): T =
testBufferStream.initStream()
testBufferStream
macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
macro checkUntilTimeoutCustom*(
timeout: Duration, sleepInterval: Duration, code: untyped
): untyped =
## Periodically checks a given condition until it is true or a timeout occurs.
##
## `code`: untyped - A condition expression that should eventually evaluate to true.
@@ -101,17 +105,17 @@ macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
## Examples:
## ```nim
## # Example 1:
## asyncTest "checkUntilCustomTimeout should pass if the condition is true":
## asyncTest "checkUntilTimeoutCustom should pass if the condition is true":
## let a = 2
## let b = 2
## checkUntilCustomTimeout(2.seconds):
## checkUntilTimeoutCustom(2.seconds):
## a == b
##
## # Example 2: Multiple conditions
## asyncTest "checkUntilCustomTimeout should pass if the conditions are true":
## asyncTest "checkUntilTimeoutCustom should pass if the conditions are true":
## let a = 2
## let b = 2
## checkUntilCustomTimeout(5.seconds)::
## checkUntilTimeoutCustom(5.seconds)::
## a == b
## a == 2
## b == 1
@@ -145,12 +149,12 @@ macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
if `combinedBoolExpr`:
return
else:
await sleepAsync(100.millis)
await sleepAsync(`sleepInterval`)
await checkExpiringInternal()
macro checkUntilTimeout*(code: untyped): untyped =
## Same as `checkUntilCustomTimeout` but with a default timeout of 10 seconds.
## Same as `checkUntilTimeoutCustom` but with a default timeout of 2s with 50ms interval.
##
## Examples:
## ```nim
@@ -171,7 +175,7 @@ macro checkUntilTimeout*(code: untyped): untyped =
## b == 1
## ```
result = quote:
checkUntilCustomTimeout(10.seconds, `code`)
checkUntilTimeoutCustom(2.seconds, 50.milliseconds, `code`)
proc unorderedCompare*[T](a, b: seq[T]): bool =
if a == b:

View File

@@ -1,126 +0,0 @@
import std/[os, options, strformat, sequtils]
import redis
import chronos, chronicles
import
../../libp2p/[
builders,
switch,
multicodec,
observedaddrmanager,
services/hpservice,
services/autorelayservice,
protocols/connectivity/autonat/client as aclient,
protocols/connectivity/relay/client as rclient,
protocols/connectivity/relay/relay,
protocols/connectivity/autonat/service,
protocols/ping,
]
import ../stubs/autonatclientstub
import ../errorhelpers
proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
  ## Assemble a TCP switch (Yamux + Noise + AutoNAT) listening on an
  ## ephemeral port, with a Ping handler mounted. When given, `r` is wired
  ## in as a circuit-relay client and `hpService` attached as a service.
  let rng = newRng()
  var sb = SwitchBuilder
    .new()
    .withRng(rng)
    .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
    .withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
    .withTcpTransport({ServerFlags.TcpNoDelay})
    .withYamux()
    .withAutonat()
    .withNoise()
  # Optional extras are appended onto the builder before constructing.
  if not hpService.isNil:
    sb = sb.withServices(@[hpService])
  if not r.isNil:
    sb = sb.withCircuitRelay(r)
  let sw = sb.build()
  sw.mount(Ping.new(rng = rng))
  sw
# Hole-punching interop test driver. Coordinates with a relay and a remote
# peer through a shared redis instance: builds a switch whose AutoNAT client
# is stubbed to always answer NotReachable, reserves a relay slot, then
# either advertises itself (MODE=listen) or dials the listener over the
# relay and prints the ping RTT as JSON.
# NOTE(review): original file had its indentation stripped by the scrape;
# code below is kept byte-identical, comments only added.
proc main() {.async.} =
try:
let relayClient = RelayClient.new()
let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
autonatClientStub.answer = NotReachable
let autonatService =
AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
let hpservice = HPService.new(autonatService, autoRelayService)
let
isListener = getEnv("MODE") == "listen"
switch = createSwitch(relayClient, hpservice)
auxSwitch = createSwitch()
# "redis" resolves inside the interop docker network
redisClient = open("redis", 6379.Port)
debug "Connected to redis"
await switch.start()
await auxSwitch.start()
# Blocking pop: waits until the relay publishes its TCP address
let relayAddr =
try:
redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
except Exception as e:
raise newException(CatchableError, e.msg)
# This is necessary to make the autonat service work. It will ask this peer for our reachability which the autonat
# client stub will answer NotReachable.
await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)
# Wait for autonat to be NotReachable
while autonatService.networkReachability != NetworkReachability.NotReachable:
await sleepAsync(100.milliseconds)
# This will trigger the autonat relay service to make a reservation.
let relayMA = MultiAddress.init(relayAddr[1]).tryGet()
debug "Got relay address", relayMA
let relayId = await switch.connect(relayMA)
debug "Connected to relay", relayId
# Wait for our relay address to be published
while not switch.peerInfo.addrs.anyIt(
it.contains(multiCodec("p2p-circuit")).tryGet()
)
:
await sleepAsync(100.milliseconds)
if isListener:
let listenerPeerId = switch.peerInfo.peerId
discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
debug "Pushed listener client peer id to redis", listenerPeerId
# Nothing to do anymore, wait to be killed
await sleepAsync(2.minutes)
else:
# Dialer role: learn the listener's peer id from redis, then reach it
# through the relay circuit.
let listenerId =
try:
PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
except Exception as e:
raise newException(CatchableError, e.msg)
debug "Got listener peer id", listenerId
let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
debug "Dialing listener relay address", listenerRelayAddr
await switch.connect(listenerId, @[listenerRelayAddr])
# wait for hole-punching to complete in the background
await sleepAsync(5000.milliseconds)
let conn = switch.connManager.selectMuxer(listenerId).connection
let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
let delay = await Ping.new().ping(channel)
await allFuturesThrowing(
channel.close(), conn.close(), switch.stop(), auxSwitch.stop()
)
# Result consumed by the interop harness
echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
quit(0)
except CatchableError as e:
error "Unexpected error", description = e.msg
# Global 4-minute timeout; reaching this line means main did not quit(0)
discard waitFor(main().withTimeout(4.minutes))
quit(1)

View File

@@ -9,12 +9,11 @@
{.used.}
import sequtils, options, tables, sets
import sequtils, tables, sets
import chronos, stew/byteutils
import
utils,
../../libp2p/[
errors,
switch,
stream/connection,
crypto/crypto,
@@ -311,5 +310,5 @@ suite "FloodSub":
check (await bigNode1[0].publish("foo", bigMessage)) > 0
checkUntilTimeout:
checkUntilTimeoutCustom(10.seconds, 100.milliseconds):
messageReceived == 1

View File

@@ -1,925 +0,0 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[options, deques, sequtils, enumerate, algorithm]
import stew/byteutils
import ../../libp2p/builders
import ../../libp2p/errors
import ../../libp2p/crypto/crypto
import ../../libp2p/stream/bufferstream
import ../../libp2p/protocols/pubsub/[pubsub, gossipsub, mcache, mcache, peertable]
import ../../libp2p/protocols/pubsub/rpc/[message, messages]
import ../../libp2p/switch
import ../../libp2p/muxers/muxer
import ../../libp2p/protocols/pubsub/rpc/protobuf
import utils
import ../helpers
proc noop(data: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
const MsgIdSuccess = "msg id gen success"
suite "GossipSub internal":
teardown:
checkTrackers()
asyncTest "subscribe/unsubscribeAll":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(topic: string, data: seq[byte]): Future[void] {.gcsafe, raises: [].} =
discard
let topic = "foobar"
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.sendConn = conn
gossipSub.gossipsub[topic].incl(peer)
# test via dynamic dispatch
gossipSub.PubSub.subscribe(topic, handler)
check:
gossipSub.topics.contains(topic)
gossipSub.gossipsub[topic].len() > 0
gossipSub.mesh[topic].len() > 0
# test via dynamic dispatch
gossipSub.PubSub.unsubscribeAll(topic)
check:
topic notin gossipSub.topics # not in local topics
topic notin gossipSub.mesh # not in mesh
topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "topic params":
let params = TopicParams.init()
params.validateParameters().tryGet()
asyncTest "`rebalanceMesh` Degree Lo":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let topic = "foobar"
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.sendConn = conn
gossipSub.gossipsub[topic].incl(peer)
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
check gossipSub.mesh[topic].len == gossipSub.parameters.d
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "rebalanceMesh - bad peers":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let topic = "foobar"
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
var scoreLow = -11'f64
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.sendConn = conn
peer.score = scoreLow
gossipSub.gossipsub[topic].incl(peer)
scoreLow += 1.0
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
# low score peers should not be in mesh, that's why the count must be 4
check gossipSub.mesh[topic].len == 4
for peer in gossipSub.mesh[topic]:
check peer.score >= 0.0
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "`rebalanceMesh` Degree Hi":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let topic = "foobar"
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
gossipSub.grafted(peer, topic)
gossipSub.mesh[topic].incl(peer)
check gossipSub.mesh[topic].len == 15
gossipSub.rebalanceMesh(topic)
check gossipSub.mesh[topic].len ==
gossipSub.parameters.d + gossipSub.parameters.dScore
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "`replenishFanout` Degree Lo":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
var conns = newSeq[Connection]()
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
var peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
gossipSub.gossipsub[topic].incl(peer)
check gossipSub.gossipsub[topic].len == 15
gossipSub.replenishFanout(topic)
check gossipSub.fanout[topic].len == gossipSub.parameters.d
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "`dropFanoutPeers` drop expired fanout topics":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
await sleepAsync(5.millis) # allow the topic to expire
var conns = newSeq[Connection]()
for i in 0 ..< 6:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
gossipSub.fanout[topic].incl(peer)
check gossipSub.fanout[topic].len == gossipSub.parameters.d
gossipSub.dropFanoutPeers()
check topic notin gossipSub.fanout
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic1 = "foobar1"
let topic2 = "foobar2"
gossipSub.topicParams[topic1] = TopicParams.init()
gossipSub.topicParams[topic2] = TopicParams.init()
gossipSub.fanout[topic1] = initHashSet[PubSubPeer]()
gossipSub.fanout[topic2] = initHashSet[PubSubPeer]()
gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
await sleepAsync(5.millis) # allow the topic to expire
var conns = newSeq[Connection]()
for i in 0 ..< 6:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
gossipSub.fanout[topic1].incl(peer)
gossipSub.fanout[topic2].incl(peer)
check gossipSub.fanout[topic1].len == gossipSub.parameters.d
check gossipSub.fanout[topic2].len == gossipSub.parameters.d
gossipSub.dropFanoutPeers()
check topic1 notin gossipSub.fanout
check topic2 in gossipSub.fanout
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
var conns = newSeq[Connection]()
# generate mesh and fanout peers
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
if i mod 2 == 0:
gossipSub.fanout[topic].incl(peer)
else:
gossipSub.grafted(peer, topic)
gossipSub.mesh[topic].incl(peer)
# generate gossipsub (free standing) peers
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
gossipSub.gossipsub[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
inc seqno
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
check gossipSub.fanout[topic].len == 15
check gossipSub.mesh[topic].len == 15
check gossipSub.gossipsub[topic].len == 15
let peers = gossipSub.getGossipPeers()
check peers.len == gossipSub.parameters.d
for p in peers.keys:
check not gossipSub.fanout.hasPeerId(topic, p.peerId)
check not gossipSub.mesh.hasPeerId(topic, p.peerId)
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
var conns = newSeq[Connection]()
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
if i mod 2 == 0:
gossipSub.fanout[topic].incl(peer)
else:
gossipSub.gossipsub[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
inc seqno
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
let peers = gossipSub.getGossipPeers()
check peers.len == gossipSub.parameters.d
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
var conns = newSeq[Connection]()
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
if i mod 2 == 0:
gossipSub.mesh[topic].incl(peer)
gossipSub.grafted(peer, topic)
else:
gossipSub.gossipsub[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
inc seqno
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
let peers = gossipSub.getGossipPeers()
check peers.len == gossipSub.parameters.d
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
var conns = newSeq[Connection]()
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
if i mod 2 == 0:
gossipSub.mesh[topic].incl(peer)
gossipSub.grafted(peer, topic)
else:
gossipSub.fanout[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
inc seqno
let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
let peers = gossipSub.getGossipPeers()
check peers.len == 0
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "Drop messages of topics without subscription":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
check false
let topic = "foobar"
var conns = newSeq[Connection]()
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
inc seqno
let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
check gossipSub.mcache.msgs.len == 0
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "Disconnect bad peers":
let gossipSub = TestGossipSub.init(newStandardSwitch())
gossipSub.parameters.disconnectBadPeers = true
gossipSub.parameters.appSpecificWeight = 1.0
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
check false
let topic = "foobar"
var conns = newSeq[Connection]()
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.sendConn = conn
peer.handler = handler
peer.appScore = gossipSub.parameters.graylistThreshold - 1
gossipSub.gossipsub.mgetOrPut(topic, initHashSet[PubSubPeer]()).incl(peer)
gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))
gossipSub.updateScores()
await sleepAsync(100.millis)
check:
# test our disconnect mechanics
gossipSub.gossipsub.peers(topic) == 0
# also ensure we cleanup properly the peersInIP table
gossipSub.peersInIP.len == 0
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "subscription limits":
let gossipSub = TestGossipSub.init(newStandardSwitch())
gossipSub.topicsHigh = 10
var tooManyTopics: seq[string]
for i in 0 .. gossipSub.topicsHigh + 10:
tooManyTopics &= "topic" & $i
let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)
let conn = TestBufferStream.new(noop)
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))
check:
gossipSub.gossipsub.len == gossipSub.topicsHigh
peer.behaviourPenalty > 0.0
await conn.close()
await gossipSub.switch.stop()
asyncTest "invalid message bytes":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let peerId = randomPeerId()
let peer = gossipSub.getPubSubPeer(peerId)
expect(CatchableError):
await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])
await gossipSub.switch.stop()
# Verifies that rebalanceMesh never grafts peers that are in local backoff:
# every candidate peer is given a backoff far in the future, so the mesh
# must stay empty after rebalancing.
asyncTest "rebalanceMesh fail due to backoff":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let topic = "foobar"
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.sendConn = conn
gossipSub.gossipsub[topic].incl(peer)
# Put every peer under a one-hour backoff so GRAFTs must be refused.
gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
peerId, Moment.now() + 1.hours
)
let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
# there must be a control prune due to violation of backoff
check prunes.len != 0
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
# expect 0 since they are all backing off
check gossipSub.mesh[topic].len == 0
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# Verifies that remote PRUNEs (with a remote-supplied backoff) remove peers
# from the mesh and that the topic entry is cleaned up once empty.
asyncTest "rebalanceMesh fail due to backoff - remote":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let topic = "foobar"
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.sendConn = conn
gossipSub.gossipsub[topic].incl(peer)
gossipSub.mesh[topic].incl(peer)
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
check gossipSub.mesh[topic].len != 0
# Every mesh member sends us a PRUNE carrying its own backoff period.
for i in 0 ..< 15:
let peerId = conns[i].peerId
let peer = gossipSub.getPubSubPeer(peerId)
gossipSub.handlePrune(
peer,
@[
ControlPrune(
topicID: topic,
peers: @[],
backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
)
],
)
# expect topic cleaned up since they are all pruned
check topic notin gossipSub.mesh
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# Audit scenario: with an over-full mesh (13 peers, dHigh = 12) the
# rebalance must keep the mesh above dLow and retain at least dOut
# outbound peers, giving outbound connections graft priority.
asyncTest "rebalanceMesh Degree Hi - audit scenario":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let topic = "foobar"
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.parameters.dScore = 4
gossipSub.parameters.d = 6
gossipSub.parameters.dOut = 3
gossipSub.parameters.dHigh = 12
gossipSub.parameters.dLow = 4
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
# Six high-score inbound peers...
for i in 0 ..< 6:
let conn = TestBufferStream.new(noop)
conn.transportDir = Direction.In
conns &= conn
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.score = 40.0
peer.sendConn = conn
gossipSub.grafted(peer, topic)
gossipSub.mesh[topic].incl(peer)
# ...and seven lower-score outbound peers.
for i in 0 ..< 7:
let conn = TestBufferStream.new(noop)
conn.transportDir = Direction.Out
conns &= conn
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.score = 10.0
peer.sendConn = conn
gossipSub.grafted(peer, topic)
gossipSub.mesh[topic].incl(peer)
check gossipSub.mesh[topic].len == 13
gossipSub.rebalanceMesh(topic)
# ensure we are above dlow
check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
var outbound = 0
for peer in gossipSub.mesh[topic]:
if peer.sendConn.transportDir == Direction.Out:
inc outbound
# ensure we give priority and keep at least dOut outbound peers
check outbound >= gossipSub.parameters.dOut
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# Exercises IHAVE/IWANT control handling: a peer with no IHAVE budget must
# not trigger IWANTs, duplicate message IDs collapse to a single request,
# and IWANT for a cached message returns exactly that message.
asyncTest "handleIHave/Iwant tests":
let gossipSub = TestGossipSub.init(newStandardSwitch())
# Per-peer handler must never fire in this test.
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
check false
proc handler2(topic: string, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.subscribe(topic, handler2)
# Instantiates 30 peers and connects all of them to the previously defined `gossipSub`
for i in 0 ..< 30:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
# Add the connection to `gossipSub`, to their `gossipSub.gossipsub` and `gossipSub.mesh` tables
gossipSub.grafted(peer, topic)
gossipSub.mesh[topic].incl(peer)
# Peers with no budget should not request messages
block:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IHAVE message that contains the same message ID three times
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
# Given the peer has no budget to request messages
peer.iHaveBudget = 0
# When a peer makes an IHAVE request for the a message that `gossipSub` has
let iwants = gossipSub.handleIHave(peer, @[msg])
# Then `gossipSub` should not generate an IWant message for the message,
check:
iwants.messageIDs.len == 0
# Peers with budget should request messages. If ids are repeated, only one request should be generated
block:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
let id = @[0'u8, 1, 2, 3]
# Build an IHAVE message that contains the same message ID three times
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
# Given the budget is not 0 (because it's not been overridden)
# When a peer makes an IHAVE request for the a message that `gossipSub` does not have
let iwants = gossipSub.handleIHave(peer, @[msg])
# Then `gossipSub` should generate an IWant message for the message
check:
iwants.messageIDs.len == 1
# Peers with budget should request messages. If ids are repeated, only one request should be generated
block:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IWANT message that contains the same message ID three times
let msg = ControlIWant(messageIDs: @[id, id, id])
# When a peer makes an IWANT request for the a message that `gossipSub` has
let genmsg = gossipSub.handleIWant(peer, @[msg])
# Then `gossipSub` should return the message
check:
genmsg.len == 1
check gossipSub.mcache.msgs.len == 1
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# Creates two connected gossipsub nodes subscribed to "foobar".
# Returns both nodes plus a shared set that collects every payload
# delivered to node 0's handler.
proc setupTest(): Future[
tuple[
gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]
]
] {.async.} =
let nodes = generateNodes(2, gossip = true, verifySignature = false)
discard await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
await nodes[1].switch.connect(
nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
)
var receivedMessages = new(HashSet[seq[byte]])
# Node 0 records payloads; node 1's handler is a sink.
proc handlerA(topic: string, data: seq[byte]) {.async.} =
receivedMessages[].incl(data)
proc handlerB(topic: string, data: seq[byte]) {.async.} =
discard
nodes[0].subscribe("foobar", handlerA)
nodes[1].subscribe("foobar", handlerB)
# Wait until the mesh between the two nodes is actually formed.
await waitSubGraph(nodes, "foobar")
var gossip0: GossipSub = GossipSub(nodes[0])
var gossip1: GossipSub = GossipSub(nodes[1])
return (gossip0, gossip1, receivedMessages)
# Stops both switches created by setupTest.
proc teardownTest(gossip0: GossipSub, gossip1: GossipSub) {.async.} =
await allFuturesThrowing(gossip0.switch.stop(), gossip1.switch.stop())
# Builds two messages of the given sizes, stores them in gossip1's message
# cache, and records their IDs as already-advertised (sentIHaves) towards
# gossip0 so a later IWANT from gossip0 is honored.
# Returns the message IDs and the set of payloads produced.
proc createMessages(
gossip0: GossipSub, gossip1: GossipSub, size1: int, size2: int
): tuple[iwantMessageIds: seq[MessageId], sentMessages: HashSet[seq[byte]]] =
var iwantMessageIds = newSeq[MessageId]()
var sentMessages = initHashSet[seq[byte]]()
for i, size in enumerate([size1, size2]):
# Payload i is `size` bytes, all set to i, so the two payloads differ.
let data = newSeqWith(size, i.byte)
sentMessages.incl(data)
let msg =
Message.init(gossip1.peerInfo.peerId, data, "foobar", some(uint64(i + 1)))
let iwantMessageId = gossip1.msgIdProvider(msg).expect(MsgIdSuccess)
iwantMessageIds.add(iwantMessageId)
gossip1.mcache.put(iwantMessageId, msg)
let peer = gossip1.peers[(gossip0.peerInfo.peerId)]
peer.sentIHaves[^1].incl(iwantMessageId)
return (iwantMessageIds, sentMessages)
asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
# This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
# Expected: Both messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
# Each message is just over half of maxMessageSize, so together they exceed it.
let messageSize = gossip1.maxMessageSize div 2 + 1
let (iwantMessageIds, sentMessages) =
createMessages(gossip0, gossip1, messageSize, messageSize)
# Advertise both IDs via IHAVE; gossip0 will reply with IWANT.
gossip1.broadcast(
gossip1.mesh["foobar"],
RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: iwantMessageIds)]
)
)
),
isHighPriority = false,
)
checkUntilTimeout:
receivedMessages[] == sentMessages
check receivedMessages[].len == 2
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
# This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
# Expected: No messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let messageSize = gossip1.maxMessageSize + 10
let (bigIWantMessageIds, sentMessages) =
createMessages(gossip0, gossip1, messageSize, messageSize)
gossip1.broadcast(
gossip1.mesh["foobar"],
RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
)
)
),
isHighPriority = false,
)
# Sleep first so that any (wrong) delivery has time to arrive before
# asserting that nothing was received.
await sleepAsync(300.milliseconds)
checkUntilTimeout:
receivedMessages[].len == 0
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
# This test checks if two messages, both below the maxSize, are correctly processed and sent.
# Expected: Both messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let size1 = gossip1.maxMessageSize div 2
let size2 = gossip1.maxMessageSize div 3
let (bigIWantMessageIds, sentMessages) =
createMessages(gossip0, gossip1, size1, size2)
gossip1.broadcast(
gossip1.mesh["foobar"],
RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
)
)
),
isHighPriority = false,
)
checkUntilTimeout:
receivedMessages[] == sentMessages
check receivedMessages[].len == 2
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
# This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
# Expected: Only the smaller message should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let maxSize = gossip1.maxMessageSize
let size1 = maxSize div 2
let size2 = maxSize + 10
let (bigIWantMessageIds, sentMessages) =
createMessages(gossip0, gossip1, size1, size2)
gossip1.broadcast(
gossip1.mesh["foobar"],
RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
)
)
),
isHighPriority = false,
)
# The undersized message is all 0-bytes (createMessages fills payload i
# with byte i), so the lexicographically smaller seq is the small one.
var smallestSet: HashSet[seq[byte]]
let seqs = toSeq(sentMessages)
if seqs[0] < seqs[1]:
smallestSet.incl(seqs[0])
else:
smallestSet.incl(seqs[1])
checkUntilTimeout:
receivedMessages[] == smallestSet
check receivedMessages[].len == 1
await teardownTest(gossip0, gossip1)

File diff suppressed because it is too large Load Diff

View File

@@ -1,394 +0,0 @@
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import sequtils, options, tables, sets
import chronos, stew/byteutils, chronicles
import
utils,
../../libp2p/[
errors,
peerid,
peerinfo,
stream/connection,
stream/bufferstream,
crypto/crypto,
protocols/pubsub/pubsub,
protocols/pubsub/gossipsub,
protocols/pubsub/pubsubpeer,
protocols/pubsub/peertable,
protocols/pubsub/rpc/messages,
],
../utils/[futures, async_tests],
../helpers
# Repeatedly evaluates `call` (a publish expression returning the number of
# peers published to) until at least `require` peers have been reached or
# `timeout` elapses; sleeps `wait` between attempts and asserts on failure.
template tryPublish(
call: untyped, require: int, wait = 10.milliseconds, timeout = 10.seconds
): untyped =
var
expiration = Moment.now() + timeout
pubs = 0
while pubs < require and Moment.now() < expiration:
pubs = pubs + call
await sleepAsync(wait)
doAssert pubs >= require, "Failed to publish!"
# End-to-end GossipSub behaviour tests: control-message delivery over a
# sparse mesh, subscription validation, direct peers, disconnection
# mechanics and score decay.
suite "GossipSub":
teardown:
checkTrackers()
# With a sparsely connected graph and tiny D values, messages must still
# reach every node via IHAVE/IWANT control exchange rather than eager push.
asyncTest "e2e - GossipSub with multiple peers - control deliver (sparse)":
var runs = 10
let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
startNodesAndDeferStop(nodes)
await connectNodesSparse(nodes)
var seen: Table[string, int]
var seenFut = newFuture[void]()
for i in 0 ..< nodes.len:
let dialer = nodes[i]
let dgossip = GossipSub(dialer)
# Tiny mesh degree forces gossip (IHAVE/IWANT) based delivery.
dgossip.parameters.dHigh = 2
dgossip.parameters.dLow = 1
dgossip.parameters.d = 1
dgossip.parameters.dOut = 1
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async.} =
seen.mgetOrPut(peerName, 0).inc()
info "seen up", count = seen.len
check topic == "foobar"
if not seenFut.finished() and seen.len >= runs:
seenFut.complete()
dialer.subscribe("foobar", handler)
await waitSub(nodes[0], dialer, "foobar")
# we want to test ping pong deliveries via control Iwant/Ihave, so we publish just in a tap
let publishedTo = nodes[0].publish(
"foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)
).await
check:
publishedTo != 0
publishedTo != runs
await wait(seenFut, 5.minutes)
check:
seen.len >= runs
for k, v in seen.pairs:
check:
v >= 1
# A subscriptionValidator rejecting "foobar" must be invoked when a remote
# peer subscribes to that topic.
asyncTest "GossipSub invalid topic subscription":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
let nodes = generateNodes(2, gossip = true)
startNodesAndDeferStop(nodes)
# We must subscribe before setting the validator
nodes[0].subscribe("foobar", handler)
var gossip = GossipSub(nodes[0])
let invalidDetected = newFuture[void]()
gossip.subscriptionValidator = proc(topic: string): bool =
if topic == "foobar":
try:
invalidDetected.complete()
except:
raise newException(Defect, "Exception during subscriptionValidator")
false
else:
true
await connectNodesStar(nodes)
nodes[1].subscribe("foobar", handler)
await invalidDetected.wait(10.seconds)
# Direct peers connect without an explicit connectNodesStar call; the
# subscription validator still sees the remote subscription.
asyncTest "GossipSub test directPeers":
let nodes = generateNodes(2, gossip = true)
startNodesAndDeferStop(nodes)
await GossipSub(nodes[0]).addDirectPeer(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
let invalidDetected = newFuture[void]()
GossipSub(nodes[0]).subscriptionValidator = proc(topic: string): bool =
if topic == "foobar":
try:
invalidDetected.complete()
except:
raise newException(Defect, "Exception during subscriptionValidator")
false
else:
true
# DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
### await connectNodesStar(nodes)
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
nodes[1].subscribe("foobar", handler)
await invalidDetected.wait(10.seconds)
# Messages must be forwarded along direct-peer links even though direct
# peers are never part of the mesh.
asyncTest "GossipSub directPeers: always forward messages":
let nodes = generateNodes(3, gossip = true)
startNodesAndDeferStop(nodes)
await GossipSub(nodes[0]).addDirectPeer(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
await GossipSub(nodes[1]).addDirectPeer(
nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
)
await GossipSub(nodes[1]).addDirectPeer(
nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs
)
await GossipSub(nodes[2]).addDirectPeer(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
proc noop(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
nodes[0].subscribe("foobar", noop)
nodes[1].subscribe("foobar", noop)
nodes[2].subscribe("foobar", handler)
tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1
await handlerFut.wait(2.seconds)
# peer shouldn't be in our mesh
check "foobar" notin GossipSub(nodes[0]).mesh
check "foobar" notin GossipSub(nodes[1]).mesh
check "foobar" notin GossipSub(nodes[2]).mesh
# A direct peer whose score is below graylistThreshold must still receive
# and deliver messages — direct peers are exempt from graylisting.
asyncTest "GossipSub directPeers: don't kick direct peer with low score":
let nodes = generateNodes(2, gossip = true)
startNodesAndDeferStop(nodes)
await GossipSub(nodes[0]).addDirectPeer(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
await GossipSub(nodes[1]).addDirectPeer(
nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
)
GossipSub(nodes[1]).parameters.disconnectBadPeers = true
# Absurdly high threshold guarantees node 0 scores below it.
GossipSub(nodes[1]).parameters.graylistThreshold = 100000
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
nodes[0].subscribe("foobar", handler)
nodes[1].subscribe("foobar", handler)
tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1
await handlerFut
GossipSub(nodes[1]).updateScores()
# peer shouldn't be in our mesh
check:
GossipSub(nodes[1]).peerStats[nodes[0].switch.peerInfo.peerId].score <
GossipSub(nodes[1]).parameters.graylistThreshold
GossipSub(nodes[1]).updateScores()
handlerFut = newFuture[void]()
tryPublish await nodes[0].publish("foobar", toBytes("hellow2")), 1
# Without directPeers, this would fail
await handlerFut.wait(1.seconds)
asyncTest "GossipSub directPeers: send message to unsubscribed direct peer":
# Given 2 nodes
let
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true)
node0 = nodes[0]
node1 = nodes[1]
g0 = GossipSub(node0)
g1 = GossipSub(node1)
startNodesAndDeferStop(nodes)
# With message observers
var
messageReceived0 = newFuture[bool]()
messageReceived1 = newFuture[bool]()
proc observer0(peer: PubSubPeer, msgs: var RPCMsg) =
for message in msgs.messages:
if message.topic == "foobar":
messageReceived0.complete(true)
proc observer1(peer: PubSubPeer, msgs: var RPCMsg) =
for message in msgs.messages:
if message.topic == "foobar":
messageReceived1.complete(true)
node0.addObserver(PubSubObserver(onRecv: observer0))
node1.addObserver(PubSubObserver(onRecv: observer1))
# Connect them as direct peers
await g0.addDirectPeer(node1.peerInfo.peerId, node1.peerInfo.addrs)
await g1.addDirectPeer(node0.peerInfo.peerId, node0.peerInfo.addrs)
# When node 0 sends a message
let message = "Hello!".toBytes()
let publishResult = await node0.publish("foobar", message)
# None should receive the message as they are not subscribed to the topic
let results = await waitForStates(@[messageReceived0, messageReceived1])
check:
publishResult == 0
results[0].isPending()
results[1].isPending()
# peerStats must survive peers unsubscribing and re-subscribing across
# heartbeats; stats are keyed by peer, not by topic membership.
asyncTest "GossipSub peers disconnections mechanics":
var runs = 10
let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
var seen: Table[string, int]
var seenFut = newFuture[void]()
for i in 0 ..< nodes.len:
let dialer = nodes[i]
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async.} =
seen.mgetOrPut(peerName, 0).inc()
check topic == "foobar"
if not seenFut.finished() and seen.len >= runs:
seenFut.complete()
dialer.subscribe("foobar", handler)
await waitSubGraph(nodes, "foobar")
# ensure peer stats are stored properly and kept properly
check:
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
tryPublish await wait(
nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
1.minutes,
), 1, 5.seconds, 3.minutes
await wait(seenFut, 5.minutes)
check:
seen.len >= runs
for k, v in seen.pairs:
check:
v >= 1
for node in nodes:
var gossip = GossipSub(node)
check:
"foobar" in gossip.gossipsub
gossip.fanout.len == 0
gossip.mesh["foobar"].len > 0
# Removing some subscriptions
for i in 0 ..< runs:
if i mod 3 != 0:
nodes[i].unsubscribeAll("foobar")
# Waiting 2 heartbeats
for _ in 0 .. 1:
let evnt = newAsyncEvent()
GossipSub(nodes[0]).heartbeatEvents &= evnt
await evnt.wait()
# ensure peer stats are stored properly and kept properly
check:
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
# Adding again subscriptions
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
for i in 0 ..< runs:
if i mod 3 != 0:
nodes[i].subscribe("foobar", handler)
# Waiting 2 heartbeats
for _ in 0 .. 1:
let evnt = newAsyncEvent()
GossipSub(nodes[0]).heartbeatEvents &= evnt
await evnt.wait()
# ensure peer stats are stored properly and kept properly
check:
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
# meshMessageDeliveries must decay multiplicatively once per decayInterval.
asyncTest "GossipSub scoring - decayInterval":
let nodes = generateNodes(2, gossip = true)
var gossip = GossipSub(nodes[0])
# MacOs has some nasty jitter when sleeping
# (up to 7 ms), so we need some pretty long
# sleeps to be safe here
gossip.parameters.decayInterval = 300.milliseconds
startNodesAndDeferStop(nodes)
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async.} =
handlerFut.complete()
await connectNodesStar(nodes)
nodes[0].subscribe("foobar", handler)
nodes[1].subscribe("foobar", handler)
tryPublish await nodes[0].publish("foobar", toBytes("hello")), 1
await handlerFut
gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries =
100
gossip.topicParams["foobar"].meshMessageDeliveriesDecay = 0.9
await sleepAsync(1500.milliseconds)
# We should have decayed 5 times, though allowing 4..6
check:
gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries in
50.0 .. 66.0

View File

@@ -0,0 +1,543 @@
{.used.}
import std/[sequtils]
import stew/byteutils
import utils
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../helpers
suite "GossipSub Control Messages":
teardown:
checkTrackers()
# Unit tests for IHAVE/IWANT control handling against a single synthetic
# peer: budget exhaustion, duplicate-ID deduplication, and cache lookup.
asyncTest "handleIHave - peers with no budget should not request messages":
let topic = "foobar"
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.subscribe(topic, voidTopicHandler)
let peerId = randomPeerId()
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IHAVE message that contains the same message ID three times
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
# Given the peer has no budget to request messages
peer.iHaveBudget = 0
# When a peer makes an IHAVE request for the a message that `gossipSub` has
let iwants = gossipSub.handleIHave(peer, @[msg])
# Then `gossipSub` should not generate an IWant message for the message,
check:
iwants.messageIDs.len == 0
gossipSub.mcache.msgs.len == 1
asyncTest "handleIHave - peers with budget should request messages":
let topic = "foobar"
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.subscribe(topic, voidTopicHandler)
let peerId = randomPeerId()
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IHAVE message that contains the same message ID three times
# If ids are repeated, only one request should be generated
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
# Given the budget is not 0 (because it's not been overridden)
check:
peer.iHaveBudget > 0
# When a peer makes an IHAVE request for the a message that `gossipSub` does not have
let iwants = gossipSub.handleIHave(peer, @[msg])
# Then `gossipSub` should generate an IWant message for the message
check:
iwants.messageIDs.len == 1
gossipSub.mcache.msgs.len == 1
asyncTest "handleIWant - peers with budget should request messages":
let topic = "foobar"
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.subscribe(topic, voidTopicHandler)
let peerId = randomPeerId()
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IWANT message that contains the same message ID three times
# If ids are repeated, only one request should be generated
let msg = ControlIWant(messageIDs: @[id, id, id])
# When a peer makes an IWANT request for the a message that `gossipSub` has
let messages = gossipSub.handleIWant(peer, @[msg])
# Then `gossipSub` should return the message
check:
messages.len == 1
gossipSub.mcache.msgs.len == 1
# A GRAFT message sent between two connected nodes must add the sender to
# the receiver's mesh (heartbeat grafting is suppressed via degenerate
# D values until the explicit GRAFT is sent).
asyncTest "GRAFT messages correctly add peers to mesh":
# Given 2 nodes
let
topic = "foobar"
graftMessage = ControlMessage(graft: @[ControlGraft(topicID: topic)])
numberOfNodes = 2
# First part of the hack: Weird dValues so peers are not GRAFTed automatically
dValues = DValues(dLow: some(0), dHigh: some(0), d: some(0), dOut: some(-1))
nodes = generateNodes(
numberOfNodes, gossip = true, verifySignature = false, dValues = some(dValues)
)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And both subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Because of the hack-ish dValues, the peers are added to gossipsub but not GRAFTed to mesh
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# Stop both nodes in order to prevent GRAFT message to be sent by heartbeat
await n0.stop()
await n1.stop()
# Second part of the hack
# Set values so peers can be GRAFTed
let newDValues =
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(1)))
n0.parameters.applyDValues(newDValues)
n1.parameters.applyDValues(newDValues)
# When a GRAFT message is sent
let p0 = n1.getOrCreatePeer(n0.peerInfo.peerId, @[GossipSubCodec_12])
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
n1.broadcast(@[p0], RPCMsg(control: some(graftMessage)), isHighPriority = false)
checkUntilTimeout:
nodes.allIt(it.mesh.getOrDefault(topic).len == 1)
# Then the peers are GRAFTed
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# A GRAFT for a topic the receiver is not subscribed to must be ignored:
# the sender must not end up in gossipsub or mesh tables.
asyncTest "Received GRAFT for non-subscribed topic":
# Given 2 nodes
let
topic = "foo"
graftMessage = ControlMessage(graft: @[ControlGraft(topicID: topic)])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And only node0 subscribes to the topic
nodes[0].subscribe(topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When a GRAFT message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is not GRAFTed
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# A PRUNE removes the sender from the receiver's mesh (gossipsub membership
# is kept); pruning in each direction clears each side's mesh entry.
asyncTest "PRUNE messages correctly removes peers from mesh":
# Given 2 nodes
let
topic = "foo"
backoff = 1
pruneMessage = ControlMessage(
prune: @[ControlPrune(topicID: topic, peers: @[], backoff: uint64(backoff))]
)
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And both subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When a PRUNE message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is PRUNEd
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When another PRUNE message is sent
let p0 = n1.getOrCreatePeer(n0.peerInfo.peerId, @[GossipSubCodec_12])
n1.broadcast(@[p0], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is PRUNEd
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# A PRUNE for a topic the receiver never subscribed to must leave its
# tables untouched.
asyncTest "Received PRUNE for non-subscribed topic":
# Given 2 nodes
let
topic = "foo"
pruneMessage =
ControlMessage(prune: @[ControlPrune(topicID: topic, peers: @[], backoff: 1)])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And only node0 subscribes to the topic
n0.subscribe(topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When a PRUNE message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is not PRUNEd
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
asyncTest "IHAVE messages correctly advertise message ID to peers":
  ## A raw IHAVE control frame broadcast by the sender must arrive at the
  ## receiver's onRecv observer with topic and message id unaltered.
  let
    topic = "foo"
    messageID = @[0'u8, 1, 2, 3]
    ihaveMessage =
      ControlMessage(ihave: @[ControlIHave(topicID: topic, messageIDs: @[messageID])])
    numberOfNodes = 2
    nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
      .toGossipSub()
    sender = nodes[0]
    receiver = nodes[1]
  startNodesAndDeferStop(nodes)
  # Attach an observer on the receiver that records every incoming IHAVE
  var (observedIHaves, ihaveObserver) = createCheckForIHave()
  receiver.addOnRecvObserver(ihaveObserver)
  # Connect the nodes and have both join the topic
  await connectNodesStar(nodes)
  subscribeAllNodes(nodes, topic, voidTopicHandler)
  await waitForHeartbeat()
  # Sanity: both sides know each other as gossipsub peers for the topic
  check:
    sender.gossipsub.hasPeerId(topic, receiver.peerInfo.peerId)
    receiver.gossipsub.hasPeerId(topic, sender.peerInfo.peerId)
  # Broadcast the IHAVE directly to the receiver
  let receiverPeer =
    sender.getOrCreatePeer(receiver.peerInfo.peerId, @[GossipSubCodec_12])
  sender.broadcast(
    @[receiverPeer], RPCMsg(control: some(ihaveMessage)), isHighPriority = false
  )
  await waitForHeartbeat()
  # The observer must have captured exactly the advertised id
  check:
    observedIHaves[0] == ControlIHave(topicID: topic, messageIDs: @[messageID])
asyncTest "IWANT messages correctly request messages by their IDs":
  # Verifies that a raw IWANT control message broadcast from node0 is
  # delivered unchanged to node1's receive observer.
  # Given 2 nodes
  let
    topic = "foo"
    messageID = @[0'u8, 1, 2, 3]
    iwantMessage = ControlMessage(iwant: @[ControlIWant(messageIDs: @[messageID])])
    numberOfNodes = 2
    nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
      .toGossipSub()
    n0 = nodes[0]
    n1 = nodes[1]
  startNodesAndDeferStop(nodes)
  # Given node1 has an IWANT observer
  var (receivedIWants, checkForIWants) = createCheckForIWant()
  n1.addOnRecvObserver(checkForIWants)
  # And the nodes are connected
  await connectNodesStar(nodes)
  # And both subscribe to the topic
  subscribeAllNodes(nodes, topic, voidTopicHandler)
  await waitForHeartbeat()
  check:
    n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
    n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
  # When an IWANT message is sent
  # getOrCreatePeer returns node0's PubSubPeer handle for node1, the broadcast target
  let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
  n0.broadcast(@[p1], RPCMsg(control: some(iwantMessage)), isHighPriority = false)
  await waitForHeartbeat()
  # Then the peer has the message ID
  check:
    receivedIWants[0] == ControlIWant(messageIDs: @[messageID])
asyncTest "IHAVE for message not held by peer triggers IWANT response to sender":
  # node1 does not hold the advertised message, so upon receiving IHAVE it
  # must reply to node0 with an IWANT for that id.
  # Given 2 nodes
  let
    topic = "foo"
    messageID = @[0'u8, 1, 2, 3]
    ihaveMessage =
      ControlMessage(ihave: @[ControlIHave(topicID: topic, messageIDs: @[messageID])])
    numberOfNodes = 2
    nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
      .toGossipSub()
    n0 = nodes[0]
    n1 = nodes[1]
  startNodesAndDeferStop(nodes)
  # Given node0 has an IWANT observer (node0 is the one that will receive
  # node1's IWANT reply)
  var (receivedIWants, checkForIWants) = createCheckForIWant()
  n0.addOnRecvObserver(checkForIWants)
  # And the nodes are connected
  await connectNodesStar(nodes)
  # And both nodes subscribe to the topic
  subscribeAllNodes(nodes, topic, voidTopicHandler)
  await waitForHeartbeat()
  # When an IHAVE message is sent from node0
  let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
  n0.broadcast(@[p1], RPCMsg(control: some(ihaveMessage)), isHighPriority = false)
  await waitForHeartbeat()
  # Then node0 should receive an IWANT message from node1 (as node1 doesn't have the message)
  check:
    receivedIWants[0] == ControlIWant(messageIDs: @[messageID])
asyncTest "IDONTWANT":
  # A pre-emptive IDONTWANT from C must stop B from relaying A's message
  # to C, while A (the publisher) never receives an IDONTWANT.
  # 3 nodes: A <=> B <=> C (A & C are NOT connected)
  let
    topic = "foobar"
    nodes = generateNodes(3, gossip = true).toGossipSub()
  startNodesAndDeferStop(nodes)
  await connectNodes(nodes[0], nodes[1])
  await connectNodes(nodes[1], nodes[2])
  let (bFinished, handlerB) = createCompleteHandler()
  nodes[0].subscribe(topic, voidTopicHandler)
  nodes[1].subscribe(topic, handlerB)
  nodes[2].subscribe(topic, voidTopicHandler)
  await waitSubGraph(nodes, topic)
  check:
    nodes[2].mesh.peers(topic) == 1
  # When we pre-emptively send a dontwant from C to B,
  # (the advertised id matches the 10-byte message published below)
  nodes[2].broadcast(
    nodes[2].mesh[topic],
    RPCMsg(
      control: some(
        ControlMessage(idontwant: @[ControlIWant(messageIDs: @[newSeq[byte](10)])])
      )
    ),
    isHighPriority = true,
  )
  # Then B doesn't relay the message to C.
  # Wait until B has registered C's IDONTWANT in its per-peer history
  checkUntilTimeout:
    nodes[1].mesh.getOrDefault(topic).anyIt(it.iDontWants.anyIt(it.len == 1))
  # When A sends a message to the topic
  tryPublish await nodes[0].publish(topic, newSeq[byte](10000)), 1
  discard await bFinished
  # Then B sends IDONTWANT to C, but not A
  checkUntilTimeout:
    toSeq(nodes[2].mesh.getOrDefault(topic)).anyIt(it.iDontWants.anyIt(it.len == 1))
  check:
    toSeq(nodes[0].mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
asyncTest "IDONTWANT is broadcasted on publish":
  ## With sendIDontWantOnPublish enabled, publishing a large message makes
  ## the publisher immediately announce IDONTWANT to its mesh peer.
  # 2 nodes: A <=> B
  let topic = "foobar"
  let peers =
    generateNodes(2, gossip = true, sendIDontWantOnPublish = true).toGossipSub()
  startNodesAndDeferStop(peers)
  await connectNodes(peers[0], peers[1])
  for node in peers:
    node.subscribe(topic, voidTopicHandler)
  await waitSubGraph(peers, topic)
  # The first node publishes a large payload on the topic
  tryPublish await peers[0].publish(topic, newSeq[byte](10000)), 1
  # The second node must record exactly one IDONTWANT from the publisher
  checkUntilTimeout:
    peers[1].mesh.getOrDefault(topic).anyIt(it.iDontWants.anyIt(it.len == 1))
asyncTest "IDONTWANT is sent only for 1.2":
  # IDONTWANT must only be sent to peers negotiating gossipsub v1.2;
  # C speaks v1.1 and A is the original sender, so neither should get one.
  # 3 nodes: A <=> B <=> C (A & C are NOT connected)
  let
    topic = "foobar"
    nodeA = generateNodes(1, gossip = true).toGossipSub()[0]
    nodeB = generateNodes(1, gossip = true).toGossipSub()[0]
    nodeC = generateNodes(1, gossip = true, gossipSubVersion = GossipSubCodec_11)
      .toGossipSub()[0]
  startNodesAndDeferStop(@[nodeA, nodeB, nodeC])
  await connectNodes(nodeA, nodeB)
  await connectNodes(nodeB, nodeC)
  let (bFinished, handlerB) = createCompleteHandler()
  nodeA.subscribe(topic, voidTopicHandler)
  nodeB.subscribe(topic, handlerB)
  nodeC.subscribe(topic, voidTopicHandler)
  await waitSubGraph(@[nodeA, nodeB, nodeC], topic)
  check:
    nodeC.mesh.peers(topic) == 1
  # When A sends a message to the topic
  tryPublish await nodeA.publish(topic, newSeq[byte](10000)), 1
  discard await bFinished
  # Then B sends IDONTWANT to neither A nor C
  # (because C.gossipSubVersion == GossipSubCodec_11)
  await waitForHeartbeat()
  check:
    toSeq(nodeC.mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
    toSeq(nodeA.mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
asyncTest "Max IDONTWANT messages per heartbeat per peer":
  # Verifies that handleIDontWant caps the per-peer history at
  # IDontWantMaxCount, dropping any excess message ids.
  # Given GossipSub node with 1 peer
  let
    topic = "foobar"
    totalPeers = 1
  let (gossipSub, conns, peers) = setupGossipSubWithPeers(totalPeers, topic)
  defer:
    await teardownGossipSub(gossipSub, conns)
  let peer = peers[0]
  # And a sequence of iDontWants carrying 1200 message ids in total,
  # which exceeds the per-heartbeat maximum (IDontWantMaxCount)
  proc generateMessageIds(count: int): seq[MessageId] =
    # Moment.now() is mixed in so ids stay unique across invocations
    return (0 ..< count).mapIt(("msg_id_" & $it & $Moment.now()).toBytes())
  let iDontWants =
    @[
      ControlIWant(messageIDs: generateMessageIds(600)),
      ControlIWant(messageIDs: generateMessageIds(600)),
    ]
  # When node handles iDontWants
  gossipSub.handleIDontWant(peer, iDontWants)
  # Then it saves max IDontWantMaxCount messages in the history and the rest is dropped
  check:
    peer.iDontWants[0].len == IDontWantMaxCount

View File

@@ -0,0 +1,97 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0 ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import chronos
import stew/byteutils
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../../libp2p/stream/connection
import ../helpers
# Test-only Connection stub: accepts writes and discards them.
type DummyConnection* = ref object of Connection

method write*(
    self: DummyConnection, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
  # No-op sink: drops the payload and returns an already-completed future,
  # so callers writing to this connection never block.
  let fut = newFuture[void]()
  fut.complete()
  return fut

proc new*(T: typedesc[DummyConnection]): DummyConnection =
  # Bare constructor; performs no stream initialization (sufficient for tests
  # that only need a Connection-typed value).
  let instance = T()
  instance
suite "GossipSub Custom Connection Support":
  teardown:
    checkTrackers()

  asyncTest "publish with useCustomConn triggers custom connection and peer selection":
    # Publishing with useCustomConn must invoke both user-supplied callbacks:
    # the custom connection factory and the custom peer selector.
    let
      topic = "test"
      handler = proc(topic: string, data: seq[byte]) {.async.} =
        discard
      nodes = generateNodes(2, gossip = true)
    var
      customConnCreated = false
      peerSelectionCalled = false
    GossipSub(nodes[0]).customConnCallbacks = some(
      CustomConnectionCallbacks(
        customConnCreationCB: proc(
            destAddr: Option[MultiAddress], destPeerId: PeerId, codec: string
        ): Connection =
          # Factory: record the call and hand back a no-op stub connection
          customConnCreated = true
          return DummyConnection.new(),
        customPeerSelectionCB: proc(
            allPeers: HashSet[PubSubPeer],
            directPeers: HashSet[PubSubPeer],
            meshPeers: HashSet[PubSubPeer],
            fanoutPeers: HashSet[PubSubPeer],
        ): HashSet[PubSubPeer] =
          # Selector: record the call and target every known peer
          peerSelectionCalled = true
          return allPeers,
      )
    )
    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)
    nodes[1].subscribe(topic, handler)
    await waitSub(nodes[0], nodes[1], topic)
    tryPublish await nodes[0].publish(topic, "hello".toBytes(), useCustomConn = true), 1
    check:
      peerSelectionCalled
      customConnCreated

  asyncTest "publish with useCustomConn triggers assertion if custom callbacks not set":
    # useCustomConn without configured callbacks must raise.
    let
      topic = "test"
      handler = proc(topic: string, data: seq[byte]) {.async.} =
        discard
      nodes = generateNodes(2, gossip = true)
    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)
    nodes[1].subscribe(topic, handler)
    await waitSub(nodes[0], nodes[1], topic)
    var raised = false
    try:
      discard await nodes[0].publish(topic, "hello".toBytes(), useCustomConn = true)
    # NOTE(review): catching Defect only works when the project builds with
    # --panics:off / catchable defects — confirm build settings
    except Defect:
      raised = true
    check raised

View File

@@ -0,0 +1,143 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[sequtils]
import stew/byteutils
import chronicles
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../helpers
suite "GossipSub Fanout Management":
  teardown:
    checkTrackers()

  asyncTest "`replenishFanout` Degree Lo":
    # With 15 known gossipsub peers and an empty fanout, replenishFanout
    # must fill the fanout up to parameters.d.
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)
    check gossipSub.gossipsub[topic].len == 15
    gossipSub.replenishFanout(topic)
    check gossipSub.fanout[topic].len == gossipSub.parameters.d

  asyncTest "`dropFanoutPeers` drop expired fanout topics":
    # A fanout topic whose TTL deadline has passed is removed entirely.
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(6, topic, populateGossipsub = true, populateFanout = true)
    defer:
      await teardownGossipSub(gossipSub, conns)
    gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
    await sleepAsync(5.millis) # allow the topic to expire
    check gossipSub.fanout[topic].len == gossipSub.parameters.d
    gossipSub.dropFanoutPeers()
    check topic notin gossipSub.fanout

  asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
    # Only the expired topic is dropped; the still-valid one survives.
    let
      topic1 = "foobar1"
      topic2 = "foobar2"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(
      6, @[topic1, topic2], populateGossipsub = true, populateFanout = true
    )
    defer:
      await teardownGossipSub(gossipSub, conns)
    # topic1 expires almost immediately; topic2 stays valid for a minute
    gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
    gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
    await sleepAsync(5.millis) # allow first topic to expire
    check gossipSub.fanout[topic1].len == gossipSub.parameters.d
    check gossipSub.fanout[topic2].len == gossipSub.parameters.d
    gossipSub.dropFanoutPeers()
    check topic1 notin gossipSub.fanout
    check topic2 in gossipSub.fanout

  asyncTest "e2e - GossipSub send over fanout A -> B":
    # The publisher is not subscribed, so delivery must go through the
    # fanout (not the mesh); also verifies send/recv observers each fire once.
    let (passed, handler) = createCompleteHandler()
    let nodes = generateNodes(2, gossip = true)
    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)
    nodes[1].subscribe("foobar", handler)
    await waitSub(nodes[0], nodes[1], "foobar")
    var observed = 0
    let
      obs1 = PubSubObserver(
        onRecv: proc(peer: PubSubPeer, msgs: var RPCMsg) =
          inc observed
      )
      obs2 = PubSubObserver(
        onSend: proc(peer: PubSubPeer, msgs: var RPCMsg) =
          inc observed
      )
    nodes[1].addObserver(obs1)
    nodes[0].addObserver(obs2)
    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
    var gossip1: GossipSub = GossipSub(nodes[0])
    var gossip2: GossipSub = GossipSub(nodes[1])
    check:
      "foobar" in gossip1.gossipsub
      gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
      not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
    discard await passed.wait(2.seconds)
    # one onSend (node0) + one onRecv (node1)
    check observed == 2

  asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
    # Node1 is configured with d/dLow/dHigh = 0 so its mesh stays empty;
    # node0 must then deliver via fanout even though both are subscribed.
    let (passed, handler) = createCompleteHandler()
    let nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 10.minutes)
    startNodesAndDeferStop(nodes)
    GossipSub(nodes[1]).parameters.d = 0
    GossipSub(nodes[1]).parameters.dHigh = 0
    GossipSub(nodes[1]).parameters.dLow = 0
    await connectNodesStar(nodes)
    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)
    let gsNode = GossipSub(nodes[1])
    checkUntilTimeout:
      gsNode.mesh.getOrDefault("foobar").len == 0
      GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0
      (
        GossipSub(nodes[0]).gossipsub.getOrDefault("foobar").len == 1 or
          GossipSub(nodes[0]).fanout.getOrDefault("foobar").len == 1
      )
    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
    check:
      GossipSub(nodes[0]).fanout.getOrDefault("foobar").len > 0
      GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0
    discard await passed.wait(2.seconds)
    trace "test done, stopping..."

View File

@@ -0,0 +1,389 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[sequtils]
import stew/byteutils
import chronicles
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../libp2p/protocols/pubsub/rpc/[message]
import ../helpers, ../utils/[futures]
# Expectation message used when the msg id provider must succeed.
const MsgIdSuccess = "msg id gen success"

suite "GossipSub Gossip Protocol":
  teardown:
    checkTrackers()

  asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
    # Gossip targets are capped at parameters.d and must exclude peers
    # already in the mesh or fanout for the topic.
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(45, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)
    # generate mesh and fanout peers (15 each, alternating)
    for i in 0 ..< 30:
      let peer = peers[i]
      if i mod 2 == 0:
        gossipSub.fanout[topic].incl(peer)
      else:
        gossipSub.grafted(peer, topic)
        gossipSub.mesh[topic].incl(peer)
    # generate gossipsub (free standing) peers
    for i in 30 ..< 45:
      let peer = peers[i]
      gossipSub.gossipsub[topic].incl(peer)
    # generate messages so there is something to gossip about
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
    check gossipSub.fanout[topic].len == 15
    check gossipSub.mesh[topic].len == 15
    check gossipSub.gossipsub[topic].len == 15
    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == gossipSub.parameters.d
    for p in gossipPeers.keys:
      check not gossipSub.fanout.hasPeerId(topic, p.peerId)
      check not gossipSub.mesh.hasPeerId(topic, p.peerId)

  asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
    # Topic present in fanout/gossipsub but absent from mesh must not crash.
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)
    # generate mesh and fanout peers
    for i, peer in peers:
      if i mod 2 == 0:
        gossipSub.fanout[topic].incl(peer)
      else:
        gossipSub.gossipsub[topic].incl(peer)
    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == gossipSub.parameters.d

  asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
    # Topic absent from fanout must not crash.
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)
    # generate mesh and fanout peers
    for i, peer in peers:
      if i mod 2 == 0:
        gossipSub.mesh[topic].incl(peer)
        gossipSub.grafted(peer, topic)
      else:
        gossipSub.gossipsub[topic].incl(peer)
    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == gossipSub.parameters.d

  asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
    # No free-standing gossipsub peers exist, so there is nobody to gossip to.
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)
    # generate mesh and fanout peers
    for i, peer in peers:
      if i mod 2 == 0:
        gossipSub.mesh[topic].incl(peer)
        gossipSub.grafted(peer, topic)
      else:
        gossipSub.fanout[topic].incl(peer)
    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == 0
asyncTest "messages sent to peers not in the mesh are propagated via gossip":
  # With small D values the mesh cannot include everyone; the remaining
  # peers must learn of the message via IHAVE gossip.
  let
    numberOfNodes = 5
    topic = "foobar"
    dValues = DValues(dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1))
    nodes = generateNodes(numberOfNodes, gossip = true, dValues = some(dValues))
      .toGossipSub()
  startNodesAndDeferStop(nodes)
  # All nodes are checking for iHave messages
  var messages = addIHaveObservers(nodes)
  # And are interconnected
  await connectNodesStar(nodes)
  # And subscribed to the same topic
  subscribeAllNodes(nodes, topic, voidTopicHandler)
  checkUntilTimeout:
    nodes.allIt(it.gossipsub.getOrDefault(topic).len == numberOfNodes - 1)
  # When node 0 sends a message
  tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
  # At least one of the nodes should have received an iHave message
  # The check is made this way because the mesh structure changes from run to run
  checkUntilTimeout:
    messages[].mapIt(it[].len).anyIt(it > 0)

asyncTest "adaptive gossip dissemination, dLazy and gossipFactor to 0":
  # With dLazy = 0 and gossipFactor = 0 no gossip targets are chosen,
  # so no node may ever see an IHAVE.
  let
    numberOfNodes = 20
    topic = "foobar"
    dValues = DValues(
      dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(0)
    )
    # NOTE(review): unlike the sibling tests, `nodes` is not converted with
    # .toGossipSub() here — confirm whether that is intentional
    nodes = generateNodes(
      numberOfNodes,
      gossip = true,
      dValues = some(dValues),
      gossipFactor = some(0.float),
    )
  startNodesAndDeferStop(nodes)
  # All nodes are checking for iHave messages
  var messages = addIHaveObservers(nodes)
  # And are connected to node 0
  for i in 1 ..< numberOfNodes:
    await connectNodes(nodes[0], nodes[i])
  # And subscribed to the same topic
  subscribeAllNodes(nodes, topic, voidTopicHandler)
  await waitForHeartbeat()
  # When node 0 sends a message
  tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
  await waitForHeartbeat()
  # None of the nodes should have received an iHave message
  let receivedIHaves = messages[].mapIt(it[].len)
  check:
    filterIt(receivedIHaves, it > 0).len == 0

asyncTest "adaptive gossip dissemination, with gossipFactor priority":
  # gossipFactor (0.5) dominates dLazy (4) when it selects more peers.
  let
    numberOfNodes = 20
    topic = "foobar"
    dValues = DValues(
      dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(4)
    )
    nodes = generateNodes(
        numberOfNodes,
        gossip = true,
        dValues = some(dValues),
        gossipFactor = some(0.5),
      )
      .toGossipSub()
  startNodesAndDeferStop(nodes)
  # All nodes are checking for iHave messages
  var messages = addIHaveObservers(nodes)
  # And are connected to node 0
  for i in 1 ..< numberOfNodes:
    await connectNodes(nodes[0], nodes[i])
  # And subscribed to the same topic
  subscribeAllNodes(nodes, topic, voidTopicHandler)
  checkUntilTimeout:
    nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
  # When node 0 sends a message
  tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
  await waitForHeartbeat(2)
  # At least 8 of the nodes should have received an iHave message
  # That's because the gossip factor is 0.5 over 16 available nodes
  let receivedIHaves = messages[].mapIt(it[].len)
  check:
    filterIt(receivedIHaves, it > 0).len >= 8

asyncTest "adaptive gossip dissemination, with dLazy priority":
  # With gossipFactor = 0, dLazy (6) determines the gossip target count.
  let
    numberOfNodes = 20
    topic = "foobar"
    dValues = DValues(
      dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(6)
    )
    nodes = generateNodes(
        numberOfNodes,
        gossip = true,
        dValues = some(dValues),
        gossipFactor = some(0.float),
      )
      .toGossipSub()
  startNodesAndDeferStop(nodes)
  # All nodes are checking for iHave messages
  var messages = addIHaveObservers(nodes)
  # And are connected to node 0
  for i in 1 ..< numberOfNodes:
    await connectNodes(nodes[0], nodes[i])
  # And subscribed to the same topic
  subscribeAllNodes(nodes, topic, voidTopicHandler)
  checkUntilTimeout:
    nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
  # When node 0 sends a message
  tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
  await waitForHeartbeat(2)
  # At least 6 of the nodes should have received an iHave message
  # That's because the dLazy is 6
  let receivedIHaves = messages[].mapIt(it[].len)
  check:
    filterIt(receivedIHaves, it > 0).len >= dValues.dLazy.get()
asyncTest "iDontWant messages are broadcast immediately after receiving the first message instance":
  # In the line topology 0 <-> 1 <-> 2, when node 1 first receives the
  # message it must push IDONTWANT to node 2 without waiting for a heartbeat.
  let
    numberOfNodes = 3
    topic = "foobar"
    nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
  startNodesAndDeferStop(nodes)
  # All nodes are checking for iDontWant messages
  var messages = addIDontWantObservers(nodes)
  # And are connected in a line
  await connectNodes(nodes[0], nodes[1])
  await connectNodes(nodes[1], nodes[2])
  # And subscribed to the same topic
  subscribeAllNodes(nodes, topic, voidTopicHandler)
  checkUntilTimeout:
    nodes[0].gossipsub.getOrDefault(topic).len == 1
    nodes[1].gossipsub.getOrDefault(topic).len == 2
    nodes[2].gossipsub.getOrDefault(topic).len == 1
  # When node 0 sends a large message
  let largeMsg = newSeq[byte](1000)
  tryPublish await nodes[0].publish(topic, largeMsg), 1
  # Only node 2 should have received the iDontWant message
  checkUntilTimeout:
    messages[].mapIt(it[].len)[2] == 1
    messages[].mapIt(it[].len)[1] == 0
    messages[].mapIt(it[].len)[0] == 0
asyncTest "e2e - GossipSub peer exchange":
  # A, B & C are subscribed to something.
  # B unsubscribes from it; it should send PX to A & C.
  #
  # Only C supplied a signed peer record (SPR), A did not — so among the
  # exchanged records exactly one has a record set.
  let
    topic = "foobar"
    nodes =
      generateNodes(2, gossip = true, enablePX = true).toGossipSub() &
      generateNodes(1, gossip = true, sendSignedPeerRecord = true).toGossipSub()
  startNodesAndDeferStop(nodes)
  await connectNodesStar(nodes)
  subscribeAllNodes(nodes, topic, voidTopicHandler)
  await waitSubAllNodes(nodes, topic)
  # Setup record handlers for all nodes
  var
    passed0: Future[void] = newFuture[void]()
    passed2: Future[void] = newFuture[void]()
  nodes[0].routingRecordsHandler.add(
    proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
      check:
        tag == topic
        peers.len == 2
        # exactly one of the two exchanged peers carries an SPR
        peers[0].record.isSome() xor peers[1].record.isSome()
      passed0.complete()
  )
  nodes[1].routingRecordsHandler.add(
    proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
      # the unsubscribing node must not receive PX itself
      raiseAssert "should not get here"
  )
  nodes[2].routingRecordsHandler.add(
    proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
      check:
        tag == topic
        peers.len == 2
        peers[0].record.isSome() xor peers[1].record.isSome()
      passed2.complete()
  )
  # Unsubscribe from the topic
  nodes[1].unsubscribe(topic, voidTopicHandler)
  # Then verify what nodes receive the PX
  let results = await waitForStates(@[passed0, passed2], HEARTBEAT_TIMEOUT)
  check:
    results[0].isCompleted()
    results[1].isCompleted()
# Fix: test name contained a typo ("gosspipsub"); corrected to "gossipsub".
asyncTest "Peer must send right gossipsub version":
  # A v1.0-only peer connected to a default node must end up with
  # GossipSubCodec_10 negotiated on both sides of the mesh.
  let
    topic = "foobar"
    node0 = generateNodes(1, gossip = true)[0]
    node1 = generateNodes(1, gossip = true, gossipSubVersion = GossipSubCodec_10)[0]
  startNodesAndDeferStop(@[node0, node1])
  await connectNodes(node0, node1)
  node0.subscribe(topic, voidTopicHandler)
  node1.subscribe(topic, voidTopicHandler)
  await waitSubGraph(@[node0, node1], topic)
  var gossip0: GossipSub = GossipSub(node0)
  var gossip1: GossipSub = GossipSub(node1)
  # Both mesh entries must report the downgraded codec
  checkUntilTimeout:
    gossip0.mesh.getOrDefault(topic).toSeq[0].codec == GossipSubCodec_10
  checkUntilTimeout:
    gossip1.mesh.getOrDefault(topic).toSeq[0].codec == GossipSubCodec_10

View File

@@ -0,0 +1,348 @@
{.used.}
import std/[sequtils]
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../helpers
suite "GossipSub Heartbeat":
  teardown:
    checkTrackers()

  asyncTest "Mesh is rebalanced during heartbeat - pruning peers":
    # Lowering D values at runtime must cause the heartbeat to prune the
    # over-full mesh down into the new [dLow, dHigh] range.
    const
      numberOfNodes = 10
      topic = "foobar"
      heartbeatInterval = 200.milliseconds
    let
      nodes = generateNodes(
          numberOfNodes, gossip = true, heartbeatInterval = heartbeatInterval
        )
        .toGossipSub()
      node0 = nodes[0]
    startNodesAndDeferStop(nodes)
    # Nodes are connected to Node0
    for i in 1 ..< numberOfNodes:
      await connectNodes(node0, nodes[i])
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    checkUntilTimeout:
      node0.mesh.getOrDefault(topic).len == numberOfNodes - 1
    # When DValues of Node0 are updated to lower than defaults
    const
      newDLow = 2
      newDHigh = 4
      newDValues = some(
        DValues(
          dLow: some(newDLow),
          dHigh: some(newDHigh),
          d: some(3),
          dLazy: some(3),
          dScore: some(2),
          dOut: some(2),
        )
      )
    node0.parameters.applyDValues(newDValues)
    # Then mesh of Node0 is rebalanced and peers are pruned to adapt to new values
    checkUntilTimeout:
      node0.mesh[topic].len >= newDLow and node0.mesh[topic].len <= newDHigh
  
  asyncTest "Mesh is rebalanced during heartbeat - grafting new peers":
    # After mesh peers disconnect, the heartbeat must graft replacements so
    # the mesh returns to the [dLow, dHigh] range with fresh peers.
    const
      numberOfNodes = 10
      topic = "foobar"
      dLow = 3
      dHigh = 4
      heartbeatInterval = 200.milliseconds
    let
      nodes = generateNodes(
          numberOfNodes,
          gossip = true,
          dValues = some(
            DValues(dLow: some(dLow), dHigh: some(dHigh), d: some(3), dOut: some(1))
          ),
          pruneBackoff = 20.milliseconds,
          heartbeatInterval = heartbeatInterval,
        )
        .toGossipSub()
      node0 = nodes[0]
    startNodesAndDeferStop(nodes)
    # Nodes are connected to Node0
    for i in 1 ..< numberOfNodes:
      await connectNodes(node0, nodes[i])
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    checkUntilTimeout:
      node0.mesh.getOrDefault(topic).len >= dLow and
        node0.mesh.getOrDefault(topic).len <= dHigh
    # When peers of Node0 mesh are disconnected (all but the first)
    let peersToDisconnect = node0.mesh[topic].toSeq()[1 .. ^1].mapIt(it.peerId)
    findAndUnsubscribePeers(nodes, peersToDisconnect, topic, voidTopicHandler)
    checkUntilTimeout:
      node0.mesh[topic].len >= dLow and node0.mesh[topic].len <= dHigh
      node0.mesh[topic].toSeq().allIt(it.peerId notin peersToDisconnect)
asyncTest "Mesh is rebalanced during heartbeat - opportunistic grafting":
  # High-scoring peers outside the mesh must be opportunistically grafted
  # (capped at MaxOpportunisticGraftPeers) when mesh peers score below the
  # opportunisticGraftThreshold median.
  const
    numberOfNodes = 10
    topic = "foobar"
    heartbeatInterval = 200.milliseconds
  let
    nodes = generateNodes(
        numberOfNodes,
        gossip = true,
        dValues = some(
          DValues(
            dLow: some(3),
            dHigh: some(4),
            d: some(3),
            dOut: some(1),
            dLazy: some(3),
            dScore: some(2),
          )
        ),
        pruneBackoff = 20.milliseconds,
        opportunisticGraftThreshold = 600,
        heartbeatInterval = heartbeatInterval,
      )
      .toGossipSub()
    node0 = nodes[0]
  startNodesAndDeferStop(nodes)
  # Nodes are connected to Node0
  for i in 1 ..< numberOfNodes:
    await connectNodes(node0, nodes[i])
  subscribeAllNodes(nodes, topic, voidTopicHandler)
  await waitForHeartbeat(heartbeatInterval)
  # Keep track of initial mesh of Node0
  let startingMesh = node0.mesh[topic].toSeq()
  # When scores are assigned to Peers of Node0
  var expectedGrafts: seq[PubSubPeer] = @[]
  var score = 100.0
  for peer in node0.gossipsub[topic]:
    if peer in node0.mesh[topic]:
      # Assign scores in starting Mesh (100, 200, 300, ...)
      peer.score = score
      score += 100.0
    else:
      # Assign scores higher than median to Peers not in starting Mesh and expect them to be grafted
      peer.score = 800.0
      expectedGrafts &= peer
  # Then during heartbeat Peers with lower than median scores are pruned and max 2 Peers are grafted
  await waitForHeartbeat(heartbeatInterval)
  let actualGrafts = node0.mesh[topic].toSeq().filterIt(it notin startingMesh)
  check:
    actualGrafts.len == MaxOpportunisticGraftPeers
    actualGrafts.allIt(it in expectedGrafts)
asyncTest "Fanout maintenance during heartbeat - expired peers are dropped":
  # fanoutTTL < heartbeatInterval, so the heartbeat must drop the fanout
  # entry created by publishing to an unsubscribed topic.
  const
    numberOfNodes = 10
    topic = "foobar"
    heartbeatInterval = 200.milliseconds
  let nodes = generateNodes(
      numberOfNodes,
      gossip = true,
      fanoutTTL = 60.milliseconds,
      heartbeatInterval = heartbeatInterval,
    )
    .toGossipSub()
  startNodesAndDeferStop(nodes)
  await connectNodesStar(nodes)
  # All nodes but Node0 are subscribed to the topic
  for node in nodes[1 .. ^1]:
    node.subscribe(topic, voidTopicHandler)
  await waitForHeartbeat(heartbeatInterval)
  let node0 = nodes[0]
  checkUntilTimeout:
    node0.gossipsub.hasKey(topic)
  # When Node0 sends a message to the topic
  tryPublish await node0.publish(topic, newSeq[byte](10000)), 3
  # Then Node0 fanout peers are populated
  checkUntilTimeout:
    node0.fanout.hasKey(topic)
    node0.fanout[topic].len > 0
  # And after heartbeat Node0 fanout peers are dropped (because fanoutTTL < heartbeatInterval)
  checkUntilTimeout:
    not node0.fanout.hasKey(topic)

asyncTest "Fanout maintenance during heartbeat - fanout peers are replenished":
  # When fanout peers leave, the heartbeat must refill the fanout from the
  # remaining known peers for the topic.
  const
    numberOfNodes = 10
    topic = "foobar"
    heartbeatInterval = 200.milliseconds
  let
    nodes = generateNodes(
        numberOfNodes, gossip = true, heartbeatInterval = heartbeatInterval
      )
      .toGossipSub()
    node0 = nodes[0]
  startNodesAndDeferStop(nodes)
  await connectNodesStar(nodes)
  # All nodes but Node0 are subscribed to the topic
  for node in nodes[1 .. ^1]:
    node.subscribe(topic, voidTopicHandler)
  await waitForHeartbeat(heartbeatInterval)
  # When Node0 sends a message to the topic
  tryPublish await node0.publish(topic, newSeq[byte](10000)), 1
  # Then Node0 fanout peers are populated
  let maxFanoutPeers = node0.parameters.d
  checkUntilTimeout:
    node0.fanout[topic].len == maxFanoutPeers
  # When all peers but first one of Node0 fanout are disconnected
  let peersToDisconnect = node0.fanout[topic].toSeq()[1 .. ^1].mapIt(it.peerId)
  findAndUnsubscribePeers(nodes, peersToDisconnect, topic, voidTopicHandler)
  # Then Node0 fanout peers are replenished during heartbeat
  # expecting 10[numberOfNodes] - 1[Node0] - (6[maxFanoutPeers] - 1[first peer not disconnected]) = 4
  let expectedLen = numberOfNodes - 1 - (maxFanoutPeers - 1)
  checkUntilTimeout:
    node0.fanout[topic].len == expectedLen
    node0.fanout[topic].toSeq().allIt(it.peerId notin peersToDisconnect)
asyncTest "iDontWants history - last element is pruned during heartbeat":
  # The iDontWants history ring shifts on every heartbeat: a fresh element
  # is inserted at index 0 and the oldest element falls off the end.
  const
    topic = "foobar"
    heartbeatInterval = 200.milliseconds
    historyLength = 3
  let nodes = generateNodes(
      2,
      gossip = true,
      sendIDontWantOnPublish = true,
      historyLength = historyLength,
      heartbeatInterval = heartbeatInterval,
    )
    .toGossipSub()
  startNodesAndDeferStop(nodes)
  await connectNodes(nodes[0], nodes[1])
  subscribeAllNodes(nodes, topic, voidTopicHandler)
  await waitForHeartbeat(heartbeatInterval)
  # Get Node0 as Peer of Node1
  let peer = nodes[1].mesh[topic].toSeq()[0]
  # Wait for history to populate
  checkUntilTimeout:
    peer.iDontWants.len == historyLength
  # When Node0 sends 5 messages to the topic
  const msgCount = 5
  for i in 0 ..< msgCount:
    tryPublish await nodes[0].publish(topic, newSeq[byte](1000)), 1
  # Then Node1 receives 5 iDontWant messages from Node0
  checkUntilTimeoutCustom(3.seconds, 50.milliseconds):
    peer.iDontWants[0].len == msgCount
  for i in 0 ..< historyLength:
    # When heartbeat happens
    # And history moves (new element added at start, last element pruned)
    checkUntilTimeout:
      peer.iDontWants[i].len == 0
    # Then iDontWant messages are moved to the next element
    var expectedHistory = newSeqWith(historyLength, 0)
    let nextIndex = i + 1
    if nextIndex < historyLength:
      expectedHistory[nextIndex] = msgCount
    # Until they reach last element and are pruned
    checkUntilTimeout:
      peer.iDontWants.mapIt(it.len) == expectedHistory
asyncTest "sentIHaves history - last element is pruned during heartbeat":
  # Like iDontWants, the sentIHaves ring shifts each heartbeat until the
  # recorded entry reaches the last slot and is pruned.
  # 3 Nodes, Node 0 <==> Node 1 and Node 0 <==> Node 2
  # due to DValues: 1 peer in mesh and 1 peer only in gossip of Node 0
  const
    numberOfNodes = 3
    topic = "foobar"
    heartbeatInterval = 200.milliseconds
    historyLength = 3
    gossipThreshold = -100.0
  let nodes = generateNodes(
      numberOfNodes,
      gossip = true,
      historyLength = historyLength,
      dValues =
        some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
      heartbeatInterval = heartbeatInterval,
      gossipThreshold = gossipThreshold,
    )
    .toGossipSub()
  startNodesAndDeferStop(nodes)
  for i in 1 ..< numberOfNodes:
    await connectNodes(nodes[0], nodes[i])
  subscribeAllNodes(nodes, topic, voidTopicHandler)
  await waitForHeartbeat(heartbeatInterval)
  # Find Peer outside of mesh to which Node 0 will send IHave
  let peerOutsideMesh =
    nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
  # Wait for history to populate
  checkUntilTimeout:
    peerOutsideMesh.sentIHaves.len == historyLength
  # When a nodeOutsideMesh receives an IHave message, it responds with an IWant to request the full message from Node0
  # Setting `peer.score < gossipThreshold` to prevent the nodeOutsideMesh from sending the IWant
  # As when IWant is processed, messages are removed from sentIHaves history
  let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)
  for p in nodeOutsideMesh.gossipsub[topic].toSeq():
    p.score = 2 * gossipThreshold
  # When NodeInsideMesh sends a message to the topic
  let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
  let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
  tryPublish await nodeInsideMesh.publish(topic, newSeq[byte](1000)), 1
  # When next heartbeat occurs
  # Then IHave is sent and sentIHaves is populated
  checkUntilTimeout:
    peerOutsideMesh.sentIHaves[0].len == 1
  # Need to clear mCache as node would keep populating sentIHaves until cache is shifted enough times
  nodes[0].clearMCache()
  for i in 0 ..< historyLength:
    # When heartbeat happens
    # And history moves (new element added at start, last element pruned)
    checkUntilTimeout:
      peerOutsideMesh.sentIHaves[i].len == 0
    # Then sentIHaves messages are moved to the next element
    var expectedHistory = newSeqWith(historyLength, 0)
    let nextIndex = i + 1
    if nextIndex < historyLength:
      expectedHistory[nextIndex] = 1
    # Until they reach last element and are pruned
    checkUntilTimeout:
      peerOutsideMesh.sentIHaves.mapIt(it.len) == expectedHistory

View File

@@ -0,0 +1,527 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import chronicles
import std/[sequtils]
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../helpers
suite "GossipSub Mesh Management":
teardown:
checkTrackers()
asyncTest "subscribe/unsubscribeAll":
# Subscribing populates topics/gossipsub/mesh; unsubscribeAll clears the local
# topic and mesh but keeps the gossipsub table entry (needed for fanout).
let topic = "foobar"
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
defer:
await teardownGossipSub(gossipSub, conns)
# test via dynamic dispatch
gossipSub.PubSub.subscribe(topic, voidTopicHandler)
check:
gossipSub.topics.contains(topic)
gossipSub.gossipsub[topic].len() > 0
gossipSub.mesh[topic].len() > 0
# test via dynamic dispatch
gossipSub.PubSub.unsubscribeAll(topic)
check:
topic notin gossipSub.topics # not in local topics
topic notin gossipSub.mesh # not in mesh
topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)
asyncTest "`rebalanceMesh` Degree Lo":
# With an empty mesh and enough known peers, rebalanceMesh grafts exactly
# parameters.d peers into the mesh.
let topic = "foobar"
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
defer:
await teardownGossipSub(gossipSub, conns)
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
check gossipSub.mesh[topic].len == gossipSub.parameters.d
asyncTest "rebalanceMesh - bad peers":
# Peers with negative score must not be grafted into the mesh during rebalance.
let topic = "foobar"
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
defer:
await teardownGossipSub(gossipSub, conns)
# Assign scores -11, -10, ..., 3: only 4 peers (scores 0..3) are non-negative
var scoreLow = -11'f64
for peer in peers:
peer.score = scoreLow
scoreLow += 1.0
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
# low score peers should not be in mesh, that's why the count must be 4
check gossipSub.mesh[topic].len == 4
for peer in gossipSub.mesh[topic]:
check peer.score >= 0.0
asyncTest "`rebalanceMesh` Degree Hi":
# An oversized mesh is pruned down to parameters.d + parameters.dScore peers.
let topic = "foobar"
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
defer:
await teardownGossipSub(gossipSub, conns)
check gossipSub.mesh[topic].len == 15
gossipSub.rebalanceMesh(topic)
check gossipSub.mesh[topic].len ==
gossipSub.parameters.d + gossipSub.parameters.dScore
asyncTest "rebalanceMesh fail due to backoff":
# Peers under an active backoff period cannot be grafted: grafts are answered
# with prunes and rebalanceMesh leaves the mesh empty.
let topic = "foobar"
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
defer:
await teardownGossipSub(gossipSub, conns)
for peer in peers:
# Put every peer on a 1-hour backoff for the topic
gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
peer.peerId, Moment.now() + 1.hours
)
let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
# there must be a control prune due to violation of backoff
check prunes.len != 0
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
# expect 0 since they are all backing off
check gossipSub.mesh[topic].len == 0
asyncTest "rebalanceMesh fail due to backoff - remote":
# When every mesh peer sends a PRUNE with a backoff, the topic entry is
# removed from the mesh entirely.
let topic = "foobar"
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
defer:
await teardownGossipSub(gossipSub, conns)
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
check gossipSub.mesh[topic].len != 0
for peer in peers:
gossipSub.handlePrune(
peer,
@[
ControlPrune(
topicID: topic,
peers: @[],
backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
)
],
)
# expect topic cleaned up since they are all pruned
check topic notin gossipSub.mesh
asyncTest "rebalanceMesh Degree Hi - audit scenario":
# Pruning an oversized mesh must retain at least dOut outbound peers and keep
# the mesh above dLow (regression test from a security audit scenario).
let
topic = "foobar"
numInPeers = 6
numOutPeers = 7
totalPeers = numInPeers + numOutPeers
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
totalPeers, topic, populateGossipsub = true, populateMesh = true
)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.parameters.dScore = 4
gossipSub.parameters.d = 6
gossipSub.parameters.dOut = 3
gossipSub.parameters.dHigh = 12
gossipSub.parameters.dLow = 4
# Inbound peers get high scores so score-based retention favours them,
# forcing the dOut guarantee to do the work of keeping outbound peers.
for i in 0 ..< numInPeers:
let conn = conns[i]
let peer = peers[i]
conn.transportDir = Direction.In
peer.score = 40.0
for i in numInPeers ..< totalPeers:
let conn = conns[i]
let peer = peers[i]
conn.transportDir = Direction.Out
peer.score = 10.0
check gossipSub.mesh[topic].len == 13
gossipSub.rebalanceMesh(topic)
# ensure we are above dlow
check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
var outbound = 0
for peer in gossipSub.mesh[topic]:
if peer.sendConn.transportDir == Direction.Out:
inc outbound
# ensure we give priority and keep at least dOut outbound peers
check outbound >= gossipSub.parameters.dOut
asyncTest "rebalanceMesh Degree Hi - dScore controls number of peers to retain by score when pruning":
# Given GossipSub node starting with 13 peers in mesh
let
topic = "foobar"
totalPeers = 13
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
totalPeers, topic, populateGossipsub = true, populateMesh = true
)
defer:
await teardownGossipSub(gossipSub, conns)
# And mesh is larger than dHigh
gossipSub.parameters.dLow = 4
gossipSub.parameters.d = 6
gossipSub.parameters.dHigh = 8
gossipSub.parameters.dOut = 3
gossipSub.parameters.dScore = 13
check gossipSub.mesh[topic].len == totalPeers
# When mesh is rebalanced
gossipSub.rebalanceMesh(topic)
# Then pruning is not triggered when mesh is not larger than dScore
check gossipSub.mesh[topic].len == totalPeers
asyncTest "Nodes graft peers according to DValues - numberOfNodes < dHigh":
# With fewer peers than dHigh, every node grafts all other nodes into its mesh.
let
numberOfNodes = 5
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
let expectedNumberOfPeers = numberOfNodes - 1
for i in 0 ..< numberOfNodes:
let node = nodes[i]
checkUntilTimeout:
node.gossipsub.getOrDefault(topic).len == expectedNumberOfPeers
node.mesh.getOrDefault(topic).len == expectedNumberOfPeers
node.fanout.len == 0
asyncTest "Nodes graft peers according to DValues - numberOfNodes > dHigh":
# With more peers than dHigh, the mesh size must settle within [dLow, dHigh]
# while gossipsub still tracks every peer.
let
numberOfNodes = 15
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
let
expectedNumberOfPeers = numberOfNodes - 1
# dHigh/d/dLow mirror the default gossipsub parameters; only the bounds are asserted
dHigh = 12
d = 6
dLow = 4
for i in 0 ..< numberOfNodes:
let node = nodes[i]
checkUntilTimeout:
node.gossipsub.getOrDefault(topic).len == expectedNumberOfPeers
node.mesh.getOrDefault(topic).len >= dLow and
node.mesh.getOrDefault(topic).len <= dHigh
node.fanout.len == 0
asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
# A remote peer's subscription must show up in the local gossipsub table
# even when the local node itself is not subscribed.
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
let nodes = generateNodes(2, gossip = true)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes[1].subscribe("foobar", handler)
let gossip1 = GossipSub(nodes[0])
let gossip2 = GossipSub(nodes[1])
checkUntilTimeout:
"foobar" in gossip2.topics
"foobar" in gossip1.gossipsub
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)
asyncTest "e2e - GossipSub should add remote peer topic subscriptions if both peers are subscribed":
# When both peers subscribe, each must track the other in gossipsub or mesh.
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
let nodes = generateNodes(2, gossip = true)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes[0].subscribe("foobar", handler)
nodes[1].subscribe("foobar", handler)
var subs: seq[Future[void]]
subs &= waitSub(nodes[1], nodes[0], "foobar")
subs &= waitSub(nodes[0], nodes[1], "foobar")
await allFuturesThrowing(subs)
let
gossip1 = GossipSub(nodes[0])
gossip2 = GossipSub(nodes[1])
check:
"foobar" in gossip1.topics
"foobar" in gossip2.topics
"foobar" in gossip1.gossipsub
"foobar" in gossip2.gossipsub
# A peer may already have been grafted into the mesh, so accept either table
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId) or
gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
gossip2.gossipsub.hasPeerId("foobar", gossip1.peerInfo.peerId) or
gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
asyncTest "GossipSub invalid topic subscription":
# The subscriptionValidator must be invoked for remote subscriptions and can
# reject a topic; here it flags "foobar" and completes a future when hit.
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
let nodes = generateNodes(2, gossip = true)
startNodesAndDeferStop(nodes)
# We must subscribe before setting the validator
nodes[0].subscribe("foobar", handler)
var gossip = GossipSub(nodes[0])
let invalidDetected = newFuture[void]()
gossip.subscriptionValidator = proc(topic: string): bool =
if topic == "foobar":
try:
invalidDetected.complete()
except:
raise newException(Defect, "Exception during subscriptionValidator")
false
else:
true
await connectNodesStar(nodes)
nodes[1].subscribe("foobar", handler)
await invalidDetected.wait(10.seconds)
asyncTest "GossipSub test directPeers":
# Adding a direct peer must establish the connection without an explicit
# connect; the subscription validator firing proves traffic flowed.
let nodes = generateNodes(2, gossip = true)
startNodesAndDeferStop(nodes)
await GossipSub(nodes[0]).addDirectPeer(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
let invalidDetected = newFuture[void]()
GossipSub(nodes[0]).subscriptionValidator = proc(topic: string): bool =
if topic == "foobar":
try:
invalidDetected.complete()
except:
raise newException(Defect, "Exception during subscriptionValidator")
false
else:
true
# DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
### await connectNodesStar(nodes)
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
nodes[1].subscribe("foobar", handler)
await invalidDetected.wait(10.seconds)
asyncTest "mesh and gossipsub updated when topic subscribed and unsubscribed":
let
numberOfNodes = 5
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
# When all of them are connected and subscribed to the same topic
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Then mesh and gossipsub should be populated
for node in nodes:
check node.topics.contains(topic)
check node.gossipsub.hasKey(topic)
check node.gossipsub[topic].len() == numberOfNodes - 1
check node.mesh.hasKey(topic)
check node.mesh[topic].len() == numberOfNodes - 1
# When all nodes unsubscribe from the topic
unsubscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Then the topic should be removed from mesh and gossipsub
for node in nodes:
check topic notin node.topics
check topic notin node.mesh
check topic notin node.gossipsub
asyncTest "handle subscribe and unsubscribe for multiple topics":
# Subscribing/unsubscribing several topics must keep per-topic state
# (topics, gossipsub, mesh) independent and correct.
let
numberOfNodes = 3
topics = @["foobar1", "foobar2", "foobar3"]
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
# When nodes subscribe to multiple topics
await connectNodesStar(nodes)
for topic in topics:
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Then all nodes should be subscribed to the topics initially
for i in 0 ..< topics.len:
let topic = topics[i]
checkUntilTimeout:
nodes.allIt(it.topics.contains(topic))
nodes.allIt(it.gossipsub.getOrDefault(topic).len() == numberOfNodes - 1)
nodes.allIt(it.mesh.getOrDefault(topic).len() == numberOfNodes - 1)
# When they unsubscribe from all topics
for topic in topics:
unsubscribeAllNodes(nodes, topic, voidTopicHandler)
# Then topics should be removed from mesh and gossipsub
for i in 0 ..< topics.len:
let topic = topics[i]
checkUntilTimeout:
nodes.allIt(not it.topics.contains(topic))
nodes.allIt(topic notin it.gossipsub)
nodes.allIt(topic notin it.mesh)
asyncTest "Unsubscribe backoff":
# After unsubscribe+resubscribe, peers apply unsubscribeBackoff before the
# node can be regrafted into their meshes.
const
numberOfNodes = 3
topic = "foobar"
unsubscribeBackoff = 1.seconds # 1s is the minimum
let nodes = generateNodes(
numberOfNodes, gossip = true, unsubscribeBackoff = unsubscribeBackoff
)
.toGossipSub()
startNodesAndDeferStop(nodes)
# Nodes are connected to Node0
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
check:
nodes[0].mesh[topic].len == numberOfNodes - 1
# When Node0 unsubscribes from the topic
nodes[0].unsubscribe(topic, voidTopicHandler)
# And subscribes back straight away
nodes[0].subscribe(topic, voidTopicHandler)
# Then its mesh is pruned and peers have applied unsubscribeBackoff
# Waiting more than one heartbeat (60ms) and less than unsubscribeBackoff (1s)
await sleepAsync(unsubscribeBackoff.div(2))
check:
not nodes[0].mesh.hasKey(topic)
# When unsubscribeBackoff period is done
await sleepAsync(unsubscribeBackoff)
# Then on the next heartbeat mesh is rebalanced and peers are regrafted
check:
nodes[0].mesh[topic].len == numberOfNodes - 1
asyncTest "Prune backoff":
# Peers pruned because of shrinking DValues apply pruneBackoff before they
# can be regrafted, even after the DValues are restored.
const
numberOfNodes = 9
topic = "foobar"
pruneBackoff = 1.seconds # 1s is the minimum
dValues = some(
DValues(
dLow: some(6),
dHigh: some(8),
d: some(6),
dLazy: some(6),
dScore: some(4),
dOut: some(2),
)
)
let
nodes = generateNodes(
numberOfNodes, gossip = true, dValues = dValues, pruneBackoff = pruneBackoff
)
.toGossipSub()
node0 = nodes[0]
startNodesAndDeferStop(nodes)
# Nodes are connected to Node0
for i in 1 ..< numberOfNodes:
await connectNodes(node0, nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
checkUntilTimeout:
node0.mesh.getOrDefault(topic).len == numberOfNodes - 1
# When DValues of Node0 are updated to lower than initial dValues
const newDValues = some(
DValues(
dLow: some(2),
dHigh: some(4),
d: some(3),
dLazy: some(3),
dScore: some(2),
dOut: some(2),
)
)
node0.parameters.applyDValues(newDValues)
# Then Node0 mesh is pruned to newDValues.dHigh length
# And pruned peers have applied pruneBackoff
checkUntilTimeout:
node0.mesh.getOrDefault(topic).len == newDValues.get.dHigh.get
# When DValues of Node0 are updated back to the initial dValues
node0.parameters.applyDValues(dValues)
# Waiting more than one heartbeat (60ms) and less than pruneBackoff (1s)
await sleepAsync(pruneBackoff.div(2))
check:
node0.mesh.getOrDefault(topic).len == newDValues.get.dHigh.get
# When pruneBackoff period is done
await sleepAsync(pruneBackoff)
# Then on the next heartbeat mesh is rebalanced and peers are regrafted to the initial d value
check:
node0.mesh.getOrDefault(topic).len == dValues.get.d.get

View File

@@ -0,0 +1,302 @@
{.used.}
import std/[sequtils]
import stew/byteutils
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, floodsub]
import ../../libp2p/protocols/pubsub/rpc/[messages, message]
import ../helpers
suite "GossipSub Message Cache":
teardown:
checkTrackers()
asyncTest "Received messages are added to the message cache":
# A message received over the mesh must land in the receiver's mcache window.
const
numberOfNodes = 2
topic = "foobar"
let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When Node0 publishes a message to the topic
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
# Then Node1 receives the message and saves it in the cache
checkUntilTimeout:
nodes[1].mcache.window(topic).len == 1
asyncTest "Message cache history shifts on heartbeat and is cleared on shift":
# The mcache is a circular buffer of historyLength slots; after historyLength
# heartbeats the slot holding a message is reused and the message is dropped.
const
numberOfNodes = 2
topic = "foobar"
historyGossip = 3 # mcache window
historyLength = 5
let nodes = generateNodes(
numberOfNodes,
gossip = true,
historyGossip = historyGossip,
historyLength = historyLength,
)
.toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When Node0 publishes a message to the topic
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
# Then Node1 receives the message and saves it in the cache
checkUntilTimeout:
nodes[1].mcache.window(topic).len == 1
let messageId = nodes[1].mcache.window(topic).toSeq()[0]
# When heartbeat happens, circular history shifts to the next position
# Waiting for 5(historyLength) heartbeats
await waitForHeartbeat(historyLength)
# Then history is cleared when the position with the message is reached again
# And message is removed
check:
nodes[1].mcache.window(topic).len == 0
not nodes[1].mcache.contains(messageId)
asyncTest "IHave propagation capped by history window":
# 3 Nodes, Node 0 <==> Node 1 and Node 0 <==> Node 2
# due to DValues: 1 peer in mesh and 1 peer only in gossip of Node 0
const
numberOfNodes = 3
topic = "foobar"
historyGossip = 3 # mcache window
historyLength = 5
let nodes = generateNodes(
numberOfNodes,
gossip = true,
historyGossip = historyGossip,
historyLength = historyLength,
dValues =
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
)
.toGossipSub()
startNodesAndDeferStop(nodes)
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Add observer to NodeOutsideMesh for received IHave messages
var (receivedIHaves, checkForIHaves) = createCheckForIHave()
let peerOutsideMesh =
nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)
nodeOutsideMesh.addOnRecvObserver(checkForIHaves)
# When NodeInsideMesh sends a messages to the topic
let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
tryPublish await nodeInsideMesh.publish(topic, newSeq[byte](1000)), 1
# On each heartbeat, Node0 retrieves messages in its mcache and sends IHave to NodeOutsideMesh
# On heartbeat, Node0 mcache advances to the next position (rotating the message cache window)
# Node0 will gossip about messages from the last few positions, depending on the mcache window size (historyGossip)
# By waiting more than 'historyGossip' (2x3 = 6) heartbeats, we ensure Node0 does not send IHave messages for messages older than the window size
await waitForHeartbeat(2 * historyGossip)
# Then nodeOutsideMesh receives 3 (historyGossip) IHave messages
check:
receivedIHaves[].len == historyGossip
asyncTest "Message is retrieved from cache when handling IWant and relayed to a peer outside the mesh":
# 3 Nodes, Node 0 <==> Node 1 and Node 0 <==> Node 2
# due to DValues: 1 peer in mesh and 1 peer only in gossip of Node 0
const
numberOfNodes = 3
topic = "foobar"
historyGossip = 3 # mcache window
historyLength = 5
let nodes = generateNodes(
numberOfNodes,
gossip = true,
historyGossip = historyGossip,
historyLength = historyLength,
dValues =
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
)
.toGossipSub()
startNodesAndDeferStop(nodes)
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Add observer to Node0 for received IWant messages
var (receivedIWantsNode0, checkForIWant) = createCheckForIWant()
nodes[0].addOnRecvObserver(checkForIWant)
# Find Peer outside of mesh to which Node 0 will relay received message
let peerOutsideMesh =
nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)
# Add observer to NodeOutsideMesh for received messages
var (receivedMessagesNodeOutsideMesh, checkForMessage) = createCheckForMessages()
nodeOutsideMesh.addOnRecvObserver(checkForMessage)
# When NodeInsideMesh publishes a message to the topic
let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
tryPublish await nodeInsideMesh.publish(topic, "Hello!".toBytes()), 1
# Then Node0 receives the message from NodeInsideMesh and saves it in its cache
checkUntilTimeout:
nodes[0].mcache.window(topic).len == 1
let messageId = nodes[0].mcache.window(topic).toSeq()[0]
# When Node0 sends an IHave message to NodeOutsideMesh during a heartbeat
# Then NodeOutsideMesh responds with an IWant message to Node0
checkUntilTimeout:
receivedIWantsNode0[].anyIt(messageId in it.messageIDs)
# When Node0 handles the IWant message, it retrieves the message from its message cache using the MessageId
# Then Node0 relays the original message to NodeOutsideMesh
checkUntilTimeout:
messageId in
receivedMessagesNodeOutsideMesh[].mapIt(
nodeOutsideMesh.msgIdProvider(it).value()
)
asyncTest "Published and received messages are added to the seen cache":
# Both the publisher (on publish) and the receiver (on receive) must record
# the salted message id in their seen cache.
const
numberOfNodes = 2
topic = "foobar"
let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When Node0 publishes a message to the topic
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
# Then Node1 receives the message
# Get messageId from mcache
checkUntilTimeout:
nodes[1].mcache.window(topic).len == 1
let messageId = nodes[1].mcache.window(topic).toSeq()[0]
# And both nodes save it in their seen cache
# Node0 when publish, Node1 when received
check:
nodes[0].hasSeen(nodes[0].salt(messageId))
nodes[1].hasSeen(nodes[1].salt(messageId))
asyncTest "Received messages are dropped if they are already in seen cache":
# 3 Nodes, Node 0 <==> Node 1 and Node 2 not connected and not subscribed yet
const
numberOfNodes = 3
topic = "foobar"
let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodes(nodes[0], nodes[1])
nodes[0].subscribe(topic, voidTopicHandler)
nodes[1].subscribe(topic, voidTopicHandler)
await waitForHeartbeat()
# When Node0 publishes two messages to the topic
tryPublish await nodes[0].publish(topic, "Hello".toBytes()), 1
tryPublish await nodes[0].publish(topic, "World".toBytes()), 1
# Then Node1 receives the messages
# Getting messageIds from mcache
checkUntilTimeout:
nodes[1].mcache.window(topic).len == 2
let messageId1 = nodes[1].mcache.window(topic).toSeq()[0]
let messageId2 = nodes[1].mcache.window(topic).toSeq()[1]
# And Node2 (not yet connected) hasn't received any messages
check:
nodes[2].mcache.window(topic).len == 0
# When Node2 connects with Node0 and subscribes to the topic
await connectNodes(nodes[0], nodes[2])
nodes[2].subscribe(topic, voidTopicHandler)
await waitForHeartbeat()
# And messageIds are added to node0PeerNode2 sentIHaves to allow processing IWant
let node0PeerNode2 = nodes[0].getPeerByPeerId(topic, nodes[2].peerInfo.peerId)
node0PeerNode2.sentIHaves[0].incl(messageId1)
node0PeerNode2.sentIHaves[0].incl(messageId2)
# And messageId1 is added to seen messages cache of Node2
# (addSeen returns false when the entry was newly added)
check:
not nodes[2].addSeen(nodes[2].salt(messageId1))
# And Node2 sends IWant to Node0 requesting both messages
let iWantMessage =
ControlMessage(iwant: @[ControlIWant(messageIDs: @[messageId1, messageId2])])
let node2PeerNode0 = nodes[2].getPeerByPeerId(topic, nodes[0].peerInfo.peerId)
nodes[2].broadcast(
@[node2PeerNode0], RPCMsg(control: some(iWantMessage)), isHighPriority = false
)
await waitForHeartbeat()
# Then Node2 receives only messageId2 and messageId1 is dropped
check:
nodes[2].mcache.window(topic).len == 1
nodes[2].mcache.window(topic).toSeq()[0] == messageId2
asyncTest "Published messages are dropped if they are already in seen cache":
# A fixed message-id provider makes the published message collide with a
# pre-seeded seen-cache entry, so publish must drop it.
func customMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
ok("fixed_message_id_string".toBytes())
const
numberOfNodes = 2
topic = "foobar"
let nodes = generateNodes(
numberOfNodes, gossip = true, msgIdProvider = customMsgIdProvider
)
.toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes.subscribeAllNodes(topic, voidTopicHandler)
await waitForHeartbeat()
# Given Node0 has msgId already in seen cache
let data = "Hello".toBytes()
let msg = Message.init(
some(nodes[0].peerInfo), data, topic, some(nodes[0].msgSeqno), nodes[0].sign
)
let msgId = nodes[0].msgIdProvider(msg)
check:
not nodes[0].addSeen(nodes[0].salt(msgId.value()))
# When Node0 publishes the message to the topic
discard await nodes[0].publish(topic, data)
await waitForHeartbeat()
# Then Node1 doesn't receive the message
check:
nodes[1].mcache.window(topic).len == 0

View File

@@ -0,0 +1,841 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[sequtils, enumerate]
import stew/byteutils
import utils
import sugar
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, timedcache]
import ../../libp2p/protocols/pubsub/rpc/[message, protobuf]
import ../helpers, ../utils/[futures]
const MsgIdSuccess = "msg id gen success"
proc setupTest(): Future[
tuple[
gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]
]
] {.async.} =
## Starts two gossip nodes, connects node1 -> node0 and subscribes both to
## "foobar". Node0's handler collects received payloads into the returned
## HashSet ref; node1's handler discards. Callers must stop the switches via
## teardownTest.
let nodes = generateNodes(2, gossip = true, verifySignature = false)
discard await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
await nodes[1].switch.connect(
nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
)
var receivedMessages = new(HashSet[seq[byte]])
proc handlerA(topic: string, data: seq[byte]) {.async.} =
receivedMessages[].incl(data)
proc handlerB(topic: string, data: seq[byte]) {.async.} =
discard
nodes[0].subscribe("foobar", handlerA)
nodes[1].subscribe("foobar", handlerB)
await waitSubGraph(nodes, "foobar")
var gossip0: GossipSub = GossipSub(nodes[0])
var gossip1: GossipSub = GossipSub(nodes[1])
return (gossip0, gossip1, receivedMessages)
proc teardownTest(gossip0: GossipSub, gossip1: GossipSub) {.async.} =
## Stops both switches started by setupTest, propagating any failure.
await allFuturesThrowing(gossip0.switch.stop(), gossip1.switch.stop())
proc createMessages(
gossip0: GossipSub, gossip1: GossipSub, size1: int, size2: int
): tuple[iwantMessageIds: seq[MessageId], sentMessages: HashSet[seq[byte]]] =
## Builds two "foobar" messages of the given payload sizes on gossip1, puts
## them in gossip1's mcache and registers their ids in the sentIHaves entry
## for gossip0's peer, so that a later IWANT from gossip0 is honoured.
## Returns the message ids and the raw payloads for later comparison.
var iwantMessageIds = newSeq[MessageId]()
var sentMessages = initHashSet[seq[byte]]()
for i, size in enumerate([size1, size2]):
# Payload bytes are the message index, so the two payloads differ
let data = newSeqWith(size, i.byte)
sentMessages.incl(data)
let msg = Message.init(gossip1.peerInfo.peerId, data, "foobar", some(uint64(i + 1)))
let iwantMessageId = gossip1.msgIdProvider(msg).expect(MsgIdSuccess)
iwantMessageIds.add(iwantMessageId)
gossip1.mcache.put(iwantMessageId, msg)
let peer = gossip1.peers[(gossip0.peerInfo.peerId)]
peer.sentIHaves[^1].incl(iwantMessageId)
return (iwantMessageIds, sentMessages)
suite "GossipSub Message Handling":
teardown:
checkTrackers()
asyncTest "Drop messages of topics without subscription":
# Messages for topics the node is not subscribed to must not enter the mcache.
let topic = "foobar"
var (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
defer:
await teardownGossipSub(gossipSub, conns)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = conns[i]
let peer = peers[i]
inc seqno
let msg = Message.init(conn.peerId, ("bar" & $i).toBytes(), topic, some(seqno))
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
check gossipSub.mcache.msgs.len == 0
asyncTest "subscription limits":
# A peer announcing more topics than topicsHigh gets the excess ignored and
# receives a behaviour penalty.
let gossipSub = TestGossipSub.init(newStandardSwitch())
gossipSub.topicsHigh = 10
var tooManyTopics: seq[string]
for i in 0 .. gossipSub.topicsHigh + 10:
tooManyTopics &= "topic" & $i
let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)
let conn = TestBufferStream.new(noop)
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))
check:
gossipSub.gossipsub.len == gossipSub.topicsHigh
peer.behaviourPenalty > 0.0
await conn.close()
await gossipSub.switch.stop()
asyncTest "invalid message bytes":
# Undecodable RPC bytes must raise rather than be silently ignored.
let gossipSub = TestGossipSub.init(newStandardSwitch())
let peerId = randomPeerId()
let peer = gossipSub.getPubSubPeer(peerId)
expect(CatchableError):
await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])
await gossipSub.switch.stop()
asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
# This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
# Expected: Both messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let messageSize = gossip1.maxMessageSize div 2 + 1
let (iwantMessageIds, sentMessages) =
createMessages(gossip0, gossip1, messageSize, messageSize)
# Advertise both messages via IHAVE; gossip0 is expected to reply with IWANT
gossip1.broadcast(
gossip1.mesh["foobar"],
RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: iwantMessageIds)]
)
)
),
isHighPriority = false,
)
checkUntilTimeout:
receivedMessages[] == sentMessages
check receivedMessages[].len == 2
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
# This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
# Expected: No messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let messageSize = gossip1.maxMessageSize + 10
let (bigIWantMessageIds, sentMessages) =
createMessages(gossip0, gossip1, messageSize, messageSize)
gossip1.broadcast(
gossip1.mesh["foobar"],
RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
)
)
),
isHighPriority = false,
)
# Give the exchange time to (not) happen before asserting nothing arrived
await sleepAsync(300.milliseconds)
checkUntilTimeout:
receivedMessages[].len == 0
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
# This test checks if two messages, both below the maxSize, are correctly processed and sent.
# Expected: Both messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let size1 = gossip1.maxMessageSize div 2
let size2 = gossip1.maxMessageSize div 3
let (bigIWantMessageIds, sentMessages) =
createMessages(gossip0, gossip1, size1, size2)
gossip1.broadcast(
gossip1.mesh["foobar"],
RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
)
)
),
isHighPriority = false,
)
checkUntilTimeout:
receivedMessages[] == sentMessages
check receivedMessages[].len == 2
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
# This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
# Expected: Only the smaller message should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let maxSize = gossip1.maxMessageSize
let size1 = maxSize div 2
let size2 = maxSize + 10
let (bigIWantMessageIds, sentMessages) =
createMessages(gossip0, gossip1, size1, size2)
gossip1.broadcast(
gossip1.mesh["foobar"],
RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
)
)
),
isHighPriority = false,
)
# The smaller payload is the lexicographically smaller of the two byte seqs
# (shorter and all-zero vs longer), so build the expected single-element set
var smallestSet: HashSet[seq[byte]]
let seqs = toSeq(sentMessages)
if seqs[0] < seqs[1]:
smallestSet.incl(seqs[0])
else:
smallestSet.incl(seqs[1])
checkUntilTimeout:
receivedMessages[] == smallestSet
check receivedMessages[].len == 1
await teardownTest(gossip0, gossip1)
asyncTest "messages are not sent back to source or forwarding peer":
  # A message published into a fully-meshed 3-node ring must reach the other
  # two nodes but never be echoed back to its publisher.
  let
    numberOfNodes = 3
    topic = "foobar"
    nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()

  startNodesAndDeferStop(nodes)

  let (handlerFut0, handler0) = createCompleteHandler()
  let (handlerFut1, handler1) = createCompleteHandler()
  let (handlerFut2, handler2) = createCompleteHandler()

  # Nodes are connected in a ring
  await connectNodes(nodes[0], nodes[1])
  await connectNodes(nodes[1], nodes[2])
  await connectNodes(nodes[2], nodes[0])

  # And subscribed to the same topic
  subscribeAllNodes(nodes, topic, @[handler0, handler1, handler2])
  checkUntilTimeout:
    nodes.allIt(it.mesh.getOrDefault(topic).len == numberOfNodes - 1)

  # When node 0 sends a message
  check (await nodes[0].publish(topic, "Hello!".toBytes())) == 2
  await waitForHeartbeat()

  # Nodes 1 and 2 should receive the message, but node 0 shouldn't receive it back
  let results =
    await waitForStates(@[handlerFut0, handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
  check:
    results[0].isPending()
    results[1].isCompleted()
    results[2].isCompleted()
asyncTest "GossipSub validation should succeed":
  # An Accept validator on the receiver must let the message through to the
  # topic handler.
  var handlerFut = newFuture[bool]()
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"
    handlerFut.complete(true)

  let nodes = generateNodes(2, gossip = true)
  startNodesAndDeferStop(nodes)
  await connectNodesStar(nodes)

  nodes[0].subscribe("foobar", handler)
  nodes[1].subscribe("foobar", handler)

  # Wait for both directions of the subscription to propagate before publishing.
  var subs: seq[Future[void]]
  subs &= waitSub(nodes[1], nodes[0], "foobar")
  subs &= waitSub(nodes[0], nodes[1], "foobar")
  await allFuturesThrowing(subs)

  var validatorFut = newFuture[bool]()
  proc validator(
      topic: string, message: Message
  ): Future[ValidationResult] {.async.} =
    check topic == "foobar"
    validatorFut.complete(true)
    result = ValidationResult.Accept

  nodes[1].addValidator("foobar", validator)
  tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
  check (await validatorFut) and (await handlerFut)

asyncTest "GossipSub validation should fail (reject)":
  # A Reject validator must prevent the topic handler from ever running.
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check false # if we get here, it should fail

  let nodes = generateNodes(2, gossip = true)
  startNodesAndDeferStop(nodes)
  await connectNodesStar(nodes)

  nodes[0].subscribe("foobar", handler)
  nodes[1].subscribe("foobar", handler)
  await waitSubGraph(nodes, "foobar")

  let gossip1 = GossipSub(nodes[0])
  let gossip2 = GossipSub(nodes[1])
  check:
    gossip1.mesh["foobar"].len == 1 and "foobar" notin gossip1.fanout
    gossip2.mesh["foobar"].len == 1 and "foobar" notin gossip2.fanout

  var validatorFut = newFuture[bool]()
  proc validator(
      topic: string, message: Message
  ): Future[ValidationResult] {.async.} =
    result = ValidationResult.Reject
    validatorFut.complete(true)

  nodes[1].addValidator("foobar", validator)
  tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
  check (await validatorFut) == true

asyncTest "GossipSub validation should fail (ignore)":
  # An Ignore validator must also keep the message away from the handler.
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check false # if we get here, it should fail

  let nodes = generateNodes(2, gossip = true)
  startNodesAndDeferStop(nodes)
  await connectNodesStar(nodes)

  nodes[0].subscribe("foobar", handler)
  nodes[1].subscribe("foobar", handler)
  await waitSubGraph(nodes, "foobar")

  let gossip1 = GossipSub(nodes[0])
  let gossip2 = GossipSub(nodes[1])
  check:
    gossip1.mesh["foobar"].len == 1 and "foobar" notin gossip1.fanout
    gossip2.mesh["foobar"].len == 1 and "foobar" notin gossip2.fanout

  var validatorFut = newFuture[bool]()
  proc validator(
      topic: string, message: Message
  ): Future[ValidationResult] {.async.} =
    result = ValidationResult.Ignore
    validatorFut.complete(true)

  nodes[1].addValidator("foobar", validator)
  tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
  check (await validatorFut) == true

asyncTest "GossipSub validation one fails and one succeeds":
  # One validator instance serves two topics: "foo" is accepted (and reaches
  # the handler), "bar" is rejected.
  var handlerFut = newFuture[bool]()
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check topic == "foo"
    handlerFut.complete(true)

  let nodes = generateNodes(2, gossip = true)
  startNodesAndDeferStop(nodes)
  await connectNodesStar(nodes)

  nodes[1].subscribe("foo", handler)
  nodes[1].subscribe("bar", handler)

  # NOTE(review): `var a, b = expr` in Nim — confirm each future gets its own
  # newFuture[bool]() evaluation here (both are completed independently below).
  var passed, failed: Future[bool] = newFuture[bool]()
  proc validator(
      topic: string, message: Message
  ): Future[ValidationResult] {.async.} =
    result =
      if topic == "foo":
        passed.complete(true)
        ValidationResult.Accept
      else:
        failed.complete(true)
        ValidationResult.Reject

  nodes[1].addValidator("foo", "bar", validator)
  tryPublish await nodes[0].publish("foo", "Hello!".toBytes()), 1
  tryPublish await nodes[0].publish("bar", "Hello!".toBytes()), 1

  check ((await passed) and (await failed) and (await handlerFut))

  # Node 0 is unsubscribed, so both topics live in its fanout, not its mesh.
  let gossip1 = GossipSub(nodes[0])
  let gossip2 = GossipSub(nodes[1])
  check:
    "foo" notin gossip1.mesh and gossip1.fanout["foo"].len == 1
    "foo" notin gossip2.mesh and "foo" notin gossip2.fanout
    "bar" notin gossip1.mesh and gossip1.fanout["bar"].len == 1
    "bar" notin gossip2.mesh and "bar" notin gossip2.fanout
asyncTest "GossipSub's observers should run after message is sent, received and validated":
  # onSend fires on the publisher, onRecv on arrival, onValidated only for
  # messages the validator accepts.
  var
    recvCounter = 0
    sendCounter = 0
    validatedCounter = 0

  proc onRecv(peer: PubSubPeer, msgs: var RPCMsg) =
    inc recvCounter

  proc onSend(peer: PubSubPeer, msgs: var RPCMsg) =
    inc sendCounter

  proc onValidated(peer: PubSubPeer, msg: Message, msgId: MessageId) =
    inc validatedCounter

  let obs0 = PubSubObserver(onSend: onSend)
  let obs1 = PubSubObserver(onRecv: onRecv, onValidated: onValidated)

  let nodes = generateNodes(2, gossip = true)
  startNodesAndDeferStop(nodes)
  await connectNodesStar(nodes)

  nodes[0].addObserver(obs0)
  nodes[1].addObserver(obs1)
  nodes[1].subscribe("foo", voidTopicHandler)
  nodes[1].subscribe("bar", voidTopicHandler)

  proc validator(
      topic: string, message: Message
  ): Future[ValidationResult] {.async.} =
    result = if topic == "foo": ValidationResult.Accept else: ValidationResult.Reject

  nodes[1].addValidator("foo", "bar", validator)

  # Send message that will be accepted by the receiver's validator
  tryPublish await nodes[0].publish("foo", "Hello!".toBytes()), 1
  check:
    recvCounter == 1
    validatedCounter == 1
    sendCounter == 1

  # Send message that will be rejected by the receiver's validator
  # (received and sent counters advance, validated stays at 1)
  tryPublish await nodes[0].publish("bar", "Hello!".toBytes()), 1
  checkUntilTimeout:
    recvCounter == 2
    validatedCounter == 1
    sendCounter == 2
asyncTest "e2e - GossipSub send over mesh A -> B":
  # After both peers graft each other for the topic, a publish travels over
  # the mesh (not the fanout).
  var passed: Future[bool] = newFuture[bool]()
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"
    passed.complete(true)

  let nodes = generateNodes(2, gossip = true)
  startNodesAndDeferStop(nodes)
  await connectNodesStar(nodes)

  nodes[0].subscribe("foobar", handler)
  nodes[1].subscribe("foobar", handler)
  await waitSub(nodes[0], nodes[1], "foobar")
  tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1

  check await passed

  var gossip1: GossipSub = GossipSub(nodes[0])
  var gossip2: GossipSub = GossipSub(nodes[1])
  check:
    "foobar" in gossip1.gossipsub
    "foobar" in gossip2.gossipsub
    gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
    not gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
    gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
    not gossip2.fanout.hasPeerId("foobar", gossip1.peerInfo.peerId)
asyncTest "e2e - GossipSub should not send to source & peers who already seen":
  # 3 nodes: A, B, C
  # A publishes, C relays, B is having a long validation
  # so B should not send to anyone
  let nodes = generateNodes(3, gossip = true)
  startNodesAndDeferStop(nodes)
  await connectNodesStar(nodes)

  var cRelayed: Future[void] = newFuture[void]()
  var bFinished: Future[void] = newFuture[void]()
  var
    aReceived = 0
    cReceived = 0

  # A and C must each see the message exactly once; a second delivery means
  # B (or someone) re-sent a duplicate.
  proc handlerA(topic: string, data: seq[byte]) {.async.} =
    inc aReceived
    check aReceived < 2

  proc handlerB(topic: string, data: seq[byte]) {.async.} =
    discard

  proc handlerC(topic: string, data: seq[byte]) {.async.} =
    inc cReceived
    check cReceived < 2
    cRelayed.complete()

  nodes[0].subscribe("foobar", handlerA)
  nodes[1].subscribe("foobar", handlerB)
  nodes[2].subscribe("foobar", handlerC)
  await waitSubGraph(nodes, "foobar")

  var gossip1: GossipSub = GossipSub(nodes[0])
  var gossip2: GossipSub = GossipSub(nodes[1])
  var gossip3: GossipSub = GossipSub(nodes[2])

  # B's validator only resolves after C has relayed, so by the time B would
  # forward, every other peer has already seen the message.
  proc slowValidator(
      topic: string, message: Message
  ): Future[ValidationResult] {.async.} =
    try:
      await cRelayed
      # Empty A & C caches to detect duplicates
      gossip1.seen = TimedCache[SaltedId].init()
      gossip3.seen = TimedCache[SaltedId].init()
      let msgId = toSeq(gossip2.validationSeen.keys)[0]
      # Wait until B has recorded peers that already announced this message.
      checkUntilTimeout(
        try:
          gossip2.validationSeen[msgId].len > 0
        except KeyError:
          false
      )
      result = ValidationResult.Accept
      bFinished.complete()
    except CatchableError:
      raiseAssert "err on slowValidator"

  nodes[1].addValidator("foobar", slowValidator)
  checkUntilTimeout:
    gossip1.mesh.getOrDefault("foobar").len == 2
    gossip2.mesh.getOrDefault("foobar").len == 2
    gossip3.mesh.getOrDefault("foobar").len == 2
  tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 2

  await bFinished
asyncTest "e2e - GossipSub send over floodPublish A -> B":
  # With floodPublish enabled and no local subscription, the publisher reaches
  # the subscribed peer without ever forming a mesh for the topic.
  var passed: Future[bool] = newFuture[bool]()
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"
    passed.complete(true)

  let nodes = generateNodes(2, gossip = true)
  startNodesAndDeferStop(nodes)

  var gossip1: GossipSub = GossipSub(nodes[0])
  gossip1.parameters.floodPublish = true
  var gossip2: GossipSub = GossipSub(nodes[1])
  gossip2.parameters.floodPublish = true

  await connectNodesStar(nodes)

  # node 0 deliberately does NOT subscribe, so the topic stays out of its mesh.
  # nodes[0].subscribe("foobar", handler)
  nodes[1].subscribe("foobar", handler)
  await waitSub(nodes[0], nodes[1], "foobar")

  tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1

  check await passed.wait(10.seconds)

  check:
    "foobar" in gossip1.gossipsub
    "foobar" notin gossip2.gossipsub
    not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)

asyncTest "e2e - GossipSub floodPublish limit":
  # With bandwidth-based limiting active, floodPublish caps the fan-out
  # (expected dLow peers published to, 17 in the helper's second check).
  let
    nodes = setupNodes(20)
    gossip1 = GossipSub(nodes[0])

  gossip1.parameters.floodPublish = true
  gossip1.parameters.heartbeatInterval = milliseconds(700)

  startNodesAndDeferStop(nodes)
  await connectNodes(nodes[1 ..^ 1], nodes[0])
  await baseTestProcedure(nodes, gossip1, gossip1.parameters.dLow, 17)

asyncTest "e2e - GossipSub floodPublish limit with bandwidthEstimatebps = 0":
  # Setting bandwidthEstimatebps to 0 disables the limit: the message is
  # flood-published to all 19 connected peers.
  let
    nodes = setupNodes(20)
    gossip1 = GossipSub(nodes[0])

  gossip1.parameters.floodPublish = true
  gossip1.parameters.heartbeatInterval = milliseconds(700)
  gossip1.parameters.bandwidthEstimatebps = 0

  startNodesAndDeferStop(nodes)
  await connectNodes(nodes[1 ..^ 1], nodes[0])
  await baseTestProcedure(nodes, gossip1, nodes.len - 1, nodes.len - 1)
asyncTest "e2e - GossipSub with multiple peers":
  # Ten fully-connected nodes (triggerSelf on): one publish must eventually be
  # seen by every node, including the publisher itself.
  var runs = 10

  let nodes = generateNodes(runs, gossip = true, triggerSelf = true)

  startNodesAndDeferStop(nodes)
  await connectNodesStar(nodes)

  var seen: Table[string, int]
  var seenFut = newFuture[void]()

  for i in 0 ..< nodes.len:
    let dialer = nodes[i]
    var handler: TopicHandler
    # closureScope pins per-iteration state so each handler counts for its
    # own peer rather than the loop's last value.
    closureScope:
      var peerName = $dialer.peerInfo.peerId
      handler = proc(topic: string, data: seq[byte]) {.async.} =
        seen.mgetOrPut(peerName, 0).inc()
        check topic == "foobar"
        if not seenFut.finished() and seen.len >= runs:
          seenFut.complete()

    dialer.subscribe("foobar", handler)
  await waitSubGraph(nodes, "foobar")

  tryPublish await wait(
    nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
    1.minutes,
  ), 1

  await wait(seenFut, 1.minutes)
  check:
    seen.len >= runs
  for k, v in seen.pairs:
    check:
      v >= 1

  for node in nodes:
    var gossip = GossipSub(node)
    check:
      "foobar" in gossip.gossipsub

asyncTest "e2e - GossipSub with multiple peers (sparse)":
  # Same delivery guarantee, but over a sparsely connected graph, so gossip
  # (not just the mesh) must carry the message.
  var runs = 10

  let nodes = generateNodes(runs, gossip = true, triggerSelf = true)

  startNodesAndDeferStop(nodes)
  await connectNodesSparse(nodes)

  var seen: Table[string, int]
  var seenFut = newFuture[void]()

  for i in 0 ..< nodes.len:
    let dialer = nodes[i]
    var handler: TopicHandler
    capture dialer, i:
      var peerName = $dialer.peerInfo.peerId
      handler = proc(topic: string, data: seq[byte]) {.async.} =
        try:
          if peerName notin seen:
            seen[peerName] = 0
          seen[peerName].inc
        except KeyError:
          raiseAssert "seen checked before"
        check topic == "foobar"
        if not seenFut.finished() and seen.len >= runs:
          seenFut.complete()

    dialer.subscribe("foobar", handler)
  await waitSubGraph(nodes, "foobar")

  tryPublish await wait(
    nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
    1.minutes,
  ), 1

  await wait(seenFut, 60.seconds)
  check:
    seen.len >= runs
  for k, v in seen.pairs:
    check:
      v >= 1

  for node in nodes:
    var gossip = GossipSub(node)
    check:
      "foobar" in gossip.gossipsub
      gossip.fanout.len == 0
      gossip.mesh["foobar"].len > 0

asyncTest "e2e - GossipSub with multiple peers - control deliver (sparse)":
  # D parameters are forced tiny so most deliveries must happen through
  # IHAVE/IWANT control-message ping-pong rather than direct mesh sends.
  var runs = 10

  let nodes = generateNodes(runs, gossip = true, triggerSelf = true)

  startNodesAndDeferStop(nodes)
  await connectNodesSparse(nodes)

  var seen: Table[string, int]
  var seenFut = newFuture[void]()
  for i in 0 ..< nodes.len:
    let dialer = nodes[i]
    let dgossip = GossipSub(dialer)
    dgossip.parameters.dHigh = 2
    dgossip.parameters.dLow = 1
    dgossip.parameters.d = 1
    dgossip.parameters.dOut = 1
    var handler: TopicHandler
    closureScope:
      var peerName = $dialer.peerInfo.peerId
      handler = proc(topic: string, data: seq[byte]) {.async.} =
        seen.mgetOrPut(peerName, 0).inc()
        info "seen up", count = seen.len
        check topic == "foobar"
        if not seenFut.finished() and seen.len >= runs:
          seenFut.complete()

    dialer.subscribe("foobar", handler)
    await waitSub(nodes[0], dialer, "foobar")

  # we want to test ping pong deliveries via control Iwant/Ihave, so we publish just in a tap
  let publishedTo = nodes[0].publish(
    "foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)
  ).await
  check:
    publishedTo != 0
    publishedTo != runs

  await wait(seenFut, 5.minutes)
  check:
    seen.len >= runs
  for k, v in seen.pairs:
    check:
      v >= 1
asyncTest "GossipSub directPeers: always forward messages":
  # Direct peers relay unconditionally: 0 -> 1 -> 2 along direct-peer links,
  # with no topic mesh ever forming.
  let nodes = generateNodes(3, gossip = true)
  startNodesAndDeferStop(nodes)

  await GossipSub(nodes[0]).addDirectPeer(
    nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
  )
  await GossipSub(nodes[1]).addDirectPeer(
    nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
  )
  await GossipSub(nodes[1]).addDirectPeer(
    nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs
  )
  await GossipSub(nodes[2]).addDirectPeer(
    nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
  )

  var handlerFut = newFuture[void]()
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"
    handlerFut.complete()

  proc noop(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"

  nodes[0].subscribe("foobar", noop)
  nodes[1].subscribe("foobar", noop)
  nodes[2].subscribe("foobar", handler)

  tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1

  await handlerFut.wait(2.seconds)

  # peer shouldn't be in our mesh
  check "foobar" notin GossipSub(nodes[0]).mesh
  check "foobar" notin GossipSub(nodes[1]).mesh
  check "foobar" notin GossipSub(nodes[2]).mesh

asyncTest "GossipSub directPeers: send message to unsubscribed direct peer":
  # Publishing on a topic neither direct peer subscribes to delivers nothing
  # and reports zero peers published to.
  # Given 2 nodes
  let
    numberOfNodes = 2
    nodes = generateNodes(numberOfNodes, gossip = true)
    node0 = nodes[0]
    node1 = nodes[1]
    g0 = GossipSub(node0)
    g1 = GossipSub(node1)

  startNodesAndDeferStop(nodes)

  # With message observers
  var
    messageReceived0 = newFuture[bool]()
    messageReceived1 = newFuture[bool]()

  proc observer0(peer: PubSubPeer, msgs: var RPCMsg) =
    for message in msgs.messages:
      if message.topic == "foobar":
        messageReceived0.complete(true)

  proc observer1(peer: PubSubPeer, msgs: var RPCMsg) =
    for message in msgs.messages:
      if message.topic == "foobar":
        messageReceived1.complete(true)

  node0.addObserver(PubSubObserver(onRecv: observer0))
  node1.addObserver(PubSubObserver(onRecv: observer1))

  # Connect them as direct peers
  await g0.addDirectPeer(node1.peerInfo.peerId, node1.peerInfo.addrs)
  await g1.addDirectPeer(node0.peerInfo.peerId, node0.peerInfo.addrs)

  # When node 0 sends a message
  let message = "Hello!".toBytes()
  let publishResult = await node0.publish("foobar", message)

  # None should receive the message as they are not subscribed to the topic
  let results = await waitForStates(@[messageReceived0, messageReceived1])
  check:
    publishResult == 0
    results[0].isPending()
    results[1].isPending()

View File

@@ -1,361 +0,0 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import sequtils, tables, sets, sugar
import chronos, stew/byteutils
import chronicles
import metrics
import
utils,
../../libp2p/[
protocols/pubsub/pubsub,
protocols/pubsub/gossipsub,
protocols/pubsub/peertable,
protocols/pubsub/rpc/messages,
]
import ../helpers, ../utils/[futures]
from ../../libp2p/protocols/pubsub/mcache import window
proc voidTopicHandler(topic: string, data: seq[byte]) {.async.} =
  ## No-op subscription handler for tests that only exercise mesh wiring.
  discard

proc createCompleteHandler(): (
    Future[bool], proc(topic: string, data: seq[byte]) {.async.}
) =
  ## Builds a (future, handler) pair: the handler completes the future with
  ## `true` on the first message it receives.
  let completion = newFuture[bool]()
  proc onMessage(topic: string, data: seq[byte]) {.async.} =
    completion.complete(true)

  (completion, onMessage)
proc addIHaveObservers(nodes: seq[auto], topic: string, receivedIHaves: ref seq[int]) =
  ## Installs an onRecv observer on every node that counts incoming IHAVE
  ## control entries for `topic`; counts land in receivedIHaves[i] per node.
  let numberOfNodes = nodes.len
  receivedIHaves[] = repeat(0, numberOfNodes)
  for i in 0 ..< numberOfNodes:
    var pubsubObserver: PubSubObserver
    # capture pins `i` per iteration so each closure updates its own slot.
    capture i:
      let checkForIhaves = proc(peer: PubSubPeer, msgs: var RPCMsg) =
        if msgs.control.isSome:
          let iHave = msgs.control.get.ihave
          if iHave.len > 0:
            for msg in iHave:
              if msg.topicID == topic:
                receivedIHaves[i] += 1

      pubsubObserver = PubSubObserver(onRecv: checkForIhaves)
    nodes[i].addObserver(pubsubObserver)

proc addIDontWantObservers(nodes: seq[auto], receivedIDontWants: ref seq[int]) =
  ## Installs an onRecv observer on every node that counts RPCs carrying any
  ## IDONTWANT control entries (topic-agnostic, unlike addIHaveObservers).
  let numberOfNodes = nodes.len
  receivedIDontWants[] = repeat(0, numberOfNodes)
  for i in 0 ..< numberOfNodes:
    var pubsubObserver: PubSubObserver
    capture i:
      let checkForIDontWant = proc(peer: PubSubPeer, msgs: var RPCMsg) =
        if msgs.control.isSome:
          let iDontWant = msgs.control.get.idontwant
          if iDontWant.len > 0:
            receivedIDontWants[i] += 1

      pubsubObserver = PubSubObserver(onRecv: checkForIDontWant)
    nodes[i].addObserver(pubsubObserver)
suite "Gossipsub Parameters":
  teardown:
    # Fails the test if any tracked resource (streams, conns) leaked.
    checkTrackers()

  asyncTest "dont prune peers if mesh len is less than d_high":
    # With only 4 peers each (below dHigh), no pruning happens: every peer
    # stays in both the gossipsub view and the mesh.
    let
      numberOfNodes = 5
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitSubAllNodes(nodes, topic)

    let expectedNumberOfPeers = numberOfNodes - 1
    for i in 0 ..< numberOfNodes:
      var gossip = GossipSub(nodes[i])
      check:
        gossip.gossipsub[topic].len == expectedNumberOfPeers
        gossip.mesh[topic].len == expectedNumberOfPeers
        gossip.fanout.len == 0
asyncTest "prune peers if mesh len is higher than d_high":
let
numberofNodes = 15
topic = "foobar"
nodes = generateNodes(numberofNodes, gossip = true)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitSubAllNodes(nodes, topic)
# Give it time for a heartbeat
await sleepAsync(DURATION_TIMEOUT_EXTENDED)
let
expectedNumberOfPeers = numberofNodes - 1
dHigh = 12
d = 6
dLow = 4
for i in 0 ..< numberofNodes:
var gossip = GossipSub(nodes[i])
check:
gossip.gossipsub[topic].len == expectedNumberOfPeers
gossip.mesh[topic].len >= dLow and gossip.mesh[topic].len <= dHigh
gossip.fanout.len == 0
asyncTest "messages sent to peers not in the mesh are propagated via gossip":
let
numberOfNodes = 5
topic = "foobar"
dValues = DValues(dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1))
nodes = generateNodes(numberOfNodes, gossip = true, dValues = some(dValues))
startNodesAndDeferStop(nodes)
# All nodes are checking for iHave messages
var receivedIHavesRef = new seq[int]
addIHaveObservers(nodes, topic, receivedIHavesRef)
# And are interconnected
await connectNodesStar(nodes)
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await sleepAsync(DURATION_TIMEOUT)
# When node 0 sends a message
check (await nodes[0].publish(topic, "Hello!".toBytes())) > 0
await sleepAsync(DURATION_TIMEOUT)
# At least one of the nodes should have received an iHave message
# The check is made this way because the mesh structure changes from run to run
let receivedIHaves = receivedIHavesRef[]
check:
anyIt(receivedIHavesRef[], it > 0)
  asyncTest "messages are not sent back to source or forwarding peer":
    # Ring topology: a publish from node 0 must reach 1 and 2 but never be
    # echoed back to the publisher.
    let
      numberOfNodes = 3
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true)

    startNodesAndDeferStop(nodes)

    let (handlerFut0, handler0) = createCompleteHandler()
    let (handlerFut1, handler1) = createCompleteHandler()
    let (handlerFut2, handler2) = createCompleteHandler()

    # Nodes are connected in a ring
    await connectNodes(nodes[0], nodes[1])
    await connectNodes(nodes[1], nodes[2])
    await connectNodes(nodes[2], nodes[0])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, @[handler0, handler1, handler2])
    await sleepAsync(DURATION_TIMEOUT)

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) == 2
    await sleepAsync(DURATION_TIMEOUT)

    # Nodes 1 and 2 should receive the message, but node 0 shouldn't receive it back
    let results = await waitForStates(@[handlerFut0, handlerFut1, handlerFut2])
    check:
      results[0].isPending()
      results[1].isCompleted()
      results[2].isCompleted()

  asyncTest "flood publish to all peers with score above threshold, regardless of subscription":
    let
      numberOfNodes = 3
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true, floodPublish = true)
      g0 = GossipSub(nodes[0])

    startNodesAndDeferStop(nodes)

    # Nodes 1 and 2 are connected to node 0
    await connectNodes(nodes[0], nodes[1])
    await connectNodes(nodes[0], nodes[2])

    let (handlerFut1, handler1) = createCompleteHandler()
    let (handlerFut2, handler2) = createCompleteHandler()

    # Nodes are subscribed to the same topic
    nodes[1].subscribe(topic, handler1)
    nodes[2].subscribe(topic, handler2)
    await sleepAsync(1.seconds)

    # Given node 2's score is below the threshold
    for peer in g0.gossipsub.getOrDefault(topic):
      if peer.peerId == nodes[2].peerInfo.peerId:
        peer.score = (g0.parameters.publishThreshold - 1)

    # When node 0 publishes a message to topic "foo"
    let message = "Hello!".toBytes()
    check (await nodes[0].publish(topic, message)) == 1
    await sleepAsync(3.seconds)

    # Then only node 1 should receive the message
    let results = await waitForStates(@[handlerFut1, handlerFut2])
    check:
      results[0].isCompleted(true)
      results[1].isPending()
  asyncTest "adaptive gossip dissemination, dLazy and gossipFactor to 0":
    # With both dLazy and gossipFactor at 0, no IHAVE gossip is emitted at all.
    let
      numberOfNodes = 20
      topic = "foobar"
      dValues = DValues(
        dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(0)
      )
      nodes = generateNodes(
        numberOfNodes,
        gossip = true,
        dValues = some(dValues),
        gossipFactor = some(0.float),
      )

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iHave messages
    var receivedIHavesRef = new seq[int]
    addIHaveObservers(nodes, topic, receivedIHavesRef)

    # And are connected to node 0
    for i in 1 ..< numberOfNodes:
      await connectNodes(nodes[0], nodes[i])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await sleepAsync(DURATION_TIMEOUT)

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) == 3
    await sleepAsync(DURATION_TIMEOUT)

    # None of the nodes should have received an iHave message
    let receivedIHaves = receivedIHavesRef[]
    check:
      filterIt(receivedIHaves, it > 0).len == 0

  asyncTest "adaptive gossip dissemination, with gossipFactor priority":
    # gossipFactor (0.5) dominates dLazy (4) when it selects more peers.
    let
      numberOfNodes = 20
      topic = "foobar"
      dValues = DValues(
        dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(4)
      )
      nodes = generateNodes(
        numberOfNodes, gossip = true, dValues = some(dValues), gossipFactor = some(0.5)
      )

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iHave messages
    var receivedIHavesRef = new seq[int]
    addIHaveObservers(nodes, topic, receivedIHavesRef)

    # And are connected to node 0
    for i in 1 ..< numberOfNodes:
      await connectNodes(nodes[0], nodes[i])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await sleepAsync(DURATION_TIMEOUT)

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) == 3
    await sleepAsync(DURATION_TIMEOUT)

    # At least 8 of the nodes should have received an iHave message
    # That's because the gossip factor is 0.5 over 16 available nodes
    let receivedIHaves = receivedIHavesRef[]
    check:
      filterIt(receivedIHaves, it > 0).len >= 8

  asyncTest "adaptive gossip dissemination, with dLazy priority":
    # dLazy (6) dominates when gossipFactor is 0.
    let
      numberOfNodes = 20
      topic = "foobar"
      dValues = DValues(
        dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(6)
      )
      nodes = generateNodes(
        numberOfNodes,
        gossip = true,
        dValues = some(dValues),
        gossipFactor = some(0.float),
      )

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iHave messages
    var receivedIHavesRef = new seq[int]
    addIHaveObservers(nodes, topic, receivedIHavesRef)

    # And are connected to node 0
    for i in 1 ..< numberOfNodes:
      await connectNodes(nodes[0], nodes[i])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await sleepAsync(DURATION_TIMEOUT)

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) == 3
    await sleepAsync(DURATION_TIMEOUT)

    # Exactly dLazy (6) of the nodes should have received an iHave message
    # (the check below asserts equality with dValues.dLazy, not a lower bound)
    let receivedIHaves = receivedIHavesRef[]
    check:
      filterIt(receivedIHaves, it > 0).len == dValues.dLazy.get()
  asyncTest "iDontWant messages are broadcast immediately after receiving the first message instance":
    # Line topology 0-1-2: when 1 receives a large message from 0, it must
    # immediately tell its OTHER peer (2) "I don't want this" before relaying.
    let
      numberOfNodes = 3
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true)

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iDontWant messages
    var receivedIDontWantsRef = new seq[int]
    addIDontWantObservers(nodes, receivedIDontWantsRef)

    # And are connected in a line
    await connectNodes(nodes[0], nodes[1])
    await connectNodes(nodes[1], nodes[2])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await sleepAsync(DURATION_TIMEOUT)

    # When node 0 sends a large message
    # (IDONTWANT is only emitted for messages above the size threshold)
    let largeMsg = newSeq[byte](1000)
    check (await nodes[0].publish(topic, largeMsg)) == 1
    await sleepAsync(DURATION_TIMEOUT)

    # Only node 2 should have received the iDontWant message
    let receivedIDontWants = receivedIDontWantsRef[]
    check:
      receivedIDontWants[0] == 0
      receivedIDontWants[1] == 0
      receivedIDontWants[2] == 1

View File

@@ -0,0 +1,395 @@
{.used.}
import unittest2
import chronos
import results
import ../../libp2p/protocols/pubsub/gossipsub/[types]
import ../../libp2p/protocols/pubsub/[gossipsub, pubsubpeer]
import ../../libp2p/[peerid, multiaddress]
suite "GossipSubParams validation":
proc newDefaultValidParams(): GossipSubParams =
result = GossipSubParams.init()
test "default parameters are valid":
var params = newDefaultValidParams()
check params.validateParameters().isOk()
test "dOut fails when equal to dLow":
const errorMessage =
"gossipsub: dOut parameter error, Number of outbound connections to keep in the mesh. Must be less than D_lo and at most D/2"
var params = newDefaultValidParams()
params.dLow = 4
params.d = 8
params.dOut = params.dLow
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "dOut fails when bigger than d/2":
const errorMessage =
"gossipsub: dOut parameter error, Number of outbound connections to keep in the mesh. Must be less than D_lo and at most D/2"
var params = newDefaultValidParams()
params.dLow = 4
params.d = 5
params.dOut = 3
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "dOut succeeds when less than dLow and equals d/2":
var params = newDefaultValidParams()
params.dLow = 4
params.d = 6
params.dOut = 3
check params.validateParameters().isOk()
test "gossipThreshold fails when zero":
const errorMessage = "gossipsub: gossipThreshold parameter error, Must be < 0"
var params = newDefaultValidParams()
params.gossipThreshold = 0.0
var res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "gossipThreshold succeeds when negative":
var params = newDefaultValidParams()
params.gossipThreshold = -0.1
check params.validateParameters().isOk()
test "unsubscribeBackoff fails when zero":
const errorMessage =
"gossipsub: unsubscribeBackoff parameter error, Must be > 0 seconds"
var params = newDefaultValidParams()
params.unsubscribeBackoff = 0.seconds
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "unsubscribeBackoff succeeds when positive":
var params = newDefaultValidParams()
params.unsubscribeBackoff = 1.seconds
check params.validateParameters().isOk()
test "publishThreshold fails when equal to gossipThreshold":
const errorMessage =
"gossipsub: publishThreshold parameter error, Must be < gossipThreshold"
var params = newDefaultValidParams()
params.publishThreshold = params.gossipThreshold
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "publishThreshold succeeds when less than gossipThreshold":
var params = newDefaultValidParams()
params.publishThreshold = params.gossipThreshold - 1.0
check params.validateParameters().isOk()
test "graylistThreshold fails when equal to publishThreshold":
const errorMessage =
"gossipsub: graylistThreshold parameter error, Must be < publishThreshold"
var params = newDefaultValidParams()
params.graylistThreshold = params.publishThreshold
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "graylistThreshold succeeds when less than publishThreshold":
var params = newDefaultValidParams()
params.graylistThreshold = params.publishThreshold - 1.0
check params.validateParameters().isOk()
test "acceptPXThreshold fails when negative":
const errorMessage = "gossipsub: acceptPXThreshold parameter error, Must be >= 0"
var params = newDefaultValidParams()
params.acceptPXThreshold = -0.1
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "acceptPXThreshold succeeds when zero":
var params = newDefaultValidParams()
params.acceptPXThreshold = 0.0
check params.validateParameters().isOk()
test "opportunisticGraftThreshold fails when negative":
const errorMessage =
"gossipsub: opportunisticGraftThreshold parameter error, Must be >= 0"
var params = newDefaultValidParams()
params.opportunisticGraftThreshold = -0.1
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "opportunisticGraftThreshold succeeds when zero":
var params = newDefaultValidParams()
params.opportunisticGraftThreshold = 0.0
check params.validateParameters().isOk()
test "decayToZero fails when greater than 0.5":
const errorMessage =
"gossipsub: decayToZero parameter error, Should be close to 0.0"
var params = newDefaultValidParams()
params.decayToZero = 0.51
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "decayToZero fails when zero":
const errorMessage =
"gossipsub: decayToZero parameter error, Should be close to 0.0"
var params = newDefaultValidParams()
params.decayToZero = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "decayToZero succeeds when exactly 0.5":
var params = newDefaultValidParams()
params.decayToZero = 0.5
check params.validateParameters().isOk()
test "decayToZero succeeds when small positive value":
var params = newDefaultValidParams()
params.decayToZero = 0.00001
check params.validateParameters().isOk()
test "appSpecificWeight fails when negative":
const errorMessage =
"gossipsub: appSpecificWeight parameter error, Must be positive"
var params = newDefaultValidParams()
params.appSpecificWeight = -0.1
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "appSpecificWeight succeeds when zero":
var params = newDefaultValidParams()
params.appSpecificWeight = 0.0
check params.validateParameters().isOk()
test "ipColocationFactorWeight fails when positive":
const errorMessage =
"gossipsub: ipColocationFactorWeight parameter error, Must be negative or 0"
var params = newDefaultValidParams()
params.ipColocationFactorWeight = 0.1
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "ipColocationFactorWeight succeeds when zero":
var params = newDefaultValidParams()
params.ipColocationFactorWeight = 0.0
check params.validateParameters().isOk()
test "ipColocationFactorWeight succeeds when negative":
var params = newDefaultValidParams()
params.ipColocationFactorWeight = -10.0
check params.validateParameters().isOk()
test "ipColocationFactorThreshold fails when less than 1":
const errorMessage =
"gossipsub: ipColocationFactorThreshold parameter error, Must be at least 1"
var params = newDefaultValidParams()
params.ipColocationFactorThreshold = 0.9
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "ipColocationFactorThreshold succeeds when exactly 1":
var params = newDefaultValidParams()
params.ipColocationFactorThreshold = 1.0
check params.validateParameters().isOk()
test "behaviourPenaltyWeight fails when zero":
const errorMessage =
"gossipsub: behaviourPenaltyWeight parameter error, Must be negative"
var params = newDefaultValidParams()
params.behaviourPenaltyWeight = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "behaviourPenaltyWeight succeeds when negative":
var params = newDefaultValidParams()
params.behaviourPenaltyWeight = -0.0001
check params.validateParameters().isOk()
test "behaviourPenaltyDecay fails when negative":
const errorMessage =
"gossipsub: behaviourPenaltyDecay parameter error, Must be between 0 and 1"
var params = newDefaultValidParams()
params.behaviourPenaltyDecay = -0.1
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "behaviourPenaltyDecay fails when equal to 1":
const errorMessage =
"gossipsub: behaviourPenaltyDecay parameter error, Must be between 0 and 1"
var params = newDefaultValidParams()
params.behaviourPenaltyDecay = 1.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "behaviourPenaltyDecay succeeds when zero":
var params = newDefaultValidParams()
params.behaviourPenaltyDecay = 0.0
check params.validateParameters().isOk()
test "behaviourPenaltyDecay succeeds when between 0 and 1":
var params = newDefaultValidParams()
params.behaviourPenaltyDecay = 0.5
check params.validateParameters().isOk()
test "maxNumElementsInNonPriorityQueue fails when zero":
const errorMessage =
"gossipsub: maxNumElementsInNonPriorityQueue parameter error, Must be > 0"
var params = newDefaultValidParams()
params.maxNumElementsInNonPriorityQueue = 0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "maxNumElementsInNonPriorityQueue succeeds when positive":
var params = newDefaultValidParams()
params.maxNumElementsInNonPriorityQueue = 1
check params.validateParameters().isOk()
suite "TopicParams validation":
proc newDefaultValidTopicParams(): TopicParams =
result = TopicParams.init()
test "default topic parameters are valid":
var params = newDefaultValidTopicParams()
check params.validateParameters().isOk()
test "timeInMeshWeight fails when zero":
const errorMessage =
"gossipsub: timeInMeshWeight parameter error, Must be a small positive value"
var params = newDefaultValidTopicParams()
params.timeInMeshWeight = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "timeInMeshWeight fails when greater than 1":
const errorMessage =
"gossipsub: timeInMeshWeight parameter error, Must be a small positive value"
var params = newDefaultValidTopicParams()
params.timeInMeshWeight = 1.1
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "timeInMeshWeight succeeds when exactly 1":
var params = newDefaultValidTopicParams()
params.timeInMeshWeight = 1.0
check params.validateParameters().isOk()
test "timeInMeshWeight succeeds when small positive value":
var params = newDefaultValidTopicParams()
params.timeInMeshWeight = 0.01
check params.validateParameters().isOk()
test "timeInMeshCap fails when zero":
const errorMessage =
"gossipsub: timeInMeshCap parameter error, Should be a positive value"
var params = newDefaultValidTopicParams()
params.timeInMeshCap = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "timeInMeshCap succeeds when positive":
var params = newDefaultValidTopicParams()
params.timeInMeshCap = 10.0
check params.validateParameters().isOk()
test "firstMessageDeliveriesWeight fails when zero":
const errorMessage =
"gossipsub: firstMessageDeliveriesWeight parameter error, Should be a positive value"
var params = newDefaultValidTopicParams()
params.firstMessageDeliveriesWeight = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "firstMessageDeliveriesWeight succeeds when positive":
var params = newDefaultValidTopicParams()
params.firstMessageDeliveriesWeight = 1.0
check params.validateParameters().isOk()
test "meshMessageDeliveriesWeight fails when zero":
const errorMessage =
"gossipsub: meshMessageDeliveriesWeight parameter error, Should be a negative value"
var params = newDefaultValidTopicParams()
params.meshMessageDeliveriesWeight = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "meshMessageDeliveriesWeight succeeds when negative":
var params = newDefaultValidTopicParams()
params.meshMessageDeliveriesWeight = -1.0
check params.validateParameters().isOk()
test "meshMessageDeliveriesThreshold fails when zero":
const errorMessage =
"gossipsub: meshMessageDeliveriesThreshold parameter error, Should be a positive value"
var params = newDefaultValidTopicParams()
params.meshMessageDeliveriesThreshold = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "meshMessageDeliveriesThreshold succeeds when positive":
var params = newDefaultValidTopicParams()
params.meshMessageDeliveriesThreshold = 5.0
check params.validateParameters().isOk()
test "meshMessageDeliveriesCap fails when less than threshold":
const errorMessage =
"gossipsub: meshMessageDeliveriesCap parameter error, Should be >= meshMessageDeliveriesThreshold"
var params = newDefaultValidTopicParams()
params.meshMessageDeliveriesThreshold = 10.0
params.meshMessageDeliveriesCap = 9.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "meshMessageDeliveriesCap succeeds when equal to threshold":
var params = newDefaultValidTopicParams()
params.meshMessageDeliveriesThreshold = 10.0
params.meshMessageDeliveriesCap = 10.0
check params.validateParameters().isOk()
test "meshFailurePenaltyWeight fails when zero":
const errorMessage =
"gossipsub: meshFailurePenaltyWeight parameter error, Should be a negative value"
var params = newDefaultValidTopicParams()
params.meshFailurePenaltyWeight = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "meshFailurePenaltyWeight succeeds when negative":
var params = newDefaultValidTopicParams()
params.meshFailurePenaltyWeight = -1.0
check params.validateParameters().isOk()
test "invalidMessageDeliveriesWeight fails when zero":
const errorMessage =
"gossipsub: invalidMessageDeliveriesWeight parameter error, Should be a negative value"
var params = newDefaultValidTopicParams()
params.invalidMessageDeliveriesWeight = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "invalidMessageDeliveriesWeight succeeds when negative":
var params = newDefaultValidTopicParams()
params.invalidMessageDeliveriesWeight = -1.0
check params.validateParameters().isOk()

View File

@@ -0,0 +1,409 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[sequtils]
import stew/byteutils
import metrics
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../../libp2p/muxers/muxer
import ../helpers, ../utils/[futures]
suite "GossipSub Scoring":
teardown:
checkTrackers()
asyncTest "Disconnect bad peers":
let topic = "foobar"
var (gossipSub, conns, peers) =
setupGossipSubWithPeers(30, topic, populateGossipsub = true)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.parameters.disconnectBadPeers = true
gossipSub.parameters.appSpecificWeight = 1.0
for i, peer in peers:
peer.appScore = gossipSub.parameters.graylistThreshold - 1
let conn = conns[i]
gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))
gossipSub.updateScores()
await sleepAsync(100.millis)
check:
# test our disconnect mechanics
gossipSub.gossipsub.peers(topic) == 0
# also ensure we cleanup properly the peersInIP table
gossipSub.peersInIP.len == 0
asyncTest "flood publish to all peers with score above threshold, regardless of subscription":
let
numberOfNodes = 3
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true, floodPublish = true)
g0 = GossipSub(nodes[0])
startNodesAndDeferStop(nodes)
# Nodes 1 and 2 are connected to node 0
await connectNodes(nodes[0], nodes[1])
await connectNodes(nodes[0], nodes[2])
let (handlerFut1, handler1) = createCompleteHandler()
let (handlerFut2, handler2) = createCompleteHandler()
# Nodes are subscribed to the same topic
nodes[1].subscribe(topic, handler1)
nodes[2].subscribe(topic, handler2)
await waitForHeartbeat()
# Given node 2's score is below the threshold
for peer in g0.gossipsub.getOrDefault(topic):
if peer.peerId == nodes[2].peerInfo.peerId:
peer.score = (g0.parameters.publishThreshold - 1)
# When node 0 publishes a message to topic "foo"
let message = "Hello!".toBytes()
check (await nodes[0].publish(topic, message)) == 1
await waitForHeartbeat(2)
# Then only node 1 should receive the message
let results = await waitForStates(@[handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
check:
results[0].isCompleted(true)
results[1].isPending()
proc initializeGossipTest(): Future[(seq[PubSub], GossipSub, GossipSub)] {.async.} =
let nodes =
generateNodes(2, gossip = true, overheadRateLimit = Opt.some((20, 1.millis)))
await startNodes(nodes)
await connectNodesStar(nodes)
proc handle(topic: string, data: seq[byte]) {.async.} =
discard
let gossip0 = GossipSub(nodes[0])
let gossip1 = GossipSub(nodes[1])
gossip0.subscribe("foobar", handle)
gossip1.subscribe("foobar", handle)
await waitSubGraph(nodes, "foobar")
# Avoid being disconnected by failing signature verification
gossip0.verifySignature = false
gossip1.verifySignature = false
return (nodes, gossip0, gossip1)
proc currentRateLimitHits(): float64 =
try:
libp2p_gossipsub_peers_rate_limit_hits.valueByName(
"libp2p_gossipsub_peers_rate_limit_hits_total", @["nim-libp2p"]
)
except KeyError:
0
asyncTest "e2e - GossipSub should not rate limit decodable messages below the size allowed":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
gossip0.broadcast(
gossip0.mesh["foobar"],
RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](10))]),
isHighPriority = true,
)
await waitForHeartbeat()
check currentRateLimitHits() == rateLimitHits
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
gossip0.broadcast(
gossip0.mesh["foobar"],
RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](12))]),
isHighPriority = true,
)
await waitForHeartbeat()
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
check currentRateLimitHits() == rateLimitHits
await stopNodes(nodes)
asyncTest "e2e - GossipSub should rate limit undecodable messages above the size allowed":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
# Simulate sending an undecodable message
await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(
newSeqWith(33, 1.byte), isHighPriority = true
)
await waitForHeartbeat()
check currentRateLimitHits() == rateLimitHits + 1
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(
newSeqWith(35, 1.byte), isHighPriority = true
)
checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2
await stopNodes(nodes)
asyncTest "e2e - GossipSub should rate limit decodable messages above the size allowed":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
let msg = RPCMsg(
control: some(
ControlMessage(
prune:
@[
ControlPrune(
topicID: "foobar",
peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))],
backoff: 123'u64,
)
]
)
)
)
gossip0.broadcast(gossip0.mesh["foobar"], msg, isHighPriority = true)
await waitForHeartbeat()
check currentRateLimitHits() == rateLimitHits + 1
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
let msg2 = RPCMsg(
control: some(
ControlMessage(
prune:
@[
ControlPrune(
topicID: "foobar",
peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))],
backoff: 123'u64,
)
]
)
)
)
gossip0.broadcast(gossip0.mesh["foobar"], msg2, isHighPriority = true)
checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2
await stopNodes(nodes)
asyncTest "e2e - GossipSub should rate limit invalid messages above the size allowed":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
let topic = "foobar"
proc execValidator(
topic: string, message: messages.Message
): Future[ValidationResult] {.async: (raw: true).} =
let res = newFuture[ValidationResult]()
res.complete(ValidationResult.Reject)
res
gossip0.addValidator(topic, execValidator)
gossip1.addValidator(topic, execValidator)
let msg = RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](40))])
gossip0.broadcast(gossip0.mesh[topic], msg, isHighPriority = true)
await waitForHeartbeat()
check currentRateLimitHits() == rateLimitHits + 1
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
gossip0.broadcast(
gossip0.mesh[topic],
RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](35))]),
isHighPriority = true,
)
checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2
await stopNodes(nodes)
asyncTest "GossipSub directPeers: don't kick direct peer with low score":
let nodes = generateNodes(2, gossip = true)
startNodesAndDeferStop(nodes)
await GossipSub(nodes[0]).addDirectPeer(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
await GossipSub(nodes[1]).addDirectPeer(
nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
)
GossipSub(nodes[1]).parameters.disconnectBadPeers = true
GossipSub(nodes[1]).parameters.graylistThreshold = 100000
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
nodes[0].subscribe("foobar", handler)
nodes[1].subscribe("foobar", handler)
tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1
await handlerFut
GossipSub(nodes[1]).updateScores()
# peer shouldn't be in our mesh
check:
GossipSub(nodes[1]).peerStats[nodes[0].switch.peerInfo.peerId].score <
GossipSub(nodes[1]).parameters.graylistThreshold
GossipSub(nodes[1]).updateScores()
handlerFut = newFuture[void]()
tryPublish await nodes[0].publish("foobar", toBytes("hellow2")), 1
# Without directPeers, this would fail
await handlerFut.wait(1.seconds)
asyncTest "GossipSub peers disconnections mechanics":
var runs = 10
let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
var seen: Table[string, int]
var seenFut = newFuture[void]()
for i in 0 ..< nodes.len:
let dialer = nodes[i]
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async.} =
seen.mgetOrPut(peerName, 0).inc()
check topic == "foobar"
if not seenFut.finished() and seen.len >= runs:
seenFut.complete()
dialer.subscribe("foobar", handler)
await waitSubGraph(nodes, "foobar")
# ensure peer stats are stored properly and kept properly
check:
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
tryPublish await wait(
nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
1.minutes,
), 1, 5.seconds, 3.minutes
await wait(seenFut, 5.minutes)
check:
seen.len >= runs
for k, v in seen.pairs:
check:
v >= 1
for node in nodes:
var gossip = GossipSub(node)
check:
"foobar" in gossip.gossipsub
gossip.fanout.len == 0
gossip.mesh["foobar"].len > 0
# Removing some subscriptions
for i in 0 ..< runs:
if i mod 3 != 0:
nodes[i].unsubscribeAll("foobar")
# Waiting 2 heartbeats
for _ in 0 .. 1:
let evnt = newAsyncEvent()
GossipSub(nodes[0]).heartbeatEvents &= evnt
await evnt.wait()
# ensure peer stats are stored properly and kept properly
check:
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
# Adding again subscriptions
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
for i in 0 ..< runs:
if i mod 3 != 0:
nodes[i].subscribe("foobar", handler)
# Waiting 2 heartbeats
for _ in 0 .. 1:
let evnt = newAsyncEvent()
GossipSub(nodes[0]).heartbeatEvents &= evnt
await evnt.wait()
# ensure peer stats are stored properly and kept properly
check:
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
asyncTest "GossipSub scoring - decayInterval":
let nodes = generateNodes(2, gossip = true)
var gossip = GossipSub(nodes[0])
const testDecayInterval = 50.milliseconds
gossip.parameters.decayInterval = testDecayInterval
startNodesAndDeferStop(nodes)
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async.} =
handlerFut.complete()
await connectNodesStar(nodes)
nodes[0].subscribe("foobar", handler)
nodes[1].subscribe("foobar", handler)
tryPublish await nodes[0].publish("foobar", toBytes("hello")), 1
await handlerFut
gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries =
100
gossip.topicParams["foobar"].meshMessageDeliveriesDecay = 0.9
# We should have decayed 5 times, though allowing 4..6
await sleepAsync(testDecayInterval * 5)
check:
gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries in
50.0 .. 66.0

View File

@@ -12,7 +12,9 @@ import
protocols/pubsub/errors,
protocols/pubsub/rpc/message,
protocols/pubsub/rpc/messages,
protocols/pubsub/rpc/protobuf,
]
import ../utils/async_tests
let rng = newRng()
@@ -139,3 +141,34 @@ suite "Message":
)
check byteSize(rpcMsg) == 28 + 32 + 2 + 2 + 38 # Total: 102 bytes
# Round-trips an RPCMsg carrying every control-message variant
# (ihave/iwant/graft/prune/idontwant) through the protobuf codec and compares
# the wire bytes against a reference encoding produced with the protoc tool.
asyncTest "ControlMessage RPCMsg encoding and decoding":
  let msgId: seq[byte] = @[123]
  let rpcMsg = RPCMsg(
    control: some(
      ControlMessage(
        ihave: @[ControlIHave(topicID: "foobar", messageIDs: @[msgId])],
        iwant: @[ControlIWant(messageIDs: @[msgId])],
        graft: @[ControlGraft(topicID: "foobar")],
        prune: @[ControlPrune(topicID: "foobar", backoff: 10.uint64)],
        idontwant: @[ControlIWant(messageIDs: @[msgId])],
      )
    )
  )
  # Reference bytes generated with the protoc command-line tool.
  let referenceEncoding: seq[byte] =
    @[
      26, 45, 10, 11, 10, 6, 102, 111, 111, 98, 97, 114, 18, 1, 123, 18, 3, 10, 1,
      123, 26, 8, 10, 6, 102, 111, 111, 98, 97, 114, 34, 10, 10, 6, 102, 111, 111, 98,
      97, 114, 24, 10, 42, 3, 10, 1, 123,
    ]
  # Encoding must reproduce the reference bytes exactly ...
  check:
    encodeRpcMsg(rpcMsg, true) == referenceEncoding
  # ... and decoding the reference bytes must yield the original message.
  check:
    decodeRpcMsg(referenceEncoding).value == rpcMsg

Some files were not shown because too many files have changed in this diff Show More