Compare commits

...

38 Commits

Author SHA1 Message Date
Gabriel Cruz
b755f9e13e feat(autotls): add AutoTLS manager 2025-06-13 11:58:08 -03:00
vladopajic
848fdde0a8 feat(perf): add stats (#1452) 2025-06-13 10:16:45 +00:00
Gabriel Cruz
31e7dc68e2 chore(peeridauth): add mocked client (#1458) 2025-06-12 21:11:36 +00:00
Ivan FB
08299a2059 chore: Add some more context when an exception is caught (#1432)
Co-authored-by: richΛrd <info@richardramos.me>
2025-06-12 14:38:25 +00:00
Gabriel Cruz
2f3156eafb fix(daily): fix typo in testintegration (#1463) 2025-06-12 09:26:46 -03:00
Radosław Kamiński
72e85101b0 test(gossipsub): refactor and unify scoring tests (#1461) 2025-06-12 08:18:01 +00:00
Gabriel Cruz
d205260a3e chore(acme): add MockACMEApi for testing (#1457) 2025-06-11 18:59:29 +00:00
Radosław Kamiński
97e576d146 test: increase timeout (#1460) 2025-06-11 14:19:33 +00:00
richΛrd
888cb78331 feat(kad-dht): protobuffers (#1453) 2025-06-11 12:56:02 +00:00
richΛrd
1d4c261d2a feat: withWsTransport (#1398) 2025-06-10 22:32:55 +00:00
Gabriel Cruz
83de0c0abd feat(peeridauth): add peeridauth (#1445) 2025-06-10 10:25:34 -03:00
AkshayaMani
c501adc9ab feat(gossipsub): Add support for custom connection handling (Mix protocol integration) (#1420)
Co-authored-by: Ben-PH <benphawke@gmail.com>
2025-06-09 13:36:06 -04:00
Radosław Kamiński
f9fc24cc08 test(gossipsub): flaky tests (#1451) 2025-06-09 17:20:49 +01:00
richΛrd
cd26244ccc chore(quic): add libp2p_network_bytes metric (#1439)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-06-09 09:42:52 -03:00
vladopajic
cabab6aafe chore(gossipsub): add consts (#1447)
Co-authored-by: Radoslaw Kaminski <radoslaw@status.im>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-06-06 14:33:38 +00:00
Radosław Kamiński
fb42a9b4aa test(gossipsub): parameters (#1442)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-06-06 14:09:55 +00:00
Radosław Kamiński
141f4d9116 fix(GossipSub): save sent iHave in first element (#1405) 2025-06-06 10:27:59 +00:00
Gabriel Cruz
cb31152b53 feat(autotls): add acme client (#1436) 2025-06-05 17:47:02 +00:00
Radosław Kamiński
3a7745f920 test(gossipsub): message cache (#1431) 2025-06-03 15:18:29 +01:00
Radosław Kamiński
a89916fb1a test: checkUntilTimeout refactor (#1437) 2025-06-03 13:31:34 +01:00
vladopajic
c6cf46c904 fix(ci-daily): delete cache action will continue on error (#1435) 2025-06-02 17:08:31 +02:00
Gabriel Cruz
b28a71ab13 chore(readme): improve README's development section (#1427) 2025-05-29 17:51:29 +00:00
vladopajic
95b9859bcd chore(interop): move interop code to separate folder (#1413) 2025-05-29 16:14:12 +00:00
vladopajic
9e599753af ci(daily): add pinned dependencies variant (#1418) 2025-05-29 15:27:06 +00:00
richΛrd
2e924906bb chore: bump quic (#1428) 2025-05-29 14:25:02 +00:00
Radosław Kamiński
e811c1ad32 fix(gossipsub): save iDontWants messages in the first element of history (#1393) 2025-05-29 13:33:51 +01:00
Radosław Kamiński
86695b55bb test(gossipsub): include missing test files and handle flaky tests (#1416)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-05-29 12:44:21 +01:00
vladopajic
8c3a4d882a ci(dependencies): fix access to tokens (#1421) 2025-05-29 00:27:36 +00:00
richΛrd
4bad343ddc fix: limit chronicles version to < 0.11.0 (#1423) 2025-05-28 21:00:41 -03:00
vladopajic
47b8a05c32 ci(daily): improvements (#1404) 2025-05-27 14:41:53 +00:00
Radosław Kamiński
4e6f4af601 test(gossipsub): heartbeat tests (#1391) 2025-05-27 10:28:12 +01:00
Miran
7275f6f9c3 chore: unused imports are now errors (#1399) 2025-05-26 21:36:08 +02:00
richΛrd
c3dae6a7d4 fix(quic): reset and mm for interop tests (#1397) 2025-05-26 12:16:17 -04:00
vladopajic
bb404eda4a fix(ci-daily): remove --solver flag (#1400) 2025-05-26 16:48:51 +02:00
richΛrd
584710bd80 chore: move -d:libp2p_quic_support flag to .nimble (#1392) 2025-05-26 08:57:26 -04:00
Radosław Kamiński
ad5eae9adf test(gossipsub): move and refactor control messages tests (#1380) 2025-05-22 15:10:37 +00:00
richΛrd
26fae7cd2d chore: bump quic (#1387) 2025-05-21 22:30:35 +00:00
Miran
87d6655368 chore: update more dependencies (#1374) 2025-05-21 21:46:09 +00:00
102 changed files with 5089 additions and 1565 deletions

View File

@@ -118,5 +118,5 @@ jobs:
nimble --version
gcc --version
export NIMFLAGS="${NIMFLAGS} -d:libp2p_quic_support --mm:${{ matrix.nim.memory_management }}"
export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble test

View File

@@ -51,7 +51,7 @@ jobs:
- name: Run test suite with coverage flags
run: |
export NIMFLAGS="-d:libp2p_quic_support --lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage"
export NIMFLAGS="--lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage"
nimble testnative
nimble testpubsub
nimble testfilter

View File

@@ -6,9 +6,26 @@ on:
workflow_dispatch:
jobs:
test_amd64:
name: Daily amd64
test_amd64_latest:
name: Daily amd64 (latest dependencies)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': 'version-2-0', 'memory_management': 'refc'}]"
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['amd64']"
test_amd64_pinned:
name: Daily amd64 (pinned dependencies)
uses: ./.github/workflows/daily_common.yml
with:
pinned_deps: true
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['amd64']"

View File

@@ -4,6 +4,11 @@ name: Daily Common
on:
workflow_call:
inputs:
pinned_deps:
description: 'Should dependencies be installed from pinned file or use latest versions'
required: false
type: boolean
default: false
nim:
description: 'Nim Configuration'
required: true
@@ -17,20 +22,12 @@ on:
required: false
type: string
default: "[]"
use_sat_solver:
description: 'Install dependencies with SAT Solver'
required: false
type: boolean
default: false
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
delete_cache:
name: Delete github action's branch cache
runs-on: ubuntu-latest
continue-on-error: true
steps:
- uses: snnaplab/delete-branch-cache-action@v1
@@ -81,8 +78,14 @@ jobs:
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Install dependencies
- name: Install dependencies (pinned)
if: ${{ inputs.pinned_deps }}
run: |
nimble install_pinned
- name: Install dependencies (latest)
if: ${{ inputs.pinned_deps != 'true' }}
run: |
nimble install -y --depsOnly
@@ -91,11 +94,6 @@ jobs:
nim --version
nimble --version
if [[ "${{ inputs.use_sat_solver }}" == "true" ]]; then
dependency_solver="sat"
else
dependency_solver="legacy"
fi
export NIMFLAGS="${NIMFLAGS} -d:libp2p_quic_support --mm:${{ matrix.nim.memory_management }} --solver:${dependency_solver}"
export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble test
nimble testintegration

View File

@@ -1,14 +0,0 @@
name: Daily Nim Devel
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
test_nim_devel:
name: Daily Nim Devel
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': 'devel', 'memory_management': 'orc'}]"
cpu: "['amd64']"

View File

@@ -10,6 +10,14 @@ jobs:
name: Daily i386 (Linux)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': 'version-2-0', 'memory_management': 'refc'}, {'ref': 'devel', 'memory_management': 'orc'}]"
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['i386']"
exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"
exclude: "[
{'platform': {'os':'macos'}},
{'platform': {'os':'windows'}},
]"

View File

@@ -1,15 +0,0 @@
name: Daily SAT
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
test_amd64:
name: Daily SAT
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': 'version-2-0', 'memory_management': 'refc'}]"
cpu: "['amd64']"
use_sat_solver: true

View File

@@ -17,13 +17,13 @@ jobs:
target:
- repository: status-im/nimbus-eth2
ref: unstable
token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NIMBUS_ETH2 }}
secret: ACTIONS_GITHUB_TOKEN_NIMBUS_ETH2
- repository: waku-org/nwaku
ref: master
token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NWAKU }}
secret: ACTIONS_GITHUB_TOKEN_NWAKU
- repository: codex-storage/nim-codex
ref: master
token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NIM_CODEX }}
secret: ACTIONS_GITHUB_TOKEN_NIM_CODEX
steps:
- name: Clone target repository
uses: actions/checkout@v4
@@ -32,7 +32,7 @@ jobs:
ref: ${{ matrix.target.ref}}
path: nbc
fetch-depth: 0
token: ${{ matrix.target.token }}
token: ${{ secrets[matrix.target.secret] }}
- name: Checkout this ref in target repository
run: |

View File

@@ -27,7 +27,7 @@ jobs:
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f tests/transport-interop/Dockerfile .
run: docker buildx build --load -t nim-libp2p-head -f interop/transport/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
with:
@@ -35,7 +35,7 @@ jobs:
# without suffix action fails because "hole-punching-interop" artifacts have
# the same name as "transport-interop" artifacts
test-results-suffix: transport-interop
extra-versions: ${{ github.workspace }}/tests/transport-interop/version.json
extra-versions: ${{ github.workspace }}/interop/transport/version.json
s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
@@ -48,12 +48,12 @@ jobs:
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
run: docker buildx build --load -t nim-libp2p-head -f interop/hole-punching/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json
extra-versions: ${{ github.workspace }}/interop/hole-punching/version.json
s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}

View File

@@ -22,6 +22,6 @@ jobs:
uses: arnetheduck/nph-action@v1
with:
version: 0.6.1
options: "examples libp2p tests tools *.nim*"
options: "examples libp2p tests interop tools *.nim*"
fail: true
suggest: true

11
.pinned
View File

@@ -1,6 +1,6 @@
bearssl;https://github.com/status-im/nim-bearssl@#34d712933a4e0f91f5e66bc848594a581504a215
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#c04576d829b8a0a1b12baaa8bc92037501b3a4a0
chronicles;https://github.com/status-im/nim-chronicles@#81a4a7a360c78be9c80c8f735c76b6d4a1517304
chronos;https://github.com/status-im/nim-chronos@#b55e2816eb45f698ddaca8d8473e401502562db2
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
faststreams;https://github.com/status-im/nim-faststreams@#c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d
httputils;https://github.com/status-im/nim-http-utils@#79cbab1460f4c0cdde2084589d017c43a3d7b4f1
@@ -8,12 +8,15 @@ json_serialization;https://github.com/status-im/nim-json-serialization@#2b1c5eb1
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
nimcrypto;https://github.com/cheatfate/nimcrypto@#19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
quic;https://github.com/status-im/nim-quic.git@#a6c30263c95fc5ddb2ef4d197c09b282555c06b0
quic;https://github.com/status-im/nim-quic.git@#ca3eda53bee9cef7379be195738ca1490877432f
results;https://github.com/arnetheduck/nim-results@#df8113dda4c2d74d460a8fa98252b0b771bf1f27
secp256k1;https://github.com/status-im/nim-secp256k1@#f808ed5e7a7bfc42204ec7830f14b7a42b63c284
serialization;https://github.com/status-im/nim-serialization@#548d0adc9797a10b2db7f788b804330306293088
stew;https://github.com/status-im/nim-stew@#0db179256cf98eb9ce9ee7b9bc939f219e621f77
testutils;https://github.com/status-im/nim-testutils@#9e842bd58420d23044bc55e16088e8abbe93ce51
unittest2;https://github.com/status-im/nim-unittest2@#8b51e99b4a57fcfb31689230e75595f024543024
websock;https://github.com/status-im/nim-websock@#f8ed9b40a5ff27ad02a3c237c4905b0924e3f982
websock;https://github.com/status-im/nim-websock@#d5cd89062cd2d168ef35193c7d29d2102921d97e
zlib;https://github.com/status-im/nim-zlib@#daa8723fd32299d4ca621c837430c29a5a11e19a
jwt;https://github.com/vacp2p/nim-jwt@#18f8378de52b241f321c1f9ea905456e89b95c6f
bearssl_pkey_decoder;https://github.com/vacp2p/bearssl_pkey_decoder@#21dd3710df9345ed2ad8bf8f882761e07863b8e0
bio;https://github.com/xzeshen/bio@#0f5ed58b31c678920b6b4f7c1783984e6660be97

166
README.md
View File

@@ -20,14 +20,13 @@
- [Background](#background)
- [Install](#install)
- [Getting Started](#getting-started)
- [Testing](#testing)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
- [Development](#development)
- [Contribute](#contribute)
- [Contributors](#contributors)
- [Core Maintainers](#core-maintainers)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
- [License](#license)
## Background
@@ -39,20 +38,102 @@ This is a native Nim implementation, using [chronos](https://github.com/status-i
Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p's documentation [**docs.libp2p.io**](https://docs.libp2p.io).
## Install
**Prerequisite**
- [Nim](https://nim-lang.org/install.html)
> The currently supported Nim versions are 1.6, 2.0 and 2.2.
```
nimble install libp2p
```
## Getting Started
You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/). See [examples](./examples) for simple usage patterns.
## Getting Started
Try out the chat example. For this you'll need to have [`go-libp2p-daemon`](examples/go-daemon/daemonapi.md) running. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
```bash
nim c -r --threads:on examples/directchat.nim
```
This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.
```bash
./examples/directchat
/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu # replace this hash with the one you were given
```
You can now chat between the instances!
![Chat example](https://imgur.com/caYRu8K.gif)
## Development
Clone the repository and install the dependencies:
```sh
git clone https://github.com/vacp2p/nim-libp2p
cd nim-libp2p
nimble install -dy
```
### Testing
Remember you'll need to build the `go-libp2p-daemon` binary to run the `nim-libp2p` tests.
To do so, please follow the installation instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
Run unit tests:
```sh
# run all the unit tests
nimble test
```
**Note:** Running all tests requires the [`go-libp2p-daemon` to be installed and running](examples/go-daemon/daemonapi.md).
If you only want to run tests that don't require `go-libp2p-daemon`, use:
```
nimble testnative
```
For a list of all available test suites, use:
```
nimble tasks
```
### Contribute
The libp2p implementation in Nim is a work in progress. We welcome contributors to help out! Specifically, you can:
- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
- **Code format**. Code should be formatted with [nph](https://github.com/arnetheduck/nph) and follow the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
### Contributors
<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
### Core Maintainers
<table>
<tbody>
<tr>
<td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
<td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
<td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
</tr>
</tbody>
</table>
### Compile time flags
Enable quic transport support
```bash
nim c -d:libp2p_quic_support some_file.nim
```
Enable expensive metrics (ie, metrics with per-peer cardinality):
```bash
nim c -d:libp2p_expensive_metrics some_file.nim
```
Set list of known libp2p agents for metrics:
```bash
nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku some_file.nim
```
Specify gossipsub specific topics to measure in the metrics:
```bash
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
```
## Modules
List of modules implemented in nim-libp2p:
@@ -111,71 +192,6 @@ The versioning follows [semver](https://semver.org/), with some additions:
We aim to be compatible at all times with at least 2 Nim `MINOR` versions, currently `1.6 & 2.0`
## Development
Clone and Install dependencies:
```sh
git clone https://github.com/vacp2p/nim-libp2p
cd nim-libp2p
# to use dependencies computed by nimble
nimble install -dy
# OR to install the dependencies versions used in CI
nimble install_pinned
```
Run unit tests:
```sh
# run all the unit tests
nimble test
```
This requires the go daemon to be available. To only run native tests, use `nimble testnative`.
Or use `nimble tasks` to show all available tasks.
### Contribute
The libp2p implementation in Nim is a work in progress. We welcome contributors to help out! Specifically, you can:
- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
- **Code format**. Please format code using [nph](https://github.com/arnetheduck/nph) v0.5.1. This will ensure a consistent codebase and make PRs easier to review. A CI rule has been added to ensure that future commits are all formatted using the same nph version.
The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
### Contributors
<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
### Core Maintainers
<table>
<tbody>
<tr>
<td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
<td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
<td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
</tr>
</tbody>
</table>
### Compile time flags
Enable quic transport support
```bash
nim c -d:libp2p_quic_support some_file.nim
```
Enable expensive metrics (ie, metrics with per-peer cardinality):
```bash
nim c -d:libp2p_expensive_metrics some_file.nim
```
Set list of known libp2p agents for metrics:
```bash
nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku some_file.nim
```
Specify gossipsub specific topics to measure in the metrics:
```bash
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
```
## License
Licensed and distributed under either of

View File

@@ -4,6 +4,7 @@ if dirExists("nimbledeps/pkgs"):
if dirExists("nimbledeps/pkgs2"):
switch("NimblePath", "nimbledeps/pkgs2")
switch("warningAsError", "UnusedImport:on")
switch("warning", "CaseTransition:off")
switch("warning", "ObservableStores:off")
switch("warning", "LockLevel:off")

View File

@@ -3,9 +3,7 @@
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Script](#script)
- [Usage](#usage)
- [Example](#example)
- [Getting Started](#getting-started)
- [Examples](#examples)
# Introduction
This is a libp2p-backed daemon wrapping the functionalities of go-libp2p for use in Nim. <br>
@@ -13,20 +11,25 @@ For more information about the go daemon, check out [this repository](https://gi
> **Required only** for running the tests.
# Prerequisites
Go with version `1.16.0`.
Go with version `1.16.0`
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.
# Installation
Follow one of the methods below:
## Script
Run the build script with the `go` command pointing to the correct Go version.
We recommend using `1.16.0`, as previously stated.
```sh
./scripts/build_p2pd.sh
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the correct directory.
If you find any issues, please head into our discord and ask for our assistance.
`build_p2pd.sh` will not rebuild unless needed. If you already have the newest binary and you want to force the rebuild, use:
```sh
./scripts/build_p2pd.sh -f
```
Or:
```sh
./scripts/build_p2pd.sh --force
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the `$GOPATH/bin` directory.
If you're having issues, head into [our discord](https://discord.com/channels/864066763682218004/1115526869769535629) and ask for assistance.
After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
```sh
@@ -34,28 +37,7 @@ export PATH="$PATH:$HOME/go/bin"
```
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.
# Usage
## Example
# Examples
Examples can be found in the [examples folder](https://github.com/status-im/nim-libp2p/tree/readme/examples/go-daemon)
## Getting Started
Try out the chat example. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
```bash
nim c -r --threads:on examples/directchat.nim
```
This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.
```bash
./examples/directchat
/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu
```
You can now chat between the instances!
![Chat example](https://imgur.com/caYRu8K.gif)

View File

@@ -11,7 +11,7 @@ RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240db
COPY . nim-libp2p/
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./tests/hole-punching-interop/hole_punching.nim
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs --mm:refc -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./interop/hole-punching/hole_punching.nim
FROM --platform=linux/amd64 debian:bullseye-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2 libssl-dev

View File

@@ -15,8 +15,7 @@ import
protocols/connectivity/autonat/service,
protocols/ping,
]
import ../stubs/autonatclientstub
import ../errorhelpers
import ../../tests/[stubs/autonatclientstub, errorhelpers]
logScope:
topics = "hp interop node"
@@ -85,8 +84,8 @@ proc main() {.async.} =
debug "Dialing relay...", relayMA
let relayId = await switch.connect(relayMA).wait(30.seconds)
debug "Connected to relay", relayId
except AsyncTimeoutError:
raise newException(CatchableError, "Connection to relay timed out")
except AsyncTimeoutError as e:
raise newException(CatchableError, "Connection to relay timed out: " & e.msg, e)
# Wait for our relay address to be published
while not switch.peerInfo.addrs.anyIt(it.contains(multiCodec("p2p-circuit")).tryGet()):
@@ -104,7 +103,7 @@ proc main() {.async.} =
try:
PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
except Exception as e:
raise newException(CatchableError, e.msg)
raise newException(CatchableError, "Exception init peer: " & e.msg, e)
debug "Got listener peer id", listenerId
let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
@@ -131,8 +130,8 @@ try:
return "done"
discard waitFor(mainAsync().wait(4.minutes))
except AsyncTimeoutError:
error "Program execution timed out."
except AsyncTimeoutError as e:
error "Program execution timed out", description = e.msg
quit(-1)
except CatchableError as e:
error "Unexpected error", description = e.msg

View File

@@ -13,6 +13,6 @@ COPY . nim-libp2p/
RUN \
cd nim-libp2p && \
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs -p:nim-libp2p -d:libp2p_quic_support -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./tests/transport-interop/main.nim
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs -p:nim-libp2p --mm:refc -d:libp2p_quic_support -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./interop/transport/main.nim
ENTRYPOINT ["/app/nim-libp2p/tests/transport-interop/main"]
ENTRYPOINT ["/app/nim-libp2p/interop/transport/main"]

View File

@@ -47,12 +47,9 @@ proc main() {.async.} =
MultiAddress.init("/ip4/" & ip & "/udp/0/quic-v1").tryGet()
)
of "ws":
discard switchBuilder
.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
WsTransport.new(upgr)
discard switchBuilder.withWsTransport().withAddress(
MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet()
)
.withAddress(MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet())
else:
doAssert false
@@ -83,7 +80,7 @@ proc main() {.async.} =
try:
redisClient.bLPop(@["listenerAddr"], testTimeout.seconds.int)[1]
except Exception as e:
raise newException(CatchableError, e.msg)
raise newException(CatchableError, "Exception calling bLPop: " & e.msg, e)
let
remoteAddr = MultiAddress.init(listenerAddr).tryGet()
dialingStart = Moment.now()
@@ -108,8 +105,8 @@ try:
return "done"
discard waitFor(mainAsync().wait(testTimeout))
except AsyncTimeoutError:
error "Program execution timed out."
except AsyncTimeoutError as e:
error "Program execution timed out", description = e.msg
quit(-1)
except CatchableError as e:
error "Unexpected error", description = e.msg

View File

@@ -9,9 +9,9 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
requires "nim >= 1.6.0",
"nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
"chronicles >= 0.10.2", "chronos >= 4.0.3", "metrics", "secp256k1", "stew >= 0.4.0",
"websock", "unittest2", "results",
"https://github.com/status-im/nim-quic.git#a6c30263c95fc5ddb2ef4d197c09b282555c06b0"
"chronicles >= 0.10.3 & < 0.11.0", "chronos >= 4.0.4", "metrics", "secp256k1",
"stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.7", "bio",
"https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -30,7 +30,7 @@ proc runTest(filename: string, moreoptions: string = "") =
excstr.add(" " & moreoptions & " ")
if getEnv("CICOV").len > 0:
excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
exec excstr & " -r " & " tests/" & filename
exec excstr & " -r -d:libp2p_quic_support tests/" & filename
rmFile "tests/" & filename.toExe
proc buildSample(filename: string, run = false, extraFlags = "") =
@@ -62,6 +62,9 @@ task testfilter, "Run PKI filter test":
runTest("testpkifilter")
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=")
task testintegration, "Runs integration tests":
runTest("testintegration")
task test, "Runs the test suite":
runTest("testall")
exec "nimble testfilter"

480
libp2p/autotls/acme/api.nim Normal file
View File

@@ -0,0 +1,480 @@
import options, base64, sequtils, strutils, json
from times import DateTime, parse
import chronos/apps/http/httpclient, jwt, results, bearssl/pem
import ./utils
import ../../crypto/crypto
import ../../crypto/rsa
export ACMEError
const
LetsEncryptURL* = "https://acme-v02.api.letsencrypt.org"
LetsEncryptURLStaging* = "https://acme-staging-v02.api.letsencrypt.org"
Alg = "RS256"
DefaultChalCompletedRetries = 10
DefaultChalCompletedRetryTime = 1.seconds
DefaultFinalizeRetries = 10
DefaultFinalizeRetryTime = 1.seconds
DefaultRandStringSize = 256
ACMEHttpHeaders = [("Content-Type", "application/jose+json")]
type Domain* = string
type Kid* = string
type Nonce* = string
type SignedACMERequest* = string
type ACMEDirectory* = object
newNonce*: string
newOrder*: string
newAccount*: string
type ACMEApi* = ref object of RootObj
directory: ACMEDirectory
session: HttpSessionRef
acmeServerURL*: string
type HTTPResponse* = object
body*: JsonNode
headers*: HttpTable
type JWK = object
kty: string
n: string
e: string
# whether the request uses Kid or not
type ACMERequestType = enum
ACMEJwkRequest
ACMEKidRequest
type ACMERequestHeader = object
alg: string
typ: string
nonce: string
url: string
case kind: ACMERequestType
of ACMEJwkRequest:
jwk: JWK
of ACMEKidRequest:
kid: Kid
type ACMERegisterRequest* = object
termsOfServiceAgreed: bool
contact: seq[string]
type ACMEAccountStatus = enum
valid
deactivated
revoked
type ACMERegisterResponseBody = object
status*: ACMEAccountStatus
type ACMERegisterResponse* = object
kid*: Kid
status*: ACMEAccountStatus
type ACMEChallengeStatus* {.pure.} = enum
pending = "pending"
processing = "processing"
valid = "valid"
invalid = "invalid"
type ACMEChallenge = object
url*: string
`type`*: string
status*: ACMEChallengeStatus
token*: string
type ACMEChallengeIdentifier = object
`type`: string
value: string
type ACMEChallengeRequest = object
identifiers: seq[ACMEChallengeIdentifier]
type ACMEChallengeResponseBody = object
status: ACMEChallengeStatus
authorizations: seq[string]
finalizeURL: string
type ACMEChallengeResponse* = object
status*: ACMEChallengeStatus
authorizations*: seq[string]
finalizeURL*: string
orderURL*: string
type ACMEChallengeResponseWrapper* = object
finalizeURL*: string
orderURL*: string
dns01*: ACMEChallenge
type ACMEAuthorizationsResponse* = object
challenges*: seq[ACMEChallenge]
type ACMECompletedResponse* = object
checkURL: string
type ACMEOrderStatus* {.pure.} = enum
pending = "pending"
ready = "ready"
processing = "processing"
valid = "valid"
invalid = "invalid"
type ACMECheckKind* = enum
ACMEOrderCheck
ACMEChallengeCheck
type ACMECheckResponse* = object
case kind: ACMECheckKind
of ACMEOrderCheck:
orderStatus: ACMEOrderStatus
of ACMEChallengeCheck:
chalStatus: ACMEChallengeStatus
retryAfter: Duration
type ACMEFinalizeResponse* = object
status: ACMEOrderStatus
type ACMEOrderResponse* = object
certificate: string
expires: string
type ACMECertificateResponse* = object
rawCertificate: string
certificateExpiry: DateTime
template handleError*(msg: string, body: untyped): untyped =
try:
body
except ACMEError as exc:
raise exc
except CancelledError as exc:
raise exc
except JsonKindError as exc:
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
except ValueError as exc:
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
except HttpError as exc:
raise newException(ACMEError, msg & ": Failed to connect to ACME server", exc)
except CatchableError as exc:
raise newException(ACMEError, msg & ": Unexpected error", exc)
method post*(
self: ACMEApi, url: Uri, payload: SignedACMERequest
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.}
method get*(
self: ACMEApi, url: Uri
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.}
proc new*(
T: typedesc[ACMEApi], acmeServerURL: string = LetsEncryptURL
): Future[ACMEApi] {.async: (raises: [ACMEError, CancelledError]).} =
let session = HttpSessionRef.new()
let directory = handleError("new API"):
let rawResponse =
await HttpClientRequestRef.get(session, acmeServerURL & "/directory").get().send()
let body = await rawResponse.getResponseBody()
body.to(ACMEDirectory)
ACMEApi(session: session, directory: directory, acmeServerURL: acmeServerURL)
method requestNonce*(
self: ACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]), base.} =
handleError("requestNonce"):
let acmeResponse = await self.get(self.directory.newNonce)
Nonce(acmeResponse.headers.keyOrError("Replay-Nonce"))
# TODO: save n and e in account so we don't have to recalculate every time
proc acmeHeader(
self: ACMEApi, url: string, key: KeyPair, needsJwk: bool, kid: Opt[Kid]
): Future[ACMERequestHeader] {.async: (raises: [ACMEError, CancelledError]).} =
if not needsJwk and kid.isNone:
raise newException(ACMEError, "kid not set")
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
raise newException(ACMEError, "Unsupported signing key type")
let newNonce = await self.requestNonce()
if needsJwk:
let pubkey = key.pubkey.rsakey
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
ACMERequestHeader(
kind: ACMEJwkRequest,
alg: Alg,
typ: "JWT",
nonce: newNonce,
url: url,
jwk: JWK(kty: "RSA", n: base64UrlEncode(nArray), e: base64UrlEncode(eArray)),
)
else:
ACMERequestHeader(
kind: ACMEKidRequest,
alg: Alg,
typ: "JWT",
nonce: newNonce,
url: url,
kid: kid.get(),
)
method post*(
self: ACMEApi, uri: Uri, payload: SignedACMERequest
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.} =
let rawResponse = await HttpClientRequestRef
.post(self.session, $uri, body = payload, headers = ACMEHttpHeaders)
.get()
.send()
let body = await rawResponse.getResponseBody()
HTTPResponse(body: body, headers: rawResponse.headers)
method get*(
self: ACMEApi, uri: Uri
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.} =
let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
let body = await rawResponse.getResponseBody()
HTTPResponse(body: body, headers: rawResponse.headers)
proc createSignedACMERequest(
self: ACMEApi,
uri: Uri,
payload: auto,
key: KeyPair,
needsJwk: bool = false,
kid: Opt[Kid] = Opt.none(Kid),
): Future[SignedACMERequest] {.async: (raises: [ACMEError, CancelledError]).} =
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
raise newException(ACMEError, "Unsupported signing key type")
let acmeHeader = await self.acmeHeader(url, key, needsJwk, kid)
handleError("createSignedACMERequest"):
var token = toJWT(%*{"header": acmeHeader, "claims": payload})
let derPrivKey = key.seckey.rsakey.getBytes.get
let pemPrivKey: string = pemEncode(derPrivKey, "PRIVATE KEY")
token.sign(pemPrivKey)
$token.toFlattenedJson()
proc requestRegister*(
self: ACMEApi, key: KeyPair
): Future[ACMERegisterResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let registerRequest = ACMERegisterRequest(termsOfServiceAgreed: true)
handleError("acmeRegister"):
let payload = await self.createSignedACMERequest(
self.directory.newAccount, registerRequest, key, needsJwk = true
)
let acmeResponse = await self.post(self.directory.newAccount, payload)
let acmeResponseBody = acmeResponse.body.to(ACMERegisterResponseBody)
ACMERegisterResponse(
status: acmeResponseBody.status, kid: acmeResponse.headers.keyOrError("location")
)
proc requestNewOrder*(
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
# request challenge from ACME server
let orderRequest = ACMEChallengeRequest(
identifiers: domains.mapIt(ACMEChallengeIdentifier(`type`: "dns", value: it))
)
handleError("requestNewOrder"):
let payload = await self.createSignedACMERequest(
self.directory.newOrder, orderRequest, key, kid = Opt.some(kid)
)
let acmeResponse = await self.post(self.directory.newOrder, payload)
let challengeResponseBody = acmeResponse.body.to(ACMEChallengeResponseBody)
if challengeResponseBody.authorizations.len() == 0:
raise newException(ACMEError, "Authorizations field is empty")
ACMEChallengeResponse(
status: challengeResponseBody.status,
authorizations: challengeResponseBody.authorizations,
finalizeURL: challengeResponseBody.finalize,
orderURL: acmeResponse.headers.keyOrError("location"),
)
proc requestAuthorizations*(
self: ACMEApi, authorizations: seq[string], key: KeyPair, kid: Kid
): Future[ACMEAuthorizationsResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestAuthorizations"):
doAssert authorizations.len > 0
let acmeResponse = await self.get(authorizations[0])
acmeResponse.body.to(ACMEAuthorizationsResponse)
proc requestChallenge*(
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponseWrapper] {.async: (raises: [ACMEError, CancelledError]).} =
let challengeResponse = await self.requestNewOrder(domains, key, kid)
let authorizationsResponse =
await self.requestAuthorizations(challengeResponse.authorizations, key, kid)
return ACMEChallengeResponseWrapper(
finalizeURL: challengeResponse.finalize,
orderURL: challengeResponse.orderURL,
dns01: authorizationsResponse.challenges.filterIt(it.`type` == "dns-01")[0],
)
proc requestCheck*(
self: ACMEApi, checkURL: Uri, checkKind: ACMECheckKind, key: KeyPair, kid: Kid
): Future[ACMECheckResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestCheck"):
let acmeResponse = await self.get(checkURL)
let retryAfter =
try:
parseInt(acmeResponse.headers.keyOrError("Retry-After")).seconds
except ValueError:
DefaultChalCompletedRetryTime
case checkKind
of ACMEOrderCheck:
try:
ACMECheckResponse(
kind: checkKind,
orderStatus: parseEnum[ACMEOrderStatus](acmeResponse.body["status"].getStr),
retryAfter: retryAfter,
)
except ValueError:
raise newException(
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
)
of ACMEChallengeCheck:
try:
ACMECheckResponse(
kind: checkKind,
chalStatus: parseEnum[ACMEChallengeStatus](acmeResponse.body["status"].getStr),
retryAfter: retryAfter,
)
except ValueError:
raise newException(
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
)
proc requestCompleted*(
self: ACMEApi, chalURL: string, key: KeyPair, kid: Kid
): Future[ACMECompletedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestCompleted (send notify)"):
let payload =
await self.createSignedACMERequest(chalURL, %*{}, key, kid = Opt.some(kid))
let acmeResponse = await self.post(chalURL, payload)
acmeResponse.body.to(ACMECompletedResponse)
proc checkChallengeCompleted*(
self: ACMEApi,
checkURL: string,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
for i in 0 .. retries:
let checkResponse = await self.requestCheck(checkURL, ACMEChallengeCheck, key, kid)
case checkResponse.chalStatus
of ACMEChallengeStatus.pending:
await sleepAsync(checkResponse.retryAfter) # try again after some delay
of ACMEChallengeStatus.valid:
return true
else:
raise newException(
ACMEError,
"Failed challenge completion: expected 'valid', got '" &
$checkResponse.chalStatus & "'",
)
return false
proc completeChallenge*(
self: ACMEApi,
chalURL: string,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
let completedResponse = await self.requestCompleted(chalURL, key, kid)
# check until acme server is done (poll validation)
return await self.checkChallengeCompleted(chalURL, key, kid, retries = retries)
proc requestFinalize*(
self: ACMEApi, domain: Domain, finalizeURL: Uri, key: KeyPair, kid: Kid
): Future[ACMEFinalizeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let derCSR = createCSR(domain)
let b64CSR = base64.encode(derCSR.toSeq, safe = true)
handleError("requestFinalize"):
let payload = await self.createSignedACMERequest(
finalizeURL, %*{"csr": b64CSR}, key, kid = Opt.some(kid)
)
let acmeResponse = await self.post(finalizeURL, payload)
# server responds with updated order response
acmeResponse.body.to(ACMEFinalizeResponse)
proc checkCertFinalized*(
self: ACMEApi,
orderURL: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
for i in 0 .. retries:
let checkResponse = await self.requestCheck(orderURL, ACMEOrderCheck, key, kid)
case checkResponse.orderStatus
of ACMEOrderStatus.valid:
return true
of ACMEOrderStatus.processing:
await sleepAsync(checkResponse.retryAfter) # try again after some delay
else:
raise newException(
ACMEError,
"Failed certificate finalization: expected 'valid', got '" &
$checkResponse.orderStatus & "'",
)
return false
return false
proc certificateFinalized*(
self: ACMEApi,
domain: Domain,
finalizeURL: Uri,
orderURL: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultFinalizeRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
let finalizeResponse = await self.requestFinalize(domain, finalizeURL, key, kid)
# keep checking order until cert is valid (done)
return await self.checkCertFinalized(orderURL, key, kid, retries = retries)
proc requestGetOrder*(
self: ACMEApi, orderURL: Uri
): Future[ACMEOrderResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestGetOrder"):
let acmeResponse = await self.get($orderURL)
acmeResponse.body.to(ACMEOrderResponse)
proc downloadCertificate*(
self: ACMEApi, orderURL: Uri
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let orderResponse = await self.requestGetOrder($orderURL)
handleError("downloadCertificate"):
let rawResponse = await HttpClientRequestRef
.get(self.session, orderResponse.certificate)
.get()
.send()
ACMECertificateResponse(
rawCertificate: bytesToString(await rawResponse.getBodyBytes()),
certificateExpiry: parse(orderResponse.expires, "yyyy-MM-dd'T'HH:mm:ss'Z'"),
)
proc close*(self: ACMEApi): Future[void] {.async: (raises: [CancelledError]).} =
await self.session.closeWait()
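
Taken together, the new `ACMEApi` drives the standard ACME order flow: register an account, open an order for the domains, publish the dns-01 challenge, notify completion, finalize with a CSR, and download the certificate. A rough usage sketch follows (names taken from the diff above; error handling, key setup, and the exact string/`Uri` conversions are simplified, and the domain is a placeholder):

```nim
# Hedged sketch of the ACME flow exposed by ACMEApi above; the domain and key
# handling are illustrative only.
let api = await ACMEApi.new(acmeServerURL = LetsEncryptURLStaging)
let key = KeyPair.random(PKScheme.RSA, newRng()[]).get()
let account = await api.requestRegister(key)          # yields the kid used by later calls
let order = await api.requestChallenge(@["example.org"], key, account.kid)
# ... publish the dns-01 TXT record derived from order.dns01.token, then:
discard await api.completeChallenge(order.dns01.url, key, account.kid)
discard await api.certificateFinalized(
  "example.org", order.finalizeURL, order.orderURL, key, account.kid
)
let cert = await api.downloadCertificate(order.orderURL)
await api.close()
```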

View File

@@ -0,0 +1,38 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import chronos, results
import ./api
type KeyAuthorization* = string
type ACMEClient* = object
api: ACMEApi
key*: KeyPair
kid*: Kid
proc new*(
T: typedesc[ACMEClient],
key: Opt[KeyPair] = Opt.none(KeyPair),
acmeServerURL: string = LetsEncryptURL,
): Future[T] {.async: (raises: [ACMEError, CancelledError]).} =
let api = await ACMEApi.new()
let key = key.valueOr:
KeyPair.random(PKScheme.RSA, self.rng[]).get()
let kid = await api.requestRegister(key)
T(api: api, key: key, kid: kid)
proc genKeyAuthorization*(self: ACMEClient, domains: seq[Domain]): KeyAuthorization =
let dns01 = self.api.requestChallenge(domains, self.key, self.kid)
base64UrlEncode(
@(sha256.digest((dns01.token & "." & thumbprint(self.key)).toByteSeq).data)
)

View File

@@ -0,0 +1,37 @@
import chronos, chronos/apps/http/httpclient, json
import ./api, ./utils
export api
type MockACMEApi* = ref object of ACMEApi
parent*: ACMEApi
mockedHeaders*: HttpTable
mockedBody*: JsonNode
proc new*(
T: typedesc[MockACMEApi]
): Future[MockACMEApi] {.async: (raises: [ACMEError, CancelledError]).} =
let directory = ACMEDirectory(
newNonce: LetsEncryptURL & "/new-nonce",
newOrder: LetsEncryptURL & "/new-order",
newAccount: LetsEncryptURL & "/new-account",
)
MockACMEApi(
session: HttpSessionRef.new(), directory: directory, acmeServerURL: LetsEncryptURL
)
method requestNonce*(
self: MockACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
return self.acmeServerURL & "/acme/1234"
method post*(
self: MockACMEApi, uri: Uri, payload: SignedACMERequest
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
HTTPResponse(body: self.mockedBody, headers: self.mockedHeaders)
method get*(
self: MockACMEApi, uri: Uri
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
HTTPResponse(body: self.mockedBody, headers: self.mockedHeaders)

View File

@@ -0,0 +1,48 @@
import base64, strutils, chronos/apps/http/httpclient, json
import ../../errors
import ../../transports/tls/certificate_ffi
type ACMEError* = object of LPError
proc keyOrError*(table: HttpTable, key: string): string {.raises: [ValueError].} =
if not table.contains(key):
raise newException(ValueError, "key " & key & " not present in headers")
table.getString(key)
proc base64UrlEncode*(data: seq[byte]): string =
## Encodes data using base64url (RFC 4648 §5) — no padding, URL-safe
var encoded = base64.encode(data, safe = true)
encoded.removeSuffix("=")
encoded.removeSuffix("=")
return encoded
proc getResponseBody*(
response: HttpClientResponseRef
): Future[JsonNode] {.async: (raises: [ACMEError, CancelledError]).} =
try:
let responseBody = bytesToString(await response.getBodyBytes()).parseJson()
return responseBody
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise
newException(ACMEError, "Unexpected error occurred while getting body bytes", exc)
except Exception as exc: # this is required for nim 1.6
raise
newException(ACMEError, "Unexpected error occurred while getting body bytes", exc)
proc createCSR*(domain: string): string {.raises: [ACMEError].} =
var certKey: cert_key_t
var certCtx: cert_context_t
var derCSR: ptr cert_buffer = nil
let personalizationStr = "libp2p_autotls"
if cert_init_drbg(
personalizationStr.cstring, personalizationStr.len.csize_t, certCtx.addr
) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to initialize certCtx")
if cert_generate_key(certCtx, certKey.addr) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to generate cert key")
if cert_signing_req(domain.cstring, certKey, derCSR.addr) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to create CSR")

238
libp2p/autotls/client.nim Normal file
View File

@@ -0,0 +1,238 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
{.push public.}
import
net,
results,
chronos,
chronicles,
bearssl/rand,
bio,
json,
sequtils,
chronos/apps/http/httpclient
import ./acme/client
import ../peeridauth/client
import ../nameresolving/dnsresolver
import ../wire
import ../crypto/crypto
import ../peerinfo
import ../utils/heartbeat
logScope:
topics = "libp2p autotls"
const
DefaultDnsServers =
@[
initTAddress("1.1.1.1:53"),
initTAddress("1.0.0.1:53"),
initTAddress("[2606:4700:4700::1111]:53"),
]
DefaultRenewCheckTime = 1.hours
DefaultDnsRetries = 10
DefaultDnsRetryTime = 1.seconds
type AutoTLSCertificate* = object
cert*: TLSCertificate
expiry*: Moment
type AutoTLSClient = ref object
bearer*: Opt[BearerToken]
peerInfo: Opt[PeerInfo]
peerIDAuthClient: PeerIDAuthClient
type AutoTLSManager* = object
autoTLSClient: AutoTLSClient
acmeClient: ACMEClient
cert*: Opt[AutoTLSCertificate]
certReady*: AsyncEvent
dnsResolver: DnsResolver
fut: Future[void]
ipAddress: Opt[IpAddress]
renewCheckTime: Duration
proc new*(
T: typedesc[AutoTLSManager],
acmeClient: ref ACMEClient = nil,
dnsResolver: DnsResolver = DnsResolver.new(DefaultDnsServers),
ipAddress: Opt[IpAddress] = Opt.none(IpAddress),
): AutoTLSManager =
T(
fut: nil,
cert: Opt.none(AutoTLSCertificate),
certReady: newAsyncEvent(),
acmeClient: acmeClient,
dnsResolver: dnsResolver,
peerInfo: Opt.none(PeerInfo),
bearerToken: Opt.none(BearerToken),
renewCheckTime: DefaultRenewCheckTime,
ipAddress: ipAddress,
)
proc checkDNSRecords(
self: AutoTLSManager,
ip4Domain: string,
acmeChalDomain: string,
keyAuthorization: KeyAuthorization,
retries: int = DefaultDnsRetries,
): Future[bool] {.async: (raises: [AutoTLSError, CancelledError]).} =
var txt: seq[string]
var ip4: seq[TransportAddress]
for _ in 0 .. retries:
txt = await self.dnsResolver.resolveTxt(acmeChalDomain)
try:
ip4 = await self.dnsResolver.resolveIp(ip4Domain, 0.Port)
except CatchableError as exc:
error "Failed to resolve IP", description = exc.msg # retry
if txt.len > 0 and txt[0] == keyAuthorization and ip4.len > 0:
return true
await sleepAsync(DefaultDnsRetryTime)
return false
method issueCertificate(
self: AutoTLSManager
): Future[void] {.base, async: (raises: [AutoTLSError, CancelledError]).} =
trace "Issuing new certificate"
let peerInfo = self.peerInfo.valueOr:
raise newException(AutoTLSError, "Cannot issue new certificate: peerInfo not set")
# generate autotls domain string: "*.{peerID}.libp2p.direct"
let base36PeerId = encodePeerId(peerInfo.peerId)
let baseDomain = base36PeerId & "." & AutoTLSDNSServer
let domain = "*." & baseDomain
trace "Requesting ACME challenge"
let keyAuthorization = self.acmeClient.getKeyAuthorization(@[domain])
trace "Sending challenge to AutoTLS broker"
let strMultiaddresses: seq[string] = peerInfo.addrs.mapIt($it)
let payload = %*{"value": keyAuthorization, "addresses": strMultiaddresses}
let registrationURL = "https://" & AutoTLSBroker & "/v1/_acme-challenge"
var response: HttpClientResponseRef
var bearerToken: BearerToken
if self.bearerToken.isSome:
(bearerToken, response) = await peerIdAuthSend(
registrationURL,
self.httpSession,
peerInfo,
payload,
bearerToken = self.bearerToken,
)
else:
# authenticate, send challenge and save bearerToken for future requests
(bearerToken, response) =
await peerIdAuthSend(registrationURL, self.httpSession, peerInfo, payload)
self.bearerToken = Opt.some(bearerToken)
if response.status != HttpOk:
raise newException(
AutoTLSError, "Failed to authenticate with AutoTLS Broker at " & AutoTLSBroker
)
# no need to do anything from this point forward if the host has no public IP address
let hostPrimaryIP: IpAddress =
try:
let ip = self.ipAddress.valueOr:
checkedGetPrimaryIPAddr()
if not isPublicIPv4(ip):
raise newException(AutoTLSError, "Host does not have a public IPv4 address")
ip
except GetPrimaryIPError as exc:
raise newException(AutoTLSError, "Failed to get primary IP address for host", exc)
except CatchableError as exc:
raise newException(
AutoTLSError, "Unexpected error while getting primary IP address for host", exc
)
debug "Waiting for DNS record to be set"
# if my ip address is 100.10.10.3 then the ip4Domain will be:
# 100-10-10-3.{peerIdBase36}.libp2p.direct
# and acme challenge TXT domain will be:
# _acme-challenge.{peerIdBase36}.libp2p.direct
let dashedIpAddr = ($hostPrimaryIP).replace(".", "-")
let acmeChalDomain = "_acme-challenge." & baseDomain
let ip4Domain = dashedIpAddr & "." & baseDomain
if not await self.checkDNSRecords(ip4Domain, acmeChalDomain, keyAuthorization):
raise newException(AutoTLSError, "DNS records not set")
debug "Notifying challenge completion to ACME server"
let chalURL = dns01Challenge.getJSONField("url").getStr
await self.acmeClient.notifyChallengeCompleted(chalURL)
debug "Finalize cert request with CSR"
if not await self.acmeClient.finalizeCertificate(domain, finalizeURL, orderURL):
raise newException(AutoTLSError, "ACME certificate finalization request failed")
debug "Downloading certificate"
let (rawCert, expiry) = await self.acmeClient.downloadCertificate(orderURL)
trace "Installing certificate"
try:
self.cert = Opt.some(TLSCertificate.init(rawCert))
self.certExpiry = Opt.some(asMoment(expiry))
except TLSStreamProtocolError:
raise newException(AutoTLSError, "Could not parse downloaded certificates")
self.certReady.fire()
proc manageCertificate(
self: AutoTLSManager
): Future[void] {.async: (raises: [AutoTLSError, CancelledError]).} =
trace "Starting AutoTLS manager"
debug "Registering ACME Client"
if self.acmeClient.isNil:
self.acmeClient = await ACMEClient.new()
heartbeat "Certificate Management", self.renewCheckTime:
if self.cert.isNone or self.certExpiry.isNone:
try:
await self.issueCertificate()
except CatchableError as exc:
error "Failed to issue certificate", err = exc.msg
break
# AutoTLSManager will renew the cert 1h before it expires
let expiry = self.certExpiry.get
let waitTime: Duration = expiry - Moment.now - self.renewCheckTime
if waitTime <= self.renewCheckTime:
try:
await self.issueCertificate()
except CatchableError as exc:
error "Failed to renew certificate", err = exc.msg
break
method start*(
self: AutoTLSManager, peerInfo: PeerInfo
): Future[void] {.base, async: (raises: [CancelledError]).} =
if not self.fut.isNil:
warn "Starting AutoTLS twice"
return
self.peerInfo = Opt.some(peerInfo)
self.fut = self.manageCertificate()
method stop*(self: AutoTLSManager): Future[void] {.base, async: (raises: []).} =
trace "AutoTLS stop"
if self.fut.isNil:
warn "Stopping AutoTLS without starting it"
return
await self.fut.cancelAndWait()
self.fut = nil
if not self.acmeClient.isNil:
await self.acmeClient.session.closeWait()
if not self.httpSession.isNil:
await self.httpSession.closeWait()
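
The manager above is driven by `start`/`stop` and signals certificate availability through the `certReady` event. A minimal, hypothetical usage sketch (the switch and its `peerInfo` are assumed to come from the usual `SwitchBuilder` setup):

```nim
# Hypothetical sketch of driving AutoTLSManager; field and proc names follow the
# diff above, the surrounding switch setup is assumed.
let manager = AutoTLSManager.new()
await manager.start(switch.peerInfo)   # kicks off the certificate management loop
await manager.certReady.wait()         # fires once a certificate has been installed
let certificate = manager.cert         # Opt[AutoTLSCertificate] holding cert and expiry
# ... hand the certificate to a TLS-capable transport ...
await manager.stop()
```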

View File

@@ -23,7 +23,7 @@ import
stream/connection,
multiaddress,
crypto/crypto,
transports/[transport, tcptransport, memorytransport],
transports/[transport, tcptransport, wstransport, memorytransport],
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
@@ -35,7 +35,9 @@ import
utility
import services/wildcardresolverservice
export switch, peerid, peerinfo, connection, multiaddress, crypto, errors
export
switch, peerid, peerinfo, connection, multiaddress, crypto, errors, TLSPrivateKey,
TLSCertificate, TLSFlags, ServerFlags
const MemoryAutoAddress* = memorytransport.MemoryAutoAddress
@@ -169,6 +171,18 @@ proc withTcpTransport*(
TcpTransport.new(flags, upgr)
)
proc withWsTransport*(
b: SwitchBuilder,
tlsPrivateKey: TLSPrivateKey = nil,
tlsCertificate: TLSCertificate = nil,
tlsFlags: set[TLSFlags] = {},
flags: set[ServerFlags] = {},
): SwitchBuilder =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
WsTransport.new(upgr, tlsPrivateKey, tlsCertificate, tlsFlags, flags)
)
when defined(libp2p_quic_support):
import transports/quictransport
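
With the new builder proc, enabling the WebSocket transport no longer requires passing a transport provider closure. A minimal sketch of building a switch with it (addresses and the other builder options are illustrative):

```nim
# Illustrative sketch only: withWsTransport wires WsTransport into SwitchBuilder,
# mirroring how the interop code above now uses it.
let switch = SwitchBuilder
  .new()
  .withRng(newRng())
  .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0/ws").tryGet())
  .withWsTransport()
  .withMplex()
  .withNoise()
  .build()
```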

View File

@@ -10,6 +10,7 @@
## This module implements CID (Content IDentifier).
{.push raises: [].}
{.used.}
import tables, hashes
import multibase, multicodec, multihash, vbuffer, varint, results

View File

@@ -140,7 +140,7 @@ proc triggerConnEvent*(
except CancelledError as exc:
raise exc
except CatchableError as exc:
warn "Exception in triggerConnEvents",
warn "Exception in triggerConnEvent",
description = exc.msg, peer = peerId, event = $event
proc addPeerEventHandler*(
@@ -186,7 +186,7 @@ proc expectConnection*(
if key in c.expectedConnectionsOverLimit:
raise newException(
AlreadyExpectingConnectionError,
"Already expecting an incoming connection from that peer",
"Already expecting an incoming connection from that peer: " & shortLog(p),
)
let future = Future[Muxer].Raising([CancelledError]).init()

View File

@@ -85,8 +85,9 @@ proc init*(sig: var SkSignature, data: string): SkResult[void] =
var buffer: seq[byte]
try:
buffer = hexToSeqByte(data)
except ValueError:
return err("secp: Hex to bytes failed")
except ValueError as e:
let errMsg = "secp: Hex to bytes failed: " & e.msg
return err(errMsg.cstring)
init(sig, buffer)
proc init*(t: typedesc[SkPrivateKey], data: openArray[byte]): SkResult[SkPrivateKey] =

View File

@@ -595,13 +595,13 @@ template exceptionToAssert(body: untyped): untyped =
try:
res = body
except OSError as exc:
raise exc
raise newException(OSError, "failure in exceptionToAssert: " & exc.msg, exc)
except IOError as exc:
raise exc
raise newException(IOError, "failure in exceptionToAssert: " & exc.msg, exc)
except Defect as exc:
raise exc
raise newException(Defect, "failure in exceptionToAssert: " & exc.msg, exc)
except Exception as exc:
raiseAssert exc.msg
raiseAssert "Exception captured in exceptionToAssert: " & exc.msg
when defined(nimHasWarnBareExcept):
{.pop.}
res
@@ -967,9 +967,9 @@ proc openStream*(
stream.flags.incl(Outbound)
stream.transp = transp
result = stream
except ResultError[ProtoError]:
except ResultError[ProtoError] as e:
await api.closeConnection(transp)
raise newException(DaemonLocalError, "Wrong message type!")
raise newException(DaemonLocalError, "Wrong message type: " & e.msg, e)
proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
# must not specify raised exceptions as this is StreamCallback from chronos
@@ -1023,10 +1023,10 @@ proc addHandler*(
api.servers.add(P2PServer(server: server, address: maddress))
except DaemonLocalError as e:
await removeHandler()
raise e
raise newException(DaemonLocalError, "Could not add stream handler: " & e.msg, e)
except TransportError as e:
await removeHandler()
raise e
raise newException(TransportError, "Could not add stream handler: " & e.msg, e)
except CancelledError as e:
await removeHandler()
raise e
@@ -1503,10 +1503,14 @@ proc pubsubSubscribe*(
result = ticket
except DaemonLocalError as exc:
await api.closeConnection(transp)
raise exc
raise newException(
DaemonLocalError, "Could not subscribe to topic '" & topic & "': " & exc.msg, exc
)
except TransportError as exc:
await api.closeConnection(transp)
raise exc
raise newException(
TransportError, "Could not subscribe to topic '" & topic & "': " & exc.msg, exc
)
except CancelledError as exc:
await api.closeConnection(transp)
raise exc

View File

@@ -127,8 +127,8 @@ proc expandDnsAddr(
var peerIdBytes: seq[byte]
try:
peerIdBytes = lastPart.protoArgument().tryGet()
except ResultError[string]:
raiseAssert "expandDnsAddr failed in protoArgument: " & getCurrentExceptionMsg()
except ResultError[string] as e:
raiseAssert "expandDnsAddr failed in expandDnsAddr protoArgument: " & e.msg
let addrPeerId = PeerId.init(peerIdBytes).tryGet()
result.add((resolvedAddress[0 ..^ 2].tryGet(), Opt.some(addrPeerId)))
@@ -178,7 +178,7 @@ proc internalConnect(
dir = Direction.Out,
): Future[Muxer] {.async: (raises: [DialFailedError, CancelledError]).} =
if Opt.some(self.localPeerId) == peerId:
raise newException(DialFailedError, "can't dial self!")
raise newException(DialFailedError, "internalConnect can't dial self!")
# Ensure there's only one in-flight attempt per peer
let lock = self.dialLock.mgetOrPut(peerId.get(default(PeerId)), newAsyncLock())
@@ -186,8 +186,8 @@ proc internalConnect(
defer:
try:
lock.release()
except AsyncLockError:
raiseAssert "lock must have been acquired in line above"
except AsyncLockError as e:
raiseAssert "lock must have been acquired in line above: " & e.msg
if reuseConnection:
peerId.withValue(peerId):
@@ -198,7 +198,9 @@ proc internalConnect(
try:
self.connManager.getOutgoingSlot(forceDial)
except TooManyConnectionsError as exc:
raise newException(DialFailedError, exc.msg)
raise newException(
DialFailedError, "failed getOutgoingSlot in internalConnect: " & exc.msg, exc
)
let muxed =
try:
@@ -208,11 +210,15 @@ proc internalConnect(
raise exc
except CatchableError as exc:
slot.release()
raise newException(DialFailedError, exc.msg)
raise newException(
DialFailedError, "failed dialAndUpgrade in internalConnect: " & exc.msg, exc
)
slot.trackMuxer(muxed)
if isNil(muxed): # None of the addresses connected
raise newException(DialFailedError, "Unable to establish outgoing link")
raise newException(
DialFailedError, "Unable to establish outgoing link in internalConnect"
)
try:
self.connManager.storeMuxer(muxed)
@@ -228,7 +234,11 @@ proc internalConnect(
except CatchableError as exc:
trace "Failed to finish outgoing upgrade", description = exc.msg
await muxed.close()
raise newException(DialFailedError, "Failed to finish outgoing upgrade")
raise newException(
DialFailedError,
"Failed to finish outgoing upgrade in internalConnect: " & exc.msg,
exc,
)
method connect*(
self: Dialer,
@@ -260,7 +270,7 @@ method connect*(
if allowUnknownPeerId == false:
raise newException(
DialFailedError, "Address without PeerID and unknown peer id disabled!"
DialFailedError, "Address without PeerID and unknown peer id disabled in connect"
)
return
@@ -273,7 +283,7 @@ proc negotiateStream(
let selected = await MultistreamSelect.select(conn, protos)
if not protos.contains(selected):
await conn.closeWithEOF()
raise newException(DialFailedError, "Unable to select sub-protocol " & $protos)
raise newException(DialFailedError, "Unable to select sub-protocol: " & $protos)
return conn
@@ -289,13 +299,13 @@ method tryDial*(
try:
let mux = await self.dialAndUpgrade(Opt.some(peerId), addrs)
if mux.isNil():
raise newException(DialFailedError, "No valid multiaddress")
raise newException(DialFailedError, "No valid multiaddress in tryDial")
await mux.close()
return mux.connection.observedAddr
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise newException(DialFailedError, exc.msg)
raise newException(DialFailedError, "tryDial failed: " & exc.msg, exc)
method dial*(
self: Dialer, peerId: PeerId, protos: seq[string]
@@ -309,14 +319,17 @@ method dial*(
try:
let stream = await self.connManager.getStream(peerId)
if stream.isNil:
raise newException(DialFailedError, "Couldn't get muxed stream")
raise newException(
DialFailedError,
"Couldn't get muxed stream in dial for peer_id: " & shortLog(peerId),
)
return await self.negotiateStream(stream, protos)
except CancelledError as exc:
trace "Dial canceled"
trace "Dial canceled", description = exc.msg
raise exc
except CatchableError as exc:
trace "Error dialing", description = exc.msg
raise newException(DialFailedError, exc.msg)
raise newException(DialFailedError, "failed dial existing: " & exc.msg)
method dial*(
self: Dialer,
@@ -347,17 +360,20 @@ method dial*(
stream = await self.connManager.getStream(conn)
if isNil(stream):
raise newException(DialFailedError, "Couldn't get muxed stream")
raise newException(
DialFailedError,
"Couldn't get muxed stream in new dial for remote_peer_id: " & shortLog(peerId),
)
return await self.negotiateStream(stream, protos)
except CancelledError as exc:
trace "Dial canceled", conn
trace "Dial canceled", conn, description = exc.msg
await cleanup()
raise exc
except CatchableError as exc:
debug "Error dialing", conn, description = exc.msg
await cleanup()
raise newException(DialFailedError, exc.msg)
raise newException(DialFailedError, "failed new dial: " & exc.msg, exc)
method addTransport*(self: Dialer, t: Transport) =
self.transports &= t

View File

@@ -113,7 +113,7 @@ proc add*(dm: DiscoveryManager, di: DiscoveryInterface) =
try:
query.peers.putNoWait(pa)
except AsyncQueueFullError as exc:
debug "Cannot push discovered peer to queue"
debug "Cannot push discovered peer to queue", description = exc.msg
proc request*(dm: DiscoveryManager, pa: PeerAttributes): DiscoveryQuery =
var query = DiscoveryQuery(attr: pa, peers: newAsyncQueue[PeerAttributes]())

View File

@@ -10,6 +10,7 @@
## This module implements MultiCodec.
{.push raises: [].}
{.used.}
import tables, hashes
import vbuffer

View File

@@ -22,6 +22,7 @@
## 2. MURMUR
{.push raises: [].}
{.used.}
import tables
import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
@@ -566,7 +567,7 @@ proc init*(mhtype: typedesc[MultiHash], data: string): MhResult[MultiHash] {.inl
proc init58*(mhtype: typedesc[MultiHash], data: string): MultiHash {.inline.} =
## Create MultiHash from BASE58 encoded string representation ``data``.
if MultiHash.decode(Base58.decode(data), result) == -1:
raise newException(MultihashError, "Incorrect MultiHash binary format")
raise newException(MultihashError, "Incorrect MultiHash binary format in init58")
proc cmp(a: openArray[byte], b: openArray[byte]): bool {.inline.} =
if len(a) != len(b):

View File

@@ -87,7 +87,7 @@ proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
raise exc
except LPStreamError as exc:
await s.conn.close()
raise exc
raise newException(LPStreamError, "Opening LPChannel failed: " & exc.msg, exc)
method closed*(s: LPChannel): bool =
s.closedLocal

View File

@@ -587,10 +587,12 @@ method handle*(m: Yamux) {.async: (raises: []).} =
let channel =
try:
m.channels[header.streamId]
except KeyError:
except KeyError as e:
raise newException(
YamuxError,
"Stream was cleaned up before handling data: " & $header.streamId,
"Stream was cleaned up before handling data: " & $header.streamId & " : " &
e.msg,
e,
)
if header.msgType == WindowUpdate:

View File

@@ -78,23 +78,23 @@ proc getDnsResponse(
try:
await receivedDataFuture.wait(5.seconds) #unix default
except AsyncTimeoutError:
raise newException(IOError, "DNS server timeout")
except AsyncTimeoutError as e:
raise newException(IOError, "DNS server timeout: " & e.msg, e)
let rawResponse = sock.getMessage()
try:
parseResponse(string.fromBytes(rawResponse))
except IOError as exc:
raise exc
raise newException(IOError, "Failed to parse DNS response: " & exc.msg, exc)
except OSError as exc:
raise exc
raise newException(OSError, "Failed to parse DNS response: " & exc.msg, exc)
except ValueError as exc:
raise exc
raise newException(ValueError, "Failed to parse DNS response: " & exc.msg, exc)
except Exception as exc:
# Nim 1.6: parseResponse can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert exc.msg
raiseAssert "Exception parsing DN response: " & exc.msg
finally:
await sock.closeWait()

View File

@@ -11,6 +11,7 @@
{.push raises: [].}
{.push public.}
{.used.}
import
std/[hashes, strutils],

View File

@@ -0,0 +1,335 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import base64, json, strutils, uri, times
import chronos, chronos/apps/http/httpclient, results, chronicles, bio
import ../peerinfo, ../crypto/crypto, ../varint.nim
logScope:
topics = "libp2p peeridauth"
const
NimLibp2pUserAgent = "nim-libp2p"
PeerIDAuthPrefix* = "libp2p-PeerID"
ChallengeCharset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
ChallengeDefaultLen = 48
type PeerIDAuthClient* = ref object of RootObj
session: HttpSessionRef
rng: ref HmacDrbgContext
type PeerIDAuthError* = object of LPError
type PeerIDAuthResponse* = object
status*: int
headers*: HttpTable
body*: seq[byte]
type BearerToken* = object
token*: string
expires*: Opt[DateTime]
type PeerIDAuthOpaque* = string
type PeerIDAuthSignature* = string
type PeerIDAuthChallenge* = string
type PeerIDAuthAuthenticationResponse* = object
challengeClient*: PeerIDAuthChallenge
opaque*: PeerIDAuthOpaque
serverPubkey*: PublicKey
type PeerIDAuthAuthorizationResponse* = object
sig*: PeerIDAuthSignature
bearer*: BearerToken
response*: PeerIDAuthResponse
type SigParam = object
k: string
v: seq[byte]
proc new*(T: typedesc[PeerIDAuthClient], rng: ref HmacDrbgContext): PeerIDAuthClient =
PeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)
proc sampleChar(
ctx: var HmacDrbgContext, choices: string
): char {.raises: [ValueError].} =
## Samples a random character from the input string using the DRBG context
if choices.len == 0:
raise newException(ValueError, "Cannot sample from an empty string")
var idx: uint32
ctx.generate(idx)
return choices[uint32(idx mod uint32(choices.len))]
proc randomChallenge(
rng: ref HmacDrbgContext, challengeLen: int = ChallengeDefaultLen
): PeerIDAuthChallenge {.raises: [PeerIDAuthError].} =
var rng = rng[]
var challenge = ""
try:
for _ in 0 ..< challengeLen:
challenge.add(rng.sampleChar(ChallengeCharset))
except ValueError as exc:
raise newException(PeerIDAuthError, "Failed to generate challenge", exc)
PeerIDAuthChallenge(challenge)
proc extractField(data, key: string): string {.raises: [PeerIDAuthError].} =
# Helper to extract quoted value from key
for segment in data.split(","):
if key in segment:
return segment.split("=", 1)[1].strip(chars = {' ', '"'})
raise newException(PeerIDAuthError, "Failed to find " & key & " in " & data)
proc genDataToSign(
parts: seq[SigParam], prefix: string = PeerIDAuthPrefix
): seq[byte] {.raises: [PeerIDAuthError].} =
var buf: seq[byte] = prefix.toByteSeq()
for p in parts:
let varintLen = PB.encodeVarint(hint(p.k.len + p.v.len + 1)).valueOr:
raise newException(PeerIDAuthError, "could not encode fields length to varint")
buf.add varintLen
buf.add (p.k & "=").toByteSeq()
buf.add p.v
return buf
proc getSigParams(
clientSender: bool, hostname: string, challenge: string, publicKey: PublicKey
): seq[SigParam] =
if clientSender:
@[
SigParam(k: "challenge-client", v: challenge.toByteSeq()),
SigParam(k: "hostname", v: hostname.toByteSeq()),
SigParam(k: "server-public-key", v: publicKey.getBytes().get()),
]
else:
@[
SigParam(k: "challenge-server", v: challenge.toByteSeq()),
SigParam(k: "client-public-key", v: publicKey.getBytes().get()),
SigParam(k: "hostname", v: hostname.toByteSeq()),
]
proc sign(
privateKey: PrivateKey,
challenge: PeerIDAuthChallenge,
publicKey: PublicKey,
hostname: string,
clientSender: bool = true,
): PeerIDAuthSignature {.raises: [PeerIDAuthError].} =
let bytesToSign =
getSigParams(clientSender, hostname, challenge, publicKey).genDataToSign()
PeerIDAuthSignature(
base64.encode(privateKey.sign(bytesToSign).get().getBytes(), safe = true)
)
proc checkSignature*(
serverSig: PeerIDAuthSignature,
serverPublicKey: PublicKey,
challengeServer: PeerIDAuthChallenge,
clientPublicKey: PublicKey,
hostname: string,
): bool {.raises: [PeerIDAuthError].} =
let bytesToSign =
getSigParams(false, hostname, challengeServer, clientPublicKey).genDataToSign()
var serverSignature: Signature
try:
if not serverSignature.init(base64.decode(serverSig).toByteSeq()):
raise newException(
PeerIDAuthError, "Failed to initialize Signature from base64 encoded sig"
)
except ValueError as exc:
raise newException(PeerIDAuthError, "Failed to decode server's signature", exc)
serverSignature.verify(
bytesToSign.toOpenArray(0, bytesToSign.len - 1), serverPublicKey
)
method post*(
self: PeerIDAuthClient, uri: string, payload: string, authHeader: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
let rawResponse = await HttpClientRequestRef
.post(
self.session,
uri,
body = payload,
headers = [
("Content-Type", "application/json"),
("User-Agent", NimLibp2pUserAgent),
("Authorization", authHeader),
],
)
.get()
.send()
PeerIDAuthResponse(
status: rawResponse.status,
headers: rawResponse.headers,
body: await rawResponse.getBodyBytes(),
)
method get*(
self: PeerIDAuthClient, uri: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
PeerIDAuthResponse(
status: rawResponse.status,
headers: rawResponse.headers,
body: await rawResponse.getBodyBytes(),
)
proc requestAuthentication*(
self: PeerIDAuthClient, uri: Uri
): Future[PeerIDAuthAuthenticationResponse] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
let response =
try:
await self.get($uri)
except HttpError as exc:
raise newException(PeerIDAuthError, "Failed to start PeerID Auth", exc)
let wwwAuthenticate = response.headers.getString("WWW-Authenticate")
if wwwAuthenticate == "":
raise newException(PeerIDAuthError, "WWW-authenticate not present in response")
let serverPubkey: PublicKey =
try:
PublicKey.init(decode(extractField(wwwAuthenticate, "public-key")).toByteSeq()).valueOr:
raise newException(PeerIDAuthError, "Failed to initialize server public-key")
except ValueError as exc:
raise newException(PeerIDAuthError, "Failed to decode server public-key", exc)
PeerIDAuthAuthenticationResponse(
challengeClient: extractField(wwwAuthenticate, "challenge-client"),
opaque: extractField(wwwAuthenticate, "opaque"),
serverPubkey: serverPubkey,
)
proc pubkeyBytes*(pubkey: PublicKey): seq[byte] {.raises: [PeerIDAuthError].} =
try:
pubkey.getBytes().valueOr:
raise
newException(PeerIDAuthError, "Failed to get bytes from PeerInfo's publicKey")
except ValueError as exc:
raise newException(
PeerIDAuthError, "Failed to get bytes from PeerInfo's publicKey", exc
)
proc parse3339DateTime(
timeStr: string
): DateTime {.raises: [ValueError, TimeParseError].} =
let parts = timeStr.split('.')
let base = parse(parts[0], "yyyy-MM-dd'T'HH:mm:ss")
let millis = parseInt(parts[1].strip(chars = {'Z'}))
result = base + initDuration(milliseconds = millis)
proc requestAuthorization*(
self: PeerIDAuthClient,
peerInfo: PeerInfo,
uri: Uri,
challengeClient: PeerIDAuthChallenge,
challengeServer: PeerIDAuthChallenge,
serverPubkey: PublicKey,
opaque: PeerIDAuthOpaque,
payload: auto,
): Future[PeerIDAuthAuthorizationResponse] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
let clientPubkeyB64 = peerInfo.publicKey.pubkeyBytes().encode(safe = true)
let sig = peerInfo.privateKey.sign(challengeClient, serverPubkey, uri.hostname)
let authHeader =
PeerIDAuthPrefix & " public-key=\"" & clientPubkeyB64 & "\"" & ", opaque=\"" & opaque &
"\"" & ", challenge-server=\"" & challengeServer & "\"" & ", sig=\"" & sig & "\""
let response =
try:
await self.post($uri, $payload, authHeader)
except HttpError as exc:
raise newException(
PeerIDAuthError, "Failed to send Authorization for PeerID Auth", exc
)
let authenticationInfo = response.headers.getString("authentication-info")
let bearerExpires =
try:
Opt.some(parse3339DateTime(extractField(authenticationInfo, "expires")))
except ValueError, PeerIDAuthError, TimeParseError:
Opt.none(DateTime)
PeerIDAuthAuthorizationResponse(
sig: PeerIDAuthSignature(extractField(authenticationInfo, "sig")),
bearer: BearerToken(
token: extractField(authenticationInfo, "bearer"), expires: bearerExpires
),
response: response,
)
proc sendWithoutBearer(
self: PeerIDAuthClient, uri: Uri, peerInfo: PeerInfo, payload: auto
): Future[(BearerToken, PeerIDAuthResponse)] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
# Authenticate using the three-step handshake described in the PeerID Auth spec
# https://github.com/libp2p/specs/blob/master/http/peer-id-auth.md
let authenticationResponse = await self.requestAuthentication(uri)
let challengeServer = self.rng.randomChallenge()
let authorizationResponse = await self.requestAuthorization(
peerInfo, uri, authenticationResponse.challengeClient, challengeServer,
authenticationResponse.serverPubkey, authenticationResponse.opaque, payload,
)
if not checkSignature(
authorizationResponse.sig, authenticationResponse.serverPubkey, challengeServer,
peerInfo.publicKey, uri.hostname,
):
raise newException(PeerIDAuthError, "Failed to validate server's signature")
return (authorizationResponse.bearer, authorizationResponse.response)
proc sendWithBearer(
self: PeerIDAuthClient,
uri: Uri,
peerInfo: PeerInfo,
payload: auto,
bearer: BearerToken,
): Future[(BearerToken, PeerIDAuthResponse)] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
if bearer.expires.isSome and DateTime(bearer.expires.get) <= now():
raise newException(PeerIDAuthError, "Bearer expired")
let authHeader = PeerIDAuthPrefix & " bearer=\"" & bearer.token & "\""
let response =
try:
await self.post($uri, $payload, authHeader)
except HttpError as exc:
raise newException(
PeerIDAuthError, "Failed to send request with bearer token for PeerID Auth", exc
)
return (bearer, response)
proc send*(
self: PeerIDAuthClient,
uri: Uri,
peerInfo: PeerInfo,
payload: auto,
bearer: BearerToken = BearerToken(),
): Future[(BearerToken, PeerIDAuthResponse)] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
if bearer.token == "":
await self.sendWithoutBearer(uri, peerInfo, payload)
else:
await self.sendWithBearer(uri, peerInfo, payload, bearer)
proc close*(
self: PeerIDAuthClient
): Future[void] {.async: (raises: [CancelledError]).} =
await self.session.closeWait()
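
A usage sketch for the client above; the endpoint URL, JSON payload and import path are placeholders/assumptions, only new, send and close come from this file:

import std/[uri, json]
import chronos
import libp2p/[peerinfo, crypto/crypto]
import libp2p/peeridauth/client # assumed module path

proc authenticate(peerInfo: PeerInfo) {.async.} =
  let client = PeerIDAuthClient.new(newRng())
  try:
    # no bearer token yet, so send() runs the full challenge/response handshake
    let (bearer, resp) = await client.send(
      parseUri("https://registration.example/v1/register"), # placeholder URL
      peerInfo,
      %*{"value": "example"},
    )
    echo "status: ", resp.status
    # later requests can reuse the bearer token until it expires
    discard await client.send(
      parseUri("https://registration.example/v1/register"),
      peerInfo,
      %*{"value": "example"},
      bearer,
    )
  finally:
    await client.close()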

View File

@@ -0,0 +1,41 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import chronos, chronos/apps/http/httpclient
import ../crypto/crypto
import ./client
export client
type MockPeerIDAuthClient* = ref object of PeerIDAuthClient
mockedStatus*: int
mockedHeaders*: HttpTable
mockedBody*: seq[byte]
proc new*(
T: typedesc[MockPeerIDAuthClient], rng: ref HmacDrbgContext
): MockPeerIDAuthClient {.raises: [PeerIDAuthError].} =
MockPeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)
method post*(
self: MockPeerIDAuthClient, uri: string, payload: string, authHeader: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
PeerIDAuthResponse(
status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
)
method get*(
self: MockPeerIDAuthClient, uri: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
PeerIDAuthResponse(
status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
)
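
A small test sketch under the same assumptions: the mock simply returns whatever status/headers/body it was primed with, so the handshake parsing above can be exercised without a real server.

import chronos
import libp2p/crypto/crypto

let mock = MockPeerIDAuthClient.new(newRng())
mock.mockedStatus = 401
mock.mockedBody = @[]
# any get/post now yields the primed response
let resp = waitFor mock.get("https://example.test/auth") # placeholder URL
doAssert resp.status == 401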

View File

@@ -101,8 +101,10 @@ proc new*(
let pubkey =
try:
key.getPublicKey().tryGet()
except CatchableError:
raise newException(PeerInfoError, "invalid private key")
except CatchableError as e:
raise newException(
PeerInfoError, "invalid private key creating PeerInfo: " & e.msg, e
)
let peerId = PeerId.init(key).tryGet()

View File

@@ -87,7 +87,7 @@ method dialMe*(
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(AutonatError, "read Dial response failed", e)
raise newException(AutonatError, "read Dial response failed: " & e.msg, e)
let response = getResponseOrRaise(AutonatMsg.decode(respBytes))

View File

@@ -107,7 +107,9 @@ proc startSync*(
description = err.msg
raise newException(
DcutrError,
"Unexpected error when Dcutr initiator tried to connect to the remote peer", err,
"Unexpected error when Dcutr initiator tried to connect to the remote peer: " &
err.msg,
err,
)
finally:
if stream != nil:

View File

@@ -148,7 +148,7 @@ proc dialPeerV1*(
raise exc
except LPStreamError as exc:
trace "error writing hop request", description = exc.msg
raise newException(RelayV1DialError, "error writing hop request", exc)
raise newException(RelayV1DialError, "error writing hop request: " & exc.msg, exc)
let msgRcvFromRelayOpt =
try:
@@ -158,7 +158,8 @@ proc dialPeerV1*(
except LPStreamError as exc:
trace "error reading stop response", description = exc.msg
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise newException(RelayV1DialError, "error reading stop response", exc)
raise
newException(RelayV1DialError, "error reading stop response: " & exc.msg, exc)
try:
let msgRcvFromRelay = msgRcvFromRelayOpt.valueOr:
@@ -173,10 +174,16 @@ proc dialPeerV1*(
)
except RelayV1DialError as exc:
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise exc
raise newException(
RelayV1DialError,
"Hop can't open destination stream after sendStatus: " & exc.msg,
exc,
)
except ValueError as exc:
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise newException(RelayV1DialError, exc.msg)
raise newException(
RelayV1DialError, "Exception reading msg in dialPeerV1: " & exc.msg, exc
)
result = conn
proc dialPeerV2*(
@@ -199,7 +206,8 @@ proc dialPeerV2*(
raise exc
except CatchableError as exc:
trace "error reading stop response", description = exc.msg
raise newException(RelayV2DialError, exc.msg)
raise
newException(RelayV2DialError, "Exception decoding HopMessage: " & exc.msg, exc)
if msgRcvFromRelay.msgType != HopMessageType.Status:
raise newException(RelayV2DialError, "Unexpected stop response")

View File

@@ -76,7 +76,7 @@ proc dial*(
if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
raise newException(RelayDialError, "Destination doesn't exist")
except RelayDialError as e:
raise e
raise newException(RelayDialError, "dial address not valid: " & e.msg, e)
except CatchableError:
raise newException(RelayDialError, "dial address not valid")
@@ -100,13 +100,13 @@ proc dial*(
raise e
except DialFailedError as e:
safeClose(rc)
raise newException(RelayDialError, "dial relay peer failed", e)
raise newException(RelayDialError, "dial relay peer failed: " & e.msg, e)
except RelayV1DialError as e:
safeClose(rc)
raise e
raise newException(RelayV1DialError, "dial relay v1 failed: " & e.msg, e)
except RelayV2DialError as e:
safeClose(rc)
raise e
raise newException(RelayV2DialError, "dial relay v2 failed: " & e.msg, e)
method dial*(
self: RelayTransport,
@@ -121,7 +121,8 @@ method dial*(
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(transport.TransportDialError, e.msg, e)
raise
newException(transport.TransportDialError, "Caught error in dial: " & e.msg, e)
method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe.} =
try:

View File

@@ -69,8 +69,8 @@ proc bridge*(
while not connSrc.closed() and not connDst.closed():
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(futSrc, futDst)
except ValueError:
raiseAssert("Futures list is not empty")
except ValueError as e:
raiseAssert("Futures list is not empty: " & e.msg)
if futSrc.finished():
bufRead = await futSrc
if bufRead > 0:

View File

@@ -0,0 +1,159 @@
import ../../protobuf/minprotobuf
import ../../varint
import ../../utility
import results
import ../../multiaddress
import stew/objects
import stew/assign2
import options
type
Record* {.public.} = object
key*: Option[seq[byte]]
value*: Option[seq[byte]]
timeReceived*: Option[string]
MessageType* = enum
putValue = 0
getValue = 1
addProvider = 2
getProviders = 3
findNode = 4
ping = 5 # Deprecated
ConnectionType* = enum
notConnected = 0
connected = 1
canConnect = 2 # Unused
cannotConnect = 3 # Unused
Peer* {.public.} = object
id*: seq[byte]
addrs*: seq[MultiAddress]
connection*: ConnectionType
Message* {.public.} = object
msgType*: MessageType
key*: Option[seq[byte]]
record*: Option[Record]
closerPeers*: seq[Peer]
providerPeers*: seq[Peer]
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].}
proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].}
proc encode*(record: Record): ProtoBuffer {.raises: [].} =
var pb = initProtoBuffer()
pb.writeOpt(1, record.key)
pb.writeOpt(2, record.value)
pb.writeOpt(5, record.timeReceived)
pb.finish()
return pb
proc encode*(peer: Peer): ProtoBuffer {.raises: [].} =
var pb = initProtoBuffer()
pb.write(1, peer.id)
for address in peer.addrs:
pb.write(2, address.data.buffer)
pb.write(3, uint32(ord(peer.connection)))
pb.finish()
return pb
proc encode*(msg: Message): ProtoBuffer {.raises: [].} =
var pb = initProtoBuffer()
pb.write(1, uint32(ord(msg.msgType)))
pb.writeOpt(2, msg.key)
msg.record.withValue(record):
pb.writeOpt(3, msg.record)
for peer in msg.closerPeers:
pb.write(8, peer.encode())
for peer in msg.providerPeers:
pb.write(9, peer.encode())
pb.finish()
return pb
proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].} =
opt.withValue(v):
pb.write(field, v)
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].} =
pb.write(field, value.encode())
proc getOptionField[T: ProtoScalar | string | seq[byte]](
pb: ProtoBuffer, field: int, output: var Option[T]
): ProtoResult[void] =
var f: T
if ?pb.getField(field, f):
assign(output, some(f))
ok()
proc decode*(T: type Record, pb: ProtoBuffer): ProtoResult[Option[T]] =
var r: Record
?pb.getOptionField(1, r.key)
?pb.getOptionField(2, r.value)
?pb.getOptionField(5, r.timeReceived)
return ok(some(r))
proc decode*(T: type Peer, pb: ProtoBuffer): ProtoResult[Option[T]] =
var
p: Peer
id: seq[byte]
?pb.getRequiredField(1, p.id)
discard ?pb.getRepeatedField(2, p.addrs)
var connVal: uint32
if ?pb.getField(3, connVal):
var connType: ConnectionType
if not checkedEnumAssign(connType, connVal):
return err(ProtoError.BadWireType)
p.connection = connType
return ok(some(p))
proc decode*(T: type Message, buf: seq[byte]): ProtoResult[Option[T]] =
var
m: Message
key: seq[byte]
recPb: seq[byte]
closerPbs: seq[seq[byte]]
providerPbs: seq[seq[byte]]
var pb = initProtoBuffer(buf)
var msgTypeVal: uint32
?pb.getRequiredField(1, msgTypeVal)
var msgType: MessageType
if not checkedEnumAssign(msgType, msgTypeVal):
return err(ProtoError.BadWireType)
m.msgType = msgType
?pb.getOptionField(2, m.key)
if ?pb.getField(3, recPb):
assign(m.record, ?Record.decode(initProtoBuffer(recPb)))
discard ?pb.getRepeatedField(8, closerPbs)
for ppb in closerPbs:
let peerOpt = ?Peer.decode(initProtoBuffer(ppb))
peerOpt.withValue(peer):
m.closerPeers.add(peer)
discard ?pb.getRepeatedField(9, providerPbs)
for ppb in providerPbs:
let peer = ?Peer.decode(initProtoBuffer(ppb))
peer.withValue(peer):
m.providerPeers.add(peer)
return ok(some(m))
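
An illustrative round-trip sketch for the new wire types; the key bytes are made up, only the encode/decode helpers and field numbers come from the file above:

import options, results

# build a FIND_NODE query for an illustrative key
let query = Message(msgType: MessageType.findNode, key: some(@[1'u8, 2, 3]))
let wire = query.encode().buffer # protobuf-encoded bytes
let decoded = Message.decode(wire).expect("decodable message")
doAssert decoded.isSome
doAssert decoded.get().msgType == MessageType.findNode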

View File

@@ -16,35 +16,68 @@ import ./core, ../../stream/connection
logScope:
topics = "libp2p perf"
type PerfClient* = ref object of RootObj
type Stats* = object
isFinal*: bool
uploadBytes*: uint
downloadBytes*: uint
duration*: Duration
type PerfClient* = ref object
stats: Stats
proc new*(T: typedesc[PerfClient]): T =
return T()
proc currentStats*(p: PerfClient): Stats =
return p.stats
proc perf*(
_: typedesc[PerfClient],
conn: Connection,
sizeToWrite: uint64 = 0,
sizeToRead: uint64 = 0,
p: PerfClient, conn: Connection, sizeToWrite: uint64 = 0, sizeToRead: uint64 = 0
): Future[Duration] {.public, async: (raises: [CancelledError, LPStreamError]).} =
var
size = sizeToWrite
buf: array[PerfSize, byte]
let start = Moment.now()
trace "starting performance benchmark", conn, sizeToWrite, sizeToRead
await conn.write(toSeq(toBytesBE(sizeToRead)))
while size > 0:
let toWrite = min(size, PerfSize)
await conn.write(buf[0 ..< toWrite])
size -= toWrite
p.stats = Stats()
await conn.close()
try:
var
size = sizeToWrite
buf: array[PerfSize, byte]
size = sizeToRead
let start = Moment.now()
while size > 0:
let toRead = min(size, PerfSize)
await conn.readExactly(addr buf[0], toRead.int)
size = size - toRead
await conn.write(toSeq(toBytesBE(sizeToRead)))
while size > 0:
let toWrite = min(size, PerfSize)
await conn.write(buf[0 ..< toWrite])
size -= toWrite.uint
let duration = Moment.now() - start
trace "finishing performance benchmark", duration
return duration
# update stats on a copy to avoid a race condition
var statsCopy = p.stats
statsCopy.duration = Moment.now() - start
statsCopy.uploadBytes += toWrite.uint
p.stats = statsCopy
await conn.close()
size = sizeToRead
while size > 0:
let toRead = min(size, PerfSize)
await conn.readExactly(addr buf[0], toRead.int)
size = size - toRead.uint
# update stats on a copy to avoid a race condition
var statsCopy = p.stats
statsCopy.duration = Moment.now() - start
statsCopy.downloadBytes += toRead.uint
p.stats = statsCopy
except CancelledError as e:
raise e
except LPStreamError as e:
raise e
finally:
p.stats.isFinal = true
trace "finishing performance benchmark", duration = p.stats.duration
return p.stats.duration
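
A sketch of the intent behind this change: perf now accumulates into stats while running, so another task can poll currentStats for live progress (the Connection is assumed to come from an established perf stream):

import chronos

proc measure(conn: Connection) {.async.} =
  let client = PerfClient.new()
  # start a 100 MiB upload benchmark; the future completes when perf is done
  let fut = client.perf(conn, sizeToWrite = 100 * 1024 * 1024, sizeToRead = 0)
  while not fut.finished:
    let s = client.currentStats()
    echo "uploaded ", s.uploadBytes, " bytes in ", s.duration
    await sleepAsync(1.seconds)
  echo "total duration: ", await fut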

View File

@@ -185,14 +185,14 @@ method init*(f: FloodSub) =
try:
await f.handleConn(conn, proto)
except CancelledError as exc:
trace "Unexpected cancellation in floodsub handler", conn
trace "Unexpected cancellation in floodsub handler", conn, description = exc.msg
raise exc
f.handler = handler
f.codec = FloodSubCodec
method publish*(
f: FloodSub, topic: string, data: seq[byte]
f: FloodSub, topic: string, data: seq[byte], useCustomConn: bool = false
): Future[int] {.async: (raises: []).} =
# base always returns 0
discard await procCall PubSub(f).publish(topic, data)

View File

@@ -218,7 +218,7 @@ method init*(g: GossipSub) =
try:
await g.handleConn(conn, proto)
except CancelledError as exc:
trace "Unexpected cancellation in gossipsub handler", conn
trace "Unexpected cancellation in gossipsub handler", conn, description = exc.msg
raise exc
g.handler = handler
@@ -702,24 +702,27 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
# Send unsubscribe (in reverse order to sub/graft)
procCall PubSub(g).onTopicSubscription(topic, subscribed)
method publish*(
proc makePeersForPublishUsingCustomConn(
g: GossipSub, topic: string
): HashSet[PubSubPeer] =
assert g.customConnCallbacks.isSome,
"GossipSub misconfiguration: useCustomConn was true, but no customConnCallbacks provided"
trace "Selecting peers via custom connection callback"
return g.customConnCallbacks.get().customPeerSelectionCB(
g.gossipsub.getOrDefault(topic),
g.subscribedDirectPeers.getOrDefault(topic),
g.mesh.getOrDefault(topic),
g.fanout.getOrDefault(topic),
)
proc makePeersForPublishDefault(
g: GossipSub, topic: string, data: seq[byte]
): Future[int] {.async: (raises: []).} =
logScope:
topic
if topic.len <= 0: # data could be 0/empty
debug "Empty topic, skipping publish"
return 0
# base always returns 0
discard await procCall PubSub(g).publish(topic, data)
trace "Publishing message on topic", data = data.shortLog
): HashSet[PubSubPeer] =
var peers: HashSet[PubSubPeer]
# add always direct peers
# Always include direct peers
peers.incl(g.subscribedDirectPeers.getOrDefault(topic))
if topic in g.topics: # if we're subscribed use the mesh
@@ -769,6 +772,29 @@ method publish*(
# ultimately is not sent)
g.lastFanoutPubSub[topic] = Moment.fromNow(g.parameters.fanoutTTL)
return peers
method publish*(
g: GossipSub, topic: string, data: seq[byte], useCustomConn: bool = false
): Future[int] {.async: (raises: []).} =
logScope:
topic
if topic.len <= 0: # data could be 0/empty
debug "Empty topic, skipping publish"
return 0
# base always returns 0
discard await procCall PubSub(g).publish(topic, data)
trace "Publishing message on topic", data = data.shortLog
let peers =
if useCustomConn:
g.makePeersForPublishUsingCustomConn(topic)
else:
g.makePeersForPublishDefault(topic, data)
if peers.len == 0:
let topicPeers = g.gossipsub.getOrDefault(topic).toSeq()
debug "No peers for topic, skipping publish",
@@ -807,7 +833,12 @@ method publish*(
if g.parameters.sendIDontWantOnPublish and isLargeMessage(msg, msgId):
g.sendIDontWant(msg, msgId, peers)
g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
g.broadcast(
peers,
RPCMsg(messages: @[msg]),
isHighPriority = true,
useCustomConn = useCustomConn,
)
if g.knownTopics.contains(topic):
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = [topic])

View File

@@ -305,9 +305,9 @@ proc handleIHave*(
proc handleIDontWant*(g: GossipSub, peer: PubSubPeer, iDontWants: seq[ControlIWant]) =
for dontWant in iDontWants:
for messageId in dontWant.messageIDs:
if peer.iDontWants[^1].len > 1000:
if peer.iDontWants[0].len >= IDontWantMaxCount:
break
peer.iDontWants[^1].incl(g.salt(messageId))
peer.iDontWants[0].incl(g.salt(messageId))
proc handleIWant*(
g: GossipSub, peer: PubSubPeer, iwants: seq[ControlIWant]
@@ -457,8 +457,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
prunes = toSeq(
try:
g.mesh[topic]
except KeyError:
raiseAssert "have peers"
except KeyError as e:
raiseAssert "have peers: " & e.msg
)
# avoid pruning peers we are currently grafting in this heartbeat
prunes.keepIf do(x: PubSubPeer) -> bool:
@@ -513,8 +513,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
var peers = toSeq(
try:
g.mesh[topic]
except KeyError:
raiseAssert "have peers"
except KeyError as e:
raiseAssert "have peers: " & e.msg
)
# grafting so high score has priority
peers.sort(byScore, SortOrder.Descending)
@@ -538,8 +538,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
it.peerId notin backingOff:
avail.add(it)
# by spec, grab only 2
if avail.len > 1:
# by spec, grab only up to MaxOpportunisticGraftPeers
if avail.len >= MaxOpportunisticGraftPeers:
break
for peer in avail:
@@ -690,7 +690,7 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] =
for peer in allPeers:
control.mgetOrPut(peer, ControlMessage()).ihave.add(ihave)
for msgId in ihave.messageIDs:
peer.sentIHaves[^1].incl(msgId)
peer.sentIHaves[0].incl(msgId)
libp2p_gossipsub_cache_window_size.set(cacheWindowSize.int64)

View File

@@ -50,6 +50,9 @@ const
# rust sigp: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/config.rs#L572
# go: https://github.com/libp2p/go-libp2p-pubsub/blob/08c17398fb11b2ab06ca141dddc8ec97272eb772/gossipsub.go#L155
IHaveMaxLength* = 5000
IDontWantMaxCount* = 1000
# maximum number of IDontWant messages in one slot of the history
MaxOpportunisticGraftPeers* = 2
type
TopicInfo* = object # gossip 1.1 related

View File

@@ -176,6 +176,7 @@ type
rng*: ref HmacDrbgContext
knownTopics*: HashSet[string]
customConnCallbacks*: Option[CustomConnectionCallbacks]
method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
## handle peer disconnects
@@ -187,7 +188,11 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
libp2p_pubsub_peers.set(p.peers.len.int64)
proc send*(
p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool
p: PubSub,
peer: PubSubPeer,
msg: RPCMsg,
isHighPriority: bool,
useCustomConn: bool = false,
) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to the specified remote peer in the PubSub network.
##
@@ -200,13 +205,14 @@ proc send*(
## priority messages have been sent.
trace "sending pubsub message to peer", peer, payload = shortLog(msg)
peer.send(msg, p.anonymize, isHighPriority)
peer.send(msg, p.anonymize, isHighPriority, useCustomConn)
proc broadcast*(
p: PubSub,
sendPeers: auto, # Iterable[PubSubPeer]
msg: RPCMsg,
isHighPriority: bool,
useCustomConn: bool = false,
) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to a specified group of peers in the PubSub network.
##
@@ -261,12 +267,12 @@ proc broadcast*(
if anyIt(sendPeers, it.hasObservers):
for peer in sendPeers:
p.send(peer, msg, isHighPriority)
p.send(peer, msg, isHighPriority, useCustomConn)
else:
# Fast path that only encodes message once
let encoded = encodeRpcMsg(msg, p.anonymize)
for peer in sendPeers:
asyncSpawn peer.sendEncoded(encoded, isHighPriority)
asyncSpawn peer.sendEncoded(encoded, isHighPriority, useCustomConn)
proc sendSubs*(
p: PubSub, peer: PubSubPeer, topics: openArray[string], subscribe: bool
@@ -373,8 +379,14 @@ method getOrCreatePeer*(
p.onPubSubPeerEvent(peer, event)
# create new pubsub peer
let pubSubPeer =
PubSubPeer.new(peerId, getConn, onEvent, protoNegotiated, p.maxMessageSize)
let pubSubPeer = PubSubPeer.new(
peerId,
getConn,
onEvent,
protoNegotiated,
p.maxMessageSize,
customConnCallbacks = p.customConnCallbacks,
)
debug "created new pubsub peer", peerId
p.peers[peerId] = pubSubPeer
@@ -558,7 +570,7 @@ proc subscribe*(p: PubSub, topic: string, handler: TopicHandler) {.public.} =
p.updateTopicMetrics(topic)
method publish*(
p: PubSub, topic: string, data: seq[byte]
p: PubSub, topic: string, data: seq[byte], useCustomConn: bool = false
): Future[int] {.base, async: (raises: []), public.} =
## publish to a ``topic``
##
@@ -648,6 +660,8 @@ proc init*[PubParams: object | bool](
maxMessageSize: int = 1024 * 1024,
rng: ref HmacDrbgContext = newRng(),
parameters: PubParams = false,
customConnCallbacks: Option[CustomConnectionCallbacks] =
none(CustomConnectionCallbacks),
): P {.raises: [InitializationError], public.} =
let pubsub =
when PubParams is bool:
@@ -663,6 +677,7 @@ proc init*[PubParams: object | bool](
maxMessageSize: maxMessageSize,
rng: rng,
topicsHigh: int.high,
customConnCallbacks: customConnCallbacks,
)
else:
P(
@@ -678,6 +693,7 @@ proc init*[PubParams: object | bool](
maxMessageSize: maxMessageSize,
rng: rng,
topicsHigh: int.high,
customConnCallbacks: customConnCallbacks,
)
proc peerEventHandler(

View File

@@ -95,6 +95,21 @@ type
# Task for processing non-priority message queue.
sendNonPriorityTask: Future[void]
CustomConnCreationProc* = proc(
destAddr: Option[MultiAddress], destPeerId: PeerId, codec: string
): Connection {.gcsafe, raises: [].}
CustomPeerSelectionProc* = proc(
allPeers: HashSet[PubSubPeer],
directPeers: HashSet[PubSubPeer],
meshPeers: HashSet[PubSubPeer],
fanoutPeers: HashSet[PubSubPeer],
): HashSet[PubSubPeer] {.gcsafe, raises: [].}
CustomConnectionCallbacks* = object
customConnCreationCB*: CustomConnCreationProc
customPeerSelectionCB*: CustomPeerSelectionProc
PubSubPeer* = ref object of RootObj
getConn*: GetConn # callback to establish a new send connection
onEvent*: OnEvent # Connectivity updates for peer
@@ -123,6 +138,7 @@ type
maxNumElementsInNonPriorityQueue*: int
# The max number of elements allowed in the non-priority queue.
disconnected: bool
customConnCallbacks*: Option[CustomConnectionCallbacks]
RPCHandler* =
proc(peer: PubSubPeer, data: seq[byte]): Future[void] {.async: (raises: []).}
@@ -214,10 +230,10 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async: (raises: []).} =
conn, peer = p, closed = conn.closed, description = exc.msg
finally:
await conn.close()
except CancelledError:
except CancelledError as e:
# This is a top-level procedure which runs as a separate task, so it
# does not need to propagate CancelledError.
trace "Unexpected cancellation in PubSubPeer.handle"
trace "Unexpected cancellation in PubSubPeer.handle", description = e.msg
finally:
debug "exiting pubsub read loop", conn, peer = p, closed = conn.closed
@@ -250,7 +266,7 @@ proc connectOnce(
await p.getConn().wait(5.seconds)
except AsyncTimeoutError as error:
trace "getConn timed out", description = error.msg
raise (ref LPError)(msg: "Cannot establish send connection")
raise (ref LPError)(msg: "Cannot establish send connection: " & error.msg)
# When the send channel goes up, subscriptions need to be sent to the
# remote peer - if we had multiple channels up and one goes down, all
@@ -356,21 +372,43 @@ proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async: (raises: [CancelledErro
trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
await sendMsgContinue(conn, conn.writeLp(msg))
proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] {.async: (raises: []).} =
if p.sendConn != nil and not p.sendConn.closed():
# Fast path that avoids copying msg (which happens for {.async.})
let conn = p.sendConn
proc sendMsg(
p: PubSubPeer, msg: seq[byte], useCustomConn: bool = false
): Future[void] {.async: (raises: []).} =
type ConnectionType = enum
ctCustom
ctSend
ctSlow
trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
var slowPath = false
let (conn, connType) =
if useCustomConn and p.customConnCallbacks.isSome:
let address = p.address
(
p.customConnCallbacks.get().customConnCreationCB(address, p.peerId, p.codec),
ctCustom,
)
elif p.sendConn != nil and not p.sendConn.closed():
(p.sendConn, ctSend)
else:
slowPath = true
(nil, ctSlow)
if not slowPath:
trace "sending encoded msg to peer",
conntype = $connType, conn = conn, encoded = shortLog(msg)
let f = conn.writeLp(msg)
if not f.completed():
sendMsgContinue(conn, f)
else:
f
else:
trace "sending encoded msg to peer via slow path"
sendMsgSlow(p, msg)
proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[void] =
proc sendEncoded*(
p: PubSubPeer, msg: seq[byte], isHighPriority: bool, useCustomConn: bool = false
): Future[void] =
## Asynchronously sends an encoded message to a specified `PubSubPeer`.
##
## Parameters:
@@ -399,7 +437,7 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[v
maxSize = p.maxMessageSize, msgSize = msg.len
Future[void].completed()
elif isHighPriority or emptyQueues:
let f = p.sendMsg(msg)
let f = p.sendMsg(msg, useCustomConn)
if not f.finished:
p.rpcmessagequeue.sendPriorityQueue.addLast(f)
when defined(pubsubpeer_queue_metrics):
@@ -458,7 +496,11 @@ iterator splitRPCMsg(
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
proc send*(
p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool
p: PubSubPeer,
msg: RPCMsg,
anonymize: bool,
isHighPriority: bool,
useCustomConn: bool = false,
) {.raises: [].} =
## Asynchronously sends an `RPCMsg` to a specified `PubSubPeer` with an option for anonymization.
##
@@ -489,11 +531,11 @@ proc send*(
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority)
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority, useCustomConn)
else:
# If the message size is within limits, send it as is
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
asyncSpawn p.sendEncoded(encoded, isHighPriority)
asyncSpawn p.sendEncoded(encoded, isHighPriority, useCustomConn)
proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
for sentIHave in p.sentIHaves.mitems():
@@ -552,6 +594,8 @@ proc new*(
maxMessageSize: int,
maxNumElementsInNonPriorityQueue: int = DefaultMaxNumElementsInNonPriorityQueue,
overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket),
customConnCallbacks: Option[CustomConnectionCallbacks] =
none(CustomConnectionCallbacks),
): T =
result = T(
getConn: getConn,
@@ -563,6 +607,7 @@ proc new*(
overheadRateLimitOpt: overheadRateLimitOpt,
rpcmessagequeue: RpcMessageQueue.new(),
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue,
customConnCallbacks: customConnCallbacks,
)
result.sentIHaves.addFirst(default(HashSet[MessageId]))
result.iDontWants.addFirst(default(HashSet[SaltedId]))
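
A sketch of wiring up the new hooks (e.g. for Mix-style routing); the callback bodies are placeholders and the pubsub/connection modules are assumed to be imported, only the signatures and the useCustomConn flag come from this changeset:

import std/[options, sets]

proc mixConnCreator(
    destAddr: Option[MultiAddress], destPeerId: PeerId, codec: string
): Connection {.gcsafe, raises: [].} =
  # placeholder: return a Connection obtained from the custom transport
  nil

proc mixPeerSelector(
    allPeers, directPeers, meshPeers, fanoutPeers: HashSet[PubSubPeer]
): HashSet[PubSubPeer] {.gcsafe, raises: [].} =
  # placeholder policy: restrict publishing to mesh peers only
  meshPeers

let callbacks = CustomConnectionCallbacks(
  customConnCreationCB: mixConnCreator, customPeerSelectionCB: mixPeerSelector
)

# pass customConnCallbacks = some(callbacks) when constructing GossipSub, then
# route an individual publish over the custom path:
# discard await gossipSub.publish("topic", data, useCustomConn = true)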

View File

@@ -419,8 +419,8 @@ proc save(
)
rdv.namespaces[nsSalted].add(rdv.registered.high)
# rdv.registerEvent.fire()
except KeyError:
doAssert false, "Should have key"
except KeyError as e:
doAssert false, "Should have key: " & e.msg
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
trace "Received Register", peerId = conn.peerId, ns = r.ns

View File

@@ -110,8 +110,8 @@ proc handleConn(
fut2 = sconn.join()
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(fut1, fut2)
except ValueError:
raiseAssert("Futures list is not empty")
except ValueError as e:
raiseAssert("Futures list is not empty: " & e.msg)
# at least one join() completed, cancel pending one, if any
if not fut1.finished:
await fut1.cancelAndWait()
@@ -182,14 +182,14 @@ method readOnce*(
except LPStreamEOFError as err:
s.isEof = true
await s.close()
raise err
raise newException(LPStreamEOFError, "Secure connection EOF: " & err.msg, err)
except CancelledError as exc:
raise exc
except LPStreamError as err:
debug "Error while reading message from secure connection, closing.",
error = err.name, message = err.msg, connection = s
await s.close()
raise err
raise newException(LPStreamError, "Secure connection read error: " & err.msg, err)
var p = cast[ptr UncheckedArray[byte]](pbytes)
return s.buf.consumeTo(toOpenArray(p, 0, nbytes - 1))

View File

@@ -55,7 +55,7 @@ proc tryStartingDirectConn(
if not isRelayed.get(false) and address.isPublicMA():
return await tryConnect(address)
except CatchableError as err:
debug "Failed to create direct connection.", err = err.msg
debug "Failed to create direct connection.", description = err.msg
continue
return false
@@ -91,7 +91,7 @@ proc newConnectedPeerHandler(
except CancelledError as err:
raise err
except CatchableError as err:
debug "Hole punching failed during dcutr", err = err.msg
debug "Hole punching failed during dcutr", description = err.msg
method setup*(
self: HPService, switch: Switch
@@ -104,7 +104,7 @@ method setup*(
let dcutrProto = Dcutr.new(switch)
switch.mount(dcutrProto)
except LPError as err:
error "Failed to mount Dcutr", err = err.msg
error "Failed to mount Dcutr", description = err.msg
self.newConnectedPeerHandler = proc(
peerId: PeerId, event: PeerEvent

View File

@@ -199,8 +199,10 @@ method closeImpl*(s: BufferStream): Future[void] {.async: (raises: [], raw: true
elif s.pushing:
if not s.readQueue.empty():
discard s.readQueue.popFirstNoWait()
except AsyncQueueFullError, AsyncQueueEmptyError:
raiseAssert(getCurrentExceptionMsg())
except AsyncQueueFullError as e:
raiseAssert("closeImpl failed queue full: " & e.msg)
except AsyncQueueEmptyError as e:
raiseAssert("closeImpl failed queue empty: " & e.msg)
trace "Closed BufferStream", s

View File

@@ -34,8 +34,6 @@ when defined(libp2p_agents_metrics):
declareCounter libp2p_peers_traffic_read, "incoming traffic", labels = ["agent"]
declareCounter libp2p_peers_traffic_write, "outgoing traffic", labels = ["agent"]
declareCounter libp2p_network_bytes, "total traffic", labels = ["direction"]
func shortLog*(conn: ChronosStream): auto =
try:
if conn == nil:

View File

@@ -52,6 +52,8 @@ func shortLog*(conn: Connection): string =
chronicles.formatIt(Connection):
shortLog(it)
declarePublicCounter libp2p_network_bytes, "total traffic", labels = ["direction"]
method initStream*(s: Connection) =
if s.objName.len == 0:
s.objName = ConnectionTrackerName

View File

@@ -113,9 +113,9 @@ method initStream*(s: LPStream) {.base.} =
trackCounter(s.objName)
trace "Stream created", s, objName = s.objName, dir = $s.dir
proc join*(
method join*(
s: LPStream
): Future[void] {.async: (raises: [CancelledError], raw: true), public.} =
): Future[void] {.base, async: (raises: [CancelledError], raw: true), public.} =
## Wait for the stream to be closed
s.closeEvent.wait()
@@ -135,9 +135,9 @@ method readOnce*(
## available
raiseAssert("[LPStream.readOnce] abstract method not implemented!")
proc readExactly*(
method readExactly*(
s: LPStream, pbytes: pointer, nbytes: int
): Future[void] {.async: (raises: [CancelledError, LPStreamError]), public.} =
): Future[void] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
## Waits for `nbytes` to be available, then reads
## them and returns them
if s.atEof:
@@ -171,9 +171,9 @@ proc readExactly*(
trace "couldn't read all bytes, incomplete data", s, nbytes, read
raise newLPStreamIncompleteError()
proc readLine*(
method readLine*(
s: LPStream, limit = 0, sep = "\r\n"
): Future[string] {.async: (raises: [CancelledError, LPStreamError]), public.} =
): Future[string] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
## Reads until up to `limit` bytes have been read, or a `sep` is found
# TODO replace with something that exploits buffering better
var lim = if limit <= 0: -1 else: limit
@@ -199,9 +199,9 @@ proc readLine*(
if len(result) == lim:
break
proc readVarint*(
method readVarint*(
conn: LPStream
): Future[uint64] {.async: (raises: [CancelledError, LPStreamError]), public.} =
): Future[uint64] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
var buffer: array[10, byte]
for i in 0 ..< len(buffer):
@@ -218,9 +218,9 @@ proc readVarint*(
if true: # can't end with a raise apparently
raise (ref InvalidVarintError)(msg: "Cannot parse varint")
proc readLp*(
method readLp*(
s: LPStream, maxSize: int
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]), public.} =
): Future[seq[byte]] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
## read length prefixed msg, with the length encoded as a varint
let
length = await s.readVarint()
@@ -244,9 +244,11 @@ method write*(
# Write `msg` to stream, waiting for the write to be finished
raiseAssert("[LPStream.write] abstract method not implemented!")
proc writeLp*(
method writeLp*(
s: LPStream, msg: openArray[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
): Future[void] {.
base, async: (raises: [CancelledError, LPStreamError], raw: true), public
.} =
## Write `msg` with a varint-encoded length prefix
let vbytes = PB.toBytes(msg.len().uint64)
var buf = newSeqUninitialized[byte](msg.len() + vbytes.len)
@@ -254,9 +256,11 @@ proc writeLp*(
buf[vbytes.len ..< buf.len] = msg
s.write(buf)
proc writeLp*(
method writeLp*(
s: LPStream, msg: string
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
): Future[void] {.
base, async: (raises: [CancelledError, LPStreamError], raw: true), public
.} =
writeLp(s, msg.toOpenArrayByte(0, msg.high))
proc write*(
@@ -324,7 +328,7 @@ proc closeWithEOF*(s: LPStream): Future[void] {.async: (raises: []), public.} =
debug "Unexpected bytes while waiting for EOF", s
except CancelledError:
discard
except LPStreamEOFError:
trace "Expected EOF came", s
except LPStreamEOFError as e:
trace "Expected EOF came", s, description = e.msg
except LPStreamError as exc:
debug "Unexpected error while waiting for EOF", s, description = exc.msg

View File

@@ -233,7 +233,7 @@ proc upgrader(
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(UpgradeError, e.msg, e)
raise newException(UpgradeError, "catchable error upgrader: " & e.msg, e)
proc upgradeMonitor(
switch: Switch, trans: Transport, conn: Connection, upgrades: AsyncSemaphore
@@ -275,7 +275,8 @@ proc accept(s: Switch, transport: Transport) {.async: (raises: []).} =
await transport.accept()
except CatchableError as exc:
slot.release()
raise exc
raise
newException(CatchableError, "failed to accept connection: " & exc.msg, exc)
slot.trackConnection(conn)
if isNil(conn):
# A nil connection means that we might have hit a

View File

@@ -1,7 +1,8 @@
import std/sequtils
import pkg/chronos
import pkg/chronicles
import pkg/quic
import chronos
import chronicles
import metrics
import quic
import results
import ../multiaddress
import ../multicodec
@@ -58,6 +59,7 @@ method readOnce*(
result = min(nbytes, stream.cached.len)
copyMem(pbytes, addr stream.cached[0], result)
stream.cached = stream.cached[result ..^ 1]
libp2p_network_bytes.inc(result.int64, labelValues = ["in"])
except CatchableError as exc:
raise newLPStreamEOFError()
@@ -66,6 +68,7 @@ method write*(
stream: QuicStream, bytes: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
mapExceptions(await stream.stream.write(bytes))
libp2p_network_bytes.inc(bytes.len.int64, labelValues = ["out"])
{.pop.}
@@ -98,7 +101,7 @@ proc getStream*(
return QuicStream.new(stream, session.observedAddr, session.peerId)
except CatchableError as exc:
# TODO: incomingStream is using {.async.} with no raises
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
raise (ref QuicTransportError)(msg: "error in getStream: " & exc.msg, parent: exc)
method getWrapped*(self: QuicSession): P2PConnection =
nil
@@ -116,7 +119,7 @@ method newStream*(
try:
return await m.quicSession.getStream(Direction.Out)
except CatchableError as exc:
raise newException(MuxerError, exc.msg, exc)
raise newException(MuxerError, "error in newStream: " & exc.msg, exc)
proc handleStream(m: QuicMuxer, chann: QuicStream) {.async: (raises: []).} =
## call the muxer stream handler for this channel
@@ -233,11 +236,16 @@ method start*(
except QuicConfigError as exc:
doAssert false, "invalid quic setup: " & $exc.msg
except TLSCertificateError as exc:
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
raise (ref QuicTransportError)(
msg: "tlscert error in quic start: " & exc.msg, parent: exc
)
except QuicError as exc:
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
raise
(ref QuicTransportError)(msg: "quicerror in quic start: " & exc.msg, parent: exc)
except TransportOsError as exc:
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
raise (ref QuicTransportError)(
msg: "transport error in quic start: " & exc.msg, parent: exc
)
self.running = true
method stop*(transport: QuicTransport) {.async: (raises: []).} =
@@ -315,7 +323,7 @@ method dial*(
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(QuicTransportDialError, e.msg, e)
raise newException(QuicTransportDialError, "error in quic dial:" & e.msg, e)
method upgrade*(
self: QuicTransport, conn: P2PConnection, peerId: Opt[PeerId]

View File
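Note: the QUIC hunks above account for traffic by incrementing a direction-labelled counter on every read and write. A hedged sketch of how such a counter could be declared and bumped with nim-metrics; the counter name below is illustrative, and the declarePublicCounter/labels form is an assumption based on how labelled metrics are commonly declared in this codebase (the inc/labelValues call matches the diff):

import metrics

declarePublicCounter example_network_bytes,
  "example traffic counter in bytes", labels = ["direction"]

proc countRead(n: int) =
  # mirrors readOnce above: account for inbound bytes
  example_network_bytes.inc(n.int64, labelValues = ["in"])

proc countWrite(n: int) =
  # mirrors write above: account for outbound bytes
  example_network_bytes.inc(n.int64, labelValues = ["out"])

when isMainModule:
  countRead(128)
  countWrite(64)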

@@ -133,7 +133,9 @@ method start*(
try:
createStreamServer(ta, flags = self.flags)
except common.TransportError as exc:
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
raise (ref TcpTransportError)(
msg: "transport error in TcpTransport start:" & exc.msg, parent: exc
)
self.servers &= server
@@ -250,9 +252,13 @@ method accept*(
except TransportUseClosedError as exc:
raise newTransportClosedError(exc)
except TransportOsError as exc:
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
raise (ref TcpTransportError)(
msg: "TransportOs error in accept:" & exc.msg, parent: exc
)
except common.TransportError as exc: # Needed for chronos 4.0.0 support
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
raise (ref TcpTransportError)(
msg: "TransportError in accept: " & exc.msg, parent: exc
)
except CancelledError as exc:
cancelAcceptFuts()
raise exc
@@ -302,7 +308,8 @@ method dial*(
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
raise
(ref TcpTransportError)(msg: "TcpTransport dial error: " & exc.msg, parent: exc)
# If `stop` is called after `connect` but before `await` returns, we might
# end up with a race condition where `stop` returns but not all connections
@@ -318,7 +325,7 @@ method dial*(
MultiAddress.init(transp.remoteAddress).expect("remote address is valid")
except TransportOsError as exc:
safeCloseWait(transp)
raise (ref TcpTransportError)(msg: exc.msg)
raise (ref TcpTransportError)(msg: "MultiAddress.init error in dial: " & exc.msg)
self.connHandler(transp, Opt.some(observedAddr), Direction.Out)

View File

@@ -118,8 +118,8 @@ proc makeASN1Time(time: Time): string {.inline.} =
try:
let f = initTimeFormat("yyyyMMddhhmmss")
format(time.utc(), f)
except TimeFormatParseError:
raiseAssert "time format is const and checked with test"
except TimeFormatParseError as e:
raiseAssert "time format is const and checked with test: " & e.msg
return str & "Z"
@@ -278,7 +278,7 @@ proc parse*(
validTo = parseCertTime($certParsed.valid_to)
except TimeParseError as e:
raise newException(
CertificateParsingError, "Failed to parse certificate validity time, " & $e.msg
CertificateParsingError, "Failed to parse certificate validity time: " & $e.msg, e
)
P2pCertificate(

View File

@@ -243,7 +243,9 @@ method dial*(
raise e
except CatchableError as e:
safeCloseWait(transp)
raise newException(transport.TransportDialError, e.msg, e)
raise newException(
transport.TransportDialError, "error in dial TorTransport: " & e.msg, e
)
method start*(
self: TorTransport, addrs: seq[MultiAddress]

View File

@@ -160,7 +160,9 @@ method start*(
else:
HttpServer.create(address, handshakeTimeout = self.handshakeTimeout)
except CatchableError as exc:
raise (ref WsTransportError)(msg: exc.msg, parent: exc)
raise (ref WsTransportError)(
msg: "error in WsTransport start: " & exc.msg, parent: exc
)
self.httpservers &= httpserver
@@ -309,7 +311,9 @@ method accept*(
debug "OS Error", description = exc.msg
except CatchableError as exc:
info "Unexpected error accepting connection", description = exc.msg
raise newException(transport.TransportError, exc.msg, exc)
raise newException(
transport.TransportError, "Error in WsTransport accept: " & exc.msg, exc
)
method dial*(
self: WsTransport,
@@ -338,7 +342,9 @@ method dial*(
raise e
except CatchableError as e:
safeClose(transp)
raise newException(transport.TransportDialError, e.msg, e)
raise newException(
transport.TransportDialError, "error in WsTransport dial: " & e.msg, e
)
method handles*(t: WsTransport, address: MultiAddress): bool {.gcsafe, raises: [].} =
if procCall Transport(t).handles(address):

View File

@@ -54,8 +54,9 @@ when defined(libp2p_agents_metrics):
proc safeToLowerAscii*(s: string): Result[string, cstring] =
try:
ok(s.toLowerAscii())
except CatchableError:
err("toLowerAscii failed")
except CatchableError as e:
let errMsg = "toLowerAscii failed: " & e.msg
err(errMsg.cstring)
const
KnownLibP2PAgents* {.strdefine.} = "nim-libp2p"

View File
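Note: the change above converts a raising call into a Result so callers stay exception-free. A small standalone sketch of the same ok/err pattern using nim-results; safeParseInt and the use of parseInt are illustrative stand-ins, not part of this diff:

import std/strutils
import results

proc safeParseInt(s: string): Result[int, string] =
  try:
    ok(parseInt(s))
  except ValueError as e:
    err("parseInt failed: " & e.msg)

when isMainModule:
  let good = safeParseInt("42")
  let bad = safeParseInt("nope")
  if good.isOk:
    echo good.get()  # 42
  if bad.isErr:
    echo bad.error() # parseInt failed: ...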

@@ -27,9 +27,9 @@ proc anyCompleted*[T](
if raceFut.completed:
return raceFut
requests.del(requests.find(raceFut))
except ValueError:
except ValueError as e:
raise newException(
AllFuturesFailedError, "None of the futures completed successfully"
AllFuturesFailedError, "None of the futures completed successfully: " & e.msg, e
)
except CancelledError as exc:
raise exc

View File

@@ -108,7 +108,9 @@ proc createStreamServer*[T](
): StreamServer {.raises: [LPError, MaInvalidAddress].} =
## Create new TCP stream server which bounds to ``ma`` address.
if not (RTRANSPMA.match(ma)):
raise newException(MaInvalidAddress, "Incorrect or unsupported address!")
raise newException(
MaInvalidAddress, "Incorrect or unsupported address in createStreamServer"
)
try:
return createStreamServer(
@@ -123,7 +125,7 @@ proc createStreamServer*[T](
init,
)
except CatchableError as exc:
raise newException(LPError, exc.msg)
raise newException(LPError, "failed createStreamServer: " & exc.msg, exc)
proc createStreamServer*[T](
ma: MultiAddress,
@@ -146,7 +148,7 @@ proc createStreamServer*[T](
initTAddress(ma).tryGet(), flags, udata, sock, backlog, bufferSize, child, init
)
except CatchableError as exc:
raise newException(LPError, exc.msg)
raise newException(LPError, "failed simpler createStreamServer: " & exc.msg, exc)
proc createAsyncSocket*(ma: MultiAddress): AsyncFD {.raises: [ValueError, LPError].} =
## Create new asynchronous socket using MultiAddress' ``ma`` socket type and
@@ -178,7 +180,9 @@ proc createAsyncSocket*(ma: MultiAddress): AsyncFD {.raises: [ValueError, LPErro
try:
createAsyncSocket(address.getDomain(), socktype, protocol)
except CatchableError as exc:
raise newException(LPError, exc.msg)
raise newException(
LPError, "Convert exception to LPError in createAsyncSocket: " & exc.msg, exc
)
proc bindAsyncSocket*(sock: AsyncFD, ma: MultiAddress): bool {.raises: [LPError].} =
## Bind socket ``sock`` to MultiAddress ``ma``.

View File

@@ -9,91 +9,115 @@
set -e
CACHE_DIR="$1" # optional parameter pointing to a CI cache dir.
LIBP2P_COMMIT="124530a3" # Tags maye be used as well
[[ -n "$2" ]] && LIBP2P_COMMIT="$2" # allow overriding it on the command line
force=false
verbose=false
CACHE_DIR=""
LIBP2P_COMMIT="124530a3"
while [[ "$#" -gt 0 ]]; do
case "$1" in
-f|--force) force=true ;;
-v|--verbose) verbose=true ;;
-h|--help)
echo "Usage: $0 [-f|--force] [-v|--verbose] [CACHE_DIR] [COMMIT]"
exit 0
;;
*)
# First non-option is CACHE_DIR, second is LIBP2P_COMMIT
if [[ -z "$CACHE_DIR" ]]; then
CACHE_DIR="$1"
elif [[ "$LIBP2P_COMMIT" == "124530a3" ]]; then
LIBP2P_COMMIT="$1"
else
echo "Unknown argument: $1"
exit 1
fi
;;
esac
shift
done
SUBREPO_DIR="vendor/go/src/github.com/libp2p/go-libp2p-daemon"
if [[ ! -e "$SUBREPO_DIR" ]]; then
# we're probably in nim-libp2p's CI
SUBREPO_DIR="go-libp2p-daemon"
rm -rf "$SUBREPO_DIR"
git clone -q https://github.com/libp2p/go-libp2p-daemon
cd "$SUBREPO_DIR"
git checkout -q $LIBP2P_COMMIT
cd ..
SUBREPO_DIR="go-libp2p-daemon"
rm -rf "$SUBREPO_DIR"
git clone -q https://github.com/libp2p/go-libp2p-daemon
cd "$SUBREPO_DIR"
git checkout -q "$LIBP2P_COMMIT"
cd ..
fi
## env vars
# verbosity level
[[ -z "$V" ]] && V=0
[[ -z "$BUILD_MSG" ]] && BUILD_MSG="Building p2pd ${LIBP2P_COMMIT}"
# Windows detection
if uname | grep -qiE "mingw|msys"; then
EXE_SUFFIX=".exe"
# otherwise it fails in AppVeyor due to https://github.com/git-for-windows/git/issues/2495
GIT_TIMESTAMP_ARG="--date=unix" # available since Git 2.9.4
EXE_SUFFIX=".exe"
# otherwise it fails in AppVeyor due to https://github.com/git-for-windows/git/issues/2495
GIT_TIMESTAMP_ARG="--date=unix" # available since Git 2.9.4
else
EXE_SUFFIX=""
GIT_TIMESTAMP_ARG="--date=format-local:%s" # available since Git 2.7.0
EXE_SUFFIX=""
GIT_TIMESTAMP_ARG="--date=format-local:%s" # available since Git 2.7.0
fi
TARGET_DIR="$(go env GOPATH)/bin"
TARGET_BINARY="${TARGET_DIR}/p2pd${EXE_SUFFIX}"
target_needs_rebuilding() {
REBUILD=0
NO_REBUILD=1
REBUILD=0
NO_REBUILD=1
if [[ -n "$CACHE_DIR" && -e "${CACHE_DIR}/p2pd${EXE_SUFFIX}" ]]; then
mkdir -p "${TARGET_DIR}"
cp -a "$CACHE_DIR"/* "${TARGET_DIR}/"
fi
if [[ -n "$CACHE_DIR" && -e "${CACHE_DIR}/p2pd${EXE_SUFFIX}" ]]; then
mkdir -p "${TARGET_DIR}"
cp -a "$CACHE_DIR"/* "${TARGET_DIR}/"
fi
# compare the built commit's timestamp to the date of the last commit (keep in mind that Git doesn't preserve file timestamps)
if [[ -e "${TARGET_DIR}/timestamp" && $(cat "${TARGET_DIR}/timestamp") -eq $(cd "$SUBREPO_DIR"; git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG}) ]]; then
return $NO_REBUILD
else
return $REBUILD
fi
# compare the built commit's timestamp to the date of the last commit (keep in mind that Git doesn't preserve file timestamps)
if [[ -e "${TARGET_DIR}/timestamp" && $(cat "${TARGET_DIR}/timestamp") -eq $(cd "$SUBREPO_DIR"; git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG}) ]]; then
return $NO_REBUILD
else
return $REBUILD
fi
}
build_target() {
echo -e "$BUILD_MSG"
[[ "$V" == "0" ]] && exec &>/dev/null
echo -e "$BUILD_MSG"
pushd "$SUBREPO_DIR"
# Go module downloads can fail randomly in CI VMs, so retry them a few times
MAX_RETRIES=5
CURR=0
while [[ $CURR -lt $MAX_RETRIES ]]; do
FAILED=0
go get ./... && break || FAILED=1
CURR=$(( CURR + 1 ))
echo "retry #${CURR}"
done
if [[ $FAILED == 1 ]]; then
echo "Error: still fails after retrying ${MAX_RETRIES} times."
exit 1
fi
go install ./...
pushd "$SUBREPO_DIR"
# Go module downloads can fail randomly in CI VMs, so retry them a few times
MAX_RETRIES=5
CURR=0
while [[ $CURR -lt $MAX_RETRIES ]]; do
FAILED=0
go get ./... && break || FAILED=1
CURR=$(( CURR + 1 ))
if $verbose; then
echo "retry #${CURR}"
fi
done
if [[ $FAILED == 1 ]]; then
echo "Error: still fails after retrying ${MAX_RETRIES} times."
exit 1
fi
go install ./...
# record the last commit's timestamp
git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG} > "${TARGET_DIR}/timestamp"
# record the last commit's timestamp
git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG} > "${TARGET_DIR}/timestamp"
popd
popd
# update the CI cache
if [[ -n "$CACHE_DIR" ]]; then
rm -rf "$CACHE_DIR"
mkdir "$CACHE_DIR"
cp -a "$TARGET_DIR"/* "$CACHE_DIR"/
fi
echo "Binary built successfully."
# update the CI cache
if [[ -n "$CACHE_DIR" ]]; then
rm -rf "$CACHE_DIR"
mkdir "$CACHE_DIR"
cp -a "$TARGET_DIR"/* "$CACHE_DIR"/
fi
echo "Binary built successfully: $TARGET_BINARY"
}
if target_needs_rebuilding; then
build_target
if $force || target_needs_rebuilding; then
build_target
else
echo "No rebuild needed."
echo "No rebuild needed."
fi

View File

@@ -357,10 +357,7 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
.withAddress(wsAddress)
.withRng(crypto.newRng())
.withMplex()
.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
WsTransport.new(upgr)
)
.withWsTransport()
.withNoise()
.build()

View File

@@ -49,8 +49,10 @@ template checkTrackers*() =
{.push warning[BareExcept]: off.}
try:
GC_fullCollect()
except CatchableError:
discard
except Defect as exc:
raise exc # Reraise to maintain call stack
except Exception:
raiseAssert "Unexpected exception during GC collection"
when defined(nimHasWarnBareExcept):
{.pop.}
@@ -92,7 +94,9 @@ proc new*(T: typedesc[TestBufferStream], writeHandler: WriteHandler): T =
testBufferStream.initStream()
testBufferStream
macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
macro checkUntilTimeoutCustom*(
timeout: Duration, sleepInterval: Duration, code: untyped
): untyped =
## Periodically checks a given condition until it is true or a timeout occurs.
##
## `code`: untyped - A condition expression that should eventually evaluate to true.
@@ -101,17 +105,17 @@ macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
## Examples:
## ```nim
## # Example 1:
## asyncTest "checkUntilCustomTimeout should pass if the condition is true":
## asyncTest "checkUntilTimeoutCustom should pass if the condition is true":
## let a = 2
## let b = 2
## checkUntilCustomTimeout(2.seconds):
## checkUntilTimeoutCustom(2.seconds):
## a == b
##
## # Example 2: Multiple conditions
## asyncTest "checkUntilCustomTimeout should pass if the conditions are true":
## asyncTest "checkUntilTimeoutCustom should pass if the conditions are true":
## let a = 2
## let b = 2
## checkUntilCustomTimeout(5.seconds)::
## checkUntilTimeoutCustom(5.seconds)::
## a == b
## a == 2
## b == 1
@@ -145,12 +149,12 @@ macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
if `combinedBoolExpr`:
return
else:
await sleepAsync(100.millis)
await sleepAsync(`sleepInterval`)
await checkExpiringInternal()
macro checkUntilTimeout*(code: untyped): untyped =
## Same as `checkUntilCustomTimeout` but with a default timeout of 10 seconds.
## Same as `checkUntilTimeoutCustom` but with a default timeout of 2s with 50ms interval.
##
## Examples:
## ```nim
@@ -171,7 +175,7 @@ macro checkUntilTimeout*(code: untyped): untyped =
## b == 1
## ```
result = quote:
checkUntilCustomTimeout(10.seconds, `code`)
checkUntilTimeoutCustom(2.seconds, 50.milliseconds, `code`)
proc unorderedCompare*[T](a, b: seq[T]): bool =
if a == b:

View File
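Note: checkUntilTimeoutCustom above expands to a poll-and-sleep loop: re-evaluate the condition, sleep for sleepInterval, and give up once the timeout elapses. A standalone chronos sketch of that polling idea written as a plain proc; pollUntil and demo are illustrative names, not part of the test helpers:

import chronos

proc pollUntil(
    timeout, interval: Duration, cond: proc(): bool
): Future[bool] {.async.} =
  ## Re-check `cond` every `interval` until it holds or `timeout` elapses.
  let deadline = Moment.now() + timeout
  while Moment.now() < deadline:
    if cond():
      return true
    await sleepAsync(interval)
  return false

when isMainModule:
  var ready = false

  proc demo() {.async.} =
    let poller = pollUntil(2.seconds, 50.milliseconds, proc(): bool = ready)
    await sleepAsync(100.milliseconds)
    ready = true
    let done = await poller
    echo done # true

  waitFor demo()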

@@ -0,0 +1,142 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import unittest2
import ../../libp2p/protobuf/minprotobuf
import ../../libp2p/protocols/kademlia/protobuf
import ../../libp2p/multiaddress
import options
import results
suite "kademlia protobuffers":
const invalidType = uint32(999)
proc valFromResultOption[T](res: ProtoResult[Option[T]]): T =
assert res.isOk()
assert res.value().isSome()
return res.value().unsafeGet()
test "record encode/decode":
let rec = Record(
key: some(@[1'u8, 2, 3]),
value: some(@[4'u8, 5, 6]),
timeReceived: some("2025-05-12T12:00:00Z"),
)
let encoded = rec.encode()
let decoded = Record.decode(encoded).valFromResultOption
check:
decoded.key.get() == rec.key.get()
decoded.value.get() == rec.value.get()
decoded.timeReceived.get() == rec.timeReceived.get()
test "peer encode/decode":
let maddr = MultiAddress.init("/ip4/127.0.0.1/tcp/9000").tryGet()
let peer =
Peer(id: @[1'u8, 2, 3], addrs: @[maddr], connection: ConnectionType.connected)
let encoded = peer.encode()
var decoded = Peer.decode(initProtoBuffer(encoded.buffer)).valFromResultOption
check:
decoded == peer
test "message encode/decode roundtrip":
let maddr = MultiAddress.init("/ip4/10.0.0.1/tcp/4001").tryGet()
let peer = Peer(id: @[9'u8], addrs: @[maddr], connection: canConnect)
let r = Record(key: some(@[1'u8]), value: some(@[2'u8]), timeReceived: some("t"))
let msg = Message(
msgType: MessageType.findNode,
key: some(@[7'u8]),
record: some(r),
closerPeers: @[peer],
providerPeers: @[peer],
)
let encoded = msg.encode()
let decoded = Message.decode(encoded.buffer).valFromResultOption
check:
decoded == msg
test "decode record with missing fields":
var pb = initProtoBuffer()
# no fields written
let rec = Record.decode(pb).valFromResultOption
check:
rec.key.isNone()
rec.value.isNone()
rec.timeReceived.isNone()
test "decode peer with missing id (invalid)":
var pb = initProtoBuffer()
check:
Peer.decode(pb).isErr()
test "decode peer with invalid connection type":
var pb = initProtoBuffer()
pb.write(1, @[1'u8, 2, 3]) # id field
pb.write(3, invalidType) # bogus connection type
check:
Peer.decode(pb).isErr()
test "decode message with invalid msgType":
var pb = initProtoBuffer()
pb.write(1, invalidType) # invalid MessageType
check:
Message.decode(pb.buffer).isErr()
test "decode message with invalid peer in closerPeers":
let badPeerBuf = @[0'u8, 1, 2] # junk
var pb = initProtoBuffer()
pb.write(8, badPeerBuf) # closerPeers field
check:
Message.decode(pb.buffer).isErr()
test "decode message with invalid embedded record":
# encode junk data into field 3 (record)
var pb = initProtoBuffer()
pb.write(1, uint32(MessageType.putValue)) # valid msgType
pb.write(3, @[0x00'u8, 0xFF, 0xAB]) # broken protobuf for record
check:
Message.decode(pb.buffer).isErr()
test "decode message with empty embedded record":
var recordPb = initProtoBuffer() # no fields
var pb = initProtoBuffer()
pb.write(1, uint32(MessageType.getValue))
pb.write(3, recordPb.buffer)
let decoded = Message.decode(pb.buffer).valFromResultOption
check:
decoded.record.isSome()
decoded.record.get().key.isNone()
test "peer with empty addr list and no connection":
let peer = Peer(id: @[0x42'u8], addrs: @[], connection: ConnectionType.notConnected)
let encoded = peer.encode()
let decoded = Peer.decode(initProtoBuffer(encoded.buffer)).valFromResultOption
check:
decoded == peer
test "message with empty closer/provider peers":
let msg = Message(
msgType: MessageType.ping,
key: none[seq[byte]](),
record: none[Record](),
closerPeers: @[],
providerPeers: @[],
)
let encoded = msg.encode()
let decoded = Message.decode(encoded.buffer).valFromResultOption
check:
decoded == msg
test "peer with addr but missing id":
var pb = initProtoBuffer()
let maddr = MultiAddress.init("/ip4/1.2.3.4/tcp/1234").tryGet()
pb.write(2, maddr.data.buffer)
check:
Peer.decode(pb).isErr()

View File

@@ -310,5 +310,5 @@ suite "FloodSub":
check (await bigNode1[0].publish("foo", bigMessage)) > 0
checkUntilTimeout:
checkUntilTimeoutCustom(10.seconds, 100.milliseconds):
messageReceived == 1

View File

@@ -0,0 +1,543 @@
{.used.}
import std/[sequtils]
import stew/byteutils
import utils
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../helpers
suite "GossipSub Control Messages":
teardown:
checkTrackers()
asyncTest "handleIHave - peers with no budget should not request messages":
let topic = "foobar"
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.subscribe(topic, voidTopicHandler)
let peerId = randomPeerId()
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IHAVE message that contains the same message ID three times
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
# Given the peer has no budget to request messages
peer.iHaveBudget = 0
# When a peer makes an IHAVE request for a message that `gossipSub` has
let iwants = gossipSub.handleIHave(peer, @[msg])
# Then `gossipSub` should not generate an IWant message for the message,
check:
iwants.messageIDs.len == 0
gossipSub.mcache.msgs.len == 1
asyncTest "handleIHave - peers with budget should request messages":
let topic = "foobar"
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.subscribe(topic, voidTopicHandler)
let peerId = randomPeerId()
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IHAVE message that contains the same message ID three times
# If ids are repeated, only one request should be generated
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
# Given the budget is not 0 (because it's not been overridden)
check:
peer.iHaveBudget > 0
# When a peer makes an IHAVE request for a message that `gossipSub` does not have
let iwants = gossipSub.handleIHave(peer, @[msg])
# Then `gossipSub` should generate an IWant message for the message
check:
iwants.messageIDs.len == 1
gossipSub.mcache.msgs.len == 1
asyncTest "handleIWant - peers with budget should request messages":
let topic = "foobar"
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.subscribe(topic, voidTopicHandler)
let peerId = randomPeerId()
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IWANT message that contains the same message ID three times
# If ids are repeated, only one request should be generated
let msg = ControlIWant(messageIDs: @[id, id, id])
# When a peer makes an IWANT request for a message that `gossipSub` has
let messages = gossipSub.handleIWant(peer, @[msg])
# Then `gossipSub` should return the message
check:
messages.len == 1
gossipSub.mcache.msgs.len == 1
asyncTest "GRAFT messages correctly add peers to mesh":
# Given 2 nodes
let
topic = "foobar"
graftMessage = ControlMessage(graft: @[ControlGraft(topicID: topic)])
numberOfNodes = 2
# First part of the hack: Weird dValues so peers are not GRAFTed automatically
dValues = DValues(dLow: some(0), dHigh: some(0), d: some(0), dOut: some(-1))
nodes = generateNodes(
numberOfNodes, gossip = true, verifySignature = false, dValues = some(dValues)
)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And both subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Because of the hack-ish dValues, the peers are added to gossipsub but not GRAFTed to mesh
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# Stop both nodes in order to prevent GRAFT messages from being sent by the heartbeat
await n0.stop()
await n1.stop()
# Second part of the hack
# Set values so peers can be GRAFTed
let newDValues =
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(1)))
n0.parameters.applyDValues(newDValues)
n1.parameters.applyDValues(newDValues)
# When a GRAFT message is sent
let p0 = n1.getOrCreatePeer(n0.peerInfo.peerId, @[GossipSubCodec_12])
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
n1.broadcast(@[p0], RPCMsg(control: some(graftMessage)), isHighPriority = false)
checkUntilTimeout:
nodes.allIt(it.mesh.getOrDefault(topic).len == 1)
# Then the peers are GRAFTed
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
asyncTest "Received GRAFT for non-subscribed topic":
# Given 2 nodes
let
topic = "foo"
graftMessage = ControlMessage(graft: @[ControlGraft(topicID: topic)])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And only node0 subscribes to the topic
nodes[0].subscribe(topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When a GRAFT message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is not GRAFTed
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
asyncTest "PRUNE messages correctly removes peers from mesh":
# Given 2 nodes
let
topic = "foo"
backoff = 1
pruneMessage = ControlMessage(
prune: @[ControlPrune(topicID: topic, peers: @[], backoff: uint64(backoff))]
)
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And both subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When a PRUNE message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is PRUNEd
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When another PRUNE message is sent
let p0 = n1.getOrCreatePeer(n0.peerInfo.peerId, @[GossipSubCodec_12])
n1.broadcast(@[p0], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is PRUNEd
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
asyncTest "Received PRUNE for non-subscribed topic":
# Given 2 nodes
let
topic = "foo"
pruneMessage =
ControlMessage(prune: @[ControlPrune(topicID: topic, peers: @[], backoff: 1)])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And only node0 subscribes to the topic
n0.subscribe(topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When a PRUNE message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is not PRUNEd
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
asyncTest "IHAVE messages correctly advertise message ID to peers":
# Given 2 nodes
let
topic = "foo"
messageID = @[0'u8, 1, 2, 3]
ihaveMessage =
ControlMessage(ihave: @[ControlIHave(topicID: topic, messageIDs: @[messageID])])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# Given node1 has an IHAVE observer
var (receivedIHaves, checkForIHaves) = createCheckForIHave()
n1.addOnRecvObserver(checkForIHaves)
# And the nodes are connected
await connectNodesStar(nodes)
# And both subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
# When an IHAVE message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(ihaveMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer has the message ID
check:
receivedIHaves[0] == ControlIHave(topicID: topic, messageIDs: @[messageID])
asyncTest "IWANT messages correctly request messages by their IDs":
# Given 2 nodes
let
topic = "foo"
messageID = @[0'u8, 1, 2, 3]
iwantMessage = ControlMessage(iwant: @[ControlIWant(messageIDs: @[messageID])])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# Given node1 has an IWANT observer
var (receivedIWants, checkForIWants) = createCheckForIWant()
n1.addOnRecvObserver(checkForIWants)
# And the nodes are connected
await connectNodesStar(nodes)
# And both subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
# When an IWANT message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(iwantMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer has the message ID
check:
receivedIWants[0] == ControlIWant(messageIDs: @[messageID])
asyncTest "IHAVE for message not held by peer triggers IWANT response to sender":
# Given 2 nodes
let
topic = "foo"
messageID = @[0'u8, 1, 2, 3]
ihaveMessage =
ControlMessage(ihave: @[ControlIHave(topicID: topic, messageIDs: @[messageID])])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# Given node1 has an IWANT observer
var (receivedIWants, checkForIWants) = createCheckForIWant()
n0.addOnRecvObserver(checkForIWants)
# And the nodes are connected
await connectNodesStar(nodes)
# And both nodes subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When an IHAVE message is sent from node0
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(ihaveMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then node0 should receive an IWANT message from node1 (as node1 doesn't have the message)
check:
receivedIWants[0] == ControlIWant(messageIDs: @[messageID])
asyncTest "IDONTWANT":
# 3 nodes: A <=> B <=> C (A & C are NOT connected)
let
topic = "foobar"
nodes = generateNodes(3, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodes(nodes[0], nodes[1])
await connectNodes(nodes[1], nodes[2])
let (bFinished, handlerB) = createCompleteHandler()
nodes[0].subscribe(topic, voidTopicHandler)
nodes[1].subscribe(topic, handlerB)
nodes[2].subscribe(topic, voidTopicHandler)
await waitSubGraph(nodes, topic)
check:
nodes[2].mesh.peers(topic) == 1
# When we pre-emptively send a dontwant from C to B,
nodes[2].broadcast(
nodes[2].mesh[topic],
RPCMsg(
control: some(
ControlMessage(idontwant: @[ControlIWant(messageIDs: @[newSeq[byte](10)])])
)
),
isHighPriority = true,
)
# Then B doesn't relay the message to C.
checkUntilTimeout:
nodes[1].mesh.getOrDefault(topic).anyIt(it.iDontWants.anyIt(it.len == 1))
# When A sends a message to the topic
tryPublish await nodes[0].publish(topic, newSeq[byte](10000)), 1
discard await bFinished
# Then B sends IDONTWANT to C, but not A
checkUntilTimeout:
toSeq(nodes[2].mesh.getOrDefault(topic)).anyIt(it.iDontWants.anyIt(it.len == 1))
check:
toSeq(nodes[0].mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
asyncTest "IDONTWANT is broadcasted on publish":
# 2 nodes: A <=> B
let
topic = "foobar"
nodes =
generateNodes(2, gossip = true, sendIDontWantOnPublish = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodes(nodes[0], nodes[1])
nodes[0].subscribe(topic, voidTopicHandler)
nodes[1].subscribe(topic, voidTopicHandler)
await waitSubGraph(nodes, topic)
# When A sends a message to the topic
tryPublish await nodes[0].publish(topic, newSeq[byte](10000)), 1
# Then IDONTWANT is sent to B on publish
checkUntilTimeout:
nodes[1].mesh.getOrDefault(topic).anyIt(it.iDontWants.anyIt(it.len == 1))
asyncTest "IDONTWANT is sent only for 1.2":
# 3 nodes: A <=> B <=> C (A & C are NOT connected)
let
topic = "foobar"
nodeA = generateNodes(1, gossip = true).toGossipSub()[0]
nodeB = generateNodes(1, gossip = true).toGossipSub()[0]
nodeC = generateNodes(1, gossip = true, gossipSubVersion = GossipSubCodec_11)
.toGossipSub()[0]
startNodesAndDeferStop(@[nodeA, nodeB, nodeC])
await connectNodes(nodeA, nodeB)
await connectNodes(nodeB, nodeC)
let (bFinished, handlerB) = createCompleteHandler()
nodeA.subscribe(topic, voidTopicHandler)
nodeB.subscribe(topic, handlerB)
nodeC.subscribe(topic, voidTopicHandler)
await waitSubGraph(@[nodeA, nodeB, nodeC], topic)
check:
nodeC.mesh.peers(topic) == 1
# When A sends a message to the topic
tryPublish await nodeA.publish(topic, newSeq[byte](10000)), 1
discard await bFinished
# Then B sends IDONTWANT to neither A nor C (because C.gossipSubVersion == GossipSubCodec_11)
await waitForHeartbeat()
check:
toSeq(nodeC.mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
toSeq(nodeA.mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
asyncTest "Max IDONTWANT messages per heartbeat per peer":
# Given GossipSub node with 1 peer
let
topic = "foobar"
totalPeers = 1
let (gossipSub, conns, peers) = setupGossipSubWithPeers(totalPeers, topic)
defer:
await teardownGossipSub(gossipSub, conns)
let peer = peers[0]
# And sequence of iDontWants with more messages than max number (1200)
proc generateMessageIds(count: int): seq[MessageId] =
return (0 ..< count).mapIt(("msg_id_" & $it & $Moment.now()).toBytes())
let iDontWants =
@[
ControlIWant(messageIDs: generateMessageIds(600)),
ControlIWant(messageIDs: generateMessageIds(600)),
]
# When node handles iDontWants
gossipSub.handleIDontWant(peer, iDontWants)
# Then it saves at most IDontWantMaxCount messages in the history and drops the rest
check:
peer.iDontWants[0].len == IDontWantMaxCount

View File

@@ -0,0 +1,97 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0 ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import chronos
import stew/byteutils
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../../libp2p/stream/connection
import ../helpers
type DummyConnection* = ref object of Connection
method write*(
self: DummyConnection, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
let fut = newFuture[void]()
fut.complete()
return fut
proc new*(T: typedesc[DummyConnection]): DummyConnection =
let instance = T()
instance
suite "GossipSub Custom Connection Support":
teardown:
checkTrackers()
asyncTest "publish with useCustomConn triggers custom connection and peer selection":
let
topic = "test"
handler = proc(topic: string, data: seq[byte]) {.async.} =
discard
nodes = generateNodes(2, gossip = true)
var
customConnCreated = false
peerSelectionCalled = false
GossipSub(nodes[0]).customConnCallbacks = some(
CustomConnectionCallbacks(
customConnCreationCB: proc(
destAddr: Option[MultiAddress], destPeerId: PeerId, codec: string
): Connection =
customConnCreated = true
return DummyConnection.new(),
customPeerSelectionCB: proc(
allPeers: HashSet[PubSubPeer],
directPeers: HashSet[PubSubPeer],
meshPeers: HashSet[PubSubPeer],
fanoutPeers: HashSet[PubSubPeer],
): HashSet[PubSubPeer] =
peerSelectionCalled = true
return allPeers,
)
)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes[1].subscribe(topic, handler)
await waitSub(nodes[0], nodes[1], topic)
tryPublish await nodes[0].publish(topic, "hello".toBytes(), useCustomConn = true), 1
check:
peerSelectionCalled
customConnCreated
asyncTest "publish with useCustomConn triggers assertion if custom callbacks not set":
let
topic = "test"
handler = proc(topic: string, data: seq[byte]) {.async.} =
discard
nodes = generateNodes(2, gossip = true)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes[1].subscribe(topic, handler)
await waitSub(nodes[0], nodes[1], topic)
var raised = false
try:
discard await nodes[0].publish(topic, "hello".toBytes(), useCustomConn = true)
except Defect:
raised = true
check raised

View File

@@ -135,108 +135,35 @@ suite "GossipSub Gossip Protocol":
let gossipPeers = gossipSub.getGossipPeers()
check gossipPeers.len == 0
asyncTest "handleIHave/Iwant tests":
let topic = "foobar"
var (gossipSub, conns, peers) =
setupGossipSubWithPeers(30, topic, populateMesh = true)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.subscribe(topic, voidTopicHandler)
# Peers with no budget should not request messages
block:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IHAVE message that contains the same message ID three times
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
# Given the peer has no budget to request messages
peer.iHaveBudget = 0
# When a peer makes an IHAVE request for a message that `gossipSub` has
let iwants = gossipSub.handleIHave(peer, @[msg])
# Then `gossipSub` should not generate an IWant message for the message,
check:
iwants.messageIDs.len == 0
# Peers with budget should request messages. If ids are repeated, only one request should be generated
block:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
let id = @[0'u8, 1, 2, 3]
# Build an IHAVE message that contains the same message ID three times
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
# Given the budget is not 0 (because it's not been overridden)
# When a peer makes an IHAVE request for a message that `gossipSub` does not have
let iwants = gossipSub.handleIHave(peer, @[msg])
# Then `gossipSub` should generate an IWant message for the message
check:
iwants.messageIDs.len == 1
# Peers with budget should request messages. If ids are repeated, only one request should be generated
block:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IWANT message that contains the same message ID three times
let msg = ControlIWant(messageIDs: @[id, id, id])
# When a peer makes an IWANT request for a message that `gossipSub` has
let genmsg = gossipSub.handleIWant(peer, @[msg])
# Then `gossipSub` should return the message
check:
genmsg.len == 1
check gossipSub.mcache.msgs.len == 1
asyncTest "messages sent to peers not in the mesh are propagated via gossip":
let
numberOfNodes = 5
topic = "foobar"
dValues = DValues(dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1))
nodes = generateNodes(numberOfNodes, gossip = true, dValues = some(dValues))
.toGossipSub()
startNodesAndDeferStop(nodes)
# All nodes are checking for iHave messages
var receivedIHavesRef = new seq[int]
addIHaveObservers(nodes, topic, receivedIHavesRef)
var messages = addIHaveObservers(nodes)
# And are interconnected
await connectNodesStar(nodes)
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForPeersInTable(
nodes, topic, newSeqWith(numberOfNodes, 4), PeerTableType.Gossipsub
)
checkUntilTimeout:
nodes.allIt(it.gossipsub.getOrDefault(topic).len == numberOfNodes - 1)
# When node 0 sends a message
check (await nodes[0].publish(topic, "Hello!".toBytes())) > 0
await waitForHeartbeat()
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
# At least one of the nodes should have received an iHave message
# The check is made this way because the mesh structure changes from run to run
let receivedIHaves = receivedIHavesRef[]
check:
anyIt(receivedIHaves, it > 0)
checkUntilTimeout:
messages[].mapIt(it[].len).anyIt(it > 0)
asyncTest "adaptive gossip dissemination, dLazy and gossipFactor to 0":
let
@@ -255,8 +182,7 @@ suite "GossipSub Gossip Protocol":
startNodesAndDeferStop(nodes)
# All nodes are checking for iHave messages
var receivedIHavesRef = new seq[int]
addIHaveObservers(nodes, topic, receivedIHavesRef)
var messages = addIHaveObservers(nodes)
# And are connected to node 0
for i in 1 ..< numberOfNodes:
@@ -267,11 +193,11 @@ suite "GossipSub Gossip Protocol":
await waitForHeartbeat()
# When node 0 sends a message
check (await nodes[0].publish(topic, "Hello!".toBytes())) == 3
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
await waitForHeartbeat()
# None of the nodes should have received an iHave message
let receivedIHaves = receivedIHavesRef[]
let receivedIHaves = messages[].mapIt(it[].len)
check:
filterIt(receivedIHaves, it > 0).len == 0
@@ -283,14 +209,17 @@ suite "GossipSub Gossip Protocol":
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(4)
)
nodes = generateNodes(
numberOfNodes, gossip = true, dValues = some(dValues), gossipFactor = some(0.5)
)
numberOfNodes,
gossip = true,
dValues = some(dValues),
gossipFactor = some(0.5),
)
.toGossipSub()
startNodesAndDeferStop(nodes)
# All nodes are checking for iHave messages
var receivedIHavesRef = new seq[int]
addIHaveObservers(nodes, topic, receivedIHavesRef)
var messages = addIHaveObservers(nodes)
# And are connected to node 0
for i in 1 ..< numberOfNodes:
@@ -298,15 +227,17 @@ suite "GossipSub Gossip Protocol":
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForPeersInTable(@[nodes[0]], topic, @[19], PeerTableType.Gossipsub)
checkUntilTimeout:
nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
# When node 0 sends a message
check (await nodes[0].publish(topic, "Hello!".toBytes())) in 2 .. 3
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
await waitForHeartbeat(2)
# At least 8 of the nodes should have received an iHave message
# That's because the gossip factor is 0.5 over 16 available nodes
let receivedIHaves = receivedIHavesRef[]
let receivedIHaves = messages[].mapIt(it[].len)
check:
filterIt(receivedIHaves, it > 0).len >= 8
@@ -318,17 +249,17 @@ suite "GossipSub Gossip Protocol":
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(6)
)
nodes = generateNodes(
numberOfNodes,
gossip = true,
dValues = some(dValues),
gossipFactor = some(0.float),
)
numberOfNodes,
gossip = true,
dValues = some(dValues),
gossipFactor = some(0.float),
)
.toGossipSub()
startNodesAndDeferStop(nodes)
# All nodes are checking for iHave messages
var receivedIHavesRef = new seq[int]
addIHaveObservers(nodes, topic, receivedIHavesRef)
var messages = addIHaveObservers(nodes)
# And are connected to node 0
for i in 1 ..< numberOfNodes:
@@ -336,15 +267,17 @@ suite "GossipSub Gossip Protocol":
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForPeersInTable(@[nodes[0]], topic, @[19], PeerTableType.Gossipsub)
checkUntilTimeout:
nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
# When node 0 sends a message
check (await nodes[0].publish(topic, "Hello!".toBytes())) in 2 .. 3
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
await waitForHeartbeat(2)
# At least 6 of the nodes should have received an iHave message
# That's because the dLazy is 6
let receivedIHaves = receivedIHavesRef[]
let receivedIHaves = messages[].mapIt(it[].len)
check:
filterIt(receivedIHaves, it > 0).len >= dValues.dLazy.get()
@@ -352,13 +285,12 @@ suite "GossipSub Gossip Protocol":
let
numberOfNodes = 3
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true)
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
# All nodes are checking for iDontWant messages
var receivedIDontWantsRef = new seq[int]
addIDontWantObservers(nodes, receivedIDontWantsRef)
var messages = addIDontWantObservers(nodes)
# And are connected in a line
await connectNodes(nodes[0], nodes[1])
@@ -366,19 +298,21 @@ suite "GossipSub Gossip Protocol":
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForPeersInTable(nodes, topic, @[1, 2, 1], PeerTableType.Gossipsub)
checkUntilTimeout:
nodes[0].gossipsub.getOrDefault(topic).len == 1
nodes[1].gossipsub.getOrDefault(topic).len == 2
nodes[2].gossipsub.getOrDefault(topic).len == 1
# When node 0 sends a large message
let largeMsg = newSeq[byte](1000)
check (await nodes[0].publish(topic, largeMsg)) == 1
await waitForHeartbeat()
tryPublish await nodes[0].publish(topic, largeMsg), 1
# Only node 2 should have received the iDontWant message
let receivedIDontWants = receivedIDontWantsRef[]
check:
receivedIDontWants[0] == 0
receivedIDontWants[1] == 0
receivedIDontWants[2] == 1
checkUntilTimeout:
messages[].mapIt(it[].len)[2] == 1
messages[].mapIt(it[].len)[1] == 0
messages[].mapIt(it[].len)[0] == 0
asyncTest "e2e - GossipSub peer exchange":
# A, B & C are subscribed to something
@@ -386,57 +320,45 @@ suite "GossipSub Gossip Protocol":
# PX to A & C
#
# C sent his SPR, not A
proc handler(topic: string, data: seq[byte]) {.async.} =
discard # not used in this test
let nodes =
generateNodes(2, gossip = true, enablePX = true) &
generateNodes(1, gossip = true, sendSignedPeerRecord = true)
let
topic = "foobar"
nodes =
generateNodes(2, gossip = true, enablePX = true).toGossipSub() &
generateNodes(1, gossip = true, sendSignedPeerRecord = true).toGossipSub()
startNodesAndDeferStop(nodes)
var
gossip0 = GossipSub(nodes[0])
gossip1 = GossipSub(nodes[1])
gossip2 = GossipSub(nodes[2])
await connectNodesStar(nodes)
nodes[0].subscribe("foobar", handler)
nodes[1].subscribe("foobar", handler)
nodes[2].subscribe("foobar", handler)
for x in 0 ..< 3:
for y in 0 ..< 3:
if x != y:
await waitSub(nodes[x], nodes[y], "foobar")
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitSubAllNodes(nodes, topic)
# Setup record handlers for all nodes
var
passed0: Future[void] = newFuture[void]()
passed2: Future[void] = newFuture[void]()
gossip0.routingRecordsHandler.add(
nodes[0].routingRecordsHandler.add(
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
check:
tag == "foobar"
tag == topic
peers.len == 2
peers[0].record.isSome() xor peers[1].record.isSome()
passed0.complete()
)
gossip1.routingRecordsHandler.add(
nodes[1].routingRecordsHandler.add(
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
raiseAssert "should not get here"
)
gossip2.routingRecordsHandler.add(
nodes[2].routingRecordsHandler.add(
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
check:
tag == "foobar"
tag == topic
peers.len == 2
peers[0].record.isSome() xor peers[1].record.isSome()
passed2.complete()
)
# Unsubscribe from the topic
nodes[1].unsubscribe("foobar", handler)
# Unsubscribe from the topic
nodes[1].unsubscribe(topic, voidTopicHandler)
# Then verify what nodes receive the PX
let results = await waitForStates(@[passed0, passed2], HEARTBEAT_TIMEOUT)
@@ -444,308 +366,24 @@ suite "GossipSub Gossip Protocol":
results[0].isCompleted()
results[1].isCompleted()
asyncTest "e2e - iDontWant":
# 3 nodes: A <=> B <=> C
# (A & C are NOT connected). We pre-emptively send a dontwant from C to B,
# and check that B doesn't relay the message to C.
# We also check that B sends IDONTWANT to C, but not A
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
ok(newSeq[byte](10))
let nodes = generateNodes(3, gossip = true, msgIdProvider = dumbMsgIdProvider)
startNodesAndDeferStop(nodes)
await nodes[0].switch.connect(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
await nodes[1].switch.connect(
nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs
)
let bFinished = newFuture[void]()
proc handlerA(topic: string, data: seq[byte]) {.async.} =
discard
proc handlerB(topic: string, data: seq[byte]) {.async.} =
bFinished.complete()
proc handlerC(topic: string, data: seq[byte]) {.async.} =
doAssert false
nodes[0].subscribe("foobar", handlerA)
nodes[1].subscribe("foobar", handlerB)
nodes[2].subscribe("foobar", handlerB)
await waitSubGraph(nodes, "foobar")
var gossip1: GossipSub = GossipSub(nodes[0])
var gossip2: GossipSub = GossipSub(nodes[1])
var gossip3: GossipSub = GossipSub(nodes[2])
check:
gossip3.mesh.peers("foobar") == 1
gossip3.broadcast(
gossip3.mesh["foobar"],
RPCMsg(
control: some(
ControlMessage(idontwant: @[ControlIWant(messageIDs: @[newSeq[byte](10)])])
)
),
isHighPriority = true,
)
checkUntilTimeout:
gossip2.mesh.getOrDefault("foobar").anyIt(it.iDontWants[^1].len == 1)
tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
await bFinished
checkUntilTimeout:
toSeq(gossip3.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 1)
check:
toSeq(gossip1.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
asyncTest "e2e - iDontWant is broadcasted on publish":
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
ok(newSeq[byte](10))
let nodes = generateNodes(
2, gossip = true, msgIdProvider = dumbMsgIdProvider, sendIDontWantOnPublish = true
)
startNodesAndDeferStop(nodes)
await nodes[0].switch.connect(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
proc handlerA(topic: string, data: seq[byte]) {.async.} =
discard
proc handlerB(topic: string, data: seq[byte]) {.async.} =
discard
nodes[0].subscribe("foobar", handlerA)
nodes[1].subscribe("foobar", handlerB)
await waitSubGraph(nodes, "foobar")
var gossip2: GossipSub = GossipSub(nodes[1])
tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
checkUntilTimeout:
gossip2.mesh.getOrDefault("foobar").anyIt(it.iDontWants[^1].len == 1)
asyncTest "e2e - iDontWant is sent only for 1.2":
# 3 nodes: A <=> B <=> C
# (A & C are NOT connected). We pre-emptively send a dontwant from C to B,
# and check that B doesn't relay the message to C.
# We also check that B sends IDONTWANT to C, but not A
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
ok(newSeq[byte](10))
let
nodeA = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
nodeB = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
nodeC = generateNodes(
1,
gossip = true,
msgIdProvider = dumbMsgIdProvider,
gossipSubVersion = GossipSubCodec_11,
)[0]
startNodesAndDeferStop(@[nodeA, nodeB, nodeC])
await nodeA.switch.connect(
nodeB.switch.peerInfo.peerId, nodeB.switch.peerInfo.addrs
)
await nodeB.switch.connect(
nodeC.switch.peerInfo.peerId, nodeC.switch.peerInfo.addrs
)
let bFinished = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
proc handlerB(topic: string, data: seq[byte]) {.async.} =
bFinished.complete()
nodeA.subscribe("foobar", handler)
nodeB.subscribe("foobar", handlerB)
nodeC.subscribe("foobar", handler)
await waitSubGraph(@[nodeA, nodeB, nodeC], "foobar")
var gossipA: GossipSub = GossipSub(nodeA)
var gossipB: GossipSub = GossipSub(nodeB)
var gossipC: GossipSub = GossipSub(nodeC)
check:
gossipC.mesh.peers("foobar") == 1
tryPublish await nodeA.publish("foobar", newSeq[byte](10000)), 1
await bFinished
# "check" alone isn't suitable for testing that a condition is true after some time has passed. Below we verify that
# peers A and C haven't received an IDONTWANT message from B, but we need to wait some time for potential in-flight messages to arrive.
await waitForHeartbeat()
check:
toSeq(gossipC.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
toSeq(gossipA.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
asyncTest "Peer must send right gosspipsub version":
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
ok(newSeq[byte](10))
let node0 = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
let node1 = generateNodes(
1,
gossip = true,
msgIdProvider = dumbMsgIdProvider,
gossipSubVersion = GossipSubCodec_10,
)[0]
let
topic = "foobar"
node0 = generateNodes(1, gossip = true)[0]
node1 = generateNodes(1, gossip = true, gossipSubVersion = GossipSubCodec_10)[0]
startNodesAndDeferStop(@[node0, node1])
await node0.switch.connect(
node1.switch.peerInfo.peerId, node1.switch.peerInfo.addrs
)
await connectNodes(node0, node1)
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
node0.subscribe("foobar", handler)
node1.subscribe("foobar", handler)
await waitSubGraph(@[node0, node1], "foobar")
node0.subscribe(topic, voidTopicHandler)
node1.subscribe(topic, voidTopicHandler)
await waitSubGraph(@[node0, node1], topic)
var gossip0: GossipSub = GossipSub(node0)
var gossip1: GossipSub = GossipSub(node1)
checkUntilTimeout:
gossip0.mesh.getOrDefault("foobar").toSeq[0].codec == GossipSubCodec_10
gossip0.mesh.getOrDefault(topic).toSeq[0].codec == GossipSubCodec_10
checkUntilTimeout:
gossip1.mesh.getOrDefault("foobar").toSeq[0].codec == GossipSubCodec_10
asyncTest "IHAVE messages correctly advertise message ID to peers":
# Given 2 nodes
let
topic = "foo"
messageID = @[0'u8, 1, 2, 3]
ihaveMessage =
ControlMessage(ihave: @[ControlIHave(topicID: topic, messageIDs: @[messageID])])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# Given node1 has an IHAVE observer
var receivedIHave = newFuture[(string, seq[MessageId])]()
let checkForIhaves = proc(peer: PubSubPeer, msgs: var RPCMsg) =
if msgs.control.isSome:
for msg in msgs.control.get.ihave:
receivedIHave.complete((msg.topicID, msg.messageIDs))
n1.addObserver(PubSubObserver(onRecv: checkForIhaves))
# And the nodes are connected
await connectNodesStar(nodes)
# And both subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
# When an IHAVE message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(ihaveMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer has the message ID
let r = await receivedIHave.waitForState(HEARTBEAT_TIMEOUT)
check:
r.isCompleted((topic, @[messageID]))
asyncTest "IWANT messages correctly request messages by their IDs":
# Given 2 nodes
let
topic = "foo"
messageID = @[0'u8, 1, 2, 3]
iwantMessage = ControlMessage(iwant: @[ControlIWant(messageIDs: @[messageID])])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# Given node1 has an IWANT observer
var receivedIWant = newFuture[seq[MessageId]]()
let checkForIwants = proc(peer: PubSubPeer, msgs: var RPCMsg) =
if msgs.control.isSome:
for msg in msgs.control.get.iwant:
receivedIWant.complete(msg.messageIDs)
n1.addObserver(PubSubObserver(onRecv: checkForIwants))
# And the nodes are connected
await connectNodesStar(nodes)
# And both subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
# When an IWANT message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(iwantMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer has the message ID
let r = await receivedIWant.waitForState(HEARTBEAT_TIMEOUT)
check:
r.isCompleted(@[messageID])
asyncTest "IHAVE for message not held by peer triggers IWANT response to sender":
# Given 2 nodes
let
topic = "foo"
messageID = @[0'u8, 1, 2, 3]
ihaveMessage =
ControlMessage(ihave: @[ControlIHave(topicID: topic, messageIDs: @[messageID])])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# Given node1 has an IWANT observer
var receivedIWant = newFuture[seq[MessageId]]()
let checkForIwants = proc(peer: PubSubPeer, msgs: var RPCMsg) =
if msgs.control.isSome:
for msg in msgs.control.get.iwant:
receivedIWant.complete(msg.messageIDs)
n0.addObserver(PubSubObserver(onRecv: checkForIwants))
# And the nodes are connected
await connectNodesStar(nodes)
# And both nodes subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When an IHAVE message is sent from node0
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(ihaveMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then node0 should receive an IWANT message from node1 (as node1 doesn't have the message)
let iWantResult = await receivedIWant.waitForState(HEARTBEAT_TIMEOUT)
check:
iWantResult.isCompleted(@[messageID])
gossip1.mesh.getOrDefault(topic).toSeq[0].codec == GossipSubCodec_10

View File

@@ -0,0 +1,348 @@
{.used.}
import std/[sequtils]
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../helpers
suite "GossipSub Heartbeat":
teardown:
checkTrackers()
asyncTest "Mesh is rebalanced during heartbeat - pruning peers":
const
numberOfNodes = 10
topic = "foobar"
heartbeatInterval = 200.milliseconds
let
nodes = generateNodes(
numberOfNodes, gossip = true, heartbeatInterval = heartbeatInterval
)
.toGossipSub()
node0 = nodes[0]
startNodesAndDeferStop(nodes)
# Nodes are connected to Node0
for i in 1 ..< numberOfNodes:
await connectNodes(node0, nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
checkUntilTimeout:
node0.mesh.getOrDefault(topic).len == numberOfNodes - 1
# When DValues of Node0 are updated to lower than defaults
const
newDLow = 2
newDHigh = 4
newDValues = some(
DValues(
dLow: some(newDLow),
dHigh: some(newDHigh),
d: some(3),
dLazy: some(3),
dScore: some(2),
dOut: some(2),
)
)
node0.parameters.applyDValues(newDValues)
# Then mesh of Node0 is rebalanced and peers are pruned to adapt to new values
checkUntilTimeout:
node0.mesh[topic].len >= newDLow and node0.mesh[topic].len <= newDHigh
asyncTest "Mesh is rebalanced during heartbeat - grafting new peers":
const
numberOfNodes = 10
topic = "foobar"
dLow = 3
dHigh = 4
heartbeatInterval = 200.milliseconds
let
nodes = generateNodes(
numberOfNodes,
gossip = true,
dValues = some(
DValues(dLow: some(dLow), dHigh: some(dHigh), d: some(3), dOut: some(1))
),
pruneBackoff = 20.milliseconds,
heartbeatInterval = heartbeatInterval,
)
.toGossipSub()
node0 = nodes[0]
startNodesAndDeferStop(nodes)
# Nodes are connected to Node0
for i in 1 ..< numberOfNodes:
await connectNodes(node0, nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
checkUntilTimeout:
node0.mesh.getOrDefault(topic).len >= dLow and
node0.mesh.getOrDefault(topic).len <= dHigh
# When peers of Node0 mesh are disconnected
let peersToDisconnect = node0.mesh[topic].toSeq()[1 .. ^1].mapIt(it.peerId)
findAndUnsubscribePeers(nodes, peersToDisconnect, topic, voidTopicHandler)
checkUntilTimeout:
node0.mesh[topic].len >= dLow and node0.mesh[topic].len <= dHigh
node0.mesh[topic].toSeq().allIt(it.peerId notin peersToDisconnect)
asyncTest "Mesh is rebalanced during heartbeat - opportunistic grafting":
const
numberOfNodes = 10
topic = "foobar"
heartbeatInterval = 200.milliseconds
let
nodes = generateNodes(
numberOfNodes,
gossip = true,
dValues = some(
DValues(
dLow: some(3),
dHigh: some(4),
d: some(3),
dOut: some(1),
dLazy: some(3),
dScore: some(2),
)
),
pruneBackoff = 20.milliseconds,
opportunisticGraftThreshold = 600,
heartbeatInterval = heartbeatInterval,
)
.toGossipSub()
node0 = nodes[0]
startNodesAndDeferStop(nodes)
# Nodes are connected to Node0
for i in 1 ..< numberOfNodes:
await connectNodes(node0, nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat(heartbeatInterval)
# Keep track of initial mesh of Node0
let startingMesh = node0.mesh[topic].toSeq()
# When scores are assigned to Peers of Node0
var expectedGrafts: seq[PubSubPeer] = @[]
var score = 100.0
for peer in node0.gossipsub[topic]:
if peer in node0.mesh[topic]:
# Assign scores in starting Mesh
peer.score = score
score += 100.0
else:
# Assign scores higher than median to Peers not in starting Mesh and expect them to be grafted
peer.score = 800.0
expectedGrafts &= peer
# Then during heartbeat peers with lower-than-median scores are pruned and at most MaxOpportunisticGraftPeers (2) peers are grafted
await waitForHeartbeat(heartbeatInterval)
let actualGrafts = node0.mesh[topic].toSeq().filterIt(it notin startingMesh)
check:
actualGrafts.len == MaxOpportunisticGraftPeers
actualGrafts.allIt(it in expectedGrafts)
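# Editor's sketch (illustrative only, not the library routine; all names below are
# hypothetical): the opportunistic grafting rule from the gossipsub v1.1 spec that
# the test above exercises. When the median score of the current mesh drops below
# opportunisticGraftThreshold, up to maxGraft peers whose score is above that
# median are grafted in.
import std/[algorithm, sequtils]

proc opportunisticGraftCandidates(
    meshScores, candidateScores: seq[float], threshold: float, maxGraft: int
): seq[float] =
  let sortedScores = meshScores.sorted()
  let median = sortedScores[sortedScores.len div 2]
  if median < threshold:
    result = candidateScores.filterIt(it > median)
    if result.len > maxGraft:
      result.setLen(maxGraft)

when isMainModule:
  # loosely mirrors the test above: low mesh scores, outsiders at 800, threshold 600, max 2 grafts
  let grafts = opportunisticGraftCandidates(
    @[100.0, 200.0, 300.0, 400.0], @[800.0, 800.0, 800.0], 600.0, 2
  )
  assert grafts.len == 2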
asyncTest "Fanout maintenance during heartbeat - expired peers are dropped":
const
numberOfNodes = 10
topic = "foobar"
heartbeatInterval = 200.milliseconds
let nodes = generateNodes(
numberOfNodes,
gossip = true,
fanoutTTL = 60.milliseconds,
heartbeatInterval = heartbeatInterval,
)
.toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
# All nodes but Node0 are subscribed to the topic
for node in nodes[1 .. ^1]:
node.subscribe(topic, voidTopicHandler)
await waitForHeartbeat(heartbeatInterval)
let node0 = nodes[0]
checkUntilTimeout:
node0.gossipsub.hasKey(topic)
# When Node0 sends a message to the topic
tryPublish await node0.publish(topic, newSeq[byte](10000)), 3
# Then Node0 fanout peers are populated
checkUntilTimeout:
node0.fanout.hasKey(topic)
node0.fanout[topic].len > 0
# And after heartbeat Node0 fanout peers are dropped (because fanoutTTL < heartbeatInterval)
checkUntilTimeout:
not node0.fanout.hasKey(topic)
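# Editor's sketch (hypothetical names, not the library's fanout bookkeeping): the
# drop condition behind the check above. A fanout entry remembers when the topic
# was last published to; once more than fanoutTTL has elapsed, the next heartbeat
# drops the topic from the fanout table.
import chronos

type FanoutEntry = object
  lastPublish: Moment

proc expired(entry: FanoutEntry, fanoutTTL: Duration): bool =
  Moment.now() - entry.lastPublish >= fanoutTTL

when isMainModule:
  let entry = FanoutEntry(lastPublish: Moment.now() - 100.milliseconds)
  assert entry.expired(60.milliseconds)      # fanoutTTL already exceeded
  assert not entry.expired(200.milliseconds) # still within the TTL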
asyncTest "Fanout maintenance during heartbeat - fanout peers are replenished":
const
numberOfNodes = 10
topic = "foobar"
heartbeatInterval = 200.milliseconds
let
nodes = generateNodes(
numberOfNodes, gossip = true, heartbeatInterval = heartbeatInterval
)
.toGossipSub()
node0 = nodes[0]
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
# All nodes but Node0 are subscribed to the topic
for node in nodes[1 .. ^1]:
node.subscribe(topic, voidTopicHandler)
await waitForHeartbeat(heartbeatInterval)
# When Node0 sends a message to the topic
tryPublish await node0.publish(topic, newSeq[byte](10000)), 1
# Then Node0 fanout peers are populated
let maxFanoutPeers = node0.parameters.d
checkUntilTimeout:
node0.fanout[topic].len == maxFanoutPeers
# When all of Node0's fanout peers except the first one are disconnected
let peersToDisconnect = node0.fanout[topic].toSeq()[1 .. ^1].mapIt(it.peerId)
findAndUnsubscribePeers(nodes, peersToDisconnect, topic, voidTopicHandler)
# Then Node0 fanout peers are replenished during heartbeat
# expecting 10[numberOfNodes] - 1[Node0] - (6[maxFanoutPeers] - 1[first peer not disconnected]) = 4
let expectedLen = numberOfNodes - 1 - (maxFanoutPeers - 1)
checkUntilTimeout:
node0.fanout[topic].len == expectedLen
node0.fanout[topic].toSeq().allIt(it.peerId notin peersToDisconnect)
asyncTest "iDontWants history - last element is pruned during heartbeat":
const
topic = "foobar"
heartbeatInterval = 200.milliseconds
historyLength = 3
let nodes = generateNodes(
2,
gossip = true,
sendIDontWantOnPublish = true,
historyLength = historyLength,
heartbeatInterval = heartbeatInterval,
)
.toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodes(nodes[0], nodes[1])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat(heartbeatInterval)
# Get Node0 as Peer of Node1
let peer = nodes[1].mesh[topic].toSeq()[0]
# Wait for history to populate
checkUntilTimeout:
peer.iDontWants.len == historyLength
# When Node0 sends 5 messages to the topic
const msgCount = 5
for i in 0 ..< msgCount:
tryPublish await nodes[0].publish(topic, newSeq[byte](1000)), 1
# Then Node1 receives 5 iDontWant messages from Node0
checkUntilTimeoutCustom(3.seconds, 50.milliseconds):
peer.iDontWants[0].len == msgCount
for i in 0 ..< historyLength:
# When heartbeat happens
# And history moves (new element added at start, last element pruned)
checkUntilTimeout:
peer.iDontWants[i].len == 0
# Then iDontWant messages are moved to the next element
var expectedHistory = newSeqWith(historyLength, 0)
let nextIndex = i + 1
if nextIndex < historyLength:
expectedHistory[nextIndex] = msgCount
# Until they reach the last element and are pruned
checkUntilTimeout:
peer.iDontWants.mapIt(it.len) == expectedHistory
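# Editor's sketch (standalone, not the library code): the circular-history shift
# the loop above observes. On every heartbeat a fresh empty slot is prepended and
# the oldest slot is dropped, so an entry recorded in the newest slot disappears
# after historyLength shifts. The same rotation applies to the iDontWants,
# sentIHaves and mcache histories exercised in these tests.
import std/sequtils

proc shiftHistory[T](history: var seq[seq[T]]) =
  ## Prepend an empty slot and drop the last one; total length stays constant.
  history.insert(newSeq[T](), 0)
  history.setLen(history.len - 1)

when isMainModule:
  var history = newSeqWith(3, newSeq[int]()) # historyLength = 3
  history[0].add(42)                         # entry recorded in the newest slot
  shiftHistory(history)
  assert history.mapIt(it.len) == @[0, 1, 0] # entry moved one slot back
  shiftHistory(history)
  shiftHistory(history)
  assert history.allIt(it.len == 0)          # pruned after historyLength shifts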
asyncTest "sentIHaves history - last element is pruned during heartbeat":
# 3 Nodes, Node 0 <==> Node 1 and Node 0 <==> Node 2
# due to DValues: 1 peer in mesh and 1 peer only in gossip of Node 0
const
numberOfNodes = 3
topic = "foobar"
heartbeatInterval = 200.milliseconds
historyLength = 3
gossipThreshold = -100.0
let nodes = generateNodes(
numberOfNodes,
gossip = true,
historyLength = historyLength,
dValues =
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
heartbeatInterval = heartbeatInterval,
gossipThreshold = gossipThreshold,
)
.toGossipSub()
startNodesAndDeferStop(nodes)
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat(heartbeatInterval)
# Find Peer outside of mesh to which Node 0 will send IHave
let peerOutsideMesh =
nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
# Wait for history to populate
checkUntilTimeout:
peerOutsideMesh.sentIHaves.len == historyLength
# Normally, when nodeOutsideMesh receives an IHave message it responds with an IWant to request the full message from Node0
# Set `peer.score < gossipThreshold` to prevent nodeOutsideMesh from sending the IWant,
# because when an IWant is processed, the requested messages are removed from the sentIHaves history
let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)
for p in nodeOutsideMesh.gossipsub[topic].toSeq():
p.score = 2 * gossipThreshold
# When NodeInsideMesh sends a message to the topic
let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
tryPublish await nodeInsideMesh.publish(topic, newSeq[byte](1000)), 1
# When next heartbeat occurs
# Then IHave is sent and sentIHaves is populated
checkUntilTimeout:
peerOutsideMesh.sentIHaves[0].len == 1
# Clear the mCache, otherwise Node0 would keep repopulating sentIHaves until the cache has shifted enough times
nodes[0].clearMCache()
for i in 0 ..< historyLength:
# When heartbeat happens
# And history moves (new element added at start, last element pruned)
checkUntilTimeout:
peerOutsideMesh.sentIHaves[i].len == 0
# Then sentIHaves messages are moved to the next element
var expectedHistory = newSeqWith(historyLength, 0)
let nextIndex = i + 1
if nextIndex < historyLength:
expectedHistory[nextIndex] = 1
# Until they reach the last element and are pruned
checkUntilTimeout:
peerOutsideMesh.sentIHaves.mapIt(it.len) == expectedHistory
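# Editor's sketch (assumption: this mirrors the gossipsub v1.1 scoring rule the
# test above relies on; names are hypothetical): gossip is only exchanged with
# peers scored at or above gossipThreshold, so an IHAVE coming from a peer scored
# below the threshold is ignored and no IWANT is sent back, which is why the test
# lowers the peer score to keep sentIHaves populated.
proc shouldAnswerIHave(peerScore, gossipThreshold: float): bool =
  peerScore >= gossipThreshold

when isMainModule:
  const gossipThreshold = -100.0
  assert shouldAnswerIHave(0.0, gossipThreshold)                     # healthy peer
  assert not shouldAnswerIHave(2 * gossipThreshold, gossipThreshold) # as set in the test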

View File

@@ -9,21 +9,16 @@
{.used.}
import std/[sequtils]
import stew/byteutils
import utils
import chronicles
import std/[sequtils]
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../helpers, ../utils/[futures]
import ../helpers
suite "GossipSub Mesh Management":
teardown:
checkTrackers()
asyncTest "topic params":
let params = TopicParams.init()
params.validateParameters().tryGet()
asyncTest "subscribe/unsubscribeAll":
let topic = "foobar"
let (gossipSub, conns, peers) =
@@ -177,36 +172,57 @@ suite "GossipSub Mesh Management":
# ensure we give priority and keep at least dOut outbound peers
check outbound >= gossipSub.parameters.dOut
asyncTest "dont prune peers if mesh len is less than d_high":
asyncTest "rebalanceMesh Degree Hi - dScore controls number of peers to retain by score when pruning":
# Given GossipSub node starting with 13 peers in mesh
let
topic = "foobar"
totalPeers = 13
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
totalPeers, topic, populateGossipsub = true, populateMesh = true
)
defer:
await teardownGossipSub(gossipSub, conns)
# And mesh is larger than dHigh
gossipSub.parameters.dLow = 4
gossipSub.parameters.d = 6
gossipSub.parameters.dHigh = 8
gossipSub.parameters.dOut = 3
gossipSub.parameters.dScore = 13
check gossipSub.mesh[topic].len == totalPeers
# When mesh is rebalanced
gossipSub.rebalanceMesh(topic)
# Then pruning is not triggered when mesh is not larger than dScore
check gossipSub.mesh[topic].len == totalPeers
asyncTest "Nodes graft peers according to DValues - numberOfNodes < dHigh":
let
numberOfNodes = 5
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true)
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
let expectedNumberOfPeers = numberOfNodes - 1
await waitForPeersInTable(
nodes,
topic,
newSeqWith(numberOfNodes, expectedNumberOfPeers),
PeerTableType.Gossipsub,
)
for i in 0 ..< numberOfNodes:
var gossip = GossipSub(nodes[i])
check:
gossip.gossipsub[topic].len == expectedNumberOfPeers
gossip.mesh[topic].len == expectedNumberOfPeers
gossip.fanout.len == 0
let node = nodes[i]
checkUntilTimeout:
node.gossipsub.getOrDefault(topic).len == expectedNumberOfPeers
node.mesh.getOrDefault(topic).len == expectedNumberOfPeers
node.fanout.len == 0
asyncTest "prune peers if mesh len is higher than d_high":
asyncTest "Nodes graft peers according to DValues - numberOfNodes > dHigh":
let
numberOfNodes = 15
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true)
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
@@ -218,100 +234,13 @@ suite "GossipSub Mesh Management":
d = 6
dLow = 4
await waitForPeersInTable(
nodes,
topic,
newSeqWith(numberOfNodes, expectedNumberOfPeers),
PeerTableType.Gossipsub,
)
for i in 0 ..< numberOfNodes:
var gossip = GossipSub(nodes[i])
check:
gossip.gossipsub[topic].len == expectedNumberOfPeers
gossip.mesh[topic].len >= dLow and gossip.mesh[topic].len <= dHigh
gossip.fanout.len == 0
asyncTest "GossipSub unsub - resub faster than backoff":
# For this test to work we'd require a way to disable fanout.
# There's no way to toggle it, and mocking it didn't work as no reliable mock is available.
skip()
return
# Instantiate handlers and validators
var handlerFut0 = newFuture[bool]()
proc handler0(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut0.complete(true)
var handlerFut1 = newFuture[bool]()
proc handler1(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut1.complete(true)
var validatorFut = newFuture[bool]()
proc validator(
topic: string, message: Message
): Future[ValidationResult] {.async.} =
check topic == "foobar"
validatorFut.complete(true)
result = ValidationResult.Accept
# Setup nodes and start switches
let
nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 5.seconds)
topic = "foobar"
# Connect nodes
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
# Subscribe both nodes to the topic and node1 (receiver) to the validator
nodes[0].subscribe(topic, handler0)
nodes[1].subscribe(topic, handler1)
nodes[1].addValidator("foobar", validator)
await sleepAsync(DURATION_TIMEOUT)
# Wait for both nodes to verify others' subscription
var subs: seq[Future[void]]
subs &= waitSub(nodes[1], nodes[0], topic)
subs &= waitSub(nodes[0], nodes[1], topic)
await allFuturesThrowing(subs)
# When unsubscribing and resubscribing in a short time frame, the backoff period should be triggered
nodes[1].unsubscribe(topic, handler1)
await sleepAsync(DURATION_TIMEOUT)
nodes[1].subscribe(topic, handler1)
await sleepAsync(DURATION_TIMEOUT)
# Backoff is set to 5 seconds, and the time slept since the unsubscribe until now is ~3-4s
# Meaning, the subscription shouldn't have been processed yet because it's still in backoff period
# When publishing under this condition
discard await nodes[0].publish("foobar", "Hello!".toBytes())
await sleepAsync(DURATION_TIMEOUT)
# Then the message should not be received:
check:
validatorFut.toState().isPending()
handlerFut1.toState().isPending()
handlerFut0.toState().isPending()
validatorFut.reset()
handlerFut0.reset()
handlerFut1.reset()
# If we wait backoff period to end, around 1-2s
await waitForMesh(nodes[0], nodes[1], topic, 3.seconds)
discard await nodes[0].publish("foobar", "Hello!".toBytes())
await sleepAsync(DURATION_TIMEOUT)
# Then the message should be received
check:
validatorFut.toState().isCompleted()
handlerFut1.toState().isCompleted()
handlerFut0.toState().isPending()
let node = nodes[i]
checkUntilTimeout:
node.gossipsub.getOrDefault(topic).len == expectedNumberOfPeers
node.mesh.getOrDefault(topic).len >= dLow and
node.mesh.getOrDefault(topic).len <= dHigh
node.fanout.len == 0
asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
proc handler(topic: string, data: seq[byte]) {.async.} =
@@ -473,217 +402,126 @@ suite "GossipSub Mesh Management":
await waitForHeartbeat()
# Then all nodes should be subscribed to the topics initially
for node in nodes:
for topic in topics:
check node.topics.contains(topic)
check node.gossipsub[topic].len() == numberOfNodes - 1
check node.mesh[topic].len() == numberOfNodes - 1
for i in 0 ..< topics.len:
let topic = topics[i]
checkUntilTimeout:
nodes.allIt(it.topics.contains(topic))
nodes.allIt(it.gossipsub.getOrDefault(topic).len() == numberOfNodes - 1)
nodes.allIt(it.mesh.getOrDefault(topic).len() == numberOfNodes - 1)
# When they unsubscribe from all topics
for topic in topics:
unsubscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Then topics should be removed from mesh and gossipsub
for node in nodes:
for topic in topics:
check topic notin node.topics
check topic notin node.mesh
check topic notin node.gossipsub
for i in 0 ..< topics.len:
let topic = topics[i]
checkUntilTimeout:
nodes.allIt(not it.topics.contains(topic))
nodes.allIt(topic notin it.gossipsub)
nodes.allIt(topic notin it.mesh)
asyncTest "GRAFT messages correctly add peers to mesh":
# Given 2 nodes
let
asyncTest "Unsubscribe backoff":
const
numberOfNodes = 3
topic = "foobar"
graftMessage = ControlMessage(graft: @[ControlGraft(topicID: topic)])
numberOfNodes = 2
# First part of the hack: Weird dValues so peers are not GRAFTed automatically
dValues = DValues(dLow: some(0), dHigh: some(0), d: some(0), dOut: some(-1))
unsubscribeBackoff = 1.seconds # 1s is the minimum
let nodes = generateNodes(
numberOfNodes, gossip = true, unsubscribeBackoff = unsubscribeBackoff
)
.toGossipSub()
startNodesAndDeferStop(nodes)
# Nodes are connected to Node0
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
check:
nodes[0].mesh[topic].len == numberOfNodes - 1
# When Node0 unsubscribes from the topic
nodes[0].unsubscribe(topic, voidTopicHandler)
# And subscribes back straight away
nodes[0].subscribe(topic, voidTopicHandler)
# Then its mesh is pruned and peers have applied unsubscribeBackoff
# Waiting more than one heartbeat (60ms) and less than unsubscribeBackoff (1s)
await sleepAsync(unsubscribeBackoff.div(2))
check:
not nodes[0].mesh.hasKey(topic)
# When unsubscribeBackoff period is done
await sleepAsync(unsubscribeBackoff)
# Then on the next heartbeat mesh is rebalanced and peers are regrafted
check:
nodes[0].mesh[topic].len == numberOfNodes - 1
asyncTest "Prune backoff":
const
numberOfNodes = 9
topic = "foobar"
pruneBackoff = 1.seconds # 1s is the minimum
dValues = some(
DValues(
dLow: some(6),
dHigh: some(8),
d: some(6),
dLazy: some(6),
dScore: some(4),
dOut: some(2),
)
)
let
nodes = generateNodes(
numberOfNodes, gossip = true, verifySignature = false, dValues = some(dValues)
numberOfNodes, gossip = true, dValues = dValues, pruneBackoff = pruneBackoff
)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
node0 = nodes[0]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And both subscribe to the topic
# Nodes are connected to Node0
for i in 1 ..< numberOfNodes:
await connectNodes(node0, nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Because of the hack-ish dValues, the peers are added to gossipsub but not GRAFTed to mesh
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
checkUntilTimeout:
node0.mesh.getOrDefault(topic).len == numberOfNodes - 1
# Stop both nodes in order to prevent GRAFT message to be sent by heartbeat
await n0.stop()
await n1.stop()
# Second part of the hack
# Set values so peers can be GRAFTed
let newDValues =
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(1)))
n0.parameters.applyDValues(newDValues)
n1.parameters.applyDValues(newDValues)
# When a GRAFT message is sent
let p0 = n1.getOrCreatePeer(n0.peerInfo.peerId, @[GossipSubCodec_12])
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
n1.broadcast(@[p0], RPCMsg(control: some(graftMessage)), isHighPriority = false)
await waitForPeersInTable(
nodes, topic, newSeqWith(numberOfNodes, 1), PeerTableType.Mesh
)
# Then the peers are GRAFTed
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
asyncTest "Received GRAFT for non-subscribed topic":
# Given 2 nodes
let
topic = "foo"
graftMessage = ControlMessage(graft: @[ControlGraft(topicID: topic)])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And only node0 subscribes to the topic
nodes[0].subscribe(topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When a GRAFT message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is not GRAFTed
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
asyncTest "PRUNE messages correctly removes peers from mesh":
# Given 2 nodes
let
topic = "foo"
backoff = 1
pruneMessage = ControlMessage(
prune: @[ControlPrune(topicID: topic, peers: @[], backoff: uint64(backoff))]
# When DValues of Node0 are updated to lower than initial dValues
const newDValues = some(
DValues(
dLow: some(2),
dHigh: some(4),
d: some(3),
dLazy: some(3),
dScore: some(2),
dOut: some(2),
)
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
)
node0.parameters.applyDValues(newDValues)
startNodesAndDeferStop(nodes)
# Then Node0 mesh is pruned to newDValues.dHigh length
# And pruned peers have applied pruneBackoff
checkUntilTimeout:
node0.mesh.getOrDefault(topic).len == newDValues.get.dHigh.get
# And the nodes are connected
await connectNodesStar(nodes)
# And both subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When DValues of Node0 are updated back to the initial dValues
node0.parameters.applyDValues(dValues)
# Waiting more than one heartbeat (60ms) and less than pruneBackoff (1s)
await sleepAsync(pruneBackoff.div(2))
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
node0.mesh.getOrDefault(topic).len == newDValues.get.dHigh.get
# When a PRUNE message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
await waitForHeartbeat()
# When pruneBackoff period is done
await sleepAsync(pruneBackoff)
# Then the peer is PRUNEd
# Then on the next heartbeat mesh is rebalanced and peers are regrafted to the initial d value
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When another PRUNE message is sent
let p0 = n1.getOrCreatePeer(n0.peerInfo.peerId, @[GossipSubCodec_12])
n1.broadcast(@[p0], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is PRUNEd
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
asyncTest "Received PRUNE for non-subscribed topic":
# Given 2 nodes
let
topic = "foo"
pruneMessage =
ControlMessage(prune: @[ControlPrune(topicID: topic, peers: @[], backoff: 1)])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And only node0 subscribes to the topic
n0.subscribe(topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When a PRUNE message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is not PRUNEd
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
node0.mesh.getOrDefault(topic).len == dValues.get.d.get

View File

@@ -0,0 +1,302 @@
{.used.}
import std/[sequtils]
import stew/byteutils
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, floodsub]
import ../../libp2p/protocols/pubsub/rpc/[messages, message]
import ../helpers
suite "GossipSub Message Cache":
teardown:
checkTrackers()
asyncTest "Received messages are added to the message cache":
const
numberOfNodes = 2
topic = "foobar"
let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When Node0 publishes a message to the topic
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
# Then Node1 receives the message and saves it in the cache
checkUntilTimeout:
nodes[1].mcache.window(topic).len == 1
asyncTest "Message cache history shifts on heartbeat and is cleared on shift":
const
numberOfNodes = 2
topic = "foobar"
historyGossip = 3 # mcache window
historyLength = 5
let nodes = generateNodes(
numberOfNodes,
gossip = true,
historyGossip = historyGossip,
historyLength = historyLength,
)
.toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When Node0 publishes a message to the topic
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
# Then Node1 receives the message and saves it in the cache
checkUntilTimeout:
nodes[1].mcache.window(topic).len == 1
let messageId = nodes[1].mcache.window(topic).toSeq()[0]
# When heartbeat happens, circular history shifts to the next position
# Waiting for 5(historyLength) heartbeats
await waitForHeartbeat(historyLength)
# Then history is cleared when the position with the message is reached again
# And message is removed
check:
nodes[1].mcache.window(topic).len == 0
not nodes[1].mcache.contains(messageId)
asyncTest "IHave propagation capped by history window":
# 3 Nodes, Node 0 <==> Node 1 and Node 0 <==> Node 2
# due to DValues: 1 peer in mesh and 1 peer only in gossip of Node 0
const
numberOfNodes = 3
topic = "foobar"
historyGossip = 3 # mcache window
historyLength = 5
let nodes = generateNodes(
numberOfNodes,
gossip = true,
historyGossip = historyGossip,
historyLength = historyLength,
dValues =
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
)
.toGossipSub()
startNodesAndDeferStop(nodes)
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Add observer to NodeOutsideMesh for received IHave messages
var (receivedIHaves, checkForIHaves) = createCheckForIHave()
let peerOutsideMesh =
nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)
nodeOutsideMesh.addOnRecvObserver(checkForIHaves)
# When NodeInsideMesh sends a message to the topic
let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
tryPublish await nodeInsideMesh.publish(topic, newSeq[byte](1000)), 1
# On each heartbeat, Node0 retrieves messages in its mcache and sends IHave to NodeOutsideMesh
# On heartbeat, Node0 mcache advances to the next position (rotating the message cache window)
# Node0 will gossip about messages from the last few positions, depending on the mcache window size (historyGossip)
# By waiting more than 'historyGossip' (2x3 = 6) heartbeats, we ensure Node0 does not send IHave messages for messages older than the window size
await waitForHeartbeat(2 * historyGossip)
# Then nodeOutsideMesh receives 3 (historyGossip) IHave messages
check:
receivedIHaves[].len == historyGossip
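# Editor's sketch (toy model, not the real MCache API): why exactly historyGossip
# IHave messages arrive above. Only the newest historyGossip slots of the
# historyLength-slot cache are advertised; each heartbeat shift pushes a message
# one slot further back, so it is gossiped about at most historyGossip times and
# then stays cached (without being advertised) until it falls off the full history.
import std/sequtils

const
  historyGossip = 3
  historyLength = 5

type ToyCache = object
  history: array[historyLength, seq[string]]

proc put(c: var ToyCache, msgId: string) =
  c.history[0].add(msgId)

proc shift(c: var ToyCache) =
  for i in countdown(historyLength - 1, 1):
    c.history[i] = c.history[i - 1]
  c.history[0] = @[]

proc gossipWindow(c: ToyCache): seq[string] =
  for i in 0 ..< historyGossip:
    result.add(c.history[i])

when isMainModule:
  var c: ToyCache
  c.put("msg-1")
  var advertised = 0
  for _ in 0 ..< historyLength:
    if "msg-1" in c.gossipWindow():
      inc advertised
    c.shift()
  assert advertised == historyGossip           # gossiped exactly historyGossip times
  assert c.history.allIt("msg-1" notin it)     # then pruned from the cache entirely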
asyncTest "Message is retrieved from cache when handling IWant and relayed to a peer outside the mesh":
# 3 Nodes, Node 0 <==> Node 1 and Node 0 <==> Node 2
# due to DValues: 1 peer in mesh and 1 peer only in gossip of Node 0
const
numberOfNodes = 3
topic = "foobar"
historyGossip = 3 # mcache window
historyLength = 5
let nodes = generateNodes(
numberOfNodes,
gossip = true,
historyGossip = historyGossip,
historyLength = historyLength,
dValues =
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
)
.toGossipSub()
startNodesAndDeferStop(nodes)
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Add observer to Node0 for received IWant messages
var (receivedIWantsNode0, checkForIWant) = createCheckForIWant()
nodes[0].addOnRecvObserver(checkForIWant)
# Find Peer outside of mesh to which Node 0 will relay the received message
let peerOutsideMesh =
nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)
# Add observer to NodeOutsideMesh for received messages
var (receivedMessagesNodeOutsideMesh, checkForMessage) = createCheckForMessages()
nodeOutsideMesh.addOnRecvObserver(checkForMessage)
# When NodeInsideMesh publishes a message to the topic
let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
tryPublish await nodeInsideMesh.publish(topic, "Hello!".toBytes()), 1
# Then Node0 receives the message from NodeInsideMesh and saves it in its cache
checkUntilTimeout:
nodes[0].mcache.window(topic).len == 1
let messageId = nodes[0].mcache.window(topic).toSeq()[0]
# When Node0 sends an IHave message to NodeOutsideMesh during a heartbeat
# Then NodeOutsideMesh responds with an IWant message to Node0
checkUntilTimeout:
receivedIWantsNode0[].anyIt(messageId in it.messageIDs)
# When Node0 handles the IWant message, it retrieves the message from its message cache using the MessageId
# Then Node0 relays the original message to NodeOutsideMesh
checkUntilTimeout:
messageId in
receivedMessagesNodeOutsideMesh[].mapIt(
nodeOutsideMesh.msgIdProvider(it).value()
)
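# Editor's sketch (hypothetical helper, not the library's handleIWant): the core
# of the step above is a cache lookup keyed by MessageId - only messages still
# present in the message cache can be relayed back to the peer that sent the IWANT.
import std/[tables, hashes]

type ToyMessageId = seq[byte]

proc repliesForIWant(
    cache: Table[ToyMessageId, string], wanted: seq[ToyMessageId]
): seq[string] =
  for id in wanted:
    if id in cache:               # unknown or already-pruned ids are simply skipped
      result.add(cache[id])

when isMainModule:
  var cache = initTable[ToyMessageId, string]()
  cache[@[1'u8, 2, 3]] = "Hello!"
  assert repliesForIWant(cache, @[@[1'u8, 2, 3], @[9'u8]]) == @["Hello!"]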
asyncTest "Published and received messages are added to the seen cache":
const
numberOfNodes = 2
topic = "foobar"
let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When Node0 publishes a message to the topic
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
# Then Node1 receives the message
# Get messageId from mcache
checkUntilTimeout:
nodes[1].mcache.window(topic).len == 1
let messageId = nodes[1].mcache.window(topic).toSeq()[0]
# And both nodes save it in their seen cache
# Node0 when publishing, Node1 when receiving
check:
nodes[0].hasSeen(nodes[0].salt(messageId))
nodes[1].hasSeen(nodes[1].salt(messageId))
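# Editor's sketch (illustrative; proc names are hypothetical): the seen cache
# stores salted message ids - the id hashed together with a per-node random salt -
# so duplicate deliveries are detected locally while different nodes never key
# their caches identically for the same message.
import std/[hashes, sets]

proc saltedId(nodeSalt: int, messageId: seq[byte]): Hash =
  var h: Hash = 0
  h = h !& hash(nodeSalt)
  h = h !& hash(messageId)
  !$h

when isMainModule:
  var seen = initHashSet[Hash]()
  let msgId = @[0'u8, 1, 2, 3]
  seen.incl(saltedId(12345, msgId))        # recorded on publish / first receive
  assert saltedId(12345, msgId) in seen    # the duplicate would be dropped
  assert saltedId(67890, msgId) notin seen # another node's salt keys differently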
asyncTest "Received messages are dropped if they are already in seen cache":
# 3 Nodes, Node 0 <==> Node 1 and Node 2 not connected and not subscribed yet
const
numberOfNodes = 3
topic = "foobar"
let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodes(nodes[0], nodes[1])
nodes[0].subscribe(topic, voidTopicHandler)
nodes[1].subscribe(topic, voidTopicHandler)
await waitForHeartbeat()
# When Node0 publishes two messages to the topic
tryPublish await nodes[0].publish(topic, "Hello".toBytes()), 1
tryPublish await nodes[0].publish(topic, "World".toBytes()), 1
# Then Node1 receives the messages
# Getting messageIds from mcache
checkUntilTimeout:
nodes[1].mcache.window(topic).len == 2
let messageId1 = nodes[1].mcache.window(topic).toSeq()[0]
let messageId2 = nodes[1].mcache.window(topic).toSeq()[1]
# And Node2 doesn't receive the messages (it's not connected nor subscribed yet)
check:
nodes[2].mcache.window(topic).len == 0
# When Node2 connects with Node0 and subscribes to the topic
await connectNodes(nodes[0], nodes[2])
nodes[2].subscribe(topic, voidTopicHandler)
await waitForHeartbeat()
# And messageIds are added to node0PeerNode2's sentIHaves to allow processing the IWant
let node0PeerNode2 = nodes[0].getPeerByPeerId(topic, nodes[2].peerInfo.peerId)
node0PeerNode2.sentIHaves[0].incl(messageId1)
node0PeerNode2.sentIHaves[0].incl(messageId2)
# And messageId1 is added to seen messages cache of Node2
check:
not nodes[2].addSeen(nodes[2].salt(messageId1))
# And Node2 sends IWant to Node0 requesting both messages
let iWantMessage =
ControlMessage(iwant: @[ControlIWant(messageIDs: @[messageId1, messageId2])])
let node2PeerNode0 = nodes[2].getPeerByPeerId(topic, nodes[0].peerInfo.peerId)
nodes[2].broadcast(
@[node2PeerNode0], RPCMsg(control: some(iWantMessage)), isHighPriority = false
)
await waitForHeartbeat()
# Then Node2 receives only messageId2 and messageId1 is dropped
check:
nodes[2].mcache.window(topic).len == 1
nodes[2].mcache.window(topic).toSeq()[0] == messageId2
asyncTest "Published messages are dropped if they are already in seen cache":
func customMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
ok("fixed_message_id_string".toBytes())
const
numberOfNodes = 2
topic = "foobar"
let nodes = generateNodes(
numberOfNodes, gossip = true, msgIdProvider = customMsgIdProvider
)
.toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes.subscribeAllNodes(topic, voidTopicHandler)
await waitForHeartbeat()
# Given Node0 has msgId already in seen cache
let data = "Hello".toBytes()
let msg = Message.init(
some(nodes[0].peerInfo), data, topic, some(nodes[0].msgSeqno), nodes[0].sign
)
let msgId = nodes[0].msgIdProvider(msg)
check:
not nodes[0].addSeen(nodes[0].salt(msgId.value()))
# When Node0 publishes the message to the topic
discard await nodes[0].publish(topic, data)
await waitForHeartbeat()
# Then Node1 doesn't receive the message
check:
nodes[1].mcache.window(topic).len == 0

View File

@@ -247,7 +247,7 @@ suite "GossipSub Message Handling":
let
numberOfNodes = 3
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true)
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
@@ -262,9 +262,9 @@ suite "GossipSub Message Handling":
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, @[handler0, handler1, handler2])
await waitForPeersInTable(
nodes, topic, newSeqWith(numberOfNodes, 2), PeerTableType.Mesh
)
checkUntilTimeout:
nodes.allIt(it.mesh.getOrDefault(topic).len == numberOfNodes - 1)
# When node 0 sends a message
check (await nodes[0].publish(topic, "Hello!".toBytes())) == 2
@@ -424,9 +424,6 @@ suite "GossipSub Message Handling":
sendCounter = 0
validatedCounter = 0
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
proc onRecv(peer: PubSubPeer, msgs: var RPCMsg) =
inc recvCounter
@@ -446,8 +443,8 @@ suite "GossipSub Message Handling":
nodes[0].addObserver(obs0)
nodes[1].addObserver(obs1)
nodes[1].subscribe("foo", handler)
nodes[1].subscribe("bar", handler)
nodes[1].subscribe("foo", voidTopicHandler)
nodes[1].subscribe("bar", voidTopicHandler)
proc validator(
topic: string, message: Message
@@ -467,7 +464,7 @@ suite "GossipSub Message Handling":
# Send message that will be rejected by the receiver's validator
tryPublish await nodes[0].publish("bar", "Hello!".toBytes()), 1
check:
checkUntilTimeout:
recvCounter == 2
validatedCounter == 1
sendCounter == 2
@@ -842,34 +839,3 @@ suite "GossipSub Message Handling":
publishResult == 0
results[0].isPending()
results[1].isPending()
# Check that ihave/iwant/graft/prune/idontwant messages are parsed correctly
# Check that values before & after decoding are equal, using the protoc cmd tool output as reference
asyncTest "ControlMessage RPCMsg encoding and decoding":
let id: seq[byte] = @[123]
let message = RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: @[id])],
iwant: @[ControlIWant(messageIDs: @[id])],
graft: @[ControlGraft(topicID: "foobar")],
prune: @[ControlPrune(topicID: "foobar", backoff: 10.uint64)],
idontwant: @[ControlIWant(messageIDs: @[id])],
)
)
)
#data encoded using protoc cmd tool
let expectedEncoded: seq[byte] =
@[
26, 45, 10, 11, 10, 6, 102, 111, 111, 98, 97, 114, 18, 1, 123, 18, 3, 10, 1,
123, 26, 8, 10, 6, 102, 111, 111, 98, 97, 114, 34, 10, 10, 6, 102, 111, 111, 98,
97, 114, 24, 10, 42, 3, 10, 1, 123,
]
let actualEncoded = encodeRpcMsg(message, true)
check:
actualEncoded == expectedEncoded
let actualDecoded = decodeRpcMsg(expectedEncoded).value
check:
actualDecoded == message

View File

@@ -0,0 +1,395 @@
{.used.}
import unittest2
import chronos
import results
import ../../libp2p/protocols/pubsub/gossipsub/[types]
import ../../libp2p/protocols/pubsub/[gossipsub, pubsubpeer]
import ../../libp2p/[peerid, multiaddress]
suite "GossipSubParams validation":
proc newDefaultValidParams(): GossipSubParams =
result = GossipSubParams.init()
test "default parameters are valid":
var params = newDefaultValidParams()
check params.validateParameters().isOk()
test "dOut fails when equal to dLow":
const errorMessage =
"gossipsub: dOut parameter error, Number of outbound connections to keep in the mesh. Must be less than D_lo and at most D/2"
var params = newDefaultValidParams()
params.dLow = 4
params.d = 8
params.dOut = params.dLow
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "dOut fails when bigger than d/2":
const errorMessage =
"gossipsub: dOut parameter error, Number of outbound connections to keep in the mesh. Must be less than D_lo and at most D/2"
var params = newDefaultValidParams()
params.dLow = 4
params.d = 5
params.dOut = 3
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "dOut succeeds when less than dLow and equals d/2":
var params = newDefaultValidParams()
params.dLow = 4
params.d = 6
params.dOut = 3
check params.validateParameters().isOk()
test "gossipThreshold fails when zero":
const errorMessage = "gossipsub: gossipThreshold parameter error, Must be < 0"
var params = newDefaultValidParams()
params.gossipThreshold = 0.0
var res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "gossipThreshold succeeds when negative":
var params = newDefaultValidParams()
params.gossipThreshold = -0.1
check params.validateParameters().isOk()
test "unsubscribeBackoff fails when zero":
const errorMessage =
"gossipsub: unsubscribeBackoff parameter error, Must be > 0 seconds"
var params = newDefaultValidParams()
params.unsubscribeBackoff = 0.seconds
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "unsubscribeBackoff succeeds when positive":
var params = newDefaultValidParams()
params.unsubscribeBackoff = 1.seconds
check params.validateParameters().isOk()
test "publishThreshold fails when equal to gossipThreshold":
const errorMessage =
"gossipsub: publishThreshold parameter error, Must be < gossipThreshold"
var params = newDefaultValidParams()
params.publishThreshold = params.gossipThreshold
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "publishThreshold succeeds when less than gossipThreshold":
var params = newDefaultValidParams()
params.publishThreshold = params.gossipThreshold - 1.0
check params.validateParameters().isOk()
test "graylistThreshold fails when equal to publishThreshold":
const errorMessage =
"gossipsub: graylistThreshold parameter error, Must be < publishThreshold"
var params = newDefaultValidParams()
params.graylistThreshold = params.publishThreshold
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "graylistThreshold succeeds when less than publishThreshold":
var params = newDefaultValidParams()
params.graylistThreshold = params.publishThreshold - 1.0
check params.validateParameters().isOk()
test "acceptPXThreshold fails when negative":
const errorMessage = "gossipsub: acceptPXThreshold parameter error, Must be >= 0"
var params = newDefaultValidParams()
params.acceptPXThreshold = -0.1
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "acceptPXThreshold succeeds when zero":
var params = newDefaultValidParams()
params.acceptPXThreshold = 0.0
check params.validateParameters().isOk()
test "opportunisticGraftThreshold fails when negative":
const errorMessage =
"gossipsub: opportunisticGraftThreshold parameter error, Must be >= 0"
var params = newDefaultValidParams()
params.opportunisticGraftThreshold = -0.1
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "opportunisticGraftThreshold succeeds when zero":
var params = newDefaultValidParams()
params.opportunisticGraftThreshold = 0.0
check params.validateParameters().isOk()
test "decayToZero fails when greater than 0.5":
const errorMessage =
"gossipsub: decayToZero parameter error, Should be close to 0.0"
var params = newDefaultValidParams()
params.decayToZero = 0.51
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "decayToZero fails when zero":
const errorMessage =
"gossipsub: decayToZero parameter error, Should be close to 0.0"
var params = newDefaultValidParams()
params.decayToZero = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "decayToZero succeeds when exactly 0.5":
var params = newDefaultValidParams()
params.decayToZero = 0.5
check params.validateParameters().isOk()
test "decayToZero succeeds when small positive value":
var params = newDefaultValidParams()
params.decayToZero = 0.00001
check params.validateParameters().isOk()
test "appSpecificWeight fails when negative":
const errorMessage =
"gossipsub: appSpecificWeight parameter error, Must be positive"
var params = newDefaultValidParams()
params.appSpecificWeight = -0.1
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "appSpecificWeight succeeds when zero":
var params = newDefaultValidParams()
params.appSpecificWeight = 0.0
check params.validateParameters().isOk()
test "ipColocationFactorWeight fails when positive":
const errorMessage =
"gossipsub: ipColocationFactorWeight parameter error, Must be negative or 0"
var params = newDefaultValidParams()
params.ipColocationFactorWeight = 0.1
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "ipColocationFactorWeight succeeds when zero":
var params = newDefaultValidParams()
params.ipColocationFactorWeight = 0.0
check params.validateParameters().isOk()
test "ipColocationFactorWeight succeeds when negative":
var params = newDefaultValidParams()
params.ipColocationFactorWeight = -10.0
check params.validateParameters().isOk()
test "ipColocationFactorThreshold fails when less than 1":
const errorMessage =
"gossipsub: ipColocationFactorThreshold parameter error, Must be at least 1"
var params = newDefaultValidParams()
params.ipColocationFactorThreshold = 0.9
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "ipColocationFactorThreshold succeeds when exactly 1":
var params = newDefaultValidParams()
params.ipColocationFactorThreshold = 1.0
check params.validateParameters().isOk()
test "behaviourPenaltyWeight fails when zero":
const errorMessage =
"gossipsub: behaviourPenaltyWeight parameter error, Must be negative"
var params = newDefaultValidParams()
params.behaviourPenaltyWeight = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "behaviourPenaltyWeight succeeds when negative":
var params = newDefaultValidParams()
params.behaviourPenaltyWeight = -0.0001
check params.validateParameters().isOk()
test "behaviourPenaltyDecay fails when negative":
const errorMessage =
"gossipsub: behaviourPenaltyDecay parameter error, Must be between 0 and 1"
var params = newDefaultValidParams()
params.behaviourPenaltyDecay = -0.1
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "behaviourPenaltyDecay fails when equal to 1":
const errorMessage =
"gossipsub: behaviourPenaltyDecay parameter error, Must be between 0 and 1"
var params = newDefaultValidParams()
params.behaviourPenaltyDecay = 1.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "behaviourPenaltyDecay succeeds when zero":
var params = newDefaultValidParams()
params.behaviourPenaltyDecay = 0.0
check params.validateParameters().isOk()
test "behaviourPenaltyDecay succeeds when between 0 and 1":
var params = newDefaultValidParams()
params.behaviourPenaltyDecay = 0.5
check params.validateParameters().isOk()
test "maxNumElementsInNonPriorityQueue fails when zero":
const errorMessage =
"gossipsub: maxNumElementsInNonPriorityQueue parameter error, Must be > 0"
var params = newDefaultValidParams()
params.maxNumElementsInNonPriorityQueue = 0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "maxNumElementsInNonPriorityQueue succeeds when positive":
var params = newDefaultValidParams()
params.maxNumElementsInNonPriorityQueue = 1
check params.validateParameters().isOk()
suite "TopicParams validation":
proc newDefaultValidTopicParams(): TopicParams =
result = TopicParams.init()
test "default topic parameters are valid":
var params = newDefaultValidTopicParams()
check params.validateParameters().isOk()
test "timeInMeshWeight fails when zero":
const errorMessage =
"gossipsub: timeInMeshWeight parameter error, Must be a small positive value"
var params = newDefaultValidTopicParams()
params.timeInMeshWeight = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "timeInMeshWeight fails when greater than 1":
const errorMessage =
"gossipsub: timeInMeshWeight parameter error, Must be a small positive value"
var params = newDefaultValidTopicParams()
params.timeInMeshWeight = 1.1
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "timeInMeshWeight succeeds when exactly 1":
var params = newDefaultValidTopicParams()
params.timeInMeshWeight = 1.0
check params.validateParameters().isOk()
test "timeInMeshWeight succeeds when small positive value":
var params = newDefaultValidTopicParams()
params.timeInMeshWeight = 0.01
check params.validateParameters().isOk()
test "timeInMeshCap fails when zero":
const errorMessage =
"gossipsub: timeInMeshCap parameter error, Should be a positive value"
var params = newDefaultValidTopicParams()
params.timeInMeshCap = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "timeInMeshCap succeeds when positive":
var params = newDefaultValidTopicParams()
params.timeInMeshCap = 10.0
check params.validateParameters().isOk()
test "firstMessageDeliveriesWeight fails when zero":
const errorMessage =
"gossipsub: firstMessageDeliveriesWeight parameter error, Should be a positive value"
var params = newDefaultValidTopicParams()
params.firstMessageDeliveriesWeight = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "firstMessageDeliveriesWeight succeeds when positive":
var params = newDefaultValidTopicParams()
params.firstMessageDeliveriesWeight = 1.0
check params.validateParameters().isOk()
test "meshMessageDeliveriesWeight fails when zero":
const errorMessage =
"gossipsub: meshMessageDeliveriesWeight parameter error, Should be a negative value"
var params = newDefaultValidTopicParams()
params.meshMessageDeliveriesWeight = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "meshMessageDeliveriesWeight succeeds when negative":
var params = newDefaultValidTopicParams()
params.meshMessageDeliveriesWeight = -1.0
check params.validateParameters().isOk()
test "meshMessageDeliveriesThreshold fails when zero":
const errorMessage =
"gossipsub: meshMessageDeliveriesThreshold parameter error, Should be a positive value"
var params = newDefaultValidTopicParams()
params.meshMessageDeliveriesThreshold = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "meshMessageDeliveriesThreshold succeeds when positive":
var params = newDefaultValidTopicParams()
params.meshMessageDeliveriesThreshold = 5.0
check params.validateParameters().isOk()
test "meshMessageDeliveriesCap fails when less than threshold":
const errorMessage =
"gossipsub: meshMessageDeliveriesCap parameter error, Should be >= meshMessageDeliveriesThreshold"
var params = newDefaultValidTopicParams()
params.meshMessageDeliveriesThreshold = 10.0
params.meshMessageDeliveriesCap = 9.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "meshMessageDeliveriesCap succeeds when equal to threshold":
var params = newDefaultValidTopicParams()
params.meshMessageDeliveriesThreshold = 10.0
params.meshMessageDeliveriesCap = 10.0
check params.validateParameters().isOk()
test "meshFailurePenaltyWeight fails when zero":
const errorMessage =
"gossipsub: meshFailurePenaltyWeight parameter error, Should be a negative value"
var params = newDefaultValidTopicParams()
params.meshFailurePenaltyWeight = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "meshFailurePenaltyWeight succeeds when negative":
var params = newDefaultValidTopicParams()
params.meshFailurePenaltyWeight = -1.0
check params.validateParameters().isOk()
test "invalidMessageDeliveriesWeight fails when zero":
const errorMessage =
"gossipsub: invalidMessageDeliveriesWeight parameter error, Should be a negative value"
var params = newDefaultValidTopicParams()
params.invalidMessageDeliveriesWeight = 0.0
let res = params.validateParameters()
check res.isErr()
check res.error == errorMessage
test "invalidMessageDeliveriesWeight succeeds when negative":
var params = newDefaultValidTopicParams()
params.invalidMessageDeliveriesWeight = -1.0
check params.validateParameters().isOk()

View File

@@ -11,7 +11,6 @@
import std/[sequtils]
import stew/byteutils
import metrics
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
import ../../libp2p/protocols/pubsub/rpc/[messages]
@@ -47,7 +46,7 @@ suite "GossipSub Scoring":
# also ensure we cleanup properly the peersInIP table
gossipSub.peersInIP.len == 0
asyncTest "flood publish to all peers with score above threshold, regardless of subscription":
asyncTest "Flood publish to all peers with score above threshold, regardless of subscription":
let
numberOfNodes = 3
topic = "foobar"
@@ -75,8 +74,7 @@ suite "GossipSub Scoring":
# When node 0 publishes a message to topic "foo"
let message = "Hello!".toBytes()
check (await nodes[0].publish(topic, message)) == 1
await waitForHeartbeat(2)
tryPublish await nodes[0].publish(topic, message), 1
# Then only node 1 should receive the message
let results = await waitForStates(@[handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
@@ -84,93 +82,106 @@ suite "GossipSub Scoring":
results[0].isCompleted(true)
results[1].isPending()
proc initializeGossipTest(): Future[(seq[PubSub], GossipSub, GossipSub)] {.async.} =
let nodes =
generateNodes(2, gossip = true, overheadRateLimit = Opt.some((20, 1.millis)))
asyncTest "Should not rate limit decodable messages below the size allowed":
const topic = "foobar"
let
nodes = generateNodes(
2,
gossip = true,
overheadRateLimit = Opt.some((20, 1.millis)),
verifySignature = false,
# Avoid being disconnected by failing signature verification
)
.toGossipSub()
rateLimitHits = currentRateLimitHits()
await startNodes(nodes)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
proc handle(topic: string, data: seq[byte]) {.async.} =
discard
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
let gossip0 = GossipSub(nodes[0])
let gossip1 = GossipSub(nodes[1])
gossip0.subscribe("foobar", handle)
gossip1.subscribe("foobar", handle)
await waitSubGraph(nodes, "foobar")
# Avoid being disconnected by failing signature verification
gossip0.verifySignature = false
gossip1.verifySignature = false
return (nodes, gossip0, gossip1)
proc currentRateLimitHits(): float64 =
try:
libp2p_gossipsub_peers_rate_limit_hits.valueByName(
"libp2p_gossipsub_peers_rate_limit_hits_total", @["nim-libp2p"]
)
except KeyError:
0
asyncTest "e2e - GossipSub should not rate limit decodable messages below the size allowed":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
gossip0.broadcast(
gossip0.mesh["foobar"],
RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](10))]),
nodes[0].broadcast(
nodes[0].mesh[topic],
RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](10))]),
isHighPriority = true,
)
await waitForHeartbeat()
check currentRateLimitHits() == rateLimitHits
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
check:
currentRateLimitHits() == rateLimitHits
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
gossip0.broadcast(
gossip0.mesh["foobar"],
nodes[1].parameters.disconnectPeerAboveRateLimit = true
nodes[0].broadcast(
nodes[0].mesh["foobar"],
RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](12))]),
isHighPriority = true,
)
await waitForHeartbeat()
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
check currentRateLimitHits() == rateLimitHits
check:
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
currentRateLimitHits() == rateLimitHits
await stopNodes(nodes)
asyncTest "Should rate limit undecodable messages above the size allowed":
const topic = "foobar"
let
nodes = generateNodes(
2,
gossip = true,
overheadRateLimit = Opt.some((20, 1.millis)),
verifySignature = false,
# Avoid being disconnected by failing signature verification
)
.toGossipSub()
rateLimitHits = currentRateLimitHits()
asyncTest "e2e - GossipSub should rate limit undecodable messages above the size allowed":
let rateLimitHits = currentRateLimitHits()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
let (nodes, gossip0, gossip1) = await initializeGossipTest()
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Simulate sending an undecodable message
await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(
await nodes[1].peers[nodes[0].switch.peerInfo.peerId].sendEncoded(
newSeqWith(33, 1.byte), isHighPriority = true
)
await waitForHeartbeat()
check currentRateLimitHits() == rateLimitHits + 1
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
check:
currentRateLimitHits() == rateLimitHits + 1
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(
nodes[1].parameters.disconnectPeerAboveRateLimit = true
await nodes[0].peers[nodes[1].switch.peerInfo.peerId].sendEncoded(
newSeqWith(35, 1.byte), isHighPriority = true
)
checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2
checkUntilTimeout:
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
currentRateLimitHits() == rateLimitHits + 2
await stopNodes(nodes)
asyncTest "Should rate limit decodable messages above the size allowed":
const topic = "foobar"
let
nodes = generateNodes(
2,
gossip = true,
overheadRateLimit = Opt.some((20, 1.millis)),
verifySignature = false,
# Avoid being disconnected by failing signature verification
)
.toGossipSub()
rateLimitHits = currentRateLimitHits()
asyncTest "e2e - GossipSub should rate limit decodable messages above the size allowed":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
let msg = RPCMsg(
control: some(
@@ -178,7 +189,7 @@ suite "GossipSub Scoring":
prune:
@[
ControlPrune(
topicID: "foobar",
topicID: topic,
peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))],
backoff: 123'u64,
)
@@ -186,21 +197,22 @@ suite "GossipSub Scoring":
)
)
)
gossip0.broadcast(gossip0.mesh["foobar"], msg, isHighPriority = true)
nodes[0].broadcast(nodes[0].mesh[topic], msg, isHighPriority = true)
await waitForHeartbeat()
check currentRateLimitHits() == rateLimitHits + 1
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
check:
currentRateLimitHits() == rateLimitHits + 1
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
nodes[1].parameters.disconnectPeerAboveRateLimit = true
let msg2 = RPCMsg(
control: some(
ControlMessage(
prune:
@[
ControlPrune(
topicID: "foobar",
topicID: topic,
peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))],
backoff: 123'u64,
)
@@ -208,202 +220,187 @@ suite "GossipSub Scoring":
)
)
)
gossip0.broadcast(gossip0.mesh["foobar"], msg2, isHighPriority = true)
nodes[0].broadcast(nodes[0].mesh[topic], msg2, isHighPriority = true)
checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2
checkUntilTimeout:
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
currentRateLimitHits() == rateLimitHits + 2
await stopNodes(nodes)
asyncTest "Should rate limit invalid messages above the size allowed":
const topic = "foobar"
let
nodes = generateNodes(
2,
gossip = true,
overheadRateLimit = Opt.some((20, 1.millis)),
verifySignature = false,
# Avoid being disconnected by failing signature verification
)
.toGossipSub()
rateLimitHits = currentRateLimitHits()
asyncTest "e2e - GossipSub should rate limit invalid messages above the size allowed":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
-      let topic = "foobar"
       proc execValidator(
           topic: string, message: messages.Message
-      ): Future[ValidationResult] {.async: (raw: true).} =
-        let res = newFuture[ValidationResult]()
-        res.complete(ValidationResult.Reject)
-        res
+      ): Future[ValidationResult] {.async.} =
+        return ValidationResult.Reject
-      gossip0.addValidator(topic, execValidator)
-      gossip1.addValidator(topic, execValidator)
+      nodes[0].addValidator(topic, execValidator)
+      nodes[1].addValidator(topic, execValidator)
let msg = RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](40))])
gossip0.broadcast(gossip0.mesh[topic], msg, isHighPriority = true)
nodes[0].broadcast(nodes[0].mesh[topic], msg, isHighPriority = true)
await waitForHeartbeat()
check currentRateLimitHits() == rateLimitHits + 1
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
check:
currentRateLimitHits() == rateLimitHits + 1
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
-      gossip1.parameters.disconnectPeerAboveRateLimit = true
-      gossip0.broadcast(
-        gossip0.mesh[topic],
+      nodes[1].parameters.disconnectPeerAboveRateLimit = true
+      nodes[0].broadcast(
+        nodes[0].mesh[topic],
RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](35))]),
isHighPriority = true,
)
checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2
checkUntilTimeout:
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
currentRateLimitHits() == rateLimitHits + 2
await stopNodes(nodes)
asyncTest "GossipSub directPeers: don't kick direct peer with low score":
let nodes = generateNodes(2, gossip = true)
asyncTest "DirectPeers: don't kick direct peer with low score":
const topic = "foobar"
let nodes = generateNodes(2, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await nodes.addDirectPeerStar()
await GossipSub(nodes[0]).addDirectPeer(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
await GossipSub(nodes[1]).addDirectPeer(
nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
)
nodes[1].parameters.disconnectBadPeers = true
nodes[1].parameters.graylistThreshold = 100000
GossipSub(nodes[1]).parameters.disconnectBadPeers = true
GossipSub(nodes[1]).parameters.graylistThreshold = 100000
var (handlerFut, handler) = createCompleteHandler()
nodes[0].subscribe(topic, voidTopicHandler)
nodes[1].subscribe(topic, handler)
await waitForHeartbeat()
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
nodes[1].updateScores()
nodes[0].subscribe("foobar", handler)
nodes[1].subscribe("foobar", handler)
tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1
await handlerFut
GossipSub(nodes[1]).updateScores()
# peer shouldn't be in our mesh
check:
GossipSub(nodes[1]).peerStats[nodes[0].switch.peerInfo.peerId].score <
GossipSub(nodes[1]).parameters.graylistThreshold
GossipSub(nodes[1]).updateScores()
topic notin nodes[1].mesh
nodes[1].peerStats[nodes[0].switch.peerInfo.peerId].score <
nodes[1].parameters.graylistThreshold
handlerFut = newFuture[void]()
tryPublish await nodes[0].publish("foobar", toBytes("hellow2")), 1
tryPublish await nodes[0].publish(topic, toBytes("hellow")), 1
# Without directPeers, this would fail
await handlerFut.wait(1.seconds)
var futResult = await waitForState(handlerFut)
check:
futResult.isCompleted(true)
asyncTest "GossipSub peers disconnections mechanics":
var runs = 10
let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
asyncTest "Peers disconnections mechanics":
const
numberOfNodes = 10
topic = "foobar"
let nodes =
generateNodes(numberOfNodes, gossip = true, triggerSelf = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
var seen: Table[string, int]
var seenFut = newFuture[void]()
for i in 0 ..< nodes.len:
for i in 0 ..< numberOfNodes:
let dialer = nodes[i]
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async.} =
handler = proc(topicName: string, data: seq[byte]) {.async.} =
seen.mgetOrPut(peerName, 0).inc()
check topic == "foobar"
if not seenFut.finished() and seen.len >= runs:
check topicName == topic
if not seenFut.finished() and seen.len >= numberOfNodes:
seenFut.complete()
dialer.subscribe("foobar", handler)
dialer.subscribe(topic, handler)
await waitSubGraph(nodes, "foobar")
await waitSubGraph(nodes, topic)
# ensure peer stats are stored properly and kept properly
check:
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
nodes[0].peerStats.len == numberOfNodes - 1 # minus self
tryPublish await wait(
nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
1.minutes,
), 1, 5.seconds, 3.minutes
tryPublish await nodes[0].publish(topic, toBytes("hello")), 1
await wait(seenFut, 5.minutes)
await seenFut.wait(2.seconds)
check:
seen.len >= runs
seen.len >= numberOfNodes
for k, v in seen.pairs:
check:
v >= 1
for node in nodes:
var gossip = GossipSub(node)
check:
"foobar" in gossip.gossipsub
gossip.fanout.len == 0
gossip.mesh["foobar"].len > 0
topic in node.gossipsub
node.fanout.len == 0
node.mesh[topic].len > 0
# Removing some subscriptions
for i in 0 ..< runs:
for i in 0 ..< numberOfNodes:
if i mod 3 != 0:
nodes[i].unsubscribeAll("foobar")
nodes[i].unsubscribeAll(topic)
# Waiting 2 heartbeats
for _ in 0 .. 1:
let evnt = newAsyncEvent()
GossipSub(nodes[0]).heartbeatEvents &= evnt
await evnt.wait()
await nodes[0].waitForHeartbeatByEvent(2)
# ensure peer stats are stored properly and kept properly
check:
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
nodes[0].peerStats.len == numberOfNodes - 1 # minus self
# Adding again subscriptions
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
for i in 0 ..< runs:
for i in 0 ..< numberOfNodes:
if i mod 3 != 0:
nodes[i].subscribe("foobar", handler)
nodes[i].subscribe(topic, voidTopicHandler)
# Waiting 2 heartbeats
for _ in 0 .. 1:
let evnt = newAsyncEvent()
GossipSub(nodes[0]).heartbeatEvents &= evnt
await evnt.wait()
await nodes[0].waitForHeartbeatByEvent(2)
# ensure peer stats are stored properly and kept properly
check:
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
nodes[0].peerStats.len == numberOfNodes - 1 # minus self
asyncTest "GossipSub scoring - decayInterval":
let nodes = generateNodes(2, gossip = true)
var gossip = GossipSub(nodes[0])
const testDecayInterval = 50.milliseconds
gossip.parameters.decayInterval = testDecayInterval
asyncTest "DecayInterval":
const
topic = "foobar"
decayInterval = 50.milliseconds
let nodes =
generateNodes(2, gossip = true, decayInterval = decayInterval).toGossipSub()
startNodesAndDeferStop(nodes)
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async.} =
handlerFut.complete()
await connectNodesStar(nodes)
nodes[0].subscribe("foobar", handler)
nodes[1].subscribe("foobar", handler)
var (handlerFut, handler) = createCompleteHandler()
nodes[0].subscribe(topic, voidTopicHandler)
nodes[1].subscribe(topic, handler)
tryPublish await nodes[0].publish("foobar", toBytes("hello")), 1
tryPublish await nodes[0].publish(topic, toBytes("hello")), 1
await handlerFut
var futResult = await waitForState(handlerFut)
check:
futResult.isCompleted(true)
-      gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries =
+      nodes[0].peerStats[nodes[1].peerInfo.peerId].topicInfos[topic].meshMessageDeliveries =
         100
-      gossip.topicParams["foobar"].meshMessageDeliveriesDecay = 0.9
+      nodes[0].topicParams[topic].meshMessageDeliveriesDecay = 0.9
       # We should have decayed 5 times, though allowing 4..6
-      await sleepAsync(testDecayInterval * 5)
+      await sleepAsync(decayInterval * 5)
       check:
-        gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries in
+        nodes[0].peerStats[nodes[1].peerInfo.peerId].topicInfos[topic].meshMessageDeliveries in
           50.0 .. 66.0
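      # Rough arithmetic behind the 50.0 .. 66.0 window, assuming the value starts at 100 and
      # decays by meshMessageDeliveriesDecay = 0.9 per tick:
      #   100 * 0.9^4 ~= 65.6, 100 * 0.9^5 ~= 59.0, 100 * 0.9^6 ~= 53.1
      # so anywhere from 4 to 6 decay ticks lands inside the asserted range.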


@@ -12,7 +12,9 @@ import
protocols/pubsub/errors,
protocols/pubsub/rpc/message,
protocols/pubsub/rpc/messages,
protocols/pubsub/rpc/protobuf,
]
import ../utils/async_tests
let rng = newRng()
@@ -139,3 +141,34 @@ suite "Message":
)
check byteSize(rpcMsg) == 28 + 32 + 2 + 2 + 38 # Total: 102 bytes
  # Check that ihave/iwant/graft/prune/idontwant control messages are parsed correctly,
  # and that values before and after decoding are equal, using protoc cmd tool output as reference
asyncTest "ControlMessage RPCMsg encoding and decoding":
let id: seq[byte] = @[123]
let message = RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: @[id])],
iwant: @[ControlIWant(messageIDs: @[id])],
graft: @[ControlGraft(topicID: "foobar")],
prune: @[ControlPrune(topicID: "foobar", backoff: 10.uint64)],
idontwant: @[ControlIWant(messageIDs: @[id])],
)
)
)
#data encoded using protoc cmd tool
let expectedEncoded: seq[byte] =
@[
26, 45, 10, 11, 10, 6, 102, 111, 111, 98, 97, 114, 18, 1, 123, 18, 3, 10, 1,
123, 26, 8, 10, 6, 102, 111, 111, 98, 97, 114, 34, 10, 10, 6, 102, 111, 111, 98,
97, 114, 24, 10, 42, 3, 10, 1, 123,
]
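      # A sketch of how the leading bytes decode, assuming the standard pubsub RPC field numbering
      # (control = field 3): 26 = (3 shl 3) or 2, i.e. field 3 with the length-delimited wire type,
      # and 45 is the byte length of the embedded ControlMessage that follows.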
let actualEncoded = encodeRpcMsg(message, true)
check:
actualEncoded == expectedEncoded
let actualDecoded = decodeRpcMsg(expectedEncoded).value
check:
actualDecoded == message


@@ -1,6 +1,7 @@
{.used.}
import
testgossipsubfanout, testgossipsubgossip, testgossipsubmeshmanagement,
testgossipsubmessagehandling, testgossipsubscoring, testfloodsub, testmcache,
testtimedcache, testmessage
testgossipsubcontrolmessages, testgossipsubfanout, testgossipsubcustomconn,
testgossipsubgossip, testgossipsubheartbeat, testgossipsubmeshmanagement,
testgossipsubmessagecache, testgossipsubmessagehandling, testgossipsubparams,
testgossipsubscoring, testfloodsub, testmcache, testtimedcache, testmessage


@@ -4,7 +4,7 @@ const
libp2p_pubsub_verify {.booldefine.} = true
libp2p_pubsub_anonymize {.booldefine.} = false
import hashes, random, tables, sets, sequtils, sugar
import hashes, random, tables, sets, sequtils
import chronos, results, stew/byteutils, chronos/ratelimit
import
../../libp2p/[
@@ -20,6 +20,7 @@ import
]
import ../helpers
import chronicles
import metrics
export builders
@@ -32,6 +33,15 @@ const HEARTBEAT_TIMEOUT* = # TEST_GOSSIPSUB_HEARTBEAT_INTERVAL + 20%
proc waitForHeartbeat*(multiplier: int = 1) {.async.} =
await sleepAsync(HEARTBEAT_TIMEOUT * multiplier)
proc waitForHeartbeat*(timeout: Duration) {.async.} =
await sleepAsync(timeout)
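# Waits for `multiplier` heartbeat rounds on `node` by appending a fresh AsyncEvent to its
# heartbeatEvents and awaiting it once per round.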
proc waitForHeartbeatByEvent*[T: PubSub](node: T, multiplier: int = 1) {.async.} =
for _ in 0 ..< multiplier:
let evnt = newAsyncEvent()
node.heartbeatEvents &= evnt
await evnt.wait()
type
TestGossipSub* = ref object of GossipSub
DValues* = object
@@ -126,7 +136,6 @@ proc setupGossipSubWithPeers*(
proc teardownGossipSub*(gossipSub: TestGossipSub, conns: seq[Connection]) {.async.} =
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
let mid =
@@ -168,6 +177,8 @@ proc generateNodes*(
sign: bool = libp2p_pubsub_sign,
sendSignedPeerRecord = false,
unsubscribeBackoff = 1.seconds,
pruneBackoff = 1.minutes,
fanoutTTL = 1.minutes,
maxMessageSize: int = 1024 * 1024,
enablePX: bool = false,
overheadRateLimit: Opt[tuple[bytes: int, interval: Duration]] =
@@ -178,6 +189,11 @@ proc generateNodes*(
floodPublish: bool = false,
dValues: Option[DValues] = DValues.none(),
gossipFactor: Option[float] = float.none(),
opportunisticGraftThreshold: float = 0.0,
historyLength = 20,
historyGossip = 5,
gossipThreshold = -100.0,
decayInterval = 1.seconds,
): seq[PubSub] =
for i in 0 ..< num:
let switch = newStandardSwitch(
@@ -197,12 +213,17 @@ proc generateNodes*(
var p = GossipSubParams.init()
p.heartbeatInterval = heartbeatInterval
p.floodPublish = floodPublish
p.historyLength = 20
p.historyGossip = 20
p.historyLength = historyLength
p.historyGossip = historyGossip
p.unsubscribeBackoff = unsubscribeBackoff
p.pruneBackoff = pruneBackoff
p.fanoutTTL = fanoutTTL
p.enablePX = enablePX
p.overheadRateLimit = overheadRateLimit
p.sendIDontWantOnPublish = sendIDontWantOnPublish
p.opportunisticGraftThreshold = opportunisticGraftThreshold
p.gossipThreshold = gossipThreshold
p.decayInterval = decayInterval
if gossipFactor.isSome: p.gossipFactor = gossipFactor.get
applyDValues(p, dValues)
p
@@ -232,6 +253,18 @@ proc generateNodes*(
proc toGossipSub*(nodes: seq[PubSub]): seq[GossipSub] =
return nodes.mapIt(GossipSub(it))
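# Returns the single node in `nodes` whose peer id matches `peerId`; the check asserts exactly one match.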
proc getNodeByPeerId*[T: PubSub](nodes: seq[T], peerId: PeerId): GossipSub =
let filteredNodes = nodes.filterIt(it.peerInfo.peerId == peerId)
check:
filteredNodes.len == 1
return filteredNodes[0]
proc getPeerByPeerId*[T: PubSub](node: T, topic: string, peerId: PeerId): PubSubPeer =
let filteredPeers = node.gossipsub[topic].toSeq().filterIt(it.peerId == peerId)
check:
filteredPeers.len == 1
return filteredPeers[0]
proc connectNodes*[T: PubSub](dialer: T, target: T) {.async.} =
doAssert dialer.switch.peerInfo.peerId != target.switch.peerInfo.peerId,
"Could not connect same peer"
@@ -256,34 +289,18 @@ proc connectNodesSparse*[T: PubSub](nodes: seq[T], degree: int = 2) {.async.} =
if dialer.switch.peerInfo.peerId != node.switch.peerInfo.peerId:
await connectNodes(dialer, node)
proc activeWait(
interval: Duration, maximum: Moment, timeoutErrorMessage = "Timeout on activeWait"
) {.async.} =
await sleepAsync(interval)
doAssert Moment.now() < maximum, timeoutErrorMessage
proc waitSub*(sender, receiver: auto, key: string) {.async.} =
if sender == receiver:
return
let timeout = Moment.now() + 5.seconds
let fsub = GossipSub(sender)
let peerId = receiver.peerInfo.peerId
# this is for testing purposes only
# peers can be inside `mesh` and `fanout`, not just `gossipsub`
while (
not fsub.gossipsub.hasKey(key) or
not fsub.gossipsub.hasPeerId(key, receiver.peerInfo.peerId)
) and
(
not fsub.mesh.hasKey(key) or
not fsub.mesh.hasPeerId(key, receiver.peerInfo.peerId)
) and (
not fsub.fanout.hasKey(key) or
not fsub.fanout.hasPeerId(key, receiver.peerInfo.peerId)
)
:
trace "waitSub sleeping..."
await activeWait(5.milliseconds, timeout, "waitSub timeout!")
checkUntilTimeout:
(fsub.gossipsub.hasKey(key) and fsub.gossipsub.hasPeerId(key, peerId)) or
(fsub.mesh.hasKey(key) and fsub.mesh.hasPeerId(key, peerId)) or
(fsub.fanout.hasKey(key) and fsub.fanout.hasPeerId(key, peerId))
proc waitSubAllNodes*(nodes: seq[auto], topic: string) {.async.} =
let numberOfNodes = nodes.len
@@ -292,9 +309,8 @@ proc waitSubAllNodes*(nodes: seq[auto], topic: string) {.async.} =
if x != y:
await waitSub(nodes[x], nodes[y], topic)
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
let timeout = Moment.now() + 5.seconds
while true:
proc waitSubGraph*[T: PubSub](nodes: seq[T], key: string) {.async.} =
proc isGraphConnected(): bool =
var
nodesMesh: Table[PeerId, seq[PeerId]]
seen: HashSet[PeerId]
@@ -314,10 +330,11 @@ proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
explore(n.peerInfo.peerId)
if seen.len == nodes.len:
ok.inc()
if ok == nodes.len:
return
trace "waitSubGraph sleeping..."
await activeWait(5.milliseconds, timeout, "waitSubGraph timeout!")
return ok == nodes.len
checkUntilTimeout:
isGraphConnected()
proc waitForMesh*(
sender: auto, receiver: auto, key: string, timeoutDuration = 5.seconds
@@ -326,67 +343,11 @@ proc waitForMesh*(
return
let
timeoutMoment = Moment.now() + timeoutDuration
gossipsubSender = GossipSub(sender)
receiverPeerId = receiver.peerInfo.peerId
while not gossipsubSender.mesh.hasPeerId(key, receiverPeerId):
trace "waitForMesh sleeping..."
await activeWait(5.milliseconds, timeoutMoment, "waitForMesh timeout!")
type PeerTableType* {.pure.} = enum
Gossipsub = "gossipsub"
Mesh = "mesh"
Fanout = "fanout"
proc waitForPeersInTable*(
nodes: seq[auto],
topic: string,
peerCounts: seq[int],
table: PeerTableType,
timeout = 5.seconds,
) {.async.} =
## Wait until each node in `nodes` has at least the corresponding number of peers from `peerCounts`
## in the specified table (mesh, gossipsub, or fanout) for the given topic
doAssert nodes.len == peerCounts.len, "Node count must match peer count expectations"
# Helper proc to check current state and update satisfaction status
proc checkState(
nodes: seq[auto],
topic: string,
peerCounts: seq[int],
table: PeerTableType,
satisfied: var seq[bool],
): bool =
for i in 0 ..< nodes.len:
if not satisfied[i]:
let fsub = GossipSub(nodes[i])
let currentCount =
case table
of PeerTableType.Mesh:
fsub.mesh.getOrDefault(topic).len
of PeerTableType.Gossipsub:
fsub.gossipsub.getOrDefault(topic).len
of PeerTableType.Fanout:
fsub.fanout.getOrDefault(topic).len
satisfied[i] = currentCount >= peerCounts[i]
return satisfied.allIt(it)
let timeoutMoment = Moment.now() + timeout
var
satisfied = newSeq[bool](nodes.len)
allSatisfied = false
allSatisfied = checkState(nodes, topic, peerCounts, table, satisfied) # Initial check
# Continue checking until all requirements are met or timeout
while not allSatisfied:
await activeWait(
5.milliseconds,
timeoutMoment,
"Timeout waiting for peer counts in " & $table & " for topic " & topic,
)
allSatisfied = checkState(nodes, topic, peerCounts, table, satisfied)
checkUntilTimeout:
gossipsubSender.mesh.hasPeerId(key, receiverPeerId)
proc startNodes*[T: PubSub](nodes: seq[T]) {.async.} =
await allFuturesThrowing(nodes.mapIt(it.switch.start()))
@@ -441,37 +402,102 @@ proc createCompleteHandler*(): (
return (fut, handler)
proc addIHaveObservers*(nodes: seq[auto], topic: string, receivedIHaves: ref seq[int]) =
proc createCheckForMessages*(): (
ref seq[Message], proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
) =
var messages = new seq[Message]
let checkForMessage = proc(
peer: PubSubPeer, msgs: var RPCMsg
) {.gcsafe, raises: [].} =
for message in msgs.messages:
messages[].add(message)
return (messages, checkForMessage)
proc createCheckForIHave*(): (
ref seq[ControlIHave], proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
) =
var messages = new seq[ControlIHave]
let checkForMessage = proc(
peer: PubSubPeer, msgs: var RPCMsg
) {.gcsafe, raises: [].} =
if msgs.control.isSome:
for msg in msgs.control.get.ihave:
messages[].add(msg)
return (messages, checkForMessage)
proc createCheckForIWant*(): (
ref seq[ControlIWant], proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
) =
var messages = new seq[ControlIWant]
let checkForMessage = proc(
peer: PubSubPeer, msgs: var RPCMsg
) {.gcsafe, raises: [].} =
if msgs.control.isSome:
for msg in msgs.control.get.iwant:
messages[].add(msg)
return (messages, checkForMessage)
proc createCheckForIDontWant*(): (
ref seq[ControlIWant], proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
) =
var messages = new seq[ControlIWant]
let checkForMessage = proc(
peer: PubSubPeer, msgs: var RPCMsg
) {.gcsafe, raises: [].} =
if msgs.control.isSome:
for msg in msgs.control.get.idontwant:
messages[].add(msg)
return (messages, checkForMessage)
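# Wraps `handler` in a PubSubObserver and registers it on `node`, so every RPC message the node
# receives is passed to the handler.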
proc addOnRecvObserver*[T: PubSub](
node: T, handler: proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
) =
let pubsubObserver = PubSubObserver(onRecv: handler)
node.addObserver(pubsubObserver)
proc addIHaveObservers*[T: PubSub](nodes: seq[T]): (ref seq[ref seq[ControlIHave]]) =
let numberOfNodes = nodes.len
receivedIHaves[] = repeat(0, numberOfNodes)
var allMessages = new seq[ref seq[ControlIHave]]
allMessages[].setLen(numberOfNodes)
for i in 0 ..< numberOfNodes:
var pubsubObserver: PubSubObserver
capture i:
let checkForIhaves = proc(peer: PubSubPeer, msgs: var RPCMsg) =
if msgs.control.isSome:
let iHave = msgs.control.get.ihave
if iHave.len > 0:
for msg in iHave:
if msg.topicID == topic:
receivedIHaves[i] += 1
pubsubObserver = PubSubObserver(onRecv: checkForIhaves)
nodes[i].addObserver(pubsubObserver)
var (messages, checkForMessage) = createCheckForIHave()
nodes[i].addOnRecvObserver(checkForMessage)
allMessages[i] = messages
proc addIDontWantObservers*(nodes: seq[auto], receivedIDontWants: ref seq[int]) =
return allMessages
proc addIDontWantObservers*[T: PubSub](
nodes: seq[T]
): (ref seq[ref seq[ControlIWant]]) =
let numberOfNodes = nodes.len
receivedIDontWants[] = repeat(0, numberOfNodes)
var allMessages = new seq[ref seq[ControlIWant]]
allMessages[].setLen(numberOfNodes)
for i in 0 ..< numberOfNodes:
var pubsubObserver: PubSubObserver
capture i:
let checkForIDontWant = proc(peer: PubSubPeer, msgs: var RPCMsg) =
if msgs.control.isSome:
let iDontWant = msgs.control.get.idontwant
if iDontWant.len > 0:
receivedIDontWants[i] += 1
pubsubObserver = PubSubObserver(onRecv: checkForIDontWant)
nodes[i].addObserver(pubsubObserver)
var (messages, checkForMessage) = createCheckForIDontWant()
nodes[i].addOnRecvObserver(checkForMessage)
allMessages[i] = messages
return allMessages
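# For every node whose peer id appears in `peers`, unsubscribes voidTopicHandler from `topic`.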
proc findAndUnsubscribePeers*[T: PubSub](
nodes: seq[T], peers: seq[PeerId], topic: string, handler: TopicHandler
) =
for i in 0 ..< nodes.len:
let node = nodes[i]
if peers.anyIt(it == node.peerInfo.peerId):
node.unsubscribe(topic, voidTopicHandler)
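# Empties the node's message cache: drops stored messages, clears each history window and resets the position index.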
proc clearMCache*[T: PubSub](node: T) =
node.mcache.msgs.clear()
for i in 0 ..< node.mcache.history.len:
node.mcache.history[i].setLen(0)
node.mcache.pos = 0
# TODO: refactor helper methods from testgossipsub.nim
proc setupNodes*(count: int): seq[PubSub] =
@@ -514,3 +540,22 @@ proc baseTestProcedure*(
proc `$`*(peer: PubSubPeer): string =
shortLog(peer)
proc currentRateLimitHits*(): float64 =
try:
libp2p_gossipsub_peers_rate_limit_hits.valueByName(
"libp2p_gossipsub_peers_rate_limit_hits_total", @["nim-libp2p"]
)
except KeyError:
0
proc addDirectPeer*[T: PubSub](node: T, target: T) {.async.} =
doAssert node.switch.peerInfo.peerId != target.switch.peerInfo.peerId,
"Could not add same peer"
await node.addDirectPeer(target.switch.peerInfo.peerId, target.switch.peerInfo.addrs)
proc addDirectPeerStar*[T: PubSub](nodes: seq[T]) {.async.} =
for node in nodes:
for target in nodes:
if node.switch.peerInfo.peerId != target.switch.peerInfo.peerId:
await addDirectPeer(node, target)


@@ -1,3 +1,3 @@
{.used.}
import testnative, testdaemon, ./pubsub/testpubsub, testinterop
import testnative, ./pubsub/testpubsub, testinterop, testdaemon

tests/testautotls.nim

@@ -0,0 +1,178 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import sequtils, json
import chronos, chronos/apps/http/httpclient
import ../libp2p/[stream/connection, upgrademngrs/upgrade, autotls/acme/mockapi, wire]
import ./helpers
suite "AutoTLS ACME Client":
var api {.threadvar.}: MockACMEApi
var key {.threadvar.}: KeyPair
asyncTeardown:
await api.close()
checkTrackers()
asyncSetup:
api = await MockACMEApi.new()
api.mockedHeaders = HttpTable.init()
key = KeyPair.random(PKScheme.RSA, newRng()[]).get()
asyncTest "register to acme server":
api.mockedBody = %*{"status": "valid"}
api.mockedHeaders.add("location", "some-expected-kid")
let registerResponse = await api.requestRegister(key)
check registerResponse.kid == "some-expected-kid"
asyncTest "request challenge for a domain":
api.mockedBody =
%*{
"status": "pending",
"authorizations": ["expected-authorizations-url"],
"finalize": "expected-finalize-url",
}
api.mockedHeaders.set("location", "expected-order-url")
let challengeResponse =
await api.requestNewOrder(@["some.dummy.domain.com"], key, "kid")
check challengeResponse.status == ACMEChallengeStatus.pending
check challengeResponse.authorizations == ["expected-authorizations-url"]
check challengeResponse.finalize == "expected-finalize-url"
check challengeResponse.orderURL == "expected-order-url"
# reset mocked obj for second request
api.mockedBody =
%*{
"challenges": [
{
"url": "expected-dns01-url",
"type": "dns-01",
"status": "pending",
"token": "expected-dns01-token",
}
]
}
let authorizationsResponse =
await api.requestAuthorizations(challengeResponse.authorizations, key, "kid")
check authorizationsResponse.challenges.len > 0
let dns01 = authorizationsResponse.challenges.filterIt(it.`type` == "dns-01")[0]
check dns01.url == "expected-dns01-url"
check dns01.`type` == "dns-01"
check dns01.token == "expected-dns01-token"
check dns01.status == ACMEChallengeStatus.pending
asyncTest "register with unsupported keys":
let unsupportedSchemes = [PKScheme.Ed25519, PKScheme.Secp256k1, PKScheme.ECDSA]
for scheme in unsupportedSchemes:
let unsupportedKey = KeyPair.random(scheme, newRng()[]).get()
expect(ACMEError):
discard await api.requestRegister(unsupportedKey)
asyncTest "request challenge with invalid kid":
expect(ACMEError):
discard await api.requestChallenge(@["domain.com"], key, "invalid_kid_here")
asyncTest "challenge completed successful":
api.mockedBody = %*{"checkURL": "some-check-url"}
discard await api.requestCompleted("some-chal-url", key, "kid")
api.mockedBody = %*{"status": "valid"}
api.mockedHeaders.add("Retry-After", "1")
let completed = await api.checkChallengeCompleted("some-chal-url", key, "kid")
check completed == true
asyncTest "challenge completed max retries reached":
api.mockedBody = %*{"checkURL": "some-check-url"}
discard await api.requestCompleted("some-chal-url", key, "kid")
api.mockedBody = %*{"status": "pending"}
api.mockedHeaders.add("Retry-After", "1")
let completed =
await api.checkChallengeCompleted("some-chal-url", key, "kid", retries = 1)
check completed == false
asyncTest "challenge completed invalid":
api.mockedBody = %*{"checkURL": "some-check-url"}
discard await api.requestCompleted("some-chal-url", key, "kid")
api.mockedBody = %*{"status": "invalid"}
api.mockedHeaders.add("Retry-After", "1")
expect(ACMEError):
discard await api.checkChallengeCompleted("some-chal-url", key, "kid")
asyncTest "finalize certificate successful":
api.mockedBody = %*{"status": "valid"}
api.mockedHeaders.add("Retry-After", "1")
let finalized = await api.certificateFinalized(
"some-domain", "some-finalize-url", "some-order-url", key, "kid"
)
check finalized == true
asyncTest "finalize certificate max retries reached":
api.mockedBody = %*{"status": "processing"}
api.mockedHeaders.add("Retry-After", "1")
let finalized = await api.certificateFinalized(
"some-domain", "some-finalize-url", "some-order-url", key, "kid", retries = 1
)
check finalized == false
asyncTest "finalize certificate invalid":
api.mockedBody = %*{"status": "invalid"}
api.mockedHeaders.add("Retry-After", "1")
expect(ACMEError):
discard await api.certificateFinalized(
"some-domain", "some-finalize-url", "some-order-url", key, "kid"
)
asyncTest "expect error on invalid JSON response":
api.mockedBody = %*{"inexistent field": "invalid value"}
expect(ACMEError):
    # avoid calling the overloaded mock requestNonce here, since we want to test the real implementation
discard await procCall requestNonce(ACMEApi(api))
expect(ACMEError):
discard await api.requestRegister(key)
expect(ACMEError):
discard await api.requestNewOrder(@["some-domain"], key, "kid")
expect(ACMEError):
discard await api.requestAuthorizations(@["auth-1", "auth-2"], key, "kid")
expect(ACMEError):
discard await api.requestChallenge(@["domain-1", "domain-2"], key, "kid")
expect(ACMEError):
discard await api.requestCheck(
"some-check-url", ACMECheckKind.ACMEOrderCheck, key, "kid"
)
expect(ACMEError):
discard await api.requestCheck(
"some-check-url", ACMECheckKind.ACMEChallengeCheck, key, "kid"
)
expect(ACMEError):
discard await api.requestCompleted("some-chal-url", key, "kid")
expect(ACMEError):
discard await api.requestFinalize("some-domain", "some-finalize-url", key, "kid")
expect(ACMEError):
discard await api.requestGetOrder("some-order-url")


@@ -0,0 +1,49 @@
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import chronos
import chronos/apps/http/httpclient
import ../libp2p/[stream/connection, upgrademngrs/upgrade, autotls/acme/api, wire]
import ./helpers
when defined(linux) and defined(amd64):
{.used.}
suite "AutoTLS Integration":
var api {.threadvar.}: ACMEApi
var key {.threadvar.}: KeyPair
asyncTeardown:
await api.close()
checkTrackers()
asyncSetup:
api = await ACMEApi.new(acmeServerURL = LetsEncryptURLStaging)
key = KeyPair.random(PKScheme.RSA, newRng()[]).get()
asyncTest "test request challenge":
let registerResponse = await api.requestRegister(key)
# account was registered (kid set)
check registerResponse.kid != ""
if registerResponse.kid == "":
raiseAssert "unable to register acme account"
# challenge requested
let challenge =
await api.requestChallenge(@["some.dummy.domain.com"], key, registerResponse.kid)
check challenge.finalizeURL.len() > 0
check challenge.orderURL.len() > 0
check challenge.dns01.url.len() > 0
check challenge.dns01.`type`.len() > 0
check challenge.dns01.status == ACMEChallengeStatus.pending
check challenge.dns01.token.len() > 0


@@ -108,17 +108,16 @@ proc pubsubTest(f: set[P2PDaemonFlags]): Future[bool] {.async.} =
if resultsCount == 2:
result = true
when isMainModule:
suite "libp2p-daemon test suite":
test "Simple spawn and get identity test":
check:
waitFor(identitySpawnTest()) == true
test "Connect/Accept peer/stream test":
check:
waitFor(connectStreamTest()) == true
asyncTest "GossipSub test":
checkUntilTimeout:
(await pubsubTest({PSGossipSub}))
asyncTest "FloodSub test":
checkUntilTimeout:
(await pubsubTest({PSFloodSub}))
suite "libp2p-daemon test suite":
test "Simple spawn and get identity test":
check:
waitFor(identitySpawnTest()) == true
test "Connect/Accept peer/stream test":
check:
waitFor(connectStreamTest()) == true
asyncTest "GossipSub test":
checkUntilTimeoutCustom(10.seconds, 100.milliseconds):
(await pubsubTest({PSGossipSub}))
asyncTest "FloodSub test":
checkUntilTimeoutCustom(10.seconds, 100.milliseconds):
(await pubsubTest({PSFloodSub}))


@@ -10,8 +10,10 @@
# those terms.
import ./helpers
import unittest2
from std/exitprocs import nil
suite "Helpers":
suite "checkUntilTimeout helpers":
asyncTest "checkUntilTimeout should pass if the condition is true":
let a = 2
let b = 2
@@ -26,16 +28,52 @@ suite "Helpers":
a == 2
b == 2
asyncTest "checkUntilCustomTimeout should pass when the condition is true":
let a = 2
asyncTest "checkUntilTimeout should pass if condition becomes true after time":
var a = 1
let b = 2
checkUntilCustomTimeout(2.seconds):
proc makeConditionTrueLater() {.async.} =
await sleepAsync(100.milliseconds)
a = 2
asyncSpawn makeConditionTrueLater()
checkUntilTimeout:
a == b
asyncTest "checkUntilCustomTimeout should pass when the conditions are true":
asyncTest "checkUntilTimeoutCustom should pass when the condition is true":
let a = 2
let b = 2
checkUntilCustomTimeout(5.seconds):
checkUntilTimeoutCustom(2.seconds, 100.milliseconds):
a == b
asyncTest "checkUntilTimeoutCustom should pass when the conditions are true":
let a = 2
let b = 2
checkUntilTimeoutCustom(5.seconds, 100.milliseconds):
a == b
a == 2
b == 2
asyncTest "checkUntilTimeoutCustom should pass if condition becomes true after time":
var a = 1
let b = 2
proc makeConditionTrueLater() {.async.} =
await sleepAsync(100.milliseconds)
a = 2
asyncSpawn makeConditionTrueLater()
checkUntilTimeoutCustom(200.milliseconds, 10.milliseconds):
a == b
suite "checkUntilTimeout helpers - failed":
teardown:
require testStatusIMPL == TestStatus.Failed
testStatusIMPL = TestStatus.OK
exitProcs.setProgramResult(QuitSuccess)
asyncTest "checkUntilTimeout should timeout if condition is never true":
checkUntilTimeout:
false
asyncTest "checkUntilTimeoutCustom should timeout if condition is never true":
checkUntilTimeoutCustom(100.milliseconds, 10.milliseconds):
false


@@ -105,7 +105,7 @@ suite "Hole Punching":
privatePeerSwitch.peerInfo.peerId, (await privatePeerRelayAddr)
)
checkUntilTimeout:
checkUntilTimeoutCustom(10.seconds, 100.milliseconds):
privatePeerSwitch.connManager.connCount(publicPeerSwitch.peerInfo.peerId) == 1
not isRelayed(
privatePeerSwitch.connManager.selectMuxer(publicPeerSwitch.peerInfo.peerId).connection
@@ -166,7 +166,7 @@ suite "Hole Punching":
privatePeerSwitch.peerInfo.peerId, (await privatePeerRelayAddr)
)
checkUntilTimeout:
checkUntilTimeoutCustom(10.seconds, 100.milliseconds):
privatePeerSwitch.connManager.connCount(publicPeerSwitch.peerInfo.peerId) == 1
not isRelayed(
privatePeerSwitch.connManager.selectMuxer(publicPeerSwitch.peerInfo.peerId).connection

tests/testintegration.nim

@@ -0,0 +1,12 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import testpeeridauth_integration, testautotls_integration


@@ -1,3 +1,5 @@
{.used.}
import helpers, commoninterop
import ../libp2p
import ../libp2p/crypto/crypto, ../libp2p/protocols/connectivity/relay/relay


@@ -9,7 +9,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
import std/[strutils, sequtils, tables]
import std/[sequtils, tables]
import chronos
import
../libp2p/[
@@ -36,6 +36,9 @@ const unixPlatform =
defined(linux) or defined(solaris) or defined(macosx) or defined(freebsd) or
defined(netbsd) or defined(openbsd) or defined(dragonfly)
when unixPlatform:
import std/strutils
proc guessOsNameServers(): seq[TransportAddress] =
when unixPlatform:
var resultSeq = newSeqOfCap[TransportAddress](3)


@@ -28,8 +28,10 @@ import
transports/tls/testcertificate
import
testnameresolve, testmultistream, testbufferstream, testidentify,
testautotls, testnameresolve, testmultistream, testbufferstream, testidentify,
testobservedaddrmanager, testconnmngr, testswitch, testnoise, testpeerinfo,
testpeerstore, testping, testmplex, testrelayv1, testrelayv2, testrendezvous,
testdiscovery, testyamux, testautonat, testautonatservice, testautorelay, testdcutr,
testhpservice, testutility, testhelpers, testwildcardresolverservice
testhpservice, testutility, testhelpers, testwildcardresolverservice, testperf
import kademlia/testencoding

tests/testpeeridauth.nim

@@ -0,0 +1,113 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import uri, base64, times
import chronos, chronos/apps/http/httpclient
import
../libp2p/
[
stream/connection,
upgrademngrs/upgrade,
peeridauth/mockclient,
wire,
crypto/crypto,
]
import ./helpers
suite "PeerID Auth Client":
var client {.threadvar.}: MockPeerIDAuthClient
var rng {.threadvar.}: ref HmacDrbgContext
var peerInfo {.threadvar.}: PeerInfo
asyncTeardown:
await client.close()
checkTrackers()
asyncSetup:
rng = newRng()
client = MockPeerIDAuthClient.new(rng)
client.mockedHeaders = HttpTable.init()
peerInfo = PeerInfo.new(PrivateKey.random(PKScheme.RSA, rng[]).get())
asyncTest "request authentication":
let serverPrivateKey = PrivateKey.random(PKScheme.RSA, rng[]).get()
let serverPubkey = serverPrivateKey.getPublicKey().get()
let b64serverPubkey = serverPubkey.pubkeyBytes().encode(safe = true)
client.mockedHeaders.add(
"WWW-Authenticate",
"libp2p-PeerID " & "challenge-client=\"somechallengeclient\", public-key=\"" &
b64serverPubkey & "\", opaque=\"someopaque\"",
)
let authenticationResponse =
await client.requestAuthentication(parseUri("https://example.com/some/uri"))
check authenticationResponse.challengeClient ==
PeerIDAuthChallenge("somechallengeclient")
check authenticationResponse.opaque == PeerIDAuthOpaque("someopaque")
check authenticationResponse.serverPubkey == serverPubkey
asyncTest "request authorization":
let sig = PeerIDAuthSignature("somesig")
let bearer = BearerToken(token: "somebearer", expires: Opt.none(DateTime))
client.mockedHeaders.add(
"Authentication-Info",
"libp2p-PeerID " & "sig=\"" & sig & "\", " & "bearer=\"" & bearer.token & "\"",
)
let uri = parseUri("https://example.com/some/uri")
let serverPrivateKey = PrivateKey.random(PKScheme.RSA, rng[]).get()
let serverPubkey = serverPrivateKey.getPublicKey().get()
let authorizationResponse = await client.requestAuthorization(
peerInfo, uri, "some-challenge-client", "some-challenge-server", serverPubkey,
"some-opaque", "some-payload",
)
check authorizationResponse.bearer == bearer
check authorizationResponse.sig == sig
asyncTest "checkSignature successful":
# example from peer-id-auth spec
let serverPrivateKey = PrivateKey
.init(
"0801124001010101010101010101010101010101010101010101010101010101010101018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c"
)
.get()
let serverPublicKey = serverPrivateKey.getPublicKey().get()
let challenge = "ERERERERERERERERERERERERERERERERERERERERERE="
let hostname = "example.com"
let sig =
"UA88qZbLUzmAxrD9KECbDCgSKAUBAvBHrOCF2X0uPLR1uUCF7qGfLPc7dw3Olo-LaFCDpk5sXN7TkLWPVvuXAA=="
let clientPublicKey = PublicKey
.init("080112208139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394")
.get()
check checkSignature(sig, serverPublicKey, challenge, clientPublicKey, hostname)
asyncTest "checkSignature failed":
# example from peer-id-auth spec (but with sig altered)
let serverPrivateKey = PrivateKey
.init(
"0801124001010101010101010101010101010101010101010101010101010101010101018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c"
)
.get()
let serverPublicKey = serverPrivateKey.getPublicKey().get()
let challenge = "ERERERERERERERERERERERERERERERERERERERERERE="
let hostname = "example.com"
let sig =
"ZZZZZZZZZZZZZZZ9KECbDCgSKAUBAvBHrOCF2X0uPLR1uUCF7qGfLPc7dw3Olo-LaFCDpk5sXN7TkLWPVvuXAA=="
let clientPublicKey = PublicKey
.init("080112208139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394")
.get()
check checkSignature(sig, serverPublicKey, challenge, clientPublicKey, hostname) ==
false


@@ -0,0 +1,58 @@
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import json, uri
import chronos
import chronos/apps/http/httpclient
import ../libp2p/[stream/connection, upgrademngrs/upgrade, peeridauth, wire]
import ./helpers
when defined(linux) and defined(amd64):
{.used.}
const
AuthPeerURL = "https://registration.libp2p.direct/v1/_acme-challenge"
HttpPeerAuthFailed = 401
suite "PeerID Auth":
var client {.threadvar.}: PeerIDAuthClient
var peerInfo {.threadvar.}: PeerInfo
asyncTeardown:
await client.close()
checkTrackers()
asyncSetup:
let rng = newRng()
client = PeerIDAuthClient.new(rng)
peerInfo = PeerInfo.new(PrivateKey.random(PKScheme.RSA, rng[]).get())
asyncTest "test peerID send":
let payload =
%*{
"identifiers": [
{
"type": "dns",
"value":
"*.k51qzi5uqu5dj8c5nhiw2oceam0uebustsj7s36kjxwtscngp0y126o3b95mh9.libp2p.direct",
}
]
}
let (bearer, responseWithoutBearer) =
await client.send(parseUri(AuthPeerURL), peerInfo, payload)
check responseWithoutBearer.status != HttpPeerAuthFailed
doAssert bearer.token.len > 0
let (_, responseWithBearer) =
await client.send(parseUri(AuthPeerURL), peerInfo, payload, bearer)
check responseWithBearer.status != HttpPeerAuthFailed

Some files were not shown because too many files have changed in this diff.