mirror of
https://github.com/vacp2p/nim-libp2p.git
synced 2026-01-10 10:37:55 -05:00
Compare commits
59 Commits
test-non-m
...
v1110
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2c0d4b873e | ||
|
|
d803352bd6 | ||
|
|
2eafac47e8 | ||
|
|
848fdde0a8 | ||
|
|
31e7dc68e2 | ||
|
|
08299a2059 | ||
|
|
2f3156eafb | ||
|
|
72e85101b0 | ||
|
|
d205260a3e | ||
|
|
97e576d146 | ||
|
|
888cb78331 | ||
|
|
1d4c261d2a | ||
|
|
83de0c0abd | ||
|
|
c501adc9ab | ||
|
|
f9fc24cc08 | ||
|
|
cd26244ccc | ||
|
|
cabab6aafe | ||
|
|
fb42a9b4aa | ||
|
|
141f4d9116 | ||
|
|
cb31152b53 | ||
|
|
3a7745f920 | ||
|
|
a89916fb1a | ||
|
|
c6cf46c904 | ||
|
|
b28a71ab13 | ||
|
|
95b9859bcd | ||
|
|
9e599753af | ||
|
|
2e924906bb | ||
|
|
e811c1ad32 | ||
|
|
86695b55bb | ||
|
|
8c3a4d882a | ||
|
|
4bad343ddc | ||
|
|
47b8a05c32 | ||
|
|
4e6f4af601 | ||
|
|
7275f6f9c3 | ||
|
|
c3dae6a7d4 | ||
|
|
bb404eda4a | ||
|
|
584710bd80 | ||
|
|
ad5eae9adf | ||
|
|
26fae7cd2d | ||
|
|
87d6655368 | ||
|
|
cd60b254a0 | ||
|
|
b88cdcdd4b | ||
|
|
4a5e06cb45 | ||
|
|
fff3a7ad1f | ||
|
|
05c894d487 | ||
|
|
8850e9ccd9 | ||
|
|
2746531851 | ||
|
|
2856db5490 | ||
|
|
b29e78ccae | ||
|
|
c9761c3588 | ||
|
|
e4ef21e07c | ||
|
|
61429aa0d6 | ||
|
|
c1ef011556 | ||
|
|
cd1424c09f | ||
|
|
878d627f93 | ||
|
|
1d6385ddc5 | ||
|
|
873f730b4e | ||
|
|
1c1547b137 | ||
|
|
9997f3e3d3 |
6
.github/workflows/ci.yml
vendored
6
.github/workflows/ci.yml
vendored
@@ -14,7 +14,7 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
test:
|
||||
timeout-minutes: 90
|
||||
timeout-minutes: 40
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -36,6 +36,8 @@ jobs:
|
||||
memory_management: refc
|
||||
- ref: version-2-0
|
||||
memory_management: refc
|
||||
- ref: version-2-2
|
||||
memory_management: refc
|
||||
include:
|
||||
- platform:
|
||||
os: linux
|
||||
@@ -116,5 +118,5 @@ jobs:
|
||||
nimble --version
|
||||
gcc --version
|
||||
|
||||
NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
|
||||
export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
|
||||
nimble test
|
||||
|
||||
23
.github/workflows/daily_amd64.yml
vendored
23
.github/workflows/daily_amd64.yml
vendored
@@ -6,9 +6,26 @@ on:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
test_amd64:
|
||||
name: Daily amd64
|
||||
test_amd64_latest:
|
||||
name: Daily amd64 (latest dependencies)
|
||||
uses: ./.github/workflows/daily_common.yml
|
||||
with:
|
||||
nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': 'version-2-0', 'memory_management': 'refc'}]"
|
||||
nim: "[
|
||||
{'ref': 'version-1-6', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-0', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-2', 'memory_management': 'refc'},
|
||||
{'ref': 'devel', 'memory_management': 'refc'},
|
||||
]"
|
||||
cpu: "['amd64']"
|
||||
test_amd64_pinned:
|
||||
name: Daily amd64 (pinned dependencies)
|
||||
uses: ./.github/workflows/daily_common.yml
|
||||
with:
|
||||
pinned_deps: true
|
||||
nim: "[
|
||||
{'ref': 'version-1-6', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-0', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-2', 'memory_management': 'refc'},
|
||||
{'ref': 'devel', 'memory_management': 'refc'},
|
||||
]"
|
||||
cpu: "['amd64']"
|
||||
36
.github/workflows/daily_common.yml
vendored
36
.github/workflows/daily_common.yml
vendored
@@ -4,6 +4,11 @@ name: Daily Common
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
pinned_deps:
|
||||
description: 'Should dependencies be installed from pinned file or use latest versions'
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
nim:
|
||||
description: 'Nim Configuration'
|
||||
required: true
|
||||
@@ -17,26 +22,18 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: "[]"
|
||||
use_sat_solver:
|
||||
description: 'Install dependencies with SAT Solver'
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
delete_cache:
|
||||
name: Delete github action's branch cache
|
||||
runs-on: ubuntu-latest
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- uses: snnaplab/delete-branch-cache-action@v1
|
||||
|
||||
test:
|
||||
needs: delete_cache
|
||||
timeout-minutes: 90
|
||||
timeout-minutes: 40
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -81,8 +78,14 @@ jobs:
|
||||
- name: Install p2pd
|
||||
run: |
|
||||
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
|
||||
|
||||
- name: Install dependencies
|
||||
|
||||
- name: Install dependencies (pinned)
|
||||
if: ${{ inputs.pinned_deps }}
|
||||
run: |
|
||||
nimble install_pinned
|
||||
|
||||
- name: Install dependencies (latest)
|
||||
if: ${{ inputs.pinned_deps != 'true' }}
|
||||
run: |
|
||||
nimble install -y --depsOnly
|
||||
|
||||
@@ -91,11 +94,6 @@ jobs:
|
||||
nim --version
|
||||
nimble --version
|
||||
|
||||
if [[ "${{ inputs.use_sat_solver }}" == "true" ]]; then
|
||||
dependency_solver="sat"
|
||||
else
|
||||
dependency_solver="legacy"
|
||||
fi
|
||||
|
||||
NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }} --solver:${dependency_solver}"
|
||||
export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
|
||||
nimble test
|
||||
nimble testintegration
|
||||
|
||||
14
.github/workflows/daily_devel.yml
vendored
14
.github/workflows/daily_devel.yml
vendored
@@ -1,14 +0,0 @@
|
||||
name: Daily Nim Devel
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "30 6 * * *"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
test_nim_devel:
|
||||
name: Daily Nim Devel
|
||||
uses: ./.github/workflows/daily_common.yml
|
||||
with:
|
||||
nim: "[{'ref': 'devel', 'memory_management': 'orc'}]"
|
||||
cpu: "['amd64']"
|
||||
12
.github/workflows/daily_i386.yml
vendored
12
.github/workflows/daily_i386.yml
vendored
@@ -10,6 +10,14 @@ jobs:
|
||||
name: Daily i386 (Linux)
|
||||
uses: ./.github/workflows/daily_common.yml
|
||||
with:
|
||||
nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': 'version-2-0', 'memory_management': 'refc'}, {'ref': 'devel', 'memory_management': 'orc'}]"
|
||||
nim: "[
|
||||
{'ref': 'version-1-6', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-0', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-2', 'memory_management': 'refc'},
|
||||
{'ref': 'devel', 'memory_management': 'refc'},
|
||||
]"
|
||||
cpu: "['i386']"
|
||||
exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"
|
||||
exclude: "[
|
||||
{'platform': {'os':'macos'}},
|
||||
{'platform': {'os':'windows'}},
|
||||
]"
|
||||
|
||||
15
.github/workflows/daily_sat.yml
vendored
15
.github/workflows/daily_sat.yml
vendored
@@ -1,15 +0,0 @@
|
||||
name: Daily SAT
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "30 6 * * *"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
test_amd64:
|
||||
name: Daily SAT
|
||||
uses: ./.github/workflows/daily_common.yml
|
||||
with:
|
||||
nim: "[{'ref': 'version-2-0', 'memory_management': 'refc'}]"
|
||||
cpu: "['amd64']"
|
||||
use_sat_solver: true
|
||||
8
.github/workflows/dependencies.yml
vendored
8
.github/workflows/dependencies.yml
vendored
@@ -17,13 +17,13 @@ jobs:
|
||||
target:
|
||||
- repository: status-im/nimbus-eth2
|
||||
ref: unstable
|
||||
token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NIMBUS_ETH2 }}
|
||||
secret: ACTIONS_GITHUB_TOKEN_NIMBUS_ETH2
|
||||
- repository: waku-org/nwaku
|
||||
ref: master
|
||||
token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NWAKU }}
|
||||
secret: ACTIONS_GITHUB_TOKEN_NWAKU
|
||||
- repository: codex-storage/nim-codex
|
||||
ref: master
|
||||
token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NIM_CODEX }}
|
||||
secret: ACTIONS_GITHUB_TOKEN_NIM_CODEX
|
||||
steps:
|
||||
- name: Clone target repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -32,7 +32,7 @@ jobs:
|
||||
ref: ${{ matrix.target.ref}}
|
||||
path: nbc
|
||||
fetch-depth: 0
|
||||
token: ${{ matrix.target.token }}
|
||||
token: ${{ secrets[matrix.target.secret] }}
|
||||
|
||||
- name: Checkout this ref in target repository
|
||||
run: |
|
||||
|
||||
8
.github/workflows/interop.yml
vendored
8
.github/workflows/interop.yml
vendored
@@ -27,7 +27,7 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: docker/setup-buildx-action@v3
|
||||
- name: Build image
|
||||
run: docker buildx build --load -t nim-libp2p-head -f tests/transport-interop/Dockerfile .
|
||||
run: docker buildx build --load -t nim-libp2p-head -f interop/transport/Dockerfile .
|
||||
- name: Run tests
|
||||
uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
|
||||
with:
|
||||
@@ -35,7 +35,7 @@ jobs:
|
||||
# without suffix action fails because "hole-punching-interop" artifacts have
|
||||
# the same name as "transport-interop" artifacts
|
||||
test-results-suffix: transport-interop
|
||||
extra-versions: ${{ github.workspace }}/tests/transport-interop/version.json
|
||||
extra-versions: ${{ github.workspace }}/interop/transport/version.json
|
||||
s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
|
||||
s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
|
||||
s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
|
||||
@@ -48,12 +48,12 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: docker/setup-buildx-action@v3
|
||||
- name: Build image
|
||||
run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
|
||||
run: docker buildx build --load -t nim-libp2p-head -f interop/hole-punching/Dockerfile .
|
||||
- name: Run tests
|
||||
uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
|
||||
with:
|
||||
test-filter: nim-libp2p-head
|
||||
extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json
|
||||
extra-versions: ${{ github.workspace }}/interop/hole-punching/version.json
|
||||
s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
|
||||
s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
|
||||
s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
|
||||
|
||||
2
.github/workflows/linters.yml
vendored
2
.github/workflows/linters.yml
vendored
@@ -22,6 +22,6 @@ jobs:
|
||||
uses: arnetheduck/nph-action@v1
|
||||
with:
|
||||
version: 0.6.1
|
||||
options: "examples libp2p tests tools *.nim*"
|
||||
options: "examples libp2p tests interop tools *.nim*"
|
||||
fail: true
|
||||
suggest: true
|
||||
|
||||
35
.pinned
35
.pinned
@@ -1,19 +1,22 @@
|
||||
bearssl;https://github.com/status-im/nim-bearssl@#667b40440a53a58e9f922e29e20818720c62d9ac
|
||||
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
|
||||
chronos;https://github.com/status-im/nim-chronos@#c04576d829b8a0a1b12baaa8bc92037501b3a4a0
|
||||
bearssl;https://github.com/status-im/nim-bearssl@#34d712933a4e0f91f5e66bc848594a581504a215
|
||||
chronicles;https://github.com/status-im/nim-chronicles@#81a4a7a360c78be9c80c8f735c76b6d4a1517304
|
||||
chronos;https://github.com/status-im/nim-chronos@#b55e2816eb45f698ddaca8d8473e401502562db2
|
||||
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
|
||||
faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
|
||||
httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
|
||||
json_serialization;https://github.com/status-im/nim-json-serialization@#85b7ea093cb85ee4f433a617b97571bd709d30df
|
||||
faststreams;https://github.com/status-im/nim-faststreams@#c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d
|
||||
httputils;https://github.com/status-im/nim-http-utils@#79cbab1460f4c0cdde2084589d017c43a3d7b4f1
|
||||
json_serialization;https://github.com/status-im/nim-json-serialization@#2b1c5eb11df3647a2cee107cd4cce3593cbb8bcf
|
||||
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
|
||||
ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
|
||||
nimcrypto;https://github.com/cheatfate/nimcrypto@#1c8d6e3caf3abc572136ae9a1da81730c4eb4288
|
||||
quic;https://github.com/status-im/nim-quic.git@#d54e8f0f2e454604b767fadeae243d95c30c383f
|
||||
results;https://github.com/arnetheduck/nim-results@#f3c666a272c69d70cb41e7245e7f6844797303ad
|
||||
secp256k1;https://github.com/status-im/nim-secp256k1@#7246d91c667f4cc3759fdd50339caa45a2ecd8be
|
||||
serialization;https://github.com/status-im/nim-serialization@#4bdbc29e54fe54049950e352bb969aab97173b35
|
||||
stew;https://github.com/status-im/nim-stew@#3159137d9a3110edb4024145ce0ba778975de40e
|
||||
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
|
||||
unittest2;https://github.com/status-im/nim-unittest2@#2300fa9924a76e6c96bc4ea79d043e3a0f27120c
|
||||
websock;https://github.com/status-im/nim-websock@#f8ed9b40a5ff27ad02a3c237c4905b0924e3f982
|
||||
zlib;https://github.com/status-im/nim-zlib@#38b72eda9d70067df4a953f56b5ed59630f2a17b
|
||||
nimcrypto;https://github.com/cheatfate/nimcrypto@#19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
|
||||
quic;https://github.com/status-im/nim-quic.git@#ca3eda53bee9cef7379be195738ca1490877432f
|
||||
results;https://github.com/arnetheduck/nim-results@#df8113dda4c2d74d460a8fa98252b0b771bf1f27
|
||||
secp256k1;https://github.com/status-im/nim-secp256k1@#f808ed5e7a7bfc42204ec7830f14b7a42b63c284
|
||||
serialization;https://github.com/status-im/nim-serialization@#548d0adc9797a10b2db7f788b804330306293088
|
||||
stew;https://github.com/status-im/nim-stew@#0db179256cf98eb9ce9ee7b9bc939f219e621f77
|
||||
testutils;https://github.com/status-im/nim-testutils@#9e842bd58420d23044bc55e16088e8abbe93ce51
|
||||
unittest2;https://github.com/status-im/nim-unittest2@#8b51e99b4a57fcfb31689230e75595f024543024
|
||||
websock;https://github.com/status-im/nim-websock@#d5cd89062cd2d168ef35193c7d29d2102921d97e
|
||||
zlib;https://github.com/status-im/nim-zlib@#daa8723fd32299d4ca621c837430c29a5a11e19a
|
||||
jwt;https://github.com/vacp2p/nim-jwt@#18f8378de52b241f321c1f9ea905456e89b95c6f
|
||||
bearssl_pkey_decoder;https://github.com/vacp2p/bearssl_pkey_decoder@#21dd3710df9345ed2ad8bf8f882761e07863b8e0
|
||||
bio;https://github.com/xzeshen/bio@#0f5ed58b31c678920b6b4f7c1783984e6660be97
|
||||
|
||||
171
README.md
171
README.md
@@ -20,39 +20,120 @@
|
||||
- [Background](#background)
|
||||
- [Install](#install)
|
||||
- [Getting Started](#getting-started)
|
||||
- [Go-libp2p-daemon](#go-libp2p-daemon)
|
||||
- [Modules](#modules)
|
||||
- [Users](#users)
|
||||
- [Stability](#stability)
|
||||
- [Development](#development)
|
||||
- [Contribute](#contribute)
|
||||
- [Contributors](#contributors)
|
||||
- [Core Maintainers](#core-maintainers)
|
||||
- [Modules](#modules)
|
||||
- [Users](#users)
|
||||
- [Stability](#stability)
|
||||
- [License](#license)
|
||||
|
||||
## Background
|
||||
libp2p is a [Peer-to-Peer](https://en.wikipedia.org/wiki/Peer-to-peer) networking stack, with [implementations](https://github.com/libp2p/libp2p#implementations) in multiple languages derived from the same [specifications.](https://github.com/libp2p/specs)
|
||||
|
||||
Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It's striving to be a modular stack, with sane and secure defaults, useful protocols, while remain open and extensible.
|
||||
This implementation in native Nim, relying on [chronos](https://github.com/status-im/nim-chronos) for async. It's used in production by a few [projects](#users)
|
||||
Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It strives to be a modular stack with secure defaults and useful protocols, while remaining open and extensible.
|
||||
This is a native Nim implementation, using [chronos](https://github.com/status-im/nim-chronos) for asynchronous execution. It's used in production by a few [projects](#users)
|
||||
|
||||
Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p's documentation [**docs.libp2p.io**](https://docs.libp2p.io).
|
||||
|
||||
## Install
|
||||
**Prerequisite**
|
||||
- [Nim](https://nim-lang.org/install.html)
|
||||
> The currently supported Nim version is 1.6.18.
|
||||
|
||||
> The currently supported Nim versions are 1.6, 2.0 and 2.2.
|
||||
|
||||
```
|
||||
nimble install libp2p
|
||||
```
|
||||
You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/). See [examples](./examples) for simple usage patterns.
|
||||
|
||||
## Getting Started
|
||||
You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/).
|
||||
Try out the chat example. For this you'll need to have [`go-libp2p-daemon`](examples/go-daemon/daemonapi.md) running. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
|
||||
|
||||
```bash
|
||||
nim c -r --threads:on examples/directchat.nim
|
||||
```
|
||||
|
||||
This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.
|
||||
|
||||
```bash
|
||||
./examples/directchat
|
||||
/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu # change this hash by the hash you were given
|
||||
```
|
||||
|
||||
You can now chat between the instances!
|
||||
|
||||

|
||||
|
||||
## Development
|
||||
Clone the repository and install the dependencies:
|
||||
```sh
|
||||
git clone https://github.com/vacp2p/nim-libp2p
|
||||
cd nim-libp2p
|
||||
nimble install -dy
|
||||
```
|
||||
### Testing
|
||||
Remember you'll need to build the `go-libp2p-daemon` binary to run the `nim-libp2p` tests.
|
||||
To do so, please follow the installation instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
|
||||
Run unit tests:
|
||||
```sh
|
||||
# run all the unit tests
|
||||
nimble test
|
||||
```
|
||||
**Obs:** Running all tests requires the [`go-libp2p-daemon` to be installed and running](examples/go-daemon/daemonapi.md).
|
||||
|
||||
If you only want to run tests that don't require `go-libp2p-daemon`, use:
|
||||
```
|
||||
nimble testnative
|
||||
```
|
||||
|
||||
For a list of all available test suites, use:
|
||||
```
|
||||
nimble tasks
|
||||
```
|
||||
|
||||
### Contribute
|
||||
|
||||
The libp2p implementation in Nim is a work in progress. We welcome contributors to help out! Specifically, you can:
|
||||
- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
|
||||
- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
|
||||
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
|
||||
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
|
||||
- **Code format**. Code should be formatted with [nph](https://github.com/arnetheduck/nph) and follow the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
|
||||
|
||||
### Contributors
|
||||
<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
|
||||
|
||||
### Core Maintainers
|
||||
<table>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
|
||||
<td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
|
||||
<td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
### Compile time flags
|
||||
|
||||
Enable quic transport support
|
||||
```bash
|
||||
nim c -d:libp2p_quic_support some_file.nim
|
||||
```
|
||||
|
||||
Enable expensive metrics (ie, metrics with per-peer cardinality):
|
||||
```bash
|
||||
nim c -d:libp2p_expensive_metrics some_file.nim
|
||||
```
|
||||
|
||||
Set list of known libp2p agents for metrics:
|
||||
```bash
|
||||
nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku some_file.nim
|
||||
```
|
||||
|
||||
Specify gossipsub specific topics to measure in the metrics:
|
||||
```bash
|
||||
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
|
||||
```
|
||||
|
||||
|
||||
## Modules
|
||||
List of packages modules implemented in nim-libp2p:
|
||||
@@ -80,10 +161,10 @@ List of packages modules implemented in nim-libp2p:
|
||||
| [libp2p-yamux](libp2p/muxers/yamux/yamux.nim) | [Yamux](https://docs.libp2p.io/concepts/multiplex/yamux/) multiplexer |
|
||||
| **Data Types** | |
|
||||
| [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id) |
|
||||
| [peer-store](libp2p/peerstore.nim) | ["Address book" of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
|
||||
| [peer-store](libp2p/peerstore.nim) | [Address book of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
|
||||
| [multiaddress](libp2p/multiaddress.nim) | [Composable network addresses](https://github.com/multiformats/multiaddr) |
|
||||
| [signed envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
|
||||
| [routing record](libp2p/routing_record.nim) | [Signed peer dialing informations](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
|
||||
| [signed-envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
|
||||
| [routing-record](libp2p/routing_record.nim) | [Signed peer dialing informations](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
|
||||
| [discovery manager](libp2p/discovery/discoverymngr.nim) | Discovery Manager |
|
||||
| **Utilities** | |
|
||||
| [libp2p-crypto](libp2p/crypto) | Cryptographic backend |
|
||||
@@ -111,66 +192,6 @@ The versioning follows [semver](https://semver.org/), with some additions:
|
||||
|
||||
We aim to be compatible at all time with at least 2 Nim `MINOR` versions, currently `1.6 & 2.0`
|
||||
|
||||
## Development
|
||||
Clone and Install dependencies:
|
||||
```sh
|
||||
git clone https://github.com/vacp2p/nim-libp2p
|
||||
cd nim-libp2p
|
||||
# to use dependencies computed by nimble
|
||||
nimble install -dy
|
||||
# OR to install the dependencies versions used in CI
|
||||
nimble install_pinned
|
||||
```
|
||||
|
||||
Run unit tests:
|
||||
```sh
|
||||
# run all the unit tests
|
||||
nimble test
|
||||
```
|
||||
This requires the go daemon to be available. To only run native tests, use `nimble testnative`.
|
||||
Or use `nimble tasks` to show all available tasks.
|
||||
|
||||
### Contribute
|
||||
|
||||
The libp2p implementation in Nim is a work in progress. We welcome contributors to help out! Specifically, you can:
|
||||
- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
|
||||
- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
|
||||
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
|
||||
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
|
||||
- **Code format**. Please format code using [nph](https://github.com/arnetheduck/nph) v0.5.1. This will ensure a consistent codebase and make PRs easier to review. A CI rule has been added to ensure that future commits are all formatted using the same nph version.
|
||||
The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
|
||||
|
||||
### Contributors
|
||||
<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
|
||||
|
||||
### Core Maintainers
|
||||
<table>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
|
||||
<td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
|
||||
<td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
### Compile time flags
|
||||
|
||||
Enable expensive metrics (ie, metrics with per-peer cardinality):
|
||||
```bash
|
||||
nim c -d:libp2p_expensive_metrics some_file.nim
|
||||
```
|
||||
|
||||
Set list of known libp2p agents for metrics:
|
||||
```bash
|
||||
nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku some_file.nim
|
||||
```
|
||||
|
||||
Specify gossipsub specific topics to measure in the metrics:
|
||||
```bash
|
||||
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
Licensed and distributed under either of
|
||||
|
||||
@@ -4,6 +4,7 @@ if dirExists("nimbledeps/pkgs"):
|
||||
if dirExists("nimbledeps/pkgs2"):
|
||||
switch("NimblePath", "nimbledeps/pkgs2")
|
||||
|
||||
switch("warningAsError", "UnusedImport:on")
|
||||
switch("warning", "CaseTransition:off")
|
||||
switch("warning", "ObservableStores:off")
|
||||
switch("warning", "LockLevel:off")
|
||||
|
||||
@@ -3,9 +3,7 @@
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Installation](#installation)
|
||||
- [Script](#script)
|
||||
- [Usage](#usage)
|
||||
- [Example](#example)
|
||||
- [Getting Started](#getting-started)
|
||||
- [Examples](#examples)
|
||||
|
||||
# Introduction
|
||||
This is a libp2p-backed daemon wrapping the functionalities of go-libp2p for use in Nim. <br>
|
||||
@@ -13,20 +11,25 @@ For more information about the go daemon, check out [this repository](https://gi
|
||||
> **Required only** for running the tests.
|
||||
|
||||
# Prerequisites
|
||||
Go with version `1.16.0`.
|
||||
Go with version `1.16.0`
|
||||
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.
|
||||
|
||||
# Installation
|
||||
Follow one of the methods below:
|
||||
|
||||
## Script
|
||||
Run the build script while having the `go` command pointing to the correct Go version.
|
||||
We recommend using `1.16.0`, as previously stated.
|
||||
```sh
|
||||
./scripts/build_p2pd.sh
|
||||
```
|
||||
If everything goes correctly, the binary (`p2pd`) should be built and placed in the correct directory.
|
||||
If you find any issues, please head into our discord and ask for our assistance.
|
||||
`build_p2pd.sh` will not rebuild unless needed. If you already have the newest binary and you want to force the rebuild, use:
|
||||
```sh
|
||||
./scripts/build_p2pd.sh -f
|
||||
```
|
||||
Or:
|
||||
```sh
|
||||
./scripts/build_p2pd.sh --force
|
||||
```
|
||||
|
||||
If everything goes correctly, the binary (`p2pd`) should be built and placed in the `$GOPATH/bin` directory.
|
||||
If you're having issues, head into [our discord](https://discord.com/channels/864066763682218004/1115526869769535629) and ask for assistance.
|
||||
|
||||
After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
|
||||
```sh
|
||||
@@ -34,28 +37,7 @@ export PATH="$PATH:$HOME/go/bin"
|
||||
```
|
||||
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.
|
||||
|
||||
# Usage
|
||||
|
||||
## Example
|
||||
# Examples
|
||||
Examples can be found in the [examples folder](https://github.com/status-im/nim-libp2p/tree/readme/examples/go-daemon)
|
||||
|
||||
## Getting Started
|
||||
Try out the chat example. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
|
||||
|
||||
```bash
|
||||
nim c -r --threads:on examples/directchat.nim
|
||||
```
|
||||
|
||||
This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.
|
||||
|
||||
```bash
|
||||
./examples/directchat
|
||||
/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu
|
||||
```
|
||||
|
||||
You can now chat between the instances!
|
||||
|
||||

|
||||
|
||||
|
||||
|
||||
|
||||
@@ -7,11 +7,11 @@ COPY .pinned libp2p.nimble nim-libp2p/
|
||||
|
||||
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev
|
||||
|
||||
RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
|
||||
RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240dbf11c544011dd0e033d3c3acca56af" -y
|
||||
|
||||
COPY . nim-libp2p/
|
||||
|
||||
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./tests/hole-punching-interop/hole_punching.nim
|
||||
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs --mm:refc -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./interop/hole-punching/hole_punching.nim
|
||||
|
||||
FROM --platform=linux/amd64 debian:bullseye-slim
|
||||
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2 libssl-dev
|
||||
138
interop/hole-punching/hole_punching.nim
Normal file
138
interop/hole-punching/hole_punching.nim
Normal file
@@ -0,0 +1,138 @@
|
||||
import std/[os, options, strformat, sequtils]
|
||||
import redis
|
||||
import chronos, chronicles
|
||||
import
|
||||
../../libp2p/[
|
||||
builders,
|
||||
switch,
|
||||
multicodec,
|
||||
observedaddrmanager,
|
||||
services/hpservice,
|
||||
services/autorelayservice,
|
||||
protocols/connectivity/autonat/client as aclient,
|
||||
protocols/connectivity/relay/client as rclient,
|
||||
protocols/connectivity/relay/relay,
|
||||
protocols/connectivity/autonat/service,
|
||||
protocols/ping,
|
||||
]
|
||||
import ../../tests/[stubs/autonatclientstub, errorhelpers]
|
||||
|
||||
logScope:
|
||||
topics = "hp interop node"
|
||||
|
||||
proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
|
||||
let rng = newRng()
|
||||
var builder = SwitchBuilder
|
||||
.new()
|
||||
.withRng(rng)
|
||||
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
|
||||
.withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
|
||||
.withTcpTransport({ServerFlags.TcpNoDelay})
|
||||
.withYamux()
|
||||
.withAutonat()
|
||||
.withNoise()
|
||||
|
||||
if hpService != nil:
|
||||
builder = builder.withServices(@[hpService])
|
||||
|
||||
if r != nil:
|
||||
builder = builder.withCircuitRelay(r)
|
||||
|
||||
let s = builder.build()
|
||||
s.mount(Ping.new(rng = rng))
|
||||
return s
|
||||
|
||||
proc main() {.async.} =
|
||||
let relayClient = RelayClient.new()
|
||||
let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
|
||||
let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
|
||||
autonatClientStub.answer = NotReachable
|
||||
let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
|
||||
let hpservice = HPService.new(autonatService, autoRelayService)
|
||||
|
||||
let
|
||||
isListener = getEnv("MODE") == "listen"
|
||||
switch = createSwitch(relayClient, hpservice)
|
||||
auxSwitch = createSwitch()
|
||||
redisClient = open("redis", 6379.Port)
|
||||
|
||||
debug "Connected to redis"
|
||||
|
||||
await switch.start()
|
||||
await auxSwitch.start()
|
||||
|
||||
let relayAddr =
|
||||
try:
|
||||
redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
|
||||
except Exception as e:
|
||||
raise newException(CatchableError, e.msg)
|
||||
|
||||
debug "All relay addresses", relayAddr
|
||||
|
||||
# This is necessary to make the autonat service work. It will ask this peer for our reachability which the autonat
|
||||
# client stub will answer NotReachable.
|
||||
await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)
|
||||
|
||||
# Wait for autonat to be NotReachable
|
||||
while autonatService.networkReachability != NetworkReachability.NotReachable:
|
||||
await sleepAsync(100.milliseconds)
|
||||
|
||||
# This will trigger the autonat relay service to make a reservation.
|
||||
let relayMA = MultiAddress.init(relayAddr[1]).tryGet()
|
||||
|
||||
try:
|
||||
debug "Dialing relay...", relayMA
|
||||
let relayId = await switch.connect(relayMA).wait(30.seconds)
|
||||
debug "Connected to relay", relayId
|
||||
except AsyncTimeoutError as e:
|
||||
raise newException(CatchableError, "Connection to relay timed out: " & e.msg, e)
|
||||
|
||||
# Wait for our relay address to be published
|
||||
while not switch.peerInfo.addrs.anyIt(it.contains(multiCodec("p2p-circuit")).tryGet()):
|
||||
await sleepAsync(100.milliseconds)
|
||||
|
||||
if isListener:
|
||||
let listenerPeerId = switch.peerInfo.peerId
|
||||
discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
|
||||
debug "Pushed listener client peer id to redis", listenerPeerId
|
||||
|
||||
# Nothing to do anymore, wait to be killed
|
||||
await sleepAsync(2.minutes)
|
||||
else:
|
||||
let listenerId =
|
||||
try:
|
||||
PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
|
||||
except Exception as e:
|
||||
raise newException(CatchableError, "Exception init peer: " & e.msg, e)
|
||||
|
||||
debug "Got listener peer id", listenerId
|
||||
let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
|
||||
|
||||
debug "Dialing listener relay address", listenerRelayAddr
|
||||
await switch.connect(listenerId, @[listenerRelayAddr])
|
||||
|
||||
# wait for hole-punching to complete in the background
|
||||
await sleepAsync(5000.milliseconds)
|
||||
|
||||
let conn = switch.connManager.selectMuxer(listenerId).connection
|
||||
let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
|
||||
let delay = await Ping.new().ping(channel)
|
||||
await allFuturesThrowing(
|
||||
channel.close(), conn.close(), switch.stop(), auxSwitch.stop()
|
||||
)
|
||||
echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
|
||||
|
||||
try:
|
||||
proc mainAsync(): Future[string] {.async.} =
|
||||
# mainAsync wraps main and returns some value, as otherwise
|
||||
# 'waitFor(fut)' has no type (or is ambiguous)
|
||||
await main()
|
||||
return "done"
|
||||
|
||||
discard waitFor(mainAsync().wait(4.minutes))
|
||||
except AsyncTimeoutError as e:
|
||||
error "Program execution timed out", description = e.msg
|
||||
quit(-1)
|
||||
except CatchableError as e:
|
||||
error "Unexpected error", description = e.msg
|
||||
quit(-1)
|
||||
@@ -13,6 +13,6 @@ COPY . nim-libp2p/
|
||||
|
||||
RUN \
|
||||
cd nim-libp2p && \
|
||||
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs -p:nim-libp2p -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./tests/transport-interop/main.nim
|
||||
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs -p:nim-libp2p --mm:refc -d:libp2p_quic_support -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./interop/transport/main.nim
|
||||
|
||||
ENTRYPOINT ["/app/nim-libp2p/tests/transport-interop/main"]
|
||||
ENTRYPOINT ["/app/nim-libp2p/interop/transport/main"]
|
||||
@@ -47,12 +47,9 @@ proc main() {.async.} =
|
||||
MultiAddress.init("/ip4/" & ip & "/udp/0/quic-v1").tryGet()
|
||||
)
|
||||
of "ws":
|
||||
discard switchBuilder
|
||||
.withTransport(
|
||||
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
|
||||
WsTransport.new(upgr)
|
||||
discard switchBuilder.withWsTransport().withAddress(
|
||||
MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet()
|
||||
)
|
||||
.withAddress(MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet())
|
||||
else:
|
||||
doAssert false
|
||||
|
||||
@@ -83,7 +80,7 @@ proc main() {.async.} =
|
||||
try:
|
||||
redisClient.bLPop(@["listenerAddr"], testTimeout.seconds.int)[1]
|
||||
except Exception as e:
|
||||
raise newException(CatchableError, e.msg)
|
||||
raise newException(CatchableError, "Exception calling bLPop: " & e.msg, e)
|
||||
let
|
||||
remoteAddr = MultiAddress.init(listenerAddr).tryGet()
|
||||
dialingStart = Moment.now()
|
||||
@@ -99,7 +96,18 @@ proc main() {.async.} =
|
||||
pingRTTMilllis: float(pingDelay.milliseconds),
|
||||
)
|
||||
)
|
||||
quit(0)
|
||||
|
||||
discard waitFor(main().withTimeout(testTimeout))
|
||||
quit(1)
|
||||
try:
|
||||
proc mainAsync(): Future[string] {.async.} =
|
||||
# mainAsync wraps main and returns some value, as otherwise
|
||||
# 'waitFor(fut)' has no type (or is ambiguous)
|
||||
await main()
|
||||
return "done"
|
||||
|
||||
discard waitFor(mainAsync().wait(testTimeout))
|
||||
except AsyncTimeoutError as e:
|
||||
error "Program execution timed out", description = e.msg
|
||||
quit(-1)
|
||||
except CatchableError as e:
|
||||
error "Unexpected error", description = e.msg
|
||||
quit(-1)
|
||||
@@ -52,7 +52,6 @@ else:
|
||||
stream/connection,
|
||||
transports/transport,
|
||||
transports/tcptransport,
|
||||
transports/quictransport,
|
||||
protocols/secure/noise,
|
||||
cid,
|
||||
multihash,
|
||||
@@ -71,3 +70,7 @@ else:
|
||||
minprotobuf, switch, peerid, peerinfo, connection, multiaddress, crypto, lpstream,
|
||||
bufferstream, muxer, mplex, transport, tcptransport, noise, errors, cid, multihash,
|
||||
multicodec, builders, pubsub
|
||||
|
||||
when defined(libp2p_quic_support):
|
||||
import libp2p/transports/quictransport
|
||||
export quictransport
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
mode = ScriptMode.Verbose
|
||||
|
||||
packageName = "libp2p"
|
||||
version = "1.10.0"
|
||||
version = "1.11.0"
|
||||
author = "Status Research & Development GmbH"
|
||||
description = "LibP2P implementation"
|
||||
license = "MIT"
|
||||
@@ -9,9 +9,9 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
|
||||
|
||||
requires "nim >= 1.6.0",
|
||||
"nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
|
||||
"chronicles >= 0.10.2", "chronos >= 4.0.3", "metrics", "secp256k1", "stew#head",
|
||||
"websock", "unittest2", "results",
|
||||
"https://github.com/status-im/nim-quic.git#d54e8f0f2e454604b767fadeae243d95c30c383f"
|
||||
"chronicles >= 0.10.3 & < 0.11.0", "chronos >= 4.0.4", "metrics", "secp256k1",
|
||||
"stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.7", "bio",
|
||||
"https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"
|
||||
|
||||
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
|
||||
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
|
||||
@@ -30,7 +30,7 @@ proc runTest(filename: string, moreoptions: string = "") =
|
||||
excstr.add(" " & moreoptions & " ")
|
||||
if getEnv("CICOV").len > 0:
|
||||
excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
|
||||
exec excstr & " -r " & " tests/" & filename
|
||||
exec excstr & " -r -d:libp2p_quic_support tests/" & filename
|
||||
rmFile "tests/" & filename.toExe
|
||||
|
||||
proc buildSample(filename: string, run = false, extraFlags = "") =
|
||||
@@ -62,6 +62,9 @@ task testfilter, "Run PKI filter test":
|
||||
runTest("testpkifilter")
|
||||
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=")
|
||||
|
||||
task testintegration, "Runs integraion tests":
|
||||
runTest("testintegration")
|
||||
|
||||
task test, "Runs the test suite":
|
||||
runTest("testall")
|
||||
exec "nimble testfilter"
|
||||
|
||||
478
libp2p/autotls/acme/api.nim
Normal file
478
libp2p/autotls/acme/api.nim
Normal file
@@ -0,0 +1,478 @@
|
||||
import options, base64, sequtils, strutils, json
|
||||
from times import DateTime, parse
|
||||
import chronos/apps/http/httpclient, jwt, results, bearssl/pem
|
||||
|
||||
import ./utils
|
||||
import ../../crypto/crypto
|
||||
import ../../crypto/rsa
|
||||
|
||||
export ACMEError
|
||||
|
||||
const
|
||||
LetsEncryptURL* = "https://acme-v02.api.letsencrypt.org"
|
||||
LetsEncryptURLStaging* = "https://acme-staging-v02.api.letsencrypt.org"
|
||||
Alg = "RS256"
|
||||
DefaultChalCompletedRetries = 10
|
||||
DefaultChalCompletedRetryTime = 1.seconds
|
||||
DefaultFinalizeRetries = 10
|
||||
DefaultFinalizeRetryTime = 1.seconds
|
||||
DefaultRandStringSize = 256
|
||||
ACMEHttpHeaders = [("Content-Type", "application/jose+json")]
|
||||
|
||||
type Nonce* = string
|
||||
type Kid* = string
|
||||
|
||||
type ACMEDirectory* = object
|
||||
newNonce*: string
|
||||
newOrder*: string
|
||||
newAccount*: string
|
||||
|
||||
type ACMEApi* = ref object of RootObj
|
||||
directory: ACMEDirectory
|
||||
session: HttpSessionRef
|
||||
acmeServerURL*: string
|
||||
|
||||
type HTTPResponse* = object
|
||||
body*: JsonNode
|
||||
headers*: HttpTable
|
||||
|
||||
type JWK = object
|
||||
kty: string
|
||||
n: string
|
||||
e: string
|
||||
|
||||
# whether the request uses Kid or not
|
||||
type ACMERequestType = enum
|
||||
ACMEJwkRequest
|
||||
ACMEKidRequest
|
||||
|
||||
type ACMERequestHeader = object
|
||||
alg: string
|
||||
typ: string
|
||||
nonce: string
|
||||
url: string
|
||||
case kind: ACMERequestType
|
||||
of ACMEJwkRequest:
|
||||
jwk: JWK
|
||||
of ACMEKidRequest:
|
||||
kid: Kid
|
||||
|
||||
type ACMERegisterRequest* = object
|
||||
termsOfServiceAgreed: bool
|
||||
contact: seq[string]
|
||||
|
||||
type ACMEAccountStatus = enum
|
||||
valid
|
||||
deactivated
|
||||
revoked
|
||||
|
||||
type ACMERegisterResponseBody = object
|
||||
status*: ACMEAccountStatus
|
||||
|
||||
type ACMERegisterResponse* = object
|
||||
kid*: Kid
|
||||
status*: ACMEAccountStatus
|
||||
|
||||
type ACMEChallengeStatus* {.pure.} = enum
|
||||
pending = "pending"
|
||||
processing = "processing"
|
||||
valid = "valid"
|
||||
invalid = "invalid"
|
||||
|
||||
type ACMEChallenge = object
|
||||
url*: string
|
||||
`type`*: string
|
||||
status*: ACMEChallengeStatus
|
||||
token*: string
|
||||
|
||||
type ACMEChallengeIdentifier = object
|
||||
`type`: string
|
||||
value: string
|
||||
|
||||
type ACMEChallengeRequest = object
|
||||
identifiers: seq[ACMEChallengeIdentifier]
|
||||
|
||||
type ACMEChallengeResponseBody = object
|
||||
status: ACMEChallengeStatus
|
||||
authorizations: seq[string]
|
||||
finalize: string
|
||||
|
||||
type ACMEChallengeResponse* = object
|
||||
status*: ACMEChallengeStatus
|
||||
authorizations*: seq[string]
|
||||
finalize*: string
|
||||
orderURL*: string
|
||||
|
||||
type ACMEChallengeResponseWrapper* = object
|
||||
finalizeURL*: string
|
||||
orderURL*: string
|
||||
dns01*: ACMEChallenge
|
||||
|
||||
type ACMEAuthorizationsResponse* = object
|
||||
challenges*: seq[ACMEChallenge]
|
||||
|
||||
type ACMECompletedResponse* = object
|
||||
checkURL: string
|
||||
|
||||
type ACMEOrderStatus* {.pure.} = enum
|
||||
pending = "pending"
|
||||
ready = "ready"
|
||||
processing = "processing"
|
||||
valid = "valid"
|
||||
invalid = "invalid"
|
||||
|
||||
type ACMECheckKind* = enum
|
||||
ACMEOrderCheck
|
||||
ACMEChallengeCheck
|
||||
|
||||
type ACMECheckResponse* = object
|
||||
case kind: ACMECheckKind
|
||||
of ACMEOrderCheck:
|
||||
orderStatus: ACMEOrderStatus
|
||||
of ACMEChallengeCheck:
|
||||
chalStatus: ACMEChallengeStatus
|
||||
retryAfter: Duration
|
||||
|
||||
type ACMEFinalizeResponse* = object
|
||||
status: ACMEOrderStatus
|
||||
|
||||
type ACMEOrderResponse* = object
|
||||
certificate: string
|
||||
expires: string
|
||||
|
||||
type ACMECertificateResponse* = object
|
||||
rawCertificate: string
|
||||
certificateExpiry: DateTime
|
||||
|
||||
template handleError*(msg: string, body: untyped): untyped =
|
||||
try:
|
||||
body
|
||||
except ACMEError as exc:
|
||||
raise exc
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except JsonKindError as exc:
|
||||
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
|
||||
except ValueError as exc:
|
||||
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
|
||||
except HttpError as exc:
|
||||
raise newException(ACMEError, msg & ": Failed to connect to ACME server", exc)
|
||||
except CatchableError as exc:
|
||||
raise newException(ACMEError, msg & ": Unexpected error", exc)
|
||||
|
||||
method post*(
|
||||
self: ACMEApi, url: string, payload: string
|
||||
): Future[HTTPResponse] {.
|
||||
async: (raises: [ACMEError, HttpError, CancelledError]), base
|
||||
.}
|
||||
|
||||
method get*(
|
||||
self: ACMEApi, url: string
|
||||
): Future[HTTPResponse] {.
|
||||
async: (raises: [ACMEError, HttpError, CancelledError]), base
|
||||
.}
|
||||
|
||||
proc new*(
|
||||
T: typedesc[ACMEApi], acmeServerURL: string = LetsEncryptURL
|
||||
): Future[ACMEApi] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
let session = HttpSessionRef.new()
|
||||
let directory = handleError("new API"):
|
||||
let rawResponse =
|
||||
await HttpClientRequestRef.get(session, acmeServerURL & "/directory").get().send()
|
||||
let body = await rawResponse.getResponseBody()
|
||||
body.to(ACMEDirectory)
|
||||
|
||||
ACMEApi(session: session, directory: directory, acmeServerURL: acmeServerURL)
|
||||
|
||||
method requestNonce*(
|
||||
self: ACMEApi
|
||||
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]), base.} =
|
||||
handleError("requestNonce"):
|
||||
let acmeResponse = await self.get(self.directory.newNonce)
|
||||
Nonce(acmeResponse.headers.keyOrError("Replay-Nonce"))
|
||||
|
||||
# TODO: save n and e in account so we don't have to recalculate every time
|
||||
proc acmeHeader(
|
||||
self: ACMEApi, url: string, key: KeyPair, needsJwk: bool, kid: Opt[Kid]
|
||||
): Future[ACMERequestHeader] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
if not needsJwk and kid.isNone:
|
||||
raise newException(ACMEError, "kid not set")
|
||||
|
||||
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
|
||||
raise newException(ACMEError, "Unsupported signing key type")
|
||||
|
||||
let newNonce = await self.requestNonce()
|
||||
if needsJwk:
|
||||
let pubkey = key.pubkey.rsakey
|
||||
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
|
||||
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
|
||||
ACMERequestHeader(
|
||||
kind: ACMEJwkRequest,
|
||||
alg: Alg,
|
||||
typ: "JWT",
|
||||
nonce: newNonce,
|
||||
url: url,
|
||||
jwk: JWK(kty: "RSA", n: base64UrlEncode(nArray), e: base64UrlEncode(eArray)),
|
||||
)
|
||||
else:
|
||||
ACMERequestHeader(
|
||||
kind: ACMEKidRequest,
|
||||
alg: Alg,
|
||||
typ: "JWT",
|
||||
nonce: newNonce,
|
||||
url: url,
|
||||
kid: kid.get(),
|
||||
)
|
||||
|
||||
method post*(
|
||||
self: ACMEApi, url: string, payload: string
|
||||
): Future[HTTPResponse] {.
|
||||
async: (raises: [ACMEError, HttpError, CancelledError]), base
|
||||
.} =
|
||||
let rawResponse = await HttpClientRequestRef
|
||||
.post(self.session, url, body = payload, headers = ACMEHttpHeaders)
|
||||
.get()
|
||||
.send()
|
||||
let body = await rawResponse.getResponseBody()
|
||||
HTTPResponse(body: body, headers: rawResponse.headers)
|
||||
|
||||
method get*(
|
||||
self: ACMEApi, url: string
|
||||
): Future[HTTPResponse] {.
|
||||
async: (raises: [ACMEError, HttpError, CancelledError]), base
|
||||
.} =
|
||||
let rawResponse = await HttpClientRequestRef.get(self.session, url).get().send()
|
||||
let body = await rawResponse.getResponseBody()
|
||||
HTTPResponse(body: body, headers: rawResponse.headers)
|
||||
|
||||
proc createSignedAcmeRequest(
|
||||
self: ACMEApi,
|
||||
url: string,
|
||||
payload: auto,
|
||||
key: KeyPair,
|
||||
needsJwk: bool = false,
|
||||
kid: Opt[Kid] = Opt.none(Kid),
|
||||
): Future[string] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
|
||||
raise newException(ACMEError, "Unsupported signing key type")
|
||||
|
||||
let acmeHeader = await self.acmeHeader(url, key, needsJwk, kid)
|
||||
handleError("createSignedAcmeRequest"):
|
||||
var token = toJWT(%*{"header": acmeHeader, "claims": payload})
|
||||
let derPrivKey = key.seckey.rsakey.getBytes.get
|
||||
let pemPrivKey: string = pemEncode(derPrivKey, "PRIVATE KEY")
|
||||
token.sign(pemPrivKey)
|
||||
$token.toFlattenedJson()
|
||||
|
||||
proc requestRegister*(
|
||||
self: ACMEApi, key: KeyPair
|
||||
): Future[ACMERegisterResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
let registerRequest = ACMERegisterRequest(termsOfServiceAgreed: true)
|
||||
handleError("acmeRegister"):
|
||||
let payload = await self.createSignedAcmeRequest(
|
||||
self.directory.newAccount, registerRequest, key, needsJwk = true
|
||||
)
|
||||
let acmeResponse = await self.post(self.directory.newAccount, payload)
|
||||
let acmeResponseBody = acmeResponse.body.to(ACMERegisterResponseBody)
|
||||
|
||||
ACMERegisterResponse(
|
||||
status: acmeResponseBody.status, kid: acmeResponse.headers.keyOrError("location")
|
||||
)
|
||||
|
||||
proc requestNewOrder*(
|
||||
self: ACMEApi, domains: seq[string], key: KeyPair, kid: Kid
|
||||
): Future[ACMEChallengeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
# request challenge from ACME server
|
||||
let orderRequest = ACMEChallengeRequest(
|
||||
identifiers: domains.mapIt(ACMEChallengeIdentifier(`type`: "dns", value: it))
|
||||
)
|
||||
handleError("requestNewOrder"):
|
||||
let payload = await self.createSignedAcmeRequest(
|
||||
self.directory.newOrder, orderRequest, key, kid = Opt.some(kid)
|
||||
)
|
||||
let acmeResponse = await self.post(self.directory.newOrder, payload)
|
||||
|
||||
let challengeResponseBody = acmeResponse.body.to(ACMEChallengeResponseBody)
|
||||
if challengeResponseBody.authorizations.len() == 0:
|
||||
raise newException(ACMEError, "Authorizations field is empty")
|
||||
ACMEChallengeResponse(
|
||||
status: challengeResponseBody.status,
|
||||
authorizations: challengeResponseBody.authorizations,
|
||||
finalize: challengeResponseBody.finalize,
|
||||
orderURL: acmeResponse.headers.keyOrError("location"),
|
||||
)
|
||||
|
||||
proc requestAuthorizations*(
|
||||
self: ACMEApi, authorizations: seq[string], key: KeyPair, kid: Kid
|
||||
): Future[ACMEAuthorizationsResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
handleError("requestAuthorizations"):
|
||||
doAssert authorizations.len > 0
|
||||
let acmeResponse = await self.get(authorizations[0])
|
||||
acmeResponse.body.to(ACMEAuthorizationsResponse)
|
||||
|
||||
proc requestChallenge*(
|
||||
self: ACMEApi, domains: seq[string], key: KeyPair, kid: Kid
|
||||
): Future[ACMEChallengeResponseWrapper] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
let challengeResponse = await self.requestNewOrder(domains, key, kid)
|
||||
|
||||
let authorizationsResponse =
|
||||
await self.requestAuthorizations(challengeResponse.authorizations, key, kid)
|
||||
|
||||
return ACMEChallengeResponseWrapper(
|
||||
finalizeURL: challengeResponse.finalize,
|
||||
orderURL: challengeResponse.orderURL,
|
||||
dns01: authorizationsResponse.challenges.filterIt(it.`type` == "dns-01")[0],
|
||||
)
|
||||
|
||||
proc requestCheck*(
|
||||
self: ACMEApi, checkURL: string, checkKind: ACMECheckKind, key: KeyPair, kid: Kid
|
||||
): Future[ACMECheckResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
handleError("requestCheck"):
|
||||
let acmeResponse = await self.get(checkURL)
|
||||
let retryAfter =
|
||||
try:
|
||||
parseInt(acmeResponse.headers.keyOrError("Retry-After")).seconds
|
||||
except ValueError:
|
||||
DefaultChalCompletedRetryTime
|
||||
|
||||
case checkKind
|
||||
of ACMEOrderCheck:
|
||||
try:
|
||||
ACMECheckResponse(
|
||||
kind: checkKind,
|
||||
          orderStatus: parseEnum[ACMEOrderStatus](acmeResponse.body["status"].getStr),
          retryAfter: retryAfter,
        )
      except ValueError:
        raise newException(
          ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
        )
    of ACMEChallengeCheck:
      try:
        ACMECheckResponse(
          kind: checkKind,
          chalStatus: parseEnum[ACMEChallengeStatus](acmeResponse.body["status"].getStr),
          retryAfter: retryAfter,
        )
      except ValueError:
        raise newException(
          ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
        )

proc requestCompleted*(
    self: ACMEApi, chalURL: string, key: KeyPair, kid: Kid
): Future[ACMECompletedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  handleError("requestCompleted (send notify)"):
    let payload =
      await self.createSignedAcmeRequest(chalURL, %*{}, key, kid = Opt.some(kid))
    let acmeResponse = await self.post(chalURL, payload)
    acmeResponse.body.to(ACMECompletedResponse)

proc checkChallengeCompleted*(
    self: ACMEApi,
    checkURL: string,
    key: KeyPair,
    kid: Kid,
    retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
  for i in 0 .. retries:
    let checkResponse = await self.requestCheck(checkURL, ACMEChallengeCheck, key, kid)
    case checkResponse.chalStatus
    of ACMEChallengeStatus.pending:
      await sleepAsync(checkResponse.retryAfter) # try again after some delay
    of ACMEChallengeStatus.valid:
      return true
    else:
      raise newException(
        ACMEError,
        "Failed challenge completion: expected 'valid', got '" &
          $checkResponse.chalStatus & "'",
      )
  return false

proc completeChallenge*(
    self: ACMEApi,
    chalURL: string,
    key: KeyPair,
    kid: Kid,
    retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
  let completedResponse = await self.requestCompleted(chalURL, key, kid)
  # check until acme server is done (poll validation)
  return await self.checkChallengeCompleted(chalURL, key, kid, retries = retries)

proc requestFinalize*(
    self: ACMEApi, domain: string, finalizeURL: string, key: KeyPair, kid: Kid
): Future[ACMEFinalizeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  let derCSR = createCSR(domain)
  let b64CSR = base64.encode(derCSR.toSeq, safe = true)

  handleError("requestFinalize"):
    let payload = await self.createSignedAcmeRequest(
      finalizeURL, %*{"csr": b64CSR}, key, kid = Opt.some(kid)
    )
    let acmeResponse = await self.post(finalizeURL, payload)
    # server responds with updated order response
    acmeResponse.body.to(ACMEFinalizeResponse)

proc checkCertFinalized*(
    self: ACMEApi,
    orderURL: string,
    key: KeyPair,
    kid: Kid,
    retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
  for i in 0 .. retries:
    let checkResponse = await self.requestCheck(orderURL, ACMEOrderCheck, key, kid)
    case checkResponse.orderStatus
    of ACMEOrderStatus.valid:
      return true
    of ACMEOrderStatus.processing:
      await sleepAsync(checkResponse.retryAfter) # try again after some delay
    else:
      raise newException(
        ACMEError,
        "Failed certificate finalization: expected 'valid', got '" &
          $checkResponse.orderStatus & "'",
      )
  return false

proc certificateFinalized*(
    self: ACMEApi,
    domain: string,
    finalizeURL: string,
    orderURL: string,
    key: KeyPair,
    kid: Kid,
    retries: int = DefaultFinalizeRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
  let finalizeResponse = await self.requestFinalize(domain, finalizeURL, key, kid)
  # keep checking order until cert is valid (done)
  return await self.checkCertFinalized(orderURL, key, kid, retries = retries)

proc requestGetOrder*(
    self: ACMEApi, orderURL: string
): Future[ACMEOrderResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  handleError("requestGetOrder"):
    let acmeResponse = await self.get(orderURL)
    acmeResponse.body.to(ACMEOrderResponse)

proc downloadCertificate*(
    self: ACMEApi, orderURL: string
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  let orderResponse = await self.requestGetOrder(orderURL)

  handleError("downloadCertificate"):
    let rawResponse = await HttpClientRequestRef
      .get(self.session, orderResponse.certificate)
      .get()
      .send()
    ACMECertificateResponse(
      rawCertificate: bytesToString(await rawResponse.getBodyBytes()),
      certificateExpiry: parse(orderResponse.expires, "yyyy-MM-dd'T'HH:mm:ss'Z'"),
    )

proc close*(self: ACMEApi): Future[void] {.async: (raises: [CancelledError]).} =
  await self.session.closeWait()
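The procs above chain into a straightforward issuance flow: complete the challenge, finalize the order with a CSR, then download the certificate once the order turns valid. A minimal sketch (not part of the diff; it assumes an ACMEApi instance plus an account key/kid and the challenge, finalize and order URLs obtained earlier in this module):

proc issueCertificate(
    api: ACMEApi, key: KeyPair, kid: Kid,
    domain, chalURL, finalizeURL, orderURL: string,
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  # notify the server, then poll the challenge until it reports 'valid'
  discard await api.completeChallenge(chalURL, key, kid)
  # submit the CSR and poll the order until the certificate is issued
  discard await api.certificateFinalized(domain, finalizeURL, orderURL, key, kid)
  # fetch the signed certificate referenced by the finished order
  await api.downloadCertificate(orderURL)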
37
libp2p/autotls/acme/mockapi.nim
Normal file
@@ -0,0 +1,37 @@
import chronos, chronos/apps/http/httpclient, json

import ./api, ./utils

export api

type MockACMEApi* = ref object of ACMEApi
  parent*: ACMEApi
  mockedHeaders*: HttpTable
  mockedBody*: JsonNode

proc new*(
    T: typedesc[MockACMEApi]
): Future[MockACMEApi] {.async: (raises: [ACMEError, CancelledError]).} =
  let directory = ACMEDirectory(
    newNonce: LetsEncryptURL & "/new-nonce",
    newOrder: LetsEncryptURL & "/new-order",
    newAccount: LetsEncryptURL & "/new-account",
  )
  MockACMEApi(
    session: HttpSessionRef.new(), directory: directory, acmeServerURL: LetsEncryptURL
  )

method requestNonce*(
    self: MockACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
  return self.acmeServerURL & "/acme/1234"

method post*(
    self: MockACMEApi, url: string, payload: string
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
  HTTPResponse(body: self.mockedBody, headers: self.mockedHeaders)

method get*(
    self: MockACMEApi, url: string
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
  HTTPResponse(body: self.mockedBody, headers: self.mockedHeaders)
48
libp2p/autotls/acme/utils.nim
Normal file
@@ -0,0 +1,48 @@
import base64, strutils, chronos/apps/http/httpclient, json
import ../../errors
import ../../transports/tls/certificate_ffi

type ACMEError* = object of LPError

proc keyOrError*(table: HttpTable, key: string): string {.raises: [ValueError].} =
  if not table.contains(key):
    raise newException(ValueError, "key " & key & " not present in headers")
  table.getString(key)

proc base64UrlEncode*(data: seq[byte]): string =
  ## Encodes data using base64url (RFC 4648 §5) — no padding, URL-safe
  var encoded = base64.encode(data, safe = true)
  encoded.removeSuffix("=")
  encoded.removeSuffix("=")
  return encoded

proc getResponseBody*(
    response: HttpClientResponseRef
): Future[JsonNode] {.async: (raises: [ACMEError, CancelledError]).} =
  try:
    let responseBody = bytesToString(await response.getBodyBytes()).parseJson()
    return responseBody
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    raise
      newException(ACMEError, "Unexpected error occurred while getting body bytes", exc)
  except Exception as exc: # this is required for nim 1.6
    raise
      newException(ACMEError, "Unexpected error occurred while getting body bytes", exc)

proc createCSR*(domain: string): string {.raises: [ACMEError].} =
  var certKey: cert_key_t
  var certCtx: cert_context_t
  var derCSR: ptr cert_buffer = nil

  let personalizationStr = "libp2p_autotls"
  if cert_init_drbg(
    personalizationStr.cstring, personalizationStr.len.csize_t, certCtx.addr
  ) != CERT_SUCCESS:
    raise newException(ACMEError, "Failed to initialize certCtx")
  if cert_generate_key(certCtx, certKey.addr) != CERT_SUCCESS:
    raise newException(ACMEError, "Failed to generate cert key")

  if cert_signing_req(domain.cstring, certKey, derCSR.addr) != CERT_SUCCESS:
    raise newException(ACMEError, "Failed to create CSR")
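As a quick illustration of the helper above (not part of the diff), base64UrlEncode yields the RFC 4648 §5 alphabet with the padding stripped:

let encoded = base64UrlEncode(@[0xde'u8, 0xad, 0xbe, 0xef])
assert encoded == "3q2-7w" # standard base64 would be "3q2+7w=="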
@@ -23,7 +23,7 @@ import
|
||||
stream/connection,
|
||||
multiaddress,
|
||||
crypto/crypto,
|
||||
transports/[transport, tcptransport, quictransport, memorytransport],
|
||||
transports/[transport, tcptransport, wstransport, memorytransport],
|
||||
muxers/[muxer, mplex/mplex, yamux/yamux],
|
||||
protocols/[identify, secure/secure, secure/noise, rendezvous],
|
||||
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
|
||||
@@ -35,7 +35,9 @@ import
|
||||
utility
|
||||
import services/wildcardresolverservice
|
||||
|
||||
export switch, peerid, peerinfo, connection, multiaddress, crypto, errors
|
||||
export
|
||||
switch, peerid, peerinfo, connection, multiaddress, crypto, errors, TLSPrivateKey,
|
||||
TLSCertificate, TLSFlags, ServerFlags
|
||||
|
||||
const MemoryAutoAddress* = memorytransport.MemoryAutoAddress
|
||||
|
||||
@@ -169,12 +171,27 @@ proc withTcpTransport*(
|
||||
TcpTransport.new(flags, upgr)
|
||||
)
|
||||
|
||||
proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
|
||||
proc withWsTransport*(
|
||||
b: SwitchBuilder,
|
||||
tlsPrivateKey: TLSPrivateKey = nil,
|
||||
tlsCertificate: TLSCertificate = nil,
|
||||
tlsFlags: set[TLSFlags] = {},
|
||||
flags: set[ServerFlags] = {},
|
||||
): SwitchBuilder =
|
||||
b.withTransport(
|
||||
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
|
||||
QuicTransport.new(upgr, privateKey)
|
||||
WsTransport.new(upgr, tlsPrivateKey, tlsCertificate, tlsFlags, flags)
|
||||
)
|
||||
|
||||
when defined(libp2p_quic_support):
|
||||
import transports/quictransport
|
||||
|
||||
proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
|
||||
b.withTransport(
|
||||
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
|
||||
QuicTransport.new(upgr, privateKey)
|
||||
)
|
||||
|
||||
proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
|
||||
b.withTransport(
|
||||
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
|
||||
|
||||
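A hedged usage sketch for the builder procs in this hunk (not part of the diff): adding the WebSocket transport next to TCP, leaving the TLS arguments at their defaults so the switch listens on plain /ws.

let switch = SwitchBuilder
  .new()
  .withRng(newRng())
  .withAddresses(@[MultiAddress.init("/ip4/127.0.0.1/tcp/0/ws").tryGet()])
  .withTcpTransport()
  .withWsTransport()
  .withMplex()
  .withNoise()
  .build()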
@@ -10,6 +10,7 @@
|
||||
## This module implements CID (Content IDentifier).
|
||||
|
||||
{.push raises: [].}
|
||||
{.used.}
|
||||
|
||||
import tables, hashes
|
||||
import multibase, multicodec, multihash, vbuffer, varint, results
|
||||
|
||||
@@ -140,7 +140,7 @@ proc triggerConnEvent*(
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
warn "Exception in triggerConnEvents",
|
||||
warn "Exception in triggerConnEvent",
|
||||
description = exc.msg, peer = peerId, event = $event
|
||||
|
||||
proc addPeerEventHandler*(
|
||||
@@ -186,7 +186,7 @@ proc expectConnection*(
|
||||
if key in c.expectedConnectionsOverLimit:
|
||||
raise newException(
|
||||
AlreadyExpectingConnectionError,
|
||||
"Already expecting an incoming connection from that peer",
|
||||
"Already expecting an incoming connection from that peer: " & shortLog(p),
|
||||
)
|
||||
|
||||
let future = Future[Muxer].Raising([CancelledError]).init()
|
||||
|
||||
@@ -85,8 +85,9 @@ proc init*(sig: var SkSignature, data: string): SkResult[void] =
|
||||
var buffer: seq[byte]
|
||||
try:
|
||||
buffer = hexToSeqByte(data)
|
||||
except ValueError:
|
||||
return err("secp: Hex to bytes failed")
|
||||
except ValueError as e:
|
||||
let errMsg = "secp: Hex to bytes failed: " & e.msg
|
||||
return err(errMsg.cstring)
|
||||
init(sig, buffer)
|
||||
|
||||
proc init*(t: typedesc[SkPrivateKey], data: openArray[byte]): SkResult[SkPrivateKey] =
|
||||
|
||||
@@ -595,13 +595,13 @@ template exceptionToAssert(body: untyped): untyped =
|
||||
try:
|
||||
res = body
|
||||
except OSError as exc:
|
||||
raise exc
|
||||
raise newException(OSError, "failure in exceptionToAssert: " & exc.msg, exc)
|
||||
except IOError as exc:
|
||||
raise exc
|
||||
raise newException(IOError, "failure in exceptionToAssert: " & exc.msg, exc)
|
||||
except Defect as exc:
|
||||
raise exc
|
||||
raise newException(Defect, "failure in exceptionToAssert: " & exc.msg, exc)
|
||||
except Exception as exc:
|
||||
raiseAssert exc.msg
|
||||
raiseAssert "Exception captured in exceptionToAssert: " & exc.msg
|
||||
when defined(nimHasWarnBareExcept):
|
||||
{.pop.}
|
||||
res
|
||||
@@ -967,9 +967,9 @@ proc openStream*(
|
||||
stream.flags.incl(Outbound)
|
||||
stream.transp = transp
|
||||
result = stream
|
||||
except ResultError[ProtoError]:
|
||||
except ResultError[ProtoError] as e:
|
||||
await api.closeConnection(transp)
|
||||
raise newException(DaemonLocalError, "Wrong message type!")
|
||||
raise newException(DaemonLocalError, "Wrong message type: " & e.msg, e)
|
||||
|
||||
proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
|
||||
# must not specify raised exceptions as this is StreamCallback from chronos
|
||||
@@ -1023,10 +1023,10 @@ proc addHandler*(
|
||||
api.servers.add(P2PServer(server: server, address: maddress))
|
||||
except DaemonLocalError as e:
|
||||
await removeHandler()
|
||||
raise e
|
||||
raise newException(DaemonLocalError, "Could not add stream handler: " & e.msg, e)
|
||||
except TransportError as e:
|
||||
await removeHandler()
|
||||
raise e
|
||||
raise newException(TransportError, "Could not add stream handler: " & e.msg, e)
|
||||
except CancelledError as e:
|
||||
await removeHandler()
|
||||
raise e
|
||||
@@ -1503,10 +1503,14 @@ proc pubsubSubscribe*(
|
||||
result = ticket
|
||||
except DaemonLocalError as exc:
|
||||
await api.closeConnection(transp)
|
||||
raise exc
|
||||
raise newException(
|
||||
DaemonLocalError, "Could not subscribe to topic '" & topic & "': " & exc.msg, exc
|
||||
)
|
||||
except TransportError as exc:
|
||||
await api.closeConnection(transp)
|
||||
raise exc
|
||||
raise newException(
|
||||
TransportError, "Could not subscribe to topic '" & topic & "': " & exc.msg, exc
|
||||
)
|
||||
except CancelledError as exc:
|
||||
await api.closeConnection(transp)
|
||||
raise exc
|
||||
|
||||
@@ -124,9 +124,13 @@ proc expandDnsAddr(
|
||||
for resolvedAddress in resolved:
|
||||
let lastPart = resolvedAddress[^1].tryGet()
|
||||
if lastPart.protoCode == Result[MultiCodec, string].ok(multiCodec("p2p")):
|
||||
let
|
||||
var peerIdBytes: seq[byte]
|
||||
try:
|
||||
peerIdBytes = lastPart.protoArgument().tryGet()
|
||||
addrPeerId = PeerId.init(peerIdBytes).tryGet()
|
||||
except ResultError[string] as e:
|
||||
raiseAssert "expandDnsAddr failed in expandDnsAddr protoArgument: " & e.msg
|
||||
|
||||
let addrPeerId = PeerId.init(peerIdBytes).tryGet()
|
||||
result.add((resolvedAddress[0 ..^ 2].tryGet(), Opt.some(addrPeerId)))
|
||||
else:
|
||||
result.add((resolvedAddress, peerId))
|
||||
@@ -174,7 +178,7 @@ proc internalConnect(
|
||||
dir = Direction.Out,
|
||||
): Future[Muxer] {.async: (raises: [DialFailedError, CancelledError]).} =
|
||||
if Opt.some(self.localPeerId) == peerId:
|
||||
raise newException(DialFailedError, "can't dial self!")
|
||||
raise newException(DialFailedError, "internalConnect can't dial self!")
|
||||
|
||||
# Ensure there's only one in-flight attempt per peer
|
||||
let lock = self.dialLock.mgetOrPut(peerId.get(default(PeerId)), newAsyncLock())
|
||||
@@ -182,8 +186,8 @@ proc internalConnect(
|
||||
defer:
|
||||
try:
|
||||
lock.release()
|
||||
except AsyncLockError:
|
||||
raiseAssert "lock must have been acquired in line above"
|
||||
except AsyncLockError as e:
|
||||
raiseAssert "lock must have been acquired in line above: " & e.msg
|
||||
|
||||
if reuseConnection:
|
||||
peerId.withValue(peerId):
|
||||
@@ -194,7 +198,9 @@ proc internalConnect(
|
||||
try:
|
||||
self.connManager.getOutgoingSlot(forceDial)
|
||||
except TooManyConnectionsError as exc:
|
||||
raise newException(DialFailedError, exc.msg)
|
||||
raise newException(
|
||||
DialFailedError, "failed getOutgoingSlot in internalConnect: " & exc.msg, exc
|
||||
)
|
||||
|
||||
let muxed =
|
||||
try:
|
||||
@@ -204,11 +210,15 @@ proc internalConnect(
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
slot.release()
|
||||
raise newException(DialFailedError, exc.msg)
|
||||
raise newException(
|
||||
DialFailedError, "failed dialAndUpgrade in internalConnect: " & exc.msg, exc
|
||||
)
|
||||
|
||||
slot.trackMuxer(muxed)
|
||||
if isNil(muxed): # None of the addresses connected
|
||||
raise newException(DialFailedError, "Unable to establish outgoing link")
|
||||
raise newException(
|
||||
DialFailedError, "Unable to establish outgoing link in internalConnect"
|
||||
)
|
||||
|
||||
try:
|
||||
self.connManager.storeMuxer(muxed)
|
||||
@@ -224,7 +234,11 @@ proc internalConnect(
|
||||
except CatchableError as exc:
|
||||
trace "Failed to finish outgoing upgrade", description = exc.msg
|
||||
await muxed.close()
|
||||
raise newException(DialFailedError, "Failed to finish outgoing upgrade")
|
||||
raise newException(
|
||||
DialFailedError,
|
||||
"Failed to finish outgoing upgrade in internalConnect: " & exc.msg,
|
||||
exc,
|
||||
)
|
||||
|
||||
method connect*(
|
||||
self: Dialer,
|
||||
@@ -256,7 +270,7 @@ method connect*(
|
||||
|
||||
if allowUnknownPeerId == false:
|
||||
raise newException(
|
||||
DialFailedError, "Address without PeerID and unknown peer id disabled!"
|
||||
DialFailedError, "Address without PeerID and unknown peer id disabled in connect"
|
||||
)
|
||||
|
||||
return
|
||||
@@ -269,7 +283,7 @@ proc negotiateStream(
|
||||
let selected = await MultistreamSelect.select(conn, protos)
|
||||
if not protos.contains(selected):
|
||||
await conn.closeWithEOF()
|
||||
raise newException(DialFailedError, "Unable to select sub-protocol " & $protos)
|
||||
raise newException(DialFailedError, "Unable to select sub-protocol: " & $protos)
|
||||
|
||||
return conn
|
||||
|
||||
@@ -285,13 +299,13 @@ method tryDial*(
|
||||
try:
|
||||
let mux = await self.dialAndUpgrade(Opt.some(peerId), addrs)
|
||||
if mux.isNil():
|
||||
raise newException(DialFailedError, "No valid multiaddress")
|
||||
raise newException(DialFailedError, "No valid multiaddress in tryDial")
|
||||
await mux.close()
|
||||
return mux.connection.observedAddr
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
raise newException(DialFailedError, exc.msg)
|
||||
raise newException(DialFailedError, "tryDial failed: " & exc.msg, exc)
|
||||
|
||||
method dial*(
|
||||
self: Dialer, peerId: PeerId, protos: seq[string]
|
||||
@@ -305,14 +319,17 @@ method dial*(
|
||||
try:
|
||||
let stream = await self.connManager.getStream(peerId)
|
||||
if stream.isNil:
|
||||
raise newException(DialFailedError, "Couldn't get muxed stream")
|
||||
raise newException(
|
||||
DialFailedError,
|
||||
"Couldn't get muxed stream in dial for peer_id: " & shortLog(peerId),
|
||||
)
|
||||
return await self.negotiateStream(stream, protos)
|
||||
except CancelledError as exc:
|
||||
trace "Dial canceled"
|
||||
trace "Dial canceled", description = exc.msg
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
trace "Error dialing", description = exc.msg
|
||||
raise newException(DialFailedError, exc.msg)
|
||||
raise newException(DialFailedError, "failed dial existing: " & exc.msg)
|
||||
|
||||
method dial*(
|
||||
self: Dialer,
|
||||
@@ -343,17 +360,20 @@ method dial*(
|
||||
stream = await self.connManager.getStream(conn)
|
||||
|
||||
if isNil(stream):
|
||||
raise newException(DialFailedError, "Couldn't get muxed stream")
|
||||
raise newException(
|
||||
DialFailedError,
|
||||
"Couldn't get muxed stream in new dial for remote_peer_id: " & shortLog(peerId),
|
||||
)
|
||||
|
||||
return await self.negotiateStream(stream, protos)
|
||||
except CancelledError as exc:
|
||||
trace "Dial canceled", conn
|
||||
trace "Dial canceled", conn, description = exc.msg
|
||||
await cleanup()
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
debug "Error dialing", conn, description = exc.msg
|
||||
await cleanup()
|
||||
raise newException(DialFailedError, exc.msg)
|
||||
raise newException(DialFailedError, "failed new dial: " & exc.msg, exc)
|
||||
|
||||
method addTransport*(self: Dialer, t: Transport) =
|
||||
self.transports &= t
|
||||
|
||||
@@ -113,7 +113,7 @@ proc add*(dm: DiscoveryManager, di: DiscoveryInterface) =
|
||||
try:
|
||||
query.peers.putNoWait(pa)
|
||||
except AsyncQueueFullError as exc:
|
||||
debug "Cannot push discovered peer to queue"
|
||||
debug "Cannot push discovered peer to queue", description = exc.msg
|
||||
|
||||
proc request*(dm: DiscoveryManager, pa: PeerAttributes): DiscoveryQuery =
|
||||
var query = DiscoveryQuery(attr: pa, peers: newAsyncQueue[PeerAttributes]())
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
## This module implements MultiCodec.
|
||||
|
||||
{.push raises: [].}
|
||||
{.used.}
|
||||
|
||||
import tables, hashes
|
||||
import vbuffer
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
## 2. MURMUR
|
||||
|
||||
{.push raises: [].}
|
||||
{.used.}
|
||||
|
||||
import tables
|
||||
import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
|
||||
@@ -566,7 +567,7 @@ proc init*(mhtype: typedesc[MultiHash], data: string): MhResult[MultiHash] {.inl
|
||||
proc init58*(mhtype: typedesc[MultiHash], data: string): MultiHash {.inline.} =
|
||||
## Create MultiHash from BASE58 encoded string representation ``data``.
|
||||
if MultiHash.decode(Base58.decode(data), result) == -1:
|
||||
raise newException(MultihashError, "Incorrect MultiHash binary format")
|
||||
raise newException(MultihashError, "Incorrect MultiHash binary format in init58")
|
||||
|
||||
proc cmp(a: openArray[byte], b: openArray[byte]): bool {.inline.} =
|
||||
if len(a) != len(b):
|
||||
|
||||
@@ -87,7 +87,7 @@ proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
raise exc
|
||||
except LPStreamError as exc:
|
||||
await s.conn.close()
|
||||
raise exc
|
||||
raise newException(LPStreamError, "Opening LPChannel failed: " & exc.msg, exc)
|
||||
|
||||
method closed*(s: LPChannel): bool =
|
||||
s.closedLocal
|
||||
|
||||
@@ -587,10 +587,12 @@ method handle*(m: Yamux) {.async: (raises: []).} =
|
||||
let channel =
|
||||
try:
|
||||
m.channels[header.streamId]
|
||||
except KeyError:
|
||||
except KeyError as e:
|
||||
raise newException(
|
||||
YamuxError,
|
||||
"Stream was cleaned up before handling data: " & $header.streamId,
|
||||
"Stream was cleaned up before handling data: " & $header.streamId & " : " &
|
||||
e.msg,
|
||||
e,
|
||||
)
|
||||
|
||||
if header.msgType == WindowUpdate:
|
||||
|
||||
@@ -78,23 +78,23 @@ proc getDnsResponse(
|
||||
|
||||
try:
|
||||
await receivedDataFuture.wait(5.seconds) #unix default
|
||||
except AsyncTimeoutError:
|
||||
raise newException(IOError, "DNS server timeout")
|
||||
except AsyncTimeoutError as e:
|
||||
raise newException(IOError, "DNS server timeout: " & e.msg, e)
|
||||
|
||||
let rawResponse = sock.getMessage()
|
||||
try:
|
||||
parseResponse(string.fromBytes(rawResponse))
|
||||
except IOError as exc:
|
||||
raise exc
|
||||
raise newException(IOError, "Failed to parse DNS response: " & exc.msg, exc)
|
||||
except OSError as exc:
|
||||
raise exc
|
||||
raise newException(OSError, "Failed to parse DNS response: " & exc.msg, exc)
|
||||
except ValueError as exc:
|
||||
raise exc
|
||||
raise newException(ValueError, "Failed to parse DNS response: " & exc.msg, exc)
|
||||
except Exception as exc:
|
||||
# Nim 1.6: parseResponse can have a raises: [Exception, ..] because of
|
||||
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||
# it can't actually raise though
|
||||
raiseAssert exc.msg
|
||||
raiseAssert "Exception parsing DN response: " & exc.msg
|
||||
finally:
|
||||
await sock.closeWait()
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
|
||||
{.push raises: [].}
|
||||
{.push public.}
|
||||
{.used.}
|
||||
|
||||
import
|
||||
std/[hashes, strutils],
|
||||
|
||||
335
libp2p/peeridauth/client.nim
Normal file
@@ -0,0 +1,335 @@
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2025 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import base64, json, strutils, uri, times
|
||||
import chronos, chronos/apps/http/httpclient, results, chronicles, bio
|
||||
import ../peerinfo, ../crypto/crypto, ../varint.nim
|
||||
|
||||
logScope:
|
||||
topics = "libp2p peeridauth"
|
||||
|
||||
const
|
||||
NimLibp2pUserAgent = "nim-libp2p"
|
||||
PeerIDAuthPrefix* = "libp2p-PeerID"
|
||||
ChallengeCharset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
|
||||
ChallengeDefaultLen = 48
|
||||
|
||||
type PeerIDAuthClient* = ref object of RootObj
|
||||
session: HttpSessionRef
|
||||
rng: ref HmacDrbgContext
|
||||
|
||||
type PeerIDAuthError* = object of LPError
|
||||
|
||||
type PeerIDAuthResponse* = object
|
||||
status*: int
|
||||
headers*: HttpTable
|
||||
body*: seq[byte]
|
||||
|
||||
type BearerToken* = object
|
||||
token*: string
|
||||
expires*: Opt[DateTime]
|
||||
|
||||
type PeerIDAuthOpaque* = string
|
||||
type PeerIDAuthSignature* = string
|
||||
type PeerIDAuthChallenge* = string
|
||||
|
||||
type PeerIDAuthAuthenticationResponse* = object
|
||||
challengeClient*: PeerIDAuthChallenge
|
||||
opaque*: PeerIDAuthOpaque
|
||||
serverPubkey*: PublicKey
|
||||
|
||||
type PeerIDAuthAuthorizationResponse* = object
|
||||
sig*: PeerIDAuthSignature
|
||||
bearer*: BearerToken
|
||||
response*: PeerIDAuthResponse
|
||||
|
||||
type SigParam = object
|
||||
k: string
|
||||
v: seq[byte]
|
||||
|
||||
proc new*(T: typedesc[PeerIDAuthClient], rng: ref HmacDrbgContext): PeerIDAuthClient =
|
||||
PeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)
|
||||
|
||||
proc sampleChar(
|
||||
ctx: var HmacDrbgContext, choices: string
|
||||
): char {.raises: [ValueError].} =
|
||||
## Samples a random character from the input string using the DRBG context
|
||||
if choices.len == 0:
|
||||
raise newException(ValueError, "Cannot sample from an empty string")
|
||||
var idx: uint32
|
||||
ctx.generate(idx)
|
||||
return choices[uint32(idx mod uint32(choices.len))]
|
||||
|
||||
proc randomChallenge(
|
||||
rng: ref HmacDrbgContext, challengeLen: int = ChallengeDefaultLen
|
||||
): PeerIDAuthChallenge {.raises: [PeerIDAuthError].} =
|
||||
var rng = rng[]
|
||||
var challenge = ""
|
||||
try:
|
||||
for _ in 0 ..< challengeLen:
|
||||
challenge.add(rng.sampleChar(ChallengeCharset))
|
||||
except ValueError as exc:
|
||||
raise newException(PeerIDAuthError, "Failed to generate challenge", exc)
|
||||
PeerIDAuthChallenge(challenge)
|
||||
|
||||
proc extractField(data, key: string): string {.raises: [PeerIDAuthError].} =
|
||||
# Helper to extract quoted value from key
|
||||
for segment in data.split(","):
|
||||
if key in segment:
|
||||
return segment.split("=", 1)[1].strip(chars = {' ', '"'})
|
||||
raise newException(PeerIDAuthError, "Failed to find " & key & " in " & data)
|
||||
|
||||
proc genDataToSign(
|
||||
parts: seq[SigParam], prefix: string = PeerIDAuthPrefix
|
||||
): seq[byte] {.raises: [PeerIDAuthError].} =
|
||||
var buf: seq[byte] = prefix.toByteSeq()
|
||||
for p in parts:
|
||||
let varintLen = PB.encodeVarint(hint(p.k.len + p.v.len + 1)).valueOr:
|
||||
raise newException(PeerIDAuthError, "could not encode fields length to varint")
|
||||
buf.add varintLen
|
||||
buf.add (p.k & "=").toByteSeq()
|
||||
buf.add p.v
|
||||
return buf
|
||||
|
||||
proc getSigParams(
|
||||
clientSender: bool, hostname: string, challenge: string, publicKey: PublicKey
|
||||
): seq[SigParam] =
|
||||
if clientSender:
|
||||
@[
|
||||
SigParam(k: "challenge-client", v: challenge.toByteSeq()),
|
||||
SigParam(k: "hostname", v: hostname.toByteSeq()),
|
||||
SigParam(k: "server-public-key", v: publicKey.getBytes().get()),
|
||||
]
|
||||
else:
|
||||
@[
|
||||
SigParam(k: "challenge-server", v: challenge.toByteSeq()),
|
||||
SigParam(k: "client-public-key", v: publicKey.getBytes().get()),
|
||||
SigParam(k: "hostname", v: hostname.toByteSeq()),
|
||||
]
|
||||
|
||||
proc sign(
|
||||
privateKey: PrivateKey,
|
||||
challenge: PeerIDAuthChallenge,
|
||||
publicKey: PublicKey,
|
||||
hostname: string,
|
||||
clientSender: bool = true,
|
||||
): PeerIDAuthSignature {.raises: [PeerIDAuthError].} =
|
||||
let bytesToSign =
|
||||
getSigParams(clientSender, hostname, challenge, publicKey).genDataToSign()
|
||||
PeerIDAuthSignature(
|
||||
base64.encode(privateKey.sign(bytesToSign).get().getBytes(), safe = true)
|
||||
)
|
||||
|
||||
proc checkSignature*(
|
||||
serverSig: PeerIDAuthSignature,
|
||||
serverPublicKey: PublicKey,
|
||||
challengeServer: PeerIDAuthChallenge,
|
||||
clientPublicKey: PublicKey,
|
||||
hostname: string,
|
||||
): bool {.raises: [PeerIDAuthError].} =
|
||||
let bytesToSign =
|
||||
getSigParams(false, hostname, challengeServer, clientPublicKey).genDataToSign()
|
||||
var serverSignature: Signature
|
||||
try:
|
||||
if not serverSignature.init(base64.decode(serverSig).toByteSeq()):
|
||||
raise newException(
|
||||
PeerIDAuthError, "Failed to initialize Signature from base64 encoded sig"
|
||||
)
|
||||
except ValueError as exc:
|
||||
raise newException(PeerIDAuthError, "Failed to decode server's signature", exc)
|
||||
|
||||
serverSignature.verify(
|
||||
bytesToSign.toOpenArray(0, bytesToSign.len - 1), serverPublicKey
|
||||
)
|
||||
|
||||
method post*(
|
||||
self: PeerIDAuthClient, uri: string, payload: string, authHeader: string
|
||||
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
|
||||
let rawResponse = await HttpClientRequestRef
|
||||
.post(
|
||||
self.session,
|
||||
uri,
|
||||
body = payload,
|
||||
headers = [
|
||||
("Content-Type", "application/json"),
|
||||
("User-Agent", NimLibp2pUserAgent),
|
||||
("Authorization", authHeader),
|
||||
],
|
||||
)
|
||||
.get()
|
||||
.send()
|
||||
|
||||
PeerIDAuthResponse(
|
||||
status: rawResponse.status,
|
||||
headers: rawResponse.headers,
|
||||
body: await rawResponse.getBodyBytes(),
|
||||
)
|
||||
|
||||
method get*(
|
||||
self: PeerIDAuthClient, uri: string
|
||||
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
|
||||
let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
|
||||
PeerIDAuthResponse(
|
||||
status: rawResponse.status,
|
||||
headers: rawResponse.headers,
|
||||
body: await rawResponse.getBodyBytes(),
|
||||
)
|
||||
|
||||
proc requestAuthentication*(
|
||||
self: PeerIDAuthClient, uri: Uri
|
||||
): Future[PeerIDAuthAuthenticationResponse] {.
|
||||
async: (raises: [PeerIDAuthError, CancelledError])
|
||||
.} =
|
||||
let response =
|
||||
try:
|
||||
await self.get($uri)
|
||||
except HttpError as exc:
|
||||
raise newException(PeerIDAuthError, "Failed to start PeerID Auth", exc)
|
||||
|
||||
let wwwAuthenticate = response.headers.getString("WWW-Authenticate")
|
||||
if wwwAuthenticate == "":
|
||||
raise newException(PeerIDAuthError, "WWW-authenticate not present in response")
|
||||
|
||||
let serverPubkey: PublicKey =
|
||||
try:
|
||||
PublicKey.init(decode(extractField(wwwAuthenticate, "public-key")).toByteSeq()).valueOr:
|
||||
raise newException(PeerIDAuthError, "Failed to initialize server public-key")
|
||||
except ValueError as exc:
|
||||
raise newException(PeerIDAuthError, "Failed to decode server public-key", exc)
|
||||
|
||||
PeerIDAuthAuthenticationResponse(
|
||||
challengeClient: extractField(wwwAuthenticate, "challenge-client"),
|
||||
opaque: extractField(wwwAuthenticate, "opaque"),
|
||||
serverPubkey: serverPubkey,
|
||||
)
|
||||
|
||||
proc pubkeyBytes*(pubkey: PublicKey): seq[byte] {.raises: [PeerIDAuthError].} =
|
||||
try:
|
||||
pubkey.getBytes().valueOr:
|
||||
raise
|
||||
newException(PeerIDAuthError, "Failed to get bytes from PeerInfo's publicKey")
|
||||
except ValueError as exc:
|
||||
raise newException(
|
||||
PeerIDAuthError, "Failed to get bytes from PeerInfo's publicKey", exc
|
||||
)
|
||||
|
||||
proc parse3339DateTime(
|
||||
timeStr: string
|
||||
): DateTime {.raises: [ValueError, TimeParseError].} =
|
||||
let parts = timeStr.split('.')
|
||||
let base = parse(parts[0], "yyyy-MM-dd'T'HH:mm:ss")
|
||||
let millis = parseInt(parts[1].strip(chars = {'Z'}))
|
||||
result = base + initDuration(milliseconds = millis)
|
||||
|
||||
proc requestAuthorization*(
|
||||
self: PeerIDAuthClient,
|
||||
peerInfo: PeerInfo,
|
||||
uri: Uri,
|
||||
challengeClient: PeerIDAuthChallenge,
|
||||
challengeServer: PeerIDAuthChallenge,
|
||||
serverPubkey: PublicKey,
|
||||
opaque: PeerIDAuthOpaque,
|
||||
payload: auto,
|
||||
): Future[PeerIDAuthAuthorizationResponse] {.
|
||||
async: (raises: [PeerIDAuthError, CancelledError])
|
||||
.} =
|
||||
let clientPubkeyB64 = peerInfo.publicKey.pubkeyBytes().encode(safe = true)
|
||||
let sig = peerInfo.privateKey.sign(challengeClient, serverPubkey, uri.hostname)
|
||||
let authHeader =
|
||||
PeerIDAuthPrefix & " public-key=\"" & clientPubkeyB64 & "\"" & ", opaque=\"" & opaque &
|
||||
"\"" & ", challenge-server=\"" & challengeServer & "\"" & ", sig=\"" & sig & "\""
|
||||
let response =
|
||||
try:
|
||||
await self.post($uri, $payload, authHeader)
|
||||
except HttpError as exc:
|
||||
raise newException(
|
||||
PeerIDAuthError, "Failed to send Authorization for PeerID Auth", exc
|
||||
)
|
||||
|
||||
let authenticationInfo = response.headers.getString("authentication-info")
|
||||
|
||||
let bearerExpires =
|
||||
try:
|
||||
Opt.some(parse3339DateTime(extractField(authenticationInfo, "expires")))
|
||||
except ValueError, PeerIDAuthError, TimeParseError:
|
||||
Opt.none(DateTime)
|
||||
|
||||
PeerIDAuthAuthorizationResponse(
|
||||
sig: PeerIDAuthSignature(extractField(authenticationInfo, "sig")),
|
||||
bearer: BearerToken(
|
||||
token: extractField(authenticationInfo, "bearer"), expires: bearerExpires
|
||||
),
|
||||
response: response,
|
||||
)
|
||||
|
||||
proc sendWithoutBearer(
|
||||
self: PeerIDAuthClient, uri: Uri, peerInfo: PeerInfo, payload: auto
|
||||
): Future[(BearerToken, PeerIDAuthResponse)] {.
|
||||
async: (raises: [PeerIDAuthError, CancelledError])
|
||||
.} =
|
||||
# Authenticate in three ways as per the PeerID Auth spec
|
||||
# https://github.com/libp2p/specs/blob/master/http/peer-id-auth.md
|
||||
|
||||
let authenticationResponse = await self.requestAuthentication(uri)
|
||||
|
||||
let challengeServer = self.rng.randomChallenge()
|
||||
let authorizationResponse = await self.requestAuthorization(
|
||||
peerInfo, uri, authenticationResponse.challengeClient, challengeServer,
|
||||
authenticationResponse.serverPubkey, authenticationResponse.opaque, payload,
|
||||
)
|
||||
|
||||
if not checkSignature(
|
||||
authorizationResponse.sig, authenticationResponse.serverPubkey, challengeServer,
|
||||
peerInfo.publicKey, uri.hostname,
|
||||
):
|
||||
raise newException(PeerIDAuthError, "Failed to validate server's signature")
|
||||
|
||||
return (authorizationResponse.bearer, authorizationResponse.response)
|
||||
|
||||
proc sendWithBearer(
|
||||
self: PeerIDAuthClient,
|
||||
uri: Uri,
|
||||
peerInfo: PeerInfo,
|
||||
payload: auto,
|
||||
bearer: BearerToken,
|
||||
): Future[(BearerToken, PeerIDAuthResponse)] {.
|
||||
async: (raises: [PeerIDAuthError, CancelledError])
|
||||
.} =
|
||||
if bearer.expires.isSome and DateTime(bearer.expires.get) <= now():
|
||||
raise newException(PeerIDAuthError, "Bearer expired")
|
||||
let authHeader = PeerIDAuthPrefix & " bearer=\"" & bearer.token & "\""
|
||||
let response =
|
||||
try:
|
||||
await self.post($uri, $payload, authHeader)
|
||||
except HttpError as exc:
|
||||
raise newException(
|
||||
PeerIDAuthError, "Failed to send request with bearer token for PeerID Auth", exc
|
||||
)
|
||||
return (bearer, response)
|
||||
|
||||
proc send*(
|
||||
self: PeerIDAuthClient,
|
||||
uri: Uri,
|
||||
peerInfo: PeerInfo,
|
||||
payload: auto,
|
||||
bearer: BearerToken = BearerToken(),
|
||||
): Future[(BearerToken, PeerIDAuthResponse)] {.
|
||||
async: (raises: [PeerIDAuthError, CancelledError])
|
||||
.} =
|
||||
if bearer.token == "":
|
||||
await self.sendWithoutBearer(uri, peerInfo, payload)
|
||||
else:
|
||||
await self.sendWithBearer(uri, peerInfo, payload, bearer)
|
||||
|
||||
proc close*(
|
||||
self: PeerIDAuthClient
|
||||
): Future[void] {.async: (raises: [CancelledError]).} =
|
||||
await self.session.closeWait()
|
||||
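Hedged usage sketch for the client above (not part of the diff; the URL is hypothetical and peerInfo is assumed to hold the local private key). The first send runs the full challenge/response handshake and returns a bearer token; passing that token back skips the handshake on later requests.

let client = PeerIDAuthClient.new(newRng())
let uri = parseUri("https://registration.example/v1/register") # hypothetical endpoint
let (bearer, response) = await client.send(uri, peerInfo, %*{"value": "test"})
# reuse the bearer token on the next request
let (_, second) = await client.send(uri, peerInfo, %*{"value": "test"}, bearer)
await client.close()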
41
libp2p/peeridauth/mockclient.nim
Normal file
@@ -0,0 +1,41 @@
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2025 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos, chronos/apps/http/httpclient
|
||||
import ../crypto/crypto
|
||||
|
||||
import ./client
|
||||
|
||||
export client
|
||||
|
||||
type MockPeerIDAuthClient* = ref object of PeerIDAuthClient
|
||||
mockedStatus*: int
|
||||
mockedHeaders*: HttpTable
|
||||
mockedBody*: seq[byte]
|
||||
|
||||
proc new*(
|
||||
T: typedesc[MockPeerIDAuthClient], rng: ref HmacDrbgContext
|
||||
): MockPeerIDAuthClient {.raises: [PeerIDAuthError].} =
|
||||
MockPeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)
|
||||
|
||||
method post*(
|
||||
self: MockPeerIDAuthClient, uri: string, payload: string, authHeader: string
|
||||
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
|
||||
PeerIDAuthResponse(
|
||||
status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
|
||||
)
|
||||
|
||||
method get*(
|
||||
self: MockPeerIDAuthClient, uri: string
|
||||
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
|
||||
PeerIDAuthResponse(
|
||||
status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
|
||||
)
|
||||
@@ -101,8 +101,10 @@ proc new*(
|
||||
let pubkey =
|
||||
try:
|
||||
key.getPublicKey().tryGet()
|
||||
except CatchableError:
|
||||
raise newException(PeerInfoError, "invalid private key")
|
||||
except CatchableError as e:
|
||||
raise newException(
|
||||
PeerInfoError, "invalid private key creating PeerInfo: " & e.msg, e
|
||||
)
|
||||
|
||||
let peerId = PeerId.init(key).tryGet()
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import stew/results
|
||||
import results
|
||||
import chronos, chronicles
|
||||
import ../../../switch, ../../../multiaddress, ../../../peerid
|
||||
import core
|
||||
@@ -87,7 +87,7 @@ method dialMe*(
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
raise newException(AutonatError, "read Dial response failed", e)
|
||||
raise newException(AutonatError, "read Dial response failed: " & e.msg, e)
|
||||
|
||||
let response = getResponseOrRaise(AutonatMsg.decode(respBytes))
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
|
||||
import std/sequtils
|
||||
|
||||
import stew/results
|
||||
import results
|
||||
import chronos, chronicles
|
||||
|
||||
import core
|
||||
@@ -107,7 +107,9 @@ proc startSync*(
|
||||
description = err.msg
|
||||
raise newException(
|
||||
DcutrError,
|
||||
"Unexpected error when Dcutr initiator tried to connect to the remote peer", err,
|
||||
"Unexpected error when Dcutr initiator tried to connect to the remote peer: " &
|
||||
err.msg,
|
||||
err,
|
||||
)
|
||||
finally:
|
||||
if stream != nil:
|
||||
|
||||
@@ -10,8 +10,8 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[sets, sequtils]
|
||||
import stew/[results, objects]
|
||||
import chronos, chronicles
|
||||
import stew/objects
|
||||
import results, chronos, chronicles
|
||||
|
||||
import core
|
||||
import
|
||||
|
||||
@@ -148,7 +148,7 @@ proc dialPeerV1*(
|
||||
raise exc
|
||||
except LPStreamError as exc:
|
||||
trace "error writing hop request", description = exc.msg
|
||||
raise newException(RelayV1DialError, "error writing hop request", exc)
|
||||
raise newException(RelayV1DialError, "error writing hop request: " & exc.msg, exc)
|
||||
|
||||
let msgRcvFromRelayOpt =
|
||||
try:
|
||||
@@ -158,7 +158,8 @@ proc dialPeerV1*(
|
||||
except LPStreamError as exc:
|
||||
trace "error reading stop response", description = exc.msg
|
||||
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
|
||||
raise newException(RelayV1DialError, "error reading stop response", exc)
|
||||
raise
|
||||
newException(RelayV1DialError, "error reading stop response: " & exc.msg, exc)
|
||||
|
||||
try:
|
||||
let msgRcvFromRelay = msgRcvFromRelayOpt.valueOr:
|
||||
@@ -173,10 +174,16 @@ proc dialPeerV1*(
|
||||
)
|
||||
except RelayV1DialError as exc:
|
||||
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
|
||||
raise exc
|
||||
raise newException(
|
||||
RelayV1DialError,
|
||||
"Hop can't open destination stream after sendStatus: " & exc.msg,
|
||||
exc,
|
||||
)
|
||||
except ValueError as exc:
|
||||
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
|
||||
raise newException(RelayV1DialError, exc.msg)
|
||||
raise newException(
|
||||
RelayV1DialError, "Exception reading msg in dialPeerV1: " & exc.msg, exc
|
||||
)
|
||||
result = conn
|
||||
|
||||
proc dialPeerV2*(
|
||||
@@ -199,7 +206,8 @@ proc dialPeerV2*(
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
trace "error reading stop response", description = exc.msg
|
||||
raise newException(RelayV2DialError, exc.msg)
|
||||
raise
|
||||
newException(RelayV2DialError, "Exception decoding HopMessage: " & exc.msg, exc)
|
||||
|
||||
if msgRcvFromRelay.msgType != HopMessageType.Status:
|
||||
raise newException(RelayV2DialError, "Unexpected stop response")
|
||||
|
||||
@@ -76,7 +76,7 @@ proc dial*(
|
||||
if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
|
||||
raise newException(RelayDialError, "Destination doesn't exist")
|
||||
except RelayDialError as e:
|
||||
raise e
|
||||
raise newException(RelayDialError, "dial address not valid: " & e.msg, e)
|
||||
except CatchableError:
|
||||
raise newException(RelayDialError, "dial address not valid")
|
||||
|
||||
@@ -100,13 +100,13 @@ proc dial*(
|
||||
raise e
|
||||
except DialFailedError as e:
|
||||
safeClose(rc)
|
||||
raise newException(RelayDialError, "dial relay peer failed", e)
|
||||
raise newException(RelayDialError, "dial relay peer failed: " & e.msg, e)
|
||||
except RelayV1DialError as e:
|
||||
safeClose(rc)
|
||||
raise e
|
||||
raise newException(RelayV1DialError, "dial relay v1 failed: " & e.msg, e)
|
||||
except RelayV2DialError as e:
|
||||
safeClose(rc)
|
||||
raise e
|
||||
raise newException(RelayV2DialError, "dial relay v2 failed: " & e.msg, e)
|
||||
|
||||
method dial*(
|
||||
self: RelayTransport,
|
||||
@@ -121,7 +121,8 @@ method dial*(
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
raise newException(transport.TransportDialError, e.msg, e)
|
||||
raise
|
||||
newException(transport.TransportDialError, "Caught error in dial: " & e.msg, e)
|
||||
|
||||
method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe.} =
|
||||
try:
|
||||
|
||||
@@ -69,8 +69,8 @@ proc bridge*(
|
||||
while not connSrc.closed() and not connDst.closed():
|
||||
try: # https://github.com/status-im/nim-chronos/issues/516
|
||||
discard await race(futSrc, futDst)
|
||||
except ValueError:
|
||||
raiseAssert("Futures list is not empty")
|
||||
except ValueError as e:
|
||||
raiseAssert("Futures list is not empty: " & e.msg)
|
||||
if futSrc.finished():
|
||||
bufRead = await futSrc
|
||||
if bufRead > 0:
|
||||
|
||||
159
libp2p/protocols/kademlia/protobuf.nim
Normal file
@@ -0,0 +1,159 @@
|
||||
import ../../protobuf/minprotobuf
|
||||
import ../../varint
|
||||
import ../../utility
|
||||
import results
|
||||
import ../../multiaddress
|
||||
import stew/objects
|
||||
import stew/assign2
|
||||
import options
|
||||
|
||||
type
|
||||
Record* {.public.} = object
|
||||
key*: Option[seq[byte]]
|
||||
value*: Option[seq[byte]]
|
||||
timeReceived*: Option[string]
|
||||
|
||||
MessageType* = enum
|
||||
putValue = 0
|
||||
getValue = 1
|
||||
addProvider = 2
|
||||
getProviders = 3
|
||||
findNode = 4
|
||||
ping = 5 # Deprecated
|
||||
|
||||
ConnectionType* = enum
|
||||
notConnected = 0
|
||||
connected = 1
|
||||
canConnect = 2 # Unused
|
||||
cannotConnect = 3 # Unused
|
||||
|
||||
Peer* {.public.} = object
|
||||
id*: seq[byte]
|
||||
addrs*: seq[MultiAddress]
|
||||
connection*: ConnectionType
|
||||
|
||||
Message* {.public.} = object
|
||||
msgType*: MessageType
|
||||
key*: Option[seq[byte]]
|
||||
record*: Option[Record]
|
||||
closerPeers*: seq[Peer]
|
||||
providerPeers*: seq[Peer]
|
||||
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].}
|
||||
|
||||
proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].}
|
||||
|
||||
proc encode*(record: Record): ProtoBuffer {.raises: [].} =
|
||||
var pb = initProtoBuffer()
|
||||
pb.writeOpt(1, record.key)
|
||||
pb.writeOpt(2, record.value)
|
||||
pb.writeOpt(5, record.timeReceived)
|
||||
pb.finish()
|
||||
return pb
|
||||
|
||||
proc encode*(peer: Peer): ProtoBuffer {.raises: [].} =
|
||||
var pb = initProtoBuffer()
|
||||
pb.write(1, peer.id)
|
||||
for address in peer.addrs:
|
||||
pb.write(2, address.data.buffer)
|
||||
pb.write(3, uint32(ord(peer.connection)))
|
||||
pb.finish()
|
||||
return pb
|
||||
|
||||
proc encode*(msg: Message): ProtoBuffer {.raises: [].} =
|
||||
var pb = initProtoBuffer()
|
||||
|
||||
pb.write(1, uint32(ord(msg.msgType)))
|
||||
|
||||
pb.writeOpt(2, msg.key)
|
||||
|
||||
msg.record.withValue(record):
|
||||
pb.writeOpt(3, msg.record)
|
||||
|
||||
for peer in msg.closerPeers:
|
||||
pb.write(8, peer.encode())
|
||||
|
||||
for peer in msg.providerPeers:
|
||||
pb.write(9, peer.encode())
|
||||
|
||||
pb.finish()
|
||||
|
||||
return pb
|
||||
|
||||
proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].} =
|
||||
opt.withValue(v):
|
||||
pb.write(field, v)
|
||||
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].} =
|
||||
pb.write(field, value.encode())
|
||||
|
||||
proc getOptionField[T: ProtoScalar | string | seq[byte]](
|
||||
pb: ProtoBuffer, field: int, output: var Option[T]
|
||||
): ProtoResult[void] =
|
||||
var f: T
|
||||
if ?pb.getField(field, f):
|
||||
assign(output, some(f))
|
||||
ok()
|
||||
|
||||
proc decode*(T: type Record, pb: ProtoBuffer): ProtoResult[Option[T]] =
|
||||
var r: Record
|
||||
?pb.getOptionField(1, r.key)
|
||||
?pb.getOptionField(2, r.value)
|
||||
?pb.getOptionField(5, r.timeReceived)
|
||||
return ok(some(r))
|
||||
|
||||
proc decode*(T: type Peer, pb: ProtoBuffer): ProtoResult[Option[T]] =
|
||||
var
|
||||
p: Peer
|
||||
id: seq[byte]
|
||||
|
||||
?pb.getRequiredField(1, p.id)
|
||||
|
||||
discard ?pb.getRepeatedField(2, p.addrs)
|
||||
|
||||
var connVal: uint32
|
||||
if ?pb.getField(3, connVal):
|
||||
var connType: ConnectionType
|
||||
if not checkedEnumAssign(connType, connVal):
|
||||
return err(ProtoError.BadWireType)
|
||||
p.connection = connType
|
||||
|
||||
return ok(some(p))
|
||||
|
||||
proc decode*(T: type Message, buf: seq[byte]): ProtoResult[Option[T]] =
|
||||
var
|
||||
m: Message
|
||||
key: seq[byte]
|
||||
recPb: seq[byte]
|
||||
closerPbs: seq[seq[byte]]
|
||||
providerPbs: seq[seq[byte]]
|
||||
|
||||
var pb = initProtoBuffer(buf)
|
||||
|
||||
var msgTypeVal: uint32
|
||||
?pb.getRequiredField(1, msgTypeVal)
|
||||
|
||||
var msgType: MessageType
|
||||
if not checkedEnumAssign(msgType, msgTypeVal):
|
||||
return err(ProtoError.BadWireType)
|
||||
|
||||
m.msgType = msgType
|
||||
|
||||
?pb.getOptionField(2, m.key)
|
||||
|
||||
if ?pb.getField(3, recPb):
|
||||
assign(m.record, ?Record.decode(initProtoBuffer(recPb)))
|
||||
|
||||
discard ?pb.getRepeatedField(8, closerPbs)
|
||||
for ppb in closerPbs:
|
||||
let peerOpt = ?Peer.decode(initProtoBuffer(ppb))
|
||||
peerOpt.withValue(peer):
|
||||
m.closerPeers.add(peer)
|
||||
|
||||
discard ?pb.getRepeatedField(9, providerPbs)
|
||||
for ppb in providerPbs:
|
||||
let peer = ?Peer.decode(initProtoBuffer(ppb))
|
||||
peer.withValue(peer):
|
||||
m.providerPeers.add(peer)
|
||||
|
||||
return ok(some(m))
|
||||
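A short round-trip sketch for the protobuf helpers above (not part of the diff; the key and value bytes are made up):

let msg = Message(
  msgType: MessageType.putValue,
  key: some(@[1'u8, 2, 3]),
  record: some(Record(key: some(@[1'u8, 2, 3]), value: some(@[4'u8, 5, 6]))),
)
let wire = msg.encode().buffer          # serialize to bytes
let decoded = Message.decode(wire).tryGet().get()
assert decoded.msgType == MessageType.putValue
assert decoded.key.isSome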
@@ -16,35 +16,68 @@ import ./core, ../../stream/connection
|
||||
logScope:
|
||||
topics = "libp2p perf"
|
||||
|
||||
type PerfClient* = ref object of RootObj
|
||||
type Stats* = object
|
||||
isFinal*: bool
|
||||
uploadBytes*: uint
|
||||
downloadBytes*: uint
|
||||
duration*: Duration
|
||||
|
||||
type PerfClient* = ref object
|
||||
stats: Stats
|
||||
|
||||
proc new*(T: typedesc[PerfClient]): T =
|
||||
return T()
|
||||
|
||||
proc currentStats*(p: PerfClient): Stats =
|
||||
return p.stats
|
||||
|
||||
proc perf*(
|
||||
_: typedesc[PerfClient],
|
||||
conn: Connection,
|
||||
sizeToWrite: uint64 = 0,
|
||||
sizeToRead: uint64 = 0,
|
||||
p: PerfClient, conn: Connection, sizeToWrite: uint64 = 0, sizeToRead: uint64 = 0
|
||||
): Future[Duration] {.public, async: (raises: [CancelledError, LPStreamError]).} =
|
||||
var
|
||||
size = sizeToWrite
|
||||
buf: array[PerfSize, byte]
|
||||
let start = Moment.now()
|
||||
trace "starting performance benchmark", conn, sizeToWrite, sizeToRead
|
||||
|
||||
await conn.write(toSeq(toBytesBE(sizeToRead)))
|
||||
while size > 0:
|
||||
let toWrite = min(size, PerfSize)
|
||||
await conn.write(buf[0 ..< toWrite])
|
||||
size -= toWrite
|
||||
p.stats = Stats()
|
||||
|
||||
await conn.close()
|
||||
try:
|
||||
var
|
||||
size = sizeToWrite
|
||||
buf: array[PerfSize, byte]
|
||||
|
||||
size = sizeToRead
|
||||
let start = Moment.now()
|
||||
|
||||
while size > 0:
|
||||
let toRead = min(size, PerfSize)
|
||||
await conn.readExactly(addr buf[0], toRead.int)
|
||||
size = size - toRead
|
||||
await conn.write(toSeq(toBytesBE(sizeToRead)))
|
||||
while size > 0:
|
||||
let toWrite = min(size, PerfSize)
|
||||
await conn.write(buf[0 ..< toWrite])
|
||||
size -= toWrite.uint
|
||||
|
||||
let duration = Moment.now() - start
|
||||
trace "finishing performance benchmark", duration
|
||||
return duration
|
||||
# set stats using copy value to avoid race condition
|
||||
var statsCopy = p.stats
|
||||
statsCopy.duration = Moment.now() - start
|
||||
statsCopy.uploadBytes += toWrite.uint
|
||||
p.stats = statsCopy
|
||||
|
||||
await conn.close()
|
||||
|
||||
size = sizeToRead
|
||||
|
||||
while size > 0:
|
||||
let toRead = min(size, PerfSize)
|
||||
await conn.readExactly(addr buf[0], toRead.int)
|
||||
size = size - toRead.uint
|
||||
|
||||
# set stats using copy value to avoid race condition
|
||||
var statsCopy = p.stats
|
||||
statsCopy.duration = Moment.now() - start
|
||||
statsCopy.downloadBytes += toRead.uint
|
||||
p.stats = statsCopy
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except LPStreamError as e:
|
||||
raise e
|
||||
finally:
|
||||
p.stats.isFinal = true
|
||||
|
||||
trace "finishing performance benchmark", duration = p.stats.duration
|
||||
|
||||
return p.stats.duration
|
||||
|
||||
@@ -185,14 +185,14 @@ method init*(f: FloodSub) =
|
||||
try:
|
||||
await f.handleConn(conn, proto)
|
||||
except CancelledError as exc:
|
||||
trace "Unexpected cancellation in floodsub handler", conn
|
||||
trace "Unexpected cancellation in floodsub handler", conn, description = exc.msg
|
||||
raise exc
|
||||
|
||||
f.handler = handler
|
||||
f.codec = FloodSubCodec
|
||||
|
||||
method publish*(
|
||||
f: FloodSub, topic: string, data: seq[byte]
|
||||
f: FloodSub, topic: string, data: seq[byte], useCustomConn: bool = false
|
||||
): Future[int] {.async: (raises: []).} =
|
||||
# base returns always 0
|
||||
discard await procCall PubSub(f).publish(topic, data)
|
||||
|
||||
@@ -29,7 +29,7 @@ import
|
||||
../../utility,
|
||||
../../switch
|
||||
|
||||
import stew/results
|
||||
import results
|
||||
export results
|
||||
|
||||
import ./gossipsub/[types, scoring, behavior], ../../utils/heartbeat
|
||||
@@ -218,7 +218,7 @@ method init*(g: GossipSub) =
|
||||
try:
|
||||
await g.handleConn(conn, proto)
|
||||
except CancelledError as exc:
|
||||
trace "Unexpected cancellation in gossipsub handler", conn
|
||||
trace "Unexpected cancellation in gossipsub handler", conn, description = exc.msg
|
||||
raise exc
|
||||
|
||||
g.handler = handler
|
||||
@@ -702,24 +702,27 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
|
||||
# Send unsubscribe (in reverse order to sub/graft)
|
||||
procCall PubSub(g).onTopicSubscription(topic, subscribed)
|
||||
|
||||
method publish*(
|
||||
proc makePeersForPublishUsingCustomConn(
|
||||
g: GossipSub, topic: string
|
||||
): HashSet[PubSubPeer] =
|
||||
assert g.customConnCallbacks.isSome,
|
||||
"GossipSub misconfiguration: useCustomConn was true, but no customConnCallbacks provided"
|
||||
|
||||
trace "Selecting peers via custom connection callback"
|
||||
|
||||
return g.customConnCallbacks.get().customPeerSelectionCB(
|
||||
g.gossipsub.getOrDefault(topic),
|
||||
g.subscribedDirectPeers.getOrDefault(topic),
|
||||
g.mesh.getOrDefault(topic),
|
||||
g.fanout.getOrDefault(topic),
|
||||
)
|
||||
|
||||
proc makePeersForPublishDefault(
|
||||
g: GossipSub, topic: string, data: seq[byte]
|
||||
): Future[int] {.async: (raises: []).} =
|
||||
logScope:
|
||||
topic
|
||||
|
||||
if topic.len <= 0: # data could be 0/empty
|
||||
debug "Empty topic, skipping publish"
|
||||
return 0
|
||||
|
||||
# base returns always 0
|
||||
discard await procCall PubSub(g).publish(topic, data)
|
||||
|
||||
trace "Publishing message on topic", data = data.shortLog
|
||||
|
||||
): HashSet[PubSubPeer] =
|
||||
var peers: HashSet[PubSubPeer]
|
||||
|
||||
# add always direct peers
|
||||
# Always include direct peers
|
||||
peers.incl(g.subscribedDirectPeers.getOrDefault(topic))
|
||||
|
||||
if topic in g.topics: # if we're subscribed use the mesh
|
||||
@@ -769,6 +772,29 @@ method publish*(
|
||||
# ultimately is not sent)
|
||||
g.lastFanoutPubSub[topic] = Moment.fromNow(g.parameters.fanoutTTL)
|
||||
|
||||
return peers
|
||||
|
||||
method publish*(
|
||||
g: GossipSub, topic: string, data: seq[byte], useCustomConn: bool = false
|
||||
): Future[int] {.async: (raises: []).} =
|
||||
logScope:
|
||||
topic
|
||||
|
||||
if topic.len <= 0: # data could be 0/empty
|
||||
debug "Empty topic, skipping publish"
|
||||
return 0
|
||||
|
||||
# base returns always 0
|
||||
discard await procCall PubSub(g).publish(topic, data)
|
||||
|
||||
trace "Publishing message on topic", data = data.shortLog
|
||||
|
||||
let peers =
|
||||
if useCustomConn:
|
||||
g.makePeersForPublishUsingCustomConn(topic)
|
||||
else:
|
||||
g.makePeersForPublishDefault(topic, data)
|
||||
|
||||
if peers.len == 0:
|
||||
let topicPeers = g.gossipsub.getOrDefault(topic).toSeq()
|
||||
debug "No peers for topic, skipping publish",
|
||||
@@ -807,7 +833,12 @@ method publish*(
|
||||
if g.parameters.sendIDontWantOnPublish and isLargeMessage(msg, msgId):
|
||||
g.sendIDontWant(msg, msgId, peers)
|
||||
|
||||
g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
|
||||
g.broadcast(
|
||||
peers,
|
||||
RPCMsg(messages: @[msg]),
|
||||
isHighPriority = true,
|
||||
useCustomConn = useCustomConn,
|
||||
)
|
||||
|
||||
if g.knownTopics.contains(topic):
|
||||
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = [topic])
|
||||
|
||||
@@ -305,9 +305,9 @@ proc handleIHave*(
|
||||
proc handleIDontWant*(g: GossipSub, peer: PubSubPeer, iDontWants: seq[ControlIWant]) =
|
||||
for dontWant in iDontWants:
|
||||
for messageId in dontWant.messageIDs:
|
||||
if peer.iDontWants[^1].len > 1000:
|
||||
if peer.iDontWants[0].len >= IDontWantMaxCount:
|
||||
break
|
||||
peer.iDontWants[^1].incl(g.salt(messageId))
|
||||
peer.iDontWants[0].incl(g.salt(messageId))
|
||||
|
||||
proc handleIWant*(
|
||||
g: GossipSub, peer: PubSubPeer, iwants: seq[ControlIWant]
|
||||
@@ -457,8 +457,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
|
||||
prunes = toSeq(
|
||||
try:
|
||||
g.mesh[topic]
|
||||
except KeyError:
|
||||
raiseAssert "have peers"
|
||||
except KeyError as e:
|
||||
raiseAssert "have peers: " & e.msg
|
||||
)
|
||||
# avoid pruning peers we are currently grafting in this heartbeat
|
||||
prunes.keepIf do(x: PubSubPeer) -> bool:
|
||||
@@ -513,8 +513,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
|
||||
var peers = toSeq(
|
||||
try:
|
||||
g.mesh[topic]
|
||||
except KeyError:
|
||||
raiseAssert "have peers"
|
||||
except KeyError as e:
|
||||
raiseAssert "have peers: " & e.msg
|
||||
)
|
||||
# grafting so high score has priority
|
||||
peers.sort(byScore, SortOrder.Descending)
|
||||
@@ -538,8 +538,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
|
||||
it.peerId notin backingOff:
|
||||
avail.add(it)
|
||||
|
||||
# by spec, grab only 2
|
||||
if avail.len > 1:
|
||||
# by spec, grab only up to MaxOpportunisticGraftPeers
|
||||
if avail.len >= MaxOpportunisticGraftPeers:
|
||||
break
|
||||
|
||||
for peer in avail:
|
||||
@@ -690,7 +690,7 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] =
|
||||
for peer in allPeers:
|
||||
control.mgetOrPut(peer, ControlMessage()).ihave.add(ihave)
|
||||
for msgId in ihave.messageIDs:
|
||||
peer.sentIHaves[^1].incl(msgId)
|
||||
peer.sentIHaves[0].incl(msgId)
|
||||
|
||||
libp2p_gossipsub_cache_window_size.set(cacheWindowSize.int64)
|
||||
|
||||
|
||||
@@ -50,6 +50,9 @@ const
|
||||
# rust sigp: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/config.rs#L572
|
||||
# go: https://github.com/libp2p/go-libp2p-pubsub/blob/08c17398fb11b2ab06ca141dddc8ec97272eb772/gossipsub.go#L155
|
||||
IHaveMaxLength* = 5000
|
||||
IDontWantMaxCount* = 1000
|
||||
# maximum number of IDontWant messages in one slot of the history
|
||||
MaxOpportunisticGraftPeers* = 2
|
||||
|
||||
type
|
||||
TopicInfo* = object # gossip 1.1 related
|
||||
|
||||
@@ -31,7 +31,7 @@ import
|
||||
../../errors,
|
||||
../../utility
|
||||
|
||||
import stew/results
|
||||
import results
|
||||
export results
|
||||
|
||||
export tables, sets
|
||||
@@ -176,6 +176,7 @@ type
|
||||
rng*: ref HmacDrbgContext
|
||||
|
||||
knownTopics*: HashSet[string]
|
||||
customConnCallbacks*: Option[CustomConnectionCallbacks]
|
||||
|
||||
method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
|
||||
## handle peer disconnects
|
||||
@@ -187,7 +188,11 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
|
||||
libp2p_pubsub_peers.set(p.peers.len.int64)
|
||||
|
||||
proc send*(
|
||||
p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool
|
||||
p: PubSub,
|
||||
peer: PubSubPeer,
|
||||
msg: RPCMsg,
|
||||
isHighPriority: bool,
|
||||
useCustomConn: bool = false,
|
||||
) {.raises: [].} =
|
||||
## This procedure attempts to send a `msg` (of type `RPCMsg`) to the specified remote peer in the PubSub network.
|
||||
##
|
||||
@@ -200,13 +205,14 @@ proc send*(
|
||||
## priority messages have been sent.
|
||||
|
||||
trace "sending pubsub message to peer", peer, payload = shortLog(msg)
|
||||
peer.send(msg, p.anonymize, isHighPriority)
|
||||
peer.send(msg, p.anonymize, isHighPriority, useCustomConn)
|
||||
|
||||
proc broadcast*(
|
||||
p: PubSub,
|
||||
sendPeers: auto, # Iterable[PubSubPeer]
|
||||
msg: RPCMsg,
|
||||
isHighPriority: bool,
|
||||
useCustomConn: bool = false,
|
||||
) {.raises: [].} =
|
||||
## This procedure attempts to send a `msg` (of type `RPCMsg`) to a specified group of peers in the PubSub network.
|
||||
##
|
||||
@@ -261,12 +267,12 @@ proc broadcast*(
|
||||
|
||||
if anyIt(sendPeers, it.hasObservers):
|
||||
for peer in sendPeers:
|
||||
p.send(peer, msg, isHighPriority)
|
||||
p.send(peer, msg, isHighPriority, useCustomConn)
|
||||
else:
|
||||
# Fast path that only encodes message once
|
||||
let encoded = encodeRpcMsg(msg, p.anonymize)
|
||||
for peer in sendPeers:
|
||||
asyncSpawn peer.sendEncoded(encoded, isHighPriority)
|
||||
asyncSpawn peer.sendEncoded(encoded, isHighPriority, useCustomConn)
|
||||
|
||||
proc sendSubs*(
|
||||
p: PubSub, peer: PubSubPeer, topics: openArray[string], subscribe: bool
|
||||
@@ -373,8 +379,14 @@ method getOrCreatePeer*(
|
||||
p.onPubSubPeerEvent(peer, event)
|
||||
|
||||
# create new pubsub peer
|
||||
let pubSubPeer =
|
||||
PubSubPeer.new(peerId, getConn, onEvent, protoNegotiated, p.maxMessageSize)
|
||||
let pubSubPeer = PubSubPeer.new(
|
||||
peerId,
|
||||
getConn,
|
||||
onEvent,
|
||||
protoNegotiated,
|
||||
p.maxMessageSize,
|
||||
customConnCallbacks = p.customConnCallbacks,
|
||||
)
|
||||
debug "created new pubsub peer", peerId
|
||||
|
||||
p.peers[peerId] = pubSubPeer
|
||||
@@ -558,7 +570,7 @@ proc subscribe*(p: PubSub, topic: string, handler: TopicHandler) {.public.} =
|
||||
p.updateTopicMetrics(topic)
|
||||
|
||||
method publish*(
|
||||
p: PubSub, topic: string, data: seq[byte]
|
||||
p: PubSub, topic: string, data: seq[byte], useCustomConn: bool = false
|
||||
): Future[int] {.base, async: (raises: []), public.} =
|
||||
## publish to a ``topic``
|
||||
##
|
||||
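With publish gaining the useCustomConn flag above, a caller that configured custom connection callbacks can route a single publish over the custom path. A hedged usage sketch, assuming an already-started GossipSub instance and stew/byteutils for toBytes:

proc publishOverCustomConn(g: GossipSub): Future[int] {.async.} =
  # route this publish through the configured custom connection callbacks
  return await g.publish("example-topic", "hello".toBytes(), useCustomConn = true)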
@@ -648,6 +660,8 @@ proc init*[PubParams: object | bool](
|
||||
maxMessageSize: int = 1024 * 1024,
|
||||
rng: ref HmacDrbgContext = newRng(),
|
||||
parameters: PubParams = false,
|
||||
customConnCallbacks: Option[CustomConnectionCallbacks] =
|
||||
none(CustomConnectionCallbacks),
|
||||
): P {.raises: [InitializationError], public.} =
|
||||
let pubsub =
|
||||
when PubParams is bool:
|
||||
@@ -663,6 +677,7 @@ proc init*[PubParams: object | bool](
|
||||
maxMessageSize: maxMessageSize,
|
||||
rng: rng,
|
||||
topicsHigh: int.high,
|
||||
customConnCallbacks: customConnCallbacks,
|
||||
)
|
||||
else:
|
||||
P(
|
||||
@@ -678,6 +693,7 @@ proc init*[PubParams: object | bool](
|
||||
maxMessageSize: maxMessageSize,
|
||||
rng: rng,
|
||||
topicsHigh: int.high,
|
||||
customConnCallbacks: customConnCallbacks,
|
||||
)
|
||||
|
||||
proc peerEventHandler(
|
||||
|
||||
@@ -9,8 +9,8 @@
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[sequtils, strutils, tables, hashes, options, sets, deques]
|
||||
import stew/results
|
||||
import std/[sequtils, tables, hashes, options, sets, deques]
|
||||
import results
|
||||
import chronos, chronicles, nimcrypto/sha2, metrics
|
||||
import chronos/ratelimit
|
||||
import
|
||||
@@ -95,6 +95,21 @@ type
|
||||
# Task for processing non-priority message queue.
|
||||
sendNonPriorityTask: Future[void]
|
||||
|
||||
CustomConnCreationProc* = proc(
|
||||
destAddr: Option[MultiAddress], destPeerId: PeerId, codec: string
|
||||
): Connection {.gcsafe, raises: [].}
|
||||
|
||||
CustomPeerSelectionProc* = proc(
|
||||
allPeers: HashSet[PubSubPeer],
|
||||
directPeers: HashSet[PubSubPeer],
|
||||
meshPeers: HashSet[PubSubPeer],
|
||||
fanoutPeers: HashSet[PubSubPeer],
|
||||
): HashSet[PubSubPeer] {.gcsafe, raises: [].}
|
||||
|
||||
CustomConnectionCallbacks* = object
|
||||
customConnCreationCB*: CustomConnCreationProc
|
||||
customPeerSelectionCB*: CustomPeerSelectionProc
|
||||
|
||||
PubSubPeer* = ref object of RootObj
|
||||
getConn*: GetConn # callback to establish a new send connection
|
||||
onEvent*: OnEvent # Connectivity updates for peer
|
||||
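The two callback types introduced above are the extension point of this change. A hedged sketch of how an application might populate CustomConnectionCallbacks, assuming the pubsubpeer, multiaddress and connection modules are imported; selectMeshFirst and lookupPremadeConn are illustrative names, not part of this diff:

proc selectMeshFirst(
    allPeers: HashSet[PubSubPeer],
    directPeers: HashSet[PubSubPeer],
    meshPeers: HashSet[PubSubPeer],
    fanoutPeers: HashSet[PubSubPeer],
): HashSet[PubSubPeer] {.gcsafe, raises: [].} =
  # prefer mesh peers, fall back to every known peer
  if meshPeers.len > 0: meshPeers else: allPeers

proc createCustomConn(
    destAddr: Option[MultiAddress], destPeerId: PeerId, codec: string
): Connection {.gcsafe, raises: [].} =
  # hand back a pre-established connection from application state
  lookupPremadeConn(destPeerId, codec) # hypothetical application helper

let callbacks = CustomConnectionCallbacks(
  customConnCreationCB: createCustomConn, customPeerSelectionCB: selectMeshFirst
)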
@@ -123,6 +138,7 @@ type
|
||||
maxNumElementsInNonPriorityQueue*: int
|
||||
# The max number of elements allowed in the non-priority queue.
|
||||
disconnected: bool
|
||||
customConnCallbacks*: Option[CustomConnectionCallbacks]
|
||||
|
||||
RPCHandler* =
|
||||
proc(peer: PubSubPeer, data: seq[byte]): Future[void] {.async: (raises: []).}
|
||||
@@ -214,10 +230,10 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async: (raises: []).} =
|
||||
conn, peer = p, closed = conn.closed, description = exc.msg
|
||||
finally:
|
||||
await conn.close()
|
||||
except CancelledError:
|
||||
except CancelledError as e:
|
||||
# This is a top-level procedure which will work as a separate task, so it
# does not need to propagate CancelledError.
|
||||
trace "Unexpected cancellation in PubSubPeer.handle"
|
||||
trace "Unexpected cancellation in PubSubPeer.handle", description = e.msg
|
||||
finally:
|
||||
debug "exiting pubsub read loop", conn, peer = p, closed = conn.closed
|
||||
|
||||
@@ -250,7 +266,7 @@ proc connectOnce(
|
||||
await p.getConn().wait(5.seconds)
|
||||
except AsyncTimeoutError as error:
|
||||
trace "getConn timed out", description = error.msg
|
||||
raise (ref LPError)(msg: "Cannot establish send connection")
|
||||
raise (ref LPError)(msg: "Cannot establish send connection: " & error.msg)
|
||||
|
||||
# When the send channel goes up, subscriptions need to be sent to the
|
||||
# remote peer - if we had multiple channels up and one goes down, all
|
||||
@@ -356,21 +372,43 @@ proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async: (raises: [CancelledErro
|
||||
trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
|
||||
await sendMsgContinue(conn, conn.writeLp(msg))
|
||||
|
||||
proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] {.async: (raises: []).} =
|
||||
if p.sendConn != nil and not p.sendConn.closed():
|
||||
# Fast path that avoids copying msg (which happens for {.async.})
|
||||
let conn = p.sendConn
|
||||
proc sendMsg(
|
||||
p: PubSubPeer, msg: seq[byte], useCustomConn: bool = false
|
||||
): Future[void] {.async: (raises: []).} =
|
||||
type ConnectionType = enum
|
||||
ctCustom
|
||||
ctSend
|
||||
ctSlow
|
||||
|
||||
trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
|
||||
var slowPath = false
|
||||
let (conn, connType) =
|
||||
if useCustomConn and p.customConnCallbacks.isSome:
|
||||
let address = p.address
|
||||
(
|
||||
p.customConnCallbacks.get().customConnCreationCB(address, p.peerId, p.codec),
|
||||
ctCustom,
|
||||
)
|
||||
elif p.sendConn != nil and not p.sendConn.closed():
|
||||
(p.sendConn, ctSend)
|
||||
else:
|
||||
slowPath = true
|
||||
(nil, ctSlow)
|
||||
|
||||
if not slowPath:
|
||||
trace "sending encoded msg to peer",
|
||||
conntype = $connType, conn = conn, encoded = shortLog(msg)
|
||||
let f = conn.writeLp(msg)
|
||||
if not f.completed():
|
||||
sendMsgContinue(conn, f)
|
||||
else:
|
||||
f
|
||||
else:
|
||||
trace "sending encoded msg to peer via slow path"
|
||||
sendMsgSlow(p, msg)
|
||||
|
||||
proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[void] =
|
||||
proc sendEncoded*(
|
||||
p: PubSubPeer, msg: seq[byte], isHighPriority: bool, useCustomConn: bool = false
|
||||
): Future[void] =
|
||||
## Asynchronously sends an encoded message to a specified `PubSubPeer`.
|
||||
##
|
||||
## Parameters:
|
||||
@@ -399,7 +437,7 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[v
|
||||
maxSize = p.maxMessageSize, msgSize = msg.len
|
||||
Future[void].completed()
|
||||
elif isHighPriority or emptyQueues:
|
||||
let f = p.sendMsg(msg)
|
||||
let f = p.sendMsg(msg, useCustomConn)
|
||||
if not f.finished:
|
||||
p.rpcmessagequeue.sendPriorityQueue.addLast(f)
|
||||
when defined(pubsubpeer_queue_metrics):
|
||||
@@ -458,7 +496,11 @@ iterator splitRPCMsg(
|
||||
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
|
||||
|
||||
proc send*(
|
||||
p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool
|
||||
p: PubSubPeer,
|
||||
msg: RPCMsg,
|
||||
anonymize: bool,
|
||||
isHighPriority: bool,
|
||||
useCustomConn: bool = false,
|
||||
) {.raises: [].} =
|
||||
## Asynchronously sends an `RPCMsg` to a specified `PubSubPeer` with an option for anonymization.
|
||||
##
|
||||
@@ -489,11 +531,11 @@ proc send*(
|
||||
|
||||
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
|
||||
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
|
||||
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority)
|
||||
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority, useCustomConn)
|
||||
else:
|
||||
# If the message size is within limits, send it as is
|
||||
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
|
||||
asyncSpawn p.sendEncoded(encoded, isHighPriority)
|
||||
asyncSpawn p.sendEncoded(encoded, isHighPriority, useCustomConn)
|
||||
|
||||
proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
|
||||
for sentIHave in p.sentIHaves.mitems():
|
||||
@@ -552,6 +594,8 @@ proc new*(
|
||||
maxMessageSize: int,
|
||||
maxNumElementsInNonPriorityQueue: int = DefaultMaxNumElementsInNonPriorityQueue,
|
||||
overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket),
|
||||
customConnCallbacks: Option[CustomConnectionCallbacks] =
|
||||
none(CustomConnectionCallbacks),
|
||||
): T =
|
||||
result = T(
|
||||
getConn: getConn,
|
||||
@@ -563,6 +607,7 @@ proc new*(
|
||||
overheadRateLimitOpt: overheadRateLimitOpt,
|
||||
rpcmessagequeue: RpcMessageQueue.new(),
|
||||
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue,
|
||||
customConnCallbacks: customConnCallbacks,
|
||||
)
|
||||
result.sentIHaves.addFirst(default(HashSet[MessageId]))
|
||||
result.iDontWants.addFirst(default(HashSet[SaltedId]))
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[hashes, sets]
|
||||
import chronos/timer, stew/results
|
||||
import chronos/timer, results
|
||||
|
||||
import ../../utility
|
||||
|
||||
|
||||
@@ -419,8 +419,8 @@ proc save(
|
||||
)
|
||||
rdv.namespaces[nsSalted].add(rdv.registered.high)
|
||||
# rdv.registerEvent.fire()
|
||||
except KeyError:
|
||||
doAssert false, "Should have key"
|
||||
except KeyError as e:
|
||||
doAssert false, "Should have key: " & e.msg
|
||||
|
||||
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
|
||||
trace "Received Register", peerId = conn.peerId, ns = r.ns
|
||||
|
||||
@@ -110,8 +110,8 @@ proc handleConn(
|
||||
fut2 = sconn.join()
|
||||
try: # https://github.com/status-im/nim-chronos/issues/516
|
||||
discard await race(fut1, fut2)
|
||||
except ValueError:
|
||||
raiseAssert("Futures list is not empty")
|
||||
except ValueError as e:
|
||||
raiseAssert("Futures list is not empty: " & e.msg)
|
||||
# at least one join() completed, cancel pending one, if any
|
||||
if not fut1.finished:
|
||||
await fut1.cancelAndWait()
|
||||
@@ -182,14 +182,14 @@ method readOnce*(
|
||||
except LPStreamEOFError as err:
|
||||
s.isEof = true
|
||||
await s.close()
|
||||
raise err
|
||||
raise newException(LPStreamEOFError, "Secure connection EOF: " & err.msg, err)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except LPStreamError as err:
|
||||
debug "Error while reading message from secure connection, closing.",
|
||||
error = err.name, message = err.msg, connection = s
|
||||
await s.close()
|
||||
raise err
|
||||
raise newException(LPStreamError, "Secure connection read error: " & err.msg, err)
|
||||
|
||||
var p = cast[ptr UncheckedArray[byte]](pbytes)
|
||||
return s.buf.consumeTo(toOpenArray(p, 0, nbytes - 1))
|
||||
|
||||
@@ -55,7 +55,7 @@ proc tryStartingDirectConn(
|
||||
if not isRelayed.get(false) and address.isPublicMA():
|
||||
return await tryConnect(address)
|
||||
except CatchableError as err:
|
||||
debug "Failed to create direct connection.", err = err.msg
|
||||
debug "Failed to create direct connection.", description = err.msg
|
||||
continue
|
||||
return false
|
||||
|
||||
@@ -91,7 +91,7 @@ proc newConnectedPeerHandler(
|
||||
except CancelledError as err:
|
||||
raise err
|
||||
except CatchableError as err:
|
||||
debug "Hole punching failed during dcutr", err = err.msg
|
||||
debug "Hole punching failed during dcutr", description = err.msg
|
||||
|
||||
method setup*(
|
||||
self: HPService, switch: Switch
|
||||
@@ -104,7 +104,7 @@ method setup*(
|
||||
let dcutrProto = Dcutr.new(switch)
|
||||
switch.mount(dcutrProto)
|
||||
except LPError as err:
|
||||
error "Failed to mount Dcutr", err = err.msg
|
||||
error "Failed to mount Dcutr", description = err.msg
|
||||
|
||||
self.newConnectedPeerHandler = proc(
|
||||
peerId: PeerId, event: PeerEvent
|
||||
|
||||
@@ -199,8 +199,10 @@ method closeImpl*(s: BufferStream): Future[void] {.async: (raises: [], raw: true
|
||||
elif s.pushing:
|
||||
if not s.readQueue.empty():
|
||||
discard s.readQueue.popFirstNoWait()
|
||||
except AsyncQueueFullError, AsyncQueueEmptyError:
|
||||
raiseAssert(getCurrentExceptionMsg())
|
||||
except AsyncQueueFullError as e:
|
||||
raiseAssert("closeImpl failed queue full: " & e.msg)
|
||||
except AsyncQueueEmptyError as e:
|
||||
raiseAssert("closeImpl failed queue empty: " & e.msg)
|
||||
|
||||
trace "Closed BufferStream", s
|
||||
|
||||
|
||||
@@ -34,8 +34,6 @@ when defined(libp2p_agents_metrics):
|
||||
declareCounter libp2p_peers_traffic_read, "incoming traffic", labels = ["agent"]
|
||||
declareCounter libp2p_peers_traffic_write, "outgoing traffic", labels = ["agent"]
|
||||
|
||||
declareCounter libp2p_network_bytes, "total traffic", labels = ["direction"]
|
||||
|
||||
func shortLog*(conn: ChronosStream): auto =
|
||||
try:
|
||||
if conn == nil:
|
||||
|
||||
@@ -52,6 +52,8 @@ func shortLog*(conn: Connection): string =
|
||||
chronicles.formatIt(Connection):
|
||||
shortLog(it)
|
||||
|
||||
declarePublicCounter libp2p_network_bytes, "total traffic", labels = ["direction"]
|
||||
|
||||
method initStream*(s: Connection) =
|
||||
if s.objName.len == 0:
|
||||
s.objName = ConnectionTrackerName
|
||||
|
||||
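The hunk above promotes the byte counter to a public metric. For reference, the nim-metrics pattern it relies on looks like this in isolation; the counter name is illustrative:

import metrics

declarePublicCounter example_network_bytes, "total traffic", labels = ["direction"]

proc recordRead(n: int) =
  example_network_bytes.inc(n.int64, labelValues = ["in"])

proc recordWrite(n: int) =
  example_network_bytes.inc(n.int64, labelValues = ["out"])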
@@ -113,9 +113,9 @@ method initStream*(s: LPStream) {.base.} =
|
||||
trackCounter(s.objName)
|
||||
trace "Stream created", s, objName = s.objName, dir = $s.dir
|
||||
|
||||
proc join*(
|
||||
method join*(
|
||||
s: LPStream
|
||||
): Future[void] {.async: (raises: [CancelledError], raw: true), public.} =
|
||||
): Future[void] {.base, async: (raises: [CancelledError], raw: true), public.} =
|
||||
## Wait for the stream to be closed
|
||||
s.closeEvent.wait()
|
||||
|
||||
@@ -135,9 +135,9 @@ method readOnce*(
|
||||
## available
|
||||
raiseAssert("[LPStream.readOnce] abstract method not implemented!")
|
||||
|
||||
proc readExactly*(
|
||||
method readExactly*(
|
||||
s: LPStream, pbytes: pointer, nbytes: int
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
): Future[void] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
## Waits for `nbytes` to be available, then read
|
||||
## them and return them
|
||||
if s.atEof:
|
||||
@@ -171,9 +171,9 @@ proc readExactly*(
|
||||
trace "couldn't read all bytes, incomplete data", s, nbytes, read
|
||||
raise newLPStreamIncompleteError()
|
||||
|
||||
proc readLine*(
|
||||
method readLine*(
|
||||
s: LPStream, limit = 0, sep = "\r\n"
|
||||
): Future[string] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
): Future[string] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
## Reads up to `limit` bytes are read, or a `sep` is found
|
||||
# TODO replace with something that exploits buffering better
|
||||
var lim = if limit <= 0: -1 else: limit
|
||||
@@ -199,9 +199,9 @@ proc readLine*(
|
||||
if len(result) == lim:
|
||||
break
|
||||
|
||||
proc readVarint*(
|
||||
method readVarint*(
|
||||
conn: LPStream
|
||||
): Future[uint64] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
): Future[uint64] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
var buffer: array[10, byte]
|
||||
|
||||
for i in 0 ..< len(buffer):
|
||||
@@ -218,9 +218,9 @@ proc readVarint*(
|
||||
if true: # can't end with a raise apparently
|
||||
raise (ref InvalidVarintError)(msg: "Cannot parse varint")
|
||||
|
||||
proc readLp*(
|
||||
method readLp*(
|
||||
s: LPStream, maxSize: int
|
||||
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
): Future[seq[byte]] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
## read length prefixed msg, with the length encoded as a varint
|
||||
let
|
||||
length = await s.readVarint()
|
||||
@@ -244,9 +244,11 @@ method write*(
|
||||
# Write `msg` to stream, waiting for the write to be finished
|
||||
raiseAssert("[LPStream.write] abstract method not implemented!")
|
||||
|
||||
proc writeLp*(
|
||||
method writeLp*(
|
||||
s: LPStream, msg: openArray[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
|
||||
): Future[void] {.
|
||||
base, async: (raises: [CancelledError, LPStreamError], raw: true), public
|
||||
.} =
|
||||
## Write `msg` with a varint-encoded length prefix
|
||||
let vbytes = PB.toBytes(msg.len().uint64)
|
||||
var buf = newSeqUninitialized[byte](msg.len() + vbytes.len)
|
||||
@@ -254,9 +256,11 @@ proc writeLp*(
|
||||
buf[vbytes.len ..< buf.len] = msg
|
||||
s.write(buf)
|
||||
|
||||
proc writeLp*(
|
||||
method writeLp*(
|
||||
s: LPStream, msg: string
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
|
||||
): Future[void] {.
|
||||
base, async: (raises: [CancelledError, LPStreamError], raw: true), public
|
||||
.} =
|
||||
writeLp(s, msg.toOpenArrayByte(0, msg.high))
|
||||
|
||||
proc write*(
|
||||
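Turning these procs into base methods means stream wrappers can now override them. A hedged sketch, assuming the lpstream module is in scope; CappedStream is an illustrative type, not part of this diff:

type CappedStream = ref object of LPStream
  inner: LPStream

method readLp*(
    s: CappedStream, maxSize: int
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
  # delegate to the wrapped stream, but never allow more than 1 MiB
  return await s.inner.readLp(min(maxSize, 1024 * 1024))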
@@ -324,7 +328,7 @@ proc closeWithEOF*(s: LPStream): Future[void] {.async: (raises: []), public.} =
|
||||
debug "Unexpected bytes while waiting for EOF", s
|
||||
except CancelledError:
|
||||
discard
|
||||
except LPStreamEOFError:
|
||||
trace "Expected EOF came", s
|
||||
except LPStreamEOFError as e:
|
||||
trace "Expected EOF came", s, description = e.msg
|
||||
except LPStreamError as exc:
|
||||
debug "Unexpected error while waiting for EOF", s, description = exc.msg
|
||||
|
||||
@@ -233,7 +233,7 @@ proc upgrader(
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
raise newException(UpgradeError, e.msg, e)
|
||||
raise newException(UpgradeError, "catchable error upgrader: " & e.msg, e)
|
||||
|
||||
proc upgradeMonitor(
|
||||
switch: Switch, trans: Transport, conn: Connection, upgrades: AsyncSemaphore
|
||||
@@ -275,7 +275,8 @@ proc accept(s: Switch, transport: Transport) {.async: (raises: []).} =
|
||||
await transport.accept()
|
||||
except CatchableError as exc:
|
||||
slot.release()
|
||||
raise exc
|
||||
raise
|
||||
newException(CatchableError, "failed to accept connection: " & exc.msg, exc)
|
||||
slot.trackConnection(conn)
|
||||
if isNil(conn):
|
||||
# A nil connection means that we might have hit a
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
import std/sequtils
|
||||
import pkg/chronos
|
||||
import pkg/chronicles
|
||||
import pkg/quic
|
||||
import chronos
|
||||
import chronicles
|
||||
import metrics
|
||||
import quic
|
||||
import results
|
||||
import ../multiaddress
|
||||
import ../multicodec
|
||||
@@ -58,6 +59,7 @@ method readOnce*(
|
||||
result = min(nbytes, stream.cached.len)
|
||||
copyMem(pbytes, addr stream.cached[0], result)
|
||||
stream.cached = stream.cached[result ..^ 1]
|
||||
libp2p_network_bytes.inc(result.int64, labelValues = ["in"])
|
||||
except CatchableError as exc:
|
||||
raise newLPStreamEOFError()
|
||||
|
||||
@@ -66,6 +68,7 @@ method write*(
|
||||
stream: QuicStream, bytes: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
mapExceptions(await stream.stream.write(bytes))
|
||||
libp2p_network_bytes.inc(bytes.len.int64, labelValues = ["out"])
|
||||
|
||||
{.pop.}
|
||||
|
||||
@@ -98,7 +101,7 @@ proc getStream*(
|
||||
return QuicStream.new(stream, session.observedAddr, session.peerId)
|
||||
except CatchableError as exc:
|
||||
# TODO: incomingStream is using {.async.} with no raises
|
||||
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
|
||||
raise (ref QuicTransportError)(msg: "error in getStream: " & exc.msg, parent: exc)
|
||||
|
||||
method getWrapped*(self: QuicSession): P2PConnection =
|
||||
nil
|
||||
@@ -116,7 +119,7 @@ method newStream*(
|
||||
try:
|
||||
return await m.quicSession.getStream(Direction.Out)
|
||||
except CatchableError as exc:
|
||||
raise newException(MuxerError, exc.msg, exc)
|
||||
raise newException(MuxerError, "error in newStream: " & exc.msg, exc)
|
||||
|
||||
proc handleStream(m: QuicMuxer, chann: QuicStream) {.async: (raises: []).} =
|
||||
## call the muxer stream handler for this channel
|
||||
@@ -233,11 +236,16 @@ method start*(
|
||||
except QuicConfigError as exc:
|
||||
doAssert false, "invalid quic setup: " & $exc.msg
|
||||
except TLSCertificateError as exc:
|
||||
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
|
||||
raise (ref QuicTransportError)(
|
||||
msg: "tlscert error in quic start: " & exc.msg, parent: exc
|
||||
)
|
||||
except QuicError as exc:
|
||||
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
|
||||
raise
|
||||
(ref QuicTransportError)(msg: "quicerror in quic start: " & exc.msg, parent: exc)
|
||||
except TransportOsError as exc:
|
||||
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
|
||||
raise (ref QuicTransportError)(
|
||||
msg: "transport error in quic start: " & exc.msg, parent: exc
|
||||
)
|
||||
self.running = true
|
||||
|
||||
method stop*(transport: QuicTransport) {.async: (raises: []).} =
|
||||
@@ -315,7 +323,7 @@ method dial*(
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
raise newException(QuicTransportDialError, e.msg, e)
|
||||
raise newException(QuicTransportDialError, "error in quic dial:" & e.msg, e)
|
||||
|
||||
method upgrade*(
|
||||
self: QuicTransport, conn: P2PConnection, peerId: Opt[PeerId]
|
||||
|
||||
@@ -133,7 +133,9 @@ method start*(
|
||||
try:
|
||||
createStreamServer(ta, flags = self.flags)
|
||||
except common.TransportError as exc:
|
||||
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
|
||||
raise (ref TcpTransportError)(
|
||||
msg: "transport error in TcpTransport start:" & exc.msg, parent: exc
|
||||
)
|
||||
|
||||
self.servers &= server
|
||||
|
||||
@@ -250,9 +252,13 @@ method accept*(
|
||||
except TransportUseClosedError as exc:
|
||||
raise newTransportClosedError(exc)
|
||||
except TransportOsError as exc:
|
||||
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
|
||||
raise (ref TcpTransportError)(
|
||||
msg: "TransportOs error in accept:" & exc.msg, parent: exc
|
||||
)
|
||||
except common.TransportError as exc: # Needed for chronos 4.0.0 support
|
||||
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
|
||||
raise (ref TcpTransportError)(
|
||||
msg: "TransportError in accept: " & exc.msg, parent: exc
|
||||
)
|
||||
except CancelledError as exc:
|
||||
cancelAcceptFuts()
|
||||
raise exc
|
||||
@@ -302,7 +308,8 @@ method dial*(
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
|
||||
raise
|
||||
(ref TcpTransportError)(msg: "TcpTransport dial error: " & exc.msg, parent: exc)
|
||||
|
||||
# If `stop` is called after `connect` but before `await` returns, we might
|
||||
# end up with a race condition where `stop` returns but not all connections
|
||||
@@ -318,7 +325,7 @@ method dial*(
|
||||
MultiAddress.init(transp.remoteAddress).expect("remote address is valid")
|
||||
except TransportOsError as exc:
|
||||
safeCloseWait(transp)
|
||||
raise (ref TcpTransportError)(msg: exc.msg)
|
||||
raise (ref TcpTransportError)(msg: "MultiAddress.init error in dial: " & exc.msg)
|
||||
|
||||
self.connHandler(transp, Opt.some(observedAddr), Direction.Out)
|
||||
|
||||
|
||||
@@ -118,8 +118,8 @@ proc makeASN1Time(time: Time): string {.inline.} =
|
||||
try:
|
||||
let f = initTimeFormat("yyyyMMddhhmmss")
|
||||
format(time.utc(), f)
|
||||
except TimeFormatParseError:
|
||||
raiseAssert "time format is const and checked with test"
|
||||
except TimeFormatParseError as e:
|
||||
raiseAssert "time format is const and checked with test: " & e.msg
|
||||
|
||||
return str & "Z"
|
||||
|
||||
@@ -278,7 +278,7 @@ proc parse*(
|
||||
validTo = parseCertTime($certParsed.valid_to)
|
||||
except TimeParseError as e:
|
||||
raise newException(
|
||||
CertificateParsingError, "Failed to parse certificate validity time, " & $e.msg
|
||||
CertificateParsingError, "Failed to parse certificate validity time: " & $e.msg, e
|
||||
)
|
||||
|
||||
P2pCertificate(
|
||||
|
||||
@@ -243,7 +243,9 @@ method dial*(
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
safeCloseWait(transp)
|
||||
raise newException(transport.TransportDialError, e.msg, e)
|
||||
raise newException(
|
||||
transport.TransportDialError, "error in dial TorTransport: " & e.msg, e
|
||||
)
|
||||
|
||||
method start*(
|
||||
self: TorTransport, addrs: seq[MultiAddress]
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[sequtils]
|
||||
import stew/results
|
||||
import results
|
||||
import chronos, chronicles
|
||||
import
|
||||
transport,
|
||||
@@ -160,7 +160,9 @@ method start*(
|
||||
else:
|
||||
HttpServer.create(address, handshakeTimeout = self.handshakeTimeout)
|
||||
except CatchableError as exc:
|
||||
raise (ref WsTransportError)(msg: exc.msg, parent: exc)
|
||||
raise (ref WsTransportError)(
|
||||
msg: "error in WsTransport start: " & exc.msg, parent: exc
|
||||
)
|
||||
|
||||
self.httpservers &= httpserver
|
||||
|
||||
@@ -309,7 +311,9 @@ method accept*(
|
||||
debug "OS Error", description = exc.msg
|
||||
except CatchableError as exc:
|
||||
info "Unexpected error accepting connection", description = exc.msg
|
||||
raise newException(transport.TransportError, exc.msg, exc)
|
||||
raise newException(
|
||||
transport.TransportError, "Error in WsTransport accept: " & exc.msg, exc
|
||||
)
|
||||
|
||||
method dial*(
|
||||
self: WsTransport,
|
||||
@@ -338,7 +342,9 @@ method dial*(
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
safeClose(transp)
|
||||
raise newException(transport.TransportDialError, e.msg, e)
|
||||
raise newException(
|
||||
transport.TransportDialError, "error in WsTransport dial: " & e.msg, e
|
||||
)
|
||||
|
||||
method handles*(t: WsTransport, address: MultiAddress): bool {.gcsafe, raises: [].} =
|
||||
if procCall Transport(t).handles(address):
|
||||
|
||||
@@ -54,8 +54,9 @@ when defined(libp2p_agents_metrics):
|
||||
proc safeToLowerAscii*(s: string): Result[string, cstring] =
|
||||
try:
|
||||
ok(s.toLowerAscii())
|
||||
except CatchableError:
|
||||
err("toLowerAscii failed")
|
||||
except CatchableError as e:
|
||||
let errMsg = "toLowerAscii failed: " & e.msg
|
||||
err(errMsg.cstring)
|
||||
|
||||
const
|
||||
KnownLibP2PAgents* {.strdefine.} = "nim-libp2p"
|
||||
|
||||
@@ -27,9 +27,9 @@ proc anyCompleted*[T](
|
||||
if raceFut.completed:
|
||||
return raceFut
|
||||
requests.del(requests.find(raceFut))
|
||||
except ValueError:
|
||||
except ValueError as e:
|
||||
raise newException(
|
||||
AllFuturesFailedError, "None of the futures completed successfully"
|
||||
AllFuturesFailedError, "None of the futures completed successfully: " & e.msg, e
|
||||
)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
|
||||
@@ -108,7 +108,9 @@ proc createStreamServer*[T](
|
||||
): StreamServer {.raises: [LPError, MaInvalidAddress].} =
|
||||
## Create new TCP stream server which bounds to ``ma`` address.
|
||||
if not (RTRANSPMA.match(ma)):
|
||||
raise newException(MaInvalidAddress, "Incorrect or unsupported address!")
|
||||
raise newException(
|
||||
MaInvalidAddress, "Incorrect or unsupported address in createStreamServer"
|
||||
)
|
||||
|
||||
try:
|
||||
return createStreamServer(
|
||||
@@ -123,7 +125,7 @@ proc createStreamServer*[T](
|
||||
init,
|
||||
)
|
||||
except CatchableError as exc:
|
||||
raise newException(LPError, exc.msg)
|
||||
raise newException(LPError, "failed createStreamServer: " & exc.msg, exc)
|
||||
|
||||
proc createStreamServer*[T](
|
||||
ma: MultiAddress,
|
||||
@@ -146,7 +148,7 @@ proc createStreamServer*[T](
|
||||
initTAddress(ma).tryGet(), flags, udata, sock, backlog, bufferSize, child, init
|
||||
)
|
||||
except CatchableError as exc:
|
||||
raise newException(LPError, exc.msg)
|
||||
raise newException(LPError, "failed simpler createStreamServer: " & exc.msg, exc)
|
||||
|
||||
proc createAsyncSocket*(ma: MultiAddress): AsyncFD {.raises: [ValueError, LPError].} =
|
||||
## Create new asynchronous socket using MultiAddress' ``ma`` socket type and
|
||||
@@ -178,7 +180,9 @@ proc createAsyncSocket*(ma: MultiAddress): AsyncFD {.raises: [ValueError, LPErro
|
||||
try:
|
||||
createAsyncSocket(address.getDomain(), socktype, protocol)
|
||||
except CatchableError as exc:
|
||||
raise newException(LPError, exc.msg)
|
||||
raise newException(
|
||||
LPError, "Convert exception to LPError in createAsyncSocket: " & exc.msg, exc
|
||||
)
|
||||
|
||||
proc bindAsyncSocket*(sock: AsyncFD, ma: MultiAddress): bool {.raises: [LPError].} =
|
||||
## Bind socket ``sock`` to MultiAddress ``ma``.
|
||||
|
||||
@@ -9,91 +9,115 @@
|
||||
|
||||
set -e
|
||||
|
||||
CACHE_DIR="$1" # optional parameter pointing to a CI cache dir.
|
||||
LIBP2P_COMMIT="124530a3" # Tags maye be used as well
|
||||
[[ -n "$2" ]] && LIBP2P_COMMIT="$2" # allow overriding it on the command line
|
||||
force=false
|
||||
verbose=false
|
||||
CACHE_DIR=""
|
||||
LIBP2P_COMMIT="124530a3"
|
||||
|
||||
while [[ "$#" -gt 0 ]]; do
|
||||
case "$1" in
|
||||
-f|--force) force=true ;;
|
||||
-v|--verbose) verbose=true ;;
|
||||
-h|--help)
|
||||
echo "Usage: $0 [-f|--force] [-v|--verbose] [CACHE_DIR] [COMMIT]"
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
# First non-option is CACHE_DIR, second is LIBP2P_COMMIT
|
||||
if [[ -z "$CACHE_DIR" ]]; then
|
||||
CACHE_DIR="$1"
|
||||
elif [[ "$LIBP2P_COMMIT" == "124530a3" ]]; then
|
||||
LIBP2P_COMMIT="$1"
|
||||
else
|
||||
echo "Unknown argument: $1"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
SUBREPO_DIR="vendor/go/src/github.com/libp2p/go-libp2p-daemon"
|
||||
if [[ ! -e "$SUBREPO_DIR" ]]; then
|
||||
# we're probably in nim-libp2p's CI
|
||||
SUBREPO_DIR="go-libp2p-daemon"
|
||||
rm -rf "$SUBREPO_DIR"
|
||||
git clone -q https://github.com/libp2p/go-libp2p-daemon
|
||||
cd "$SUBREPO_DIR"
|
||||
git checkout -q $LIBP2P_COMMIT
|
||||
cd ..
|
||||
SUBREPO_DIR="go-libp2p-daemon"
|
||||
rm -rf "$SUBREPO_DIR"
|
||||
git clone -q https://github.com/libp2p/go-libp2p-daemon
|
||||
cd "$SUBREPO_DIR"
|
||||
git checkout -q "$LIBP2P_COMMIT"
|
||||
cd ..
|
||||
fi
|
||||
|
||||
## env vars
|
||||
# verbosity level
|
||||
[[ -z "$V" ]] && V=0
|
||||
[[ -z "$BUILD_MSG" ]] && BUILD_MSG="Building p2pd ${LIBP2P_COMMIT}"
|
||||
|
||||
# Windows detection
|
||||
if uname | grep -qiE "mingw|msys"; then
|
||||
EXE_SUFFIX=".exe"
|
||||
# otherwise it fails in AppVeyor due to https://github.com/git-for-windows/git/issues/2495
|
||||
GIT_TIMESTAMP_ARG="--date=unix" # available since Git 2.9.4
|
||||
EXE_SUFFIX=".exe"
|
||||
# otherwise it fails in AppVeyor due to https://github.com/git-for-windows/git/issues/2495
|
||||
GIT_TIMESTAMP_ARG="--date=unix" # available since Git 2.9.4
|
||||
else
|
||||
EXE_SUFFIX=""
|
||||
GIT_TIMESTAMP_ARG="--date=format-local:%s" # available since Git 2.7.0
|
||||
EXE_SUFFIX=""
|
||||
GIT_TIMESTAMP_ARG="--date=format-local:%s" # available since Git 2.7.0
|
||||
fi
|
||||
|
||||
TARGET_DIR="$(go env GOPATH)/bin"
|
||||
TARGET_BINARY="${TARGET_DIR}/p2pd${EXE_SUFFIX}"
|
||||
|
||||
target_needs_rebuilding() {
|
||||
REBUILD=0
|
||||
NO_REBUILD=1
|
||||
REBUILD=0
|
||||
NO_REBUILD=1
|
||||
|
||||
if [[ -n "$CACHE_DIR" && -e "${CACHE_DIR}/p2pd${EXE_SUFFIX}" ]]; then
|
||||
mkdir -p "${TARGET_DIR}"
|
||||
cp -a "$CACHE_DIR"/* "${TARGET_DIR}/"
|
||||
fi
|
||||
if [[ -n "$CACHE_DIR" && -e "${CACHE_DIR}/p2pd${EXE_SUFFIX}" ]]; then
|
||||
mkdir -p "${TARGET_DIR}"
|
||||
cp -a "$CACHE_DIR"/* "${TARGET_DIR}/"
|
||||
fi
|
||||
|
||||
# compare the built commit's timestamp to the date of the last commit (keep in mind that Git doesn't preserve file timestamps)
|
||||
if [[ -e "${TARGET_DIR}/timestamp" && $(cat "${TARGET_DIR}/timestamp") -eq $(cd "$SUBREPO_DIR"; git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG}) ]]; then
|
||||
return $NO_REBUILD
|
||||
else
|
||||
return $REBUILD
|
||||
fi
|
||||
# compare the built commit's timestamp to the date of the last commit (keep in mind that Git doesn't preserve file timestamps)
|
||||
if [[ -e "${TARGET_DIR}/timestamp" && $(cat "${TARGET_DIR}/timestamp") -eq $(cd "$SUBREPO_DIR"; git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG}) ]]; then
|
||||
return $NO_REBUILD
|
||||
else
|
||||
return $REBUILD
|
||||
fi
|
||||
}
|
||||
|
||||
build_target() {
|
||||
echo -e "$BUILD_MSG"
|
||||
[[ "$V" == "0" ]] && exec &>/dev/null
|
||||
echo -e "$BUILD_MSG"
|
||||
|
||||
pushd "$SUBREPO_DIR"
|
||||
# Go module downloads can fail randomly in CI VMs, so retry them a few times
|
||||
MAX_RETRIES=5
|
||||
CURR=0
|
||||
while [[ $CURR -lt $MAX_RETRIES ]]; do
|
||||
FAILED=0
|
||||
go get ./... && break || FAILED=1
|
||||
CURR=$(( CURR + 1 ))
|
||||
echo "retry #${CURR}"
|
||||
done
|
||||
if [[ $FAILED == 1 ]]; then
|
||||
echo "Error: still fails after retrying ${MAX_RETRIES} times."
|
||||
exit 1
|
||||
fi
|
||||
go install ./...
|
||||
pushd "$SUBREPO_DIR"
|
||||
# Go module downloads can fail randomly in CI VMs, so retry them a few times
|
||||
MAX_RETRIES=5
|
||||
CURR=0
|
||||
while [[ $CURR -lt $MAX_RETRIES ]]; do
|
||||
FAILED=0
|
||||
go get ./... && break || FAILED=1
|
||||
CURR=$(( CURR + 1 ))
|
||||
if $verbose; then
|
||||
echo "retry #${CURR}"
|
||||
fi
|
||||
done
|
||||
if [[ $FAILED == 1 ]]; then
|
||||
echo "Error: still fails after retrying ${MAX_RETRIES} times."
|
||||
exit 1
|
||||
fi
|
||||
go install ./...
|
||||
|
||||
# record the last commit's timestamp
|
||||
git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG} > "${TARGET_DIR}/timestamp"
|
||||
# record the last commit's timestamp
|
||||
git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG} > "${TARGET_DIR}/timestamp"
|
||||
|
||||
popd
|
||||
popd
|
||||
|
||||
# update the CI cache
|
||||
if [[ -n "$CACHE_DIR" ]]; then
|
||||
rm -rf "$CACHE_DIR"
|
||||
mkdir "$CACHE_DIR"
|
||||
cp -a "$TARGET_DIR"/* "$CACHE_DIR"/
|
||||
fi
|
||||
echo "Binary built successfully."
|
||||
# update the CI cache
|
||||
if [[ -n "$CACHE_DIR" ]]; then
|
||||
rm -rf "$CACHE_DIR"
|
||||
mkdir "$CACHE_DIR"
|
||||
cp -a "$TARGET_DIR"/* "$CACHE_DIR"/
|
||||
fi
|
||||
echo "Binary built successfully: $TARGET_BINARY"
|
||||
}
|
||||
|
||||
if target_needs_rebuilding; then
|
||||
build_target
|
||||
if $force || target_needs_rebuilding; then
|
||||
build_target
|
||||
else
|
||||
echo "No rebuild needed."
|
||||
echo "No rebuild needed."
|
||||
fi
|
||||
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import options, tables
|
||||
import chronos, chronicles, stew/byteutils
|
||||
import helpers
|
||||
import ../libp2p
|
||||
@@ -358,10 +357,7 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
|
||||
.withAddress(wsAddress)
|
||||
.withRng(crypto.newRng())
|
||||
.withMplex()
|
||||
.withTransport(
|
||||
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
|
||||
WsTransport.new(upgr)
|
||||
)
|
||||
.withWsTransport()
|
||||
.withNoise()
|
||||
.build()
|
||||
|
||||
|
||||
@@ -49,8 +49,10 @@ template checkTrackers*() =
|
||||
{.push warning[BareExcept]: off.}
|
||||
try:
|
||||
GC_fullCollect()
|
||||
except CatchableError:
|
||||
discard
|
||||
except Defect as exc:
|
||||
raise exc # Reraise to maintain call stack
|
||||
except Exception:
|
||||
raiseAssert "Unexpected exception during GC collection"
|
||||
when defined(nimHasWarnBareExcept):
|
||||
{.pop.}
|
||||
|
||||
@@ -92,7 +94,9 @@ proc new*(T: typedesc[TestBufferStream], writeHandler: WriteHandler): T =
|
||||
testBufferStream.initStream()
|
||||
testBufferStream
|
||||
|
||||
macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
|
||||
macro checkUntilTimeoutCustom*(
|
||||
timeout: Duration, sleepInterval: Duration, code: untyped
|
||||
): untyped =
|
||||
## Periodically checks a given condition until it is true or a timeout occurs.
|
||||
##
|
||||
## `code`: untyped - A condition expression that should eventually evaluate to true.
|
||||
@@ -101,17 +105,17 @@ macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
|
||||
## Examples:
|
||||
## ```nim
|
||||
## # Example 1:
|
||||
## asyncTest "checkUntilCustomTimeout should pass if the condition is true":
|
||||
## asyncTest "checkUntilTimeoutCustom should pass if the condition is true":
|
||||
## let a = 2
|
||||
## let b = 2
|
||||
## checkUntilCustomTimeout(2.seconds):
|
||||
## checkUntilTimeoutCustom(2.seconds):
|
||||
## a == b
|
||||
##
|
||||
## # Example 2: Multiple conditions
|
||||
## asyncTest "checkUntilCustomTimeout should pass if the conditions are true":
|
||||
## asyncTest "checkUntilTimeoutCustom should pass if the conditions are true":
|
||||
## let a = 2
|
||||
## let b = 2
|
||||
## checkUntilCustomTimeout(5.seconds)::
|
||||
## checkUntilTimeoutCustom(5.seconds, 100.milliseconds):
|
||||
## a == b
|
||||
## a == 2
|
||||
## b == 1
|
||||
@@ -145,12 +149,12 @@ macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
|
||||
if `combinedBoolExpr`:
|
||||
return
|
||||
else:
|
||||
await sleepAsync(100.millis)
|
||||
await sleepAsync(`sleepInterval`)
|
||||
|
||||
await checkExpiringInternal()
|
||||
|
||||
macro checkUntilTimeout*(code: untyped): untyped =
|
||||
## Same as `checkUntilCustomTimeout` but with a default timeout of 10 seconds.
|
||||
## Same as `checkUntilTimeoutCustom` but with a default timeout of 2 seconds and a 50 ms poll interval.
|
||||
##
|
||||
## Examples:
|
||||
## ```nim
|
||||
@@ -171,7 +175,7 @@ macro checkUntilTimeout*(code: untyped): untyped =
|
||||
## b == 1
|
||||
## ```
|
||||
result = quote:
|
||||
checkUntilCustomTimeout(10.seconds, `code`)
|
||||
checkUntilTimeoutCustom(2.seconds, 50.milliseconds, `code`)
|
||||
|
||||
proc unorderedCompare*[T](a, b: seq[T]): bool =
|
||||
if a == b:
|
||||
|
||||
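A hedged usage sketch of the renamed macro, assuming the async test helpers used elsewhere in this suite (asyncTest, sleepAsync, asyncSpawn) are in scope:

asyncTest "condition becomes true before the custom timeout":
  var done = false
  proc flip() {.async.} =
    await sleepAsync(200.milliseconds)
    done = true
  asyncSpawn flip()
  # poll every 50 ms, give up after 2 seconds
  checkUntilTimeoutCustom(2.seconds, 50.milliseconds):
    done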
@@ -1,126 +0,0 @@
|
||||
import std/[os, options, strformat, sequtils]
|
||||
import redis
|
||||
import chronos, chronicles
|
||||
import
|
||||
../../libp2p/[
|
||||
builders,
|
||||
switch,
|
||||
multicodec,
|
||||
observedaddrmanager,
|
||||
services/hpservice,
|
||||
services/autorelayservice,
|
||||
protocols/connectivity/autonat/client as aclient,
|
||||
protocols/connectivity/relay/client as rclient,
|
||||
protocols/connectivity/relay/relay,
|
||||
protocols/connectivity/autonat/service,
|
||||
protocols/ping,
|
||||
]
|
||||
import ../stubs/autonatclientstub
|
||||
import ../errorhelpers
|
||||
|
||||
proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
|
||||
let rng = newRng()
|
||||
var builder = SwitchBuilder
|
||||
.new()
|
||||
.withRng(rng)
|
||||
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
|
||||
.withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
|
||||
.withTcpTransport({ServerFlags.TcpNoDelay})
|
||||
.withYamux()
|
||||
.withAutonat()
|
||||
.withNoise()
|
||||
|
||||
if hpService != nil:
|
||||
builder = builder.withServices(@[hpService])
|
||||
|
||||
if r != nil:
|
||||
builder = builder.withCircuitRelay(r)
|
||||
|
||||
let s = builder.build()
|
||||
s.mount(Ping.new(rng = rng))
|
||||
return s
|
||||
|
||||
proc main() {.async.} =
|
||||
try:
|
||||
let relayClient = RelayClient.new()
|
||||
let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
|
||||
let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
|
||||
autonatClientStub.answer = NotReachable
|
||||
let autonatService =
|
||||
AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
|
||||
let hpservice = HPService.new(autonatService, autoRelayService)
|
||||
|
||||
let
|
||||
isListener = getEnv("MODE") == "listen"
|
||||
switch = createSwitch(relayClient, hpservice)
|
||||
auxSwitch = createSwitch()
|
||||
redisClient = open("redis", 6379.Port)
|
||||
|
||||
debug "Connected to redis"
|
||||
|
||||
await switch.start()
|
||||
await auxSwitch.start()
|
||||
|
||||
let relayAddr =
|
||||
try:
|
||||
redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
|
||||
except Exception as e:
|
||||
raise newException(CatchableError, e.msg)
|
||||
|
||||
# This is necessary to make the autonat service work. It will ask this peer for our reachability which the autonat
|
||||
# client stub will answer NotReachable.
|
||||
await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)
|
||||
|
||||
# Wait for autonat to be NotReachable
|
||||
while autonatService.networkReachability != NetworkReachability.NotReachable:
|
||||
await sleepAsync(100.milliseconds)
|
||||
|
||||
# This will trigger the autonat relay service to make a reservation.
|
||||
let relayMA = MultiAddress.init(relayAddr[1]).tryGet()
|
||||
debug "Got relay address", relayMA
|
||||
let relayId = await switch.connect(relayMA)
|
||||
debug "Connected to relay", relayId
|
||||
|
||||
# Wait for our relay address to be published
|
||||
while not switch.peerInfo.addrs.anyIt(
|
||||
it.contains(multiCodec("p2p-circuit")).tryGet()
|
||||
)
|
||||
:
|
||||
await sleepAsync(100.milliseconds)
|
||||
|
||||
if isListener:
|
||||
let listenerPeerId = switch.peerInfo.peerId
|
||||
discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
|
||||
debug "Pushed listener client peer id to redis", listenerPeerId
|
||||
|
||||
# Nothing to do anymore, wait to be killed
|
||||
await sleepAsync(2.minutes)
|
||||
else:
|
||||
let listenerId =
|
||||
try:
|
||||
PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
|
||||
except Exception as e:
|
||||
raise newException(CatchableError, e.msg)
|
||||
|
||||
debug "Got listener peer id", listenerId
|
||||
let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
|
||||
|
||||
debug "Dialing listener relay address", listenerRelayAddr
|
||||
await switch.connect(listenerId, @[listenerRelayAddr])
|
||||
|
||||
# wait for hole-punching to complete in the background
|
||||
await sleepAsync(5000.milliseconds)
|
||||
|
||||
let conn = switch.connManager.selectMuxer(listenerId).connection
|
||||
let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
|
||||
let delay = await Ping.new().ping(channel)
|
||||
await allFuturesThrowing(
|
||||
channel.close(), conn.close(), switch.stop(), auxSwitch.stop()
|
||||
)
|
||||
echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
|
||||
quit(0)
|
||||
except CatchableError as e:
|
||||
error "Unexpected error", description = e.msg
|
||||
|
||||
discard waitFor(main().withTimeout(4.minutes))
|
||||
quit(1)
|
||||
142 tests/kademlia/testencoding.nim (new file)
@@ -0,0 +1,142 @@
|
||||
{.used.}
|
||||
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
import unittest2
|
||||
import ../../libp2p/protobuf/minprotobuf
|
||||
import ../../libp2p/protocols/kademlia/protobuf
|
||||
import ../../libp2p/multiaddress
|
||||
import options
|
||||
import results
|
||||
|
||||
suite "kademlia protobuffers":
|
||||
const invalidType = uint32(999)
|
||||
|
||||
proc valFromResultOption[T](res: ProtoResult[Option[T]]): T =
|
||||
assert res.isOk()
|
||||
assert res.value().isSome()
|
||||
return res.value().unsafeGet()
|
||||
|
||||
test "record encode/decode":
|
||||
let rec = Record(
|
||||
key: some(@[1'u8, 2, 3]),
|
||||
value: some(@[4'u8, 5, 6]),
|
||||
timeReceived: some("2025-05-12T12:00:00Z"),
|
||||
)
|
||||
let encoded = rec.encode()
|
||||
let decoded = Record.decode(encoded).valFromResultOption
|
||||
check:
|
||||
decoded.key.get() == rec.key.get()
|
||||
decoded.value.get() == rec.value.get()
|
||||
decoded.timeReceived.get() == rec.timeReceived.get()
|
||||
|
||||
test "peer encode/decode":
|
||||
let maddr = MultiAddress.init("/ip4/127.0.0.1/tcp/9000").tryGet()
|
||||
let peer =
|
||||
Peer(id: @[1'u8, 2, 3], addrs: @[maddr], connection: ConnectionType.connected)
|
||||
let encoded = peer.encode()
|
||||
var decoded = Peer.decode(initProtoBuffer(encoded.buffer)).valFromResultOption
|
||||
check:
|
||||
decoded == peer
|
||||
|
||||
test "message encode/decode roundtrip":
|
||||
let maddr = MultiAddress.init("/ip4/10.0.0.1/tcp/4001").tryGet()
|
||||
let peer = Peer(id: @[9'u8], addrs: @[maddr], connection: canConnect)
|
||||
let r = Record(key: some(@[1'u8]), value: some(@[2'u8]), timeReceived: some("t"))
|
||||
let msg = Message(
|
||||
msgType: MessageType.findNode,
|
||||
key: some(@[7'u8]),
|
||||
record: some(r),
|
||||
closerPeers: @[peer],
|
||||
providerPeers: @[peer],
|
||||
)
|
||||
let encoded = msg.encode()
|
||||
let decoded = Message.decode(encoded.buffer).valFromResultOption
|
||||
check:
|
||||
decoded == msg
|
||||
|
||||
test "decode record with missing fields":
|
||||
var pb = initProtoBuffer()
|
||||
# no fields written
|
||||
let rec = Record.decode(pb).valFromResultOption
|
||||
check:
|
||||
rec.key.isNone()
|
||||
rec.value.isNone()
|
||||
rec.timeReceived.isNone()
|
||||
|
||||
test "decode peer with missing id (invalid)":
|
||||
var pb = initProtoBuffer()
|
||||
check:
|
||||
Peer.decode(pb).isErr()
|
||||
|
||||
test "decode peer with invalid connection type":
|
||||
var pb = initProtoBuffer()
|
||||
pb.write(1, @[1'u8, 2, 3]) # id field
|
||||
pb.write(3, invalidType) # bogus connection type
|
||||
check:
|
||||
Peer.decode(pb).isErr()
|
||||
|
||||
test "decode message with invalid msgType":
|
||||
var pb = initProtoBuffer()
|
||||
pb.write(1, invalidType) # invalid MessageType
|
||||
check:
|
||||
Message.decode(pb.buffer).isErr()
|
||||
|
||||
test "decode message with invalid peer in closerPeers":
|
||||
let badPeerBuf = @[0'u8, 1, 2] # junk
|
||||
var pb = initProtoBuffer()
|
||||
pb.write(8, badPeerBuf) # closerPeers field
|
||||
check:
|
||||
Message.decode(pb.buffer).isErr()
|
||||
|
||||
test "decode message with invalid embedded record":
|
||||
# encode junk data into field 3 (record)
|
||||
var pb = initProtoBuffer()
|
||||
pb.write(1, uint32(MessageType.putValue)) # valid msgType
|
||||
pb.write(3, @[0x00'u8, 0xFF, 0xAB]) # broken protobuf for record
|
||||
check:
|
||||
Message.decode(pb.buffer).isErr()
|
||||
|
||||
test "decode message with empty embedded record":
|
||||
var recordPb = initProtoBuffer() # no fields
|
||||
var pb = initProtoBuffer()
|
||||
pb.write(1, uint32(MessageType.getValue))
|
||||
pb.write(3, recordPb.buffer)
|
||||
let decoded = Message.decode(pb.buffer).valFromResultOption
|
||||
check:
|
||||
decoded.record.isSome()
|
||||
decoded.record.get().key.isNone()
|
||||
|
||||
test "peer with empty addr list and no connection":
|
||||
let peer = Peer(id: @[0x42'u8], addrs: @[], connection: ConnectionType.notConnected)
|
||||
let encoded = peer.encode()
|
||||
let decoded = Peer.decode(initProtoBuffer(encoded.buffer)).valFromResultOption
|
||||
check:
|
||||
decoded == peer
|
||||
|
||||
test "message with empty closer/provider peers":
|
||||
let msg = Message(
|
||||
msgType: MessageType.ping,
|
||||
key: none[seq[byte]](),
|
||||
record: none[Record](),
|
||||
closerPeers: @[],
|
||||
providerPeers: @[],
|
||||
)
|
||||
let encoded = msg.encode()
|
||||
let decoded = Message.decode(encoded.buffer).valFromResultOption
|
||||
check:
|
||||
decoded == msg
|
||||
|
||||
test "peer with addr but missing id":
|
||||
var pb = initProtoBuffer()
|
||||
let maddr = MultiAddress.init("/ip4/1.2.3.4/tcp/1234").tryGet()
|
||||
pb.write(2, maddr.data.buffer)
|
||||
check:
|
||||
Peer.decode(pb).isErr()
|
||||
@@ -9,12 +9,11 @@
|
||||
|
||||
{.used.}
|
||||
|
||||
import sequtils, options, tables, sets
|
||||
import sequtils, tables, sets
|
||||
import chronos, stew/byteutils
|
||||
import
|
||||
utils,
|
||||
../../libp2p/[
|
||||
errors,
|
||||
../utils,
|
||||
../../../libp2p/[
|
||||
switch,
|
||||
stream/connection,
|
||||
crypto/crypto,
|
||||
@@ -24,9 +23,9 @@ import
|
||||
protocols/pubsub/peertable,
|
||||
protocols/pubsub/pubsubpeer,
|
||||
]
|
||||
import ../../libp2p/protocols/pubsub/errors as pubsub_errors
|
||||
import ../../../libp2p/protocols/pubsub/errors as pubsub_errors
|
||||
|
||||
import ../helpers
|
||||
import ../../helpers
|
||||
|
||||
proc waitSub(sender, receiver: auto, key: string) {.async.} =
|
||||
# turn things deterministic
|
||||
@@ -39,7 +38,7 @@ proc waitSub(sender, receiver: auto, key: string) {.async.} =
|
||||
dec ceil
|
||||
doAssert(ceil > 0, "waitSub timeout!")
|
||||
|
||||
suite "FloodSub":
|
||||
suite "FloodSub Integration":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
@@ -311,5 +310,5 @@ suite "FloodSub":
|
||||
|
||||
check (await bigNode1[0].publish("foo", bigMessage)) > 0
|
||||
|
||||
checkUntilTimeout:
|
||||
checkUntilTimeoutCustom(10.seconds, 100.milliseconds):
|
||||
messageReceived == 1
|
||||
423 tests/pubsub/integration/testgossipsubcontrolmessages.nim (new file)
@@ -0,0 +1,423 @@
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils]
|
||||
import chronicles
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../../helpers
|
||||
|
||||
suite "GossipSub Integration - Control Messages":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "GRAFT messages correctly add peers to mesh":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foobar"
|
||||
graftMessage = ControlMessage(graft: @[ControlGraft(topicID: topic)])
|
||||
numberOfNodes = 2
|
||||
# First part of the hack: Weird dValues so peers are not GRAFTed automatically
|
||||
dValues = DValues(dLow: some(0), dHigh: some(0), d: some(0), dOut: some(-1))
|
||||
nodes = generateNodes(
|
||||
numberOfNodes, gossip = true, verifySignature = false, dValues = some(dValues)
|
||||
)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Because of the hack-ish dValues, the peers are added to gossipsub but not GRAFTed to mesh
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# Stop both nodes in order to prevent GRAFT message to be sent by heartbeat
|
||||
await n0.stop()
|
||||
await n1.stop()
|
||||
|
||||
# Second part of the hack
|
||||
# Set values so peers can be GRAFTed
|
||||
let newDValues =
|
||||
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(1)))
|
||||
n0.parameters.applyDValues(newDValues)
|
||||
n1.parameters.applyDValues(newDValues)
|
||||
|
||||
# When a GRAFT message is sent
|
||||
let p0 = n1.getOrCreatePeer(n0.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
|
||||
n1.broadcast(@[p0], RPCMsg(control: some(graftMessage)), isHighPriority = false)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes.allIt(it.mesh.getOrDefault(topic).len == 1)
|
||||
|
||||
# Then the peers are GRAFTed
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
asyncTest "Received GRAFT for non-subscribed topic":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
graftMessage = ControlMessage(graft: @[ControlGraft(topicID: topic)])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And only node0 subscribes to the topic
|
||||
nodes[0].subscribe(topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.topics.hasKey(topic)
|
||||
not n1.topics.hasKey(topic)
|
||||
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When a GRAFT message is sent
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer is not GRAFTed
|
||||
check:
|
||||
n0.topics.hasKey(topic)
|
||||
not n1.topics.hasKey(topic)
|
||||
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
asyncTest "PRUNE messages correctly removes peers from mesh":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
backoff = 1
|
||||
pruneMessage = ControlMessage(
|
||||
prune: @[ControlPrune(topicID: topic, peers: @[], backoff: uint64(backoff))]
|
||||
)
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When a PRUNE message is sent
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer is PRUNEd
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When another PRUNE message is sent
|
||||
let p0 = n1.getOrCreatePeer(n0.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n1.broadcast(@[p0], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer is PRUNEd
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
asyncTest "Received PRUNE for non-subscribed topic":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
pruneMessage =
|
||||
ControlMessage(prune: @[ControlPrune(topicID: topic, peers: @[], backoff: 1)])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And only node0 subscribes to the topic
|
||||
n0.subscribe(topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.topics.hasKey(topic)
|
||||
not n1.topics.hasKey(topic)
|
||||
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When a PRUNE message is sent
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer is not PRUNEd
|
||||
check:
|
||||
n0.topics.hasKey(topic)
|
||||
not n1.topics.hasKey(topic)
|
||||
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
asyncTest "IHAVE messages correctly advertise message ID to peers":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
messageID = @[0'u8, 1, 2, 3]
|
||||
ihaveMessage =
|
||||
ControlMessage(ihave: @[ControlIHave(topicID: topic, messageIDs: @[messageID])])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Given node1 has an IHAVE observer
|
||||
var (receivedIHaves, checkForIHaves) = createCheckForIHave()
|
||||
n1.addOnRecvObserver(checkForIHaves)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When an IHAVE message is sent
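# (IHAVE advertises message IDs held by the sender so the receiver can request the full messages with IWANT)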
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(ihaveMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer has the message ID
|
||||
check:
|
||||
receivedIHaves[0] == ControlIHave(topicID: topic, messageIDs: @[messageID])
|
||||
|
||||
asyncTest "IWANT messages correctly request messages by their IDs":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
messageID = @[0'u8, 1, 2, 3]
|
||||
iwantMessage = ControlMessage(iwant: @[ControlIWant(messageIDs: @[messageID])])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Given node1 has an IWANT observer
|
||||
var (receivedIWants, checkForIWants) = createCheckForIWant()
|
||||
n1.addOnRecvObserver(checkForIWants)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When an IWANT message is sent
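# (IWANT requests the full messages for the advertised IDs from the sender's message cache)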
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(iwantMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer has the message ID
|
||||
check:
|
||||
receivedIWants[0] == ControlIWant(messageIDs: @[messageID])
|
||||
|
||||
asyncTest "IHAVE for message not held by peer triggers IWANT response to sender":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
messageID = @[0'u8, 1, 2, 3]
|
||||
ihaveMessage =
|
||||
ControlMessage(ihave: @[ControlIHave(topicID: topic, messageIDs: @[messageID])])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Given node0 has an IWANT observer
|
||||
var (receivedIWants, checkForIWants) = createCheckForIWant()
|
||||
n0.addOnRecvObserver(checkForIWants)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both nodes subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# When an IHAVE message is sent from node0
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(ihaveMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then node0 should receive an IWANT message from node1 (as node1 doesn't have the message)
|
||||
check:
|
||||
receivedIWants[0] == ControlIWant(messageIDs: @[messageID])
|
||||
|
||||
asyncTest "IDONTWANT":
|
||||
# 3 nodes: A <=> B <=> C (A & C are NOT connected)
|
||||
let
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(3, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await connectNodes(nodes[0], nodes[1])
|
||||
await connectNodes(nodes[1], nodes[2])
|
||||
|
||||
let (bFinished, handlerB) = createCompleteHandler()
|
||||
|
||||
nodes[0].subscribe(topic, voidTopicHandler)
|
||||
nodes[1].subscribe(topic, handlerB)
|
||||
nodes[2].subscribe(topic, voidTopicHandler)
|
||||
await waitSubGraph(nodes, topic)
|
||||
|
||||
check:
|
||||
nodes[2].mesh.peers(topic) == 1
|
||||
|
||||
# When we pre-emptively send a dontwant from C to B,
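# (the IDONTWANT carries a message ID that B stores in its per-peer iDontWants window)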
|
||||
nodes[2].broadcast(
|
||||
nodes[2].mesh[topic],
|
||||
RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(idontwant: @[ControlIWant(messageIDs: @[newSeq[byte](10)])])
|
||||
)
|
||||
),
|
||||
isHighPriority = true,
|
||||
)
|
||||
|
||||
# Then B records the IDONTWANT from C and won't relay the message to C.
|
||||
checkUntilTimeout:
|
||||
nodes[1].mesh.getOrDefault(topic).anyIt(it.iDontWants.anyIt(it.len == 1))
|
||||
|
||||
# When A sends a message to the topic
|
||||
tryPublish await nodes[0].publish(topic, newSeq[byte](10000)), 1
|
||||
|
||||
discard await bFinished
|
||||
|
||||
# Then B sends IDONTWANT to C, but not A
|
||||
checkUntilTimeout:
|
||||
toSeq(nodes[2].mesh.getOrDefault(topic)).anyIt(it.iDontWants.anyIt(it.len == 1))
|
||||
check:
|
||||
toSeq(nodes[0].mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
|
||||
|
||||
asyncTest "IDONTWANT is broadcasted on publish":
|
||||
# 2 nodes: A <=> B
|
||||
let
|
||||
topic = "foobar"
|
||||
nodes =
|
||||
generateNodes(2, gossip = true, sendIDontWantOnPublish = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await connectNodes(nodes[0], nodes[1])
|
||||
|
||||
nodes[0].subscribe(topic, voidTopicHandler)
|
||||
nodes[1].subscribe(topic, voidTopicHandler)
|
||||
await waitSubGraph(nodes, topic)
|
||||
|
||||
# When A sends a message to the topic
|
||||
tryPublish await nodes[0].publish(topic, newSeq[byte](10000)), 1
|
||||
|
||||
# Then IDONTWANT is sent to B on publish
|
||||
checkUntilTimeout:
|
||||
nodes[1].mesh.getOrDefault(topic).anyIt(it.iDontWants.anyIt(it.len == 1))
|
||||
|
||||
asyncTest "IDONTWANT is sent only for 1.2":
|
||||
# 3 nodes: A <=> B <=> C (A & C are NOT connected)
|
||||
let
|
||||
topic = "foobar"
|
||||
nodeA = generateNodes(1, gossip = true).toGossipSub()[0]
|
||||
nodeB = generateNodes(1, gossip = true).toGossipSub()[0]
|
||||
nodeC = generateNodes(1, gossip = true, gossipSubVersion = GossipSubCodec_11)
|
||||
.toGossipSub()[0]
|
||||
|
||||
startNodesAndDeferStop(@[nodeA, nodeB, nodeC])
|
||||
|
||||
await connectNodes(nodeA, nodeB)
|
||||
await connectNodes(nodeB, nodeC)
|
||||
|
||||
let (bFinished, handlerB) = createCompleteHandler()
|
||||
|
||||
nodeA.subscribe(topic, voidTopicHandler)
|
||||
nodeB.subscribe(topic, handlerB)
|
||||
nodeC.subscribe(topic, voidTopicHandler)
|
||||
await waitSubGraph(@[nodeA, nodeB, nodeC], topic)
|
||||
|
||||
check:
|
||||
nodeC.mesh.peers(topic) == 1
|
||||
|
||||
# When A sends a message to the topic
|
||||
tryPublish await nodeA.publish(topic, newSeq[byte](10000)), 1
|
||||
|
||||
discard await bFinished
|
||||
|
||||
# Then B sends IDONTWANT to neither C (because C.gossipSubVersion == GossipSubCodec_11) nor A (the original sender)
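# (IDONTWANT was introduced in GossipSub v1.2, so it is never sent to peers speaking v1.1 or older)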
|
||||
await waitForHeartbeat()
|
||||
check:
|
||||
toSeq(nodeC.mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
|
||||
toSeq(nodeA.mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
|
||||
97
tests/pubsub/integration/testgossipsubcustomconn.nim
Normal file
@@ -0,0 +1,97 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0 ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import chronos
|
||||
import stew/byteutils
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
|
||||
import ../../../libp2p/protocols/pubsub/rpc/[messages]
|
||||
import ../../../libp2p/stream/connection
|
||||
import ../../helpers
|
||||
|
||||
type DummyConnection* = ref object of Connection
|
||||
|
||||
method write*(
|
||||
self: DummyConnection, msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
return fut
|
||||
|
||||
proc new*(T: typedesc[DummyConnection]): DummyConnection =
|
||||
let instance = T()
|
||||
instance
|
||||
|
||||
suite "GossipSub Integration - Custom Connection Support":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "publish with useCustomConn triggers custom connection and peer selection":
|
||||
let
|
||||
topic = "test"
|
||||
handler = proc(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
nodes = generateNodes(2, gossip = true)
|
||||
|
||||
var
|
||||
customConnCreated = false
|
||||
peerSelectionCalled = false
|
||||
|
||||
GossipSub(nodes[0]).customConnCallbacks = some(
|
||||
CustomConnectionCallbacks(
|
||||
customConnCreationCB: proc(
|
||||
destAddr: Option[MultiAddress], destPeerId: PeerId, codec: string
|
||||
): Connection =
|
||||
customConnCreated = true
|
||||
return DummyConnection.new(),
|
||||
customPeerSelectionCB: proc(
|
||||
allPeers: HashSet[PubSubPeer],
|
||||
directPeers: HashSet[PubSubPeer],
|
||||
meshPeers: HashSet[PubSubPeer],
|
||||
fanoutPeers: HashSet[PubSubPeer],
|
||||
): HashSet[PubSubPeer] =
|
||||
peerSelectionCalled = true
|
||||
return allPeers,
|
||||
)
|
||||
)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe(topic, handler)
|
||||
await waitSub(nodes[0], nodes[1], topic)
|
||||
|
||||
tryPublish await nodes[0].publish(topic, "hello".toBytes(), useCustomConn = true), 1
|
||||
|
||||
check:
|
||||
peerSelectionCalled
|
||||
customConnCreated
|
||||
|
||||
asyncTest "publish with useCustomConn triggers assertion if custom callbacks not set":
|
||||
let
|
||||
topic = "test"
|
||||
handler = proc(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe(topic, handler)
|
||||
await waitSub(nodes[0], nodes[1], topic)
|
||||
|
||||
var raised = false
|
||||
try:
|
||||
discard await nodes[0].publish(topic, "hello".toBytes(), useCustomConn = true)
|
||||
except Defect:
|
||||
raised = true
|
||||
|
||||
check raised
|
||||
95
tests/pubsub/integration/testgossipsubfanout.nim
Normal file
@@ -0,0 +1,95 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import stew/byteutils
|
||||
import chronicles
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, peertable]
|
||||
import ../../../libp2p/protocols/pubsub/rpc/[messages]
|
||||
import ../../helpers
|
||||
|
||||
suite "GossipSub Integration - Fanout Management":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "GossipSub send over fanout A -> B":
|
||||
let (passed, handler) = createCompleteHandler()
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
await waitSub(nodes[0], nodes[1], "foobar")
|
||||
|
||||
var observed = 0
|
||||
let
|
||||
obs1 = PubSubObserver(
|
||||
onRecv: proc(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
inc observed
|
||||
)
|
||||
obs2 = PubSubObserver(
|
||||
onSend: proc(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
inc observed
|
||||
)
|
||||
|
||||
nodes[1].addObserver(obs1)
|
||||
nodes[0].addObserver(obs2)
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
|
||||
|
||||
var gossip1: GossipSub = GossipSub(nodes[0])
|
||||
var gossip2: GossipSub = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
"foobar" in gossip1.gossipsub
|
||||
gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
discard await passed.wait(2.seconds)
|
||||
|
||||
check observed == 2
|
||||
|
||||
asyncTest "GossipSub send over fanout A -> B for subscribed topic":
|
||||
let (passed, handler) = createCompleteHandler()
|
||||
|
||||
let nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 10.minutes)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
GossipSub(nodes[1]).parameters.d = 0
|
||||
GossipSub(nodes[1]).parameters.dHigh = 0
|
||||
GossipSub(nodes[1]).parameters.dLow = 0
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
let gsNode = GossipSub(nodes[1])
|
||||
checkUntilTimeout:
|
||||
gsNode.mesh.getOrDefault("foobar").len == 0
|
||||
GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0
|
||||
(
|
||||
GossipSub(nodes[0]).gossipsub.getOrDefault("foobar").len == 1 or
|
||||
GossipSub(nodes[0]).fanout.getOrDefault("foobar").len == 1
|
||||
)
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
|
||||
|
||||
check:
|
||||
GossipSub(nodes[0]).fanout.getOrDefault("foobar").len > 0
|
||||
GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0
|
||||
|
||||
discard await passed.wait(2.seconds)
|
||||
|
||||
trace "test done, stopping..."
|
||||
275
tests/pubsub/integration/testgossipsubgossip.nim
Normal file
@@ -0,0 +1,275 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils]
|
||||
import stew/byteutils
|
||||
import chronicles
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../../../libp2p/protocols/pubsub/rpc/[message]
|
||||
import ../../helpers, ../../utils/[futures]
|
||||
|
||||
suite "GossipSub Integration - Gossip Protocol":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "messages sent to peers not in the mesh are propagated via gossip":
|
||||
let
|
||||
numberOfNodes = 5
|
||||
topic = "foobar"
|
||||
dValues = DValues(dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1))
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, dValues = some(dValues))
|
||||
.toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# All nodes are checking for iHave messages
|
||||
var messages = addIHaveObservers(nodes)
|
||||
|
||||
# And are interconnected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes.allIt(it.gossipsub.getOrDefault(topic).len == numberOfNodes - 1)
|
||||
|
||||
# When node 0 sends a message
|
||||
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
|
||||
|
||||
# At least one of the nodes should have received an iHave message
|
||||
# The check is made this way because the mesh structure changes from run to run
|
||||
checkUntilTimeout:
|
||||
messages[].mapIt(it[].len).anyIt(it > 0)
|
||||
|
||||
asyncTest "adaptive gossip dissemination, dLazy and gossipFactor to 0":
|
||||
let
|
||||
numberOfNodes = 20
|
||||
topic = "foobar"
|
||||
dValues = DValues(
|
||||
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(0)
|
||||
)
|
||||
nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
dValues = some(dValues),
|
||||
gossipFactor = some(0.float),
|
||||
)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# All nodes are checking for iHave messages
|
||||
var messages = addIHaveObservers(nodes)
|
||||
|
||||
# And are connected to node 0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(nodes[0], nodes[i])
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# When node 0 sends a message
|
||||
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
|
||||
await waitForHeartbeat()
|
||||
|
||||
# None of the nodes should have received an iHave message
|
||||
let receivedIHaves = messages[].mapIt(it[].len)
|
||||
check:
|
||||
filterIt(receivedIHaves, it > 0).len == 0
|
||||
|
||||
asyncTest "adaptive gossip dissemination, with gossipFactor priority":
|
||||
let
|
||||
numberOfNodes = 20
|
||||
topic = "foobar"
|
||||
dValues = DValues(
|
||||
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(4)
|
||||
)
|
||||
nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
dValues = some(dValues),
|
||||
gossipFactor = some(0.5),
|
||||
)
|
||||
.toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# All nodes are checking for iHave messages
|
||||
var messages = addIHaveObservers(nodes)
|
||||
|
||||
# And are connected to node 0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(nodes[0], nodes[i])
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
|
||||
|
||||
# When node 0 sends a message
|
||||
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
|
||||
await waitForHeartbeat(2)
|
||||
|
||||
# At least 8 of the nodes should have received an iHave message
|
||||
# That's because the gossip factor is 0.5 over 16 available nodes
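# (19 connected peers minus ~3 mesh peers leaves ~16 gossip candidates; 0.5 * 16 = 8)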
|
||||
let receivedIHaves = messages[].mapIt(it[].len)
|
||||
check:
|
||||
filterIt(receivedIHaves, it > 0).len >= 8
|
||||
|
||||
asyncTest "adaptive gossip dissemination, with dLazy priority":
|
||||
let
|
||||
numberOfNodes = 20
|
||||
topic = "foobar"
|
||||
dValues = DValues(
|
||||
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(6)
|
||||
)
|
||||
nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
dValues = some(dValues),
|
||||
gossipFactor = some(0.float),
|
||||
)
|
||||
.toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# All nodes are checking for iHave messages
|
||||
var messages = addIHaveObservers(nodes)
|
||||
|
||||
# And are connected to node 0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(nodes[0], nodes[i])
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
|
||||
|
||||
# When node 0 sends a message
|
||||
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
|
||||
await waitForHeartbeat(2)
|
||||
|
||||
# At least 6 of the nodes should have received an iHave message
|
||||
# That's because the dLazy is 6
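# (dLazy is the minimum number of non-mesh peers gossiped to on each heartbeat)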
|
||||
let receivedIHaves = messages[].mapIt(it[].len)
|
||||
check:
|
||||
filterIt(receivedIHaves, it > 0).len >= dValues.dLazy.get()
|
||||
|
||||
asyncTest "iDontWant messages are broadcast immediately after receiving the first message instance":
|
||||
let
|
||||
numberOfNodes = 3
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# All nodes are checking for iDontWant messages
|
||||
var messages = addIDontWantObservers(nodes)
|
||||
|
||||
# And are connected in a line
|
||||
await connectNodes(nodes[0], nodes[1])
|
||||
await connectNodes(nodes[1], nodes[2])
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes[0].gossipsub.getOrDefault(topic).len == 1
|
||||
nodes[1].gossipsub.getOrDefault(topic).len == 2
|
||||
nodes[2].gossipsub.getOrDefault(topic).len == 1
|
||||
|
||||
# When node 0 sends a large message
|
||||
let largeMsg = newSeq[byte](1000)
|
||||
tryPublish await nodes[0].publish(topic, largeMsg), 1
|
||||
|
||||
# Only node 2 should have received the iDontWant message
|
||||
checkUntilTimeout:
|
||||
messages[].mapIt(it[].len)[2] == 1
|
||||
messages[].mapIt(it[].len)[1] == 0
|
||||
messages[].mapIt(it[].len)[0] == 0
|
||||
|
||||
asyncTest "GossipSub peer exchange":
|
||||
# A, B & C are subscribed to something
|
||||
# B unsubscribes from it, so it should send
|
||||
# PX to A & C
|
||||
#
|
||||
# C sends its signed peer record (SPR), A does not
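# (PX: when unsubscribing, B includes other known peers, optionally with their signed peer records, so A and C can discover each other)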
|
||||
let
|
||||
topic = "foobar"
|
||||
nodes =
|
||||
generateNodes(2, gossip = true, enablePX = true).toGossipSub() &
|
||||
generateNodes(1, gossip = true, sendSignedPeerRecord = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitSubAllNodes(nodes, topic)
|
||||
|
||||
# Setup record handlers for all nodes
|
||||
var
|
||||
passed0: Future[void] = newFuture[void]()
|
||||
passed2: Future[void] = newFuture[void]()
|
||||
nodes[0].routingRecordsHandler.add(
|
||||
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
|
||||
check:
|
||||
tag == topic
|
||||
peers.len == 2
|
||||
peers[0].record.isSome() xor peers[1].record.isSome()
|
||||
passed0.complete()
|
||||
)
|
||||
nodes[1].routingRecordsHandler.add(
|
||||
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
|
||||
raiseAssert "should not get here"
|
||||
)
|
||||
nodes[2].routingRecordsHandler.add(
|
||||
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
|
||||
check:
|
||||
tag == topic
|
||||
peers.len == 2
|
||||
peers[0].record.isSome() xor peers[1].record.isSome()
|
||||
passed2.complete()
|
||||
)
|
||||
|
||||
# Unsubscribe from the topic
|
||||
nodes[1].unsubscribe(topic, voidTopicHandler)
|
||||
|
||||
# Then verify which nodes receive the PX
|
||||
let results = await waitForStates(@[passed0, passed2], HEARTBEAT_TIMEOUT)
|
||||
check:
|
||||
results[0].isCompleted()
|
||||
results[1].isCompleted()
|
||||
|
||||
asyncTest "Peer must send right gosspipsub version":
|
||||
let
|
||||
topic = "foobar"
|
||||
node0 = generateNodes(1, gossip = true)[0]
|
||||
node1 = generateNodes(1, gossip = true, gossipSubVersion = GossipSubCodec_10)[0]
|
||||
|
||||
startNodesAndDeferStop(@[node0, node1])
|
||||
|
||||
await connectNodes(node0, node1)
|
||||
|
||||
node0.subscribe(topic, voidTopicHandler)
|
||||
node1.subscribe(topic, voidTopicHandler)
|
||||
await waitSubGraph(@[node0, node1], topic)
|
||||
|
||||
var gossip0: GossipSub = GossipSub(node0)
|
||||
var gossip1: GossipSub = GossipSub(node1)
|
||||
|
||||
checkUntilTimeout:
|
||||
gossip0.mesh.getOrDefault(topic).toSeq[0].codec == GossipSubCodec_10
|
||||
checkUntilTimeout:
|
||||
gossip1.mesh.getOrDefault(topic).toSeq[0].codec == GossipSubCodec_10
|
||||
348
tests/pubsub/integration/testgossipsubheartbeat.nim
Normal file
@@ -0,0 +1,348 @@
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils]
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../../helpers
|
||||
|
||||
suite "GossipSub Integration - Heartbeat":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "Mesh is rebalanced during heartbeat - pruning peers":
|
||||
const
|
||||
numberOfNodes = 10
|
||||
topic = "foobar"
|
||||
heartbeatInterval = 200.milliseconds
|
||||
let
|
||||
nodes = generateNodes(
|
||||
numberOfNodes, gossip = true, heartbeatInterval = heartbeatInterval
|
||||
)
|
||||
.toGossipSub()
|
||||
node0 = nodes[0]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Nodes are connected to Node0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(node0, nodes[i])
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
checkUntilTimeout:
|
||||
node0.mesh.getOrDefault(topic).len == numberOfNodes - 1
|
||||
|
||||
# When DValues of Node0 are updated to lower than defaults
|
||||
const
|
||||
newDLow = 2
|
||||
newDHigh = 4
|
||||
newDValues = some(
|
||||
DValues(
|
||||
dLow: some(newDLow),
|
||||
dHigh: some(newDHigh),
|
||||
d: some(3),
|
||||
dLazy: some(3),
|
||||
dScore: some(2),
|
||||
dOut: some(2),
|
||||
)
|
||||
)
|
||||
node0.parameters.applyDValues(newDValues)
|
||||
|
||||
# Then mesh of Node0 is rebalanced and peers are pruned to adapt to new values
|
||||
checkUntilTimeout:
|
||||
node0.mesh[topic].len >= newDLow and node0.mesh[topic].len <= newDHigh
|
||||
|
||||
asyncTest "Mesh is rebalanced during heartbeat - grafting new peers":
|
||||
const
|
||||
numberOfNodes = 10
|
||||
topic = "foobar"
|
||||
dLow = 3
|
||||
dHigh = 4
|
||||
heartbeatInterval = 200.milliseconds
|
||||
let
|
||||
nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
dValues = some(
|
||||
DValues(dLow: some(dLow), dHigh: some(dHigh), d: some(3), dOut: some(1))
|
||||
),
|
||||
pruneBackoff = 20.milliseconds,
|
||||
heartbeatInterval = heartbeatInterval,
|
||||
)
|
||||
.toGossipSub()
|
||||
node0 = nodes[0]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Nodes are connected to Node0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(node0, nodes[i])
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
checkUntilTimeout:
|
||||
node0.mesh.getOrDefault(topic).len >= dLow and
|
||||
node0.mesh.getOrDefault(topic).len <= dHigh
|
||||
|
||||
# When all but the first peer of Node0's mesh unsubscribe from the topic
|
||||
let peersToDisconnect = node0.mesh[topic].toSeq()[1 .. ^1].mapIt(it.peerId)
|
||||
findAndUnsubscribePeers(nodes, peersToDisconnect, topic, voidTopicHandler)
|
||||
|
||||
checkUntilTimeout:
|
||||
node0.mesh[topic].len >= dLow and node0.mesh[topic].len <= dHigh
|
||||
node0.mesh[topic].toSeq().allIt(it.peerId notin peersToDisconnect)
|
||||
|
||||
asyncTest "Mesh is rebalanced during heartbeat - opportunistic grafting":
|
||||
const
|
||||
numberOfNodes = 10
|
||||
topic = "foobar"
|
||||
heartbeatInterval = 200.milliseconds
|
||||
let
|
||||
nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
dValues = some(
|
||||
DValues(
|
||||
dLow: some(3),
|
||||
dHigh: some(4),
|
||||
d: some(3),
|
||||
dOut: some(1),
|
||||
dLazy: some(3),
|
||||
dScore: some(2),
|
||||
)
|
||||
),
|
||||
pruneBackoff = 20.milliseconds,
|
||||
opportunisticGraftThreshold = 600,
|
||||
heartbeatInterval = heartbeatInterval,
|
||||
)
|
||||
.toGossipSub()
|
||||
node0 = nodes[0]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Nodes are connected to Node0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(node0, nodes[i])
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat(heartbeatInterval)
|
||||
|
||||
# Keep track of initial mesh of Node0
|
||||
let startingMesh = node0.mesh[topic].toSeq()
|
||||
|
||||
# When scores are assigned to Peers of Node0
|
||||
var expectedGrafts: seq[PubSubPeer] = @[]
|
||||
var score = 100.0
|
||||
for peer in node0.gossipsub[topic]:
|
||||
if peer in node0.mesh[topic]:
|
||||
# Assign scores in starting Mesh
|
||||
peer.score = score
|
||||
score += 100.0
|
||||
else:
|
||||
# Assign scores higher than median to Peers not in starting Mesh and expect them to be grafted
|
||||
peer.score = 800.0
|
||||
expectedGrafts &= peer
|
||||
|
||||
# Then during heartbeat Peers with lower than median scores are pruned and max 2 Peers are grafted
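# (MaxOpportunisticGraftPeers caps how many peers can be opportunistically grafted per heartbeat)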
|
||||
await waitForHeartbeat(heartbeatInterval)
|
||||
|
||||
let actualGrafts = node0.mesh[topic].toSeq().filterIt(it notin startingMesh)
|
||||
check:
|
||||
actualGrafts.len == MaxOpportunisticGraftPeers
|
||||
actualGrafts.allIt(it in expectedGrafts)
|
||||
|
||||
asyncTest "Fanout maintenance during heartbeat - expired peers are dropped":
|
||||
const
|
||||
numberOfNodes = 10
|
||||
topic = "foobar"
|
||||
heartbeatInterval = 200.milliseconds
|
||||
let nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
fanoutTTL = 60.milliseconds,
|
||||
heartbeatInterval = heartbeatInterval,
|
||||
)
|
||||
.toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# All nodes but Node0 are subscribed to the topic
|
||||
for node in nodes[1 .. ^1]:
|
||||
node.subscribe(topic, voidTopicHandler)
|
||||
await waitForHeartbeat(heartbeatInterval)
|
||||
|
||||
let node0 = nodes[0]
|
||||
checkUntilTimeout:
|
||||
node0.gossipsub.hasKey(topic)
|
||||
|
||||
# When Node0 sends a message to the topic
|
||||
tryPublish await node0.publish(topic, newSeq[byte](10000)), 3
|
||||
|
||||
# Then Node0 fanout peers are populated
|
||||
checkUntilTimeout:
|
||||
node0.fanout.hasKey(topic)
|
||||
node0.fanout[topic].len > 0
|
||||
|
||||
# And after heartbeat Node0 fanout peers are dropped (because fanoutTTL < heartbeatInterval)
|
||||
checkUntilTimeout:
|
||||
not node0.fanout.hasKey(topic)
|
||||
|
||||
asyncTest "Fanout maintenance during heartbeat - fanout peers are replenished":
|
||||
const
|
||||
numberOfNodes = 10
|
||||
topic = "foobar"
|
||||
heartbeatInterval = 200.milliseconds
|
||||
let
|
||||
nodes = generateNodes(
|
||||
numberOfNodes, gossip = true, heartbeatInterval = heartbeatInterval
|
||||
)
|
||||
.toGossipSub()
|
||||
node0 = nodes[0]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# All nodes but Node0 are subscribed to the topic
|
||||
for node in nodes[1 .. ^1]:
|
||||
node.subscribe(topic, voidTopicHandler)
|
||||
await waitForHeartbeat(heartbeatInterval)
|
||||
|
||||
# When Node0 sends a message to the topic
|
||||
tryPublish await node0.publish(topic, newSeq[byte](10000)), 1
|
||||
|
||||
# Then Node0 fanout peers are populated
|
||||
let maxFanoutPeers = node0.parameters.d
|
||||
checkUntilTimeout:
|
||||
node0.fanout[topic].len == maxFanoutPeers
|
||||
|
||||
# When all but the first peer of Node0's fanout unsubscribe from the topic
|
||||
let peersToDisconnect = node0.fanout[topic].toSeq()[1 .. ^1].mapIt(it.peerId)
|
||||
findAndUnsubscribePeers(nodes, peersToDisconnect, topic, voidTopicHandler)
|
||||
|
||||
# Then Node0 fanout peers are replenished during heartbeat
|
||||
# expecting 10[numberOfNodes] - 1[Node0] - (6[maxFanoutPeers] - 1[first peer not disconnected]) = 4
|
||||
let expectedLen = numberOfNodes - 1 - (maxFanoutPeers - 1)
|
||||
checkUntilTimeout:
|
||||
node0.fanout[topic].len == expectedLen
|
||||
node0.fanout[topic].toSeq().allIt(it.peerId notin peersToDisconnect)
|
||||
|
||||
asyncTest "iDontWants history - last element is pruned during heartbeat":
|
||||
const
|
||||
topic = "foobar"
|
||||
heartbeatInterval = 200.milliseconds
|
||||
historyLength = 3
|
||||
let nodes = generateNodes(
|
||||
2,
|
||||
gossip = true,
|
||||
sendIDontWantOnPublish = true,
|
||||
historyLength = historyLength,
|
||||
heartbeatInterval = heartbeatInterval,
|
||||
)
|
||||
.toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await connectNodes(nodes[0], nodes[1])
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat(heartbeatInterval)
|
||||
|
||||
# Get Node0 as Peer of Node1
|
||||
let peer = nodes[1].mesh[topic].toSeq()[0]
|
||||
|
||||
# Wait for history to populate
|
||||
checkUntilTimeout:
|
||||
peer.iDontWants.len == historyLength
|
||||
|
||||
# When Node0 sends 5 messages to the topic
|
||||
const msgCount = 5
|
||||
for i in 0 ..< msgCount:
|
||||
tryPublish await nodes[0].publish(topic, newSeq[byte](1000)), 1
|
||||
|
||||
# Then Node1 receives 5 iDontWant messages from Node0
|
||||
checkUntilTimeoutCustom(3.seconds, 50.milliseconds):
|
||||
peer.iDontWants[0].len == msgCount
|
||||
|
||||
for i in 0 ..< historyLength:
|
||||
# When heartbeat happens
|
||||
# And history moves (new element added at start, last element pruned)
|
||||
checkUntilTimeout:
|
||||
peer.iDontWants[i].len == 0
|
||||
|
||||
# Then iDontWant messages are moved to the next element
|
||||
var expectedHistory = newSeqWith(historyLength, 0)
|
||||
let nextIndex = i + 1
|
||||
if nextIndex < historyLength:
|
||||
expectedHistory[nextIndex] = msgCount
|
||||
|
||||
# Until they reach last element and are pruned
|
||||
checkUntilTimeout:
|
||||
peer.iDontWants.mapIt(it.len) == expectedHistory
|
||||
|
||||
asyncTest "sentIHaves history - last element is pruned during heartbeat":
|
||||
# 3 Nodes, Node 0 <==> Node 1 and Node 0 <==> Node 2
|
||||
# due to DValues: 1 peer in mesh and 1 peer only in gossip of Node 0
|
||||
const
|
||||
numberOfNodes = 3
|
||||
topic = "foobar"
|
||||
heartbeatInterval = 200.milliseconds
|
||||
historyLength = 3
|
||||
gossipThreshold = -100.0
|
||||
let nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
historyLength = historyLength,
|
||||
dValues =
|
||||
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
|
||||
heartbeatInterval = heartbeatInterval,
|
||||
gossipThreshold = gossipThreshold,
|
||||
)
|
||||
.toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(nodes[0], nodes[i])
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat(heartbeatInterval)
|
||||
|
||||
# Find Peer outside of mesh to which Node 0 will send IHave
|
||||
let peerOutsideMesh =
|
||||
nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
|
||||
|
||||
# Wait for history to populate
|
||||
checkUntilTimeout:
|
||||
peerOutsideMesh.sentIHaves.len == historyLength
|
||||
|
||||
# When a nodeOutsideMesh receives an IHave message, it responds with an IWant to request the full message from Node0
|
||||
# Setting `peer.score < gossipThreshold` to prevent the nodeOutsideMesh from sending the IWant
|
||||
# because when the IWant is processed, the messages are removed from the sentIHaves history
|
||||
let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)
|
||||
for p in nodeOutsideMesh.gossipsub[topic].toSeq():
|
||||
p.score = 2 * gossipThreshold
|
||||
|
||||
# When NodeInsideMesh sends a message to the topic
|
||||
let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
|
||||
let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
|
||||
tryPublish await nodeInsideMesh.publish(topic, newSeq[byte](1000)), 1
|
||||
|
||||
# When next heartbeat occurs
|
||||
# Then IHave is sent and sentIHaves is populated
|
||||
checkUntilTimeout:
|
||||
peerOutsideMesh.sentIHaves[0].len == 1
|
||||
|
||||
# Need to clear the mCache, as the node would keep repopulating sentIHaves until the cache has shifted enough times
|
||||
nodes[0].clearMCache()
|
||||
|
||||
for i in 0 ..< historyLength:
|
||||
# When heartbeat happens
|
||||
# And history moves (new element added at start, last element pruned)
|
||||
checkUntilTimeout:
|
||||
peerOutsideMesh.sentIHaves[i].len == 0
|
||||
|
||||
# Then sentIHaves messages are moved to the next element
|
||||
var expectedHistory = newSeqWith(historyLength, 0)
|
||||
let nextIndex = i + 1
|
||||
if nextIndex < historyLength:
|
||||
expectedHistory[nextIndex] = 1
|
||||
|
||||
# Until they reach last element and are pruned
|
||||
checkUntilTimeout:
|
||||
peerOutsideMesh.sentIHaves.mapIt(it.len) == expectedHistory
|
||||
347
tests/pubsub/integration/testgossipsubmeshmanagement.nim
Normal file
@@ -0,0 +1,347 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import chronicles
|
||||
import std/[sequtils]
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../../helpers
|
||||
|
||||
suite "GossipSub Integration - Mesh Management":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "Nodes graft peers according to DValues - numberOfNodes < dHigh":
|
||||
let
|
||||
numberOfNodes = 5
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
let expectedNumberOfPeers = numberOfNodes - 1
|
||||
|
||||
for i in 0 ..< numberOfNodes:
|
||||
let node = nodes[i]
|
||||
checkUntilTimeout:
|
||||
node.gossipsub.getOrDefault(topic).len == expectedNumberOfPeers
|
||||
node.mesh.getOrDefault(topic).len == expectedNumberOfPeers
|
||||
node.fanout.len == 0
|
||||
|
||||
asyncTest "Nodes graft peers according to DValues - numberOfNodes > dHigh":
|
||||
let
|
||||
numberOfNodes = 15
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
let
|
||||
expectedNumberOfPeers = numberOfNodes - 1
|
||||
dHigh = 12
|
||||
d = 6
|
||||
dLow = 4
|
||||
|
||||
for i in 0 ..< numberOfNodes:
|
||||
let node = nodes[i]
|
||||
checkUntilTimeout:
|
||||
node.gossipsub.getOrDefault(topic).len == expectedNumberOfPeers
|
||||
node.mesh.getOrDefault(topic).len >= dLow and
|
||||
node.mesh.getOrDefault(topic).len <= dHigh
|
||||
node.fanout.len == 0
|
||||
|
||||
asyncTest "GossipSub should add remote peer topic subscriptions":
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
let gossip1 = GossipSub(nodes[0])
|
||||
let gossip2 = GossipSub(nodes[1])
|
||||
|
||||
checkUntilTimeout:
|
||||
"foobar" in gossip2.topics
|
||||
"foobar" in gossip1.gossipsub
|
||||
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
asyncTest "GossipSub should add remote peer topic subscriptions if both peers are subscribed":
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
var subs: seq[Future[void]]
|
||||
subs &= waitSub(nodes[1], nodes[0], "foobar")
|
||||
subs &= waitSub(nodes[0], nodes[1], "foobar")
|
||||
|
||||
await allFuturesThrowing(subs)
|
||||
|
||||
let
|
||||
gossip1 = GossipSub(nodes[0])
|
||||
gossip2 = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
"foobar" in gossip1.topics
|
||||
"foobar" in gossip2.topics
|
||||
|
||||
"foobar" in gossip1.gossipsub
|
||||
"foobar" in gossip2.gossipsub
|
||||
|
||||
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId) or
|
||||
gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
gossip2.gossipsub.hasPeerId("foobar", gossip1.peerInfo.peerId) or
|
||||
gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
|
||||
|
||||
asyncTest "GossipSub invalid topic subscription":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# We must subscribe before setting the validator
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
|
||||
var gossip = GossipSub(nodes[0])
|
||||
let invalidDetected = newFuture[void]()
|
||||
gossip.subscriptionValidator = proc(topic: string): bool =
|
||||
if topic == "foobar":
|
||||
try:
|
||||
invalidDetected.complete()
|
||||
except:
|
||||
raise newException(Defect, "Exception during subscriptionValidator")
|
||||
false
|
||||
else:
|
||||
true
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await invalidDetected.wait(10.seconds)
|
||||
|
||||
asyncTest "GossipSub test directPeers":
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await GossipSub(nodes[0]).addDirectPeer(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
let invalidDetected = newFuture[void]()
|
||||
GossipSub(nodes[0]).subscriptionValidator = proc(topic: string): bool =
|
||||
if topic == "foobar":
|
||||
try:
|
||||
invalidDetected.complete()
|
||||
except:
|
||||
raise newException(Defect, "Exception during subscriptionValidator")
|
||||
false
|
||||
else:
|
||||
true
|
||||
|
||||
# Do not connect the nodes explicitly: the connection should happen through the direct peer
|
||||
### await connectNodesStar(nodes)
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await invalidDetected.wait(10.seconds)
|
||||
|
||||
asyncTest "mesh and gossipsub updated when topic subscribed and unsubscribed":
|
||||
let
|
||||
numberOfNodes = 5
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# When all of them are connected and subscribed to the same topic
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then mesh and gossipsub should be populated
|
||||
for node in nodes:
|
||||
check node.topics.contains(topic)
|
||||
check node.gossipsub.hasKey(topic)
|
||||
check node.gossipsub[topic].len() == numberOfNodes - 1
|
||||
check node.mesh.hasKey(topic)
|
||||
check node.mesh[topic].len() == numberOfNodes - 1
|
||||
|
||||
# When all nodes unsubscribe from the topic
|
||||
unsubscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the topic should be removed from mesh and gossipsub
|
||||
for node in nodes:
|
||||
check topic notin node.topics
|
||||
check topic notin node.mesh
|
||||
check topic notin node.gossipsub
|
||||
|
||||
asyncTest "handle subscribe and unsubscribe for multiple topics":
|
||||
let
|
||||
numberOfNodes = 3
|
||||
topics = @["foobar1", "foobar2", "foobar3"]
|
||||
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# When nodes subscribe to multiple topics
|
||||
await connectNodesStar(nodes)
|
||||
for topic in topics:
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then all nodes should be subscribed to the topics initially
|
||||
for i in 0 ..< topics.len:
|
||||
let topic = topics[i]
|
||||
checkUntilTimeout:
|
||||
nodes.allIt(it.topics.contains(topic))
|
||||
nodes.allIt(it.gossipsub.getOrDefault(topic).len() == numberOfNodes - 1)
|
||||
nodes.allIt(it.mesh.getOrDefault(topic).len() == numberOfNodes - 1)
|
||||
|
||||
# When they unsubscribe from all topics
|
||||
for topic in topics:
|
||||
unsubscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
# Then topics should be removed from mesh and gossipsub
|
||||
for i in 0 ..< topics.len:
|
||||
let topic = topics[i]
|
||||
checkUntilTimeout:
|
||||
nodes.allIt(not it.topics.contains(topic))
|
||||
nodes.allIt(topic notin it.gossipsub)
|
||||
nodes.allIt(topic notin it.mesh)
|
||||
|
||||
asyncTest "Unsubscribe backoff":
|
||||
const
|
||||
numberOfNodes = 3
|
||||
topic = "foobar"
|
||||
unsubscribeBackoff = 1.seconds # 1s is the minimum
|
||||
let nodes = generateNodes(
|
||||
numberOfNodes, gossip = true, unsubscribeBackoff = unsubscribeBackoff
|
||||
)
|
||||
.toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Nodes are connected to Node0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(nodes[0], nodes[i])
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
nodes[0].mesh[topic].len == numberOfNodes - 1
|
||||
|
||||
# When Node0 unsubscribes from the topic
|
||||
nodes[0].unsubscribe(topic, voidTopicHandler)
|
||||
|
||||
# And subscribes back straight away
|
||||
nodes[0].subscribe(topic, voidTopicHandler)
|
||||
|
||||
# Then its mesh is pruned and peers have applied unsubscribeBackoff
|
||||
# Waiting more than one heartbeat (60ms) and less than unsubscribeBackoff (1s)
|
||||
await sleepAsync(unsubscribeBackoff.div(2))
|
||||
check:
|
||||
not nodes[0].mesh.hasKey(topic)
|
||||
|
||||
# When unsubscribeBackoff period is done
|
||||
await sleepAsync(unsubscribeBackoff)
|
||||
|
||||
# Then on the next heartbeat mesh is rebalanced and peers are regrafted
|
||||
check:
|
||||
nodes[0].mesh[topic].len == numberOfNodes - 1
|
||||
|
||||
asyncTest "Prune backoff":
|
||||
const
|
||||
numberOfNodes = 9
|
||||
topic = "foobar"
|
||||
pruneBackoff = 1.seconds # 1s is the minimum
|
||||
dValues = some(
|
||||
DValues(
|
||||
dLow: some(6),
|
||||
dHigh: some(8),
|
||||
d: some(6),
|
||||
dLazy: some(6),
|
||||
dScore: some(4),
|
||||
dOut: some(2),
|
||||
)
|
||||
)
|
||||
let
|
||||
nodes = generateNodes(
|
||||
numberOfNodes, gossip = true, dValues = dValues, pruneBackoff = pruneBackoff
|
||||
)
|
||||
.toGossipSub()
|
||||
node0 = nodes[0]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Nodes are connected to Node0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(node0, nodes[i])
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
checkUntilTimeout:
|
||||
node0.mesh.getOrDefault(topic).len == numberOfNodes - 1
|
||||
|
||||
# When DValues of Node0 are updated to lower than initial dValues
|
||||
const newDValues = some(
|
||||
DValues(
|
||||
dLow: some(2),
|
||||
dHigh: some(4),
|
||||
d: some(3),
|
||||
dLazy: some(3),
|
||||
dScore: some(2),
|
||||
dOut: some(2),
|
||||
)
|
||||
)
|
||||
node0.parameters.applyDValues(newDValues)
|
||||
|
||||
# Then Node0 mesh is pruned to newDValues.dHigh length
|
||||
# And pruned peers have applied pruneBackoff
|
||||
checkUntilTimeout:
|
||||
node0.mesh.getOrDefault(topic).len == newDValues.get.dHigh.get
|
||||
|
||||
# When DValues of Node0 are updated back to the initial dValues
|
||||
node0.parameters.applyDValues(dValues)
|
||||
|
||||
# Waiting more than one heartbeat (60ms) and less than pruneBackoff (1s)
|
||||
await sleepAsync(pruneBackoff.div(2))
|
||||
check:
|
||||
node0.mesh.getOrDefault(topic).len == newDValues.get.dHigh.get
|
||||
|
||||
# When pruneBackoff period is done
|
||||
await sleepAsync(pruneBackoff)
|
||||
|
||||
# Then on the next heartbeat mesh is rebalanced and peers are regrafted to the initial d value
|
||||
check:
|
||||
node0.mesh.getOrDefault(topic).len == dValues.get.d.get
|
||||
302
tests/pubsub/integration/testgossipsubmessagecache.nim
Normal file
@@ -0,0 +1,302 @@
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils]
|
||||
import stew/byteutils
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, floodsub]
|
||||
import ../../../libp2p/protocols/pubsub/rpc/[messages, message]
|
||||
import ../../helpers
|
||||
|
||||
suite "GossipSub Integration - Message Cache":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "Received messages are added to the message cache":
|
||||
const
|
||||
numberOfNodes = 2
|
||||
topic = "foobar"
|
||||
let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# When Node0 publishes a message to the topic
|
||||
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
|
||||
|
||||
# Then Node1 receives the message and saves it in the cache
|
||||
checkUntilTimeout:
|
||||
nodes[1].mcache.window(topic).len == 1
|
||||
|
||||
asyncTest "Message cache history shifts on heartbeat and is cleared on shift":
|
||||
const
|
||||
numberOfNodes = 2
|
||||
topic = "foobar"
|
||||
historyGossip = 3 # mcache window
|
||||
historyLength = 5
|
||||
let nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
historyGossip = historyGossip,
|
||||
historyLength = historyLength,
|
||||
)
|
||||
.toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# When Node0 publishes a message to the topic
|
||||
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
|
||||
|
||||
# Then Node1 receives the message and saves it in the cache
|
||||
checkUntilTimeout:
|
||||
nodes[1].mcache.window(topic).len == 1
|
||||
|
||||
let messageId = nodes[1].mcache.window(topic).toSeq()[0]
|
||||
|
||||
# When heartbeat happens, circular history shifts to the next position
|
||||
# Waiting for 5(historyLength) heartbeats
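# (the mcache keeps historyLength slots, but only the most recent historyGossip slots are advertised via IHAVE)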
|
||||
await waitForHeartbeat(historyLength)
|
||||
|
||||
# Then history is cleared when the position with the message is reached again
|
||||
# And message is removed
|
||||
check:
|
||||
nodes[1].mcache.window(topic).len == 0
|
||||
not nodes[1].mcache.contains(messageId)
|
||||
|
||||
asyncTest "IHave propagation capped by history window":
|
||||
# 3 Nodes, Node 0 <==> Node 1 and Node 0 <==> Node 2
|
||||
# due to DValues: 1 peer in mesh and 1 peer only in gossip of Node 0
|
||||
const
|
||||
numberOfNodes = 3
|
||||
topic = "foobar"
|
||||
historyGossip = 3 # mcache window
|
||||
historyLength = 5
|
||||
let nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
historyGossip = historyGossip,
|
||||
historyLength = historyLength,
|
||||
dValues =
|
||||
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
|
||||
)
|
||||
.toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(nodes[0], nodes[i])
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Add observer to NodeOutsideMesh for received IHave messages
|
||||
var (receivedIHaves, checkForIHaves) = createCheckForIHave()
|
||||
let peerOutsideMesh =
|
||||
nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
|
||||
let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)
|
||||
nodeOutsideMesh.addOnRecvObserver(checkForIHaves)
|
||||
|
||||
# When NodeInsideMesh sends a message to the topic
|
||||
let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
|
||||
let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
|
||||
tryPublish await nodeInsideMesh.publish(topic, newSeq[byte](1000)), 1
|
||||
|
||||
# On each heartbeat, Node0 retrieves messages in its mcache and sends IHave to NodeOutsideMesh
|
||||
# On heartbeat, Node0 mcache advances to the next position (rotating the message cache window)
|
||||
# Node0 will gossip about messages from the last few positions, depending on the mcache window size (historyGossip)
|
||||
# By waiting more than 'historyGossip' (2x3 = 6) heartbeats, we ensure Node0 does not send IHave messages for messages older than the window size
|
||||
await waitForHeartbeat(2 * historyGossip)
|
||||
|
||||
# Then nodeOutsideMesh receives 3 (historyGossip) IHave messages
|
||||
check:
|
||||
receivedIHaves[].len == historyGossip
|
||||
|
||||
asyncTest "Message is retrieved from cache when handling IWant and relayed to a peer outside the mesh":
# 3 Nodes, Node 0 <==> Node 1 and Node 0 <==> Node 2
# due to DValues: 1 peer in mesh and 1 peer only in gossip of Node 0
const
numberOfNodes = 3
topic = "foobar"
historyGossip = 3 # mcache window
historyLength = 5
let nodes = generateNodes(
numberOfNodes,
gossip = true,
historyGossip = historyGossip,
historyLength = historyLength,
dValues =
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
)
.toGossipSub()

startNodesAndDeferStop(nodes)

for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()

# Add observer to Node0 for received IWant messages
var (receivedIWantsNode0, checkForIWant) = createCheckForIWant()
nodes[0].addOnRecvObserver(checkForIWant)

# Find Peer outside of mesh to which Node 0 will relay received message
let peerOutsideMesh =
nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)

# Add observer to NodeOutsideMesh for received messages
var (receivedMessagesNodeOutsideMesh, checkForMessage) = createCheckForMessages()
nodeOutsideMesh.addOnRecvObserver(checkForMessage)

# When NodeInsideMesh publishes a message to the topic
let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
tryPublish await nodeInsideMesh.publish(topic, "Hello!".toBytes()), 1

# Then Node0 receives the message from NodeInsideMesh and saves it in its cache
checkUntilTimeout:
nodes[0].mcache.window(topic).len == 1
let messageId = nodes[0].mcache.window(topic).toSeq()[0]

# When Node0 sends an IHave message to NodeOutsideMesh during a heartbeat
# Then NodeOutsideMesh responds with an IWant message to Node0
checkUntilTimeout:
receivedIWantsNode0[].anyIt(messageId in it.messageIDs)

# When Node0 handles the IWant message, it retrieves the message from its message cache using the MessageId
# Then Node0 relays the original message to NodeOutsideMesh
checkUntilTimeout:
messageId in
receivedMessagesNodeOutsideMesh[].mapIt(
nodeOutsideMesh.msgIdProvider(it).value()
)

asyncTest "Published and received messages are added to the seen cache":
const
numberOfNodes = 2
topic = "foobar"
let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()

startNodesAndDeferStop(nodes)

await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()

# When Node0 publishes a message to the topic
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1

# Then Node1 receives the message
# Get messageId from mcache
checkUntilTimeout:
nodes[1].mcache.window(topic).len == 1
let messageId = nodes[1].mcache.window(topic).toSeq()[0]

# And both nodes save it in their seen cache
# Node0 when publishing, Node1 when receiving
check:
nodes[0].hasSeen(nodes[0].salt(messageId))
nodes[1].hasSeen(nodes[1].salt(messageId))

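For reference, the salt/hasSeen/addSeen calls in the check above operate on the node's salted seen cache; addSeen reports whether the salted id was already present and inserts it otherwise. A minimal sketch of the dedup decision these tests exercise, assuming that behaviour (shouldProcess is a hypothetical helper, not a library proc):

import libp2p/protocols/pubsub/gossipsub
import libp2p/protocols/pubsub/rpc/messages

proc shouldProcess(node: GossipSub, messageId: MessageId): bool =
  # Assumption: addSeen returns true when the salted id was already cached,
  # and inserts it when it was not.
  let salted = node.salt(messageId)
  if node.addSeen(salted):
    return false # duplicate: dropped, as in the "already in seen cache" tests below
  true
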
asyncTest "Received messages are dropped if they are already in seen cache":
# 3 Nodes, Node 0 <==> Node 1 and Node 2 not connected and not subscribed yet
const
numberOfNodes = 3
topic = "foobar"
let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()

startNodesAndDeferStop(nodes)

await connectNodes(nodes[0], nodes[1])
nodes[0].subscribe(topic, voidTopicHandler)
nodes[1].subscribe(topic, voidTopicHandler)
await waitForHeartbeat()

# When Node0 publishes two messages to the topic
tryPublish await nodes[0].publish(topic, "Hello".toBytes()), 1
tryPublish await nodes[0].publish(topic, "World".toBytes()), 1

# Then Node1 receives the messages
# Getting messageIds from mcache
checkUntilTimeout:
nodes[1].mcache.window(topic).len == 2

let messageId1 = nodes[1].mcache.window(topic).toSeq()[0]
let messageId2 = nodes[1].mcache.window(topic).toSeq()[1]

# And Node2 doesn't receive any messages yet
check:
nodes[2].mcache.window(topic).len == 0

# When Node2 connects with Node0 and subscribes to the topic
await connectNodes(nodes[0], nodes[2])
nodes[2].subscribe(topic, voidTopicHandler)
await waitForHeartbeat()

# And messageIds are added to node0PeerNode2 sentIHaves to allow processing IWant
let node0PeerNode2 = nodes[0].getPeerByPeerId(topic, nodes[2].peerInfo.peerId)
node0PeerNode2.sentIHaves[0].incl(messageId1)
node0PeerNode2.sentIHaves[0].incl(messageId2)

# And messageId1 is added to the seen messages cache of Node2
check:
not nodes[2].addSeen(nodes[2].salt(messageId1))

# And Node2 sends IWant to Node0 requesting both messages
let iWantMessage =
ControlMessage(iwant: @[ControlIWant(messageIDs: @[messageId1, messageId2])])
let node2PeerNode0 = nodes[2].getPeerByPeerId(topic, nodes[0].peerInfo.peerId)
nodes[2].broadcast(
@[node2PeerNode0], RPCMsg(control: some(iWantMessage)), isHighPriority = false
)

await waitForHeartbeat()

# Then Node2 receives only messageId2 and messageId1 is dropped
check:
nodes[2].mcache.window(topic).len == 1
nodes[2].mcache.window(topic).toSeq()[0] == messageId2

asyncTest "Published messages are dropped if they are already in seen cache":
func customMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
ok("fixed_message_id_string".toBytes())

const
numberOfNodes = 2
topic = "foobar"
let nodes = generateNodes(
numberOfNodes, gossip = true, msgIdProvider = customMsgIdProvider
)
.toGossipSub()

startNodesAndDeferStop(nodes)

await connectNodesStar(nodes)
nodes.subscribeAllNodes(topic, voidTopicHandler)
await waitForHeartbeat()

# Given Node0 has msgId already in seen cache
let data = "Hello".toBytes()
let msg = Message.init(
some(nodes[0].peerInfo), data, topic, some(nodes[0].msgSeqno), nodes[0].sign
)
let msgId = nodes[0].msgIdProvider(msg)

check:
not nodes[0].addSeen(nodes[0].salt(msgId.value()))

# When Node0 publishes the message to the topic
discard await nodes[0].publish(topic, data)

await waitForHeartbeat()

# Then Node1 doesn't receive the message
check:
nodes[1].mcache.window(topic).len == 0

@@ -11,13 +11,12 @@

import std/[sequtils, enumerate]
import stew/byteutils
import utils
import sugar
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, timedcache]
import ../../libp2p/protocols/pubsub/rpc/[message, protobuf]
import ../../libp2p/muxers/muxer
import ../helpers, ../utils/[futures]
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, timedcache]
import ../../../libp2p/protocols/pubsub/rpc/[message]
import ../../helpers, ../../utils/[futures]

const MsgIdSuccess = "msg id gen success"

@@ -73,78 +72,11 @@ proc createMessages(
|
||||
|
||||
return (iwantMessageIds, sentMessages)
|
||||
|
||||
suite "GossipSub Message Handling":
|
||||
suite "GossipSub Integration - Message Handling":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "Drop messages of topics without subscription":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
check false
|
||||
|
||||
let topic = "foobar"
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 30:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
inc seqno
|
||||
let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
|
||||
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
|
||||
|
||||
check gossipSub.mcache.msgs.len == 0
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "subscription limits":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
gossipSub.topicsHigh = 10
|
||||
|
||||
var tooManyTopics: seq[string]
|
||||
for i in 0 .. gossipSub.topicsHigh + 10:
|
||||
tooManyTopics &= "topic" & $i
|
||||
let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)
|
||||
|
||||
let conn = TestBufferStream.new(noop)
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))
|
||||
|
||||
check:
|
||||
gossipSub.gossipsub.len == gossipSub.topicsHigh
|
||||
peer.behaviourPenalty > 0.0
|
||||
|
||||
await conn.close()
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "invalid message bytes":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
expect(CatchableError):
|
||||
await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])
|
||||
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
|
||||
asyncTest "Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
|
||||
# This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
|
||||
# Expected: Both messages should be received.
|
||||
let (gossip0, gossip1, receivedMessages) = await setupTest()
|
||||
@@ -171,7 +103,7 @@ suite "GossipSub Message Handling":
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
|
||||
asyncTest "Discard IWANT replies when both messages individually exceed maxSize":
|
||||
# This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
|
||||
# Expected: No messages should be received.
|
||||
let (gossip0, gossip1, receivedMessages) = await setupTest()
|
||||
@@ -198,7 +130,7 @@ suite "GossipSub Message Handling":
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
|
||||
asyncTest "Process IWANT replies when both messages are below maxSize":
|
||||
# This test checks if two messages, both below the maxSize, are correctly processed and sent.
|
||||
# Expected: Both messages should be received.
|
||||
let (gossip0, gossip1, receivedMessages) = await setupTest()
|
||||
@@ -225,7 +157,7 @@ suite "GossipSub Message Handling":
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
|
||||
asyncTest "Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
|
||||
# This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
|
||||
# Expected: Only the smaller message should be received.
|
||||
let (gossip0, gossip1, receivedMessages) = await setupTest()
|
||||
@@ -264,7 +196,7 @@ suite "GossipSub Message Handling":
|
||||
let
|
||||
numberOfNodes = 3
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true)
|
||||
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
@@ -279,9 +211,9 @@ suite "GossipSub Message Handling":
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, @[handler0, handler1, handler2])
|
||||
await waitForPeersInTable(
|
||||
nodes, topic, newSeqWith(numberOfNodes, 2), PeerTableType.Mesh
|
||||
)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes.allIt(it.mesh.getOrDefault(topic).len == numberOfNodes - 1)
|
||||
|
||||
# When node 0 sends a message
|
||||
check (await nodes[0].publish(topic, "Hello!".toBytes())) == 2
|
||||
@@ -441,9 +373,6 @@ suite "GossipSub Message Handling":
|
||||
sendCounter = 0
|
||||
validatedCounter = 0
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
proc onRecv(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
inc recvCounter
|
||||
|
||||
@@ -463,8 +392,8 @@ suite "GossipSub Message Handling":
|
||||
|
||||
nodes[0].addObserver(obs0)
|
||||
nodes[1].addObserver(obs1)
|
||||
nodes[1].subscribe("foo", handler)
|
||||
nodes[1].subscribe("bar", handler)
|
||||
nodes[1].subscribe("foo", voidTopicHandler)
|
||||
nodes[1].subscribe("bar", voidTopicHandler)
|
||||
|
||||
proc validator(
|
||||
topic: string, message: Message
|
||||
@@ -484,12 +413,12 @@ suite "GossipSub Message Handling":
|
||||
# Send message that will be rejected by the receiver's validator
|
||||
tryPublish await nodes[0].publish("bar", "Hello!".toBytes()), 1
|
||||
|
||||
check:
|
||||
checkUntilTimeout:
|
||||
recvCounter == 2
|
||||
validatedCounter == 1
|
||||
sendCounter == 2
|
||||
|
||||
asyncTest "e2e - GossipSub send over mesh A -> B":
|
||||
asyncTest "GossipSub send over mesh A -> B":
|
||||
var passed: Future[bool] = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
@@ -519,7 +448,7 @@ suite "GossipSub Message Handling":
|
||||
gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
|
||||
not gossip2.fanout.hasPeerId("foobar", gossip1.peerInfo.peerId)
|
||||
|
||||
asyncTest "e2e - GossipSub should not send to source & peers who already seen":
|
||||
asyncTest "GossipSub should not send to source & peers who already seen":
|
||||
# 3 nodes: A, B, C
|
||||
# A publishes, C relays, B is having a long validation
|
||||
# so B should not send to anyone
|
||||
@@ -585,7 +514,7 @@ suite "GossipSub Message Handling":
|
||||
|
||||
await bFinished
|
||||
|
||||
asyncTest "e2e - GossipSub send over floodPublish A -> B":
|
||||
asyncTest "GossipSub send over floodPublish A -> B":
|
||||
var passed: Future[bool] = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
@@ -615,7 +544,7 @@ suite "GossipSub Message Handling":
|
||||
"foobar" notin gossip2.gossipsub
|
||||
not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
asyncTest "e2e - GossipSub floodPublish limit":
|
||||
asyncTest "GossipSub floodPublish limit":
|
||||
let
|
||||
nodes = setupNodes(20)
|
||||
gossip1 = GossipSub(nodes[0])
|
||||
@@ -627,7 +556,7 @@ suite "GossipSub Message Handling":
|
||||
await connectNodes(nodes[1 ..^ 1], nodes[0])
|
||||
await baseTestProcedure(nodes, gossip1, gossip1.parameters.dLow, 17)
|
||||
|
||||
asyncTest "e2e - GossipSub floodPublish limit with bandwidthEstimatebps = 0":
|
||||
asyncTest "GossipSub floodPublish limit with bandwidthEstimatebps = 0":
|
||||
let
|
||||
nodes = setupNodes(20)
|
||||
gossip1 = GossipSub(nodes[0])
|
||||
@@ -640,7 +569,7 @@ suite "GossipSub Message Handling":
|
||||
await connectNodes(nodes[1 ..^ 1], nodes[0])
|
||||
await baseTestProcedure(nodes, gossip1, nodes.len - 1, nodes.len - 1)
|
||||
|
||||
asyncTest "e2e - GossipSub with multiple peers":
|
||||
asyncTest "GossipSub with multiple peers":
|
||||
var runs = 10
|
||||
|
||||
let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
|
||||
@@ -682,7 +611,7 @@ suite "GossipSub Message Handling":
|
||||
check:
|
||||
"foobar" in gossip.gossipsub
|
||||
|
||||
asyncTest "e2e - GossipSub with multiple peers (sparse)":
|
||||
asyncTest "GossipSub with multiple peers (sparse)":
|
||||
var runs = 10
|
||||
|
||||
let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
|
||||
@@ -731,7 +660,7 @@ suite "GossipSub Message Handling":
|
||||
gossip.fanout.len == 0
|
||||
gossip.mesh["foobar"].len > 0
|
||||
|
||||
asyncTest "e2e - GossipSub with multiple peers - control deliver (sparse)":
|
||||
asyncTest "GossipSub with multiple peers - control deliver (sparse)":
|
||||
var runs = 10
|
||||
|
||||
let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
|
||||
381
tests/pubsub/integration/testgossipsubscoring.nim
Normal file
@@ -0,0 +1,381 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils]
|
||||
import stew/byteutils
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, pubsubpeer]
|
||||
import ../../../libp2p/protocols/pubsub/rpc/[messages]
|
||||
import ../../helpers
|
||||
import ../../utils/[futures]
|
||||
|
||||
suite "GossipSub Integration - Scoring":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "Flood publish to all peers with score above threshold, regardless of subscription":
|
||||
let
|
||||
numberOfNodes = 3
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, floodPublish = true)
|
||||
g0 = GossipSub(nodes[0])
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Nodes 1 and 2 are connected to node 0
|
||||
await connectNodes(nodes[0], nodes[1])
|
||||
await connectNodes(nodes[0], nodes[2])
|
||||
|
||||
let (handlerFut1, handler1) = createCompleteHandler()
|
||||
let (handlerFut2, handler2) = createCompleteHandler()
|
||||
|
||||
# Nodes are subscribed to the same topic
|
||||
nodes[1].subscribe(topic, handler1)
|
||||
nodes[2].subscribe(topic, handler2)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Given node 2's score is below the threshold
|
||||
for peer in g0.gossipsub.getOrDefault(topic):
|
||||
if peer.peerId == nodes[2].peerInfo.peerId:
|
||||
peer.score = (g0.parameters.publishThreshold - 1)
|
||||
|
||||
# When node 0 publishes a message to the topic
|
||||
let message = "Hello!".toBytes()
|
||||
tryPublish await nodes[0].publish(topic, message), 1
|
||||
|
||||
# Then only node 1 should receive the message
|
||||
let results = await waitForStates(@[handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
|
||||
check:
|
||||
results[0].isCompleted(true)
|
||||
results[1].isPending()
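
The score tweak above works because flood publish only targets peers whose score clears publishThreshold, whether or not they are in the mesh. A simplified sketch of that gating (floodPublishCandidates is a hypothetical helper, not the library's actual publish path):

import std/[sets, tables]
import libp2p/protocols/pubsub/[gossipsub, pubsubpeer, peertable]

proc floodPublishCandidates(g: GossipSub, topic: string): seq[PubSubPeer] =
  # Peers below publishThreshold are skipped, as node 2 is in the test above.
  for peer in g.gossipsub.getOrDefault(topic):
    if peer.score >= g.parameters.publishThreshold:
      result.add(peer)
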
|
||||
|
||||
asyncTest "Should not rate limit decodable messages below the size allowed":
|
||||
const topic = "foobar"
|
||||
let
|
||||
nodes = generateNodes(
|
||||
2,
|
||||
gossip = true,
|
||||
overheadRateLimit = Opt.some((20, 1.millis)),
|
||||
verifySignature = false,
|
||||
# Avoid being disconnected by failing signature verification
|
||||
)
|
||||
.toGossipSub()
|
||||
rateLimitHits = currentRateLimitHits()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
nodes[0].broadcast(
|
||||
nodes[0].mesh[topic],
|
||||
RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](10))]),
|
||||
isHighPriority = true,
|
||||
)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
currentRateLimitHits() == rateLimitHits
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
|
||||
|
||||
# Disconnect peer when rate limiting is enabled
|
||||
nodes[1].parameters.disconnectPeerAboveRateLimit = true
|
||||
nodes[0].broadcast(
|
||||
nodes[0].mesh["foobar"],
|
||||
RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](12))]),
|
||||
isHighPriority = true,
|
||||
)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
|
||||
currentRateLimitHits() == rateLimitHits
|
||||
|
||||
asyncTest "Should rate limit undecodable messages above the size allowed":
|
||||
const topic = "foobar"
|
||||
let
|
||||
nodes = generateNodes(
|
||||
2,
|
||||
gossip = true,
|
||||
overheadRateLimit = Opt.some((20, 1.millis)),
|
||||
verifySignature = false,
|
||||
# Avoid being disconnected by failing signature verification
|
||||
)
|
||||
.toGossipSub()
|
||||
rateLimitHits = currentRateLimitHits()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Simulate sending an undecodable message
|
||||
await nodes[1].peers[nodes[0].switch.peerInfo.peerId].sendEncoded(
|
||||
newSeqWith(33, 1.byte), isHighPriority = true
|
||||
)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
currentRateLimitHits() == rateLimitHits + 1
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
|
||||
|
||||
# Disconnect peer when rate limiting is enabled
|
||||
nodes[1].parameters.disconnectPeerAboveRateLimit = true
|
||||
await nodes[0].peers[nodes[1].switch.peerInfo.peerId].sendEncoded(
|
||||
newSeqWith(35, 1.byte), isHighPriority = true
|
||||
)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
|
||||
currentRateLimitHits() == rateLimitHits + 2
|
||||
|
||||
asyncTest "Should rate limit decodable messages above the size allowed":
|
||||
const topic = "foobar"
|
||||
let
|
||||
nodes = generateNodes(
|
||||
2,
|
||||
gossip = true,
|
||||
overheadRateLimit = Opt.some((20, 1.millis)),
|
||||
verifySignature = false,
|
||||
# Avoid being disconnected by failing signature verification
|
||||
)
|
||||
.toGossipSub()
|
||||
rateLimitHits = currentRateLimitHits()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
let msg = RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(
|
||||
prune:
|
||||
@[
|
||||
ControlPrune(
|
||||
topicID: topic,
|
||||
peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))],
|
||||
backoff: 123'u64,
|
||||
)
|
||||
]
|
||||
)
|
||||
)
|
||||
)
|
||||
nodes[0].broadcast(nodes[0].mesh[topic], msg, isHighPriority = true)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
currentRateLimitHits() == rateLimitHits + 1
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
|
||||
|
||||
# Disconnect peer when rate limiting is enabled
|
||||
nodes[1].parameters.disconnectPeerAboveRateLimit = true
|
||||
let msg2 = RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(
|
||||
prune:
|
||||
@[
|
||||
ControlPrune(
|
||||
topicID: topic,
|
||||
peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))],
|
||||
backoff: 123'u64,
|
||||
)
|
||||
]
|
||||
)
|
||||
)
|
||||
)
|
||||
nodes[0].broadcast(nodes[0].mesh[topic], msg2, isHighPriority = true)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
|
||||
currentRateLimitHits() == rateLimitHits + 2
|
||||
|
||||
asyncTest "Should rate limit invalid messages above the size allowed":
|
||||
const topic = "foobar"
|
||||
let
|
||||
nodes = generateNodes(
|
||||
2,
|
||||
gossip = true,
|
||||
overheadRateLimit = Opt.some((20, 1.millis)),
|
||||
verifySignature = false,
|
||||
# Avoid being disconnected by failing signature verification
|
||||
)
|
||||
.toGossipSub()
|
||||
rateLimitHits = currentRateLimitHits()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
proc execValidator(
|
||||
topic: string, message: messages.Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
return ValidationResult.Reject
|
||||
|
||||
nodes[0].addValidator(topic, execValidator)
|
||||
nodes[1].addValidator(topic, execValidator)
|
||||
|
||||
let msg = RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](40))])
|
||||
|
||||
nodes[0].broadcast(nodes[0].mesh[topic], msg, isHighPriority = true)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
currentRateLimitHits() == rateLimitHits + 1
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
|
||||
|
||||
# Disconnect peer when rate limiting is enabled
|
||||
nodes[1].parameters.disconnectPeerAboveRateLimit = true
|
||||
nodes[0].broadcast(
|
||||
nodes[0].mesh[topic],
|
||||
RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](35))]),
|
||||
isHighPriority = true,
|
||||
)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
|
||||
currentRateLimitHits() == rateLimitHits + 2
|
||||
|
||||
asyncTest "DirectPeers: don't kick direct peer with low score":
|
||||
const topic = "foobar"
|
||||
let nodes = generateNodes(2, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await nodes.addDirectPeerStar()
|
||||
|
||||
nodes[1].parameters.disconnectBadPeers = true
|
||||
nodes[1].parameters.graylistThreshold = 100000
|
||||
|
||||
var (handlerFut, handler) = createCompleteHandler()
|
||||
nodes[0].subscribe(topic, voidTopicHandler)
|
||||
nodes[1].subscribe(topic, handler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
nodes[1].updateScores()
|
||||
|
||||
# peer shouldn't be in our mesh
|
||||
check:
|
||||
topic notin nodes[1].mesh
|
||||
nodes[1].peerStats[nodes[0].switch.peerInfo.peerId].score <
|
||||
nodes[1].parameters.graylistThreshold
|
||||
|
||||
tryPublish await nodes[0].publish(topic, toBytes("hellow")), 1
|
||||
|
||||
# Without directPeers, this would fail
|
||||
var futResult = await waitForState(handlerFut)
|
||||
check:
|
||||
futResult.isCompleted(true)
|
||||
|
||||
asyncTest "Peers disconnections mechanics":
|
||||
const
|
||||
numberOfNodes = 10
|
||||
topic = "foobar"
|
||||
let nodes =
|
||||
generateNodes(numberOfNodes, gossip = true, triggerSelf = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
var seen: Table[string, int]
|
||||
var seenFut = newFuture[void]()
|
||||
for i in 0 ..< numberOfNodes:
|
||||
let dialer = nodes[i]
|
||||
var handler: TopicHandler
|
||||
closureScope:
|
||||
var peerName = $dialer.peerInfo.peerId
|
||||
handler = proc(topicName: string, data: seq[byte]) {.async.} =
|
||||
seen.mgetOrPut(peerName, 0).inc()
|
||||
check topicName == topic
|
||||
if not seenFut.finished() and seen.len >= numberOfNodes:
|
||||
seenFut.complete()
|
||||
|
||||
dialer.subscribe(topic, handler)
|
||||
|
||||
await waitSubGraph(nodes, topic)
|
||||
|
||||
# ensure peer stats are stored properly and kept properly
|
||||
check:
|
||||
nodes[0].peerStats.len == numberOfNodes - 1 # minus self
|
||||
|
||||
tryPublish await nodes[0].publish(topic, toBytes("hello")), 1
|
||||
|
||||
await seenFut.wait(2.seconds)
|
||||
check:
|
||||
seen.len >= numberOfNodes
|
||||
for k, v in seen.pairs:
|
||||
check:
|
||||
v >= 1
|
||||
|
||||
for node in nodes:
|
||||
check:
|
||||
topic in node.gossipsub
|
||||
node.fanout.len == 0
|
||||
node.mesh[topic].len > 0
|
||||
|
||||
# Removing some subscriptions
|
||||
|
||||
for i in 0 ..< numberOfNodes:
|
||||
if i mod 3 != 0:
|
||||
nodes[i].unsubscribeAll(topic)
|
||||
|
||||
# Waiting 2 heartbeats
|
||||
await nodes[0].waitForHeartbeatByEvent(2)
|
||||
|
||||
# ensure peer stats are stored properly and kept properly
|
||||
check:
|
||||
nodes[0].peerStats.len == numberOfNodes - 1 # minus self
|
||||
|
||||
# Adding subscriptions again
|
||||
for i in 0 ..< numberOfNodes:
|
||||
if i mod 3 != 0:
|
||||
nodes[i].subscribe(topic, voidTopicHandler)
|
||||
|
||||
# Waiting 2 heartbeats
|
||||
await nodes[0].waitForHeartbeatByEvent(2)
|
||||
|
||||
# ensure peer stats are stored properly and kept properly
|
||||
check:
|
||||
nodes[0].peerStats.len == numberOfNodes - 1 # minus self
|
||||
|
||||
asyncTest "DecayInterval":
|
||||
const
|
||||
topic = "foobar"
|
||||
decayInterval = 50.milliseconds
|
||||
let nodes =
|
||||
generateNodes(2, gossip = true, decayInterval = decayInterval).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
var (handlerFut, handler) = createCompleteHandler()
|
||||
nodes[0].subscribe(topic, voidTopicHandler)
|
||||
nodes[1].subscribe(topic, handler)
|
||||
|
||||
tryPublish await nodes[0].publish(topic, toBytes("hello")), 1
|
||||
|
||||
var futResult = await waitForState(handlerFut)
|
||||
check:
|
||||
futResult.isCompleted(true)
|
||||
|
||||
nodes[0].peerStats[nodes[1].peerInfo.peerId].topicInfos[topic].meshMessageDeliveries =
|
||||
100
|
||||
nodes[0].topicParams[topic].meshMessageDeliveriesDecay = 0.9
|
||||
|
||||
# We should have decayed 5 times, though allowing 4..6
|
||||
await sleepAsync(decayInterval * 5)
|
||||
check:
|
||||
nodes[0].peerStats[nodes[1].peerInfo.peerId].topicInfos[topic].meshMessageDeliveries in
|
||||
50.0 .. 66.0
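
The 50.0 .. 66.0 window follows from the decay factor: starting at 100 with meshMessageDeliveriesDecay = 0.9, four to six decay ticks give roughly 65.6, 59.0 and 53.1, so the expected five ticks sit comfortably inside the bound. A quick arithmetic check:

import std/math

# 100 * 0.9^n for n = 4, 5, 6 decay ticks
doAssert abs(100.0 * pow(0.9, 5) - 59.049) < 1e-6
doAssert 100.0 * pow(0.9, 4) < 66.0 # ~65.61
doAssert 100.0 * pow(0.9, 6) > 50.0 # ~53.14
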
|
||||
7
tests/pubsub/integration/testpubsubintegration.nim
Normal file
@@ -0,0 +1,7 @@
|
||||
{.used.}
|
||||
|
||||
import
|
||||
testfloodsub, testgossipsubcontrolmessages, testgossipsubcustomconn,
|
||||
testgossipsubfanout, testgossipsubgossip, testgossipsubheartbeat,
|
||||
testgossipsubmeshmanagement, testgossipsubmessagecache, testgossipsubmessagehandling,
|
||||
testgossipsubscoring
|
||||
588
tests/pubsub/testbehavior.nim
Normal file
@@ -0,0 +1,588 @@
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils]
|
||||
import stew/byteutils
|
||||
import utils
|
||||
import chronicles
|
||||
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../../libp2p/protocols/pubsub/rpc/[message]
|
||||
import ../helpers
|
||||
import ../utils/[futures]
|
||||
|
||||
const MsgIdSuccess = "msg id gen success"
|
||||
|
||||
suite "GossipSub Behavior":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "handleIHave - peers with no budget should not request messages":
|
||||
let topic = "foobar"
|
||||
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.subscribe(topic, voidTopicHandler)
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
# Add message to `gossipSub`'s message cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[^1].incl(id)
|
||||
|
||||
# Build an IHAVE message that contains the same message ID three times
|
||||
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
|
||||
|
||||
# Given the peer has no budget to request messages
|
||||
peer.iHaveBudget = 0
|
||||
|
||||
# When a peer makes an IHAVE request for a message that `gossipSub` has
|
||||
let iwants = gossipSub.handleIHave(peer, @[msg])
|
||||
|
||||
# Then `gossipSub` should not generate an IWant message for the message,
|
||||
check:
|
||||
iwants.messageIDs.len == 0
|
||||
gossipSub.mcache.msgs.len == 1
|
||||
|
||||
asyncTest "handleIHave - peers with budget should request messages":
|
||||
let topic = "foobar"
|
||||
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.subscribe(topic, voidTopicHandler)
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
# Add message to `gossipSub`'s message cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[^1].incl(id)
|
||||
|
||||
# Build an IHAVE message that contains the same message ID three times
|
||||
# If ids are repeated, only one request should be generated
|
||||
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
|
||||
|
||||
# Given the budget is not 0 (because it's not been overridden)
|
||||
check:
|
||||
peer.iHaveBudget > 0
|
||||
|
||||
# When a peer makes an IHAVE request for a message that `gossipSub` does not have
|
||||
let iwants = gossipSub.handleIHave(peer, @[msg])
|
||||
|
||||
# Then `gossipSub` should generate an IWant message for the message
|
||||
check:
|
||||
iwants.messageIDs.len == 1
|
||||
gossipSub.mcache.msgs.len == 1
|
||||
|
||||
asyncTest "handleIWant - peers with budget should request messages":
|
||||
let topic = "foobar"
|
||||
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.subscribe(topic, voidTopicHandler)
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
# Add message to `gossipSub`'s message cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[^1].incl(id)
|
||||
|
||||
# Build an IWANT message that contains the same message ID three times
|
||||
# If ids are repeated, only one request should be generated
|
||||
let msg = ControlIWant(messageIDs: @[id, id, id])
|
||||
|
||||
# When a peer makes an IWANT request for a message that `gossipSub` has
|
||||
let messages = gossipSub.handleIWant(peer, @[msg])
|
||||
|
||||
# Then `gossipSub` should return the message
|
||||
check:
|
||||
messages.len == 1
|
||||
gossipSub.mcache.msgs.len == 1
|
||||
|
||||
asyncTest "Max IDONTWANT messages per heartbeat per peer":
|
||||
# Given GossipSub node with 1 peer
|
||||
let
|
||||
topic = "foobar"
|
||||
totalPeers = 1
|
||||
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(totalPeers, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
let peer = peers[0]
|
||||
|
||||
# And a sequence of iDontWants with more messages than the max number (1200)
|
||||
proc generateMessageIds(count: int): seq[MessageId] =
|
||||
return (0 ..< count).mapIt(("msg_id_" & $it & $Moment.now()).toBytes())
|
||||
|
||||
let iDontWants =
|
||||
@[
|
||||
ControlIWant(messageIDs: generateMessageIds(600)),
|
||||
ControlIWant(messageIDs: generateMessageIds(600)),
|
||||
]
|
||||
|
||||
# When node handles iDontWants
|
||||
gossipSub.handleIDontWant(peer, iDontWants)
|
||||
|
||||
# Then it saves max IDontWantMaxCount messages in the history and the rest is dropped
|
||||
check:
|
||||
peer.iDontWants[0].len == IDontWantMaxCount
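
The cap asserted here means the newest iDontWants slot holds at most IDontWantMaxCount ids per peer per heartbeat; anything past that in the same heartbeat is dropped. A minimal sketch of that bounding behaviour (recordBounded is a hypothetical helper, not the library routine):

import std/sets

proc recordBounded[T](slot: var HashSet[T], incoming: seq[T], maxCount: int) =
  # Stop recording once the per-heartbeat slot reaches its cap.
  for id in incoming:
    if slot.len >= maxCount:
      break # excess ids in this heartbeat are dropped
    slot.incl(id)
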
|
||||
|
||||
asyncTest "`replenishFanout` Degree Lo":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
check gossipSub.gossipsub[topic].len == 15
|
||||
gossipSub.replenishFanout(topic)
|
||||
check gossipSub.fanout[topic].len == gossipSub.parameters.d
|
||||
|
||||
asyncTest "`dropFanoutPeers` drop expired fanout topics":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(6, topic, populateGossipsub = true, populateFanout = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
|
||||
await sleepAsync(5.millis) # allow the topic to expire
|
||||
|
||||
check gossipSub.fanout[topic].len == gossipSub.parameters.d
|
||||
|
||||
gossipSub.dropFanoutPeers()
|
||||
check topic notin gossipSub.fanout
|
||||
|
||||
asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
|
||||
let
|
||||
topic1 = "foobar1"
|
||||
topic2 = "foobar2"
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
|
||||
6, @[topic1, topic2], populateGossipsub = true, populateFanout = true
|
||||
)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
|
||||
gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
|
||||
await sleepAsync(5.millis) # allow first topic to expire
|
||||
|
||||
check gossipSub.fanout[topic1].len == gossipSub.parameters.d
|
||||
check gossipSub.fanout[topic2].len == gossipSub.parameters.d
|
||||
|
||||
gossipSub.dropFanoutPeers()
|
||||
check topic1 notin gossipSub.fanout
|
||||
check topic2 in gossipSub.fanout
|
||||
|
||||
asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(45, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i in 0 ..< 30:
|
||||
let peer = peers[i]
|
||||
if i mod 2 == 0:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
else:
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
# generate gossipsub (free standing) peers
|
||||
for i in 30 ..< 45:
|
||||
let peer = peers[i]
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = conns[i]
|
||||
inc seqno
|
||||
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
check gossipSub.fanout[topic].len == 15
|
||||
check gossipSub.mesh[topic].len == 15
|
||||
check gossipSub.gossipsub[topic].len == 15
|
||||
|
||||
let gossipPeers = gossipSub.getGossipPeers()
|
||||
check gossipPeers.len == gossipSub.parameters.d
|
||||
for p in gossipPeers.keys:
|
||||
check not gossipSub.fanout.hasPeerId(topic, p.peerId)
|
||||
check not gossipSub.mesh.hasPeerId(topic, p.peerId)
|
||||
|
||||
asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i, peer in peers:
|
||||
if i mod 2 == 0:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
else:
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = conns[i]
|
||||
inc seqno
|
||||
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let gossipPeers = gossipSub.getGossipPeers()
|
||||
check gossipPeers.len == gossipSub.parameters.d
|
||||
|
||||
asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i, peer in peers:
|
||||
if i mod 2 == 0:
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
gossipSub.grafted(peer, topic)
|
||||
else:
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = conns[i]
|
||||
inc seqno
|
||||
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let gossipPeers = gossipSub.getGossipPeers()
|
||||
check gossipPeers.len == gossipSub.parameters.d
|
||||
|
||||
asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i, peer in peers:
|
||||
if i mod 2 == 0:
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
gossipSub.grafted(peer, topic)
|
||||
else:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = conns[i]
|
||||
inc seqno
|
||||
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let gossipPeers = gossipSub.getGossipPeers()
|
||||
check gossipPeers.len == 0
|
||||
|
||||
asyncTest "`rebalanceMesh` Degree Lo":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len == gossipSub.parameters.d
|
||||
|
||||
asyncTest "rebalanceMesh - bad peers":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
var scoreLow = -11'f64
|
||||
for peer in peers:
|
||||
peer.score = scoreLow
|
||||
scoreLow += 1.0
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# low score peers should not be in mesh, that's why the count must be 4
|
||||
check gossipSub.mesh[topic].len == 4
|
||||
for peer in gossipSub.mesh[topic]:
|
||||
check peer.score >= 0.0
|
||||
|
||||
asyncTest "`rebalanceMesh` Degree Hi":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
check gossipSub.mesh[topic].len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len ==
|
||||
gossipSub.parameters.d + gossipSub.parameters.dScore
|
||||
|
||||
asyncTest "rebalanceMesh fail due to backoff":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
for peer in peers:
|
||||
gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
|
||||
peer.peerId, Moment.now() + 1.hours
|
||||
)
|
||||
let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
|
||||
# there must be a control prune due to violation of backoff
|
||||
check prunes.len != 0
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# expect 0 since they are all backing off
|
||||
check gossipSub.mesh[topic].len == 0
|
||||
|
||||
asyncTest "rebalanceMesh fail due to backoff - remote":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len != 0
|
||||
|
||||
for peer in peers:
|
||||
gossipSub.handlePrune(
|
||||
peer,
|
||||
@[
|
||||
ControlPrune(
|
||||
topicID: topic,
|
||||
peers: @[],
|
||||
backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
# expect topic cleaned up since they are all pruned
|
||||
check topic notin gossipSub.mesh
|
||||
|
||||
asyncTest "rebalanceMesh Degree Hi - audit scenario":
|
||||
let
|
||||
topic = "foobar"
|
||||
numInPeers = 6
|
||||
numOutPeers = 7
|
||||
totalPeers = numInPeers + numOutPeers
|
||||
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
|
||||
totalPeers, topic, populateGossipsub = true, populateMesh = true
|
||||
)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.parameters.dScore = 4
|
||||
gossipSub.parameters.d = 6
|
||||
gossipSub.parameters.dOut = 3
|
||||
gossipSub.parameters.dHigh = 12
|
||||
gossipSub.parameters.dLow = 4
|
||||
|
||||
for i in 0 ..< numInPeers:
|
||||
let conn = conns[i]
|
||||
let peer = peers[i]
|
||||
conn.transportDir = Direction.In
|
||||
peer.score = 40.0
|
||||
|
||||
for i in numInPeers ..< totalPeers:
|
||||
let conn = conns[i]
|
||||
let peer = peers[i]
|
||||
conn.transportDir = Direction.Out
|
||||
peer.score = 10.0
|
||||
|
||||
check gossipSub.mesh[topic].len == 13
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# ensure we are above dlow
|
||||
check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
|
||||
var outbound = 0
|
||||
for peer in gossipSub.mesh[topic]:
|
||||
if peer.sendConn.transportDir == Direction.Out:
|
||||
inc outbound
|
||||
# ensure we give priority and keep at least dOut outbound peers
|
||||
check outbound >= gossipSub.parameters.dOut
|
||||
|
||||
asyncTest "rebalanceMesh Degree Hi - dScore controls number of peers to retain by score when pruning":
|
||||
# Given GossipSub node starting with 13 peers in mesh
|
||||
let
|
||||
topic = "foobar"
|
||||
totalPeers = 13
|
||||
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
|
||||
totalPeers, topic, populateGossipsub = true, populateMesh = true
|
||||
)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# And mesh is larger than dHigh
|
||||
gossipSub.parameters.dLow = 4
|
||||
gossipSub.parameters.d = 6
|
||||
gossipSub.parameters.dHigh = 8
|
||||
gossipSub.parameters.dOut = 3
|
||||
gossipSub.parameters.dScore = 13
|
||||
|
||||
check gossipSub.mesh[topic].len == totalPeers
|
||||
|
||||
# When mesh is rebalanced
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
|
||||
# Then pruning is not triggered when the mesh is not larger than dScore
|
||||
check gossipSub.mesh[topic].len == totalPeers
|
||||
|
||||
asyncTest "GossipThreshold - do not handle IHave if peer score is below threshold":
|
||||
const
|
||||
topic = "foobar"
|
||||
gossipThreshold = -100.0
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# Given peer with score below GossipThreshold
|
||||
gossipSub.parameters.gossipThreshold = gossipThreshold
|
||||
peer.score = gossipThreshold - 100.0
|
||||
|
||||
# and IHave message
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
let msg = ControlIHave(topicID: topic, messageIDs: @[id])
|
||||
|
||||
# When IHave is handled
|
||||
let iWant = gossipSub.handleIHave(peer, @[msg])
|
||||
|
||||
# Then IHave is ignored
|
||||
check:
|
||||
iWant.messageIDs.len == 0
|
||||
|
||||
asyncTest "GossipThreshold - do not handle IWant if peer score is below threshold":
|
||||
const
|
||||
topic = "foobar"
|
||||
gossipThreshold = -100.0
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# Given peer with score below GossipThreshold
|
||||
gossipSub.parameters.gossipThreshold = gossipThreshold
|
||||
peer.score = gossipThreshold - 100.0
|
||||
|
||||
# and IWant message with MsgId in mcache and sentIHaves
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[0].incl(id)
|
||||
let msg = ControlIWant(messageIDs: @[id])
|
||||
|
||||
# When IWant is handled
|
||||
let messages = gossipSub.handleIWant(peer, @[msg])
|
||||
|
||||
# Then IWant is ignored
|
||||
check:
|
||||
messages.len == 0
|
||||
|
||||
asyncTest "GossipThreshold - do not trigger PeerExchange on Prune":
|
||||
const
|
||||
topic = "foobar"
|
||||
gossipThreshold = -100.0
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# Given peer with score below GossipThreshold
|
||||
gossipSub.parameters.gossipThreshold = gossipThreshold
|
||||
peer.score = gossipThreshold - 100.0
|
||||
|
||||
# and RoutingRecordsHandler added
|
||||
var routingRecordsFut = newFuture[void]()
|
||||
gossipSub.routingRecordsHandler.add(
|
||||
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
|
||||
routingRecordsFut.complete()
|
||||
)
|
||||
|
||||
# and Prune message
|
||||
let msg = ControlPrune(
|
||||
topicID: topic, peers: @[PeerInfoMsg(peerId: peer.peerId)], backoff: 123'u64
|
||||
)
|
||||
|
||||
# When Prune is handled
|
||||
gossipSub.handlePrune(peer, @[msg])
|
||||
|
||||
# Then handler is not triggered
|
||||
let result = await waitForState(routingRecordsFut, HEARTBEAT_TIMEOUT)
|
||||
check:
|
||||
result.isCancelled()
|
||||
|
||||
asyncTest "GossipThreshold - do not select peer for IHave broadcast if peer score is below threshold":
|
||||
const
|
||||
topic = "foobar"
|
||||
gossipThreshold = -100.0
|
||||
let
|
||||
(gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(1, topic, populateGossipsub = true)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# Given peer with score below GossipThreshold
|
||||
gossipSub.parameters.gossipThreshold = gossipThreshold
|
||||
peer.score = gossipThreshold - 100.0
|
||||
|
||||
# and message in cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message(topic: topic))
|
||||
|
||||
# When Node selects peers for IHave broadcast
|
||||
let gossipPeers = gossipSub.getGossipPeers()
|
||||
|
||||
# Then peer is not selected
|
||||
check:
|
||||
gossipPeers.len == 0
|
||||
|
||||
asyncTest "PublishThreshold - do not graft when peer score below threshold":
|
||||
const
|
||||
topic = "foobar"
|
||||
publishThreshold = -100.0
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# Given peer with score below publishThreshold
|
||||
gossipSub.parameters.publishThreshold = publishThreshold
|
||||
peer.score = publishThreshold - 100.0
|
||||
|
||||
# and Graft message
|
||||
let msg = ControlGraft(topicID: topic)
|
||||
|
||||
# When Graft is handled
|
||||
let prunes = gossipSub.handleGraft(peer, @[msg])
|
||||
|
||||
# Then peer is ignored and not added to prunes
|
||||
check:
|
||||
gossipSub.mesh[topic].len == 0
|
||||
prunes.len == 0
|
||||
95
tests/pubsub/testgossipsub.nim
Normal file
@@ -0,0 +1,95 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import chronicles
|
||||
import stew/byteutils
|
||||
import utils
|
||||
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../../libp2p/protocols/pubsub/rpc/[message, protobuf]
|
||||
import ../helpers
|
||||
|
||||
suite "GossipSub":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "subscribe/unsubscribeAll":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# test via dynamic dispatch
|
||||
gossipSub.PubSub.subscribe(topic, voidTopicHandler)
|
||||
|
||||
check:
|
||||
gossipSub.topics.contains(topic)
|
||||
gossipSub.gossipsub[topic].len() > 0
|
||||
gossipSub.mesh[topic].len() > 0
|
||||
|
||||
# test via dynamic dispatch
|
||||
gossipSub.PubSub.unsubscribeAll(topic)
|
||||
|
||||
check:
|
||||
topic notin gossipSub.topics # not in local topics
|
||||
topic notin gossipSub.mesh # not in mesh
|
||||
topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)
|
||||
|
||||
asyncTest "Drop messages of topics without subscription":
|
||||
let topic = "foobar"
|
||||
var (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = conns[i]
|
||||
let peer = peers[i]
|
||||
inc seqno
|
||||
let msg = Message.init(conn.peerId, ("bar" & $i).toBytes(), topic, some(seqno))
|
||||
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
|
||||
|
||||
check gossipSub.mcache.msgs.len == 0
|
||||
|
||||
asyncTest "subscription limits":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
gossipSub.topicsHigh = 10
|
||||
|
||||
var tooManyTopics: seq[string]
|
||||
for i in 0 .. gossipSub.topicsHigh + 10:
|
||||
tooManyTopics &= "topic" & $i
|
||||
let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)
|
||||
|
||||
let conn = TestBufferStream.new(noop)
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))
|
||||
|
||||
check:
|
||||
gossipSub.gossipsub.len == gossipSub.topicsHigh
|
||||
peer.behaviourPenalty > 0.0
|
||||
|
||||
await conn.close()
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "invalid message bytes":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
expect(CatchableError):
|
||||
await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])
|
||||
|
||||
await gossipSub.switch.stop()
|
||||
@@ -1,196 +0,0 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils]
|
||||
import stew/byteutils
|
||||
import chronicles
|
||||
import utils
|
||||
import ../../libp2p/protocols/pubsub/[gossipsub, peertable]
|
||||
import ../../libp2p/protocols/pubsub/rpc/[messages]
|
||||
import ../helpers
|
||||
|
||||
suite "GossipSub Fanout Management":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "`replenishFanout` Degree Lo":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
var peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
check gossipSub.gossipsub[topic].len == 15
|
||||
gossipSub.replenishFanout(topic)
|
||||
check gossipSub.fanout[topic].len == gossipSub.parameters.d
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`dropFanoutPeers` drop expired fanout topics":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
|
||||
await sleepAsync(5.millis) # allow the topic to expire
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 6:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
|
||||
check gossipSub.fanout[topic].len == gossipSub.parameters.d
|
||||
|
||||
gossipSub.dropFanoutPeers()
|
||||
check topic notin gossipSub.fanout
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic1 = "foobar1"
|
||||
let topic2 = "foobar2"
|
||||
gossipSub.topicParams[topic1] = TopicParams.init()
|
||||
gossipSub.topicParams[topic2] = TopicParams.init()
|
||||
gossipSub.fanout[topic1] = initHashSet[PubSubPeer]()
|
||||
gossipSub.fanout[topic2] = initHashSet[PubSubPeer]()
|
||||
gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
|
||||
gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
|
||||
await sleepAsync(5.millis) # allow the topic to expire
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 6:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
gossipSub.fanout[topic1].incl(peer)
|
||||
gossipSub.fanout[topic2].incl(peer)
|
||||
|
||||
check gossipSub.fanout[topic1].len == gossipSub.parameters.d
|
||||
check gossipSub.fanout[topic2].len == gossipSub.parameters.d
|
||||
|
||||
gossipSub.dropFanoutPeers()
|
||||
check topic1 notin gossipSub.fanout
|
||||
check topic2 in gossipSub.fanout
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "e2e - GossipSub send over fanout A -> B":
|
||||
var passed = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
passed.complete()
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
await waitSub(nodes[0], nodes[1], "foobar")
|
||||
|
||||
var observed = 0
|
||||
let
|
||||
obs1 = PubSubObserver(
|
||||
onRecv: proc(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
inc observed
|
||||
)
|
||||
obs2 = PubSubObserver(
|
||||
onSend: proc(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
inc observed
|
||||
)
|
||||
|
||||
nodes[1].addObserver(obs1)
|
||||
nodes[0].addObserver(obs2)
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
|
||||
|
||||
var gossip1: GossipSub = GossipSub(nodes[0])
|
||||
var gossip2: GossipSub = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
"foobar" in gossip1.gossipsub
|
||||
gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
await passed.wait(2.seconds)
|
||||
|
||||
check observed == 2
|
||||
|
||||
asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
|
||||
var passed = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
passed.complete()
|
||||
|
||||
let nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 10.minutes)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
GossipSub(nodes[1]).parameters.d = 0
|
||||
GossipSub(nodes[1]).parameters.dHigh = 0
|
||||
GossipSub(nodes[1]).parameters.dLow = 0
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
let gsNode = GossipSub(nodes[1])
|
||||
checkUntilTimeout:
|
||||
gsNode.mesh.getOrDefault("foobar").len == 0
|
||||
GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0
|
||||
(
|
||||
GossipSub(nodes[0]).gossipsub.getOrDefault("foobar").len == 1 or
|
||||
GossipSub(nodes[0]).fanout.getOrDefault("foobar").len == 1
|
||||
)
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
|
||||
|
||||
check:
|
||||
GossipSub(nodes[0]).fanout.getOrDefault("foobar").len > 0
|
||||
GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0
|
||||
|
||||
await passed.wait(2.seconds)
|
||||
|
||||
trace "test done, stopping..."
|
||||
@@ -1,717 +0,0 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils]
import stew/byteutils
import chronicles
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../libp2p/protocols/pubsub/rpc/[message]
import ../helpers, ../utils/[futures]

const MsgIdSuccess = "msg id gen success"

suite "GossipSub Gossip Protocol":
  teardown:
    checkTrackers()

||||
asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
var conns = newSeq[Connection]()
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i in 0 ..< 30:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
if i mod 2 == 0:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
else:
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
# generate gossipsub (free standing) peers
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
inc seqno
|
||||
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
check gossipSub.fanout[topic].len == 15
|
||||
check gossipSub.mesh[topic].len == 15
|
||||
check gossipSub.gossipsub[topic].len == 15
|
||||
|
||||
let peers = gossipSub.getGossipPeers()
|
||||
check peers.len == gossipSub.parameters.d
|
||||
for p in peers.keys:
|
||||
check not gossipSub.fanout.hasPeerId(topic, p.peerId)
|
||||
check not gossipSub.mesh.hasPeerId(topic, p.peerId)
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 30:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
if i mod 2 == 0:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
else:
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
inc seqno
|
||||
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let peers = gossipSub.getGossipPeers()
|
||||
check peers.len == gossipSub.parameters.d
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 30:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
if i mod 2 == 0:
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
gossipSub.grafted(peer, topic)
|
||||
else:
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
inc seqno
|
||||
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let peers = gossipSub.getGossipPeers()
|
||||
check peers.len == gossipSub.parameters.d
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 30:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
if i mod 2 == 0:
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
gossipSub.grafted(peer, topic)
|
||||
else:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
inc seqno
|
||||
let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let peers = gossipSub.getGossipPeers()
|
||||
check peers.len == 0
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "handleIHave/Iwant tests":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
check false
|
||||
|
||||
proc handler2(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.subscribe(topic, handler2)
|
||||
|
||||
# Instantiates 30 peers and connects all of them to the previously defined `gossipSub`
|
||||
for i in 0 ..< 30:
|
||||
# Define a new connection
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
# Add the connection to `gossipSub`, to their `gossipSub.gossipsub` and `gossipSub.mesh` tables
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
# Peers with no budget should not request messages
|
||||
block:
|
||||
# Define a new connection
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
# Add message to `gossipSub`'s message cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[^1].incl(id)
|
||||
# Build an IHAVE message that contains the same message ID three times
|
||||
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
|
||||
# Given the peer has no budget to request messages
|
||||
peer.iHaveBudget = 0
|
||||
# When a peer makes an IHAVE request for a message that `gossipSub` has
|
||||
let iwants = gossipSub.handleIHave(peer, @[msg])
|
||||
# Then `gossipSub` should not generate an IWant message for the message,
|
||||
check:
|
||||
iwants.messageIDs.len == 0
|
||||
|
||||
# Peers with budget should request messages. If ids are repeated, only one request should be generated
|
||||
block:
|
||||
# Define a new connection
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
# Build an IHAVE message that contains the same message ID three times
|
||||
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
|
||||
# Given the budget is not 0 (because it's not been overridden)
|
||||
# When a peer makes an IHAVE request for a message that `gossipSub` does not have
|
||||
let iwants = gossipSub.handleIHave(peer, @[msg])
|
||||
# Then `gossipSub` should generate an IWant message for the message
|
||||
check:
|
||||
iwants.messageIDs.len == 1
|
||||
|
||||
# Peers with budget should request messages. If ids are repeated, only one request should be generated
|
||||
block:
|
||||
# Define a new connection
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
# Add message to `gossipSub`'s message cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[^1].incl(id)
|
||||
# Build an IWANT message that contains the same message ID three times
|
||||
let msg = ControlIWant(messageIDs: @[id, id, id])
|
||||
# When a peer makes an IWANT request for a message that `gossipSub` has
|
||||
let genmsg = gossipSub.handleIWant(peer, @[msg])
|
||||
# Then `gossipSub` should return the message
|
||||
check:
|
||||
genmsg.len == 1
|
||||
|
||||
check gossipSub.mcache.msgs.len == 1
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
  asyncTest "messages sent to peers not in the mesh are propagated via gossip":
    let
      numberOfNodes = 5
      topic = "foobar"
      dValues = DValues(dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1))
      nodes = generateNodes(numberOfNodes, gossip = true, dValues = some(dValues))

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iHave messages
    var receivedIHavesRef = new seq[int]
    addIHaveObservers(nodes, topic, receivedIHavesRef)

    # And are interconnected
    await connectNodesStar(nodes)

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForPeersInTable(
      nodes, topic, newSeqWith(numberOfNodes, 4), PeerTableType.Gossipsub
    )

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) > 0
    await waitForHeartbeat()

    # At least one of the nodes should have received an iHave message
    # The check is made this way because the mesh structure changes from run to run
    let receivedIHaves = receivedIHavesRef[]
    check:
      anyIt(receivedIHaves, it > 0)

||||
asyncTest "adaptive gossip dissemination, dLazy and gossipFactor to 0":
|
||||
let
|
||||
numberOfNodes = 20
|
||||
topic = "foobar"
|
||||
dValues = DValues(
|
||||
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(0)
|
||||
)
|
||||
nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
dValues = some(dValues),
|
||||
gossipFactor = some(0.float),
|
||||
)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# All nodes are checking for iHave messages
|
||||
var receivedIHavesRef = new seq[int]
|
||||
addIHaveObservers(nodes, topic, receivedIHavesRef)
|
||||
|
||||
# And are connected to node 0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(nodes[0], nodes[i])
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# When node 0 sends a message
|
||||
check (await nodes[0].publish(topic, "Hello!".toBytes())) == 3
|
||||
await waitForHeartbeat()
|
||||
|
||||
# None of the nodes should have received an iHave message
|
||||
let receivedIHaves = receivedIHavesRef[]
|
||||
check:
|
||||
filterIt(receivedIHaves, it > 0).len == 0
|
||||
|
||||
asyncTest "adaptive gossip dissemination, with gossipFactor priority":
|
||||
let
|
||||
numberOfNodes = 20
|
||||
topic = "foobar"
|
||||
dValues = DValues(
|
||||
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(4)
|
||||
)
|
||||
nodes = generateNodes(
|
||||
numberOfNodes, gossip = true, dValues = some(dValues), gossipFactor = some(0.5)
|
||||
)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# All nodes are checking for iHave messages
|
||||
var receivedIHavesRef = new seq[int]
|
||||
addIHaveObservers(nodes, topic, receivedIHavesRef)
|
||||
|
||||
# And are connected to node 0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(nodes[0], nodes[i])
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForPeersInTable(@[nodes[0]], topic, @[19], PeerTableType.Gossipsub)
|
||||
|
||||
# When node 0 sends a message
|
||||
check (await nodes[0].publish(topic, "Hello!".toBytes())) in 2 .. 3
|
||||
await waitForHeartbeat(2)
|
||||
|
||||
# At least 8 of the nodes should have received an iHave message
|
||||
# That's because the gossip factor is 0.5 over 16 available nodes
|
||||
let receivedIHaves = receivedIHavesRef[]
|
||||
check:
|
||||
filterIt(receivedIHaves, it > 0).len >= 8
|
||||
|
||||
asyncTest "adaptive gossip dissemination, with dLazy priority":
|
||||
let
|
||||
numberOfNodes = 20
|
||||
topic = "foobar"
|
||||
dValues = DValues(
|
||||
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(6)
|
||||
)
|
||||
nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
dValues = some(dValues),
|
||||
gossipFactor = some(0.float),
|
||||
)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# All nodes are checking for iHave messages
|
||||
var receivedIHavesRef = new seq[int]
|
||||
addIHaveObservers(nodes, topic, receivedIHavesRef)
|
||||
|
||||
# And are connected to node 0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(nodes[0], nodes[i])
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForPeersInTable(@[nodes[0]], topic, @[19], PeerTableType.Gossipsub)
|
||||
|
||||
# When node 0 sends a message
|
||||
check (await nodes[0].publish(topic, "Hello!".toBytes())) in 2 .. 3
|
||||
await waitForHeartbeat(2)
|
||||
|
||||
# At least 6 of the nodes should have received an iHave message
|
||||
# That's because the dLazy is 6
|
||||
let receivedIHaves = receivedIHavesRef[]
|
||||
check:
|
||||
filterIt(receivedIHaves, it > 0).len >= dValues.dLazy.get()
|
||||
|
||||
asyncTest "iDontWant messages are broadcast immediately after receiving the first message instance":
|
||||
let
|
||||
numberOfNodes = 3
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# All nodes are checking for iDontWant messages
|
||||
var receivedIDontWantsRef = new seq[int]
|
||||
addIDontWantObservers(nodes, receivedIDontWantsRef)
|
||||
|
||||
# And are connected in a line
|
||||
await connectNodes(nodes[0], nodes[1])
|
||||
await connectNodes(nodes[1], nodes[2])
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForPeersInTable(nodes, topic, @[1, 2, 1], PeerTableType.Gossipsub)
|
||||
|
||||
# When node 0 sends a large message
|
||||
let largeMsg = newSeq[byte](1000)
|
||||
check (await nodes[0].publish(topic, largeMsg)) == 1
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Only node 2 should have received the iDontWant message
|
||||
let receivedIDontWants = receivedIDontWantsRef[]
|
||||
check:
|
||||
receivedIDontWants[0] == 0
|
||||
receivedIDontWants[1] == 0
|
||||
receivedIDontWants[2] == 1
|
||||
|
asyncTest "e2e - GossipSub peer exchange":
# A, B & C are subscribed to something.
# When B unsubscribes from it, it should send
# PX to A & C.
#
# C has sent its SPR, A has not.
proc handler(topic: string, data: seq[byte]) {.async.} =
discard # not used in this test

||||
let nodes =
|
||||
generateNodes(2, gossip = true, enablePX = true) &
|
||||
generateNodes(1, gossip = true, sendSignedPeerRecord = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
var
|
||||
gossip0 = GossipSub(nodes[0])
|
||||
gossip1 = GossipSub(nodes[1])
|
||||
gossip2 = GossipSub(nodes[2])
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
nodes[2].subscribe("foobar", handler)
|
||||
for x in 0 ..< 3:
|
||||
for y in 0 ..< 3:
|
||||
if x != y:
|
||||
await waitSub(nodes[x], nodes[y], "foobar")
|
||||
|
||||
# Setup record handlers for all nodes
|
||||
var
|
||||
passed0: Future[void] = newFuture[void]()
|
||||
passed2: Future[void] = newFuture[void]()
|
||||
gossip0.routingRecordsHandler.add(
|
||||
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
|
||||
check:
|
||||
tag == "foobar"
|
||||
peers.len == 2
|
||||
peers[0].record.isSome() xor peers[1].record.isSome()
|
||||
passed0.complete()
|
||||
)
|
||||
gossip1.routingRecordsHandler.add(
|
||||
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
|
||||
raiseAssert "should not get here"
|
||||
)
|
||||
gossip2.routingRecordsHandler.add(
|
||||
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
|
||||
check:
|
||||
tag == "foobar"
|
||||
peers.len == 2
|
||||
peers[0].record.isSome() xor peers[1].record.isSome()
|
||||
passed2.complete()
|
||||
)
|
||||
|
||||
# Unsubscribe from the topic
|
||||
nodes[1].unsubscribe("foobar", handler)
|
||||
|
||||
# Then verify what nodes receive the PX
|
||||
let results = await waitForStates(@[passed0, passed2], HEARTBEAT_TIMEOUT)
|
||||
check:
|
||||
results[0].isCompleted()
|
||||
results[1].isCompleted()
|
||||
|
||||
asyncTest "e2e - iDontWant":
|
||||
# 3 nodes: A <=> B <=> C
|
||||
# (A & C are NOT connected). We pre-emptively send a dontwant from C to B,
|
||||
# and check that B doesn't relay the message to C.
|
||||
# We also check that B sends IDONTWANT to C, but not A
|
||||
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
|
||||
ok(newSeq[byte](10))
|
||||
let nodes = generateNodes(3, gossip = true, msgIdProvider = dumbMsgIdProvider)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await nodes[0].switch.connect(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
await nodes[1].switch.connect(
|
||||
nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
let bFinished = newFuture[void]()
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} =
|
||||
bFinished.complete()
|
||||
|
||||
proc handlerC(topic: string, data: seq[byte]) {.async.} =
|
||||
doAssert false
|
||||
|
||||
nodes[0].subscribe("foobar", handlerA)
|
||||
nodes[1].subscribe("foobar", handlerB)
|
||||
nodes[2].subscribe("foobar", handlerB)
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
var gossip1: GossipSub = GossipSub(nodes[0])
|
||||
var gossip2: GossipSub = GossipSub(nodes[1])
|
||||
var gossip3: GossipSub = GossipSub(nodes[2])
|
||||
|
||||
check:
|
||||
gossip3.mesh.peers("foobar") == 1
|
||||
|
||||
gossip3.broadcast(
|
||||
gossip3.mesh["foobar"],
|
||||
RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(idontwant: @[ControlIWant(messageIDs: @[newSeq[byte](10)])])
|
||||
)
|
||||
),
|
||||
isHighPriority = true,
|
||||
)
|
||||
checkUntilTimeout:
|
||||
gossip2.mesh.getOrDefault("foobar").anyIt(it.iDontWants[^1].len == 1)
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
|
||||
|
||||
await bFinished
|
||||
|
||||
checkUntilTimeout:
|
||||
toSeq(gossip3.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 1)
|
||||
check:
|
||||
toSeq(gossip1.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
|
||||
|
||||
asyncTest "e2e - iDontWant is broadcasted on publish":
|
||||
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
|
||||
ok(newSeq[byte](10))
|
||||
let nodes = generateNodes(
|
||||
2, gossip = true, msgIdProvider = dumbMsgIdProvider, sendIDontWantOnPublish = true
|
||||
)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await nodes[0].switch.connect(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
nodes[0].subscribe("foobar", handlerA)
|
||||
nodes[1].subscribe("foobar", handlerB)
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
var gossip2: GossipSub = GossipSub(nodes[1])
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
|
||||
|
||||
checkUntilTimeout:
|
||||
gossip2.mesh.getOrDefault("foobar").anyIt(it.iDontWants[^1].len == 1)
|
||||
|
||||
asyncTest "e2e - iDontWant is sent only for 1.2":
|
||||
# 3 nodes: A <=> B <=> C
|
||||
# (A & C are NOT connected). We pre-emptively send a dontwant from C to B,
|
||||
# and check that B doesn't relay the message to C.
|
||||
# We also check that B sends IDONTWANT to C, but not A
|
||||
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
|
||||
ok(newSeq[byte](10))
|
||||
let
|
||||
nodeA = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
|
||||
nodeB = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
|
||||
nodeC = generateNodes(
|
||||
1,
|
||||
gossip = true,
|
||||
msgIdProvider = dumbMsgIdProvider,
|
||||
gossipSubVersion = GossipSubCodec_11,
|
||||
)[0]
|
||||
|
||||
startNodesAndDeferStop(@[nodeA, nodeB, nodeC])
|
||||
|
||||
await nodeA.switch.connect(
|
||||
nodeB.switch.peerInfo.peerId, nodeB.switch.peerInfo.addrs
|
||||
)
|
||||
await nodeB.switch.connect(
|
||||
nodeC.switch.peerInfo.peerId, nodeC.switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
let bFinished = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} =
|
||||
bFinished.complete()
|
||||
|
||||
nodeA.subscribe("foobar", handler)
|
||||
nodeB.subscribe("foobar", handlerB)
|
||||
nodeC.subscribe("foobar", handler)
|
||||
await waitSubGraph(@[nodeA, nodeB, nodeC], "foobar")
|
||||
|
||||
var gossipA: GossipSub = GossipSub(nodeA)
|
||||
var gossipB: GossipSub = GossipSub(nodeB)
|
||||
var gossipC: GossipSub = GossipSub(nodeC)
|
||||
|
||||
check:
|
||||
gossipC.mesh.peers("foobar") == 1
|
||||
|
||||
tryPublish await nodeA.publish("foobar", newSeq[byte](10000)), 1
|
||||
|
||||
await bFinished
|
||||
|
||||
# "check" alone isn't suitable for testing that a condition is true after some time has passed. Below we verify that
|
||||
# peers A and C haven't received an IDONTWANT message from B, but we need wait some time for potential in flight messages to arrive.
|
||||
await waitForHeartbeat()
|
||||
check:
|
||||
toSeq(gossipC.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
|
||||
toSeq(gossipA.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
|
||||
|
||||
asyncTest "Peer must send right gosspipsub version":
|
||||
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
|
||||
ok(newSeq[byte](10))
|
||||
let node0 = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
|
||||
let node1 = generateNodes(
|
||||
1,
|
||||
gossip = true,
|
||||
msgIdProvider = dumbMsgIdProvider,
|
||||
gossipSubVersion = GossipSubCodec_10,
|
||||
)[0]
|
||||
|
||||
startNodesAndDeferStop(@[node0, node1])
|
||||
|
||||
await node0.switch.connect(
|
||||
node1.switch.peerInfo.peerId, node1.switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
node0.subscribe("foobar", handler)
|
||||
node1.subscribe("foobar", handler)
|
||||
await waitSubGraph(@[node0, node1], "foobar")
|
||||
|
||||
var gossip0: GossipSub = GossipSub(node0)
|
||||
var gossip1: GossipSub = GossipSub(node1)
|
||||
|
||||
checkUntilTimeout:
|
||||
gossip0.mesh.getOrDefault("foobar").toSeq[0].codec == GossipSubCodec_10
|
||||
checkUntilTimeout:
|
||||
gossip1.mesh.getOrDefault("foobar").toSeq[0].codec == GossipSubCodec_10
|
||||
@@ -1,522 +0,0 @@
|
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils]
import stew/byteutils
import utils
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../helpers, ../utils/[futures]

suite "GossipSub Mesh Management":
  teardown:
    checkTrackers()

  asyncTest "topic params":
    let params = TopicParams.init()
    params.validateParameters().tryGet()

||||
asyncTest "subscribe/unsubscribeAll":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(topic: string, data: seq[byte]): Future[void] {.gcsafe, raises: [].} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# test via dynamic dispatch
|
||||
gossipSub.PubSub.subscribe(topic, handler)
|
||||
|
||||
check:
|
||||
gossipSub.topics.contains(topic)
|
||||
gossipSub.gossipsub[topic].len() > 0
|
||||
gossipSub.mesh[topic].len() > 0
|
||||
|
||||
# test via dynamic dispatch
|
||||
gossipSub.PubSub.unsubscribeAll(topic)
|
||||
|
||||
check:
|
||||
topic notin gossipSub.topics # not in local topics
|
||||
topic notin gossipSub.mesh # not in mesh
|
||||
topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`rebalanceMesh` Degree Lo":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len == gossipSub.parameters.d
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "rebalanceMesh - bad peers":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
var scoreLow = -11'f64
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
peer.score = scoreLow
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
scoreLow += 1.0
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# low score peers should not be in mesh, that's why the count must be 4
|
||||
check gossipSub.mesh[topic].len == 4
|
||||
for peer in gossipSub.mesh[topic]:
|
||||
check peer.score >= 0.0
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`rebalanceMesh` Degree Hi":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
check gossipSub.mesh[topic].len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len ==
|
||||
gossipSub.parameters.d + gossipSub.parameters.dScore
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "rebalanceMesh fail due to backoff":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
|
||||
peerId, Moment.now() + 1.hours
|
||||
)
|
||||
let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
|
||||
# there must be a control prune due to violation of backoff
|
||||
check prunes.len != 0
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# expect 0 since they are all backing off
|
||||
check gossipSub.mesh[topic].len == 0
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "rebalanceMesh fail due to backoff - remote":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len != 0
|
||||
|
||||
for i in 0 ..< 15:
|
||||
let peerId = conns[i].peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
gossipSub.handlePrune(
|
||||
peer,
|
||||
@[
|
||||
ControlPrune(
|
||||
topicID: topic,
|
||||
peers: @[],
|
||||
backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
# expect topic cleaned up since they are all pruned
|
||||
check topic notin gossipSub.mesh
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "rebalanceMesh Degree Hi - audit scenario":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.parameters.dScore = 4
|
||||
gossipSub.parameters.d = 6
|
||||
gossipSub.parameters.dOut = 3
|
||||
gossipSub.parameters.dHigh = 12
|
||||
gossipSub.parameters.dLow = 4
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 6:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conn.transportDir = Direction.In
|
||||
conns &= conn
|
||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.score = 40.0
|
||||
peer.sendConn = conn
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
for i in 0 ..< 7:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conn.transportDir = Direction.Out
|
||||
conns &= conn
|
||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.score = 10.0
|
||||
peer.sendConn = conn
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
check gossipSub.mesh[topic].len == 13
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# ensure we are above dlow
|
||||
check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
|
||||
var outbound = 0
|
||||
for peer in gossipSub.mesh[topic]:
|
||||
if peer.sendConn.transportDir == Direction.Out:
|
||||
inc outbound
|
||||
# ensure we give priority and keep at least dOut outbound peers
|
||||
check outbound >= gossipSub.parameters.dOut
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "dont prune peers if mesh len is less than d_high":
|
||||
let
|
||||
numberOfNodes = 5
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
let expectedNumberOfPeers = numberOfNodes - 1
|
||||
await waitForPeersInTable(
|
||||
nodes,
|
||||
topic,
|
||||
newSeqWith(numberOfNodes, expectedNumberOfPeers),
|
||||
PeerTableType.Gossipsub,
|
||||
)
|
||||
|
||||
for i in 0 ..< numberOfNodes:
|
||||
var gossip = GossipSub(nodes[i])
|
||||
check:
|
||||
gossip.gossipsub[topic].len == expectedNumberOfPeers
|
||||
gossip.mesh[topic].len == expectedNumberOfPeers
|
||||
gossip.fanout.len == 0
|
||||
|
||||
asyncTest "prune peers if mesh len is higher than d_high":
|
||||
let
|
||||
numberOfNodes = 15
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
let
|
||||
expectedNumberOfPeers = numberOfNodes - 1
|
||||
dHigh = 12
|
||||
d = 6
|
||||
dLow = 4
|
||||
|
||||
await waitForPeersInTable(
|
||||
nodes,
|
||||
topic,
|
||||
newSeqWith(numberOfNodes, expectedNumberOfPeers),
|
||||
PeerTableType.Gossipsub,
|
||||
)
|
||||
|
||||
for i in 0 ..< numberOfNodes:
|
||||
var gossip = GossipSub(nodes[i])
|
||||
|
||||
check:
|
||||
gossip.gossipsub[topic].len == expectedNumberOfPeers
|
||||
gossip.mesh[topic].len >= dLow and gossip.mesh[topic].len <= dHigh
|
||||
gossip.fanout.len == 0
|
||||
|
||||
asyncTest "GossipSub unsub - resub faster than backoff":
|
||||
# For this test to work we'd require a way to disable fanout.
|
||||
# There's no way to toggle it, and mocking it didn't work as there's no reliable mock available.
|
||||
skip()
|
||||
return
|
||||
|
||||
# Instantiate handlers and validators
|
||||
var handlerFut0 = newFuture[bool]()
|
||||
proc handler0(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut0.complete(true)
|
||||
|
||||
var handlerFut1 = newFuture[bool]()
|
||||
proc handler1(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut1.complete(true)
|
||||
|
||||
var validatorFut = newFuture[bool]()
|
||||
proc validator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
check topic == "foobar"
|
||||
validatorFut.complete(true)
|
||||
result = ValidationResult.Accept
|
||||
|
||||
# Setup nodes and start switches
|
||||
let
|
||||
nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 5.seconds)
|
||||
topic = "foobar"
|
||||
|
||||
# Connect nodes
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# Subscribe both nodes to the topic and node1 (receiver) to the validator
|
||||
nodes[0].subscribe(topic, handler0)
|
||||
nodes[1].subscribe(topic, handler1)
|
||||
nodes[1].addValidator("foobar", validator)
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
|
||||
# Wait for both nodes to verify others' subscription
|
||||
var subs: seq[Future[void]]
|
||||
subs &= waitSub(nodes[1], nodes[0], topic)
|
||||
subs &= waitSub(nodes[0], nodes[1], topic)
|
||||
await allFuturesThrowing(subs)
|
||||
|
||||
# When unsubscribing and resubscribing in a short time frame, the backoff period should be triggered
|
||||
nodes[1].unsubscribe(topic, handler1)
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
nodes[1].subscribe(topic, handler1)
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
|
||||
# Backoff is set to 5 seconds, and the amount of sleeping time since the unsubscribe until now is ~3-4s
|
||||
# Meaning, the subscription shouldn't have been processed yet because it's still in backoff period
|
||||
# When publishing under this condition
|
||||
discard await nodes[0].publish("foobar", "Hello!".toBytes())
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
|
||||
# Then the message should not be received:
|
||||
check:
|
||||
validatorFut.toState().isPending()
|
||||
handlerFut1.toState().isPending()
|
||||
handlerFut0.toState().isPending()
|
||||
|
||||
validatorFut.reset()
|
||||
handlerFut0.reset()
|
||||
handlerFut1.reset()
|
||||
|
||||
# If we wait backoff period to end, around 1-2s
|
||||
await waitForMesh(nodes[0], nodes[1], topic, 3.seconds)
|
||||
|
||||
discard await nodes[0].publish("foobar", "Hello!".toBytes())
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
|
||||
# Then the message should be received
|
||||
check:
|
||||
validatorFut.toState().isCompleted()
|
||||
handlerFut1.toState().isCompleted()
|
||||
handlerFut0.toState().isPending()
|
||||
|
||||
asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
let gossip1 = GossipSub(nodes[0])
|
||||
let gossip2 = GossipSub(nodes[1])
|
||||
|
||||
checkUntilTimeout:
|
||||
"foobar" in gossip2.topics
|
||||
"foobar" in gossip1.gossipsub
|
||||
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
asyncTest "e2e - GossipSub should add remote peer topic subscriptions if both peers are subscribed":
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
var subs: seq[Future[void]]
|
||||
subs &= waitSub(nodes[1], nodes[0], "foobar")
|
||||
subs &= waitSub(nodes[0], nodes[1], "foobar")
|
||||
|
||||
await allFuturesThrowing(subs)
|
||||
|
||||
let
|
||||
gossip1 = GossipSub(nodes[0])
|
||||
gossip2 = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
"foobar" in gossip1.topics
|
||||
"foobar" in gossip2.topics
|
||||
|
||||
"foobar" in gossip1.gossipsub
|
||||
"foobar" in gossip2.gossipsub
|
||||
|
||||
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId) or
|
||||
gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
gossip2.gossipsub.hasPeerId("foobar", gossip1.peerInfo.peerId) or
|
||||
gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
|
||||
|
||||
asyncTest "GossipSub invalid topic subscription":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# We must subscribe before setting the validator
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
|
||||
var gossip = GossipSub(nodes[0])
|
||||
let invalidDetected = newFuture[void]()
|
||||
gossip.subscriptionValidator = proc(topic: string): bool =
|
||||
if topic == "foobar":
|
||||
try:
|
||||
invalidDetected.complete()
|
||||
except:
|
||||
raise newException(Defect, "Exception during subscriptionValidator")
|
||||
false
|
||||
else:
|
||||
true
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await invalidDetected.wait(10.seconds)
|
||||
|
||||
asyncTest "GossipSub test directPeers":
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await GossipSub(nodes[0]).addDirectPeer(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
let invalidDetected = newFuture[void]()
|
||||
GossipSub(nodes[0]).subscriptionValidator = proc(topic: string): bool =
|
||||
if topic == "foobar":
|
||||
try:
|
||||
invalidDetected.complete()
|
||||
except:
|
||||
raise newException(Defect, "Exception during subscriptionValidator")
|
||||
false
|
||||
else:
|
||||
true
|
||||
|
||||
# DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
|
||||
### await connectNodesStar(nodes)
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await invalidDetected.wait(10.seconds)
|
||||
tests/pubsub/testgossipsubparams.nim (new file, 395 lines)
@@ -0,0 +1,395 @@
{.used.}

import unittest2
import chronos
import results

import ../../libp2p/protocols/pubsub/gossipsub/[types]
import ../../libp2p/protocols/pubsub/[gossipsub, pubsubpeer]
import ../../libp2p/[peerid, multiaddress]

suite "GossipSubParams validation":
  proc newDefaultValidParams(): GossipSubParams =
    result = GossipSubParams.init()

  test "default parameters are valid":
    var params = newDefaultValidParams()
    check params.validateParameters().isOk()

||||
test "dOut fails when equal to dLow":
|
||||
const errorMessage =
|
||||
"gossipsub: dOut parameter error, Number of outbound connections to keep in the mesh. Must be less than D_lo and at most D/2"
|
||||
var params = newDefaultValidParams()
|
||||
params.dLow = 4
|
||||
params.d = 8
|
||||
params.dOut = params.dLow
|
||||
let res = params.validateParameters()
|
||||
check res.isErr()
|
||||
check res.error == errorMessage
|
||||
|
||||
test "dOut fails when bigger than d/2":
|
||||
const errorMessage =
|
||||
"gossipsub: dOut parameter error, Number of outbound connections to keep in the mesh. Must be less than D_lo and at most D/2"
|
||||
var params = newDefaultValidParams()
|
||||
params.dLow = 4
|
||||
params.d = 5
|
||||
params.dOut = 3
|
||||
let res = params.validateParameters()
|
||||
check res.isErr()
|
||||
check res.error == errorMessage
|
||||
|
||||
test "dOut succeeds when less than dLow and equals d/2":
|
||||
var params = newDefaultValidParams()
|
||||
params.dLow = 4
|
||||
params.d = 6
|
||||
params.dOut = 3
|
||||
check params.validateParameters().isOk()
|
||||
|
||||
test "gossipThreshold fails when zero":
|
||||
const errorMessage = "gossipsub: gossipThreshold parameter error, Must be < 0"
|
||||
var params = newDefaultValidParams()
|
||||
params.gossipThreshold = 0.0
|
||||
var res = params.validateParameters()
|
||||
check res.isErr()
|
||||
check res.error == errorMessage
|
||||
|
||||
test "gossipThreshold succeeds when negative":
|
||||
var params = newDefaultValidParams()
|
||||
params.gossipThreshold = -0.1
|
||||
check params.validateParameters().isOk()
|
||||
|
||||
test "unsubscribeBackoff fails when zero":
|
||||
const errorMessage =
|
||||
"gossipsub: unsubscribeBackoff parameter error, Must be > 0 seconds"
|
||||
var params = newDefaultValidParams()
|
||||
params.unsubscribeBackoff = 0.seconds
|
||||
let res = params.validateParameters()
|
||||
check res.isErr()
|
||||
check res.error == errorMessage
|
||||
|
||||
test "unsubscribeBackoff succeeds when positive":
|
||||
var params = newDefaultValidParams()
|
||||
params.unsubscribeBackoff = 1.seconds
|
||||
check params.validateParameters().isOk()
|
||||
|
||||
test "publishThreshold fails when equal to gossipThreshold":
|
||||
const errorMessage =
|
||||
"gossipsub: publishThreshold parameter error, Must be < gossipThreshold"
|
||||
var params = newDefaultValidParams()
|
||||
params.publishThreshold = params.gossipThreshold
|
||||
let res = params.validateParameters()
|
||||
check res.isErr()
|
||||
check res.error == errorMessage
|
||||
|
||||
test "publishThreshold succeeds when less than gossipThreshold":
|
||||
var params = newDefaultValidParams()
|
||||
params.publishThreshold = params.gossipThreshold - 1.0
|
||||
check params.validateParameters().isOk()
|
||||
|
||||
test "graylistThreshold fails when equal to publishThreshold":
|
||||
const errorMessage =
|
||||
"gossipsub: graylistThreshold parameter error, Must be < publishThreshold"
|
||||
var params = newDefaultValidParams()
|
||||
params.graylistThreshold = params.publishThreshold
|
||||
let res = params.validateParameters()
|
||||
check res.isErr()
|
||||
check res.error == errorMessage
|
||||
|
||||
test "graylistThreshold succeeds when less than publishThreshold":
|
||||
var params = newDefaultValidParams()
|
||||
params.graylistThreshold = params.publishThreshold - 1.0
|
||||
check params.validateParameters().isOk()
|
||||
|
||||
test "acceptPXThreshold fails when negative":
|
||||
const errorMessage = "gossipsub: acceptPXThreshold parameter error, Must be >= 0"
|
||||
var params = newDefaultValidParams()
|
||||
params.acceptPXThreshold = -0.1
|
||||
let res = params.validateParameters()
|
||||
check res.isErr()
|
||||
check res.error == errorMessage
|
||||
|
||||
test "acceptPXThreshold succeeds when zero":
|
||||
var params = newDefaultValidParams()
|
||||
params.acceptPXThreshold = 0.0
|
||||
check params.validateParameters().isOk()
|
||||
|
||||
test "opportunisticGraftThreshold fails when negative":
|
||||
const errorMessage =
|
||||
"gossipsub: opportunisticGraftThreshold parameter error, Must be >= 0"
|
||||
var params = newDefaultValidParams()
|
||||
params.opportunisticGraftThreshold = -0.1
|
||||
let res = params.validateParameters()
|
||||
check res.isErr()
|
||||
check res.error == errorMessage
|
||||
|
||||
test "opportunisticGraftThreshold succeeds when zero":
|
||||
var params = newDefaultValidParams()
|
||||
params.opportunisticGraftThreshold = 0.0
|
||||
check params.validateParameters().isOk()
|
||||
|
||||
test "decayToZero fails when greater than 0.5":
|
||||
const errorMessage =
|
||||
"gossipsub: decayToZero parameter error, Should be close to 0.0"
|
||||
var params = newDefaultValidParams()
|
||||
    params.decayToZero = 0.51
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "decayToZero fails when zero":
    const errorMessage =
      "gossipsub: decayToZero parameter error, Should be close to 0.0"
    var params = newDefaultValidParams()
    params.decayToZero = 0.0
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "decayToZero succeeds when exactly 0.5":
    var params = newDefaultValidParams()
    params.decayToZero = 0.5
    check params.validateParameters().isOk()

  test "decayToZero succeeds when small positive value":
    var params = newDefaultValidParams()
    params.decayToZero = 0.00001
    check params.validateParameters().isOk()

  test "appSpecificWeight fails when negative":
    const errorMessage =
      "gossipsub: appSpecificWeight parameter error, Must be positive"
    var params = newDefaultValidParams()
    params.appSpecificWeight = -0.1
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "appSpecificWeight succeeds when zero":
    var params = newDefaultValidParams()
    params.appSpecificWeight = 0.0
    check params.validateParameters().isOk()

  test "ipColocationFactorWeight fails when positive":
    const errorMessage =
      "gossipsub: ipColocationFactorWeight parameter error, Must be negative or 0"
    var params = newDefaultValidParams()
    params.ipColocationFactorWeight = 0.1
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "ipColocationFactorWeight succeeds when zero":
    var params = newDefaultValidParams()
    params.ipColocationFactorWeight = 0.0
    check params.validateParameters().isOk()

  test "ipColocationFactorWeight succeeds when negative":
    var params = newDefaultValidParams()
    params.ipColocationFactorWeight = -10.0
    check params.validateParameters().isOk()

  test "ipColocationFactorThreshold fails when less than 1":
    const errorMessage =
      "gossipsub: ipColocationFactorThreshold parameter error, Must be at least 1"
    var params = newDefaultValidParams()
    params.ipColocationFactorThreshold = 0.9
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "ipColocationFactorThreshold succeeds when exactly 1":
    var params = newDefaultValidParams()
    params.ipColocationFactorThreshold = 1.0
    check params.validateParameters().isOk()

  test "behaviourPenaltyWeight fails when zero":
    const errorMessage =
      "gossipsub: behaviourPenaltyWeight parameter error, Must be negative"
    var params = newDefaultValidParams()
    params.behaviourPenaltyWeight = 0.0
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "behaviourPenaltyWeight succeeds when negative":
    var params = newDefaultValidParams()
    params.behaviourPenaltyWeight = -0.0001
    check params.validateParameters().isOk()

  test "behaviourPenaltyDecay fails when negative":
    const errorMessage =
      "gossipsub: behaviourPenaltyDecay parameter error, Must be between 0 and 1"
    var params = newDefaultValidParams()
    params.behaviourPenaltyDecay = -0.1
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "behaviourPenaltyDecay fails when equal to 1":
    const errorMessage =
      "gossipsub: behaviourPenaltyDecay parameter error, Must be between 0 and 1"
    var params = newDefaultValidParams()
    params.behaviourPenaltyDecay = 1.0
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "behaviourPenaltyDecay succeeds when zero":
    var params = newDefaultValidParams()
    params.behaviourPenaltyDecay = 0.0
    check params.validateParameters().isOk()

  test "behaviourPenaltyDecay succeeds when between 0 and 1":
    var params = newDefaultValidParams()
    params.behaviourPenaltyDecay = 0.5
    check params.validateParameters().isOk()

  test "maxNumElementsInNonPriorityQueue fails when zero":
    const errorMessage =
      "gossipsub: maxNumElementsInNonPriorityQueue parameter error, Must be > 0"
    var params = newDefaultValidParams()
    params.maxNumElementsInNonPriorityQueue = 0
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "maxNumElementsInNonPriorityQueue succeeds when positive":
    var params = newDefaultValidParams()
    params.maxNumElementsInNonPriorityQueue = 1
    check params.validateParameters().isOk()

suite "TopicParams validation":
  proc newDefaultValidTopicParams(): TopicParams =
    result = TopicParams.init()

  test "default topic parameters are valid":
    var params = newDefaultValidTopicParams()
    check params.validateParameters().isOk()

  test "timeInMeshWeight fails when zero":
    const errorMessage =
      "gossipsub: timeInMeshWeight parameter error, Must be a small positive value"
    var params = newDefaultValidTopicParams()
    params.timeInMeshWeight = 0.0
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "timeInMeshWeight fails when greater than 1":
    const errorMessage =
      "gossipsub: timeInMeshWeight parameter error, Must be a small positive value"
    var params = newDefaultValidTopicParams()
    params.timeInMeshWeight = 1.1
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "timeInMeshWeight succeeds when exactly 1":
    var params = newDefaultValidTopicParams()
    params.timeInMeshWeight = 1.0
    check params.validateParameters().isOk()

  test "timeInMeshWeight succeeds when small positive value":
    var params = newDefaultValidTopicParams()
    params.timeInMeshWeight = 0.01
    check params.validateParameters().isOk()

  test "timeInMeshCap fails when zero":
    const errorMessage =
      "gossipsub: timeInMeshCap parameter error, Should be a positive value"
    var params = newDefaultValidTopicParams()
    params.timeInMeshCap = 0.0
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "timeInMeshCap succeeds when positive":
    var params = newDefaultValidTopicParams()
    params.timeInMeshCap = 10.0
    check params.validateParameters().isOk()

  test "firstMessageDeliveriesWeight fails when zero":
    const errorMessage =
      "gossipsub: firstMessageDeliveriesWeight parameter error, Should be a positive value"
    var params = newDefaultValidTopicParams()
    params.firstMessageDeliveriesWeight = 0.0
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "firstMessageDeliveriesWeight succeeds when positive":
    var params = newDefaultValidTopicParams()
    params.firstMessageDeliveriesWeight = 1.0
    check params.validateParameters().isOk()

  test "meshMessageDeliveriesWeight fails when zero":
    const errorMessage =
      "gossipsub: meshMessageDeliveriesWeight parameter error, Should be a negative value"
    var params = newDefaultValidTopicParams()
    params.meshMessageDeliveriesWeight = 0.0
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "meshMessageDeliveriesWeight succeeds when negative":
    var params = newDefaultValidTopicParams()
    params.meshMessageDeliveriesWeight = -1.0
    check params.validateParameters().isOk()

  test "meshMessageDeliveriesThreshold fails when zero":
    const errorMessage =
      "gossipsub: meshMessageDeliveriesThreshold parameter error, Should be a positive value"
    var params = newDefaultValidTopicParams()
    params.meshMessageDeliveriesThreshold = 0.0
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "meshMessageDeliveriesThreshold succeeds when positive":
    var params = newDefaultValidTopicParams()
    params.meshMessageDeliveriesThreshold = 5.0
    check params.validateParameters().isOk()

  test "meshMessageDeliveriesCap fails when less than threshold":
    const errorMessage =
      "gossipsub: meshMessageDeliveriesCap parameter error, Should be >= meshMessageDeliveriesThreshold"
    var params = newDefaultValidTopicParams()
    params.meshMessageDeliveriesThreshold = 10.0
    params.meshMessageDeliveriesCap = 9.0
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "meshMessageDeliveriesCap succeeds when equal to threshold":
    var params = newDefaultValidTopicParams()
    params.meshMessageDeliveriesThreshold = 10.0
    params.meshMessageDeliveriesCap = 10.0
    check params.validateParameters().isOk()

  test "meshFailurePenaltyWeight fails when zero":
    const errorMessage =
      "gossipsub: meshFailurePenaltyWeight parameter error, Should be a negative value"
    var params = newDefaultValidTopicParams()
    params.meshFailurePenaltyWeight = 0.0
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "meshFailurePenaltyWeight succeeds when negative":
    var params = newDefaultValidTopicParams()
    params.meshFailurePenaltyWeight = -1.0
    check params.validateParameters().isOk()

  test "invalidMessageDeliveriesWeight fails when zero":
    const errorMessage =
      "gossipsub: invalidMessageDeliveriesWeight parameter error, Should be a negative value"
    var params = newDefaultValidTopicParams()
    params.invalidMessageDeliveriesWeight = 0.0
    let res = params.validateParameters()
    check res.isErr()
    check res.error == errorMessage

  test "invalidMessageDeliveriesWeight succeeds when negative":
    var params = newDefaultValidTopicParams()
    params.invalidMessageDeliveriesWeight = -1.0
    check params.validateParameters().isOk()
@@ -1,418 +0,0 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils]
import stew/byteutils
import metrics
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../../libp2p/muxers/muxer
import ../helpers, ../utils/[futures]

suite "GossipSub Scoring":
  teardown:
    checkTrackers()

  asyncTest "Disconnect bad peers":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    gossipSub.parameters.disconnectBadPeers = true
    gossipSub.parameters.appSpecificWeight = 1.0
    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      check false

    let topic = "foobar"
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      peer.handler = handler
      peer.appScore = gossipSub.parameters.graylistThreshold - 1
      gossipSub.gossipsub.mgetOrPut(topic, initHashSet[PubSubPeer]()).incl(peer)
      gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))

    gossipSub.updateScores()

    await sleepAsync(100.millis)

    check:
      # test our disconnect mechanics
      gossipSub.gossipsub.peers(topic) == 0
      # also ensure we cleanup properly the peersInIP table
      gossipSub.peersInIP.len == 0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "flood publish to all peers with score above threshold, regardless of subscription":
    let
      numberOfNodes = 3
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true, floodPublish = true)
      g0 = GossipSub(nodes[0])

    startNodesAndDeferStop(nodes)

    # Nodes 1 and 2 are connected to node 0
    await connectNodes(nodes[0], nodes[1])
    await connectNodes(nodes[0], nodes[2])

    let (handlerFut1, handler1) = createCompleteHandler()
    let (handlerFut2, handler2) = createCompleteHandler()

    # Nodes are subscribed to the same topic
    nodes[1].subscribe(topic, handler1)
    nodes[2].subscribe(topic, handler2)
    await waitForHeartbeat()

    # Given node 2's score is below the threshold
    for peer in g0.gossipsub.getOrDefault(topic):
      if peer.peerId == nodes[2].peerInfo.peerId:
        peer.score = (g0.parameters.publishThreshold - 1)

    # When node 0 publishes a message to topic "foo"
    let message = "Hello!".toBytes()
    check (await nodes[0].publish(topic, message)) == 1
    await waitForHeartbeat(2)

    # Then only node 1 should receive the message
    let results = await waitForStates(@[handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
    check:
      results[0].isCompleted(true)
      results[1].isPending()

  proc initializeGossipTest(): Future[(seq[PubSub], GossipSub, GossipSub)] {.async.} =
    let nodes =
      generateNodes(2, gossip = true, overheadRateLimit = Opt.some((20, 1.millis)))

    await startNodes(nodes)
    await connectNodesStar(nodes)

    proc handle(topic: string, data: seq[byte]) {.async.} =
      discard

    let gossip0 = GossipSub(nodes[0])
    let gossip1 = GossipSub(nodes[1])

    gossip0.subscribe("foobar", handle)
    gossip1.subscribe("foobar", handle)
    await waitSubGraph(nodes, "foobar")

    # Avoid being disconnected by failing signature verification
    gossip0.verifySignature = false
    gossip1.verifySignature = false

    return (nodes, gossip0, gossip1)

  proc currentRateLimitHits(): float64 =
    try:
      libp2p_gossipsub_peers_rate_limit_hits.valueByName(
        "libp2p_gossipsub_peers_rate_limit_hits_total", @["nim-libp2p"]
      )
    except KeyError:
      0

  asyncTest "e2e - GossipSub should not rate limit decodable messages below the size allowed":
    let rateLimitHits = currentRateLimitHits()
    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    gossip0.broadcast(
      gossip0.mesh["foobar"],
      RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](10))]),
      isHighPriority = true,
    )
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    gossip0.broadcast(
      gossip0.mesh["foobar"],
      RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](12))]),
      isHighPriority = true,
    )
    await waitForHeartbeat()

    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
    check currentRateLimitHits() == rateLimitHits

    await stopNodes(nodes)

  asyncTest "e2e - GossipSub should rate limit undecodable messages above the size allowed":
    let rateLimitHits = currentRateLimitHits()

    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    # Simulate sending an undecodable message
    await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(
      newSeqWith(33, 1.byte), isHighPriority = true
    )
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits + 1
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(
      newSeqWith(35, 1.byte), isHighPriority = true
    )

    checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
    check currentRateLimitHits() == rateLimitHits + 2

    await stopNodes(nodes)

  asyncTest "e2e - GossipSub should rate limit decodable messages above the size allowed":
    let rateLimitHits = currentRateLimitHits()
    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    let msg = RPCMsg(
      control: some(
        ControlMessage(
          prune:
            @[
              ControlPrune(
                topicID: "foobar",
                peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))],
                backoff: 123'u64,
              )
            ]
        )
      )
    )
    gossip0.broadcast(gossip0.mesh["foobar"], msg, isHighPriority = true)
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits + 1
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    let msg2 = RPCMsg(
      control: some(
        ControlMessage(
          prune:
            @[
              ControlPrune(
                topicID: "foobar",
                peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))],
                backoff: 123'u64,
              )
            ]
        )
      )
    )
    gossip0.broadcast(gossip0.mesh["foobar"], msg2, isHighPriority = true)

    checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
    check currentRateLimitHits() == rateLimitHits + 2

    await stopNodes(nodes)

  asyncTest "e2e - GossipSub should rate limit invalid messages above the size allowed":
    let rateLimitHits = currentRateLimitHits()
    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    let topic = "foobar"
    proc execValidator(
        topic: string, message: messages.Message
    ): Future[ValidationResult] {.async: (raw: true).} =
      let res = newFuture[ValidationResult]()
      res.complete(ValidationResult.Reject)
      res

    gossip0.addValidator(topic, execValidator)
    gossip1.addValidator(topic, execValidator)

    let msg = RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](40))])

    gossip0.broadcast(gossip0.mesh[topic], msg, isHighPriority = true)
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits + 1
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    gossip0.broadcast(
      gossip0.mesh[topic],
      RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](35))]),
      isHighPriority = true,
    )

    checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
    check currentRateLimitHits() == rateLimitHits + 2

    await stopNodes(nodes)

  asyncTest "GossipSub directPeers: don't kick direct peer with low score":
    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)

    await GossipSub(nodes[0]).addDirectPeer(
      nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
    )
    await GossipSub(nodes[1]).addDirectPeer(
      nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
    )

    GossipSub(nodes[1]).parameters.disconnectBadPeers = true
    GossipSub(nodes[1]).parameters.graylistThreshold = 100000

    var handlerFut = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      handlerFut.complete()

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1

    await handlerFut

    GossipSub(nodes[1]).updateScores()
    # peer shouldn't be in our mesh
    check:
      GossipSub(nodes[1]).peerStats[nodes[0].switch.peerInfo.peerId].score <
        GossipSub(nodes[1]).parameters.graylistThreshold
    GossipSub(nodes[1]).updateScores()

    handlerFut = newFuture[void]()
    tryPublish await nodes[0].publish("foobar", toBytes("hellow2")), 1

    # Without directPeers, this would fail
    await handlerFut.wait(1.seconds)

  asyncTest "GossipSub peers disconnections mechanics":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    var seen: Table[string, int]
    var seenFut = newFuture[void]()
    for i in 0 ..< nodes.len:
      let dialer = nodes[i]
      var handler: TopicHandler
      closureScope:
        var peerName = $dialer.peerInfo.peerId
        handler = proc(topic: string, data: seq[byte]) {.async.} =
          seen.mgetOrPut(peerName, 0).inc()
          check topic == "foobar"
          if not seenFut.finished() and seen.len >= runs:
            seenFut.complete()

      dialer.subscribe("foobar", handler)

    await waitSubGraph(nodes, "foobar")

    # ensure peer stats are stored properly and kept properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

    tryPublish await wait(
      nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
      1.minutes,
    ), 1, 5.seconds, 3.minutes

    await wait(seenFut, 5.minutes)
    check:
      seen.len >= runs
    for k, v in seen.pairs:
      check:
        v >= 1

    for node in nodes:
      var gossip = GossipSub(node)
      check:
        "foobar" in gossip.gossipsub
        gossip.fanout.len == 0
        gossip.mesh["foobar"].len > 0

    # Removing some subscriptions

    for i in 0 ..< runs:
      if i mod 3 != 0:
        nodes[i].unsubscribeAll("foobar")

    # Waiting 2 heartbeats

    for _ in 0 .. 1:
      let evnt = newAsyncEvent()
      GossipSub(nodes[0]).heartbeatEvents &= evnt
      await evnt.wait()

    # ensure peer stats are stored properly and kept properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

    # Adding again subscriptions

    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"

    for i in 0 ..< runs:
      if i mod 3 != 0:
        nodes[i].subscribe("foobar", handler)

    # Waiting 2 heartbeats

    for _ in 0 .. 1:
      let evnt = newAsyncEvent()
      GossipSub(nodes[0]).heartbeatEvents &= evnt
      await evnt.wait()

    # ensure peer stats are stored properly and kept properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

  asyncTest "GossipSub scoring - decayInterval":
    let nodes = generateNodes(2, gossip = true)

    var gossip = GossipSub(nodes[0])
    const testDecayInterval = 50.milliseconds
    gossip.parameters.decayInterval = testDecayInterval

    startNodesAndDeferStop(nodes)

    var handlerFut = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      handlerFut.complete()

    await connectNodesStar(nodes)

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    tryPublish await nodes[0].publish("foobar", toBytes("hello")), 1

    await handlerFut

    gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries =
      100
    gossip.topicParams["foobar"].meshMessageDeliveriesDecay = 0.9

    # We should have decayed 5 times, though allowing 4..6
    await sleepAsync(testDecayInterval * 5)
    check:
      gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries in
        50.0 .. 66.0
@@ -12,7 +12,9 @@ import
    protocols/pubsub/errors,
    protocols/pubsub/rpc/message,
    protocols/pubsub/rpc/messages,
    protocols/pubsub/rpc/protobuf,
  ]
import ../utils/async_tests

let rng = newRng()

@@ -139,3 +141,34 @@ suite "Message":
    )

    check byteSize(rpcMsg) == 28 + 32 + 2 + 2 + 38 # Total: 102 bytes

  # check correctly parsed ihave/iwant/graft/prune/idontwant messages
  # check value before & after decoding equal using protoc cmd tool for reference
  asyncTest "ControlMessage RPCMsg encoding and decoding":
    let id: seq[byte] = @[123]
    let message = RPCMsg(
      control: some(
        ControlMessage(
          ihave: @[ControlIHave(topicID: "foobar", messageIDs: @[id])],
          iwant: @[ControlIWant(messageIDs: @[id])],
          graft: @[ControlGraft(topicID: "foobar")],
          prune: @[ControlPrune(topicID: "foobar", backoff: 10.uint64)],
          idontwant: @[ControlIWant(messageIDs: @[id])],
        )
      )
    )
    #data encoded using protoc cmd tool
    let expectedEncoded: seq[byte] =
      @[
        26, 45, 10, 11, 10, 6, 102, 111, 111, 98, 97, 114, 18, 1, 123, 18, 3, 10, 1,
        123, 26, 8, 10, 6, 102, 111, 111, 98, 97, 114, 34, 10, 10, 6, 102, 111, 111, 98,
        97, 114, 24, 10, 42, 3, 10, 1, 123,
      ]

    let actualEncoded = encodeRpcMsg(message, true)
    check:
      actualEncoded == expectedEncoded

    let actualDecoded = decodeRpcMsg(expectedEncoded).value
    check:
      actualDecoded == message
@@ -1,6 +1,7 @@
{.used.}

import
  testgossipsubfanout, testgossipsubgossip, testgossipsubmeshmanagement,
  testgossipsubmessagehandling, testgossipsubscoring, testfloodsub, testmcache,
  testtimedcache, testmessage
  testbehavior, testgossipsub, testgossipsubparams, testmcache, testmessage,
  testscoring, testtimedcache

import ./integration/testpubsubintegration

tests/pubsub/testscoring.nim (new file, 44 lines)
@@ -0,0 +1,44 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, pubsubpeer]
import ../../libp2p/muxers/muxer
import ../helpers

suite "GossipSub Scoring":
  teardown:
    checkTrackers()

  asyncTest "Disconnect bad peers":
    let topic = "foobar"
    var (gossipSub, conns, peers) =
      setupGossipSubWithPeers(30, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.parameters.disconnectBadPeers = true
    gossipSub.parameters.appSpecificWeight = 1.0

    for i, peer in peers:
      peer.appScore = gossipSub.parameters.graylistThreshold - 1
      let conn = conns[i]
      gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))

    gossipSub.updateScores()

    await sleepAsync(100.millis)

    check:
      # test our disconnect mechanics
      gossipSub.gossipsub.peers(topic) == 0
      # also ensure we cleanup properly the peersInIP table
      gossipSub.peersInIP.len == 0
@@ -4,8 +4,8 @@ const
  libp2p_pubsub_verify {.booldefine.} = true
  libp2p_pubsub_anonymize {.booldefine.} = false

import hashes, random, tables, sets, sequtils, sugar
import chronos, stew/[byteutils, results], chronos/ratelimit
import hashes, random, tables, sets, sequtils
import chronos, results, stew/byteutils, chronos/ratelimit
import
  ../../libp2p/[
    builders,
@@ -18,8 +18,9 @@ import
    protocols/pubsub/rpc/messages,
    protocols/secure/secure,
  ]
import ../helpers, ../utils/futures
import ../helpers
import chronicles
import metrics

export builders

@@ -32,6 +33,15 @@ const HEARTBEAT_TIMEOUT* = # TEST_GOSSIPSUB_HEARTBEAT_INTERVAL + 20%
proc waitForHeartbeat*(multiplier: int = 1) {.async.} =
  await sleepAsync(HEARTBEAT_TIMEOUT * multiplier)

proc waitForHeartbeat*(timeout: Duration) {.async.} =
  await sleepAsync(timeout)

proc waitForHeartbeatByEvent*[T: PubSub](node: T, multiplier: int = 1) {.async.} =
  for _ in 0 ..< multiplier:
    let evnt = newAsyncEvent()
    node.heartbeatEvents &= evnt
    await evnt.wait()

type
  TestGossipSub* = ref object of GossipSub
  DValues* = object
@@ -42,6 +52,21 @@ type
    dOut*: Option[int]
    dLazy*: Option[int]

proc noop*(data: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
  discard

proc voidTopicHandler*(topic: string, data: seq[byte]) {.async.} =
  discard

proc voidPeerHandler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
  discard

proc randomPeerId*(): PeerId =
  try:
    PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
  except CatchableError as exc:
    raise newException(Defect, exc.msg)

proc getPubSubPeer*(p: TestGossipSub, peerId: PeerId): PubSubPeer =
  proc getConn(): Future[Connection] {.
      async: (raises: [CancelledError, GetConnDialError])
@@ -61,11 +86,57 @@ proc getPubSubPeer*(p: TestGossipSub, peerId: PeerId): PubSubPeer =
  onNewPeer(p, pubSubPeer)
  pubSubPeer

proc randomPeerId*(): PeerId =
  try:
    PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
  except CatchableError as exc:
    raise newException(Defect, exc.msg)
proc setupGossipSubWithPeers*(
    numPeers: int,
    topics: seq[string],
    populateGossipsub: bool = false,
    populateMesh: bool = false,
    populateFanout: bool = false,
): (TestGossipSub, seq[Connection], seq[PubSubPeer]) =
  let gossipSub = TestGossipSub.init(newStandardSwitch())

  for topic in topics:
    gossipSub.subscribe(topic, voidTopicHandler)
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    gossipSub.fanout[topic] = initHashSet[PubSubPeer]()

  var conns = newSeq[Connection]()
  var peers = newSeq[PubSubPeer]()
  for i in 0 ..< numPeers:
    let conn = TestBufferStream.new(noop)
    conns &= conn
    let peerId = randomPeerId()
    conn.peerId = peerId
    let peer = gossipSub.getPubSubPeer(peerId)
    peer.sendConn = conn
    peer.handler = voidPeerHandler
    peers &= peer
    for topic in topics:
      if (populateGossipsub):
        gossipSub.gossipsub[topic].incl(peer)
      if (populateMesh):
        gossipSub.grafted(peer, topic)
        gossipSub.mesh[topic].incl(peer)
      if (populateFanout):
        gossipSub.fanout[topic].incl(peer)

  return (gossipSub, conns, peers)

proc setupGossipSubWithPeers*(
    numPeers: int,
    topic: string,
    populateGossipsub: bool = false,
    populateMesh: bool = false,
    populateFanout: bool = false,
): (TestGossipSub, seq[Connection], seq[PubSubPeer]) =
  return setupGossipSubWithPeers(
    numPeers, @[topic], populateGossipsub, populateMesh, populateFanout
  )

proc teardownGossipSub*(gossipSub: TestGossipSub, conns: seq[Connection]) {.async.} =
  await allFuturesThrowing(conns.mapIt(it.close()))

func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
  let mid =
@@ -78,7 +149,7 @@ func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
      $m.data.hash & $m.topic.hash
  ok mid.toBytes()

proc applyDValues(parameters: var GossipSubParams, dValues: Option[DValues]) =
proc applyDValues*(parameters: var GossipSubParams, dValues: Option[DValues]) =
  if dValues.isNone:
    return
  let values = dValues.get
@@ -107,6 +178,8 @@ proc generateNodes*(
    sign: bool = libp2p_pubsub_sign,
    sendSignedPeerRecord = false,
    unsubscribeBackoff = 1.seconds,
    pruneBackoff = 1.minutes,
    fanoutTTL = 1.minutes,
    maxMessageSize: int = 1024 * 1024,
    enablePX: bool = false,
    overheadRateLimit: Opt[tuple[bytes: int, interval: Duration]] =
@@ -117,6 +190,11 @@ proc generateNodes*(
    floodPublish: bool = false,
    dValues: Option[DValues] = DValues.none(),
    gossipFactor: Option[float] = float.none(),
    opportunisticGraftThreshold: float = 0.0,
    historyLength = 20,
    historyGossip = 5,
    gossipThreshold = -100.0,
    decayInterval = 1.seconds,
): seq[PubSub] =
  for i in 0 ..< num:
    let switch = newStandardSwitch(
@@ -136,12 +214,17 @@ proc generateNodes*(
        var p = GossipSubParams.init()
        p.heartbeatInterval = heartbeatInterval
        p.floodPublish = floodPublish
        p.historyLength = 20
        p.historyGossip = 20
        p.historyLength = historyLength
        p.historyGossip = historyGossip
        p.unsubscribeBackoff = unsubscribeBackoff
        p.pruneBackoff = pruneBackoff
        p.fanoutTTL = fanoutTTL
        p.enablePX = enablePX
        p.overheadRateLimit = overheadRateLimit
        p.sendIDontWantOnPublish = sendIDontWantOnPublish
        p.opportunisticGraftThreshold = opportunisticGraftThreshold
        p.gossipThreshold = gossipThreshold
        p.decayInterval = decayInterval
        if gossipFactor.isSome: p.gossipFactor = gossipFactor.get
        applyDValues(p, dValues)
        p
@@ -168,18 +251,33 @@ proc generateNodes*(
    switch.mount(pubsub)
    result.add(pubsub)

proc connectNodes*(dialer: PubSub, target: PubSub) {.async.} =
proc toGossipSub*(nodes: seq[PubSub]): seq[GossipSub] =
  return nodes.mapIt(GossipSub(it))

proc getNodeByPeerId*[T: PubSub](nodes: seq[T], peerId: PeerId): GossipSub =
  let filteredNodes = nodes.filterIt(it.peerInfo.peerId == peerId)
  check:
    filteredNodes.len == 1
  return filteredNodes[0]

proc getPeerByPeerId*[T: PubSub](node: T, topic: string, peerId: PeerId): PubSubPeer =
  let filteredPeers = node.gossipsub[topic].toSeq().filterIt(it.peerId == peerId)
  check:
    filteredPeers.len == 1
  return filteredPeers[0]

proc connectNodes*[T: PubSub](dialer: T, target: T) {.async.} =
  doAssert dialer.switch.peerInfo.peerId != target.switch.peerInfo.peerId,
    "Could not connect same peer"
  await dialer.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)

proc connectNodesStar*(nodes: seq[PubSub]) {.async.} =
proc connectNodesStar*[T: PubSub](nodes: seq[T]) {.async.} =
  for dialer in nodes:
    for node in nodes:
      if dialer.switch.peerInfo.peerId != node.switch.peerInfo.peerId:
        await connectNodes(dialer, node)

proc connectNodesSparse*(nodes: seq[PubSub], degree: int = 2) {.async.} =
proc connectNodesSparse*[T: PubSub](nodes: seq[T], degree: int = 2) {.async.} =
  if nodes.len < degree:
    raise
      (ref CatchableError)(msg: "nodes count needs to be greater or equal to degree!")
@@ -192,34 +290,18 @@ proc connectNodesSparse*(nodes: seq[PubSub], degree: int = 2) {.async.} =
      if dialer.switch.peerInfo.peerId != node.switch.peerInfo.peerId:
        await connectNodes(dialer, node)

proc activeWait(
    interval: Duration, maximum: Moment, timeoutErrorMessage = "Timeout on activeWait"
) {.async.} =
  await sleepAsync(interval)
  doAssert Moment.now() < maximum, timeoutErrorMessage

proc waitSub*(sender, receiver: auto, key: string) {.async.} =
  if sender == receiver:
    return
  let timeout = Moment.now() + 5.seconds
  let fsub = GossipSub(sender)
  let peerId = receiver.peerInfo.peerId

  # this is for testing purposes only
  # peers can be inside `mesh` and `fanout`, not just `gossipsub`
  while (
    not fsub.gossipsub.hasKey(key) or
    not fsub.gossipsub.hasPeerId(key, receiver.peerInfo.peerId)
  ) and
    (
      not fsub.mesh.hasKey(key) or
      not fsub.mesh.hasPeerId(key, receiver.peerInfo.peerId)
    ) and (
      not fsub.fanout.hasKey(key) or
      not fsub.fanout.hasPeerId(key, receiver.peerInfo.peerId)
    )
  :
    trace "waitSub sleeping..."
    await activeWait(5.milliseconds, timeout, "waitSub timeout!")
  checkUntilTimeout:
    (fsub.gossipsub.hasKey(key) and fsub.gossipsub.hasPeerId(key, peerId)) or
      (fsub.mesh.hasKey(key) and fsub.mesh.hasPeerId(key, peerId)) or
      (fsub.fanout.hasKey(key) and fsub.fanout.hasPeerId(key, peerId))

proc waitSubAllNodes*(nodes: seq[auto], topic: string) {.async.} =
  let numberOfNodes = nodes.len
@@ -228,9 +310,8 @@ proc waitSubAllNodes*(nodes: seq[auto], topic: string) {.async.} =
      if x != y:
        await waitSub(nodes[x], nodes[y], topic)

proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
  let timeout = Moment.now() + 5.seconds
  while true:
proc waitSubGraph*[T: PubSub](nodes: seq[T], key: string) {.async.} =
  proc isGraphConnected(): bool =
    var
      nodesMesh: Table[PeerId, seq[PeerId]]
      seen: HashSet[PeerId]
@@ -250,10 +331,11 @@ proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
      explore(n.peerInfo.peerId)
      if seen.len == nodes.len:
        ok.inc()
    if ok == nodes.len:
      return
    trace "waitSubGraph sleeping..."
    await activeWait(5.milliseconds, timeout, "waitSubGraph timeout!")
    return ok == nodes.len

  checkUntilTimeout:
    isGraphConnected()

proc waitForMesh*(
    sender: auto, receiver: auto, key: string, timeoutDuration = 5.seconds
@@ -262,85 +344,37 @@ proc waitForMesh*(
    return

  let
    timeoutMoment = Moment.now() + timeoutDuration
    gossipsubSender = GossipSub(sender)
    receiverPeerId = receiver.peerInfo.peerId

  while not gossipsubSender.mesh.hasPeerId(key, receiverPeerId):
    trace "waitForMesh sleeping..."
    await activeWait(5.milliseconds, timeoutMoment, "waitForMesh timeout!")
  checkUntilTimeout:
    gossipsubSender.mesh.hasPeerId(key, receiverPeerId)

type PeerTableType* {.pure.} = enum
  Gossipsub = "gossipsub"
  Mesh = "mesh"
  Fanout = "fanout"

proc waitForPeersInTable*(
    nodes: seq[auto],
    topic: string,
    peerCounts: seq[int],
    table: PeerTableType,
    timeout = 5.seconds,
) {.async.} =
  ## Wait until each node in `nodes` has at least the corresponding number of peers from `peerCounts`
  ## in the specified table (mesh, gossipsub, or fanout) for the given topic

  doAssert nodes.len == peerCounts.len, "Node count must match peer count expectations"

  # Helper proc to check current state and update satisfaction status
  proc checkState(
      nodes: seq[auto],
      topic: string,
      peerCounts: seq[int],
      table: PeerTableType,
      satisfied: var seq[bool],
  ): bool =
    for i in 0 ..< nodes.len:
      if not satisfied[i]:
        let fsub = GossipSub(nodes[i])
        let currentCount =
          case table
          of PeerTableType.Mesh:
            fsub.mesh.getOrDefault(topic).len
          of PeerTableType.Gossipsub:
            fsub.gossipsub.getOrDefault(topic).len
          of PeerTableType.Fanout:
            fsub.fanout.getOrDefault(topic).len
        satisfied[i] = currentCount >= peerCounts[i]
    return satisfied.allIt(it)

  let timeoutMoment = Moment.now() + timeout
  var
    satisfied = newSeq[bool](nodes.len)
    allSatisfied = false

  allSatisfied = checkState(nodes, topic, peerCounts, table, satisfied) # Initial check
  # Continue checking until all requirements are met or timeout
  while not allSatisfied:
    await activeWait(
      5.milliseconds,
      timeoutMoment,
      "Timeout waiting for peer counts in " & $table & " for topic " & topic,
    )
    allSatisfied = checkState(nodes, topic, peerCounts, table, satisfied)

proc startNodes*(nodes: seq[PubSub]) {.async.} =
proc startNodes*[T: PubSub](nodes: seq[T]) {.async.} =
  await allFuturesThrowing(nodes.mapIt(it.switch.start()))

proc stopNodes*(nodes: seq[PubSub]) {.async.} =
proc stopNodes*[T: PubSub](nodes: seq[T]) {.async.} =
  await allFuturesThrowing(nodes.mapIt(it.switch.stop()))

template startNodesAndDeferStop*(nodes: seq[PubSub]): untyped =
template startNodesAndDeferStop*[T: PubSub](nodes: seq[T]): untyped =
  await startNodes(nodes)
  defer:
    await stopNodes(nodes)

proc subscribeAllNodes*(nodes: seq[PubSub], topic: string, topicHandler: TopicHandler) =
proc subscribeAllNodes*[T: PubSub](
    nodes: seq[T], topic: string, topicHandler: TopicHandler
) =
  for node in nodes:
    node.subscribe(topic, topicHandler)

proc subscribeAllNodes*(
    nodes: seq[PubSub], topic: string, topicHandlers: seq[TopicHandler]
proc unsubscribeAllNodes*[T: PubSub](
    nodes: seq[T], topic: string, topicHandler: TopicHandler
) =
  for node in nodes:
    node.unsubscribe(topic, topicHandler)

proc subscribeAllNodes*[T: PubSub](
    nodes: seq[T], topic: string, topicHandlers: seq[TopicHandler]
) =
  if nodes.len != topicHandlers.len:
    raise (ref CatchableError)(msg: "nodes and topicHandlers count needs to match!")
@@ -360,12 +394,6 @@ template tryPublish*(

  doAssert pubs >= require, "Failed to publish!"

proc noop*(data: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
  discard

proc voidTopicHandler*(topic: string, data: seq[byte]) {.async.} =
  discard

proc createCompleteHandler*(): (
  Future[bool], proc(topic: string, data: seq[byte]) {.async.}
) =
@@ -375,37 +403,102 @@ proc createCompleteHandler*(): (

  return (fut, handler)

proc addIHaveObservers*(nodes: seq[auto], topic: string, receivedIHaves: ref seq[int]) =
proc createCheckForMessages*(): (
  ref seq[Message], proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
) =
  var messages = new seq[Message]
  let checkForMessage = proc(
      peer: PubSubPeer, msgs: var RPCMsg
  ) {.gcsafe, raises: [].} =
    for message in msgs.messages:
      messages[].add(message)

  return (messages, checkForMessage)

proc createCheckForIHave*(): (
  ref seq[ControlIHave], proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
) =
  var messages = new seq[ControlIHave]
  let checkForMessage = proc(
      peer: PubSubPeer, msgs: var RPCMsg
  ) {.gcsafe, raises: [].} =
    if msgs.control.isSome:
      for msg in msgs.control.get.ihave:
        messages[].add(msg)

  return (messages, checkForMessage)

proc createCheckForIWant*(): (
  ref seq[ControlIWant], proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
) =
  var messages = new seq[ControlIWant]
  let checkForMessage = proc(
      peer: PubSubPeer, msgs: var RPCMsg
  ) {.gcsafe, raises: [].} =
    if msgs.control.isSome:
      for msg in msgs.control.get.iwant:
        messages[].add(msg)

  return (messages, checkForMessage)

proc createCheckForIDontWant*(): (
  ref seq[ControlIWant], proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
) =
  var messages = new seq[ControlIWant]
  let checkForMessage = proc(
      peer: PubSubPeer, msgs: var RPCMsg
  ) {.gcsafe, raises: [].} =
    if msgs.control.isSome:
      for msg in msgs.control.get.idontwant:
        messages[].add(msg)

  return (messages, checkForMessage)

proc addOnRecvObserver*[T: PubSub](
    node: T, handler: proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
) =
  let pubsubObserver = PubSubObserver(onRecv: handler)
  node.addObserver(pubsubObserver)

proc addIHaveObservers*[T: PubSub](nodes: seq[T]): (ref seq[ref seq[ControlIHave]]) =
  let numberOfNodes = nodes.len
  receivedIHaves[] = repeat(0, numberOfNodes)
  var allMessages = new seq[ref seq[ControlIHave]]
  allMessages[].setLen(numberOfNodes)

  for i in 0 ..< numberOfNodes:
    var pubsubObserver: PubSubObserver
    capture i:
      let checkForIhaves = proc(peer: PubSubPeer, msgs: var RPCMsg) =
        if msgs.control.isSome:
          let iHave = msgs.control.get.ihave
          if iHave.len > 0:
            for msg in iHave:
              if msg.topicID == topic:
                receivedIHaves[i] += 1
      pubsubObserver = PubSubObserver(onRecv: checkForIhaves)
      nodes[i].addObserver(pubsubObserver)
    var (messages, checkForMessage) = createCheckForIHave()
    nodes[i].addOnRecvObserver(checkForMessage)
    allMessages[i] = messages

proc addIDontWantObservers*(nodes: seq[auto], receivedIDontWants: ref seq[int]) =
  return allMessages

proc addIDontWantObservers*[T: PubSub](
    nodes: seq[T]
): (ref seq[ref seq[ControlIWant]]) =
  let numberOfNodes = nodes.len
  receivedIDontWants[] = repeat(0, numberOfNodes)
  var allMessages = new seq[ref seq[ControlIWant]]
  allMessages[].setLen(numberOfNodes)

  for i in 0 ..< numberOfNodes:
    var pubsubObserver: PubSubObserver
    capture i:
      let checkForIDontWant = proc(peer: PubSubPeer, msgs: var RPCMsg) =
        if msgs.control.isSome:
          let iDontWant = msgs.control.get.idontwant
          if iDontWant.len > 0:
            receivedIDontWants[i] += 1
      pubsubObserver = PubSubObserver(onRecv: checkForIDontWant)
      nodes[i].addObserver(pubsubObserver)
    var (messages, checkForMessage) = createCheckForIDontWant()
    nodes[i].addOnRecvObserver(checkForMessage)
    allMessages[i] = messages

  return allMessages

proc findAndUnsubscribePeers*[T: PubSub](
    nodes: seq[T], peers: seq[PeerId], topic: string, handler: TopicHandler
) =
  for i in 0 ..< nodes.len:
    let node = nodes[i]
    if peers.anyIt(it == node.peerInfo.peerId):
      node.unsubscribe(topic, voidTopicHandler)

proc clearMCache*[T: PubSub](node: T) =
  node.mcache.msgs.clear()
  for i in 0 ..< node.mcache.history.len:
    node.mcache.history[i].setLen(0)
  node.mcache.pos = 0

# TODO: refactor helper methods from testgossipsub.nim
proc setupNodes*(count: int): seq[PubSub] =
@@ -448,3 +541,22 @@ proc baseTestProcedure*(

proc `$`*(peer: PubSubPeer): string =
  shortLog(peer)

proc currentRateLimitHits*(): float64 =
  try:
    libp2p_gossipsub_peers_rate_limit_hits.valueByName(
      "libp2p_gossipsub_peers_rate_limit_hits_total", @["nim-libp2p"]
    )
  except KeyError:
    0

proc addDirectPeer*[T: PubSub](node: T, target: T) {.async.} =
  doAssert node.switch.peerInfo.peerId != target.switch.peerInfo.peerId,
    "Could not add same peer"
  await node.addDirectPeer(target.switch.peerInfo.peerId, target.switch.peerInfo.addrs)

proc addDirectPeerStar*[T: PubSub](nodes: seq[T]) {.async.} =
  for node in nodes:
    for target in nodes:
      if node.switch.peerInfo.peerId != target.switch.peerInfo.peerId:
        await addDirectPeer(node, target)
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import std/net
|
||||
import tables
|
||||
import chronos, stew/[byteutils, endians2, shims/net]
|
||||
import chronos, stew/[byteutils, endians2]
|
||||
import
|
||||
../../libp2p/[
|
||||
stream/connection,
|
||||
@@ -62,7 +63,8 @@ proc start*(self: TorServerStub, address: TransportAddress) {.async.} =
|
||||
var ip: array[4, byte]
|
||||
for i, e in msg[0 ..^ 3]:
|
||||
ip[i] = e
|
||||
$(ipv4(ip)) & ":" & $(Port(fromBytesBE(uint16, msg[^2 ..^ 1])))
|
||||
$(IpAddress(family: IPv4, address_v4: ip)) & ":" &
|
||||
$(Port(fromBytesBE(uint16, msg[^2 ..^ 1])))
|
||||
of Socks5AddressType.IPv6.byte:
|
||||
let n = 16 + 2 # +2 bytes for the port
|
||||
msg = newSeq[byte](n) # +2 bytes for the port
|
||||
@@ -70,7 +72,8 @@ proc start*(self: TorServerStub, address: TransportAddress) {.async.} =
|
||||
var ip: array[16, byte]
|
||||
for i, e in msg[0 ..^ 3]:
|
||||
ip[i] = e
|
||||
$(ipv6(ip)) & ":" & $(Port(fromBytesBE(uint16, msg[^2 ..^ 1])))
|
||||
$(IpAddress(family: IPv6, address_v6: ip)) & ":" &
|
||||
$(Port(fromBytesBE(uint16, msg[^2 ..^ 1])))
|
||||
of Socks5AddressType.FQDN.byte:
|
||||
await connSrc.readExactly(addr msg[0], 1)
|
||||
let n = int(uint8.fromBytes(msg[0 .. 0])) + 2 # +2 bytes for the port
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
{.used.}
|
||||
|
||||
import testnative, testdaemon, ./pubsub/testpubsub, testinterop
|
||||
import testnative, ./pubsub/testpubsub, testinterop, testdaemon
|
||||
|
||||