Mirror of https://github.com/vacp2p/nim-libp2p.git
Synced 2026-01-10 11:48:15 -05:00

Compare commits: v1.9.0 ... tmp-add-mo (64 commits)
| SHA1 |
|---|
| 4ee1d70d7d |
| cd60b254a0 |
| b88cdcdd4b |
| 4a5e06cb45 |
| fff3a7ad1f |
| 05c894d487 |
| 8850e9ccd9 |
| 2746531851 |
| 2856db5490 |
| b29e78ccae |
| c9761c3588 |
| e4ef21e07c |
| 61429aa0d6 |
| c1ef011556 |
| cd1424c09f |
| 878d627f93 |
| 1d6385ddc5 |
| 873f730b4e |
| 1c1547b137 |
| 9997f3e3d3 |
| 4d0b4ecc22 |
| ccb24b5f1f |
| 5cb493439d |
| 24b284240a |
| b0f77d24f9 |
| e32ac492d3 |
| 470a7f8cc5 |
| b269fce289 |
| bc4febe92c |
| b5f9bfe0f4 |
| 4ce1e8119b |
| 65136b38e2 |
| ffc114e8d9 |
| f2be2d6ed5 |
| ab690a06a6 |
| 10cdaf14c5 |
| ebbfb63c17 |
| ac25da6cea |
| fb41972ba3 |
| 504d1618af |
| 0f91b23f12 |
| 5ddd62a8b9 |
| e7f13a7e73 |
| 89e825fb0d |
| 1b706e84fa |
| 5cafcb70dc |
| 8c71266058 |
| 9c986c5c13 |
| 3d0451d7f2 |
| b1f65c97ae |
| 5584809fca |
| 7586f17b15 |
| 0e16d873c8 |
| b11acd2118 |
| 1376f5b077 |
| 340ea05ae5 |
| 024ec51f66 |
| efe453df87 |
| c0f4d903ba |
| 28f2b268ae |
| 5abb6916b6 |
| e6aec94c0c |
| 9eddc7c662 |
| 028c730a4f |
.github/CODEOWNERS (vendored, new file, 1 line)

@@ -0,0 +1 @@
* @vacp2p/p2p
.github/workflows/ci.yml (vendored, 14 changes)

@@ -14,7 +14,7 @@ concurrency:
 jobs:
   test:
-    timeout-minutes: 90
+    timeout-minutes: 40
     strategy:
       fail-fast: false
       matrix:
@@ -36,6 +36,8 @@ jobs:
           memory_management: refc
         - ref: version-2-0
           memory_management: refc
+        - ref: version-2-2
+          memory_management: refc
       include:
         - platform:
             os: linux
@@ -96,15 +98,9 @@ jobs:
           # The change happened on Nimble v0.14.0. Also forcing the deps to be reinstalled on each os and cpu.
           key: nimbledeps-${{ matrix.nim.ref }}-${{ matrix.builder }}-${{ matrix.platform.cpu }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows

-      - name: Setup python
-        run: |
-          mkdir .venv
-          python -m venv .venv
-
       - name: Install deps
         if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
         run: |
-          source .venv/bin/activate
           nimble install_pinned

       - name: Use gcc 14
@@ -118,11 +114,9 @@ jobs:

      - name: Run tests
        run: |
-          source .venv/bin/activate
-
          nim --version
          nimble --version
          gcc --version

-          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
+          export NIMFLAGS="${NIMFLAGS} -d:libp2p_quic_support --mm:${{ matrix.nim.memory_management }}"
          nimble test
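The updated test step can be reproduced locally; a minimal sketch assuming a checkout of this repository with a working Nim toolchain (the `refc` choice stands in for the workflow's `matrix.nim.memory_management` value):

```bash
# Reproduce the CI test step locally:
nimble install_pinned   # dependency versions from .pinned
export NIMFLAGS="${NIMFLAGS} -d:libp2p_quic_support --mm:refc"
nimble test
```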
.github/workflows/coverage.yml (vendored, 2 changes)

@@ -51,7 +51,7 @@ jobs:

       - name: Run test suite with coverage flags
         run: |
-          export NIMFLAGS="--lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage"
+          export NIMFLAGS="-d:libp2p_quic_support --lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage"
           nimble testnative
           nimble testpubsub
           nimble testfilter
.github/workflows/daily_common.yml (vendored, 4 changes)

@@ -36,7 +36,7 @@ jobs:

   test:
     needs: delete_cache
-    timeout-minutes: 90
+    timeout-minutes: 40
     strategy:
       fail-fast: false
       matrix:
@@ -97,5 +97,5 @@ jobs:
            dependency_solver="legacy"
          fi

-          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }} --solver:${dependency_solver}"
+          export NIMFLAGS="${NIMFLAGS} -d:libp2p_quic_support --mm:${{ matrix.nim.memory_management }} --solver:${dependency_solver}"
          nimble test
.github/workflows/dependencies.yml (vendored, 5 changes)

@@ -17,10 +17,13 @@ jobs:
         target:
           - repository: status-im/nimbus-eth2
             ref: unstable
+            token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NIMBUS_ETH2 }}
           - repository: waku-org/nwaku
             ref: master
+            token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NWAKU }}
           - repository: codex-storage/nim-codex
             ref: master
+            token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NIM_CODEX }}
     steps:
       - name: Clone target repository
         uses: actions/checkout@v4
@@ -29,7 +32,7 @@ jobs:
           ref: ${{ matrix.target.ref}}
           path: nbc
           fetch-depth: 0
-          token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
+          token: ${{ matrix.target.token }}

       - name: Checkout this ref in target repository
         run: |
.github/workflows/examples.yml (vendored, new file, 60 lines)

@@ -0,0 +1,60 @@
name: Examples

on:
  push:
    branches:
      - master
  pull_request:
  merge_group:
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  examples:
    timeout-minutes: 30
    strategy:
      fail-fast: false

    defaults:
      run:
        shell: bash

    name: "Build Examples"
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: true

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          shell: bash
          os: linux
          cpu: amd64
          nim_ref: version-1-6

      - name: Restore deps from cache
        id: deps-cache
        uses: actions/cache@v3
        with:
          path: nimbledeps
          key: nimbledeps-${{ hashFiles('.pinned') }}

      - name: Install deps
        if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
        run: |
          nimble install_pinned

      - name: Build and run examples
        run: |
          nim --version
          nimble --version
          gcc --version

          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
          nimble examples
.gitignore (vendored, 8 additions)

@@ -17,3 +17,11 @@ examples/*.md
 nimble.develop
 nimble.paths
 go-libp2p-daemon/
+
+# Ignore all test build files in tests folder (auto generated when running tests).
+# First rule (`tests/**/test*[^.]*`) will ignore all binaries: has prefix test + does not have dot in name.
+# Second and third rules are here to un-ignore all files with an extension and the Dockerfile,
+# because it appears that VS Code skips text search in some test files without these rules.
+tests/**/test*[^.]*
+!tests/**/*.*
+!tests/**/Dockerfile
.pinned (29 changes)

@@ -1,20 +1,19 @@
-bearssl;https://github.com/status-im/nim-bearssl@#667b40440a53a58e9f922e29e20818720c62d9ac
+bearssl;https://github.com/status-im/nim-bearssl@#34d712933a4e0f91f5e66bc848594a581504a215
 chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
 chronos;https://github.com/status-im/nim-chronos@#c04576d829b8a0a1b12baaa8bc92037501b3a4a0
 dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
-faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
-httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
-json_serialization;https://github.com/status-im/nim-json-serialization@#85b7ea093cb85ee4f433a617b97571bd709d30df
-mbedtls;https://github.com/status-im/nim-mbedtls.git@#740fb2f469511adc1772c5cb32395f4076b9e0c5
+faststreams;https://github.com/status-im/nim-faststreams@#c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d
+httputils;https://github.com/status-im/nim-http-utils@#79cbab1460f4c0cdde2084589d017c43a3d7b4f1
+json_serialization;https://github.com/status-im/nim-json-serialization@#2b1c5eb11df3647a2cee107cd4cce3593cbb8bcf
 metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
-ngtcp2;https://github.com/status-im/nim-ngtcp2@#6834f4756b6af58356ac9c4fef3d71db3c3ae5fe
-nimcrypto;https://github.com/cheatfate/nimcrypto@#1c8d6e3caf3abc572136ae9a1da81730c4eb4288
-quic;https://github.com/status-im/nim-quic.git@#ddcb31ffb74b5460ab37fd13547eca90594248bc
-results;https://github.com/arnetheduck/nim-results@#f3c666a272c69d70cb41e7245e7f6844797303ad
-secp256k1;https://github.com/status-im/nim-secp256k1@#7246d91c667f4cc3759fdd50339caa45a2ecd8be
-serialization;https://github.com/status-im/nim-serialization@#4bdbc29e54fe54049950e352bb969aab97173b35
-stew;https://github.com/status-im/nim-stew@#3159137d9a3110edb4024145ce0ba778975de40e
-testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
-unittest2;https://github.com/status-im/nim-unittest2@#2300fa9924a76e6c96bc4ea79d043e3a0f27120c
+ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
+nimcrypto;https://github.com/cheatfate/nimcrypto@#19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
+quic;https://github.com/status-im/nim-quic.git@#a6c30263c95fc5ddb2ef4d197c09b282555c06b0
+results;https://github.com/arnetheduck/nim-results@#df8113dda4c2d74d460a8fa98252b0b771bf1f27
+secp256k1;https://github.com/status-im/nim-secp256k1@#f808ed5e7a7bfc42204ec7830f14b7a42b63c284
+serialization;https://github.com/status-im/nim-serialization@#548d0adc9797a10b2db7f788b804330306293088
+stew;https://github.com/status-im/nim-stew@#0db179256cf98eb9ce9ee7b9bc939f219e621f77
+testutils;https://github.com/status-im/nim-testutils@#9e842bd58420d23044bc55e16088e8abbe93ce51
+unittest2;https://github.com/status-im/nim-unittest2@#8b51e99b4a57fcfb31689230e75595f024543024
 websock;https://github.com/status-im/nim-websock@#f8ed9b40a5ff27ad02a3c237c4905b0924e3f982
-zlib;https://github.com/status-im/nim-zlib@#38b72eda9d70067df4a953f56b5ed59630f2a17b
+zlib;https://github.com/status-im/nim-zlib@#daa8723fd32299d4ca621c837430c29a5a11e19a
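Each `.pinned` entry has the form `name;url@#commit`. The workflows above restore exactly these versions with the repository's own nimble task before running anything:

```bash
# Install the dependency commits recorded in .pinned,
# the same step CI runs before `nimble test`:
nimble install_pinned
```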
README.md (24 changes)

@@ -20,7 +20,7 @@
 - [Background](#background)
 - [Install](#install)
 - [Getting Started](#getting-started)
-- [Go-libp2p-daemon](#go-libp2p-daemon)
+- [Testing](#testing)
 - [Modules](#modules)
 - [Users](#users)
 - [Stability](#stability)
@@ -33,22 +33,22 @@
 ## Background
 libp2p is a [Peer-to-Peer](https://en.wikipedia.org/wiki/Peer-to-peer) networking stack, with [implementations](https://github.com/libp2p/libp2p#implementations) in multiple languages derived from the same [specifications.](https://github.com/libp2p/specs)

-Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It's striving to be a modular stack, with sane and secure defaults, useful protocols, while remain open and extensible.
-This implementation in native Nim, relying on [chronos](https://github.com/status-im/nim-chronos) for async. It's used in production by a few [projects](#users)
+Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It strives to be a modular stack with secure defaults and useful protocols, while remaining open and extensible.
+This is a native Nim implementation, using [chronos](https://github.com/status-im/nim-chronos) for asynchronous execution. It's used in production by a few [projects](#users)

 Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p's documentation [**docs.libp2p.io**](https://docs.libp2p.io).

 ## Install
 **Prerequisite**
 - [Nim](https://nim-lang.org/install.html)
-> The currently supported Nim version is 1.6.18.
+> The currently supported Nim versions are 1.6, 2.0 and 2.2.

 ```
 nimble install libp2p
 ```

 ## Getting Started
-You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/).
+You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/). See [examples](./examples) for simple usage patterns.

 ### Testing
 Remember you'll need to build the `go-libp2p-daemon` binary to run the `nim-libp2p` tests.
@@ -70,6 +70,8 @@ List of packages modules implemented in nim-libp2p:
 | [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
 | [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
 | [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
+| [libp2p-quic](libp2p/transports/quictransport.nim) | Quic Transport |
+| [libp2p-memory](libp2p/transports/memorytransport.nim) | Memory Transport |
 | **Secure Channels** | |
 | [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
 | [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
@@ -78,10 +80,10 @@ List of packages modules implemented in nim-libp2p:
 | [libp2p-yamux](libp2p/muxers/yamux/yamux.nim) | [Yamux](https://docs.libp2p.io/concepts/multiplex/yamux/) multiplexer |
 | **Data Types** | |
 | [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id) |
-| [peer-store](libp2p/peerstore.nim) | ["Address book" of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
+| [peer-store](libp2p/peerstore.nim) | [Address book of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
 | [multiaddress](libp2p/multiaddress.nim) | [Composable network addresses](https://github.com/multiformats/multiaddr) |
-| [signed envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
-| [routing record](libp2p/routing_record.nim) | [Signed peer dialing informations](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
+| [signed-envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
+| [routing-record](libp2p/routing_record.nim) | [Signed peer dialing informations](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
 | [discovery manager](libp2p/discovery/discoverymngr.nim) | Discovery Manager |
 | **Utilities** | |
 | [libp2p-crypto](libp2p/crypto) | Cryptographic backend |
@@ -147,12 +149,18 @@ The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-st
     <tr>
       <td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
       <td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
+      <td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
     </tr>
   </tbody>
 </table>

+### Compile time flags
+
+Enable quic transport support
+```bash
+nim c -d:libp2p_quic_support some_file.nim
+```
+
 Enable expensive metrics (ie, metrics with per-peer cardinality):
 ```bash
 nim c -d:libp2p_expensive_metrics some_file.nim
 ```
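Both flags can also be set once in a project-level `nim.cfg` so every build picks them up. A minimal sketch; the file placement follows standard Nim convention rather than anything this diff prescribes:

```
# nim.cfg (project root), applied to every nim invocation in this directory
-d:libp2p_quic_support
-d:libp2p_expensive_metrics
```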
examples/examples_build.nim (new file, 3 lines)

@@ -0,0 +1,3 @@
{.used.}

import directchat, tutorial_6_game
examples/examples_run.nim (new file, 5 lines)

@@ -0,0 +1,5 @@
{.used.}

import
  helloworld, circuitrelay, tutorial_1_connect, tutorial_2_customproto,
  tutorial_3_protobuf, tutorial_4_gossipsub, tutorial_5_discovery
@@ -93,8 +93,8 @@ proc serveThread(udata: CustomData) {.async.} =
       pending.add(item.write(msg))
     if len(pending) > 0:
       var results = await all(pending)
-  except:
-    echo getCurrentException().msg
+  except CatchableError as err:
+    echo err.msg

 proc main() {.async.} =
   var data = new CustomData
@@ -158,8 +158,8 @@ waitFor(main())
 ## This is John receiving & logging everyone's metrics.
 ##
 ## ## Going further
-## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
-## and [topic params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
+## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
+## and [topic params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
 ## you can achieve very different properties.
 ##
 ## Also see reports for [GossipSub v1.1](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4)
@@ -17,7 +17,7 @@ when defined(nimdoc):
 ## stay backward compatible during the Major version, whereas private ones can
 ## change at each new Minor version.
 ##
-## If you're new to nim-libp2p, you can find a tutorial `here<https://status-im.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
+## If you're new to nim-libp2p, you can find a tutorial `here<https://vacp2p.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
 ## that can help you get started.

 # Import stuff for doc
@@ -52,7 +52,6 @@ else:
     stream/connection,
     transports/transport,
     transports/tcptransport,
-    transports/quictransport,
     protocols/secure/noise,
     cid,
     multihash,
@@ -71,3 +70,7 @@ else:
     minprotobuf, switch, peerid, peerinfo, connection, multiaddress, crypto, lpstream,
     bufferstream, muxer, mplex, transport, tcptransport, noise, errors, cid, multihash,
     multicodec, builders, pubsub
+
+when defined(libp2p_quic_support):
+  import libp2p/transports/quictransport
+  export quictransport
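After this change, plain `import libp2p` re-exports `QuicTransport` only when the flag is defined. A hypothetical check, assuming a program compiled with or without the flag:

```nim
# compile with: nim c -d:libp2p_quic_support quic_check.nim
import libp2p

when defined(libp2p_quic_support):
  # QuicTransport becomes visible through the plain `libp2p` import
  echo QuicTransport is Transport # compile-time check that the symbol resolves
else:
  echo "built without QUIC support"
```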
libp2p.nimble

@@ -1,7 +1,7 @@
 mode = ScriptMode.Verbose

 packageName = "libp2p"
-version = "1.9.0"
+version = "1.10.1"
 author = "Status Research & Development GmbH"
 description = "LibP2P implementation"
 license = "MIT"
@@ -9,10 +9,9 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]

 requires "nim >= 1.6.0",
   "nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
-  "chronicles >= 0.10.2", "chronos >= 4.0.3", "metrics", "secp256k1", "stew#head",
-  "websock", "unittest2",
-  "https://github.com/status-im/nim-quic.git#ddcb31ffb74b5460ab37fd13547eca90594248bc",
-  "https://github.com/status-im/nim-mbedtls.git"
+  "chronicles >= 0.10.2", "chronos >= 4.0.3", "metrics", "secp256k1", "stew >= 0.4.0",
+  "websock", "unittest2", "results",
+  "https://github.com/status-im/nim-quic.git#a6c30263c95fc5ddb2ef4d197c09b282555c06b0"

 let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
 let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -26,12 +25,8 @@ let cfg =

 import hashes, strutils

-proc runTest(
-    filename: string, verify: bool = true, sign: bool = true, moreoptions: string = ""
-) =
+proc runTest(filename: string, moreoptions: string = "") =
   var excstr = nimc & " " & lang & " -d:debug " & cfg & " " & flags
-  excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
-  excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
   excstr.add(" " & moreoptions & " ")
   if getEnv("CICOV").len > 0:
     excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
@@ -61,51 +56,15 @@ task testinterop, "Runs interop tests":
  runTest("testinterop")

task testpubsub, "Runs pubsub tests":
  runTest(
    "pubsub/testgossipinternal",
    sign = false,
    verify = false,
    moreoptions = "-d:pubsub_internal_testing",
  )
  runTest("pubsub/testpubsub")
  runTest("pubsub/testpubsub", sign = false, verify = false)
  runTest(
    "pubsub/testpubsub",
    sign = false,
    verify = false,
    moreoptions = "-d:libp2p_pubsub_anonymize=true",
  )

task testpubsub_slim, "Runs pubsub tests":
  runTest(
    "pubsub/testgossipinternal",
    sign = false,
    verify = false,
    moreoptions = "-d:pubsub_internal_testing",
  )
  runTest("pubsub/testpubsub")

task testfilter, "Run PKI filter test":
  runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1\"")
  runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519\"")
  runTest(
    "testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519;ecnist\""
  )
  runTest("testpkifilter")
  runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=")

task test, "Runs the test suite":
  exec "nimble testnative"
  exec "nimble testpubsub"
  exec "nimble testdaemon"
  exec "nimble testinterop"
  runTest("testall")
  exec "nimble testfilter"
  exec "nimble examples_build"

task test_slim, "Runs the (slimmed down) test suite":
  exec "nimble testnative"
  exec "nimble testpubsub_slim"
  exec "nimble testfilter"
  exec "nimble examples_build"

task website, "Build the website":
  tutorialToMd("examples/tutorial_1_connect.nim")
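The tasks above are invoked through nimble; a quick sketch of the common entry points that survive this reorganization (run from the repository root with dependencies installed):

```bash
nimble test        # aggregated suite, via runTest("testall")
nimble testpubsub  # pubsub tests only
nimble testfilter  # PKI scheme filter matrix
nimble examples    # build and run the example programs
```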
@@ -117,18 +76,12 @@ task website, "Build the website":
   tutorialToMd("examples/circuitrelay.nim")
   exec "mkdocs build"

-task examples_build, "Build the samples":
-  buildSample("directchat")
-  buildSample("helloworld", true)
-  buildSample("circuitrelay", true)
-  buildSample("tutorial_1_connect", true)
-  buildSample("tutorial_2_customproto", true)
-  buildSample("tutorial_3_protobuf", true)
-  buildSample("tutorial_4_gossipsub", true)
-  buildSample("tutorial_5_discovery", true)
+task examples, "Build and run examples":
   exec "nimble install -y nimpng"
   exec "nimble install -y nico --passNim=--skipParentCfg"
   buildSample("tutorial_6_game", false, "--styleCheck:off")
+  buildSample("examples_build", false, "--styleCheck:off") # build only
+  buildSample("examples_run", true)

 # pin system
 # while nimble lockfile
libp2p/builders.nim

@@ -23,7 +23,7 @@ import
   stream/connection,
   multiaddress,
   crypto/crypto,
-  transports/[transport, tcptransport],
+  transports/[transport, tcptransport, memorytransport],
   muxers/[muxer, mplex/mplex, yamux/yamux],
   protocols/[identify, secure/secure, secure/noise, rendezvous],
   protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
@@ -37,8 +37,11 @@ import services/wildcardresolverservice

 export switch, peerid, peerinfo, connection, multiaddress, crypto, errors

+const MemoryAutoAddress* = memorytransport.MemoryAutoAddress
+
 type
-  TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [].}
+  TransportProvider* {.public.} =
+    proc(upgr: Upgrade, privateKey: PrivateKey): Transport {.gcsafe, raises: [].}

   SecureProtocol* {.pure.} = enum
     Noise
@@ -151,7 +154,7 @@ proc withTransport*(
   let switch = SwitchBuilder
     .new()
     .withTransport(
-      proc(upgr: Upgrade): Transport =
+      proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
         TcpTransport.new(flags, upgr)
     )
     .build()
@@ -162,10 +165,25 @@ proc withTcpTransport*(
     b: SwitchBuilder, flags: set[ServerFlags] = {}
 ): SwitchBuilder {.public.} =
   b.withTransport(
-    proc(upgr: Upgrade): Transport =
+    proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
       TcpTransport.new(flags, upgr)
   )

+when defined(libp2p_quic_support):
+  import transports/quictransport
+
+  proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
+    b.withTransport(
+      proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
+        QuicTransport.new(upgr, privateKey)
+    )
+
+proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
+  b.withTransport(
+    proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
+      MemoryTransport.new(upgr)
+  )
+
 proc withRng*(b: SwitchBuilder, rng: ref HmacDrbgContext): SwitchBuilder {.public.} =
   b.rng = rng
   b
@@ -247,6 +265,10 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
   let pkRes = PrivateKey.random(b.rng[])
   let seckey = b.privKey.get(otherwise = pkRes.expect("Expected default Private Key"))

+  if b.secureManagers.len == 0:
+    debug "no secure managers defined. Adding noise by default"
+    b.secureManagers.add(SecureProtocol.Noise)
+
   var secureManagerInstances: seq[Secure]
   if SecureProtocol.Noise in b.secureManagers:
     secureManagerInstances.add(Noise.new(b.rng, seckey).Secure)
@@ -270,7 +292,7 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
   let transports = block:
     var transports: seq[Transport]
     for tProvider in b.transports:
-      transports.add(tProvider(muxedUpgrade))
+      transports.add(tProvider(muxedUpgrade, seckey))
     transports

   if b.secureManagers.len == 0:
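The provider now also receives the switch's private key, which transports such as QUIC need for their TLS identity; the builder helpers shown above already wrap the new two-argument shape. A minimal sketch of building and starting a switch with them, with an illustrative listen address rather than anything this diff prescribes:

```nim
import chronos
import libp2p

proc main() {.async.} =
  # withTcpTransport() installs a provider of the new
  # (Upgrade, PrivateKey) -> Transport shape under the hood
  let switch = SwitchBuilder
    .new()
    .withRng(newRng())
    .withAddress(MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet())
    .withTcpTransport()
    .withMplex()
    .withNoise()
    .build()
  await switch.start()
  echo "listening on ", switch.peerInfo.addrs
  await switch.stop()

waitFor main()
```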
libp2p/cid.nim

@@ -12,8 +12,8 @@
 {.push raises: [].}

 import tables, hashes
-import multibase, multicodec, multihash, vbuffer, varint
-import stew/[base58, results]
+import multibase, multicodec, multihash, vbuffer, varint, results
+import stew/base58

 export results
@@ -41,6 +41,7 @@ const ContentIdsList = [
   multiCodec("dag-pb"),
   multiCodec("dag-cbor"),
   multiCodec("dag-json"),
+  multiCodec("libp2p-key"),
   multiCodec("git-raw"),
   multiCodec("eth-block"),
   multiCodec("eth-block-list"),
@@ -76,7 +76,7 @@ import nimcrypto/[rijndael, twofish, sha2, hash, hmac]
 # We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
 import nimcrypto/utils as ncrutils
 import ../utility
-import stew/results
+import results
 export results, utility

 # This is workaround for Nim's `import` bug
@@ -18,7 +18,7 @@
 {.push raises: [].}

 import bearssl/[ec, rand]
-import stew/results
+import results
 from stew/assign2 import assign
 export results
@@ -21,7 +21,8 @@ import bearssl/[ec, rand, hash]
 import nimcrypto/utils as ncrutils
 import minasn1
 export minasn1.Asn1Error
-import stew/[results, ctops]
+import stew/ctops
+import results

 import ../utility
@@ -18,7 +18,8 @@ import constants
 import nimcrypto/[hash, sha2]
 # We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
 import nimcrypto/utils as ncrutils
-import stew/[results, ctops]
+import results
+import stew/ctops

 import ../../utility
@@ -11,7 +11,8 @@

 {.push raises: [].}

-import stew/[endians2, results, ctops]
+import stew/[endians2, ctops]
+import results
 export results
 # We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
 import nimcrypto/utils as ncrutils
@@ -291,28 +292,6 @@ proc asn1EncodeBitString*(
     dest[2 + lenlen + bytelen - 1] = lastbyte and mask
   res

-proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte], value: T): int =
-  var v = value
-  if value <= cast[T](0x7F):
-    if len(dest) >= 1:
-      dest[0] = cast[byte](value)
-    1
-  else:
-    var s = 0
-    var res = 0
-    while v != 0:
-      v = v shr 7
-      s += 7
-      inc(res)
-    if len(dest) >= res:
-      var k = 0
-      while s != 0:
-        s -= 7
-        dest[k] = cast[byte](((value shr s) and cast[T](0x7F)) or cast[T](0x80))
-        inc(k)
-      dest[k - 1] = dest[k - 1] and 0x7F'u8
-    res
-
 proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
   ## Encode array of bytes ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and return
   ## number of bytes (octets) used.
@@ -665,9 +644,6 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
         return ok(field)
       else:
         return err(Asn1Error.NoSupport)
-
-      inclass = false
-      ttag = 0
     else:
       return err(Asn1Error.NoSupport)
@@ -17,7 +17,8 @@

 import bearssl/[rsa, rand, hash]
 import minasn1
-import stew/[results, ctops]
+import results
+import stew/ctops
 # We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
 import nimcrypto/utils as ncrutils
@@ -10,7 +10,7 @@
 {.push raises: [].}

 import bearssl/rand
-import secp256k1, stew/[byteutils, results], nimcrypto/[hash, sha2]
+import secp256k1, results, stew/byteutils, nimcrypto/[hash, sha2]

 export sha2, results, rand
libp2p/dial.nim

@@ -10,7 +10,7 @@
 {.push raises: [].}

 import chronos
-import stew/results
+import results
 import peerid, stream/connection, transports/transport

 export results
@@ -31,14 +31,14 @@ method connect*(
   ## a protocol
   ##

-  doAssert(false, "Not implemented!")
+  doAssert(false, "[Dial.connect] abstract method not implemented!")

 method connect*(
     self: Dial, address: MultiAddress, allowUnknownPeerId = false
 ): Future[PeerId] {.base, async: (raises: [DialFailedError, CancelledError]).} =
   ## Connects to a peer and retrieve its PeerId

-  doAssert(false, "Not implemented!")
+  doAssert(false, "[Dial.connect] abstract method not implemented!")

 method dial*(
     self: Dial, peerId: PeerId, protos: seq[string]
@@ -47,7 +47,7 @@ method dial*(
   ## existing connection
   ##

-  doAssert(false, "Not implemented!")
+  doAssert(false, "[Dial.dial] abstract method not implemented!")

 method dial*(
     self: Dial,
@@ -60,14 +60,14 @@ method dial*(
   ## a connection if one doesn't exist already
   ##

-  doAssert(false, "Not implemented!")
+  doAssert(false, "[Dial.dial] abstract method not implemented!")

 method addTransport*(self: Dial, transport: Transport) {.base.} =
-  doAssert(false, "Not implemented!")
+  doAssert(false, "[Dial.addTransport] abstract method not implemented!")

 method tryDial*(
     self: Dial, peerId: PeerId, addrs: seq[MultiAddress]
 ): Future[Opt[MultiAddress]] {.
     base, async: (raises: [DialFailedError, CancelledError])
 .} =
-  doAssert(false, "Not implemented!")
+  doAssert(false, "[Dial.tryDial] abstract method not implemented!")
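These `doAssert(false, ...)` bodies follow the usual Nim pattern for abstract base methods: the assert fires only if a concrete subclass fails to override. An illustrative sketch of the pattern with toy types (not from nim-libp2p):

```nim
type
  Shape = ref object of RootObj
  Circle = ref object of Shape
    r: float

method area(s: Shape): float {.base.} =
  # only reached when a concrete subclass forgot to override
  doAssert(false, "[Shape.area] abstract method not implemented!")

method area(c: Circle): float =
  3.141592653589793 * c.r * c.r

let s: Shape = Circle(r: 2.0)
echo area(s) # dynamic dispatch picks Circle's override
```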
libp2p/dialer.nim

@@ -9,8 +9,7 @@

 import std/tables

-import stew/results
-import pkg/[chronos, chronicles, metrics]
+import pkg/[chronos, chronicles, metrics, results]

 import
   dial,
@@ -125,9 +124,13 @@ proc expandDnsAddr(
   for resolvedAddress in resolved:
     let lastPart = resolvedAddress[^1].tryGet()
     if lastPart.protoCode == Result[MultiCodec, string].ok(multiCodec("p2p")):
-      let
-        peerIdBytes = lastPart.protoArgument().tryGet()
-        addrPeerId = PeerId.init(peerIdBytes).tryGet()
+      var peerIdBytes: seq[byte]
+      try:
+        peerIdBytes = lastPart.protoArgument().tryGet()
+      except ResultError[string]:
+        raiseAssert "expandDnsAddr failed in protoArgument: " & getCurrentExceptionMsg()
+
+      let addrPeerId = PeerId.init(peerIdBytes).tryGet()
       result.add((resolvedAddress[0 ..^ 2].tryGet(), Opt.some(addrPeerId)))
     else:
       result.add((resolvedAddress, peerId))
libp2p/discovery/discoverymngr.nim

@@ -10,7 +10,7 @@
 {.push raises: [].}

 import std/sequtils
-import chronos, chronicles, stew/results
+import chronos, chronicles, results
 import ../errors

 type
@@ -59,7 +59,7 @@ proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =

 proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [KeyError].} =
   pa{T}.valueOr:
-    raise newException(KeyError, "Attritute not found")
+    raise newException(KeyError, "Attribute not found")

 proc match*(pa, candidate: PeerAttributes): bool =
   for f in pa.attributes:
@@ -86,12 +86,12 @@ type
 method request*(
     self: DiscoveryInterface, pa: PeerAttributes
 ) {.base, async: (raises: [DiscoveryError, CancelledError]).} =
-  doAssert(false, "Not implemented!")
+  doAssert(false, "[DiscoveryInterface.request] abstract method not implemented!")

 method advertise*(
     self: DiscoveryInterface
 ) {.base, async: (raises: [CancelledError, AdvertiseError]).} =
-  doAssert(false, "Not implemented!")
+  doAssert(false, "[DiscoveryInterface.advertise] abstract method not implemented!")

 type
   DiscoveryQuery* = ref object
libp2p/discovery/rendezvousinterface.nim

@@ -26,14 +26,14 @@ proc `==`*(a, b: RdvNamespace): bool {.borrow.}
 method request*(
     self: RendezVousInterface, pa: PeerAttributes
 ) {.async: (raises: [DiscoveryError, CancelledError]).} =
-  var namespace = ""
+  var namespace = Opt.none(string)
   for attr in pa:
     if attr.ofType(RdvNamespace):
-      namespace = string attr.to(RdvNamespace)
+      namespace = Opt.some(string attr.to(RdvNamespace))
     elif attr.ofType(DiscoveryService):
-      namespace = string attr.to(DiscoveryService)
+      namespace = Opt.some(string attr.to(DiscoveryService))
     elif attr.ofType(PeerId):
-      namespace = $attr.to(PeerId)
+      namespace = Opt.some($attr.to(PeerId))
     else:
       # unhandled type
       return
@@ -44,8 +44,8 @@ method request*(
     for address in pr.addresses:
       peer.add(address.address)

-    peer.add(DiscoveryService(namespace))
-    peer.add(RdvNamespace(namespace))
+    peer.add(DiscoveryService(namespace.get()))
+    peer.add(RdvNamespace(namespace.get()))
     self.onPeerFound(peer)

   await sleepAsync(self.timeToRequest)
libp2p/multiaddress.nim

@@ -171,6 +171,18 @@ proc ip6zoneVB(vb: var VBuffer): bool =
   ## IPv6 validateBuffer() implementation.
   pathValidateBufferNoSlash(vb)

+proc memoryStB(s: string, vb: var VBuffer): bool =
+  ## Memory stringToBuffer() implementation.
+  pathStringToBuffer(s, vb)
+
+proc memoryBtS(vb: var VBuffer, s: var string): bool =
+  ## Memory bufferToString() implementation.
+  pathBufferToString(vb, s)
+
+proc memoryVB(vb: var VBuffer): bool =
+  ## Memory validateBuffer() implementation.
+  pathValidateBuffer(vb)
+
 proc portStB(s: string, vb: var VBuffer): bool =
   ## Port number stringToBuffer() implementation.
   var port: array[2, byte]
@@ -355,6 +367,10 @@ const
   )
   TranscoderDNS* =
     Transcoder(stringToBuffer: dnsStB, bufferToString: dnsBtS, validateBuffer: dnsVB)
+  TranscoderMemory* = Transcoder(
+    stringToBuffer: memoryStB, bufferToString: memoryBtS, validateBuffer: memoryVB
+  )

   ProtocolsList = [
     MAProtocol(mcodec: multiCodec("ip4"), kind: Fixed, size: 4, coder: TranscoderIP4),
     MAProtocol(mcodec: multiCodec("tcp"), kind: Fixed, size: 2, coder: TranscoderPort),
@@ -393,6 +409,9 @@ const
     MAProtocol(mcodec: multiCodec("p2p-websocket-star"), kind: Marker, size: 0),
     MAProtocol(mcodec: multiCodec("p2p-webrtc-star"), kind: Marker, size: 0),
     MAProtocol(mcodec: multiCodec("p2p-webrtc-direct"), kind: Marker, size: 0),
+    MAProtocol(
+      mcodec: multiCodec("memory"), kind: Path, size: 0, coder: TranscoderMemory
+    ),
   ]

   DNSANY* = mapEq("dns")
@@ -453,6 +472,8 @@ const

   CircuitRelay* = mapEq("p2p-circuit")

+  Memory* = mapEq("memory")
+
 proc initMultiAddressCodeTable(): Table[MultiCodec, MAProtocol] {.compileTime.} =
   for item in ProtocolsList:
     result[item.mcodec] = item
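The new `memory` protocol is registered as Path-kind, so everything after `/memory/` is treated as the address payload. A hypothetical parse, assuming the registration above; the payload string is illustrative:

```nim
import libp2p/multiaddress

# Path-kind protocols consume the rest of the string after the codec name.
let ma = MultiAddress.init("/memory/demo-channel-1").tryGet()
assert Memory.match(ma) # matches the new mapEq("memory") pattern
echo ma                 # /memory/demo-channel-1
```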
libp2p/multibase.nim

@@ -16,7 +16,8 @@
 {.push raises: [].}

 import tables
-import stew/[base32, base58, base64, results]
+import results
+import stew/[base32, base58, base64]

 type
   MultiBaseStatus* {.pure.} = enum
libp2p/multicodec.nim

@@ -13,7 +13,7 @@

 import tables, hashes
 import vbuffer
-import stew/results
+import results
 export results

 ## List of officially supported codecs can BE found here
@@ -396,6 +396,7 @@ const MultiCodecList = [
   ("onion3", 0x01BD),
   ("p2p-circuit", 0x0122),
   ("libp2p-peer-record", 0x0301),
+  ("memory", 0x0309),
   ("dns", 0x35),
   ("dns4", 0x36),
   ("dns6", 0x37),
@@ -403,6 +404,7 @@ const MultiCodecList = [
   # IPLD formats
   ("dag-pb", 0x70),
   ("dag-cbor", 0x71),
+  ("libp2p-key", 0x72),
   ("dag-json", 0x129),
   ("git-raw", 0x78),
   ("eth-block", 0x90),
libp2p/multihash.nim

@@ -27,7 +27,7 @@ import tables
 import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
 import varint, vbuffer, multicodec, multibase
 import stew/base58
-import stew/results
+import results
 export results
 # This is workaround for Nim `import` bug.
 export sha, sha2, keccak, blake2, hash, utils
libp2p/muxers/muxer.nim

@@ -52,7 +52,7 @@ method newStream*(
 ): Future[Connection] {.
     base, async: (raises: [CancelledError, LPStreamError, MuxerError], raw: true)
 .} =
-  raiseAssert("Not implemented!")
+  raiseAssert("[Muxer.newStream] abstract method not implemented!")

 method close*(m: Muxer) {.base, async: (raises: []).} =
   if m.connection != nil:
@@ -68,4 +68,4 @@ proc new*(
   muxerProvider

 method getStreams*(m: Muxer): seq[Connection] {.base, gcsafe.} =
-  raiseAssert("Not implemented!")
+  raiseAssert("[Muxer.getStreams] abstract method not implemented!")
libp2p/nameresolving/nameresolver.nim

@@ -22,7 +22,7 @@ method resolveTxt*(
     self: NameResolver, address: string
 ): Future[seq[string]] {.async: (raises: [CancelledError]), base.} =
   ## Get TXT record
-  raiseAssert "Not implemented!"
+  raiseAssert "[NameResolver.resolveTxt] abstract method not implemented!"

 method resolveIp*(
     self: NameResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
@@ -30,7 +30,7 @@ method resolveIp*(
     async: (raises: [CancelledError, TransportAddressError]), base
 .} =
   ## Resolve the specified address
-  raiseAssert "Not implemented!"
+  raiseAssert "[NameResolver.resolveIp] abstract method not implemented!"

 proc getHostname*(ma: MultiAddress): string =
   let
libp2p/peerid.nim

@@ -14,7 +14,8 @@

 import
   std/[hashes, strutils],
-  stew/[base58, results],
+  stew/base58,
+  results,
   chronicles,
   nimcrypto/utils,
   utility,
libp2p/peerinfo.nim

@@ -11,7 +11,7 @@
 {.push public.}

 import std/sequtils
-import pkg/[chronos, chronicles, stew/results]
+import pkg/[chronos, chronicles, results]
 import peerid, multiaddress, multicodec, crypto/crypto, routing_record, errors, utility

 export peerid, multiaddress, crypto, routing_record, errors, results
libp2p/peerstore.nim

@@ -160,10 +160,10 @@ proc updatePeerInfo*(
     peerStore[KeyBook][info.peerId] = pubkey

   info.agentVersion.withValue(agentVersion):
-    peerStore[AgentBook][info.peerId] = agentVersion.string
+    peerStore[AgentBook][info.peerId] = agentVersion

   info.protoVersion.withValue(protoVersion):
-    peerStore[ProtoVersionBook][info.peerId] = protoVersion.string
+    peerStore[ProtoVersionBook][info.peerId] = protoVersion

   if info.protos.len > 0:
     peerStore[ProtoBook][info.peerId] = info.protos
libp2p/protobuf/minprotobuf.nim

@@ -11,7 +11,7 @@

 {.push raises: [].}

-import ../varint, ../utility, stew/[endians2, results]
+import ../varint, ../utility, stew/endians2, results
 export results, utility

 {.push public.}
libp2p/protocols/connectivity/autonat/client.nim

@@ -9,7 +9,7 @@

 {.push raises: [].}

-import stew/results
+import results
 import chronos, chronicles
 import ../../../switch, ../../../multiaddress, ../../../peerid
 import core
@@ -96,7 +96,7 @@ method dialMe*(
   of ResponseStatus.Ok:
     try:
       response.ma.tryGet()
-    except:
+    except ResultError[void]:
       raiseAssert("checked with if")
   of ResponseStatus.DialError:
     raise newException(
@@ -9,8 +9,8 @@

 {.push raises: [].}

-import stew/[results, objects]
-import chronos, chronicles
+import stew/objects
+import results, chronos, chronicles
 import ../../../multiaddress, ../../../peerid, ../../../errors
 import ../../../protobuf/minprotobuf
@@ -10,7 +10,7 @@
 {.push raises: [].}

 import std/[sets, sequtils]
-import stew/results
+import results
 import chronos, chronicles
 import
   ../../protocol,
@@ -11,7 +11,7 @@

 import std/sequtils

-import stew/results
+import results
 import chronos, chronicles

 import core
@@ -10,8 +10,8 @@
 {.push raises: [].}

 import std/[sets, sequtils]
-import stew/[results, objects]
-import chronos, chronicles
+import stew/objects
+import results, chronos, chronicles

 import core
 import
@@ -10,7 +10,8 @@
 {.push raises: [].}

 import macros
-import stew/[objects, results]
+import stew/objects
+import results
 import ../../../peerinfo, ../../../signed_envelope
 import ../../../protobuf/minprotobuf
@@ -13,8 +13,7 @@
 {.push raises: [].}

 import std/[sequtils, options, strutils, sugar]
-import stew/results
-import chronos, chronicles
+import results, chronos, chronicles
 import
   ../protobuf/minprotobuf,
   ../peerinfo,
libp2p/protocols/protocol.nim

@@ -9,7 +9,7 @@

 {.push raises: [].}

-import chronos, stew/results
+import chronos, results
 import ../stream/connection

 export results
@@ -66,21 +66,6 @@ template `handler`*(p: LPProtocol, conn: Connection, proto: string): Future[void
 func `handler=`*(p: LPProtocol, handler: LPProtoHandler) =
   p.handlerImpl = handler

-# Callbacks that are annotated with `{.async: (raises).}` explicitly
-# document the types of errors that they may raise, but are not compatible
-# with `LPProtoHandler` and need to use a custom `proc` type.
-# They are internally wrapped into a `LPProtoHandler`, but still allow the
-# compiler to check that their `{.async: (raises).}` annotation is correct.
-# https://github.com/nim-lang/Nim/issues/23432
-func `handler=`*[E](
-    p: LPProtocol,
-    handler: proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
-) {.deprecated: "Use `LPProtoHandler` that explicitly specifies raised exceptions.".} =
-  proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
-    await handler(conn, proto)
-
-  p.handlerImpl = wrap
-
 proc new*(
     T: type LPProtocol,
     codecs: seq[string],
@@ -96,17 +81,3 @@ proc new*(
     else:
       maxIncomingStreams,
   )
-
-proc new*[E](
-    T: type LPProtocol,
-    codecs: seq[string],
-    handler: proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
-    maxIncomingStreams: Opt[int] | int = Opt.none(int),
-): T {.
-    deprecated:
-      "Use `new` with `LPProtoHandler` that explicitly specifies raised exceptions."
-.} =
-  proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
-    await handler(conn, proto)
-
-  T.new(codec, wrap, maxIncomingStreams)
libp2p/protocols/pubsub/gossipsub.nim

@@ -29,7 +29,7 @@ import
   ../../utility,
   ../../switch

-import stew/results
+import results
 export results

 import ./gossipsub/[types, scoring, behavior], ../../utils/heartbeat
libp2p/protocols/pubsub/pubsub.nim

@@ -31,7 +31,7 @@ import
   ../../errors,
   ../../utility

-import stew/results
+import results
 export results

 export tables, sets
@@ -589,7 +589,7 @@ method addValidator*(

 method removeValidator*(
     p: PubSub, topic: varargs[string], hook: ValidatorHandler
-) {.base, public.} =
+) {.base, public, gcsafe.} =
   for t in topic:
     p.validators.withValue(t, validators):
       validators[].excl(hook)
libp2p/protocols/pubsub/pubsubpeer.nim

@@ -9,8 +9,8 @@

 {.push raises: [].}

-import std/[sequtils, strutils, tables, hashes, options, sets, deques]
-import stew/results
+import std/[sequtils, tables, hashes, options, sets, deques]
+import results
 import chronos, chronicles, nimcrypto/sha2, metrics
 import chronos/ratelimit
 import
@@ -59,6 +59,8 @@ declareCounter(
   "number of peers disconnected due to over non-prio queue capacity",
 )

+var countConnectedFut: uint64 = 0
+
 const DefaultMaxNumElementsInNonPriorityQueue* = 1024

 type
@@ -231,6 +233,7 @@ proc closeSendConn(

   if not p.connectedFut.finished:
     p.connectedFut.complete()
+    countConnectedFut.dec()

   try:
     if p.onEvent != nil:
@@ -243,6 +246,7 @@ proc connectOnce(
     p: PubSubPeer
 ): Future[void] {.async: (raises: [CancelledError, GetConnDialError, LPError]).} =
   try:
+    debug "AAAAA connectOnce", countConnectedFut
     if p.connectedFut.finished:
       p.connectedFut = newFuture[void]()
     let newConn =
@@ -262,6 +266,7 @@ proc connectOnce(
       # Topic subscription relies on either connectedFut
       # to be completed, or onEvent to be called later
       p.connectedFut.complete()
+      countConnectedFut.dec()
       p.sendConn = newConn
       p.address =
         if p.sendConn.observedAddr.isSome:
@@ -285,6 +290,7 @@ proc connectImpl(p: PubSubPeer) {.async: (raises: []).} =
     if p.disconnected:
       if not p.connectedFut.finished:
         p.connectedFut.complete()
+        countConnectedFut.dec()
       return
     await connectOnce(p)
   except CancelledError as exc:
@@ -346,6 +352,7 @@ proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async: (raises: [CancelledErro
   if p.sendConn == nil:
     # Wait for a send conn to be setup. `connectOnce` will
     # complete this even if the sendConn setup failed
+    countConnectedFut.inc()
     discard await race(p.connectedFut)

   var conn = p.sendConn
@@ -10,7 +10,7 @@
 {.push raises: [].}

 import std/[hashes, sets]
-import chronos/timer, stew/results
+import chronos/timer, results

 import ../../utility
libp2p/protocols/rendezvous.nim

@@ -11,7 +11,7 @@

 import tables, sequtils, sugar, sets
 import metrics except collect
-import chronos, chronicles, bearssl/rand, stew/[byteutils, objects, results]
+import chronos, chronicles, bearssl/rand, stew/[byteutils, objects]
 import
   ./protocol,
   ../protobuf/minprotobuf,
@@ -37,6 +37,9 @@ const
   RendezVousCodec* = "/rendezvous/1.0.0"
   MinimumDuration* = 2.hours
   MaximumDuration = 72.hours
+  MaximumMessageLen = 1 shl 22 # 4MB
+  MinimumNamespaceLen = 1
+  MaximumNamespaceLen = 255
   RegistrationLimitPerPeer = 1000
   DiscoverLimit = 1000'u64
   SemaphoreDefaultSize = 5
@@ -61,7 +64,7 @@ type

   Cookie = object
     offset: uint64
-    ns: string
+    ns: Opt[string]

   Register = object
     ns: string
@@ -77,7 +80,7 @@ type
     ns: string

   Discover = object
-    ns: string
+    ns: Opt[string]
     limit: Opt[uint64]
     cookie: Opt[seq[byte]]
@@ -98,7 +101,8 @@ type
 proc encode(c: Cookie): ProtoBuffer =
   result = initProtoBuffer()
   result.write(1, c.offset)
-  result.write(2, c.ns)
+  if c.ns.isSome():
+    result.write(2, c.ns.get())
   result.finish()

 proc encode(r: Register): ProtoBuffer =
@@ -125,7 +129,8 @@ proc encode(u: Unregister): ProtoBuffer =

 proc encode(d: Discover): ProtoBuffer =
   result = initProtoBuffer()
-  result.write(1, d.ns)
+  if d.ns.isSome():
+    result.write(1, d.ns.get())
   d.limit.withValue(limit):
     result.write(2, limit)
   d.cookie.withValue(cookie):
@@ -159,13 +164,17 @@ proc encode(msg: Message): ProtoBuffer =
   result.finish()

 proc decode(_: typedesc[Cookie], buf: seq[byte]): Opt[Cookie] =
-  var c: Cookie
+  var
+    c: Cookie
+    ns: string
   let
     pb = initProtoBuffer(buf)
     r1 = pb.getRequiredField(1, c.offset)
-    r2 = pb.getRequiredField(2, c.ns)
+    r2 = pb.getField(2, ns)
   if r1.isErr() or r2.isErr():
     return Opt.none(Cookie)
+  if r2.get(false):
+    c.ns = Opt.some(ns)
   Opt.some(c)

 proc decode(_: typedesc[Register], buf: seq[byte]): Opt[Register] =
@@ -217,13 +226,16 @@ proc decode(_: typedesc[Discover], buf: seq[byte]): Opt[Discover] =
     d: Discover
     limit: uint64
     cookie: seq[byte]
+    ns: string
   let
     pb = initProtoBuffer(buf)
-    r1 = pb.getRequiredField(1, d.ns)
+    r1 = pb.getField(1, ns)
     r2 = pb.getField(2, limit)
     r3 = pb.getField(3, cookie)
   if r1.isErr() or r2.isErr() or r3.isErr:
     return Opt.none(Discover)
+  if r1.get(false):
+    d.ns = Opt.some(ns)
   if r2.get(false):
     d.limit = Opt.some(limit)
   if r3.get(false):
@@ -413,10 +425,10 @@ proc save(
 proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
   trace "Received Register", peerId = conn.peerId, ns = r.ns
   libp2p_rendezvous_register.inc()
-  if r.ns.len notin 1 .. 255:
+  if r.ns.len < MinimumNamespaceLen or r.ns.len > MaximumNamespaceLen:
     return conn.sendRegisterResponseError(InvalidNamespace)
   let ttl = r.ttl.get(rdv.minTTL)
-  if ttl notin rdv.minTTL .. rdv.maxTTL:
+  if ttl < rdv.minTTL or ttl > rdv.maxTTL:
     return conn.sendRegisterResponseError(InvalidTTL)
   let pr = checkPeerRecord(r.signedPeerRecord, conn.peerId)
   if pr.isErr():
@@ -444,7 +456,7 @@ proc discover(
 ) {.async: (raises: [CancelledError, LPStreamError]).} =
   trace "Received Discover", peerId = conn.peerId, ns = d.ns
   libp2p_rendezvous_discover.inc()
-  if d.ns.len notin 0 .. 255:
+  if d.ns.isSome() and d.ns.get().len > MaximumNamespaceLen:
     await conn.sendDiscoverResponseError(InvalidNamespace)
     return
   var limit = min(DiscoverLimit, d.limit.get(DiscoverLimit))
@@ -457,20 +469,19 @@ proc discover(
       return
     else:
       Cookie(offset: rdv.registered.low().uint64 - 1)
-  if cookie.ns != d.ns or
-      cookie.offset notin rdv.registered.low().uint64 .. rdv.registered.high().uint64:
+  if d.ns.isSome() and cookie.ns.isSome() and cookie.ns.get() != d.ns.get() or
+      cookie.offset < rdv.registered.low().uint64 or
+      cookie.offset > rdv.registered.high().uint64:
     cookie = Cookie(offset: rdv.registered.low().uint64 - 1)
-  let
-    nsSalted = d.ns & rdv.salt
-    namespaces =
-      if d.ns != "":
-        try:
-          rdv.namespaces[nsSalted]
-        except KeyError:
-          await conn.sendDiscoverResponseError(InvalidNamespace)
-          return
-      else:
-        toSeq(cookie.offset.int .. rdv.registered.high())
+  let namespaces =
+    if d.ns.isSome():
+      try:
+        rdv.namespaces[d.ns.get() & rdv.salt]
+      except KeyError:
+        await conn.sendDiscoverResponseError(InvalidNamespace)
+        return
+    else:
+      toSeq(max(cookie.offset.int, rdv.registered.offset) .. rdv.registered.high())
   if namespaces.len() == 0:
     await conn.sendDiscoverResponse(@[], Cookie())
     return
@@ -514,15 +525,15 @@ proc advertisePeer(
       rdv.sema.release()

   await rdv.sema.acquire()
-  discard await advertiseWrap().withTimeout(5.seconds)
+  await advertiseWrap()

 proc advertise*(
     rdv: RendezVous, ns: string, ttl: Duration, peers: seq[PeerId]
 ) {.async: (raises: [CancelledError, AdvertiseError]).} =
-  if ns.len notin 1 .. 255:
+  if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
     raise newException(AdvertiseError, "Invalid namespace")

-  if ttl notin rdv.minDuration .. rdv.maxDuration:
+  if ttl < rdv.minDuration or ttl > rdv.maxDuration:
     raise newException(AdvertiseError, "Invalid time to live: " & $ttl)

   let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
@@ -537,7 +548,7 @@ proc advertise*(
   let futs = collect(newSeq()):
     for peer in peers:
       trace "Send Advertise", peerId = peer, ns
-      rdv.advertisePeer(peer, msg.buffer)
+      rdv.advertisePeer(peer, msg.buffer).withTimeout(5.seconds)

   await allFutures(futs)
||||
@@ -561,7 +572,7 @@ proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
|
||||
@[]
|
||||
|
||||
proc request*(
|
||||
rdv: RendezVous, ns: string, l: int = DiscoverLimit.int, peers: seq[PeerId]
|
||||
rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int, peers: seq[PeerId]
|
||||
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
|
||||
var
|
||||
s: Table[PeerId, (PeerRecord, Register)]
|
||||
@@ -570,7 +581,7 @@ proc request*(
|
||||
|
||||
if l <= 0 or l > DiscoverLimit.int:
|
||||
raise newException(AdvertiseError, "Invalid limit")
|
||||
if ns.len notin 0 .. 255:
|
||||
if ns.isSome() and ns.get().len > MaximumNamespaceLen:
|
||||
raise newException(AdvertiseError, "Invalid namespace")
|
||||
|
||||
limit = l.uint64
|
||||
@@ -582,15 +593,18 @@ proc request*(
     await conn.close()
     d.limit = Opt.some(limit)
     d.cookie =
-      try:
-        Opt.some(rdv.cookiesSaved[peer][ns])
-      except KeyError as exc:
+      if ns.isSome():
+        try:
+          Opt.some(rdv.cookiesSaved[peer][ns.get()])
+        except KeyError, CatchableError:
+          Opt.none(seq[byte])
+      else:
         Opt.none(seq[byte])
     await conn.writeLp(
       encode(Message(msgType: MessageType.Discover, discover: Opt.some(d))).buffer
     )
     let
-      buf = await conn.readLp(65536)
+      buf = await conn.readLp(MaximumMessageLen)
       msgRcv = Message.decode(buf).valueOr:
         debug "Message undecodable"
         return
@@ -604,12 +618,14 @@ proc request*(
       trace "Cannot discover", ns, status = resp.status, text = resp.text
       return
     resp.cookie.withValue(cookie):
-      if cookie.len() < 1000 and
-          rdv.cookiesSaved.hasKeyOrPut(peer, {ns: cookie}.toTable()):
-        try:
-          rdv.cookiesSaved[peer][ns] = cookie
-        except KeyError:
-          raiseAssert "checked with hasKeyOrPut"
+      if ns.isSome:
+        let namespace = ns.get()
+        if cookie.len() < 1000 and
+            rdv.cookiesSaved.hasKeyOrPut(peer, {namespace: cookie}.toTable()):
+          try:
+            rdv.cookiesSaved[peer][namespace] = cookie
+          except KeyError:
+            raiseAssert "checked with hasKeyOrPut"
     for r in resp.registrations:
       if limit == 0:
         return
@@ -632,8 +648,9 @@ proc request*(
         else:
           s[pr.peerId] = (pr, r)
           limit.dec()
-    for (_, r) in s.values():
-      rdv.save(ns, peer, r, false)
+    if ns.isSome():
+      for (_, r) in s.values():
+        rdv.save(ns.get(), peer, r, false)

   for peer in peers:
     if limit == 0:
@@ -652,10 +669,15 @@ proc request*(
   return toSeq(s.values()).mapIt(it[0])

 proc request*(
-    rdv: RendezVous, ns: string, l: int = DiscoverLimit.int
+    rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int
 ): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
   await rdv.request(ns, l, rdv.peers)

+proc request*(
+    rdv: RendezVous, l: int = DiscoverLimit.int
+): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
+  await rdv.request(Opt.none(string), l, rdv.peers)
+
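A quick usage sketch for the reworked API above (hedged, not part of the diff; the rdv instance is assumed to be set up already): the namespace parameter is now Opt[string], and the new overload discovers across all namespaces.

  let scoped = await rdv.request(Opt.some("my-app"))  # discover within one namespace
  let all = await rdv.request()                       # no namespace: match everything
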
 proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
   let nsSalted = ns & rdv.salt
   try:
@@ -668,7 +690,7 @@ proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
 proc unsubscribe*(
     rdv: RendezVous, ns: string, peerIds: seq[PeerId]
 ) {.async: (raises: [RendezVousError, CancelledError]).} =
-  if ns.len notin 1 .. 255:
+  if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
     raise newException(RendezVousError, "Invalid namespace")

   let msg = encode(
@@ -688,7 +710,7 @@ proc unsubscribe*(
   for peer in peerIds:
     unsubscribePeer(peer)

-  discard await allFutures(futs).withTimeout(5.seconds)
+  await allFutures(futs)

 proc unsubscribe*(
     rdv: RendezVous, ns: string
@@ -784,8 +806,10 @@ proc new*(
   rdv.setup(switch)
   return rdv

-proc deletesRegister(rdv: RendezVous) {.async: (raises: [CancelledError]).} =
-  heartbeat "Register timeout", 1.minutes:
+proc deletesRegister(
+    rdv: RendezVous, interval = 1.minutes
+) {.async: (raises: [CancelledError]).} =
+  heartbeat "Register timeout", interval:
     let n = Moment.now()
     var total = 0
     rdv.registered.flushIfIt(it.expiration < n)
@@ -20,7 +20,6 @@ import ../../peerid
 import ../../peerinfo
 import ../../protobuf/minprotobuf
 import ../../utility
-import ../../errors

 import secure, ../../crypto/[crypto, chacha20poly1305, curve25519, hkdf]
@@ -11,15 +11,14 @@
 {.push raises: [].}

 import std/[strformat]
-import stew/results
+import results
 import chronos, chronicles
 import
   ../protocol,
   ../../stream/streamseq,
   ../../stream/connection,
   ../../multiaddress,
-  ../../peerinfo,
-  ../../errors
+  ../../peerinfo

 export protocol, results
@@ -82,7 +81,7 @@ method readMessage*(
 ): Future[seq[byte]] {.
     async: (raises: [CancelledError, LPStreamError], raw: true), base
 .} =
-  raiseAssert("Not implemented!")
+  raiseAssert("[SecureConn.readMessage] abstract method not implemented!")

 method getWrapped*(s: SecureConn): Connection =
   s.stream
@@ -92,7 +91,7 @@ method handshake*(
 ): Future[SecureConn] {.
     async: (raises: [CancelledError, LPStreamError], raw: true), base
 .} =
-  raiseAssert("Not implemented!")
+  raiseAssert("[Secure.handshake] abstract method not implemented!")

 proc handleConn(
     s: Secure, conn: Connection, initiator: bool, peerId: Opt[PeerId]
@@ -12,7 +12,7 @@
 {.push raises: [].}

 import std/[sequtils, times]
-import pkg/stew/results
+import pkg/results
 import multiaddress, multicodec, peerid, protobuf/minprotobuf, signed_envelope

 export peerid, multiaddress, signed_envelope
@@ -10,8 +10,8 @@
 {.push raises: [].}

 import std/sequtils
-import stew/[byteutils, results, endians2]
-import chronos, chronos/transports/[osnet, ipnet], chronicles
+import stew/endians2
+import chronos, chronos/transports/[osnet, ipnet], chronicles, results
 import ../[multiaddress, multicodec]
 import ../switch

@@ -73,7 +73,6 @@ proc new*(
   return T(networkInterfaceProvider: networkInterfaceProvider)

 proc getProtocolArgument*(ma: MultiAddress, codec: MultiCodec): MaResult[seq[byte]] =
-  var buffer: seq[byte]
   for item in ma:
     let
       ritem = ?item
@@ -12,7 +12,7 @@
 {.push raises: [].}

 import std/sugar
-import pkg/stew/[results, byteutils]
+import pkg/stew/byteutils, pkg/results
 import multicodec, crypto/crypto, protobuf/minprotobuf, vbuffer

 export crypto
libp2p/stream/bridgestream.nim (new file, 63 lines)
@@ -0,0 +1,63 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import pkg/chronos
import connection, bufferstream

export connection

type
  WriteHandler = proc(data: seq[byte]): Future[void] {.
    async: (raises: [CancelledError, LPStreamError])
  .}

  BridgeStream* = ref object of BufferStream
    writeHandler: WriteHandler
    closeHandler: proc(): Future[void] {.async: (raises: []).}

method write*(
    s: BridgeStream, msg: seq[byte]
): Future[void] {.public, async: (raises: [CancelledError, LPStreamError], raw: true).} =
  s.writeHandler(msg)

method closeImpl*(s: BridgeStream): Future[void] {.async: (raises: [], raw: true).} =
  if not isNil(s.closeHandler):
    discard s.closeHandler()

  procCall BufferStream(s).closeImpl()

method getWrapped*(s: BridgeStream): Connection =
  nil

proc bridgedConnections*(
    closeTogether: bool = true, dirA = Direction.In, dirB = Direction.In
): (BridgeStream, BridgeStream) =
  let connA = BridgeStream()
  let connB = BridgeStream()
  connA.dir = dirA
  connB.dir = dirB
  connA.initStream()
  connB.initStream()

  connA.writeHandler = proc(
      data: seq[byte]
  ) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
    connB.pushData(data)
  connB.writeHandler = proc(
      data: seq[byte]
  ) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
    connA.pushData(data)

  if closeTogether:
    connA.closeHandler = proc(): Future[void] {.async: (raises: []).} =
      await noCancel connB.close()
    connB.closeHandler = proc(): Future[void] {.async: (raises: []).} =
      await noCancel connA.close()

  return (connA, connB)
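A hedged usage sketch (not part of the diff): the two ends of a bridged pair behave like ordinary connections, so bytes written on one side can be read from the other, and closing one side closes both while closeTogether keeps its default.

  # assumes a running chronos event loop; readExactly comes from lpstream
  let (a, b) = bridgedConnections()
  await a.write(@[1'u8, 2, 3])
  var buf: array[3, byte]
  await b.readExactly(addr buf[0], buf.len)
  await a.close()  # closeTogether = true, so b is closed as well
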
@@ -10,7 +10,7 @@
 {.push raises: [].}

 import std/[strformat]
-import stew/results
+import results
 import chronos, chronicles, metrics
 import connection
 import ../utility
@@ -10,7 +10,7 @@
 {.push raises: [].}

 import std/[hashes, oids, strformat]
-import stew/results
+import results
 import chronicles, chronos, metrics
 import lpstream, ../multiaddress, ../peerinfo, ../errors

@@ -124,7 +124,7 @@ proc timeoutMonitor(s: Connection) {.async: (raises: []).} =
     return

 method getWrapped*(s: Connection): Connection {.base.} =
-  raiseAssert("Not implemented!")
+  raiseAssert("[Connection.getWrapped] abstract method not implemented!")

 when defined(libp2p_agents_metrics):
   proc setShortAgent*(s: Connection, shortAgent: string) =
@@ -133,7 +133,7 @@ method readOnce*(
   ## Reads whatever is available in the stream,
   ## up to `nbytes`. Will block if nothing is
   ## available
-  raiseAssert("Not implemented!")
+  raiseAssert("[LPStream.readOnce] abstract method not implemented!")

 proc readExactly*(
     s: LPStream, pbytes: pointer, nbytes: int
@@ -242,7 +242,7 @@ method write*(
     async: (raises: [CancelledError, LPStreamError], raw: true), base, public
 .} =
   # Write `msg` to stream, waiting for the write to be finished
-  raiseAssert("Not implemented!")
+  raiseAssert("[LPStream.write] abstract method not implemented!")

 proc writeLp*(
     s: LPStream, msg: openArray[byte]
@@ -77,7 +77,7 @@ method setup*(
     return true

 method run*(self: Service, switch: Switch) {.base, async: (raises: [CancelledError]).} =
-  doAssert(false, "Not implemented!")
+  doAssert(false, "[Service.run] abstract method not implemented!")

 method stop*(
     self: Service, switch: Switch
libp2p/transports/memorymanager.nim (new file, 122 lines)
@@ -0,0 +1,122 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import locks
import tables
import pkg/chronos
import pkg/chronicles
import ./transport
import ../multiaddress
import ../stream/connection
import ../stream/bridgestream

type
  MemoryTransportError* = object of transport.TransportError
  MemoryTransportAcceptStopped* = object of MemoryTransportError

type MemoryListener* = object
  address: string
  accept: Future[Connection]
  onListenerEnd: proc(address: string) {.closure, gcsafe, raises: [].}

proc init(
    _: type[MemoryListener],
    address: string,
    onListenerEnd: proc(address: string) {.closure, gcsafe, raises: [].},
): MemoryListener =
  return MemoryListener(
    accept: newFuture[Connection]("MemoryListener.accept"),
    address: address,
    onListenerEnd: onListenerEnd,
  )

proc close*(self: MemoryListener) =
  if not (self.accept.finished):
    self.accept.fail(newException(MemoryTransportAcceptStopped, "Listener closed"))
  self.onListenerEnd(self.address)

proc accept*(
    self: MemoryListener
): Future[Connection] {.gcsafe, raises: [CatchableError].} =
  return self.accept

proc dial*(
    self: MemoryListener
): Future[Connection] {.gcsafe, raises: [CatchableError].} =
  let (connA, connB) = bridgedConnections()

  self.onListenerEnd(self.address)
  self.accept.complete(connA)

  let dFut = newFuture[Connection]("MemoryListener.dial")
  dFut.complete(connB)

  return dFut

type memoryConnManager = ref object
  listeners: Table[string, MemoryListener]
  connections: Table[string, Connection]
  lock: Lock

proc init(_: type[memoryConnManager]): memoryConnManager =
  var m = memoryConnManager()
  initLock(m.lock)
  return m

proc onListenerEnd(
    self: memoryConnManager
): proc(address: string) {.closure, gcsafe, raises: [].} =
  proc cb(address: string) {.closure, gcsafe, raises: [].} =
    acquire(self.lock)
    defer:
      release(self.lock)

    try:
      if address in self.listeners:
        self.listeners.del(address)
    except KeyError:
      raiseAssert "checked with if"

  return cb

proc accept*(
    self: memoryConnManager, address: string
): MemoryListener {.raises: [MemoryTransportError].} =
  acquire(self.lock)
  defer:
    release(self.lock)

  if address in self.listeners:
    raise newException(MemoryTransportError, "Memory address already in use")

  let listener = MemoryListener.init(address, self.onListenerEnd())
  self.listeners[address] = listener

  return listener

proc dial*(
    self: memoryConnManager, address: string
): MemoryListener {.raises: [MemoryTransportError].} =
  acquire(self.lock)
  defer:
    release(self.lock)

  if address notin self.listeners:
    raise newException(MemoryTransportError, "No memory listener found")

  try:
    return self.listeners[address]
  except KeyError:
    raiseAssert "checked with if"

let instance: memoryConnManager = memoryConnManager.init()

proc getInstance*(): memoryConnManager {.gcsafe.} =
  {.gcsafe.}:
    instance
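To make the pairing logic concrete, a hedged sketch of a rendezvous through the process-global manager (the address string is illustrative): dialing an address completes the accept future that a listener registered under that same address.

  let mgr = getInstance()
  let listener = mgr.accept("/memory/demo")        # registers the listener
  let clientConn = await mgr.dial("/memory/demo").dial()
  let serverConn = await listener.accept()         # completed by the dial above
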
libp2p/transports/memorytransport.nim (new file, 127 lines)
@@ -0,0 +1,127 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

## Memory transport implementation

import std/sequtils
import pkg/chronos
import pkg/chronicles
import ./transport
import ../multiaddress
import ../stream/connection
import ../crypto/crypto
import ../upgrademngrs/upgrade
import ./memorymanager

export connection
export MemoryTransportError, MemoryTransportAcceptStopped

const MemoryAutoAddress* = "/memory/*"

logScope:
  topics = "libp2p memorytransport"

type MemoryTransport* = ref object of Transport
  rng: ref HmacDrbgContext
  connections: seq[Connection]
  listener: Opt[MemoryListener]

proc new*(
    T: typedesc[MemoryTransport],
    upgrade: Upgrade = Upgrade(),
    rng: ref HmacDrbgContext = newRng(),
): T =
  T(upgrader: upgrade, rng: rng)

proc listenAddress(self: MemoryTransport, ma: MultiAddress): MultiAddress =
  if $ma != MemoryAutoAddress:
    return ma

  # When the special address `/memory/*` is used, pick any free address.
  # Here we assume that any randomly generated address will be free.
  var randomBuf: array[10, byte]
  hmacDrbgGenerate(self.rng[], randomBuf)

  return MultiAddress.init("/memory/" & toHex(randomBuf)).get()

method start*(
    self: MemoryTransport, addrs: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError]).} =
  if self.running:
    return

  trace "starting memory transport on addrs", address = $addrs

  self.addrs = addrs.mapIt(self.listenAddress(it))
  self.running = true

method stop*(self: MemoryTransport) {.async: (raises: []).} =
  if not self.running:
    return

  trace "stopping memory transport", address = $self.addrs
  self.running = false

  # closing the listener will throw an interruption error to the caller of accept()
  let listener = self.listener
  if listener.isSome:
    listener.get().close()

  # end all connections
  await noCancel allFutures(self.connections.mapIt(it.close()))

method accept*(
    self: MemoryTransport
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
  if not self.running:
    raise newException(MemoryTransportError, "Transport closed, no more connections!")

  var listener: MemoryListener
  try:
    listener = getInstance().accept($self.addrs[0])
    self.listener = Opt.some(listener)
    let conn = await listener.accept()
    self.connections.add(conn)
    self.listener = Opt.none(MemoryListener)
    return conn
  except CancelledError as e:
    listener.close()
    raise e
  except MemoryTransportError as e:
    raise e
  except CatchableError:
    raiseAssert "should never happen"

method dial*(
    self: MemoryTransport,
    hostname: string,
    ma: MultiAddress,
    peerId: Opt[PeerId] = Opt.none(PeerId),
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
  try:
    let listener = getInstance().dial($ma)
    let conn = await listener.dial()
    self.connections.add(conn)
    return conn
  except CancelledError as e:
    raise e
  except MemoryTransportError as e:
    raise e
  except CatchableError:
    raiseAssert "should never happen"

proc dial*(
    self: MemoryTransport, ma: MultiAddress, peerId: Opt[PeerId] = Opt.none(PeerId)
): Future[Connection] {.gcsafe.} =
  self.dial("", ma)

method handles*(self: MemoryTransport, ma: MultiAddress): bool {.gcsafe, raises: [].} =
  if procCall Transport(self).handles(ma):
    if ma.protocols.isOk:
      return Memory.match(ma)
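A hedged end-to-end sketch: a transport started on the wildcard address gets a concrete random /memory/... address assigned, which a second transport in the same process can then dial.

  let server = MemoryTransport.new()
  await server.start(@[MultiAddress.init(MemoryAutoAddress).get()])
  let acceptFut = server.accept()
  let client = MemoryTransport.new()
  let clientConn = await client.dial("", server.addrs[0])
  let serverConn = await acceptFut
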
@@ -2,6 +2,7 @@ import std/sequtils
 import pkg/chronos
 import pkg/chronicles
 import pkg/quic
+import results
 import ../multiaddress
 import ../multicodec
 import ../stream/connection
@@ -9,6 +10,7 @@ import ../wire
 import ../muxers/muxer
 import ../upgrademngrs/upgrade
 import ./transport
+import tls/certificate

 export multiaddress
 export multicodec
@@ -23,6 +25,9 @@ type
   QuicConnection = quic.Connection
   QuicTransportError* = object of transport.TransportError
   QuicTransportDialError* = object of transport.TransportDialError
+  QuicTransportAcceptStopped* = object of QuicTransportError
+
+const alpn = "libp2p"

 # Stream
 type QuicStream* = ref object of P2PConnection
@@ -81,15 +86,19 @@ method close*(session: QuicSession) {.async: (raises: []).} =

 proc getStream*(
     session: QuicSession, direction = Direction.In
-): Future[QuicStream] {.async: (raises: [CatchableError]).} =
-  var stream: Stream
-  case direction
-  of Direction.In:
-    stream = await session.connection.incomingStream()
-  of Direction.Out:
-    stream = await session.connection.openStream()
-    await stream.write(@[]) # QUIC streams do not exist until data is sent
-  return QuicStream.new(stream, session.observedAddr, session.peerId)
+): Future[QuicStream] {.async: (raises: [QuicTransportError]).} =
+  try:
+    var stream: Stream
+    case direction
+    of Direction.In:
+      stream = await session.connection.incomingStream()
+    of Direction.Out:
+      stream = await session.connection.openStream()
+      await stream.write(@[]) # QUIC streams do not exist until data is sent
+    return QuicStream.new(stream, session.observedAddr, session.peerId)
+  except CatchableError as exc:
+    # TODO: incomingStream is using {.async.} with no raises
+    raise (ref QuicTransportError)(msg: exc.msg, parent: exc)

 method getWrapped*(self: QuicSession): P2PConnection =
   nil
@@ -131,19 +140,65 @@ method handle*(m: QuicMuxer): Future[void] {.async: (raises: []).} =
 method close*(m: QuicMuxer) {.async: (raises: []).} =
   try:
     await m.quicSession.close()
-    m.handleFut.cancel()
+    m.handleFut.cancelSoon()
   except CatchableError as exc:
     discard

 # Transport
 type QuicUpgrade = ref object of Upgrade

+type CertGenerator =
+  proc(kp: KeyPair): CertificateX509 {.gcsafe, raises: [TLSCertificateError].}
+
 type QuicTransport* = ref object of Transport
   listener: Listener
+  client: QuicClient
   privateKey: PrivateKey
   connections: seq[P2PConnection]
+  rng: ref HmacDrbgContext
+  certGenerator: CertGenerator

-func new*(_: type QuicTransport, u: Upgrade): QuicTransport =
-  QuicTransport(upgrader: QuicUpgrade(ms: u.ms))
+proc makeCertificateVerifier(): CertificateVerifier =
+  proc certificateVerifier(serverName: string, certificatesDer: seq[seq[byte]]): bool =
+    if certificatesDer.len != 1:
+      trace "CertificateVerifier: expected one certificate in the chain",
+        cert_count = certificatesDer.len
+      return false
+
+    let cert =
+      try:
+        parse(certificatesDer[0])
+      except CertificateParsingError as e:
+        trace "CertificateVerifier: failed to parse certificate", msg = e.msg
+        return false
+
+    return cert.verify()
+
+  return CustomCertificateVerifier.init(certificateVerifier)
+
+proc defaultCertGenerator(
+    kp: KeyPair
+): CertificateX509 {.gcsafe, raises: [TLSCertificateError].} =
+  return generateX509(kp, encodingFormat = EncodingFormat.PEM)
+
+proc new*(_: type QuicTransport, u: Upgrade, privateKey: PrivateKey): QuicTransport =
+  return QuicTransport(
+    upgrader: QuicUpgrade(ms: u.ms),
+    privateKey: privateKey,
+    certGenerator: defaultCertGenerator,
+  )
+
+proc new*(
+    _: type QuicTransport,
+    u: Upgrade,
+    privateKey: PrivateKey,
+    certGenerator: CertGenerator,
+): QuicTransport =
+  return QuicTransport(
+    upgrader: QuicUpgrade(ms: u.ms),
+    privateKey: privateKey,
+    certGenerator: certGenerator,
+  )
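A hedged construction sketch for the overloads above: the three-argument variant swaps in a custom certificate generator (generateX509 and EncodingFormat come from tls/certificate; the upgrade and privateKey values are assumed to exist in the calling code):

  proc myCertGen(kp: KeyPair): CertificateX509 {.gcsafe, raises: [TLSCertificateError].} =
    generateX509(kp, encodingFormat = EncodingFormat.PEM)

  let transport = QuicTransport.new(upgrade, privateKey, myCertGen)
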
 method handles*(transport: QuicTransport, address: MultiAddress): bool {.raises: [].} =
   if not procCall Transport(transport).handles(address):
@@ -155,12 +210,32 @@ method start*(
 ) {.async: (raises: [LPError, transport.TransportError]).} =
   doAssert self.listener.isNil, "start() already called"
   #TODO handle multiple addr
+
+  let pubkey = self.privateKey.getPublicKey().valueOr:
+    doAssert false, "could not obtain public key"
+    return
+
   try:
-    self.listener = listen(initTAddress(addrs[0]).tryGet)
+    if self.rng.isNil:
+      self.rng = newRng()
+
+    let cert = self.certGenerator(KeyPair(seckey: self.privateKey, pubkey: pubkey))
+    let tlsConfig = TLSConfig.init(
+      cert.certificate, cert.privateKey, @[alpn], Opt.some(makeCertificateVerifier())
+    )
+    self.client = QuicClient.init(tlsConfig, rng = self.rng)
+    self.listener =
+      QuicServer.init(tlsConfig, rng = self.rng).listen(initTAddress(addrs[0]).tryGet)
     await procCall Transport(self).start(addrs)
     self.addrs[0] =
       MultiAddress.init(self.listener.localAddress(), IPPROTO_UDP).tryGet() &
       MultiAddress.init("/quic-v1").get()
+  except QuicConfigError as exc:
+    doAssert false, "invalid quic setup: " & $exc.msg
+  except TLSCertificateError as exc:
+    raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
+  except QuicError as exc:
+    raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
   except TransportOsError as exc:
     raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
   self.running = true
@@ -174,50 +249,69 @@ method stop*(transport: QuicTransport) {.async: (raises: []).} =
     await transport.listener.stop()
   except CatchableError as exc:
     trace "Error shutting down Quic transport", description = exc.msg
   transport.listener.destroy()
   transport.running = false
   transport.listener = nil

 proc wrapConnection(
     transport: QuicTransport, connection: QuicConnection
-): P2PConnection {.raises: [Defect, TransportOsError, LPError].} =
+): QuicSession {.raises: [TransportOsError, MaError].} =
   let
     remoteAddr = connection.remoteAddress()
     observedAddr =
       MultiAddress.init(remoteAddr, IPPROTO_UDP).get() &
       MultiAddress.init("/quic-v1").get()
-    conres = QuicSession(connection: connection, observedAddr: Opt.some(observedAddr))
-  conres.initStream()
+    session = QuicSession(connection: connection, observedAddr: Opt.some(observedAddr))
+
+  session.initStream()
+
+  transport.connections.add(session)

-  transport.connections.add(conres)
   proc onClose() {.async: (raises: []).} =
-    await noCancel conres.join()
-    transport.connections.keepItIf(it != conres)
+    await noCancel session.join()
+    transport.connections.keepItIf(it != session)
     trace "Cleaned up client"

   asyncSpawn onClose()
-  return conres
+
+  return session

 method accept*(
     self: QuicTransport
-): Future[P2PConnection] {.async: (raises: [transport.TransportError, CancelledError]).} =
+): Future[connection.Connection] {.
+    async: (raises: [transport.TransportError, CancelledError])
+.} =
   doAssert not self.listener.isNil, "call start() before calling accept()"

+  if not self.running:
+    # stop accept only when transport is stopped (not when error occurs)
+    raise newException(QuicTransportAcceptStopped, "Quic transport stopped")
+
   try:
     let connection = await self.listener.accept()
     return self.wrapConnection(connection)
-  except CancelledError as e:
-    raise e
-  except CatchableError as e:
-    raise (ref QuicTransportError)(msg: e.msg, parent: e)
+  except CancelledError as exc:
+    raise exc
+  except QuicError as exc:
+    debug "Quic Error", description = exc.msg
+  except MaError as exc:
+    debug "Multiaddr Error", description = exc.msg
+  except CatchableError as exc: # TODO: removing this requires async/raises in nim-quic
+    info "Unexpected error accepting quic connection", description = exc.msg
+  except TransportOsError as exc:
+    debug "OS Error", description = exc.msg

 method dial*(
     self: QuicTransport,
     hostname: string,
     address: MultiAddress,
     peerId: Opt[PeerId] = Opt.none(PeerId),
-): Future[P2PConnection] {.async: (raises: [transport.TransportError, CancelledError]).} =
+): Future[connection.Connection] {.
+    async: (raises: [transport.TransportError, CancelledError])
+.} =
   try:
-    let connection = await dial(initTAddress(address).tryGet)
-    return self.wrapConnection(connection)
+    let quicConnection = await self.client.dial(initTAddress(address).tryGet)
+    return self.wrapConnection(quicConnection)
   except CancelledError as e:
     raise e
   except CatchableError as e:
@@ -227,8 +321,13 @@ method upgrade*(
     self: QuicTransport, conn: P2PConnection, peerId: Opt[PeerId]
 ): Future[Muxer] {.async: (raises: [CancelledError, LPError]).} =
   let qs = QuicSession(conn)
-  if peerId.isSome:
-    qs.peerId = peerId.get()
+  qs.peerId =
+    if peerId.isSome:
+      peerId.get()
+    else:
+      let certificates = qs.connection.certificates()
+      let cert = parse(certificates[0])
+      cert.peerId()

   let muxer = QuicMuxer(quicSession: qs, connection: conn)
   muxer.streamHandler = proc(conn: P2PConnection) {.async: (raises: []).} =
libp2p/transports/tls/certificate.c (new file, 1091 lines)
File diff suppressed because it is too large

libp2p/transports/tls/certificate.h (new file, 206 lines)
@@ -0,0 +1,206 @@
#ifndef LIBP2P_CERT_H
#define LIBP2P_CERT_H

#include <stddef.h>
#include <stdint.h>

typedef struct cert_context_s *cert_context_t;

typedef struct cert_key_s *cert_key_t;

typedef int32_t cert_error_t;

#define CERT_SUCCESS 0
#define CERT_ERROR_NULL_PARAM -1
#define CERT_ERROR_MEMORY -2
#define CERT_ERROR_DRBG_INIT -3
#define CERT_ERROR_DRBG_CONFIG -4
#define CERT_ERROR_DRBG_SEED -5
#define CERT_ERROR_KEY_GEN -6
#define CERT_ERROR_CERT_GEN -7
#define CERT_ERROR_EXTENSION_GEN -8
#define CERT_ERROR_EXTENSION_ADD -9
#define CERT_ERROR_EXTENSION_DATA -10
#define CERT_ERROR_BIO_GEN -11
#define CERT_ERROR_SIGN -12
#define CERT_ERROR_ENCODING -13
#define CERT_ERROR_PARSE -14
#define CERT_ERROR_RAND -15
#define CERT_ERROR_ECKEY_GEN -16
#define CERT_ERROR_BIGNUM_CONV -17
#define CERT_ERROR_SET_KEY -18
#define CERT_ERROR_VALIDITY_PERIOD -19
#define CERT_ERROR_BIO_WRITE -20
#define CERT_ERROR_SERIAL_WRITE -21
#define CERT_ERROR_EVP_PKEY_EC_KEY -22
#define CERT_ERROR_X509_VER -23
#define CERT_ERROR_BIGNUM_GEN -24
#define CERT_ERROR_X509_NAME -25
#define CERT_ERROR_X509_CN -26
#define CERT_ERROR_X509_SUBJECT -27
#define CERT_ERROR_X509_ISSUER -28
#define CERT_ERROR_AS1_TIME_GEN -29
#define CERT_ERROR_PUBKEY_SET -30
#define CERT_ERROR_AS1_OCTET -31
#define CERT_ERROR_X509_READ -32
#define CERT_ERROR_PUBKEY_GET -33
#define CERT_ERROR_EXTENSION_NOT_FOUND -34
#define CERT_ERROR_EXTENSION_GET -35
#define CERT_ERROR_DECODE_SEQUENCE -36
#define CERT_ERROR_NOT_ENOUGH_SEQ_ELEMS -37
#define CERT_ERROR_NOT_OCTET_STR -38
#define CERT_ERROR_NID -39
#define CERT_ERROR_PUBKEY_DER_LEN -40
#define CERT_ERROR_PUBKEY_DER_CONV -41
#define CERT_ERROR_INIT_KEYGEN -42
#define CERT_ERROR_SET_CURVE -43
#define CERT_ERROR_X509_REQ_GEN -44
#define CERT_ERROR_X509_REQ_DER -45
#define CERT_ERROR_NO_PUBKEY -46
#define CERT_ERROR_X509_SAN -47
#define CERT_ERROR_CN_TOO_LONG -48
#define CERT_ERROR_CN_LABEL_TOO_LONG -49
#define CERT_ERROR_CN_EMPTY_LABEL -50
#define CERT_ERROR_CN_EMPTY -51

typedef enum { CERT_FORMAT_DER = 0, CERT_FORMAT_PEM = 1 } cert_format_t;

/* Buffer structure for raw key data */
typedef struct {
  unsigned char *data; /* data buffer */
  size_t len;          /* Length of data */
} cert_buffer;

/* Struct to hold the parsed certificate data */
typedef struct {
  cert_buffer *signature;
  cert_buffer *ident_pubk;
  cert_buffer *cert_pubkey;
  char *valid_from;
  char *valid_to;
} cert_parsed;

/**
 * Initialize the CTR-DRBG for cryptographic operations
 * This function creates and initializes a CTR-DRBG context using
 * the provided seed for entropy. The DRBG is configured to use
 * AES-256-CTR as the underlying cipher.
 *
 * @param seed A null-terminated string used to seed the DRBG. Must not be NULL.
 * @param ctx Pointer to a context pointer that will be allocated and
 * initialized. The caller is responsible for eventually freeing this context
 * with the appropriate cleanup function.
 *
 * @return CERT_SUCCESS on successful initialization, an error code otherwise
 */
cert_error_t cert_init_drbg(const char *seed, size_t seed_len,
                            cert_context_t *ctx);

/**
 * Generate an EC key pair for use with certificates
 *
 * @param ctx Context pointer obtained from `cert_init_drbg`
 * @param out Pointer to store the generated key
 *
 * @return CERT_SUCCESS on successful execution, an error code otherwise
 */
cert_error_t cert_generate_key(cert_context_t ctx, cert_key_t *out);

/**
 * Serialize a key's private key to a format
 *
 * @param key The key to export
 * @param out Pointer to a buffer structure that will be populated with the key
 * @param format output format
 *
 * @return CERT_SUCCESS on successful execution, an error code otherwise
 */
cert_error_t cert_serialize_privk(cert_key_t key, cert_buffer **out,
                                  cert_format_t format);

/**
 * Serialize a key's public key to a format
 *
 * @param key The key to export
 * @param out Pointer to a buffer structure that will be populated with the key
 * @param format output format
 *
 * @return CERT_SUCCESS on successful execution, an error code otherwise
 */
cert_error_t cert_serialize_pubk(cert_key_t key, cert_buffer **out,
                                 cert_format_t format);

/**
 * Generate a self-signed X.509 certificate with libp2p extension
 *
 * @param ctx Context pointer obtained from `cert_init_drbg`
 * @param key Key to use
 * @param out Pointer to a buffer that will be populated with a certificate
 * @param signature buffer that contains a signature
 * @param ident_pubk buffer that contains the bytes of an identity pubk
 * @param common_name Common name to use for the certificate subject/issuer
 * @param validFrom Date from which certificate is issued
 * @param validTo Date to which certificate is issued
 * @param format Certificate format
 *
 * @return CERT_SUCCESS on successful execution, an error code otherwise
 */
cert_error_t cert_generate(cert_context_t ctx, cert_key_t key,
                           cert_buffer **out, cert_buffer *signature,
                           cert_buffer *ident_pubk, const char *cn,
                           const char *validFrom, const char *validTo,
                           cert_format_t format);

/**
 * Parse a certificate to extract the custom extension and public key
 *
 * @param cert Buffer containing the certificate data
 * @param format Certificate format
 * @param cert_parsed Pointer to a structure containing the parsed
 * certificate data.
 *
 * @return CERT_SUCCESS on successful execution, an error code otherwise
 */
cert_error_t cert_parse(cert_buffer *cert, cert_format_t format,
                        cert_parsed **out);

/**
 * Free all resources associated with a CTR-DRBG context
 *
 * @param ctx The context to free
 */
void cert_free_ctr_drbg(cert_context_t ctx);

/**
 * Free memory allocated for a parsed certificate
 *
 * @param cert Pointer to the parsed certificate structure
 */
void cert_free_parsed(cert_parsed *cert);

/**
 * Free all resources associated with a key
 *
 * @param key The key to free
 */
void cert_free_key(cert_key_t key);

/**
 * Free memory allocated for a buffer
 *
 * @param buffer Pointer to the buffer structure
 */
void cert_free_buffer(cert_buffer *buffer);

/**
 * Create a X.509 certificate request
 *
 * @param cn Domain for which we're requesting the certificate
 * @param key Public key of the requesting client
 * @param csr_buffer Pointer to the buffer that will be set to the CSR in DER format
 *
 * @return CERT_SUCCESS on successful execution, an error code otherwise
 */
cert_error_t cert_signing_req(const char *cn, cert_key_t key, cert_buffer **csr_buffer);

#endif /* LIBP2P_CERT_H */
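A hedged sketch of the intended call sequence for this C API, written as Nim against the FFI bindings the diff imports below as ./certificate_ffi (the exact Nim-side names are assumed to mirror the header):

  var ctx: cert_context_t
  doAssert cert_init_drbg("libp2p_tls", 10, addr ctx) == CERT_SUCCESS
  var key: cert_key_t
  doAssert cert_generate_key(ctx, addr key) == CERT_SUCCESS
  var pubk: ptr cert_buffer = nil
  doAssert cert_serialize_pubk(key, addr pubk, CERT_FORMAT_DER) == CERT_SUCCESS
  # ...use pubk's data/len, then free everything in reverse order
  cert_free_buffer(pubk)
  cert_free_key(key)
  cert_free_ctr_drbg(ctx)
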
@@ -7,46 +7,26 @@
 # This file may not be copied, modified, or distributed except according to
 # those terms.

-import std/[sequtils, strutils, exitprocs]
+import std/[sequtils, exitprocs]

+import strutils
+import times
 import stew/byteutils
 import chronicles

-import mbedtls/pk
-import mbedtls/ctr_drbg as ctr_drbg_module
-import mbedtls/entropy as entropy_module
-import mbedtls/ecp
-import mbedtls/sha256
-import mbedtls/md
-import mbedtls/asn1
-import mbedtls/asn1write
-import mbedtls/x509
-import mbedtls/x509_crt
-import mbedtls/oid
-import mbedtls/debug
-import mbedtls/error
-import nimcrypto/utils
 import ../../crypto/crypto
 import ../../errors
+import ./certificate_ffi
+import ../../../libp2p/peerid

 logScope:
   topics = "libp2p tls certificate"

-# Constants and OIDs
-const
-  P2P_SIGNING_PREFIX = "libp2p-tls-handshake:"
-  SIGNATURE_ALG = MBEDTLS_MD_SHA256
-  EC_GROUP_ID = MBEDTLS_ECP_DP_SECP256R1
-  LIBP2P_EXT_OID_DER: array[10, byte] =
-    [0x2B, 0x06, 0x01, 0x04, 0x01, 0x83, 0xA2, 0x5A, 0x01, 0x01]
-  # "1.3.6.1.4.1.53594.1.1"
-
 # Exception types for TLS certificate errors
 type
   TLSCertificateError* = object of LPError
   ASN1EncodingError* = object of TLSCertificateError
   KeyGenerationError* = object of TLSCertificateError
   CertificateCreationError* = object of TLSCertificateError
+  CertificatePubKeySerializationError* = object of TLSCertificateError
   CertificateParsingError* = object of TLSCertificateError
   IdentityPubKeySerializationError* = object of TLSCertificateError
   IdentitySigningError* = object of TLSCertificateError
@@ -58,184 +38,129 @@ type
     signature*: seq[byte]

   P2pCertificate* = object
-    certificate*: mbedtls_x509_crt
     extension*: P2pExtension
+    pubKeyDer: seq[byte]
+    validFrom: Time
+    validTo: Time
+
+  CertificateX509* = object
+    certificate*: seq[byte]
+      # Complete ASN.1 DER content (certificate, signature algorithm and signature).
+    privateKey*: seq[byte] # Private key used to sign certificate

 type EncodingFormat* = enum
   DER
   PEM

-proc ptrInc*(p: ptr byte, n: uint): ptr byte =
-  ## Utility function to increment a pointer by n bytes.
-  cast[ptr byte](cast[uint](p) + n)
+proc cert_format_t(self: EncodingFormat): cert_format_t =
+  if self == EncodingFormat.DER: CERT_FORMAT_DER else: CERT_FORMAT_PEM
+
+proc toCertBuffer*(self: seq[uint8]): cert_buffer =
+  cert_buffer(data: self[0].unsafeAddr, length: self.len.csize_t)
+
+proc toSeq*(self: ptr cert_buffer): seq[byte] =
+  toOpenArray(cast[ptr UncheckedArray[byte]](self.data), 0, self.length.int - 1).toSeq()

 # Initialize entropy and DRBG contexts at the module level
 var
-  entropy: mbedtls_entropy_context
-  ctrDrbg: mbedtls_ctr_drbg_context
+  cert_ctx: cert_context_t = nil
   drbgInitialized = false

+func publicKey*(cert: P2pCertificate): PublicKey =
+  return PublicKey.init(cert.extension.publicKey).get()
+
+func peerId*(cert: P2pCertificate): PeerId =
+  return PeerId.init(cert.publicKey()).tryGet()
+
 proc initializeDRBG() {.raises: [KeyGenerationError].} =
   ## Function to initialize entropy and DRBG context if not already initialized.
   if not drbgInitialized:
-    mbedtls_entropy_init(addr entropy)
-    mbedtls_ctr_drbg_init(addr ctrDrbg)
-
     # Seed the random number generator
     let personalization = "libp2p_tls"
-    let ret = mbedtls_ctr_drbg_seed(
-      addr ctrDrbg,
-      mbedtls_entropy_func,
-      addr entropy,
-      cast[ptr byte](personalization.cstring),
-      personalization.len.uint,
+    let ret = cert_init_drbg(
+      personalization.cstring, personalization.len.csize_t, addr cert_ctx
     )
-    if ret != 0:
+    if ret != CERT_SUCCESS:
      raise newException(KeyGenerationError, "Failed to seed CTR_DRBG")
    drbgInitialized = true

 proc cleanupDRBG() =
   ## Function to free entropy and DRBG context.
   if drbgInitialized:
-    mbedtls_ctr_drbg_free(addr ctrDrbg)
-    mbedtls_entropy_free(addr entropy)
+    cert_free_ctr_drbg(cert_ctx)
     drbgInitialized = false

 # Register cleanup function to free entropy and DRBG context
 addExitProc(cleanupDRBG)

-proc generateSignedKey(
-    signature: seq[byte], pubKey: seq[byte]
-): seq[byte] {.raises: [ASN1EncodingError].} =
-  ## Generates the ASN.1-encoded SignedKey structure.
+func makeSignatureMessage(pubKey: seq[byte]): seq[byte] {.inline.} =
+  ## Creates message used for certificate signature.
   ##
-  ## The SignedKey structure contains the public key and its signature,
-  ## encoded as a SEQUENCE of two OCTET STRINGs.
-  ##
-  ## Parameters:
-  ##   - `signature`: The signature bytes.
-  ##   - `pubKey`: The public key bytes.
-  ##
-  ## Returns:
-  ##   A sequence of bytes representing the ASN.1-encoded SignedKey.
-  ##
-  ## Raises:
-  ##   - `ASN1EncodingError` if ASN.1 encoding fails.
-  const extValueSize = 256 # Buffer size for ASN.1 encoding
-  var
-    extValue: array[extValueSize, byte]
-    extPtr: ptr byte = addr extValue[extValueSize - 1]
-      # Start at the end of the buffer as mbedtls_asn1_write_octet_string works backwards in data buffer.
-    startPtr: ptr byte = addr extValue[0]
-    len = 0
+  let P2P_SIGNING_PREFIX = "libp2p-tls-handshake:".toBytes()
+  let prefixLen = P2P_SIGNING_PREFIX.len.int
+  let msg = newSeq[byte](prefixLen + pubKey.len)
+  copyMem(msg[0].unsafeAddr, P2P_SIGNING_PREFIX[0].unsafeAddr, prefixLen)
+  copyMem(msg[prefixLen].unsafeAddr, pubKey[0].unsafeAddr, pubKey.len.int)

-  # Write signature OCTET STRING
-  let retSig = mbedtls_asn1_write_octet_string(
-    addr extPtr, startPtr, unsafeAddr signature[0], signature.len.uint
-  )
-  if retSig < 0:
-    raise newException(ASN1EncodingError, "Failed to write signature OCTET STRING")
-  len += retSig
+  return msg

-  # Write publicKey OCTET STRING
-  let retPub = mbedtls_asn1_write_octet_string(
-    addr extPtr, startPtr, unsafeAddr pubKey[0], pubKey.len.uint
-  )
-  if retPub < 0:
-    raise newException(ASN1EncodingError, "Failed to write publicKey OCTET STRING")
-  len += retPub
+func makeIssuerDN(identityKeyPair: KeyPair): string {.inline.} =
+  let issuerDN =
+    try:
+      "CN=" & $(PeerId.init(identityKeyPair.pubkey).tryGet())
+    except LPError:
+      raiseAssert "pubkey must be set"

-  # Total length of the SEQUENCE contents
-  let contentLen = retSig + retPub
-  # Write SEQUENCE length
-  let retLen = mbedtls_asn1_write_len(addr extPtr, startPtr, contentLen.uint)
-  if retLen < 0:
-    raise newException(ASN1EncodingError, "Failed to write SEQUENCE length")
-  len += retLen
+  return issuerDN

-  # Write SEQUENCE tag
-  let retTag = mbedtls_asn1_write_tag(
-    addr extPtr, startPtr, MBEDTLS_ASN1_CONSTRUCTED or MBEDTLS_ASN1_SEQUENCE
-  )
-  if retTag < 0:
-    raise newException(ASN1EncodingError, "Failed to write SEQUENCE tag")
-  len += retTag
+proc makeASN1Time(time: Time): string {.inline.} =
+  let str =
+    try:
+      let f = initTimeFormat("yyyyMMddhhmmss")
+      format(time.utc(), f)
+    except TimeFormatParseError:
+      raiseAssert "time format is const and checked with test"

-  # Calculate dataOffset based on the accumulated length
-  let dataOffset = extValueSize - len - 1
+  return str & "Z"

-  # Extract the relevant portion of extValue as a seq[byte]
-  let extValueSeq = toSeq(extValue[dataOffset ..< extValueSize])
-
-  # Return the extension content
-  return extValueSeq
-
-proc makeLibp2pExtension(
-    identityKeypair: KeyPair, certificateKeypair: mbedtls_pk_context
-): seq[byte] {.
+proc makeExtValues(
+    identityKeypair: KeyPair, certKey: cert_key_t
+): tuple[signature: cert_buffer, pubkey: cert_buffer] {.
   raises: [
-    CertificateCreationError, IdentityPubKeySerializationError, IdentitySigningError,
-    ASN1EncodingError, TLSCertificateError,
+    CertificatePubKeySerializationError, IdentitySigningError,
+    IdentityPubKeySerializationError,
  ]
 .} =
-  ## Creates the libp2p extension containing the SignedKey.
-  ##
-  ## The libp2p extension is an ASN.1-encoded structure that includes
-  ## the public key and its signature over the certificate's public key.
+  ## Creates the buffers to be used for writing the libp2p extension
   ##
   ## Parameters:
   ##   - `identityKeypair`: The peer's identity key pair.
-  ##   - `certificateKeypair`: The key pair used for the certificate.
+  ##   - `certificateKey`: The key used for the certificate.
   ##
-  ## Returns:
-  ##   A sequence of bytes representing the libp2p extension.
-  ##
   ## Raises:
-  ##   - `CertificateCreationError` if public key serialization fails.
+  ##   - `IdentitySigningError` if signing the message fails.
+  ##   - `CertificatePubKeySerializationError` if serialization of certificate public key fails
   ##   - `IdentityPubKeySerializationError` if serialization of identity public key fails.
-  ##   - `IdentitySigningError` if signing the hash fails.
-  ##   - `ASN1EncodingError` if ASN.1 encoding fails.

   # Serialize the Certificate's Public Key
-  var
-    certPubKeyDer: array[512, byte]
-    certPubKeyDerLen: cint
-
-  certPubKeyDerLen = mbedtls_pk_write_pubkey_der(
-    unsafeAddr certificateKeypair, addr certPubKeyDer[0], certPubKeyDer.len.uint
-  )
-  if certPubKeyDerLen < 0:
+  var derCert: ptr cert_buffer = nil
+  let ret = cert_serialize_pubk(certKey, derCert.addr, DER.cert_format_t())
+  if ret != CERT_SUCCESS:
     raise newException(
-      CertificateCreationError, "Failed to write certificate public key in DER format"
+      CertificatePubKeySerializationError, "Failed to serialize the certificate pubkey"
     )

-  # Adjust pointer to the start of the data
-  let certPubKeyDerPtr = addr certPubKeyDer[certPubKeyDer.len - certPubKeyDerLen]
+  let certificatePubKeyDer = derCert.toSeq()

   # Create the Message to Sign
-  var msg = newSeq[byte](P2P_SIGNING_PREFIX.len + certPubKeyDerLen.int.int)
+  let msg = makeSignatureMessage(certificatePubKeyDer)

-  # Copy the prefix into msg
-  for i in 0 ..< P2P_SIGNING_PREFIX.len:
-    msg[i] = byte(P2P_SIGNING_PREFIX[i])
-
-  # Copy the public key DER into msg
-  copyMem(addr msg[P2P_SIGNING_PREFIX.len], certPubKeyDerPtr, certPubKeyDerLen.int)
-
-  # Compute SHA-256 hash of the message
-  var hash: array[32, byte]
-  let hashRet = mbedtls_sha256(
-    msg[0].addr, msg.len.uint, addr hash[0], 0 # 0 for SHA-256
-  )
-  if hashRet != 0:
-    # Since hashing failure is critical and unlikely, we can raise a general exception
-    raise newException(TLSCertificateError, "Failed to compute SHA-256 hash")
-
-  # Sign the hash with the Identity Key
-  let signatureResult = identityKeypair.seckey.sign(hash)
+  # Sign the message with the Identity Key
+  let signatureResult = identityKeypair.seckey.sign(msg)
   if signatureResult.isErr:
     raise newException(
-      IdentitySigningError, "Failed to sign the hash with the identity key"
+      IdentitySigningError, "Failed to sign the message with the identity key"
     )
   let signature = signatureResult.get().data
@@ -247,285 +172,79 @@ proc makeLibp2pExtension(
|
||||
)
|
||||
let pubKeyBytes = pubKeyBytesResult.get()
|
||||
|
||||
# Generate the SignedKey ASN.1 structure
|
||||
return generateSignedKey(signature, pubKeyBytes)
|
||||
return (signature.toCertBuffer(), pubKeyBytes.toCertBuffer())
|
||||
|
||||
proc generate*(
|
||||
identityKeyPair: KeyPair, encodingFormat: EncodingFormat = EncodingFormat.DER
|
||||
): (seq[byte], seq[byte]) {.
|
||||
proc generateX509*(
|
||||
identityKeyPair: KeyPair,
|
||||
validFrom: Time = fromUnix(157813200),
|
||||
validTo: Time = fromUnix(67090165200),
|
||||
encodingFormat: EncodingFormat = EncodingFormat.DER,
|
||||
): CertificateX509 {.
|
||||
raises: [
|
||||
KeyGenerationError, CertificateCreationError, ASN1EncodingError,
|
||||
IdentityPubKeySerializationError, IdentitySigningError, TLSCertificateError,
|
||||
KeyGenerationError, IdentitySigningError, IdentityPubKeySerializationError,
|
||||
CertificateCreationError, CertificatePubKeySerializationError,
|
||||
]
|
||||
.} =
|
||||
## Generates a self-signed X.509 certificate with the libp2p extension.
|
||||
##
|
||||
## Parameters:
|
||||
## - `identityKeyPair`: The peer's identity key pair.
|
||||
## - `encodingFormat`: The encoding format of generated certificate.
|
||||
##
|
||||
## Returns:
|
||||
## A tuple containing:
|
||||
## - The certificate.
|
||||
## - The private key.
|
||||
## - `raw` - The certificate content (encoded using encodingFormat).
|
||||
## - `privateKey` - The private key.
|
||||
##
|
||||
## Raises:
|
||||
## - `KeyGenerationError` if key generation fails.
|
||||
## - `CertificateCreationError` if certificate creation fails.
|
||||
## - `ASN1EncodingError` if encoding fails.
|
||||
|
||||
# Ensure DRBG contexts are initialized
|
||||
initializeDRBG()
|
||||
var
|
||||
crt: mbedtls_x509write_cert
|
||||
certKey: mbedtls_pk_context
|
||||
ret: cint
|
||||
|
||||
mbedtls_entropy_init(addr entropy)
|
||||
mbedtls_ctr_drbg_init(addr ctrDrbg)
|
||||
mbedtls_x509write_crt_init(addr crt)
|
||||
mbedtls_pk_init(addr certKey)
|
||||
|
||||
defer:
|
||||
mbedtls_entropy_free(addr entropy)
|
||||
mbedtls_ctr_drbg_free(addr ctrDrbg)
|
||||
mbedtls_pk_free(addr certKey)
|
||||
mbedtls_x509write_crt_free(addr crt)
|
||||
|
||||
# Seed the random number generator
|
||||
let personalization = "libp2p_tls"
|
||||
ret = mbedtls_ctr_drbg_seed(
|
||||
addr ctrDrbg,
|
||||
mbedtls_entropy_func,
|
||||
addr entropy,
|
||||
cast[ptr byte](personalization.cstring),
|
||||
personalization.len.uint,
|
||||
)
|
||||
if ret != 0:
|
||||
raise newException(KeyGenerationError, "Failed to seed CTR_DRBG")
|
||||
|
||||
# Initialize certificate key
|
||||
ret = mbedtls_pk_setup(addr certKey, mbedtls_pk_info_from_type(MBEDTLS_PK_ECKEY))
|
||||
if ret != 0:
|
||||
raise newException(KeyGenerationError, "Failed to set up certificate key context")
|
||||
|
||||
# Generate key pair for the certificate
|
||||
let G =
|
||||
try:
|
||||
mb_pk_ec(certKey)
|
||||
except MbedTLSError as e:
|
||||
raise newException(KeyGenerationError, e.msg)
|
||||
ret = mbedtls_ecp_gen_key(EC_GROUP_ID, G, mbedtls_ctr_drbg_random, addr ctrDrbg)
|
||||
if ret != 0:
|
||||
var certKey: cert_key_t
|
||||
var ret = cert_generate_key(cert_ctx, certKey.addr)
|
||||
if ret != CERT_SUCCESS:
|
||||
raise
|
||||
newException(KeyGenerationError, "Failed to generate EC key pair for certificate")
|
||||
newException(KeyGenerationError, "Failed to generate certificate key - " & $ret)
|
||||
|
||||
## Initialize libp2p extension
|
||||
let libp2pExtension = makeLibp2pExtension(identityKeyPair, certKey)
|
||||
let issuerDN = makeIssuerDN(identityKeyPair)
|
||||
let libp2pExtension = makeExtValues(identityKeyPair, certKey)
|
||||
let validFromAsn1 = makeASN1Time(validFrom)
|
||||
let validToAsn1 = makeASN1Time(validTo)
|
||||
var certificate: ptr cert_buffer = nil
|
||||
|
||||
# Set the Subject and Issuer Name (self-signed)
|
||||
ret = mbedtls_x509write_crt_set_subject_name(addr crt, "CN=libp2p.io")
|
||||
if ret != 0:
|
||||
raise newException(CertificateCreationError, "Failed to set subject name")
|
||||
|
||||
ret = mbedtls_x509write_crt_set_issuer_name(addr crt, "CN=libp2p.io")
|
||||
if ret != 0:
|
||||
raise newException(CertificateCreationError, "Failed to set issuer name")
|
||||
|
||||
# Set Validity Period
|
||||
let notBefore = "19750101000000"
|
||||
let notAfter = "40960101000000"
|
||||
ret =
|
||||
mbedtls_x509write_crt_set_validity(addr crt, notBefore.cstring, notAfter.cstring)
|
||||
if ret != 0:
|
||||
raise newException(
|
||||
CertificateCreationError, "Failed to set certificate validity period"
|
||||
)
|
||||
|
||||
# Assign the Public Key to the Certificate
|
||||
mbedtls_x509write_crt_set_subject_key(addr crt, addr certKey)
|
||||
mbedtls_x509write_crt_set_issuer_key(addr crt, addr certKey) # Self-signed
|
||||
|
||||
# Add the libp2p Extension
|
||||
let oid = string.fromBytes(LIBP2P_EXT_OID_DER)
|
||||
ret = mbedtls_x509write_crt_set_extension(
|
||||
addr crt,
|
||||
oid, # OID
|
||||
oid.len.uint, # OID length
|
||||
0, # Critical flag
|
||||
unsafeAddr libp2pExtension[0], # Extension data
|
||||
libp2pExtension.len.uint, # Extension data length
|
||||
ret = cert_generate(
|
||||
cert_ctx, certKey, certificate.addr, libp2pExtension.signature.unsafeAddr,
|
||||
libp2pExtension.pubkey.unsafeAddr, issuerDN.cstring, validFromAsn1.cstring,
|
||||
validToAsn1.cstring, encodingFormat.cert_format_t,
|
||||
)
|
||||
if ret != 0:
|
||||
raise newException(
|
||||
CertificateCreationError, "Failed to set libp2p extension in certificate"
|
||||
)
|
||||
if ret != CERT_SUCCESS:
|
||||
raise
|
||||
newException(CertificateCreationError, "Failed to generate certificate - " & $ret)

# Set Basic Constraints (optional, e.g., CA:FALSE)
ret = mbedtls_x509write_crt_set_basic_constraints(
  addr crt,
  0, # is_ca
  -1, # max_pathlen (-1 for no limit)
)
if ret != 0:
  raise newException(CertificateCreationError, "Failed to set basic constraints")
var privKDer: ptr cert_buffer = nil
ret = cert_serialize_privk(certKey, privKDer.addr, encodingFormat.cert_format_t)
if ret != CERT_SUCCESS:
  raise newException(KeyGenerationError, "Failed to serialize privK - " & $ret)

# Set Key Usage
ret = mbedtls_x509write_crt_set_key_usage(
  addr crt, MBEDTLS_X509_KU_DIGITAL_SIGNATURE or MBEDTLS_X509_KU_KEY_ENCIPHERMENT
)
if ret != 0:
  raise newException(CertificateCreationError, "Failed to set key usage")
let outputCertificate = certificate.toSeq()
let outputPrivateKey = privKDer.toSeq()

# Set the MD algorithm
mbedtls_x509write_crt_set_md_alg(addr crt, SIGNATURE_ALG)
cert_free_buffer(certificate)
cert_free_buffer(privKDer)

# Generate a random serial number
const SERIAL_LEN = 20
var serialBuffer: array[SERIAL_LEN, byte]
ret = mbedtls_ctr_drbg_random(addr ctrDrbg, addr serialBuffer[0], SERIAL_LEN)
if ret != 0:
  raise newException(CertificateCreationError, "Failed to generate serial number")
return CertificateX509(certificate: outputCertificate, privateKey: outputPrivateKey)

# Set the serial number
ret = mbedtls_x509write_crt_set_serial_raw(addr crt, addr serialBuffer[0], SERIAL_LEN)
if ret != 0:
  raise newException(CertificateCreationError, "Failed to set serial number")

proc parseCertTime*(certTime: string): Time {.raises: [TimeParseError].} =
  var timeNoZone = certTime[0 ..^ 5] # removes GMT part
  # days with 1 digit have additional space -> strip it
  timeNoZone = timeNoZone.replace("  ", " ")
  const certTimeFormat = "MMM d hh:mm:ss yyyy"
  const f = initTimeFormat(certTimeFormat)
  return parse(timeNoZone, f, utc()).toTime()
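For illustration, both of these OpenSSL-style validity strings (hypothetical values) normalize to the same instant, since `[0 ..^ 5]` drops the trailing " GMT" and the double space only appears for single-digit days:

# Sketch with assumed inputs; not part of the diff.
doAssert parseCertTime("Jan  2 12:05:00 2030 GMT") == parseCertTime("Jan 2 12:05:00 2030 GMT")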

# Prepare Buffer for Certificate Serialization
const CERT_BUFFER_SIZE = 4096
var certBuffer: array[CERT_BUFFER_SIZE, byte]
var outputCertificate: seq[byte]

if encodingFormat == EncodingFormat.DER:
  let certLen: cint = mbedtls_x509write_crt_der(
    addr crt,
    addr certBuffer[0],
    CERT_BUFFER_SIZE.uint,
    mbedtls_ctr_drbg_random,
    addr ctrDrbg,
  )
  if certLen < 0:
    raise newException(
      CertificateCreationError, "Failed to write certificate in DER format"
    )
  # Adjust the buffer to contain only the data
  outputCertificate =
    toSeq(certBuffer[(CERT_BUFFER_SIZE - certLen) ..< CERT_BUFFER_SIZE])
else:
  let ret = mbedtls_x509write_crt_pem(
    addr crt,
    addr certBuffer[0],
    CERT_BUFFER_SIZE.uint,
    mbedtls_ctr_drbg_random,
    addr ctrDrbg,
  )
  if ret != 0:
    raise newException(
      CertificateCreationError, "Failed to write certificate in PEM format"
    )
  let n = certBuffer.find(0'u8) # Find the index of the first null byte
  outputCertificate = certBuffer[0 .. n - 1].toSeq()

# Serialize the Private Key
var privKeyBuffer: array[2048, byte]
var outputPrivateKey: seq[byte]

if encodingFormat == EncodingFormat.DER:
  let privKeyLen = mbedtls_pk_write_key_der(
    addr certKey, addr privKeyBuffer[0], privKeyBuffer.len.uint
  )
  if privKeyLen < 0:
    raise newException(
      CertificateCreationError, "Failed to write private key in DER format"
    )
  # Adjust the buffer to contain only the data
  outputPrivateKey =
    toSeq(privKeyBuffer[(privKeyBuffer.len - privKeyLen) ..< privKeyBuffer.len])
else:
  let ret = mbedtls_pk_write_key_pem(
    addr certKey, addr privKeyBuffer[0], privKeyBuffer.len.uint
  )
  if ret != 0:
    raise newException(
      CertificateCreationError, "Failed to write private key in PEM format"
    )
  let n = privKeyBuffer.find(0'u8) # Find the index of the first null byte
  outputPrivateKey = privKeyBuffer[0 .. n - 1].toSeq()

# Return the Serialized Certificate and Private Key
return (outputCertificate, outputPrivateKey)

proc libp2pext(
    p_ctx: pointer,
    crt: ptr mbedtls_x509_crt,
    oid: ptr mbedtls_x509_buf,
    critical: cint,
    p: ptr byte,
    endPtr: ptr byte,
): cint {.cdecl.} =
  ## Callback function to parse the libp2p extension.
  ##
  ## This function is used as a callback by mbedtls during certificate parsing
  ## to extract the libp2p extension containing the SignedKey.
  ##
  ## Parameters:
  ## - `p_ctx`: Pointer to the P2pExtension object to store the parsed data.
  ## - `crt`: Pointer to the certificate being parsed.
  ## - `oid`: Pointer to the OID of the extension.
  ## - `critical`: Critical flag of the extension.
  ## - `p`: Pointer to the start of the extension data.
  ## - `endPtr`: Pointer to the end of the extension data.
  ##
  ## Returns:
  ## - 0 on success, or a negative error code on failure.

  # Check if the OID matches the libp2p extension
  if oid.len != LIBP2P_EXT_OID_DER.len:
    return MBEDTLS_ERR_OID_NOT_FOUND # Extension not handled by this callback
  for i in 0 ..< LIBP2P_EXT_OID_DER.len:
    if ptrInc(oid.p, i.uint)[] != LIBP2P_EXT_OID_DER[i]:
      return MBEDTLS_ERR_OID_NOT_FOUND # Extension not handled by this callback

  var parsePtr = p

  # Parse SEQUENCE tag and length
  var len: uint
  if mbedtls_asn1_get_tag(
    addr parsePtr, endPtr, addr len, MBEDTLS_ASN1_CONSTRUCTED or MBEDTLS_ASN1_SEQUENCE
  ) != 0:
    debug "Failed to parse SEQUENCE in libp2p extension"
    return MBEDTLS_ERR_ASN1_UNEXPECTED_TAG

  # Parse publicKey OCTET STRING
  var pubKeyLen: uint
  if mbedtls_asn1_get_tag(
    addr parsePtr, endPtr, addr pubKeyLen, MBEDTLS_ASN1_OCTET_STRING
  ) != 0:
    debug "Failed to parse publicKey OCTET STRING in libp2p extension"
    return MBEDTLS_ERR_ASN1_UNEXPECTED_TAG

  # Extract publicKey
  var publicKey = newSeq[byte](int(pubKeyLen))
  copyMem(addr publicKey[0], parsePtr, int(pubKeyLen))
  parsePtr = ptrInc(parsePtr, pubKeyLen)

  # Parse signature OCTET STRING
  var signatureLen: uint
  if mbedtls_asn1_get_tag(
    addr parsePtr, endPtr, addr signatureLen, MBEDTLS_ASN1_OCTET_STRING
  ) != 0:
    debug "Failed to parse signature OCTET STRING in libp2p extension"
    return MBEDTLS_ERR_ASN1_UNEXPECTED_TAG

  # Extract signature
  var signature = newSeq[byte](int(signatureLen))
  copyMem(addr signature[0], parsePtr, int(signatureLen))

  # Store the publicKey and signature in the P2pExtension
  let extension = cast[ptr P2pExtension](p_ctx)
  extension[].publicKey = publicKey
  extension[].signature = signature

  return 0 # Success

proc parse*(
    certificateDer: seq[byte]
@@ -540,23 +259,54 @@ proc parse*(
  ##
  ## Raises:
  ## - `CertificateParsingError` if certificate parsing fails.
  var crt: mbedtls_x509_crt
  mbedtls_x509_crt_init(addr crt)
  defer:
    mbedtls_x509_crt_free(addr crt)

  var extension = P2pExtension()
  let ret = mbedtls_x509_crt_parse_der_with_ext_cb(
    addr crt,
    unsafeAddr certificateDer[0],
    certificateDer.len.uint,
    0,
    libp2pext,
    addr extension,
  )
  if ret != 0:
    raise newException(
      CertificateParsingError, "Failed to parse certificate, error code: " & $ret
    )

  return P2pCertificate(certificate: crt, extension: extension)

  let certDerBuffer = certificateDer.toCertBuffer()
  let certParsed: ptr cert_parsed = nil
  defer:
    cert_free_parsed(certParsed)

  let ret =
    cert_parse(certDerBuffer.unsafeAddr, DER.cert_format_t(), certParsed.unsafeAddr)
  if ret != CERT_SUCCESS:
    raise newException(
      CertificateParsingError, "Failed to parse certificate, error code: " & $ret
    )

  var validFrom, validTo: Time
  try:
    validFrom = parseCertTime($certParsed.valid_from)
    validTo = parseCertTime($certParsed.valid_to)
  except TimeParseError as e:
    raise newException(
      CertificateParsingError, "Failed to parse certificate validity time, " & $e.msg
    )

  P2pCertificate(
    extension: P2pExtension(
      signature: certParsed.signature.toSeq(), publicKey: certParsed.ident_pubk.toSeq()
    ),
    pubKeyDer: certParsed.cert_pbuk.toSeq(),
    validFrom: validFrom,
    validTo: validTo,
  )

proc verify*(self: P2pCertificate): bool =
  ## Verifies that the P2pCertificate's signature was produced by the owner of the certificate.
  ##
  ## Parameters:
  ## - `self`: The P2pCertificate.
  ##
  ## Returns:
  ## `true` if the certificate is valid.

  let currentTime = now().utc().toTime()
  if not (currentTime >= self.validFrom and currentTime < self.validTo):
    return false

  var sig: Signature
  var key: PublicKey
  if sig.init(self.extension.signature) and key.init(self.extension.publicKey):
    let msg = makeSignatureMessage(self.pubKeyDer)
    return sig.verify(msg, key)

  return false
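Putting the two procs together, a caller would typically do something like this (minimal sketch; `derBytes` stands in for a DER-encoded certificate received from a peer):

# Sketch with an assumed input; not part of the diff.
let cert = parse(derBytes) # raises CertificateParsingError on malformed input
doAssert cert.verify() # false if expired or the extension signature is invalid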
libp2p/transports/tls/certificate_ffi.nim (new file, 85 lines)
@@ -0,0 +1,85 @@
when defined(macosx):
  {.passl: "-L/opt/homebrew/opt/openssl@3/lib -lcrypto".}
  {.passc: "-I/opt/homebrew/opt/openssl@3/include".}
else:
  {.passl: "-lcrypto".}

{.compile: "./certificate.c".}

type
  cert_error_t* = int32

  cert_format_t* {.size: sizeof(cuint).} = enum
    CERT_FORMAT_DER = 0
    CERT_FORMAT_PEM = 1

  cert_buffer* {.pure, inheritable, bycopy.} = object
    data*: ptr uint8
    length*: csize_t

  cert_parsed* {.pure, inheritable, bycopy.} = object
    signature*: ptr cert_buffer
    ident_pubk*: ptr cert_buffer
    cert_pbuk*: ptr cert_buffer
    valid_from*: cstring
    valid_to*: cstring

  cert_context_s* = object

  cert_key_s* = object

  cert_context_t* = ptr cert_context_s

  cert_key_t* = ptr cert_key_s

const CERT_SUCCESS* = 0

proc cert_init_drbg*(
  seed: cstring, seed_len: csize_t, ctx: ptr cert_context_t
): cert_error_t {.cdecl, importc: "cert_init_drbg".}

proc cert_generate_key*(
  ctx: cert_context_t, out_arg: ptr cert_key_t
): cert_error_t {.cdecl, importc: "cert_generate_key".}

proc cert_serialize_privk*(
  key: cert_key_t, out_arg: ptr ptr cert_buffer, format: cert_format_t
): cert_error_t {.cdecl, importc: "cert_serialize_privk".}

proc cert_serialize_pubk*(
  key: cert_key_t, out_arg: ptr ptr cert_buffer, format: cert_format_t
): cert_error_t {.cdecl, importc: "cert_serialize_pubk".}

proc cert_generate*(
  ctx: cert_context_t,
  key: cert_key_t,
  out_arg: ptr ptr cert_buffer,
  signature: ptr cert_buffer,
  ident_pubk: ptr cert_buffer,
  cn: cstring,
  validFrom: cstring,
  validTo: cstring,
  format: cert_format_t,
): cert_error_t {.cdecl, importc: "cert_generate".}

proc cert_parse*(
  cert: ptr cert_buffer, format: cert_format_t, out_arg: ptr ptr cert_parsed
): cert_error_t {.cdecl, importc: "cert_parse".}

proc cert_free_ctr_drbg*(
  ctx: cert_context_t
): void {.cdecl, importc: "cert_free_ctr_drbg".}

proc cert_free_key*(key: cert_key_t): void {.cdecl, importc: "cert_free_key".}

proc cert_free_buffer*(
  buffer: ptr cert_buffer
): void {.cdecl, importc: "cert_free_buffer".}

proc cert_free_parsed*(
  cert: ptr cert_parsed
): void {.cdecl, importc: "cert_free_parsed".}

proc cert_signing_req*(
  cn: cstring, key: cert_key_t, csr_buffer: ptr ptr cert_buffer
): cert_error_t {.cdecl, importc: "cert_signing_req".}
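Taken together, the bindings imply an init → generate → serialize → free lifecycle. A minimal sketch under that assumption (the C side lives in certificate.c; error handling abbreviated, seed value illustrative):

var ctx: cert_context_t
doAssert cert_init_drbg("seed", 4.csize_t, addr ctx) == CERT_SUCCESS

var key: cert_key_t
doAssert cert_generate_key(ctx, addr key) == CERT_SUCCESS

var privDer: ptr cert_buffer = nil
doAssert cert_serialize_privk(key, addr privDer, CERT_FORMAT_DER) == CERT_SUCCESS
# privDer.data / privDer.length now hold the DER-encoded private key

cert_free_buffer(privDer)
cert_free_key(key)
cert_free_ctr_drbg(ctx)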
@@ -11,9 +11,8 @@

{.push raises: [].}

import std/strformat
import chronos, chronicles, strutils
import stew/[byteutils, endians2, results, objects]
import chronos, chronicles, strutils, results
import stew/[byteutils, endians2, objects]
import ../multicodec
import
  transport,
@@ -302,7 +301,7 @@ proc new*(
    flags: set[ServerFlags] = {},
): TorSwitch {.raises: [LPError], public.} =
  var builder = SwitchBuilder.new().withRng(rng).withTransport(
    proc(upgr: Upgrade): Transport =
    proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
      TorTransport.new(torServer, flags, upgr)
  )
  if addresses.len != 0:
@@ -325,7 +324,7 @@ proc new*(
  return torSwitch

method addTransport*(s: TorSwitch, t: Transport) =
  doAssert(false, "not implemented!")
  doAssert(false, "[TorSwitch.addTransport] abstract method not implemented!")

method getTorTransport*(s: TorSwitch): Transport {.base.} =
  return s.transports[0]
@@ -66,7 +66,7 @@ method accept*(
  ## accept incoming connections
  ##

  doAssert(false, "Not implemented!")
  doAssert(false, "[Transport.accept] abstract method not implemented!")

method dial*(
  self: Transport,
@@ -79,7 +79,7 @@ method dial*(
  ## dial a peer
  ##

  doAssert(false, "Not implemented!")
  doAssert(false, "[Transport.dial] abstract method not implemented!")

proc dial*(
  self: Transport, address: MultiAddress, peerId: Opt[PeerId] = Opt.none(PeerId)
@@ -12,7 +12,7 @@
{.push raises: [].}

import std/[sequtils]
import stew/results
import results
import chronos, chronicles
import
  transport,
@@ -45,7 +45,7 @@ type
method upgrade*(
  self: Upgrade, conn: Connection, peerId: Opt[PeerId]
): Future[Muxer] {.async: (raises: [CancelledError, LPError], raw: true), base.} =
  raiseAssert("Not implemented!")
  raiseAssert("[Upgrade.upgrade] abstract method not implemented!")

proc secure*(
  self: Upgrade, conn: Connection, peerId: Opt[PeerId]
@@ -10,7 +10,8 @@
{.push raises: [].}

import std/[sets, options, macros]
import stew/[byteutils, results]
import stew/byteutils
import results

export results

@@ -18,7 +18,8 @@

{.push raises: [].}

import stew/[byteutils, leb128, results]
import stew/[byteutils, leb128]
import results
export leb128, results

type
@@ -20,7 +20,7 @@ when defined(windows): import winlean else: import posix
const
  RTRANSPMA* = mapOr(TCP, WebSockets, UNIX)

  TRANSPMA* = mapOr(RTRANSPMA, QUIC, UDP)
  TRANSPMA* = mapOr(RTRANSPMA, QUIC, QUIC_V1, UDP)

proc initTAddress*(ma: MultiAddress): MaResult[TransportAddress] =
  ## Initialize ``TransportAddress`` with MultiAddress ``ma``.
@@ -75,7 +75,7 @@ proc connect*(
  ## ``bufferSize`` is size of internal buffer for transport.
  ##

  if not (RTRANSPMA.match(ma)):
  if not (TRANSPMA.match(ma)):
    raise newException(MaInvalidAddress, "Incorrect or unsupported address!")

  let transportAddress = initTAddress(ma).tryGet()
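With QUIC_V1 added to TRANSPMA and connect() now matching against TRANSPMA, an address like the following (hypothetical endpoint) should pass the guard:

# Sketch with an assumed address; not part of the diff.
let ma = MultiAddress.init("/ip4/127.0.0.1/udp/4242/quic-v1").tryGet()
doAssert TRANSPMA.match(ma)
let transportAddress = initTAddress(ma).tryGet()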
@@ -1,8 +1,8 @@
site_name: nim-libp2p

repo_url: https://github.com/status-im/nim-libp2p
repo_name: status-im/nim-libp2p
site_url: https://status-im.github.io/nim-libp2p/docs
repo_url: https://github.com/vacp2p/nim-libp2p
repo_name: vacp2p/nim-libp2p
site_url: https://vacp2p.github.io/nim-libp2p/docs
# Can't find a way to point the edit to the .nim instead
# of the .md
edit_uri: ''
@@ -1,4 +1,3 @@
import options, tables
import chronos, chronicles, stew/byteutils
import helpers
import ../libp2p
@@ -8,7 +7,7 @@ import ../libp2p/protocols/connectivity/relay/[relay, client, utils]
type
  SwitchCreator = proc(
    ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
    prov: TransportProvider = proc(upgr: Upgrade): Transport =
    prov: TransportProvider = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
      TcpTransport.new({}, upgr),
    relay: Relay = Relay.new(circuitRelayV1 = true),
  ): Switch {.gcsafe, raises: [LPError].}
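The signature change above recurs throughout this diff: a TransportProvider now also receives the switch's private key, presumably so QUIC/TLS-style transports can derive their certificates from it. A minimal sketch of the new shape:

# Sketch of the updated provider signature; TCP simply ignores the key.
let provider: TransportProvider = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
  TcpTransport.new({}, upgr)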
@@ -319,7 +318,7 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =

  let nativeNode = swCreator(
    ma = wsAddress,
    prov = proc(upgr: Upgrade): Transport =
    prov = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
      WsTransport.new(upgr),
  )

@@ -359,7 +358,7 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
    .withRng(crypto.newRng())
    .withMplex()
    .withTransport(
      proc(upgr: Upgrade): Transport =
      proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
        WsTransport.new(upgr)
    )
    .withNoise()
@@ -1,15 +1,9 @@
{.used.}

import chronos, stew/[byteutils, results]
import chronos, results, stew/byteutils
import
  ../libp2p/
    [
      stream/connection,
      transports/transport,
      upgrademngrs/upgrade,
      multiaddress,
      errors,
    ]
  [stream/connection, transports/transport, upgrademngrs/upgrade, multiaddress]

import ./helpers

@@ -12,10 +12,12 @@ import ../libp2p/stream/chronosstream
import ../libp2p/muxers/mplex/lpchannel
import ../libp2p/protocols/secure/secure
import ../libp2p/switch
import ../libp2p/nameresolving/[nameresolver, mockresolver]
import ../libp2p/nameresolving/mockresolver

import "."/[asyncunit, errorhelpers]
export asyncunit, errorhelpers, mockresolver
import errorhelpers
import utils/async_tests

export async_tests, errorhelpers, mockresolver

const
  StreamTransportTrackerName = "stream.transport"
@@ -47,7 +49,7 @@ template checkTrackers*() =
  {.push warning[BareExcept]: off.}
  try:
    GC_fullCollect()
  except:
  except CatchableError:
    discard
  when defined(nimHasWarnBareExcept):
    {.pop.}
@@ -90,25 +92,6 @@ proc new*(T: typedesc[TestBufferStream], writeHandler: WriteHandler): T =
  testBufferStream.initStream()
  testBufferStream

proc bridgedConnections*(): (Connection, Connection) =
  let
    connA = TestBufferStream()
    connB = TestBufferStream()
  connA.dir = Direction.Out
  connB.dir = Direction.In
  connA.initStream()
  connB.initStream()
  connA.writeHandler = proc(
    data: seq[byte]
  ) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
    connB.pushData(data)

  connB.writeHandler = proc(
    data: seq[byte]
  ) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
    connA.pushData(data)
  return (connA, connB)

macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
  ## Periodically checks a given condition until it is true or a timeout occurs.
  ##
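The macro's intended call shape, sketched from its doc comment (the condition is illustrative, borrowed from the FloodSub tests below):

# Sketch; not part of the diff.
checkUntilCustomTimeout(5.seconds):
  messageReceived == 1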
@@ -5,17 +5,15 @@ WORKDIR /workspace

COPY .pinned libp2p.nimble nim-libp2p/

RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y python python3 python3-pip python3-venv curl
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev

RUN mkdir .venv && python3 -m venv .venv && . .venv/bin/activate

RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240dbf11c544011dd0e033d3c3acca56af" -y

COPY . nim-libp2p/

RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./tests/hole-punching-interop/hole_punching.nim

FROM --platform=linux/amd64 debian:bookworm-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2
FROM --platform=linux/amd64 debian:bullseye-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2 libssl-dev
COPY --from=builder /workspace/nim-libp2p/hole-punching-tests /usr/bin/hole-punch-client
ENV RUST_BACKTRACE=1
@@ -18,6 +18,9 @@ import
import ../stubs/autonatclientstub
import ../errorhelpers

logScope:
  topics = "hp interop node"

proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
  let rng = newRng()
  var builder = SwitchBuilder
@@ -41,86 +44,96 @@ proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
  return s

proc main() {.async.} =
  let relayClient = RelayClient.new()
  let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
  let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
  autonatClientStub.answer = NotReachable
  let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
  let hpservice = HPService.new(autonatService, autoRelayService)

  let
    isListener = getEnv("MODE") == "listen"
    switch = createSwitch(relayClient, hpservice)
    auxSwitch = createSwitch()
    redisClient = open("redis", 6379.Port)

  debug "Connected to redis"

  await switch.start()
  await auxSwitch.start()

  let relayAddr =
    try:
      redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
    except Exception as e:
      raise newException(CatchableError, e.msg)

  debug "All relay addresses", relayAddr

  # This is necessary to make the autonat service work. It will ask this peer for our reachability which the autonat
  # client stub will answer NotReachable.
  await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)

  # Wait for autonat to be NotReachable
  while autonatService.networkReachability != NetworkReachability.NotReachable:
    await sleepAsync(100.milliseconds)

  # This will trigger the autonat relay service to make a reservation.
  let relayMA = MultiAddress.init(relayAddr[1]).tryGet()

  try:
    let relayClient = RelayClient.new()
    let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
    let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
    autonatClientStub.answer = NotReachable
    let autonatService =
      AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
    let hpservice = HPService.new(autonatService, autoRelayService)
    debug "Dialing relay...", relayMA
    let relayId = await switch.connect(relayMA).wait(30.seconds)
    debug "Connected to relay", relayId
  except AsyncTimeoutError:
    raise newException(CatchableError, "Connection to relay timed out")

  let
    isListener = getEnv("MODE") == "listen"
    switch = createSwitch(relayClient, hpservice)
    auxSwitch = createSwitch()
    redisClient = open("redis", 6379.Port)
  # Wait for our relay address to be published
  while not switch.peerInfo.addrs.anyIt(it.contains(multiCodec("p2p-circuit")).tryGet()):
    await sleepAsync(100.milliseconds)

  debug "Connected to redis"
  if isListener:
    let listenerPeerId = switch.peerInfo.peerId
    discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
    debug "Pushed listener client peer id to redis", listenerPeerId

  await switch.start()
  await auxSwitch.start()

  let relayAddr =
    # Nothing to do anymore, wait to be killed
    await sleepAsync(2.minutes)
  else:
    let listenerId =
      try:
        redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
        PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
      except Exception as e:
        raise newException(CatchableError, e.msg)

  # This is necessary to make the autonat service work. It will ask this peer for our reachability which the autonat
  # client stub will answer NotReachable.
  await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)
    debug "Got listener peer id", listenerId
    let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()

  # Wait for autonat to be NotReachable
  while autonatService.networkReachability != NetworkReachability.NotReachable:
    await sleepAsync(100.milliseconds)
    debug "Dialing listener relay address", listenerRelayAddr
    await switch.connect(listenerId, @[listenerRelayAddr])

  # This will trigger the autonat relay service to make a reservation.
  let relayMA = MultiAddress.init(relayAddr[1]).tryGet()
  debug "Got relay address", relayMA
  let relayId = await switch.connect(relayMA)
  debug "Connected to relay", relayId
    # wait for hole-punching to complete in the background
    await sleepAsync(5000.milliseconds)

  # Wait for our relay address to be published
  while not switch.peerInfo.addrs.anyIt(
    it.contains(multiCodec("p2p-circuit")).tryGet()
  ):
    await sleepAsync(100.milliseconds)
    let conn = switch.connManager.selectMuxer(listenerId).connection
    let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
    let delay = await Ping.new().ping(channel)
    await allFuturesThrowing(
      channel.close(), conn.close(), switch.stop(), auxSwitch.stop()
    )
    echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""

  if isListener:
    let listenerPeerId = switch.peerInfo.peerId
    discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
    debug "Pushed listener client peer id to redis", listenerPeerId
try:
  proc mainAsync(): Future[string] {.async.} =
    # mainAsync wraps main and returns some value, as otherwise
    # 'waitFor(fut)' has no type (or is ambiguous)
    await main()
    return "done"

    # Nothing to do anymore, wait to be killed
    await sleepAsync(2.minutes)
  else:
    let listenerId =
      try:
        PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
      except Exception as e:
        raise newException(CatchableError, e.msg)

    debug "Got listener peer id", listenerId
    let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()

    debug "Dialing listener relay address", listenerRelayAddr
    await switch.connect(listenerId, @[listenerRelayAddr])

    # wait for hole-punching to complete in the background
    await sleepAsync(5000.milliseconds)

    let conn = switch.connManager.selectMuxer(listenerId).connection
    let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
    let delay = await Ping.new().ping(channel)
    await allFuturesThrowing(
      channel.close(), conn.close(), switch.stop(), auxSwitch.stop()
    )
    echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
    quit(0)
except CatchableError as e:
  error "Unexpected error", description = e.msg

discard waitFor(main().withTimeout(4.minutes))
  quit(1)
  discard waitFor(mainAsync().wait(4.minutes))
except AsyncTimeoutError:
  error "Program execution timed out."
  quit(-1)
except CatchableError as e:
  error "Unexpected error", description = e.msg
  quit(-1)
@@ -9,12 +9,11 @@

{.used.}

import sequtils, options, tables, sets
import sequtils, tables, sets
import chronos, stew/byteutils
import
  utils,
  ../../libp2p/[
    errors,
    switch,
    stream/connection,
    crypto/crypto,
@@ -49,13 +48,10 @@ suite "FloodSub":
      check topic == "foobar"
      completionFut.complete(true)

    let
      nodes = generateNodes(2)
    let nodes = generateNodes(2)

      # start switches
      nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())

    await subscribeNodes(nodes)
    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes[1].subscribe("foobar", handler)
    await waitSub(nodes[0], nodes[1], "foobar")
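For reference, the pattern these test hunks converge on (helper names as introduced by this diff; generateNodes as already used in the suite):

# Sketch of the new test setup shape.
let nodes = generateNodes(2)
startNodesAndDeferStop(nodes) # starts every switch and defers the matching stop
await connectNodesStar(nodes) # fully connects the nodes to each other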
@@ -71,48 +67,33 @@ suite "FloodSub":
      agentA == "nim-libp2p"
      agentB == "nim-libp2p"

    await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())

    await allFuturesThrowing(nodesFut.concat())

  asyncTest "FloodSub basic publish/subscribe B -> A":
    var completionFut = newFuture[bool]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      completionFut.complete(true)

    let
      nodes = generateNodes(2)
    let nodes = generateNodes(2)

      # start switches
      nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())

    await subscribeNodes(nodes)
    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes[0].subscribe("foobar", handler)
    await waitSub(nodes[1], nodes[0], "foobar")

    check (await nodes[1].publish("foobar", "Hello!".toBytes())) > 0

    check (await completionFut.wait(5.seconds)) == true

    await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())

    await allFuturesThrowing(nodesFut)

  asyncTest "FloodSub validation should succeed":
    var handlerFut = newFuture[bool]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      handlerFut.complete(true)

    let
      nodes = generateNodes(2)
    let nodes = generateNodes(2)

      # start switches
      nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())

    await subscribeNodes(nodes)
    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes[1].subscribe("foobar", handler)
    await waitSub(nodes[0], nodes[1], "foobar")
@@ -130,21 +111,15 @@ suite "FloodSub":
    check (await nodes[0].publish("foobar", "Hello!".toBytes())) > 0
    check (await handlerFut) == true

    await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())

    await allFuturesThrowing(nodesFut)

  asyncTest "FloodSub validation should fail":
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check false # if we get here, it should fail

    let
      nodes = generateNodes(2)
    let nodes = generateNodes(2)

      # start switches
      nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    await subscribeNodes(nodes)
    nodes[1].subscribe("foobar", handler)
    await waitSub(nodes[0], nodes[1], "foobar")

@@ -159,23 +134,17 @@ suite "FloodSub":

    discard await nodes[0].publish("foobar", "Hello!".toBytes())

    await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())

    await allFuturesThrowing(nodesFut)

  asyncTest "FloodSub validation one fails and one succeeds":
    var handlerFut = newFuture[bool]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foo"
      handlerFut.complete(true)

    let
      nodes = generateNodes(2)
    let nodes = generateNodes(2)

      # start switches
      nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    await subscribeNodes(nodes)
    nodes[1].subscribe("foo", handler)
    await waitSub(nodes[0], nodes[1], "foo")
    nodes[1].subscribe("bar", handler)
@@ -194,10 +163,6 @@ suite "FloodSub":
    check (await nodes[0].publish("foo", "Hello!".toBytes())) > 0
    check (await nodes[0].publish("bar", "Hello!".toBytes())) > 0

    await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())

    await allFuturesThrowing(nodesFut)

  asyncTest "FloodSub multiple peers, no self trigger":
    var runs = 10

@@ -219,11 +184,10 @@ suite "FloodSub":
      counter,
    )

    let
      nodes = generateNodes(runs, triggerSelf = false)
      nodesFut = nodes.mapIt(it.switch.start())
    let nodes = generateNodes(runs, triggerSelf = false)

    await subscribeNodes(nodes)
    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    for i in 0 ..< runs:
      nodes[i].subscribe("foobar", futs[i][1])
@@ -241,9 +205,6 @@ suite "FloodSub":
    await allFuturesThrowing(pubs)

    await allFuturesThrowing(futs.mapIt(it[0]))
    await allFuturesThrowing(nodes.mapIt(allFutures(it.switch.stop())))

    await allFuturesThrowing(nodesFut)

  asyncTest "FloodSub multiple peers, with self trigger":
    var runs = 10
@@ -266,11 +227,10 @@ suite "FloodSub":
      counter,
    )

    let
      nodes = generateNodes(runs, triggerSelf = true)
      nodesFut = nodes.mapIt(it.switch.start())
    let nodes = generateNodes(runs, triggerSelf = true)

    await subscribeNodes(nodes)
    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    for i in 0 ..< runs:
      nodes[i].subscribe("foobar", futs[i][1])
@@ -299,10 +259,6 @@ suite "FloodSub":
      # remove the topic tho
      node.topics.len == 0

    await allFuturesThrowing(nodes.mapIt(allFutures(it.switch.stop())))

    await allFuturesThrowing(nodesFut)

  asyncTest "FloodSub message size validation":
    var messageReceived = 0
    proc handler(topic: string, data: seq[byte]) {.async.} =
@@ -313,11 +269,9 @@ suite "FloodSub":
      bigNode = generateNodes(1)
      smallNode = generateNodes(1, maxMessageSize = 200)

      # start switches
      nodesFut =
        await allFinished(bigNode[0].switch.start(), smallNode[0].switch.start())
    startNodesAndDeferStop(bigNode & smallNode)
    await connectNodesStar(bigNode & smallNode)

    await subscribeNodes(bigNode & smallNode)
    bigNode[0].subscribe("foo", handler)
    smallNode[0].subscribe("foo", handler)
    await waitSub(bigNode[0], smallNode[0], "foo")
@@ -337,10 +291,6 @@ suite "FloodSub":
    check (await smallNode[0].publish("foo", bigMessage)) > 0
    check (await bigNode[0].publish("foo", bigMessage)) > 0

    await allFuturesThrowing(smallNode[0].switch.stop(), bigNode[0].switch.stop())

    await allFuturesThrowing(nodesFut)

  asyncTest "FloodSub message size validation 2":
    var messageReceived = 0
    proc handler(topic: string, data: seq[byte]) {.async.} =
@@ -350,11 +300,9 @@ suite "FloodSub":
      bigNode1 = generateNodes(1, maxMessageSize = 20000000)
      bigNode2 = generateNodes(1, maxMessageSize = 20000000)

      # start switches
      nodesFut =
        await allFinished(bigNode1[0].switch.start(), bigNode2[0].switch.start())
    startNodesAndDeferStop(bigNode1 & bigNode2)
    await connectNodesStar(bigNode1 & bigNode2)

    await subscribeNodes(bigNode1 & bigNode2)
    bigNode2[0].subscribe("foo", handler)
    await waitSub(bigNode1[0], bigNode2[0], "foo")

@@ -364,7 +312,3 @@ suite "FloodSub":

    checkUntilTimeout:
      messageReceived == 1

    await allFuturesThrowing(bigNode1[0].switch.stop(), bigNode2[0].switch.stop())

    await allFuturesThrowing(nodesFut)
@@ -1,925 +0,0 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import std/[options, deques, sequtils, enumerate, algorithm]
|
||||
import stew/byteutils
|
||||
import ../../libp2p/builders
|
||||
import ../../libp2p/errors
|
||||
import ../../libp2p/crypto/crypto
|
||||
import ../../libp2p/stream/bufferstream
|
||||
import ../../libp2p/protocols/pubsub/[pubsub, gossipsub, mcache, mcache, peertable]
|
||||
import ../../libp2p/protocols/pubsub/rpc/[message, messages]
|
||||
import ../../libp2p/switch
|
||||
import ../../libp2p/muxers/muxer
|
||||
import ../../libp2p/protocols/pubsub/rpc/protobuf
|
||||
import utils
|
||||
|
||||
import ../helpers
|
||||
|
||||
proc noop(data: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
|
||||
const MsgIdSuccess = "msg id gen success"
|
||||
|
||||
suite "GossipSub internal":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "subscribe/unsubscribeAll":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(topic: string, data: seq[byte]): Future[void] {.gcsafe, raises: [].} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# test via dynamic dispatch
|
||||
gossipSub.PubSub.subscribe(topic, handler)
|
||||
|
||||
check:
|
||||
gossipSub.topics.contains(topic)
|
||||
gossipSub.gossipsub[topic].len() > 0
|
||||
gossipSub.mesh[topic].len() > 0
|
||||
|
||||
# test via dynamic dispatch
|
||||
gossipSub.PubSub.unsubscribeAll(topic)
|
||||
|
||||
check:
|
||||
topic notin gossipSub.topics # not in local topics
|
||||
topic notin gossipSub.mesh # not in mesh
|
||||
topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "topic params":
|
||||
let params = TopicParams.init()
|
||||
params.validateParameters().tryGet()
|
||||
|
||||
asyncTest "`rebalanceMesh` Degree Lo":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len == gossipSub.parameters.d
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "rebalanceMesh - bad peers":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
var scoreLow = -11'f64
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
peer.score = scoreLow
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
scoreLow += 1.0
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# low score peers should not be in mesh, that's why the count must be 4
|
||||
check gossipSub.mesh[topic].len == 4
|
||||
for peer in gossipSub.mesh[topic]:
|
||||
check peer.score >= 0.0
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`rebalanceMesh` Degree Hi":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
check gossipSub.mesh[topic].len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len ==
|
||||
gossipSub.parameters.d + gossipSub.parameters.dScore
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`replenishFanout` Degree Lo":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
var peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
check gossipSub.gossipsub[topic].len == 15
|
||||
gossipSub.replenishFanout(topic)
|
||||
check gossipSub.fanout[topic].len == gossipSub.parameters.d
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`dropFanoutPeers` drop expired fanout topics":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
|
||||
await sleepAsync(5.millis) # allow the topic to expire
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 6:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
|
||||
check gossipSub.fanout[topic].len == gossipSub.parameters.d
|
||||
|
||||
gossipSub.dropFanoutPeers()
|
||||
check topic notin gossipSub.fanout
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic1 = "foobar1"
|
||||
let topic2 = "foobar2"
|
||||
gossipSub.topicParams[topic1] = TopicParams.init()
|
||||
gossipSub.topicParams[topic2] = TopicParams.init()
|
||||
gossipSub.fanout[topic1] = initHashSet[PubSubPeer]()
|
||||
gossipSub.fanout[topic2] = initHashSet[PubSubPeer]()
|
||||
gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
|
||||
gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
|
||||
await sleepAsync(5.millis) # allow the topic to expire
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 6:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
gossipSub.fanout[topic1].incl(peer)
|
||||
gossipSub.fanout[topic2].incl(peer)
|
||||
|
||||
check gossipSub.fanout[topic1].len == gossipSub.parameters.d
|
||||
check gossipSub.fanout[topic2].len == gossipSub.parameters.d
|
||||
|
||||
gossipSub.dropFanoutPeers()
|
||||
check topic1 notin gossipSub.fanout
|
||||
check topic2 in gossipSub.fanout
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
var conns = newSeq[Connection]()
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i in 0 ..< 30:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
if i mod 2 == 0:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
else:
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
# generate gossipsub (free standing) peers
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
inc seqno
|
||||
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
check gossipSub.fanout[topic].len == 15
|
||||
check gossipSub.mesh[topic].len == 15
|
||||
check gossipSub.gossipsub[topic].len == 15
|
||||
|
||||
let peers = gossipSub.getGossipPeers()
|
||||
check peers.len == gossipSub.parameters.d
|
||||
for p in peers.keys:
|
||||
check not gossipSub.fanout.hasPeerId(topic, p.peerId)
|
||||
check not gossipSub.mesh.hasPeerId(topic, p.peerId)
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 30:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
if i mod 2 == 0:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
else:
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
inc seqno
|
||||
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let peers = gossipSub.getGossipPeers()
|
||||
check peers.len == gossipSub.parameters.d
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 30:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
if i mod 2 == 0:
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
gossipSub.grafted(peer, topic)
|
||||
else:
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
inc seqno
|
||||
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let peers = gossipSub.getGossipPeers()
|
||||
check peers.len == gossipSub.parameters.d
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 30:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
if i mod 2 == 0:
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
gossipSub.grafted(peer, topic)
|
||||
else:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
inc seqno
|
||||
let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let peers = gossipSub.getGossipPeers()
|
||||
check peers.len == 0
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "Drop messages of topics without subscription":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
check false
|
||||
|
||||
let topic = "foobar"
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 30:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
inc seqno
|
||||
let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
|
||||
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
|
||||
|
||||
check gossipSub.mcache.msgs.len == 0
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "Disconnect bad peers":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
gossipSub.parameters.disconnectBadPeers = true
|
||||
gossipSub.parameters.appSpecificWeight = 1.0
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
check false
|
||||
|
||||
let topic = "foobar"
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 30:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
peer.handler = handler
|
||||
peer.appScore = gossipSub.parameters.graylistThreshold - 1
|
||||
gossipSub.gossipsub.mgetOrPut(topic, initHashSet[PubSubPeer]()).incl(peer)
|
||||
gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))
|
||||
|
||||
gossipSub.updateScores()
|
||||
|
||||
await sleepAsync(100.millis)
|
||||
|
||||
check:
|
||||
# test our disconnect mechanics
|
||||
gossipSub.gossipsub.peers(topic) == 0
|
||||
# also ensure we cleanup properly the peersInIP table
|
||||
gossipSub.peersInIP.len == 0
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "subscription limits":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
gossipSub.topicsHigh = 10
|
||||
|
||||
var tooManyTopics: seq[string]
|
||||
for i in 0 .. gossipSub.topicsHigh + 10:
|
||||
tooManyTopics &= "topic" & $i
|
||||
let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)
|
||||
|
||||
let conn = TestBufferStream.new(noop)
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))
|
||||
|
||||
check:
|
||||
gossipSub.gossipsub.len == gossipSub.topicsHigh
|
||||
peer.behaviourPenalty > 0.0
|
||||
|
||||
await conn.close()
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "invalid message bytes":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
expect(CatchableError):
|
||||
await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])
|
||||
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "rebalanceMesh fail due to backoff":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
|
||||
peerId, Moment.now() + 1.hours
|
||||
)
|
||||
let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
|
||||
# there must be a control prune due to violation of backoff
|
||||
check prunes.len != 0
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# expect 0 since they are all backing off
|
||||
check gossipSub.mesh[topic].len == 0
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "rebalanceMesh fail due to backoff - remote":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len != 0
|
||||
|
||||
for i in 0 ..< 15:
|
||||
let peerId = conns[i].peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
gossipSub.handlePrune(
|
||||
peer,
|
||||
@[
|
||||
ControlPrune(
|
||||
topicID: topic,
|
||||
peers: @[],
|
||||
backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
# expect topic cleaned up since they are all pruned
|
||||
check topic notin gossipSub.mesh
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
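  # Mesh degree parameters used below: `d` is the target mesh size, `dLow` and
  # `dHigh` bound it before grafting/pruning kicks in, `dOut` is the minimum
  # number of outbound peers kept in the mesh, and `dScore` is the number of
  # peers retained by score when pruning.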
asyncTest "rebalanceMesh Degree Hi - audit scenario":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.parameters.dScore = 4
|
||||
gossipSub.parameters.d = 6
|
||||
gossipSub.parameters.dOut = 3
|
||||
gossipSub.parameters.dHigh = 12
|
||||
gossipSub.parameters.dLow = 4
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 6:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conn.transportDir = Direction.In
|
||||
conns &= conn
|
||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.score = 40.0
|
||||
peer.sendConn = conn
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
for i in 0 ..< 7:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conn.transportDir = Direction.Out
|
||||
conns &= conn
|
||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.score = 10.0
|
||||
peer.sendConn = conn
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
check gossipSub.mesh[topic].len == 13
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# ensure we are above dlow
|
||||
check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
|
||||
var outbound = 0
|
||||
for peer in gossipSub.mesh[topic]:
|
||||
if peer.sendConn.transportDir == Direction.Out:
|
||||
inc outbound
|
||||
# ensure we give priority and keep at least dOut outbound peers
|
||||
check outbound >= gossipSub.parameters.dOut
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
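  # IHAVE advertises message IDs we hold; a peer missing one answers with
  # IWANT to request the full message. The per-peer `iHaveBudget` rate-limits
  # how many messages we will request this way.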
asyncTest "handleIHave/Iwant tests":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
check false
|
||||
|
||||
proc handler2(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.subscribe(topic, handler2)
|
||||
|
||||
# Instantiates 30 peers and connects all of them to the previously defined `gossipSub`
|
||||
for i in 0 ..< 30:
|
||||
# Define a new connection
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
# Add the connection to `gossipSub`, to their `gossipSub.gossipsub` and `gossipSub.mesh` tables
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
# Peers with no budget should not request messages
|
||||
block:
|
||||
# Define a new connection
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
# Add message to `gossipSub`'s message cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[^1].incl(id)
|
||||
# Build an IHAVE message that contains the same message ID three times
|
||||
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
|
||||
# Given the peer has no budget to request messages
|
||||
peer.iHaveBudget = 0
|
||||
# When a peer makes an IHAVE request for the a message that `gossipSub` has
|
||||
let iwants = gossipSub.handleIHave(peer, @[msg])
|
||||
# Then `gossipSub` should not generate an IWant message for the message,
|
||||
check:
|
||||
iwants.messageIDs.len == 0
|
||||
|
||||
# Peers with budget should request messages. If ids are repeated, only one request should be generated
|
||||
block:
|
||||
# Define a new connection
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
# Build an IHAVE message that contains the same message ID three times
|
||||
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
|
||||
# Given the budget is not 0 (because it's not been overridden)
|
||||
# When a peer makes an IHAVE request for the a message that `gossipSub` does not have
|
||||
let iwants = gossipSub.handleIHave(peer, @[msg])
|
||||
# Then `gossipSub` should generate an IWant message for the message
|
||||
check:
|
||||
iwants.messageIDs.len == 1
|
||||
|
||||
# Peers with budget should request messages. If ids are repeated, only one request should be generated
|
||||
block:
|
||||
# Define a new connection
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
# Add message to `gossipSub`'s message cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[^1].incl(id)
|
||||
# Build an IWANT message that contains the same message ID three times
|
||||
let msg = ControlIWant(messageIDs: @[id, id, id])
|
||||
# When a peer makes an IWANT request for the a message that `gossipSub` has
|
||||
let genmsg = gossipSub.handleIWant(peer, @[msg])
|
||||
# Then `gossipSub` should return the message
|
||||
check:
|
||||
genmsg.len == 1
|
||||
|
||||
check gossipSub.mcache.msgs.len == 1
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
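  # Helpers for the IWANT reply-size tests below: `setupTest` starts two
  # connected gossipsub nodes where node 0 records every payload it receives,
  # and `createMessages` seeds node 1's message cache (and sent-IHAVE
  # bookkeeping) with two messages of the given sizes so that node 0's IWANT
  # requests can be served.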
  proc setupTest(): Future[
      tuple[
        gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]
      ]
  ] {.async.} =
    let nodes = generateNodes(2, gossip = true, verifySignature = false)
    discard await allFinished(nodes[0].switch.start(), nodes[1].switch.start())

    await nodes[1].switch.connect(
      nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
    )

    var receivedMessages = new(HashSet[seq[byte]])

    proc handlerA(topic: string, data: seq[byte]) {.async.} =
      receivedMessages[].incl(data)

    proc handlerB(topic: string, data: seq[byte]) {.async.} =
      discard

    nodes[0].subscribe("foobar", handlerA)
    nodes[1].subscribe("foobar", handlerB)
    await waitSubGraph(nodes, "foobar")

    var gossip0: GossipSub = GossipSub(nodes[0])
    var gossip1: GossipSub = GossipSub(nodes[1])

    return (gossip0, gossip1, receivedMessages)

  proc teardownTest(gossip0: GossipSub, gossip1: GossipSub) {.async.} =
    await allFuturesThrowing(gossip0.switch.stop(), gossip1.switch.stop())

  proc createMessages(
      gossip0: GossipSub, gossip1: GossipSub, size1: int, size2: int
  ): tuple[iwantMessageIds: seq[MessageId], sentMessages: HashSet[seq[byte]]] =
    var iwantMessageIds = newSeq[MessageId]()
    var sentMessages = initHashSet[seq[byte]]()

    for i, size in enumerate([size1, size2]):
      let data = newSeqWith(size, i.byte)
      sentMessages.incl(data)

      let msg =
        Message.init(gossip1.peerInfo.peerId, data, "foobar", some(uint64(i + 1)))
      let iwantMessageId = gossip1.msgIdProvider(msg).expect(MsgIdSuccess)
      iwantMessageIds.add(iwantMessageId)
      gossip1.mcache.put(iwantMessageId, msg)

      let peer = gossip1.peers[(gossip0.peerInfo.peerId)]
      peer.sentIHaves[^1].incl(iwantMessageId)

    return (iwantMessageIds, sentMessages)
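  # The four tests below pin down how IWANT replies interact with
  # `maxMessageSize`: payloads that fit individually but not together must be
  # split across separate RPCs, and a payload that alone exceeds the limit is
  # dropped rather than sent.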
  asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
    # This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
    # Expected: Both messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()

    let messageSize = gossip1.maxMessageSize div 2 + 1
    let (iwantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, messageSize, messageSize)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: iwantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    checkUntilTimeout:
      receivedMessages[] == sentMessages
    check receivedMessages[].len == 2

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
    # This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
    # Expected: No messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()

    let messageSize = gossip1.maxMessageSize + 10
    let (bigIWantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, messageSize, messageSize)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    await sleepAsync(300.milliseconds)
    checkUntilTimeout:
      receivedMessages[].len == 0

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
    # This test checks if two messages, both below the maxSize, are correctly processed and sent.
    # Expected: Both messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
    let size1 = gossip1.maxMessageSize div 2
    let size2 = gossip1.maxMessageSize div 3
    let (bigIWantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, size1, size2)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    checkUntilTimeout:
      receivedMessages[] == sentMessages
    check receivedMessages[].len == 2

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
    # This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
    # Expected: Only the smaller message should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
    let maxSize = gossip1.maxMessageSize
    let size1 = maxSize div 2
    let size2 = maxSize + 10
    let (bigIWantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, size1, size2)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    var smallestSet: HashSet[seq[byte]]
    let seqs = toSeq(sentMessages)
    if seqs[0] < seqs[1]:
      smallestSet.incl(seqs[0])
    else:
      smallestSet.incl(seqs[1])

    checkUntilTimeout:
      receivedMessages[] == smallestSet
    check receivedMessages[].len == 1

    await teardownTest(gossip0, gossip1)
@@ -1,387 +0,0 @@
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import sequtils, options, tables, sets
import chronos, stew/byteutils, chronicles
import
  utils,
  ../../libp2p/[
    errors,
    peerid,
    peerinfo,
    stream/connection,
    stream/bufferstream,
    crypto/crypto,
    protocols/pubsub/pubsub,
    protocols/pubsub/gossipsub,
    protocols/pubsub/pubsubpeer,
    protocols/pubsub/peertable,
    protocols/pubsub/rpc/messages,
  ]
import ../helpers
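# `tryPublish` retries `call` (a publish expression returning the number of
# peers reached) every `wait` until at least `require` peers were reached,
# giving up and asserting once `timeout` expires.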
template tryPublish(
    call: untyped, require: int, wait = 10.milliseconds, timeout = 10.seconds
): untyped =
  var
    expiration = Moment.now() + timeout
    pubs = 0
  while pubs < require and Moment.now() < expiration:
    pubs = pubs + call
    await sleepAsync(wait)

  doAssert pubs >= require, "Failed to publish!"
suite "GossipSub":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "e2e - GossipSub with multiple peers - control deliver (sparse)":
|
||||
var runs = 10
|
||||
|
||||
let
|
||||
nodes = generateNodes(runs, gossip = true, triggerSelf = true)
|
||||
nodesFut = nodes.mapIt(it.switch.start())
|
||||
|
||||
await subscribeSparseNodes(nodes)
|
||||
|
||||
var seen: Table[string, int]
|
||||
var seenFut = newFuture[void]()
|
||||
for i in 0 ..< nodes.len:
|
||||
let dialer = nodes[i]
|
||||
let dgossip = GossipSub(dialer)
|
||||
dgossip.parameters.dHigh = 2
|
||||
dgossip.parameters.dLow = 1
|
||||
dgossip.parameters.d = 1
|
||||
dgossip.parameters.dOut = 1
|
||||
var handler: TopicHandler
|
||||
closureScope:
|
||||
var peerName = $dialer.peerInfo.peerId
|
||||
handler = proc(topic: string, data: seq[byte]) {.async.} =
|
||||
seen.mgetOrPut(peerName, 0).inc()
|
||||
info "seen up", count = seen.len
|
||||
check topic == "foobar"
|
||||
if not seenFut.finished() and seen.len >= runs:
|
||||
seenFut.complete()
|
||||
|
||||
dialer.subscribe("foobar", handler)
|
||||
await waitSub(nodes[0], dialer, "foobar")
|
||||
|
||||
# we want to test ping pong deliveries via control Iwant/Ihave, so we publish just in a tap
|
||||
let publishedTo = nodes[0].publish(
|
||||
"foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)
|
||||
).await
|
||||
check:
|
||||
publishedTo != 0
|
||||
publishedTo != runs
|
||||
|
||||
await wait(seenFut, 5.minutes)
|
||||
check:
|
||||
seen.len >= runs
|
||||
for k, v in seen.pairs:
|
||||
check:
|
||||
v >= 1
|
||||
|
||||
await allFuturesThrowing(nodes.mapIt(allFutures(it.switch.stop())))
|
||||
|
||||
await allFuturesThrowing(nodesFut)
|
||||
|
||||
asyncTest "GossipSub invalid topic subscription":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
let
|
||||
nodes = generateNodes(2, gossip = true)
|
||||
|
||||
# start switches
|
||||
nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
|
||||
|
||||
# We must subscribe before setting the validator
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
|
||||
var gossip = GossipSub(nodes[0])
|
||||
let invalidDetected = newFuture[void]()
|
||||
gossip.subscriptionValidator = proc(topic: string): bool =
|
||||
if topic == "foobar":
|
||||
try:
|
||||
invalidDetected.complete()
|
||||
except:
|
||||
raise newException(Defect, "Exception during subscriptionValidator")
|
||||
false
|
||||
else:
|
||||
true
|
||||
|
||||
await subscribeNodes(nodes)
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await invalidDetected.wait(10.seconds)
|
||||
|
||||
await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())
|
||||
|
||||
await allFuturesThrowing(nodesFut.concat())
|
||||
|
||||
asyncTest "GossipSub test directPeers":
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
await allFutures(nodes[0].switch.start(), nodes[1].switch.start())
|
||||
await GossipSub(nodes[0]).addDirectPeer(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
let invalidDetected = newFuture[void]()
|
||||
GossipSub(nodes[0]).subscriptionValidator = proc(topic: string): bool =
|
||||
if topic == "foobar":
|
||||
try:
|
||||
invalidDetected.complete()
|
||||
except:
|
||||
raise newException(Defect, "Exception during subscriptionValidator")
|
||||
false
|
||||
else:
|
||||
true
|
||||
|
||||
# DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
|
||||
### await subscribeNodes(nodes)
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await invalidDetected.wait(10.seconds)
|
||||
|
||||
await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())
|
||||
|
||||
asyncTest "GossipSub directPeers: always forward messages":
|
||||
let
|
||||
nodes = generateNodes(3, gossip = true)
|
||||
|
||||
# start switches
|
||||
nodesFut = await allFinished(
|
||||
nodes[0].switch.start(), nodes[1].switch.start(), nodes[2].switch.start()
|
||||
)
|
||||
|
||||
await GossipSub(nodes[0]).addDirectPeer(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
await GossipSub(nodes[1]).addDirectPeer(
|
||||
nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
|
||||
)
|
||||
await GossipSub(nodes[1]).addDirectPeer(
|
||||
nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs
|
||||
)
|
||||
await GossipSub(nodes[2]).addDirectPeer(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
var handlerFut = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete()
|
||||
|
||||
proc noop(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
|
||||
nodes[0].subscribe("foobar", noop)
|
||||
nodes[1].subscribe("foobar", noop)
|
||||
nodes[2].subscribe("foobar", handler)
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1
|
||||
|
||||
await handlerFut.wait(2.seconds)
|
||||
|
||||
# peer shouldn't be in our mesh
|
||||
check "foobar" notin GossipSub(nodes[0]).mesh
|
||||
check "foobar" notin GossipSub(nodes[1]).mesh
|
||||
check "foobar" notin GossipSub(nodes[2]).mesh
|
||||
|
||||
await allFuturesThrowing(
|
||||
nodes[0].switch.stop(), nodes[1].switch.stop(), nodes[2].switch.stop()
|
||||
)
|
||||
|
||||
await allFuturesThrowing(nodesFut.concat())
|
||||
|
||||
asyncTest "GossipSub directPeers: don't kick direct peer with low score":
|
||||
let
|
||||
nodes = generateNodes(2, gossip = true)
|
||||
|
||||
# start switches
|
||||
nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
|
||||
|
||||
await GossipSub(nodes[0]).addDirectPeer(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
await GossipSub(nodes[1]).addDirectPeer(
|
||||
nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
GossipSub(nodes[1]).parameters.disconnectBadPeers = true
|
||||
GossipSub(nodes[1]).parameters.graylistThreshold = 100000
|
||||
|
||||
var handlerFut = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete()
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1
|
||||
|
||||
await handlerFut
|
||||
|
||||
GossipSub(nodes[1]).updateScores()
|
||||
# peer shouldn't be in our mesh
|
||||
check:
|
||||
GossipSub(nodes[1]).peerStats[nodes[0].switch.peerInfo.peerId].score <
|
||||
GossipSub(nodes[1]).parameters.graylistThreshold
|
||||
GossipSub(nodes[1]).updateScores()
|
||||
|
||||
handlerFut = newFuture[void]()
|
||||
tryPublish await nodes[0].publish("foobar", toBytes("hellow2")), 1
|
||||
|
||||
# Without directPeers, this would fail
|
||||
await handlerFut.wait(1.seconds)
|
||||
|
||||
await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())
|
||||
|
||||
await allFuturesThrowing(nodesFut.concat())
|
||||
|
||||
asyncTest "GossipSub peers disconnections mechanics":
|
||||
var runs = 10
|
||||
|
||||
let
|
||||
nodes = generateNodes(runs, gossip = true, triggerSelf = true)
|
||||
nodesFut = nodes.mapIt(it.switch.start())
|
||||
|
||||
await subscribeNodes(nodes)
|
||||
|
||||
var seen: Table[string, int]
|
||||
var seenFut = newFuture[void]()
|
||||
for i in 0 ..< nodes.len:
|
||||
let dialer = nodes[i]
|
||||
var handler: TopicHandler
|
||||
closureScope:
|
||||
var peerName = $dialer.peerInfo.peerId
|
||||
handler = proc(topic: string, data: seq[byte]) {.async.} =
|
||||
seen.mgetOrPut(peerName, 0).inc()
|
||||
check topic == "foobar"
|
||||
if not seenFut.finished() and seen.len >= runs:
|
||||
seenFut.complete()
|
||||
|
||||
dialer.subscribe("foobar", handler)
|
||||
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
# ensure peer stats are stored properly and kept properly
|
||||
check:
|
||||
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
|
||||
|
||||
tryPublish await wait(
|
||||
nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
|
||||
1.minutes,
|
||||
), 1, 5.seconds, 3.minutes
|
||||
|
||||
await wait(seenFut, 5.minutes)
|
||||
check:
|
||||
seen.len >= runs
|
||||
for k, v in seen.pairs:
|
||||
check:
|
||||
v >= 1
|
||||
|
||||
for node in nodes:
|
||||
var gossip = GossipSub(node)
|
||||
check:
|
||||
"foobar" in gossip.gossipsub
|
||||
gossip.fanout.len == 0
|
||||
gossip.mesh["foobar"].len > 0
|
||||
|
||||
# Removing some subscriptions
|
||||
|
||||
for i in 0 ..< runs:
|
||||
if i mod 3 != 0:
|
||||
nodes[i].unsubscribeAll("foobar")
|
||||
|
||||
# Waiting 2 heartbeats
|
||||
|
||||
for _ in 0 .. 1:
|
||||
let evnt = newAsyncEvent()
|
||||
GossipSub(nodes[0]).heartbeatEvents &= evnt
|
||||
await evnt.wait()
|
||||
|
||||
# ensure peer stats are stored properly and kept properly
|
||||
check:
|
||||
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
|
||||
|
||||
# Adding again subscriptions
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
|
||||
for i in 0 ..< runs:
|
||||
if i mod 3 != 0:
|
||||
nodes[i].subscribe("foobar", handler)
|
||||
|
||||
# Waiting 2 heartbeats
|
||||
|
||||
for _ in 0 .. 1:
|
||||
let evnt = newAsyncEvent()
|
||||
GossipSub(nodes[0]).heartbeatEvents &= evnt
|
||||
await evnt.wait()
|
||||
|
||||
# ensure peer stats are stored properly and kept properly
|
||||
check:
|
||||
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
|
||||
|
||||
await allFuturesThrowing(nodes.mapIt(allFutures(it.switch.stop())))
|
||||
|
||||
await allFuturesThrowing(nodesFut)
|
||||
|
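  # `decayInterval` controls how often score counters decay. Below,
  # meshMessageDeliveries starts at 100 and decays by a factor of 0.9 every
  # 300 ms, so after ~1.5 s it should have decayed about 5 times:
  # 100 * 0.9^5 ≈ 59, with 0.9^4 ≈ 65.6 and 0.9^6 ≈ 53.1 bracketing the
  # accepted 50..66 range.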
  asyncTest "GossipSub scoring - decayInterval":
    let nodes = generateNodes(2, gossip = true)

    var gossip = GossipSub(nodes[0])
    # MacOs has some nasty jitter when sleeping
    # (up to 7 ms), so we need some pretty long
    # sleeps to be safe here
    gossip.parameters.decayInterval = 300.milliseconds

    let
      # start switches
      nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())

    var handlerFut = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      handlerFut.complete()

    await subscribeNodes(nodes)

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    tryPublish await nodes[0].publish("foobar", toBytes("hello")), 1

    await handlerFut

    gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries =
      100
    gossip.topicParams["foobar"].meshMessageDeliveriesDecay = 0.9
    await sleepAsync(1500.milliseconds)

    # We should have decayed 5 times, though allowing 4..6
    check:
      gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries in
        50.0 .. 66.0

    await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())

    await allFuturesThrowing(nodesFut.concat())
143
tests/pubsub/testgossipsubfanout.nim
Normal file
@@ -0,0 +1,143 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils]
import stew/byteutils
import chronicles
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../helpers

suite "GossipSub Fanout Management":
  teardown:
    checkTrackers()

  asyncTest "`replenishFanout` Degree Lo":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    check gossipSub.gossipsub[topic].len == 15
    gossipSub.replenishFanout(topic)
    check gossipSub.fanout[topic].len == gossipSub.parameters.d
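  # Fanout entries are timestamped on publish via `lastFanoutPubSub`; once a
  # topic has not been published to for longer than its TTL, `dropFanoutPeers`
  # clears its fanout peers, as the two tests below verify.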
  asyncTest "`dropFanoutPeers` drop expired fanout topics":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(6, topic, populateGossipsub = true, populateFanout = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
    await sleepAsync(5.millis) # allow the topic to expire

    check gossipSub.fanout[topic].len == gossipSub.parameters.d

    gossipSub.dropFanoutPeers()
    check topic notin gossipSub.fanout

  asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
    let
      topic1 = "foobar1"
      topic2 = "foobar2"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(
      6, @[topic1, topic2], populateGossipsub = true, populateFanout = true
    )
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
    gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
    await sleepAsync(5.millis) # allow first topic to expire

    check gossipSub.fanout[topic1].len == gossipSub.parameters.d
    check gossipSub.fanout[topic2].len == gossipSub.parameters.d

    gossipSub.dropFanoutPeers()
    check topic1 notin gossipSub.fanout
    check topic2 in gossipSub.fanout
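  # Publishing to a topic we are not subscribed to goes through fanout rather
  # than the mesh: the publisher sends to up to `d` subscribed peers without
  # grafting them, which the next two tests observe.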
  asyncTest "e2e - GossipSub send over fanout A -> B":
    let (passed, handler) = createCompleteHandler()

    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes[1].subscribe("foobar", handler)
    await waitSub(nodes[0], nodes[1], "foobar")

    var observed = 0
    let
      obs1 = PubSubObserver(
        onRecv: proc(peer: PubSubPeer, msgs: var RPCMsg) =
          inc observed
      )
      obs2 = PubSubObserver(
        onSend: proc(peer: PubSubPeer, msgs: var RPCMsg) =
          inc observed
      )

    nodes[1].addObserver(obs1)
    nodes[0].addObserver(obs2)

    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1

    var gossip1: GossipSub = GossipSub(nodes[0])
    var gossip2: GossipSub = GossipSub(nodes[1])

    check:
      "foobar" in gossip1.gossipsub
      gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
      not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)

    discard await passed.wait(2.seconds)

    check observed == 2

  asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
    let (passed, handler) = createCompleteHandler()

    let nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 10.minutes)

    startNodesAndDeferStop(nodes)

    GossipSub(nodes[1]).parameters.d = 0
    GossipSub(nodes[1]).parameters.dHigh = 0
    GossipSub(nodes[1]).parameters.dLow = 0

    await connectNodesStar(nodes)

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    let gsNode = GossipSub(nodes[1])
    checkUntilTimeout:
      gsNode.mesh.getOrDefault("foobar").len == 0
      GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0
      (
        GossipSub(nodes[0]).gossipsub.getOrDefault("foobar").len == 1 or
          GossipSub(nodes[0]).fanout.getOrDefault("foobar").len == 1
      )

    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1

    check:
      GossipSub(nodes[0]).fanout.getOrDefault("foobar").len > 0
      GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0

    discard await passed.wait(2.seconds)

    trace "test done, stopping..."
751
tests/pubsub/testgossipsubgossip.nim
Normal file
@@ -0,0 +1,751 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils]
import stew/byteutils
import chronicles
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../libp2p/protocols/pubsub/rpc/[message]
import ../helpers, ../utils/[futures]

const MsgIdSuccess = "msg id gen success"

suite "GossipSub Gossip Protocol":
  teardown:
    checkTrackers()

  asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(45, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate mesh and fanout peers
    for i in 0 ..< 30:
      let peer = peers[i]
      if i mod 2 == 0:
        gossipSub.fanout[topic].incl(peer)
      else:
        gossipSub.grafted(peer, topic)
        gossipSub.mesh[topic].incl(peer)

    # generate gossipsub (free standing) peers
    for i in 30 ..< 45:
      let peer = peers[i]
      gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    check gossipSub.fanout[topic].len == 15
    check gossipSub.mesh[topic].len == 15
    check gossipSub.gossipsub[topic].len == 15

    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == gossipSub.parameters.d
    for p in gossipPeers.keys:
      check not gossipSub.fanout.hasPeerId(topic, p.peerId)
      check not gossipSub.mesh.hasPeerId(topic, p.peerId)

  asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate mesh and fanout peers
    for i, peer in peers:
      if i mod 2 == 0:
        gossipSub.fanout[topic].incl(peer)
      else:
        gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == gossipSub.parameters.d

  asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate mesh and fanout peers
    for i, peer in peers:
      if i mod 2 == 0:
        gossipSub.mesh[topic].incl(peer)
        gossipSub.grafted(peer, topic)
      else:
        gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == gossipSub.parameters.d

  asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate mesh and fanout peers
    for i, peer in peers:
      if i mod 2 == 0:
        gossipSub.mesh[topic].incl(peer)
        gossipSub.grafted(peer, topic)
      else:
        gossipSub.fanout[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == 0

  asyncTest "handleIHave/Iwant tests":
    let topic = "foobar"
    var (gossipSub, conns, peers) =
      setupGossipSubWithPeers(30, topic, populateMesh = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.subscribe(topic, voidTopicHandler)

    # Peers with no budget should not request messages
    block:
      # Define a new connection
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      # Add a message to `gossipSub`'s message cache
      let id = @[0'u8, 1, 2, 3]
      gossipSub.mcache.put(id, Message())
      peer.sentIHaves[^1].incl(id)
      # Build an IHAVE message that contains the same message ID three times
      let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
      # Given the peer has no budget to request messages
      peer.iHaveBudget = 0
      # When a peer sends an IHAVE for a message that `gossipSub` has
      let iwants = gossipSub.handleIHave(peer, @[msg])
      # Then `gossipSub` should not generate an IWANT message for it
      check:
        iwants.messageIDs.len == 0

    # Peers with budget should request messages. If ids are repeated, only one request should be generated
    block:
      # Define a new connection
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      let id = @[0'u8, 1, 2, 3]
      # Build an IHAVE message that contains the same message ID three times
      let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
      # Given the budget is not 0 (because it has not been overridden)
      # When a peer sends an IHAVE for a message that `gossipSub` does not have
      let iwants = gossipSub.handleIHave(peer, @[msg])
      # Then `gossipSub` should generate a single IWANT request for the message
      check:
        iwants.messageIDs.len == 1

    # IWANT requests for a message we hold should be answered. If ids are repeated, only one reply should be generated
    block:
      # Define a new connection
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      # Add a message to `gossipSub`'s message cache
      let id = @[0'u8, 1, 2, 3]
      gossipSub.mcache.put(id, Message())
      peer.sentIHaves[^1].incl(id)
      # Build an IWANT message that contains the same message ID three times
      let msg = ControlIWant(messageIDs: @[id, id, id])
      # When a peer sends an IWANT for a message that `gossipSub` has
      let genmsg = gossipSub.handleIWant(peer, @[msg])
      # Then `gossipSub` should return the message
      check:
        genmsg.len == 1

    check gossipSub.mcache.msgs.len == 1
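  # During heartbeats, IDs of recently cached messages are gossiped via IHAVE
  # to topic subscribers outside our mesh, which is what the next test
  # observes.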
  asyncTest "messages sent to peers not in the mesh are propagated via gossip":
    let
      numberOfNodes = 5
      topic = "foobar"
      dValues = DValues(dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1))
      nodes = generateNodes(numberOfNodes, gossip = true, dValues = some(dValues))

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iHave messages
    var receivedIHavesRef = new seq[int]
    addIHaveObservers(nodes, topic, receivedIHavesRef)

    # And are interconnected
    await connectNodesStar(nodes)

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForPeersInTable(
      nodes, topic, newSeqWith(numberOfNodes, 4), PeerTableType.Gossipsub
    )

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) > 0
    await waitForHeartbeat()

    # At least one of the nodes should have received an iHave message
    # The check is made this way because the mesh structure changes from run to run
    let receivedIHaves = receivedIHavesRef[]
    check:
      anyIt(receivedIHaves, it > 0)
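  # The number of gossip (IHAVE) targets per heartbeat depends on both dLazy
  # and gossipFactor: with both at 0 no IHAVE goes out at all, while the two
  # tests after that let each knob dominate in turn.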
  asyncTest "adaptive gossip dissemination, dLazy and gossipFactor to 0":
    let
      numberOfNodes = 20
      topic = "foobar"
      dValues = DValues(
        dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(0)
      )
      nodes = generateNodes(
        numberOfNodes,
        gossip = true,
        dValues = some(dValues),
        gossipFactor = some(0.float),
      )

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iHave messages
    var receivedIHavesRef = new seq[int]
    addIHaveObservers(nodes, topic, receivedIHavesRef)

    # And are connected to node 0
    for i in 1 ..< numberOfNodes:
      await connectNodes(nodes[0], nodes[i])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForHeartbeat()

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) == 3
    await waitForHeartbeat()

    # None of the nodes should have received an iHave message
    let receivedIHaves = receivedIHavesRef[]
    check:
      filterIt(receivedIHaves, it > 0).len == 0

  asyncTest "adaptive gossip dissemination, with gossipFactor priority":
    let
      numberOfNodes = 20
      topic = "foobar"
      dValues = DValues(
        dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(4)
      )
      nodes = generateNodes(
        numberOfNodes, gossip = true, dValues = some(dValues), gossipFactor = some(0.5)
      )

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iHave messages
    var receivedIHavesRef = new seq[int]
    addIHaveObservers(nodes, topic, receivedIHavesRef)

    # And are connected to node 0
    for i in 1 ..< numberOfNodes:
      await connectNodes(nodes[0], nodes[i])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForPeersInTable(@[nodes[0]], topic, @[19], PeerTableType.Gossipsub)

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) in 2 .. 3
    await waitForHeartbeat(2)

    # At least 8 of the nodes should have received an iHave message
    # That's because the gossip factor is 0.5 over 16 available nodes
    let receivedIHaves = receivedIHavesRef[]
    check:
      filterIt(receivedIHaves, it > 0).len >= 8

  asyncTest "adaptive gossip dissemination, with dLazy priority":
    let
      numberOfNodes = 20
      topic = "foobar"
      dValues = DValues(
        dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(6)
      )
      nodes = generateNodes(
        numberOfNodes,
        gossip = true,
        dValues = some(dValues),
        gossipFactor = some(0.float),
      )

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iHave messages
    var receivedIHavesRef = new seq[int]
    addIHaveObservers(nodes, topic, receivedIHavesRef)

    # And are connected to node 0
    for i in 1 ..< numberOfNodes:
      await connectNodes(nodes[0], nodes[i])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForPeersInTable(@[nodes[0]], topic, @[19], PeerTableType.Gossipsub)

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) in 2 .. 3
    await waitForHeartbeat(2)

    # At least 6 of the nodes should have received an iHave message
    # That's because dLazy is 6
    let receivedIHaves = receivedIHavesRef[]
    check:
      filterIt(receivedIHaves, it > 0).len >= dValues.dLazy.get()

  asyncTest "iDontWant messages are broadcast immediately after receiving the first message instance":
    let
      numberOfNodes = 3
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true)

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iDontWant messages
    var receivedIDontWantsRef = new seq[int]
    addIDontWantObservers(nodes, receivedIDontWantsRef)

    # And are connected in a line
    await connectNodes(nodes[0], nodes[1])
    await connectNodes(nodes[1], nodes[2])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForPeersInTable(nodes, topic, @[1, 2, 1], PeerTableType.Gossipsub)

    # When node 0 sends a large message
    let largeMsg = newSeq[byte](1000)
    check (await nodes[0].publish(topic, largeMsg)) == 1
    await waitForHeartbeat()

    # Only node 2 should have received the iDontWant message
    let receivedIDontWants = receivedIDontWantsRef[]
    check:
      receivedIDontWants[0] == 0
      receivedIDontWants[1] == 0
      receivedIDontWants[2] == 1

  asyncTest "e2e - GossipSub peer exchange":
    # A, B & C are subscribed to something.
    # When B unsubscribes from it, it should send PX (peer exchange) to A & C.
    #
    # C sent its signed peer record (SPR); A did not.
    proc handler(topic: string, data: seq[byte]) {.async.} =
      discard # not used in this test

    let nodes =
      generateNodes(2, gossip = true, enablePX = true) &
      generateNodes(1, gossip = true, sendSignedPeerRecord = true)

    startNodesAndDeferStop(nodes)

    var
      gossip0 = GossipSub(nodes[0])
      gossip1 = GossipSub(nodes[1])
      gossip2 = GossipSub(nodes[2])

    await connectNodesStar(nodes)

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)
    nodes[2].subscribe("foobar", handler)
    for x in 0 ..< 3:
      for y in 0 ..< 3:
        if x != y:
          await waitSub(nodes[x], nodes[y], "foobar")

    # Setup record handlers for all nodes
    var
      passed0: Future[void] = newFuture[void]()
      passed2: Future[void] = newFuture[void]()
    gossip0.routingRecordsHandler.add(
      proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
        check:
          tag == "foobar"
          peers.len == 2
          peers[0].record.isSome() xor peers[1].record.isSome()
        passed0.complete()
    )
    gossip1.routingRecordsHandler.add(
      proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
        raiseAssert "should not get here"
    )
    gossip2.routingRecordsHandler.add(
      proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
        check:
          tag == "foobar"
          peers.len == 2
          peers[0].record.isSome() xor peers[1].record.isSome()
        passed2.complete()
    )

    # Unsubscribe from the topic
    nodes[1].unsubscribe("foobar", handler)

    # Then verify which nodes receive the PX
    let results = await waitForStates(@[passed0, passed2], HEARTBEAT_TIMEOUT)
    check:
      results[0].isCompleted()
      results[1].isCompleted()
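  # IDONTWANT (GossipSub v1.2): upon receiving the first copy of a large
  # message, a node tells its other mesh peers not to send it the same
  # message, saving the bandwidth of duplicate deliveries.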
||||
asyncTest "e2e - iDontWant":
|
||||
# 3 nodes: A <=> B <=> C
|
||||
# (A & C are NOT connected). We pre-emptively send a dontwant from C to B,
|
||||
# and check that B doesn't relay the message to C.
|
||||
# We also check that B sends IDONTWANT to C, but not A
|
||||
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
|
||||
ok(newSeq[byte](10))
|
||||
let nodes = generateNodes(3, gossip = true, msgIdProvider = dumbMsgIdProvider)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await nodes[0].switch.connect(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
await nodes[1].switch.connect(
|
||||
nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
let bFinished = newFuture[void]()
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} =
|
||||
bFinished.complete()
|
||||
|
||||
proc handlerC(topic: string, data: seq[byte]) {.async.} =
|
||||
doAssert false
|
||||
|
||||
nodes[0].subscribe("foobar", handlerA)
|
||||
nodes[1].subscribe("foobar", handlerB)
|
||||
nodes[2].subscribe("foobar", handlerB)
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
var gossip1: GossipSub = GossipSub(nodes[0])
|
||||
var gossip2: GossipSub = GossipSub(nodes[1])
|
||||
var gossip3: GossipSub = GossipSub(nodes[2])
|
||||
|
||||
check:
|
||||
gossip3.mesh.peers("foobar") == 1
|
||||
|
||||
gossip3.broadcast(
|
||||
gossip3.mesh["foobar"],
|
||||
RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(idontwant: @[ControlIWant(messageIDs: @[newSeq[byte](10)])])
|
||||
)
|
||||
),
|
||||
isHighPriority = true,
|
||||
)
|
||||
checkUntilTimeout:
|
||||
gossip2.mesh.getOrDefault("foobar").anyIt(it.iDontWants[^1].len == 1)
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
|
||||
|
||||
await bFinished
|
||||
|
||||
checkUntilTimeout:
|
||||
toSeq(gossip3.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 1)
|
||||
check:
|
||||
toSeq(gossip1.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
|
||||
|
||||
asyncTest "e2e - iDontWant is broadcasted on publish":
|
||||
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
|
||||
ok(newSeq[byte](10))
|
||||
let nodes = generateNodes(
|
||||
2, gossip = true, msgIdProvider = dumbMsgIdProvider, sendIDontWantOnPublish = true
|
||||
)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await nodes[0].switch.connect(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
nodes[0].subscribe("foobar", handlerA)
|
||||
nodes[1].subscribe("foobar", handlerB)
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
var gossip2: GossipSub = GossipSub(nodes[1])
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
|
||||
|
||||
checkUntilTimeout:
|
||||
gossip2.mesh.getOrDefault("foobar").anyIt(it.iDontWants[^1].len == 1)
|
||||
|
||||
asyncTest "e2e - iDontWant is sent only for 1.2":
|
||||
# 3 nodes: A <=> B <=> C
|
||||
# (A & C are NOT connected). We pre-emptively send a dontwant from C to B,
|
||||
# and check that B doesn't relay the message to C.
|
||||
# We also check that B sends IDONTWANT to C, but not A
|
||||
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
|
||||
ok(newSeq[byte](10))
|
||||
let
|
||||
nodeA = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
|
||||
nodeB = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
|
||||
nodeC = generateNodes(
|
||||
1,
|
||||
gossip = true,
|
||||
msgIdProvider = dumbMsgIdProvider,
|
||||
gossipSubVersion = GossipSubCodec_11,
|
||||
)[0]
|
||||
|
||||
startNodesAndDeferStop(@[nodeA, nodeB, nodeC])
|
||||
|
||||
await nodeA.switch.connect(
|
||||
nodeB.switch.peerInfo.peerId, nodeB.switch.peerInfo.addrs
|
||||
)
|
||||
await nodeB.switch.connect(
|
||||
nodeC.switch.peerInfo.peerId, nodeC.switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
let bFinished = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} =
|
||||
bFinished.complete()
|
||||
|
||||
nodeA.subscribe("foobar", handler)
|
||||
nodeB.subscribe("foobar", handlerB)
|
||||
nodeC.subscribe("foobar", handler)
|
||||
await waitSubGraph(@[nodeA, nodeB, nodeC], "foobar")
|
||||
|
||||
var gossipA: GossipSub = GossipSub(nodeA)
|
||||
var gossipB: GossipSub = GossipSub(nodeB)
|
||||
var gossipC: GossipSub = GossipSub(nodeC)
|
||||
|
||||
check:
|
||||
gossipC.mesh.peers("foobar") == 1
|
||||
|
||||
tryPublish await nodeA.publish("foobar", newSeq[byte](10000)), 1
|
||||
|
||||
await bFinished
|
||||
|
||||
# "check" alone isn't suitable for testing that a condition is true after some time has passed. Below we verify that
|
||||
# peers A and C haven't received an IDONTWANT message from B, but we need wait some time for potential in flight messages to arrive.
|
||||
await waitForHeartbeat()
|
||||
check:
|
||||
toSeq(gossipC.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
|
||||
toSeq(gossipA.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
|
||||
|
||||
asyncTest "Peer must send right gosspipsub version":
|
||||
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
|
||||
ok(newSeq[byte](10))
|
||||
let node0 = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
|
||||
let node1 = generateNodes(
|
||||
1,
|
||||
gossip = true,
|
||||
msgIdProvider = dumbMsgIdProvider,
|
||||
gossipSubVersion = GossipSubCodec_10,
|
||||
)[0]
|
||||
|
||||
startNodesAndDeferStop(@[node0, node1])
|
||||
|
||||
await node0.switch.connect(
|
||||
node1.switch.peerInfo.peerId, node1.switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
node0.subscribe("foobar", handler)
|
||||
node1.subscribe("foobar", handler)
|
||||
await waitSubGraph(@[node0, node1], "foobar")
|
||||
|
||||
var gossip0: GossipSub = GossipSub(node0)
|
||||
var gossip1: GossipSub = GossipSub(node1)
|
||||
|
||||
checkUntilTimeout:
|
||||
gossip0.mesh.getOrDefault("foobar").toSeq[0].codec == GossipSubCodec_10
|
||||
checkUntilTimeout:
|
||||
gossip1.mesh.getOrDefault("foobar").toSeq[0].codec == GossipSubCodec_10
|
||||
|
||||
asyncTest "IHAVE messages correctly advertise message ID to peers":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
messageID = @[0'u8, 1, 2, 3]
|
||||
ihaveMessage =
|
||||
ControlMessage(ihave: @[ControlIHave(topicID: topic, messageIDs: @[messageID])])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Given node1 has an IHAVE observer
|
||||
var receivedIHave = newFuture[(string, seq[MessageId])]()
|
||||
let checkForIhaves = proc(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
if msgs.control.isSome:
|
||||
for msg in msgs.control.get.ihave:
|
||||
receivedIHave.complete((msg.topicID, msg.messageIDs))
|
||||
n1.addObserver(PubSubObserver(onRecv: checkForIhaves))
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When an IHAVE message is sent
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(ihaveMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer has the message ID
|
||||
let r = await receivedIHave.waitForState(HEARTBEAT_TIMEOUT)
|
||||
check:
|
||||
r.isCompleted((topic, @[messageID]))
|
||||
|
||||
asyncTest "IWANT messages correctly request messages by their IDs":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
messageID = @[0'u8, 1, 2, 3]
|
||||
iwantMessage = ControlMessage(iwant: @[ControlIWant(messageIDs: @[messageID])])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Given node1 has an IWANT observer
|
||||
var receivedIWant = newFuture[seq[MessageId]]()
|
||||
let checkForIwants = proc(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
if msgs.control.isSome:
|
||||
for msg in msgs.control.get.iwant:
|
||||
receivedIWant.complete(msg.messageIDs)
|
||||
n1.addObserver(PubSubObserver(onRecv: checkForIwants))
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When an IWANT message is sent
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(iwantMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer has the message ID
|
||||
let r = await receivedIWant.waitForState(HEARTBEAT_TIMEOUT)
|
||||
check:
|
||||
r.isCompleted(@[messageID])
|
||||
|
||||
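  # Pull side of gossip: a peer receiving IHAVE for an ID it has not seen is
  # expected to answer with IWANT for that ID, after which the advertiser
  # sends the full message. The next test drives that exchange by hand.
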
asyncTest "IHAVE for message not held by peer triggers IWANT response to sender":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
messageID = @[0'u8, 1, 2, 3]
|
||||
ihaveMessage =
|
||||
ControlMessage(ihave: @[ControlIHave(topicID: topic, messageIDs: @[messageID])])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Given node1 has an IWANT observer
|
||||
var receivedIWant = newFuture[seq[MessageId]]()
|
||||
let checkForIwants = proc(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
if msgs.control.isSome:
|
||||
for msg in msgs.control.get.iwant:
|
||||
receivedIWant.complete(msg.messageIDs)
|
||||
n0.addObserver(PubSubObserver(onRecv: checkForIwants))
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both nodes subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# When an IHAVE message is sent from node0
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(ihaveMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then node0 should receive an IWANT message from node1 (as node1 doesn't have the message)
|
||||
let iWantResult = await receivedIWant.waitForState(HEARTBEAT_TIMEOUT)
|
||||
check:
|
||||
iWantResult.isCompleted(@[messageID])
|
||||
689 tests/pubsub/testgossipsubmeshmanagement.nim Normal file
@@ -0,0 +1,689 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils]
import stew/byteutils
import utils
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../helpers, ../utils/[futures]

suite "GossipSub Mesh Management":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "topic params":
|
||||
let params = TopicParams.init()
|
||||
params.validateParameters().tryGet()
|
||||
|
||||
asyncTest "subscribe/unsubscribeAll":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# test via dynamic dispatch
|
||||
gossipSub.PubSub.subscribe(topic, voidTopicHandler)
|
||||
|
||||
check:
|
||||
gossipSub.topics.contains(topic)
|
||||
gossipSub.gossipsub[topic].len() > 0
|
||||
gossipSub.mesh[topic].len() > 0
|
||||
|
||||
# test via dynamic dispatch
|
||||
gossipSub.PubSub.unsubscribeAll(topic)
|
||||
|
||||
check:
|
||||
topic notin gossipSub.topics # not in local topics
|
||||
topic notin gossipSub.mesh # not in mesh
|
||||
topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)
|
||||
|
||||
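  # rebalanceMesh keeps the mesh between dLow and dHigh peers, targeting d.
  # A rough sketch of the invariant the following tests exercise (pseudocode,
  # not the actual implementation):
  #   if mesh.len < dLow: graft eligible candidates up to d
  #   elif mesh.len > dHigh: prune down to d, keeping at least dOut outbound
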
asyncTest "`rebalanceMesh` Degree Lo":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len == gossipSub.parameters.d
|
||||
|
||||
asyncTest "rebalanceMesh - bad peers":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
var scoreLow = -11'f64
|
||||
for peer in peers:
|
||||
peer.score = scoreLow
|
||||
scoreLow += 1.0
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# low score peers should not be in mesh, that's why the count must be 4
|
||||
check gossipSub.mesh[topic].len == 4
|
||||
for peer in gossipSub.mesh[topic]:
|
||||
check peer.score >= 0.0
|
||||
|
||||
asyncTest "`rebalanceMesh` Degree Hi":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
check gossipSub.mesh[topic].len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len ==
|
||||
gossipSub.parameters.d + gossipSub.parameters.dScore
|
||||
|
||||
asyncTest "rebalanceMesh fail due to backoff":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
for peer in peers:
|
||||
gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
|
||||
peer.peerId, Moment.now() + 1.hours
|
||||
)
|
||||
let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
|
||||
# there must be a control prune due to violation of backoff
|
||||
check prunes.len != 0
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# expect 0 since they are all backing off
|
||||
check gossipSub.mesh[topic].len == 0
|
||||
|
||||
asyncTest "rebalanceMesh fail due to backoff - remote":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len != 0
|
||||
|
||||
for peer in peers:
|
||||
gossipSub.handlePrune(
|
||||
peer,
|
||||
@[
|
||||
ControlPrune(
|
||||
topicID: topic,
|
||||
peers: @[],
|
||||
backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
# expect topic cleaned up since they are all pruned
|
||||
check topic notin gossipSub.mesh
|
||||
|
||||
asyncTest "rebalanceMesh Degree Hi - audit scenario":
|
||||
let
|
||||
topic = "foobar"
|
||||
numInPeers = 6
|
||||
numOutPeers = 7
|
||||
totalPeers = numInPeers + numOutPeers
|
||||
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
|
||||
totalPeers, topic, populateGossipsub = true, populateMesh = true
|
||||
)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.parameters.dScore = 4
|
||||
gossipSub.parameters.d = 6
|
||||
gossipSub.parameters.dOut = 3
|
||||
gossipSub.parameters.dHigh = 12
|
||||
gossipSub.parameters.dLow = 4
|
||||
|
||||
for i in 0 ..< numInPeers:
|
||||
let conn = conns[i]
|
||||
let peer = peers[i]
|
||||
conn.transportDir = Direction.In
|
||||
peer.score = 40.0
|
||||
|
||||
for i in numInPeers ..< totalPeers:
|
||||
let conn = conns[i]
|
||||
let peer = peers[i]
|
||||
conn.transportDir = Direction.Out
|
||||
peer.score = 10.0
|
||||
|
||||
check gossipSub.mesh[topic].len == 13
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# ensure we are above dlow
|
||||
check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
|
||||
var outbound = 0
|
||||
for peer in gossipSub.mesh[topic]:
|
||||
if peer.sendConn.transportDir == Direction.Out:
|
||||
inc outbound
|
||||
# ensure we give priority and keep at least dOut outbound peers
|
||||
check outbound >= gossipSub.parameters.dOut
|
||||
|
||||
asyncTest "dont prune peers if mesh len is less than d_high":
|
||||
let
|
||||
numberOfNodes = 5
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
let expectedNumberOfPeers = numberOfNodes - 1
|
||||
await waitForPeersInTable(
|
||||
nodes,
|
||||
topic,
|
||||
newSeqWith(numberOfNodes, expectedNumberOfPeers),
|
||||
PeerTableType.Gossipsub,
|
||||
)
|
||||
|
||||
for i in 0 ..< numberOfNodes:
|
||||
var gossip = GossipSub(nodes[i])
|
||||
check:
|
||||
gossip.gossipsub[topic].len == expectedNumberOfPeers
|
||||
gossip.mesh[topic].len == expectedNumberOfPeers
|
||||
gossip.fanout.len == 0
|
||||
|
||||
asyncTest "prune peers if mesh len is higher than d_high":
|
||||
let
|
||||
numberOfNodes = 15
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
let
|
||||
expectedNumberOfPeers = numberOfNodes - 1
|
||||
dHigh = 12
|
||||
d = 6
|
||||
dLow = 4
|
||||
|
||||
await waitForPeersInTable(
|
||||
nodes,
|
||||
topic,
|
||||
newSeqWith(numberOfNodes, expectedNumberOfPeers),
|
||||
PeerTableType.Gossipsub,
|
||||
)
|
||||
|
||||
for i in 0 ..< numberOfNodes:
|
||||
var gossip = GossipSub(nodes[i])
|
||||
|
||||
check:
|
||||
gossip.gossipsub[topic].len == expectedNumberOfPeers
|
||||
gossip.mesh[topic].len >= dLow and gossip.mesh[topic].len <= dHigh
|
||||
gossip.fanout.len == 0
|
||||
|
||||
asyncTest "GossipSub unsub - resub faster than backoff":
|
||||
# For this test to work we'd require a way to disable fanout.
|
||||
# There's not a way to toggle it, and mocking it didn't work as there's not a reliable mock available.
|
||||
skip()
|
||||
return
|
||||
|
||||
# Instantiate handlers and validators
|
||||
var handlerFut0 = newFuture[bool]()
|
||||
proc handler0(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut0.complete(true)
|
||||
|
||||
var handlerFut1 = newFuture[bool]()
|
||||
proc handler1(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut1.complete(true)
|
||||
|
||||
var validatorFut = newFuture[bool]()
|
||||
proc validator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
check topic == "foobar"
|
||||
validatorFut.complete(true)
|
||||
result = ValidationResult.Accept
|
||||
|
||||
# Setup nodes and start switches
|
||||
let
|
||||
nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 5.seconds)
|
||||
topic = "foobar"
|
||||
|
||||
# Connect nodes
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# Subscribe both nodes to the topic and node1 (receiver) to the validator
|
||||
nodes[0].subscribe(topic, handler0)
|
||||
nodes[1].subscribe(topic, handler1)
|
||||
nodes[1].addValidator("foobar", validator)
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
|
||||
# Wait for both nodes to verify others' subscription
|
||||
var subs: seq[Future[void]]
|
||||
subs &= waitSub(nodes[1], nodes[0], topic)
|
||||
subs &= waitSub(nodes[0], nodes[1], topic)
|
||||
await allFuturesThrowing(subs)
|
||||
|
||||
# When unsubscribing and resubscribing in a short time frame, the backoff period should be triggered
|
||||
nodes[1].unsubscribe(topic, handler1)
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
nodes[1].subscribe(topic, handler1)
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
|
||||
# Backoff is set to 5 seconds, and the amount of sleeping time since the unsubsribe until now is 3-4s~
|
||||
# Meaning, the subscription shouldn't have been processed yet because it's still in backoff period
|
||||
# When publishing under this condition
|
||||
discard await nodes[0].publish("foobar", "Hello!".toBytes())
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
|
||||
# Then the message should not be received:
|
||||
check:
|
||||
validatorFut.toState().isPending()
|
||||
handlerFut1.toState().isPending()
|
||||
handlerFut0.toState().isPending()
|
||||
|
||||
validatorFut.reset()
|
||||
handlerFut0.reset()
|
||||
handlerFut1.reset()
|
||||
|
||||
# If we wait backoff period to end, around 1-2s
|
||||
await waitForMesh(nodes[0], nodes[1], topic, 3.seconds)
|
||||
|
||||
discard await nodes[0].publish("foobar", "Hello!".toBytes())
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
|
||||
# Then the message should be received
|
||||
check:
|
||||
validatorFut.toState().isCompleted()
|
||||
handlerFut1.toState().isCompleted()
|
||||
handlerFut0.toState().isPending()
|
||||
|
||||
asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
let gossip1 = GossipSub(nodes[0])
|
||||
let gossip2 = GossipSub(nodes[1])
|
||||
|
||||
checkUntilTimeout:
|
||||
"foobar" in gossip2.topics
|
||||
"foobar" in gossip1.gossipsub
|
||||
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
asyncTest "e2e - GossipSub should add remote peer topic subscriptions if both peers are subscribed":
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
var subs: seq[Future[void]]
|
||||
subs &= waitSub(nodes[1], nodes[0], "foobar")
|
||||
subs &= waitSub(nodes[0], nodes[1], "foobar")
|
||||
|
||||
await allFuturesThrowing(subs)
|
||||
|
||||
let
|
||||
gossip1 = GossipSub(nodes[0])
|
||||
gossip2 = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
"foobar" in gossip1.topics
|
||||
"foobar" in gossip2.topics
|
||||
|
||||
"foobar" in gossip1.gossipsub
|
||||
"foobar" in gossip2.gossipsub
|
||||
|
||||
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId) or
|
||||
gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
gossip2.gossipsub.hasPeerId("foobar", gossip1.peerInfo.peerId) or
|
||||
gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
|
||||
|
||||
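  # subscriptionValidator lets the application veto subscriptions per topic;
  # returning false marks the subscription invalid, which the two tests below
  # expect to fire for "foobar".
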
asyncTest "GossipSub invalid topic subscription":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# We must subscribe before setting the validator
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
|
||||
var gossip = GossipSub(nodes[0])
|
||||
let invalidDetected = newFuture[void]()
|
||||
gossip.subscriptionValidator = proc(topic: string): bool =
|
||||
if topic == "foobar":
|
||||
try:
|
||||
invalidDetected.complete()
|
||||
except:
|
||||
raise newException(Defect, "Exception during subscriptionValidator")
|
||||
false
|
||||
else:
|
||||
true
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await invalidDetected.wait(10.seconds)
|
||||
|
||||
asyncTest "GossipSub test directPeers":
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await GossipSub(nodes[0]).addDirectPeer(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
let invalidDetected = newFuture[void]()
|
||||
GossipSub(nodes[0]).subscriptionValidator = proc(topic: string): bool =
|
||||
if topic == "foobar":
|
||||
try:
|
||||
invalidDetected.complete()
|
||||
except:
|
||||
raise newException(Defect, "Exception during subscriptionValidator")
|
||||
false
|
||||
else:
|
||||
true
|
||||
|
||||
# DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
|
||||
### await connectNodesStar(nodes)
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await invalidDetected.wait(10.seconds)
|
||||
|
||||
asyncTest "mesh and gossipsub updated when topic subscribed and unsubscribed":
|
||||
let
|
||||
numberOfNodes = 5
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# When all of them are connected and subscribed to the same topic
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then mesh and gossipsub should be populated
|
||||
for node in nodes:
|
||||
check node.topics.contains(topic)
|
||||
check node.gossipsub.hasKey(topic)
|
||||
check node.gossipsub[topic].len() == numberOfNodes - 1
|
||||
check node.mesh.hasKey(topic)
|
||||
check node.mesh[topic].len() == numberOfNodes - 1
|
||||
|
||||
# When all nodes unsubscribe from the topic
|
||||
unsubscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the topic should be removed from mesh and gossipsub
|
||||
for node in nodes:
|
||||
check topic notin node.topics
|
||||
check topic notin node.mesh
|
||||
check topic notin node.gossipsub
|
||||
|
||||
asyncTest "handle subscribe and unsubscribe for multiple topics":
|
||||
let
|
||||
numberOfNodes = 3
|
||||
topics = @["foobar1", "foobar2", "foobar3"]
|
||||
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# When nodes subscribe to multiple topics
|
||||
await connectNodesStar(nodes)
|
||||
for topic in topics:
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then all nodes should be subscribed to the topics initially
|
||||
for node in nodes:
|
||||
for topic in topics:
|
||||
check node.topics.contains(topic)
|
||||
check node.gossipsub[topic].len() == numberOfNodes - 1
|
||||
check node.mesh[topic].len() == numberOfNodes - 1
|
||||
|
||||
# When they unsubscribe from all topics
|
||||
for topic in topics:
|
||||
unsubscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then topics should be removed from mesh and gossipsub
|
||||
for node in nodes:
|
||||
for topic in topics:
|
||||
check topic notin node.topics
|
||||
check topic notin node.mesh
|
||||
check topic notin node.gossipsub
|
||||
|
||||
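  # GRAFT asks the receiver to add the sender to its mesh for a topic; PRUNE
  # asks for removal and carries a backoff (in seconds) during which a
  # re-GRAFT is rejected. The tests below craft both control messages by hand
  # and push them through broadcast().
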
asyncTest "GRAFT messages correctly add peers to mesh":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foobar"
|
||||
graftMessage = ControlMessage(graft: @[ControlGraft(topicID: topic)])
|
||||
numberOfNodes = 2
|
||||
# First part of the hack: Weird dValues so peers are not GRAFTed automatically
|
||||
dValues = DValues(dLow: some(0), dHigh: some(0), d: some(0), dOut: some(-1))
|
||||
nodes = generateNodes(
|
||||
numberOfNodes, gossip = true, verifySignature = false, dValues = some(dValues)
|
||||
)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Because of the hack-ish dValues, the peers are added to gossipsub but not GRAFTed to mesh
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# Stop both nodes in order to prevent GRAFT message to be sent by heartbeat
|
||||
await n0.stop()
|
||||
await n1.stop()
|
||||
|
||||
# Second part of the hack
|
||||
# Set values so peers can be GRAFTed
|
||||
let newDValues =
|
||||
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(1)))
|
||||
n0.parameters.applyDValues(newDValues)
|
||||
n1.parameters.applyDValues(newDValues)
|
||||
|
||||
# When a GRAFT message is sent
|
||||
let p0 = n1.getOrCreatePeer(n0.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
|
||||
n1.broadcast(@[p0], RPCMsg(control: some(graftMessage)), isHighPriority = false)
|
||||
|
||||
await waitForPeersInTable(
|
||||
nodes, topic, newSeqWith(numberOfNodes, 1), PeerTableType.Mesh
|
||||
)
|
||||
|
||||
# Then the peers are GRAFTed
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
asyncTest "Received GRAFT for non-subscribed topic":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
graftMessage = ControlMessage(graft: @[ControlGraft(topicID: topic)])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And only node0 subscribes to the topic
|
||||
nodes[0].subscribe(topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.topics.hasKey(topic)
|
||||
not n1.topics.hasKey(topic)
|
||||
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When a GRAFT message is sent
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer is not GRAFTed
|
||||
check:
|
||||
n0.topics.hasKey(topic)
|
||||
not n1.topics.hasKey(topic)
|
||||
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
asyncTest "PRUNE messages correctly removes peers from mesh":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
backoff = 1
|
||||
pruneMessage = ControlMessage(
|
||||
prune: @[ControlPrune(topicID: topic, peers: @[], backoff: uint64(backoff))]
|
||||
)
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When a PRUNE message is sent
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer is PRUNEd
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When another PRUNE message is sent
|
||||
let p0 = n1.getOrCreatePeer(n0.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n1.broadcast(@[p0], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer is PRUNEd
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
asyncTest "Received PRUNE for non-subscribed topic":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
pruneMessage =
|
||||
ControlMessage(prune: @[ControlPrune(topicID: topic, peers: @[], backoff: 1)])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And only node0 subscribes to the topic
|
||||
n0.subscribe(topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.topics.hasKey(topic)
|
||||
not n1.topics.hasKey(topic)
|
||||
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When a PRUNE message is sent
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer is not PRUNEd
|
||||
check:
|
||||
n0.topics.hasKey(topic)
|
||||
not n1.topics.hasKey(topic)
|
||||
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
875 tests/pubsub/testgossipsubmessagehandling.nim Normal file
@@ -0,0 +1,875 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils, enumerate]
import stew/byteutils
import utils
import sugar
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, timedcache]
import ../../libp2p/protocols/pubsub/rpc/[message, protobuf]
import ../helpers, ../utils/[futures]

const MsgIdSuccess = "msg id gen success"

proc setupTest(): Future[
    tuple[
      gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]
    ]
] {.async.} =
  let nodes = generateNodes(2, gossip = true, verifySignature = false)
  discard await allFinished(nodes[0].switch.start(), nodes[1].switch.start())

  await nodes[1].switch.connect(
    nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
  )

  var receivedMessages = new(HashSet[seq[byte]])

  proc handlerA(topic: string, data: seq[byte]) {.async.} =
    receivedMessages[].incl(data)

  proc handlerB(topic: string, data: seq[byte]) {.async.} =
    discard

  nodes[0].subscribe("foobar", handlerA)
  nodes[1].subscribe("foobar", handlerB)
  await waitSubGraph(nodes, "foobar")

  var gossip0: GossipSub = GossipSub(nodes[0])
  var gossip1: GossipSub = GossipSub(nodes[1])

  return (gossip0, gossip1, receivedMessages)

proc teardownTest(gossip0: GossipSub, gossip1: GossipSub) {.async.} =
  await allFuturesThrowing(gossip0.switch.stop(), gossip1.switch.stop())

proc createMessages(
    gossip0: GossipSub, gossip1: GossipSub, size1: int, size2: int
): tuple[iwantMessageIds: seq[MessageId], sentMessages: HashSet[seq[byte]]] =
  var iwantMessageIds = newSeq[MessageId]()
  var sentMessages = initHashSet[seq[byte]]()

  for i, size in enumerate([size1, size2]):
    let data = newSeqWith(size, i.byte)
    sentMessages.incl(data)

    let msg = Message.init(gossip1.peerInfo.peerId, data, "foobar", some(uint64(i + 1)))
    let iwantMessageId = gossip1.msgIdProvider(msg).expect(MsgIdSuccess)
    iwantMessageIds.add(iwantMessageId)
    gossip1.mcache.put(iwantMessageId, msg)

    let peer = gossip1.peers[(gossip0.peerInfo.peerId)]
    peer.sentIHaves[^1].incl(iwantMessageId)

  return (iwantMessageIds, sentMessages)

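# createMessages seeds gossip1's mcache and the per-peer sentIHaves window so
# that a later IWANT from gossip0 is treated as legitimate and answered; the
# returned IDs are what the tests below advertise via IHAVE.
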
suite "GossipSub Message Handling":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "Drop messages of topics without subscription":
|
||||
let topic = "foobar"
|
||||
var (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = conns[i]
|
||||
let peer = peers[i]
|
||||
inc seqno
|
||||
let msg = Message.init(conn.peerId, ("bar" & $i).toBytes(), topic, some(seqno))
|
||||
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
|
||||
|
||||
check gossipSub.mcache.msgs.len == 0
|
||||
|
||||
asyncTest "subscription limits":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
gossipSub.topicsHigh = 10
|
||||
|
||||
var tooManyTopics: seq[string]
|
||||
for i in 0 .. gossipSub.topicsHigh + 10:
|
||||
tooManyTopics &= "topic" & $i
|
||||
let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)
|
||||
|
||||
let conn = TestBufferStream.new(noop)
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))
|
||||
|
||||
check:
|
||||
gossipSub.gossipsub.len == gossipSub.topicsHigh
|
||||
peer.behaviourPenalty > 0.0
|
||||
|
||||
await conn.close()
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "invalid message bytes":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
expect(CatchableError):
|
||||
await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])
|
||||
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
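  # The next four tests pin down how IWANT replies are packed against
  # maxMessageSize: replies are split across RPCs when the combined payload
  # would exceed the limit, and any single message larger than the limit is
  # dropped rather than sent.
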
asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
|
||||
# This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
|
||||
# Expected: Both messages should be received.
|
||||
let (gossip0, gossip1, receivedMessages) = await setupTest()
|
||||
|
||||
let messageSize = gossip1.maxMessageSize div 2 + 1
|
||||
let (iwantMessageIds, sentMessages) =
|
||||
createMessages(gossip0, gossip1, messageSize, messageSize)
|
||||
|
||||
gossip1.broadcast(
|
||||
gossip1.mesh["foobar"],
|
||||
RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(
|
||||
ihave: @[ControlIHave(topicID: "foobar", messageIDs: iwantMessageIds)]
|
||||
)
|
||||
)
|
||||
),
|
||||
isHighPriority = false,
|
||||
)
|
||||
|
||||
checkUntilTimeout:
|
||||
receivedMessages[] == sentMessages
|
||||
check receivedMessages[].len == 2
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
|
||||
# This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
|
||||
# Expected: No messages should be received.
|
||||
let (gossip0, gossip1, receivedMessages) = await setupTest()
|
||||
|
||||
let messageSize = gossip1.maxMessageSize + 10
|
||||
let (bigIWantMessageIds, sentMessages) =
|
||||
createMessages(gossip0, gossip1, messageSize, messageSize)
|
||||
|
||||
gossip1.broadcast(
|
||||
gossip1.mesh["foobar"],
|
||||
RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(
|
||||
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
|
||||
)
|
||||
)
|
||||
),
|
||||
isHighPriority = false,
|
||||
)
|
||||
|
||||
await sleepAsync(300.milliseconds)
|
||||
checkUntilTimeout:
|
||||
receivedMessages[].len == 0
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
|
||||
# This test checks if two messages, both below the maxSize, are correctly processed and sent.
|
||||
# Expected: Both messages should be received.
|
||||
let (gossip0, gossip1, receivedMessages) = await setupTest()
|
||||
let size1 = gossip1.maxMessageSize div 2
|
||||
let size2 = gossip1.maxMessageSize div 3
|
||||
let (bigIWantMessageIds, sentMessages) =
|
||||
createMessages(gossip0, gossip1, size1, size2)
|
||||
|
||||
gossip1.broadcast(
|
||||
gossip1.mesh["foobar"],
|
||||
RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(
|
||||
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
|
||||
)
|
||||
)
|
||||
),
|
||||
isHighPriority = false,
|
||||
)
|
||||
|
||||
checkUntilTimeout:
|
||||
receivedMessages[] == sentMessages
|
||||
check receivedMessages[].len == 2
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
|
||||
# This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
|
||||
# Expected: Only the smaller message should be received.
|
||||
let (gossip0, gossip1, receivedMessages) = await setupTest()
|
||||
let maxSize = gossip1.maxMessageSize
|
||||
let size1 = maxSize div 2
|
||||
let size2 = maxSize + 10
|
||||
let (bigIWantMessageIds, sentMessages) =
|
||||
createMessages(gossip0, gossip1, size1, size2)
|
||||
|
||||
gossip1.broadcast(
|
||||
gossip1.mesh["foobar"],
|
||||
RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(
|
||||
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
|
||||
)
|
||||
)
|
||||
),
|
||||
isHighPriority = false,
|
||||
)
|
||||
|
||||
var smallestSet: HashSet[seq[byte]]
|
||||
let seqs = toSeq(sentMessages)
|
||||
if seqs[0] < seqs[1]:
|
||||
smallestSet.incl(seqs[0])
|
||||
else:
|
||||
smallestSet.incl(seqs[1])
|
||||
|
||||
checkUntilTimeout:
|
||||
receivedMessages[] == smallestSet
|
||||
check receivedMessages[].len == 1
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
asyncTest "messages are not sent back to source or forwarding peer":
|
||||
let
|
||||
numberOfNodes = 3
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
let (handlerFut0, handler0) = createCompleteHandler()
|
||||
let (handlerFut1, handler1) = createCompleteHandler()
|
||||
let (handlerFut2, handler2) = createCompleteHandler()
|
||||
|
||||
# Nodes are connected in a ring
|
||||
await connectNodes(nodes[0], nodes[1])
|
||||
await connectNodes(nodes[1], nodes[2])
|
||||
await connectNodes(nodes[2], nodes[0])
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, @[handler0, handler1, handler2])
|
||||
await waitForPeersInTable(
|
||||
nodes, topic, newSeqWith(numberOfNodes, 2), PeerTableType.Mesh
|
||||
)
|
||||
|
||||
# When node 0 sends a message
|
||||
check (await nodes[0].publish(topic, "Hello!".toBytes())) == 2
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Nodes 1 and 2 should receive the message, but node 0 shouldn't receive it back
|
||||
let results =
|
||||
await waitForStates(@[handlerFut0, handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
|
||||
check:
|
||||
results[0].isPending()
|
||||
results[1].isCompleted()
|
||||
results[2].isCompleted()
|
||||
|
||||
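  # Validators return Accept, Reject, or Ignore. Accept delivers and forwards
  # the message; Reject and Ignore both suppress it, and Reject is additionally
  # expected to count against the sender's score.
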
asyncTest "GossipSub validation should succeed":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
var subs: seq[Future[void]]
|
||||
subs &= waitSub(nodes[1], nodes[0], "foobar")
|
||||
subs &= waitSub(nodes[0], nodes[1], "foobar")
|
||||
|
||||
await allFuturesThrowing(subs)
|
||||
|
||||
var validatorFut = newFuture[bool]()
|
||||
proc validator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
check topic == "foobar"
|
||||
validatorFut.complete(true)
|
||||
result = ValidationResult.Accept
|
||||
|
||||
nodes[1].addValidator("foobar", validator)
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
|
||||
|
||||
check (await validatorFut) and (await handlerFut)
|
||||
|
||||
asyncTest "GossipSub validation should fail (reject)":
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check false # if we get here, it should fail
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
let gossip1 = GossipSub(nodes[0])
|
||||
let gossip2 = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
gossip1.mesh["foobar"].len == 1 and "foobar" notin gossip1.fanout
|
||||
gossip2.mesh["foobar"].len == 1 and "foobar" notin gossip2.fanout
|
||||
|
||||
var validatorFut = newFuture[bool]()
|
||||
proc validator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
result = ValidationResult.Reject
|
||||
validatorFut.complete(true)
|
||||
|
||||
nodes[1].addValidator("foobar", validator)
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
|
||||
|
||||
check (await validatorFut) == true
|
||||
|
||||
asyncTest "GossipSub validation should fail (ignore)":
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check false # if we get here, it should fail
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
let gossip1 = GossipSub(nodes[0])
|
||||
let gossip2 = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
gossip1.mesh["foobar"].len == 1 and "foobar" notin gossip1.fanout
|
||||
gossip2.mesh["foobar"].len == 1 and "foobar" notin gossip2.fanout
|
||||
|
||||
var validatorFut = newFuture[bool]()
|
||||
proc validator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
result = ValidationResult.Ignore
|
||||
validatorFut.complete(true)
|
||||
|
||||
nodes[1].addValidator("foobar", validator)
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
|
||||
|
||||
check (await validatorFut) == true
|
||||
|
||||
asyncTest "GossipSub validation one fails and one succeeds":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foo"
|
||||
handlerFut.complete(true)
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe("foo", handler)
|
||||
nodes[1].subscribe("bar", handler)
|
||||
|
||||
var passed, failed: Future[bool] = newFuture[bool]()
|
||||
proc validator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
result =
|
||||
if topic == "foo":
|
||||
passed.complete(true)
|
||||
ValidationResult.Accept
|
||||
else:
|
||||
failed.complete(true)
|
||||
ValidationResult.Reject
|
||||
|
||||
nodes[1].addValidator("foo", "bar", validator)
|
||||
tryPublish await nodes[0].publish("foo", "Hello!".toBytes()), 1
|
||||
tryPublish await nodes[0].publish("bar", "Hello!".toBytes()), 1
|
||||
|
||||
check ((await passed) and (await failed) and (await handlerFut))
|
||||
|
||||
let gossip1 = GossipSub(nodes[0])
|
||||
let gossip2 = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
"foo" notin gossip1.mesh and gossip1.fanout["foo"].len == 1
|
||||
"foo" notin gossip2.mesh and "foo" notin gossip2.fanout
|
||||
"bar" notin gossip1.mesh and gossip1.fanout["bar"].len == 1
|
||||
"bar" notin gossip2.mesh and "bar" notin gossip2.fanout
|
||||
|
||||
asyncTest "GossipSub's observers should run after message is sent, received and validated":
|
||||
var
|
||||
recvCounter = 0
|
||||
sendCounter = 0
|
||||
validatedCounter = 0
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
proc onRecv(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
inc recvCounter
|
||||
|
||||
proc onSend(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
inc sendCounter
|
||||
|
||||
proc onValidated(peer: PubSubPeer, msg: Message, msgId: MessageId) =
|
||||
inc validatedCounter
|
||||
|
||||
let obs0 = PubSubObserver(onSend: onSend)
|
||||
let obs1 = PubSubObserver(onRecv: onRecv, onValidated: onValidated)
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].addObserver(obs0)
|
||||
nodes[1].addObserver(obs1)
|
||||
nodes[1].subscribe("foo", handler)
|
||||
nodes[1].subscribe("bar", handler)
|
||||
|
||||
proc validator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
result = if topic == "foo": ValidationResult.Accept else: ValidationResult.Reject
|
||||
|
||||
nodes[1].addValidator("foo", "bar", validator)
|
||||
|
||||
# Send message that will be accepted by the receiver's validator
|
||||
tryPublish await nodes[0].publish("foo", "Hello!".toBytes()), 1
|
||||
|
||||
check:
|
||||
recvCounter == 1
|
||||
validatedCounter == 1
|
||||
sendCounter == 1
|
||||
|
||||
# Send message that will be rejected by the receiver's validator
|
||||
tryPublish await nodes[0].publish("bar", "Hello!".toBytes()), 1
|
||||
|
||||
check:
|
||||
recvCounter == 2
|
||||
validatedCounter == 1
|
||||
sendCounter == 2
|
||||
|
||||
asyncTest "e2e - GossipSub send over mesh A -> B":
|
||||
var passed: Future[bool] = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
passed.complete(true)
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
await waitSub(nodes[0], nodes[1], "foobar")
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
|
||||
|
||||
check await passed
|
||||
|
||||
var gossip1: GossipSub = GossipSub(nodes[0])
|
||||
var gossip2: GossipSub = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
"foobar" in gossip1.gossipsub
|
||||
"foobar" in gossip2.gossipsub
|
||||
gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
not gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
|
||||
not gossip2.fanout.hasPeerId("foobar", gossip1.peerInfo.peerId)
|
||||
|
||||
asyncTest "e2e - GossipSub should not send to source & peers who already seen":
|
||||
# 3 nodes: A, B, C
|
||||
# A publishes, C relays, B is having a long validation
|
||||
# so B should not send to anyone
|
||||
|
||||
let nodes = generateNodes(3, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
var cRelayed: Future[void] = newFuture[void]()
|
||||
var bFinished: Future[void] = newFuture[void]()
|
||||
var
|
||||
aReceived = 0
|
||||
cReceived = 0
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async.} =
|
||||
inc aReceived
|
||||
check aReceived < 2
|
||||
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
proc handlerC(topic: string, data: seq[byte]) {.async.} =
|
||||
inc cReceived
|
||||
check cReceived < 2
|
||||
cRelayed.complete()
|
||||
|
||||
nodes[0].subscribe("foobar", handlerA)
|
||||
nodes[1].subscribe("foobar", handlerB)
|
||||
nodes[2].subscribe("foobar", handlerC)
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
var gossip1: GossipSub = GossipSub(nodes[0])
|
||||
var gossip2: GossipSub = GossipSub(nodes[1])
|
||||
var gossip3: GossipSub = GossipSub(nodes[2])
|
||||
|
||||
proc slowValidator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
try:
|
||||
await cRelayed
|
||||
# Empty A & C caches to detect duplicates
|
||||
gossip1.seen = TimedCache[SaltedId].init()
|
||||
gossip3.seen = TimedCache[SaltedId].init()
|
||||
let msgId = toSeq(gossip2.validationSeen.keys)[0]
|
||||
checkUntilTimeout(
|
||||
try:
|
||||
gossip2.validationSeen[msgId].len > 0
|
||||
except KeyError:
|
||||
false
|
||||
)
|
||||
result = ValidationResult.Accept
|
||||
bFinished.complete()
|
||||
except CatchableError:
|
||||
raiseAssert "err on slowValidator"
|
||||
|
||||
nodes[1].addValidator("foobar", slowValidator)
|
||||
|
||||
checkUntilTimeout:
|
||||
gossip1.mesh.getOrDefault("foobar").len == 2
|
||||
gossip2.mesh.getOrDefault("foobar").len == 2
|
||||
gossip3.mesh.getOrDefault("foobar").len == 2
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 2
|
||||
|
||||
await bFinished
|
||||
|
||||
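  # floodPublish sends a node's own messages to all known topic peers with an
  # acceptable score, not just its mesh. The two "limit" tests below check how
  # that fan-out is capped, including with bandwidthEstimatebps set to 0.
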
asyncTest "e2e - GossipSub send over floodPublish A -> B":
|
||||
var passed: Future[bool] = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
passed.complete(true)
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
var gossip1: GossipSub = GossipSub(nodes[0])
|
||||
gossip1.parameters.floodPublish = true
|
||||
var gossip2: GossipSub = GossipSub(nodes[1])
|
||||
gossip2.parameters.floodPublish = true
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
await waitSub(nodes[0], nodes[1], "foobar")
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
|
||||
|
||||
check await passed.wait(10.seconds)
|
||||
|
||||
check:
|
||||
"foobar" in gossip1.gossipsub
|
||||
"foobar" notin gossip2.gossipsub
|
||||
not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
asyncTest "e2e - GossipSub floodPublish limit":
|
||||
let
|
||||
nodes = setupNodes(20)
|
||||
gossip1 = GossipSub(nodes[0])
|
||||
|
||||
gossip1.parameters.floodPublish = true
|
||||
gossip1.parameters.heartbeatInterval = milliseconds(700)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodes(nodes[1 ..^ 1], nodes[0])
|
||||
await baseTestProcedure(nodes, gossip1, gossip1.parameters.dLow, 17)
|
||||
|
||||
asyncTest "e2e - GossipSub floodPublish limit with bandwidthEstimatebps = 0":
|
||||
let
|
||||
nodes = setupNodes(20)
|
||||
gossip1 = GossipSub(nodes[0])
|
||||
|
||||
gossip1.parameters.floodPublish = true
|
||||
gossip1.parameters.heartbeatInterval = milliseconds(700)
|
||||
gossip1.parameters.bandwidthEstimatebps = 0
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodes(nodes[1 ..^ 1], nodes[0])
|
||||
await baseTestProcedure(nodes, gossip1, nodes.len - 1, nodes.len - 1)
|
||||
|
||||
asyncTest "e2e - GossipSub with multiple peers":
|
||||
var runs = 10
|
||||
|
||||
let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
var seen: Table[string, int]
|
||||
var seenFut = newFuture[void]()
|
||||
for i in 0 ..< nodes.len:
|
||||
let dialer = nodes[i]
|
||||
var handler: TopicHandler
|
||||
closureScope:
|
||||
var peerName = $dialer.peerInfo.peerId
|
||||
handler = proc(topic: string, data: seq[byte]) {.async.} =
|
||||
seen.mgetOrPut(peerName, 0).inc()
|
||||
check topic == "foobar"
|
||||
if not seenFut.finished() and seen.len >= runs:
|
||||
seenFut.complete()
|
||||
|
||||
dialer.subscribe("foobar", handler)
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
tryPublish await wait(
|
||||
nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
|
||||
1.minutes,
|
||||
), 1
|
||||
|
||||
await wait(seenFut, 1.minutes)
|
||||
check:
|
||||
seen.len >= runs
|
||||
for k, v in seen.pairs:
|
||||
check:
|
||||
v >= 1
|
||||
|
||||
for node in nodes:
|
||||
var gossip = GossipSub(node)
|
||||
|
||||
check:
|
||||
"foobar" in gossip.gossipsub
|
||||
|
||||
asyncTest "e2e - GossipSub with multiple peers (sparse)":
|
||||
var runs = 10
|
||||
|
||||
let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesSparse(nodes)
|
||||
|
||||
var seen: Table[string, int]
|
||||
var seenFut = newFuture[void]()
|
||||
|
||||
for i in 0 ..< nodes.len:
|
||||
let dialer = nodes[i]
|
||||
var handler: TopicHandler
|
||||
capture dialer, i:
|
||||
var peerName = $dialer.peerInfo.peerId
|
||||
handler = proc(topic: string, data: seq[byte]) {.async.} =
|
||||
try:
|
||||
if peerName notin seen:
|
||||
seen[peerName] = 0
|
||||
seen[peerName].inc
|
||||
except KeyError:
|
||||
raiseAssert "seen checked before"
|
||||
check topic == "foobar"
|
||||
if not seenFut.finished() and seen.len >= runs:
|
||||
seenFut.complete()
|
||||
|
||||
dialer.subscribe("foobar", handler)
|
||||
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
tryPublish await wait(
|
||||
nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
|
||||
1.minutes,
|
||||
), 1
|
||||
|
||||
await wait(seenFut, 60.seconds)
|
||||
check:
|
||||
seen.len >= runs
|
||||
for k, v in seen.pairs:
|
||||
check:
|
||||
v >= 1
|
||||
|
||||
for node in nodes:
|
||||
var gossip = GossipSub(node)
|
||||
check:
|
||||
"foobar" in gossip.gossipsub
|
||||
gossip.fanout.len == 0
|
||||
gossip.mesh["foobar"].len > 0
|
||||
|
||||
asyncTest "e2e - GossipSub with multiple peers - control deliver (sparse)":
|
||||
var runs = 10
|
||||
|
||||
let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesSparse(nodes)
|
||||
|
||||
var seen: Table[string, int]
|
||||
var seenFut = newFuture[void]()
|
||||
for i in 0 ..< nodes.len:
|
||||
let dialer = nodes[i]
|
||||
let dgossip = GossipSub(dialer)
|
||||
dgossip.parameters.dHigh = 2
|
||||
dgossip.parameters.dLow = 1
|
||||
dgossip.parameters.d = 1
|
||||
dgossip.parameters.dOut = 1
|
||||
var handler: TopicHandler
|
||||
closureScope:
|
||||
var peerName = $dialer.peerInfo.peerId
|
||||
handler = proc(topic: string, data: seq[byte]) {.async.} =
|
||||
seen.mgetOrPut(peerName, 0).inc()
|
||||
info "seen up", count = seen.len
|
||||
check topic == "foobar"
|
||||
if not seenFut.finished() and seen.len >= runs:
|
||||
seenFut.complete()
|
||||
|
||||
dialer.subscribe("foobar", handler)
|
||||
await waitSub(nodes[0], dialer, "foobar")
|
||||
|
||||
# we want to test ping pong deliveries via control Iwant/Ihave, so we publish just in a tap
|
||||
let publishedTo = nodes[0].publish(
|
||||
"foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)
|
||||
).await
|
||||
check:
|
||||
publishedTo != 0
|
||||
publishedTo != runs
|
||||
|
||||
await wait(seenFut, 5.minutes)
|
||||
check:
|
||||
seen.len >= runs
|
||||
for k, v in seen.pairs:
|
||||
check:
|
||||
v >= 1
|
||||
|
||||
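  # Direct peers are explicitly configured links: messages are always
  # forwarded to them, but they are never grafted into the mesh.
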
asyncTest "GossipSub directPeers: always forward messages":
|
||||
let nodes = generateNodes(3, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await GossipSub(nodes[0]).addDirectPeer(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
await GossipSub(nodes[1]).addDirectPeer(
|
||||
nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
|
||||
)
|
||||
await GossipSub(nodes[1]).addDirectPeer(
|
||||
nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs
|
||||
)
|
||||
await GossipSub(nodes[2]).addDirectPeer(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
var handlerFut = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete()
|
||||
|
||||
proc noop(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
|
||||
nodes[0].subscribe("foobar", noop)
|
||||
nodes[1].subscribe("foobar", noop)
|
||||
nodes[2].subscribe("foobar", handler)
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1
|
||||
|
||||
await handlerFut.wait(2.seconds)
|
||||
|
||||
# peer shouldn't be in our mesh
|
||||
check "foobar" notin GossipSub(nodes[0]).mesh
|
||||
check "foobar" notin GossipSub(nodes[1]).mesh
|
||||
check "foobar" notin GossipSub(nodes[2]).mesh
|
||||
|
||||
asyncTest "GossipSub directPeers: send message to unsubscribed direct peer":
|
||||
# Given 2 nodes
|
||||
let
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true)
|
||||
node0 = nodes[0]
|
||||
node1 = nodes[1]
|
||||
g0 = GossipSub(node0)
|
||||
g1 = GossipSub(node1)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# With message observers
|
||||
var
|
||||
messageReceived0 = newFuture[bool]()
|
||||
messageReceived1 = newFuture[bool]()
|
||||
|
||||
proc observer0(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
for message in msgs.messages:
|
||||
if message.topic == "foobar":
|
||||
messageReceived0.complete(true)
|
||||
|
||||
proc observer1(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
for message in msgs.messages:
|
||||
if message.topic == "foobar":
|
||||
messageReceived1.complete(true)
|
||||
|
||||
node0.addObserver(PubSubObserver(onRecv: observer0))
|
||||
node1.addObserver(PubSubObserver(onRecv: observer1))
|
||||
|
||||
# Connect them as direct peers
|
||||
await g0.addDirectPeer(node1.peerInfo.peerId, node1.peerInfo.addrs)
|
||||
await g1.addDirectPeer(node0.peerInfo.peerId, node0.peerInfo.addrs)
|
||||
|
||||
# When node 0 sends a message
|
||||
let message = "Hello!".toBytes()
|
||||
let publishResult = await node0.publish("foobar", message)
|
||||
|
||||
# None should receive the message as they are not subscribed to the topic
|
||||
let results = await waitForStates(@[messageReceived0, messageReceived1])
|
||||
check:
|
||||
publishResult == 0
|
||||
results[0].isPending()
|
||||
results[1].isPending()
|
||||
|
||||
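  # A quick way to sanity-check the expected bytes below: in protobuf wire
  # format the leading 26 is tag 3 (control) with wire type 2, length 45, and
  # inside it tags 10/18/26/34/42 are fields 1..5 (ihave, iwant, graft,
  # prune, idontwant).
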
# check correctly parsed ihave/iwant/graft/prune/idontwant messages
|
||||
# check value before & after decoding equal using protoc cmd tool for reference
|
||||
asyncTest "ControlMessage RPCMsg encoding and decoding":
|
||||
let id: seq[byte] = @[123]
|
||||
let message = RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(
|
||||
ihave: @[ControlIHave(topicID: "foobar", messageIDs: @[id])],
|
||||
iwant: @[ControlIWant(messageIDs: @[id])],
|
||||
graft: @[ControlGraft(topicID: "foobar")],
|
||||
prune: @[ControlPrune(topicID: "foobar", backoff: 10.uint64)],
|
||||
idontwant: @[ControlIWant(messageIDs: @[id])],
|
||||
)
|
||||
)
|
||||
)
|
||||
#data encoded using protoc cmd tool
|
||||
let expectedEncoded: seq[byte] =
|
||||
@[
|
||||
26, 45, 10, 11, 10, 6, 102, 111, 111, 98, 97, 114, 18, 1, 123, 18, 3, 10, 1,
|
||||
123, 26, 8, 10, 6, 102, 111, 111, 98, 97, 114, 34, 10, 10, 6, 102, 111, 111, 98,
|
||||
97, 114, 24, 10, 42, 3, 10, 1, 123,
|
||||
]
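    # Byte-level breakdown of the expected bytes (standard protobuf wire format):
    #   26, 45             -> RPCMsg field 3 (control), length-delimited, 45 bytes
    #   10, 11, 10, 6, "foobar", 18, 1, 123 -> ihave: topicID + one messageID
    #   18, 3, 10, 1, 123  -> iwant: one messageID
    #   26, 8, 10, 6, "foobar"              -> graft: topicID
    #   34, 10, 10, 6, "foobar", 24, 10     -> prune: topicID + backoff varint 10
    #   42, 3, 10, 1, 123  -> idontwant: one messageID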

    let actualEncoded = encodeRpcMsg(message, true)
    check:
      actualEncoded == expectedEncoded

    let actualDecoded = decodeRpcMsg(expectedEncoded).value
    check:
      actualDecoded == message
409 tests/pubsub/testgossipsubscoring.nim Normal file
@@ -0,0 +1,409 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils]
import stew/byteutils
import metrics
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../../libp2p/muxers/muxer
import ../helpers, ../utils/[futures]

suite "GossipSub Scoring":
  teardown:
    checkTrackers()

  asyncTest "Disconnect bad peers":
    let topic = "foobar"
    var (gossipSub, conns, peers) =
      setupGossipSubWithPeers(30, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.parameters.disconnectBadPeers = true
    gossipSub.parameters.appSpecificWeight = 1.0

    for i, peer in peers:
      peer.appScore = gossipSub.parameters.graylistThreshold - 1
      let conn = conns[i]
      gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))

    gossipSub.updateScores()

    await sleepAsync(100.millis)

    check:
      # test our disconnect mechanics
      gossipSub.gossipsub.peers(topic) == 0
      # also ensure we cleanup properly the peersInIP table
      gossipSub.peersInIP.len == 0

  asyncTest "flood publish to all peers with score above threshold, regardless of subscription":
    let
      numberOfNodes = 3
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true, floodPublish = true)
      g0 = GossipSub(nodes[0])

    startNodesAndDeferStop(nodes)

    # Nodes 1 and 2 are connected to node 0
    await connectNodes(nodes[0], nodes[1])
    await connectNodes(nodes[0], nodes[2])

    let (handlerFut1, handler1) = createCompleteHandler()
    let (handlerFut2, handler2) = createCompleteHandler()

    # Nodes are subscribed to the same topic
    nodes[1].subscribe(topic, handler1)
    nodes[2].subscribe(topic, handler2)
    await waitForHeartbeat()

    # Given node 2's score is below the threshold
    for peer in g0.gossipsub.getOrDefault(topic):
      if peer.peerId == nodes[2].peerInfo.peerId:
        peer.score = (g0.parameters.publishThreshold - 1)

    # When node 0 publishes a message to the topic
    let message = "Hello!".toBytes()
    check (await nodes[0].publish(topic, message)) == 1
    await waitForHeartbeat(2)

    # Then only node 1 should receive the message
    let results = await waitForStates(@[handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
    check:
      results[0].isCompleted(true)
      results[1].isPending()

  proc initializeGossipTest(): Future[(seq[PubSub], GossipSub, GossipSub)] {.async.} =
    let nodes =
      generateNodes(2, gossip = true, overheadRateLimit = Opt.some((20, 1.millis)))

    await startNodes(nodes)
    await connectNodesStar(nodes)

    proc handle(topic: string, data: seq[byte]) {.async.} =
      discard

    let gossip0 = GossipSub(nodes[0])
    let gossip1 = GossipSub(nodes[1])

    gossip0.subscribe("foobar", handle)
    gossip1.subscribe("foobar", handle)
    await waitSubGraph(nodes, "foobar")

    # Avoid being disconnected by failing signature verification
    gossip0.verifySignature = false
    gossip1.verifySignature = false

    return (nodes, gossip0, gossip1)
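
  # Reads the cumulative Prometheus counter exported by gossipsub's rate
  # limiter; valueByName raises KeyError until a first sample is recorded,
  # which the helper treats as zero hits.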
  proc currentRateLimitHits(): float64 =
    try:
      libp2p_gossipsub_peers_rate_limit_hits.valueByName(
        "libp2p_gossipsub_peers_rate_limit_hits_total", @["nim-libp2p"]
      )
    except KeyError:
      0

  asyncTest "e2e - GossipSub should not rate limit decodable messages below the size allowed":
    let rateLimitHits = currentRateLimitHits()
    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    gossip0.broadcast(
      gossip0.mesh["foobar"],
      RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](10))]),
      isHighPriority = true,
    )
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    gossip0.broadcast(
      gossip0.mesh["foobar"],
      RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](12))]),
      isHighPriority = true,
    )
    await waitForHeartbeat()

    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
    check currentRateLimitHits() == rateLimitHits

    await stopNodes(nodes)

  asyncTest "e2e - GossipSub should rate limit undecodable messages above the size allowed":
    let rateLimitHits = currentRateLimitHits()

    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    # Simulate sending an undecodable message
    await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(
      newSeqWith(33, 1.byte), isHighPriority = true
    )
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits + 1
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(
      newSeqWith(35, 1.byte), isHighPriority = true
    )

    checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
    check currentRateLimitHits() == rateLimitHits + 2

    await stopNodes(nodes)

  asyncTest "e2e - GossipSub should rate limit decodable messages above the size allowed":
    let rateLimitHits = currentRateLimitHits()
    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    let msg = RPCMsg(
      control: some(
        ControlMessage(
          prune:
            @[
              ControlPrune(
                topicID: "foobar",
                peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))],
                backoff: 123'u64,
              )
            ]
        )
      )
    )
    gossip0.broadcast(gossip0.mesh["foobar"], msg, isHighPriority = true)
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits + 1
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    let msg2 = RPCMsg(
      control: some(
        ControlMessage(
          prune:
            @[
              ControlPrune(
                topicID: "foobar",
                peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))],
                backoff: 123'u64,
              )
            ]
        )
      )
    )
    gossip0.broadcast(gossip0.mesh["foobar"], msg2, isHighPriority = true)

    checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
    check currentRateLimitHits() == rateLimitHits + 2

    await stopNodes(nodes)

  asyncTest "e2e - GossipSub should rate limit invalid messages above the size allowed":
    let rateLimitHits = currentRateLimitHits()
    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    let topic = "foobar"
    proc execValidator(
        topic: string, message: messages.Message
    ): Future[ValidationResult] {.async: (raw: true).} =
      let res = newFuture[ValidationResult]()
      res.complete(ValidationResult.Reject)
      res

    gossip0.addValidator(topic, execValidator)
    gossip1.addValidator(topic, execValidator)

    let msg = RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](40))])

    gossip0.broadcast(gossip0.mesh[topic], msg, isHighPriority = true)
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits + 1
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    gossip0.broadcast(
      gossip0.mesh[topic],
      RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](35))]),
      isHighPriority = true,
    )

    checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
    check currentRateLimitHits() == rateLimitHits + 2

    await stopNodes(nodes)

  asyncTest "GossipSub directPeers: don't kick direct peer with low score":
    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)

    await GossipSub(nodes[0]).addDirectPeer(
      nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
    )
    await GossipSub(nodes[1]).addDirectPeer(
      nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
    )

    GossipSub(nodes[1]).parameters.disconnectBadPeers = true
    GossipSub(nodes[1]).parameters.graylistThreshold = 100000

    var handlerFut = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      handlerFut.complete()

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1

    await handlerFut

    GossipSub(nodes[1]).updateScores()
    # the peer's score is now below the graylist threshold
    check:
      GossipSub(nodes[1]).peerStats[nodes[0].switch.peerInfo.peerId].score <
        GossipSub(nodes[1]).parameters.graylistThreshold
    GossipSub(nodes[1]).updateScores()

    handlerFut = newFuture[void]()
    tryPublish await nodes[0].publish("foobar", toBytes("hellow2")), 1

    # Without directPeers, this would fail
    await handlerFut.wait(1.seconds)

  asyncTest "GossipSub peers disconnections mechanics":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    var seen: Table[string, int]
    var seenFut = newFuture[void]()
    for i in 0 ..< nodes.len:
      let dialer = nodes[i]
      var handler: TopicHandler
      closureScope:
        var peerName = $dialer.peerInfo.peerId
        handler = proc(topic: string, data: seq[byte]) {.async.} =
          seen.mgetOrPut(peerName, 0).inc()
          check topic == "foobar"
          if not seenFut.finished() and seen.len >= runs:
            seenFut.complete()

      dialer.subscribe("foobar", handler)

    await waitSubGraph(nodes, "foobar")

    # ensure peer stats are stored properly and kept properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

    tryPublish await wait(
      nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
      1.minutes,
    ), 1, 5.seconds, 3.minutes

    await wait(seenFut, 5.minutes)
    check:
      seen.len >= runs
    for k, v in seen.pairs:
      check:
        v >= 1

    for node in nodes:
      var gossip = GossipSub(node)
      check:
        "foobar" in gossip.gossipsub
        gossip.fanout.len == 0
        gossip.mesh["foobar"].len > 0

    # Removing some subscriptions

    for i in 0 ..< runs:
      if i mod 3 != 0:
        nodes[i].unsubscribeAll("foobar")

    # Waiting 2 heartbeats

    for _ in 0 .. 1:
      let evnt = newAsyncEvent()
      GossipSub(nodes[0]).heartbeatEvents &= evnt
      await evnt.wait()

    # ensure peer stats are stored properly and kept properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

    # Adding again subscriptions

    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"

    for i in 0 ..< runs:
      if i mod 3 != 0:
        nodes[i].subscribe("foobar", handler)

    # Waiting 2 heartbeats

    for _ in 0 .. 1:
      let evnt = newAsyncEvent()
      GossipSub(nodes[0]).heartbeatEvents &= evnt
      await evnt.wait()

    # ensure peer stats are stored properly and kept properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

  asyncTest "GossipSub scoring - decayInterval":
    let nodes = generateNodes(2, gossip = true)

    var gossip = GossipSub(nodes[0])
    const testDecayInterval = 50.milliseconds
    gossip.parameters.decayInterval = testDecayInterval

    startNodesAndDeferStop(nodes)

    var handlerFut = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      handlerFut.complete()

    await connectNodesStar(nodes)

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    tryPublish await nodes[0].publish("foobar", toBytes("hello")), 1

    await handlerFut

    gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries =
      100
    gossip.topicParams["foobar"].meshMessageDeliveriesDecay = 0.9

    # We should have decayed 5 times, though allowing 4..6
    await sleepAsync(testDecayInterval * 5)
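    # 100 * 0.9^5 ≈ 59.05; the 50.0 .. 66.0 window below tolerates one decay
    # tick of jitter either way (100 * 0.9^4 ≈ 65.61, 100 * 0.9^6 ≈ 53.14)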
    check:
      gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries in
        50.0 .. 66.0
@@ -1,4 +1,6 @@
{.used.}

import
  testfloodsub, testgossipsub, testgossipsub2, testmcache, testtimedcache, testmessage
  testgossipsubfanout, testgossipsubgossip, testgossipsubmeshmanagement,
  testgossipsubmessagehandling, testgossipsubscoring, testfloodsub, testmcache,
  testtimedcache, testmessage
@@ -4,14 +4,15 @@ const
  libp2p_pubsub_verify {.booldefine.} = true
  libp2p_pubsub_anonymize {.booldefine.} = false

import hashes, random, tables, sets, sequtils
import chronos, stew/[byteutils, results], chronos/ratelimit
import hashes, random, tables, sets, sequtils, sugar
import chronos, results, stew/byteutils, chronos/ratelimit
import
  ../../libp2p/[
    builders,
    protocols/pubsub/errors,
    protocols/pubsub/pubsub,
    protocols/pubsub/pubsubpeer,
    protocols/pubsub/peertable,
    protocols/pubsub/gossipsub,
    protocols/pubsub/floodsub,
    protocols/pubsub/rpc/messages,
@@ -24,7 +25,37 @@ export builders

randomize()

type TestGossipSub* = ref object of GossipSub
const TEST_GOSSIPSUB_HEARTBEAT_INTERVAL* = 60.milliseconds
const HEARTBEAT_TIMEOUT* = # TEST_GOSSIPSUB_HEARTBEAT_INTERVAL + 20%
  int64(float64(TEST_GOSSIPSUB_HEARTBEAT_INTERVAL.milliseconds) * 1.2).milliseconds
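
# waitForHeartbeat sleeps a little longer than one heartbeat interval so that
# state changes driven by the previous heartbeat are visible to the caller.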
proc waitForHeartbeat*(multiplier: int = 1) {.async.} =
  await sleepAsync(HEARTBEAT_TIMEOUT * multiplier)

type
  TestGossipSub* = ref object of GossipSub
  DValues* = object
    d*: Option[int]
    dLow*: Option[int]
    dHigh*: Option[int]
    dScore*: Option[int]
    dOut*: Option[int]
    dLazy*: Option[int]

proc noop*(data: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
  discard

proc voidTopicHandler*(topic: string, data: seq[byte]) {.async.} =
  discard

proc voidPeerHandler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
  discard

proc randomPeerId*(): PeerId =
  try:
    PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
  except CatchableError as exc:
    raise newException(Defect, exc.msg)

proc getPubSubPeer*(p: TestGossipSub, peerId: PeerId): PubSubPeer =
  proc getConn(): Future[Connection] {.
@@ -45,11 +76,57 @@ proc getPubSubPeer*(p: TestGossipSub, peerId: PeerId): PubSubPeer =
  onNewPeer(p, pubSubPeer)
  pubSubPeer

proc randomPeerId*(): PeerId =
  try:
    PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
  except CatchableError as exc:
    raise newException(Defect, exc.msg)
proc setupGossipSubWithPeers*(
    numPeers: int,
    topics: seq[string],
    populateGossipsub: bool = false,
    populateMesh: bool = false,
    populateFanout: bool = false,
): (TestGossipSub, seq[Connection], seq[PubSubPeer]) =
  let gossipSub = TestGossipSub.init(newStandardSwitch())

  for topic in topics:
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    gossipSub.fanout[topic] = initHashSet[PubSubPeer]()

  var conns = newSeq[Connection]()
  var peers = newSeq[PubSubPeer]()
  for i in 0 ..< numPeers:
    let conn = TestBufferStream.new(noop)
    conns &= conn
    let peerId = randomPeerId()
    conn.peerId = peerId
    let peer = gossipSub.getPubSubPeer(peerId)
    peer.sendConn = conn
    peer.handler = voidPeerHandler
    peers &= peer
    for topic in topics:
      if (populateGossipsub):
        gossipSub.gossipsub[topic].incl(peer)
      if (populateMesh):
        gossipSub.grafted(peer, topic)
        gossipSub.mesh[topic].incl(peer)
      if (populateFanout):
        gossipSub.fanout[topic].incl(peer)

  return (gossipSub, conns, peers)

proc setupGossipSubWithPeers*(
    numPeers: int,
    topic: string,
    populateGossipsub: bool = false,
    populateMesh: bool = false,
    populateFanout: bool = false,
): (TestGossipSub, seq[Connection], seq[PubSubPeer]) =
  return setupGossipSubWithPeers(
    numPeers, @[topic], populateGossipsub, populateMesh, populateFanout
  )

proc teardownGossipSub*(gossipSub: TestGossipSub, conns: seq[Connection]) {.async.} =
  await allFuturesThrowing(conns.mapIt(it.close()))
  await gossipSub.switch.stop()

func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
  let mid =
@@ -62,6 +139,24 @@ func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
      $m.data.hash & $m.topic.hash
  ok mid.toBytes()

proc applyDValues*(parameters: var GossipSubParams, dValues: Option[DValues]) =
  if dValues.isNone:
    return
  let values = dValues.get
  # Apply each value if it exists
  if values.d.isSome:
    parameters.d = values.d.get
  if values.dLow.isSome:
    parameters.dLow = values.dLow.get
  if values.dHigh.isSome:
    parameters.dHigh = values.dHigh.get
  if values.dScore.isSome:
    parameters.dScore = values.dScore.get
  if values.dOut.isSome:
    parameters.dOut = values.dOut.get
  if values.dLazy.isSome:
    parameters.dLazy = values.dLazy.get
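
# Illustrative call (hypothetical values): override only the mesh degree bounds
# and leave everything else at the GossipSubParams defaults:
#   generateNodes(
#     5, gossip = true, dValues = some(DValues(dLow: some(2), dHigh: some(8)))
#   )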

proc generateNodes*(
    num: Natural,
    secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
@@ -79,6 +174,10 @@ proc generateNodes*(
      Opt.none(tuple[bytes: int, interval: Duration]),
    gossipSubVersion: string = "",
    sendIDontWantOnPublish: bool = false,
    heartbeatInterval: Duration = TEST_GOSSIPSUB_HEARTBEAT_INTERVAL,
    floodPublish: bool = false,
    dValues: Option[DValues] = DValues.none(),
    gossipFactor: Option[float] = float.none(),
): seq[PubSub] =
  for i in 0 ..< num:
    let switch = newStandardSwitch(
@@ -96,13 +195,16 @@ proc generateNodes*(
          maxMessageSize = maxMessageSize,
          parameters = (
            var p = GossipSubParams.init()
            p.floodPublish = false
            p.heartbeatInterval = heartbeatInterval
            p.floodPublish = floodPublish
            p.historyLength = 20
            p.historyGossip = 20
            p.unsubscribeBackoff = unsubscribeBackoff
            p.enablePX = enablePX
            p.overheadRateLimit = overheadRateLimit
            p.sendIDontWantOnPublish = sendIDontWantOnPublish
            if gossipFactor.isSome: p.gossipFactor = gossipFactor.get
            applyDValues(p, dValues)
            p
          ),
        )
@@ -127,13 +229,21 @@ proc generateNodes*(
    switch.mount(pubsub)
    result.add(pubsub)

proc subscribeNodes*(nodes: seq[PubSub]) {.async.} =
proc toGossipSub*(nodes: seq[PubSub]): seq[GossipSub] =
  return nodes.mapIt(GossipSub(it))

proc connectNodes*[T: PubSub](dialer: T, target: T) {.async.} =
  doAssert dialer.switch.peerInfo.peerId != target.switch.peerInfo.peerId,
    "Could not connect same peer"
  await dialer.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)

proc connectNodesStar*[T: PubSub](nodes: seq[T]) {.async.} =
  for dialer in nodes:
    for node in nodes:
      if dialer.switch.peerInfo.peerId != node.switch.peerInfo.peerId:
        await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
        await connectNodes(dialer, node)

proc subscribeSparseNodes*(nodes: seq[PubSub], degree: int = 2) {.async.} =
proc connectNodesSparse*[T: PubSub](nodes: seq[T], degree: int = 2) {.async.} =
  if nodes.len < degree:
    raise
      (ref CatchableError)(msg: "nodes count needs to be greater or equal to degree!")
@@ -143,18 +253,14 @@ proc subscribeSparseNodes*(nodes: seq[PubSub], degree: int = 2) {.async.} =
      continue

    for node in nodes:
      if dialer.switch.peerInfo.peerId != node.peerInfo.peerId:
        await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
      if dialer.switch.peerInfo.peerId != node.switch.peerInfo.peerId:
        await connectNodes(dialer, node)

proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
  for dialer in nodes:
    var dialed: seq[PeerId]
    while dialed.len < nodes.len - 1:
      let node = sample(nodes)
      if node.peerInfo.peerId notin dialed:
        if dialer.peerInfo.peerId != node.peerInfo.peerId:
          await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
        dialed.add(node.peerInfo.peerId)
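
# One step of a polling loop: sleep `interval`, then fail the test immediately
# if the `maximum` deadline has passed. The wait* helpers below call this until
# their condition holds.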
proc activeWait(
    interval: Duration, maximum: Moment, timeoutErrorMessage = "Timeout on activeWait"
) {.async.} =
  await sleepAsync(interval)
  doAssert Moment.now() < maximum, timeoutErrorMessage

proc waitSub*(sender, receiver: auto, key: string) {.async.} =
  if sender == receiver:
@@ -177,10 +283,14 @@ proc waitSub*(sender, receiver: auto, key: string) {.async.} =
    )
  :
    trace "waitSub sleeping..."
    await activeWait(5.milliseconds, timeout, "waitSub timeout!")

    # await
    await sleepAsync(5.milliseconds)
    doAssert Moment.now() < timeout, "waitSub timeout!"
proc waitSubAllNodes*(nodes: seq[auto], topic: string) {.async.} =
  let numberOfNodes = nodes.len
  for x in 0 ..< numberOfNodes:
    for y in 0 ..< numberOfNodes:
      if x != y:
        await waitSub(nodes[x], nodes[y], topic)

proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
  let timeout = Moment.now() + 5.seconds
@@ -207,6 +317,200 @@ proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
    if ok == nodes.len:
      return
    trace "waitSubGraph sleeping..."
    await activeWait(5.milliseconds, timeout, "waitSubGraph timeout!")

    await sleepAsync(5.milliseconds)
    doAssert Moment.now() < timeout, "waitSubGraph timeout!"
proc waitForMesh*(
    sender: auto, receiver: auto, key: string, timeoutDuration = 5.seconds
) {.async.} =
  if sender == receiver:
    return

  let
    timeoutMoment = Moment.now() + timeoutDuration
    gossipsubSender = GossipSub(sender)
    receiverPeerId = receiver.peerInfo.peerId

  while not gossipsubSender.mesh.hasPeerId(key, receiverPeerId):
    trace "waitForMesh sleeping..."
    await activeWait(5.milliseconds, timeoutMoment, "waitForMesh timeout!")

type PeerTableType* {.pure.} = enum
  Gossipsub = "gossipsub"
  Mesh = "mesh"
  Fanout = "fanout"

proc waitForPeersInTable*(
    nodes: seq[auto],
    topic: string,
    peerCounts: seq[int],
    table: PeerTableType,
    timeout = 5.seconds,
) {.async.} =
  ## Wait until each node in `nodes` has at least the corresponding number of peers from `peerCounts`
  ## in the specified table (mesh, gossipsub, or fanout) for the given topic

  doAssert nodes.len == peerCounts.len, "Node count must match peer count expectations"

  # Helper proc to check current state and update satisfaction status
  proc checkState(
      nodes: seq[auto],
      topic: string,
      peerCounts: seq[int],
      table: PeerTableType,
      satisfied: var seq[bool],
  ): bool =
    for i in 0 ..< nodes.len:
      if not satisfied[i]:
        let fsub = GossipSub(nodes[i])
        let currentCount =
          case table
          of PeerTableType.Mesh:
            fsub.mesh.getOrDefault(topic).len
          of PeerTableType.Gossipsub:
            fsub.gossipsub.getOrDefault(topic).len
          of PeerTableType.Fanout:
            fsub.fanout.getOrDefault(topic).len
        satisfied[i] = currentCount >= peerCounts[i]
    return satisfied.allIt(it)

  let timeoutMoment = Moment.now() + timeout
  var
    satisfied = newSeq[bool](nodes.len)
    allSatisfied = false

  allSatisfied = checkState(nodes, topic, peerCounts, table, satisfied) # Initial check
  # Continue checking until all requirements are met or timeout
  while not allSatisfied:
    await activeWait(
      5.milliseconds,
      timeoutMoment,
      "Timeout waiting for peer counts in " & $table & " for topic " & topic,
    )
    allSatisfied = checkState(nodes, topic, peerCounts, table, satisfied)

proc startNodes*[T: PubSub](nodes: seq[T]) {.async.} =
  await allFuturesThrowing(nodes.mapIt(it.switch.start()))

proc stopNodes*[T: PubSub](nodes: seq[T]) {.async.} =
  await allFuturesThrowing(nodes.mapIt(it.switch.stop()))

template startNodesAndDeferStop*[T: PubSub](nodes: seq[T]): untyped =
  await startNodes(nodes)
  defer:
    await stopNodes(nodes)

proc subscribeAllNodes*[T: PubSub](
    nodes: seq[T], topic: string, topicHandler: TopicHandler
) =
  for node in nodes:
    node.subscribe(topic, topicHandler)

proc unsubscribeAllNodes*[T: PubSub](
    nodes: seq[T], topic: string, topicHandler: TopicHandler
) =
  for node in nodes:
    node.unsubscribe(topic, topicHandler)

proc subscribeAllNodes*[T: PubSub](
    nodes: seq[T], topic: string, topicHandlers: seq[TopicHandler]
) =
  if nodes.len != topicHandlers.len:
    raise (ref CatchableError)(msg: "nodes and topicHandlers count needs to match!")

  for i in 0 ..< nodes.len:
    nodes[i].subscribe(topic, topicHandlers[i])

template tryPublish*(
    call: untyped, require: int, wait = 10.milliseconds, timeout = 10.seconds
): untyped =
  var
    expiration = Moment.now() + timeout
    pubs = 0
  while pubs < require and Moment.now() < expiration:
    pubs = pubs + call
    await sleepAsync(wait)

  doAssert pubs >= require, "Failed to publish!"
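
# Typical use, as in the tests above:
#   tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1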

proc createCompleteHandler*(): (
  Future[bool], proc(topic: string, data: seq[byte]) {.async.}
) =
  var fut = newFuture[bool]()
  proc handler(topic: string, data: seq[byte]) {.async.} =
    fut.complete(true)

  return (fut, handler)

proc addIHaveObservers*(nodes: seq[auto], topic: string, receivedIHaves: ref seq[int]) =
  let numberOfNodes = nodes.len
  receivedIHaves[] = repeat(0, numberOfNodes)

  for i in 0 ..< numberOfNodes:
    var pubsubObserver: PubSubObserver
    capture i:
      let checkForIhaves = proc(peer: PubSubPeer, msgs: var RPCMsg) =
        if msgs.control.isSome:
          let iHave = msgs.control.get.ihave
          if iHave.len > 0:
            for msg in iHave:
              if msg.topicID == topic:
                receivedIHaves[i] += 1
      pubsubObserver = PubSubObserver(onRecv: checkForIhaves)
    nodes[i].addObserver(pubsubObserver)

proc addIDontWantObservers*(nodes: seq[auto], receivedIDontWants: ref seq[int]) =
  let numberOfNodes = nodes.len
  receivedIDontWants[] = repeat(0, numberOfNodes)

  for i in 0 ..< numberOfNodes:
    var pubsubObserver: PubSubObserver
    capture i:
      let checkForIDontWant = proc(peer: PubSubPeer, msgs: var RPCMsg) =
        if msgs.control.isSome:
          let iDontWant = msgs.control.get.idontwant
          if iDontWant.len > 0:
            receivedIDontWants[i] += 1
      pubsubObserver = PubSubObserver(onRecv: checkForIDontWant)
    nodes[i].addObserver(pubsubObserver)

# TODO: refactor helper methods from testgossipsub.nim
proc setupNodes*(count: int): seq[PubSub] =
  generateNodes(count, gossip = true)

proc connectNodes*(nodes: seq[PubSub], target: PubSub) {.async.} =
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"

  for node in nodes:
    node.subscribe("foobar", handler)
    await node.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)

proc baseTestProcedure*(
    nodes: seq[PubSub],
    gossip1: GossipSub,
    numPeersFirstMsg: int,
    numPeersSecondMsg: int,
) {.async.} =
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"

  block setup:
    for i in 0 ..< 50:
      if (await nodes[0].publish("foobar", ("Hello!" & $i).toBytes())) == 19:
        break setup
      await sleepAsync(10.milliseconds)
    check false

  check (await nodes[0].publish("foobar", newSeq[byte](2_500_000))) == numPeersFirstMsg
  check (await nodes[0].publish("foobar", newSeq[byte](500_001))) == numPeersSecondMsg

  # Now try with a mesh
  gossip1.subscribe("foobar", handler)
  checkUntilTimeout:
    gossip1.mesh.peers("foobar") > 5

  # use a different length so that the message is not equal to the last
  check (await nodes[0].publish("foobar", newSeq[byte](500_000))) == numPeersSecondMsg

proc `$`*(peer: PubSubPeer): string =
  shortLog(peer)

@@ -11,8 +11,9 @@

{.push raises: [].}

import std/net
import tables
import chronos, stew/[byteutils, endians2, shims/net]
import chronos, stew/[byteutils, endians2]
import
  ../../libp2p/[
    stream/connection,
@@ -62,7 +63,8 @@ proc start*(self: TorServerStub, address: TransportAddress) {.async.} =
        var ip: array[4, byte]
        for i, e in msg[0 ..^ 3]:
          ip[i] = e
        $(ipv4(ip)) & ":" & $(Port(fromBytesBE(uint16, msg[^2 ..^ 1])))
        $(IpAddress(family: IPv4, address_v4: ip)) & ":" &
          $(Port(fromBytesBE(uint16, msg[^2 ..^ 1])))
      of Socks5AddressType.IPv6.byte:
        let n = 16 + 2 # +2 bytes for the port
        msg = newSeq[byte](n) # +2 bytes for the port
@@ -70,7 +72,8 @@ proc start*(self: TorServerStub, address: TransportAddress) {.async.} =
        var ip: array[16, byte]
        for i, e in msg[0 ..^ 3]:
          ip[i] = e
        $(ipv6(ip)) & ":" & $(Port(fromBytesBE(uint16, msg[^2 ..^ 1])))
        $(IpAddress(family: IPv6, address_v6: ip)) & ":" &
          $(Port(fromBytesBE(uint16, msg[^2 ..^ 1])))
      of Socks5AddressType.FQDN.byte:
        await connSrc.readExactly(addr msg[0], 1)
        let n = int(uint8.fromBytes(msg[0 .. 0])) + 2 # +2 bytes for the port
3 tests/testall.nim Normal file
@@ -0,0 +1,3 @@
{.used.}

import testnative, testdaemon, ./pubsub/testpubsub, testinterop
45 tests/testbridgestream.nim Normal file
@@ -0,0 +1,45 @@
{.used.}

# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import ./helpers
import stew/byteutils
import ../libp2p/stream/bridgestream
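
# bridgedConnections wires two in-memory Connection endpoints back to back:
# bytes written on one side become readable on the other, and closing either
# end EOFs its peer (the behaviour exercised by the tests below).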

suite "BridgeStream":
  asyncTest "send-receive":
    let (c1, c2) = bridgedConnections()
    var msg: array[8, byte]

    # c1 -> c2
    await c1.write("hello c2")
    await c2.readExactly(addr msg, msg.len)
    check string.fromBytes(msg) == "hello c2"

    # c2 -> c1
    await c2.write("hello c1")
    await c1.readExactly(addr msg, msg.len)
    check string.fromBytes(msg) == "hello c1"

    await c1.close()
    await c2.close()

  asyncTest "closing":
    # closing c1, should also close c2
    var (c1, c2) = bridgedConnections()
    await c1.close()
    expect LPStreamEOFError:
      await c2.write("hello c1")

    # closing c2, should also close c1
    (c1, c2) = bridgedConnections()
    await c2.close()
    expect LPStreamEOFError:
      await c1.write("hello c2")
@@ -10,7 +10,7 @@
# those terms.

import chronos, stew/byteutils
import ../libp2p/stream/bufferstream, ../libp2p/stream/lpstream, ../libp2p/errors
import ../libp2p/stream/bufferstream, ../libp2p/stream/lpstream

import ./helpers

@@ -10,11 +10,9 @@
# those terms.

import std/[sequtils, tables]
import stew/results
import results
import chronos
import
  ../libp2p/
    [connmanager, stream/connection, crypto/crypto, muxers/muxer, peerinfo, errors]
import ../libp2p/[connmanager, stream/connection, crypto/crypto, muxers/muxer, peerinfo]

import helpers

@@ -18,14 +18,14 @@ import
    discovery/discoverymngr,
    discovery/rendezvousinterface,
  ]
import ./helpers
import ./helpers, ./utils/async_tests

proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
  SwitchBuilder
  .new()
  .withRng(newRng())
  .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
  .withTcpTransport()
  .withAddresses(@[MultiAddress.init(MemoryAutoAddress).tryGet()])
  .withMemoryTransport()
  .withMplex()
  .withNoise()
  .withRendezVous(rdv)

@@ -12,7 +12,7 @@
import unittest2
import nimcrypto/utils
import ../libp2p/crypto/[crypto, ecnist]
import stew/results
import results

const
  TestsCount = 10 # number of random tests

@@ -1,10 +1,10 @@
import helpers, commoninterop
import ../libp2p
import ../libp2p/crypto/crypto, ../libp2p/protocols/connectivity/relay/[relay, client]
import ../libp2p/crypto/crypto, ../libp2p/protocols/connectivity/relay/relay

proc switchMplexCreator(
    ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
    prov: TransportProvider = proc(upgr: Upgrade): Transport =
    prov: TransportProvider = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
      TcpTransport.new({}, upgr),
    relay: Relay = Relay.new(circuitRelayV1 = true),
): Switch {.raises: [LPError].} =
@@ -27,7 +27,7 @@ proc switchMplexCreator(

proc switchYamuxCreator(
    ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
    prov: TransportProvider = proc(upgr: Upgrade): Transport =
    prov: TransportProvider = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
      TcpTransport.new({}, upgr),
    relay: Relay = Relay.new(circuitRelayV1 = true),
): Switch {.raises: [LPError].} =
Some files were not shown because too many files have changed in this diff.