Compare commits

...

40 Commits

Author SHA1 Message Date
richΛrd
b0f77d24f9 chore(version): update libp2p.nimble to 1.10.0 (#1351) 2025-05-01 05:39:58 -04:00
richΛrd
e32ac492d3 chore: set @vacp2p/p2p team as codeowners of repo (#1352) 2025-05-01 05:03:54 -03:00
Gabriel Cruz
470a7f8cc5 chore: add libp2p CID codec (#1348) 2025-04-27 09:45:40 +00:00
Radosław Kamiński
b269fce289 test(gossipsub): reorganize tests by feature category (#1350) 2025-04-25 16:48:50 +01:00
vladopajic
bc4febe92c fix: git ignore for tests (#1349) 2025-04-24 15:36:46 +02:00
Radosław Kamiński
b5f9bfe0f4 test(gossipsub): optimise heartbeat interval and sleepAsync (#1342) 2025-04-23 18:10:16 +01:00
Gabriel Cruz
4ce1e8119b chore(readme): add gabe as a maintainer (#1346) 2025-04-23 15:57:32 +02:00
Miran
65136b38e2 chore: fix warnings (#1341)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-04-22 19:45:53 +00:00
Gabriel Cruz
ffc114e8d9 chore: fix broken old status-im links (#1332) 2025-04-22 09:14:26 +00:00
Radosław Kamiński
f2be2d6ed5 test: include missing tests in testall (#1338) 2025-04-22 09:45:38 +01:00
Radosław Kamiński
ab690a06a6 test: combine tests (#1335) 2025-04-21 17:39:42 +01:00
Radosław Kamiński
10cdaf14c5 chore(ci): decouple examples from unit tests (#1334) 2025-04-21 16:31:50 +01:00
Radosław Kamiński
ebbfb63c17 chore(test): remove unused flags and simplify testpubsub (#1328) 2025-04-17 13:38:27 +02:00
Álex
ac25da6cea test(gossipsub): message propagation (#1184)
Co-authored-by: Radoslaw Kaminski <radoslaw@status.im>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-04-14 15:49:13 +01:00
Gabriel Cruz
fb41972ba3 chore: rendezvous improvements (#1319) 2025-04-11 13:31:24 +00:00
Richard Ramos
504d1618af fix: bump nim-quic 2025-04-10 17:54:20 -04:00
richΛrd
0f91b23f12 fix: do not use while loop for quic transport errors (#1317) 2025-04-10 21:47:42 +00:00
vladopajic
5ddd62a8b9 chore(git): ignore auto generated test binaries (#1320) 2025-04-10 13:39:04 +00:00
vladopajic
e7f13a7e73 refactor: utilize singe bridgedConnections (#1309) 2025-04-10 08:37:26 -04:00
vladopajic
89e825fb0d fix(quic): continue accept when client certificate is incorrect (#1312) 2025-04-08 21:03:47 +02:00
richΛrd
1b706e84fa chore: bump nim-quic (#1314) 2025-04-08 10:04:31 -04:00
richΛrd
5cafcb70dc chore: remove range checks from rendezvous (#1306) 2025-04-07 12:16:18 +00:00
vladopajic
8c71266058 chore(readme): add quic and memory transports (#1311) 2025-04-04 15:07:31 +00:00
vladopajic
9c986c5c13 feat(transport): add memory transport (#1304) 2025-04-04 15:43:34 +02:00
vladopajic
3d0451d7f2 chore(protocols): remove deprecated utilities (#1305) 2025-04-04 08:44:36 +00:00
richΛrd
b1f65c97ae fix: unsafe string usage (#1308) 2025-04-03 15:33:08 -04:00
vladopajic
5584809fca chore(certificate): update test vectors (#1294) 2025-04-01 17:15:26 +02:00
richΛrd
7586f17b15 fix: set peerId on incoming Quic connection (#1302) 2025-03-31 09:38:30 -04:00
richΛrd
0e16d873c8 feat: withQuicTransport (#1301) 2025-03-30 04:44:49 +00:00
richΛrd
b11acd2118 chore: update quic and expect exception in test (#1300)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-03-27 12:19:49 -04:00
vladopajic
1376f5b077 chore(quic): add tests with invalid certs (#1297) 2025-03-27 15:19:14 +01:00
richΛrd
340ea05ae5 feat: quic (#1265)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-03-26 10:17:15 -04:00
vladopajic
024ec51f66 feat(certificate): add date verification (#1299) 2025-03-25 11:50:25 +01:00
richΛrd
efe453df87 refactor: use openssl instead of mbedtls (#1298) 2025-03-24 10:22:52 -04:00
vladopajic
c0f4d903ba feat(certificate): set distinguishable issuer name with peer id (#1296) 2025-03-21 12:38:02 +00:00
vladopajic
28f2b268ae chore(certificate): cosmetics (#1293) 2025-03-19 17:02:14 +00:00
vladopajic
5abb6916b6 feat: X.509 certificate validation (#1292) 2025-03-19 15:40:14 +00:00
richΛrd
e6aec94c0c chore: use token per repo in autobump task (#1288) 2025-03-18 17:12:52 +00:00
vladopajic
9eddc7c662 chore: specify exceptions (#1284) 2025-03-17 13:09:18 +00:00
richΛrd
028c730a4f chore: remove python dependency (#1287) 2025-03-17 08:04:30 -04:00
100 changed files with 5670 additions and 3461 deletions

1
.github/CODEOWNERS vendored Normal file
View File

@@ -0,0 +1 @@
* @vacp2p/p2p

View File

@@ -96,15 +96,9 @@ jobs:
# The change happened on Nimble v0.14.0. Also forcing the deps to be reinstalled on each os and cpu.
key: nimbledeps-${{ matrix.nim.ref }}-${{ matrix.builder }}-${{ matrix.platform.cpu }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows
- name: Setup python
run: |
mkdir .venv
python -m venv .venv
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
run: |
source .venv/bin/activate
nimble install_pinned
- name: Use gcc 14
@@ -118,8 +112,6 @@ jobs:
- name: Run tests
run: |
source .venv/bin/activate
nim --version
nimble --version
gcc --version

View File

@@ -17,10 +17,13 @@ jobs:
target:
- repository: status-im/nimbus-eth2
ref: unstable
token: ${{ secrets.ACTIONS_GITHUB_NIMBUS_ETH2 }}
- repository: waku-org/nwaku
ref: master
token: ${{ secrets.ACTIONS_GITHUB_NWAKU }}
- repository: codex-storage/nim-codex
ref: master
token: ${{ secrets.ACTIONS_GITHUB_NIM_CODEX }}
steps:
- name: Clone target repository
uses: actions/checkout@v4
@@ -29,7 +32,7 @@ jobs:
ref: ${{ matrix.target.ref}}
path: nbc
fetch-depth: 0
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
token: ${{ matrix.target.token }}
- name: Checkout this ref in target repository
run: |

60
.github/workflows/examples.yml vendored Normal file
View File

@@ -0,0 +1,60 @@
name: Examples
on:
push:
branches:
- master
pull_request:
merge_group:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
examples:
timeout-minutes: 30
strategy:
fail-fast: false
defaults:
run:
shell: bash
name: "Build Examples"
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: true
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
shell: bash
os: linux
cpu: amd64
nim_ref: version-1-6
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v3
with:
path: nimbledeps
key: nimbledeps-${{ hashFiles('.pinned') }}
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
run: |
nimble install_pinned
- name: Build and run examples
run: |
nim --version
nimble --version
gcc --version
NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble examples

8
.gitignore vendored
View File

@@ -17,3 +17,11 @@ examples/*.md
nimble.develop
nimble.paths
go-libp2p-daemon/
# Ignore all test build files in tests folder (auto generated when running tests).
# First rule (`tests/**/test*[^.]*`) will ignore all binaries: has prefix test + does not have a dot in the name.
# Second and third rules are here to un-ignore all files with an extension and the Dockerfile,
# because it appears that VS Code skips text search in some test files without these rules.
tests/**/test*[^.]*
!tests/**/*.*
!tests/**/Dockerfile

View File

@@ -5,11 +5,10 @@ dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153
faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
json_serialization;https://github.com/status-im/nim-json-serialization@#85b7ea093cb85ee4f433a617b97571bd709d30df
mbedtls;https://github.com/status-im/nim-mbedtls.git@#740fb2f469511adc1772c5cb32395f4076b9e0c5
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
ngtcp2;https://github.com/status-im/nim-ngtcp2@#6834f4756b6af58356ac9c4fef3d71db3c3ae5fe
ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
nimcrypto;https://github.com/cheatfate/nimcrypto@#1c8d6e3caf3abc572136ae9a1da81730c4eb4288
quic;https://github.com/status-im/nim-quic.git@#ddcb31ffb74b5460ab37fd13547eca90594248bc
quic;https://github.com/status-im/nim-quic.git@#d54e8f0f2e454604b767fadeae243d95c30c383f
results;https://github.com/arnetheduck/nim-results@#f3c666a272c69d70cb41e7245e7f6844797303ad
secp256k1;https://github.com/status-im/nim-secp256k1@#7246d91c667f4cc3759fdd50339caa45a2ecd8be
serialization;https://github.com/status-im/nim-serialization@#4bdbc29e54fe54049950e352bb969aab97173b35

View File

@@ -70,6 +70,8 @@ List of packages modules implemented in nim-libp2p:
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
| [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
| [libp2p-quic](libp2p/transports/quictransport.nim) | Quic Transport |
| [libp2p-memory](libp2p/transports/memorytransport.nim) | Memory Transport |
| **Secure Channels** | |
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
@@ -147,6 +149,7 @@ The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-st
<tr>
<td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
<td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
<td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
</tr>
</tbody>
</table>

View File

@@ -0,0 +1,3 @@
{.used.}
import directchat, tutorial_6_game

View File

@@ -0,0 +1,5 @@
{.used.}
import
helloworld, circuitrelay, tutorial_1_connect, tutorial_2_customproto,
tutorial_3_protobuf, tutorial_4_gossipsub, tutorial_5_discovery

View File

@@ -93,8 +93,8 @@ proc serveThread(udata: CustomData) {.async.} =
pending.add(item.write(msg))
if len(pending) > 0:
var results = await all(pending)
except:
echo getCurrentException().msg
except CatchableError as err:
echo err.msg
proc main() {.async.} =
var data = new CustomData

View File

@@ -158,8 +158,8 @@ waitFor(main())
## This is John receiving & logging everyone's metrics.
##
## ## Going further
## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
## and [topic params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
## and [topic params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
## you can achieve very different properties.
##
## Also see reports for [GossipSub v1.1](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4)

View File

@@ -17,7 +17,7 @@ when defined(nimdoc):
## stay backward compatible during the Major version, whereas private ones can
## change at each new Minor version.
##
## If you're new to nim-libp2p, you can find a tutorial `here<https://status-im.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
## If you're new to nim-libp2p, you can find a tutorial `here<https://vacp2p.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
## that can help you get started.
# Import stuff for doc

View File

@@ -1,7 +1,7 @@
mode = ScriptMode.Verbose
packageName = "libp2p"
version = "1.9.0"
version = "1.10.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
@@ -10,9 +10,8 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
requires "nim >= 1.6.0",
"nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
"chronicles >= 0.10.2", "chronos >= 4.0.3", "metrics", "secp256k1", "stew#head",
"websock", "unittest2",
"https://github.com/status-im/nim-quic.git#ddcb31ffb74b5460ab37fd13547eca90594248bc",
"https://github.com/status-im/nim-mbedtls.git"
"websock", "unittest2", "results",
"https://github.com/status-im/nim-quic.git#d54e8f0f2e454604b767fadeae243d95c30c383f"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -26,12 +25,8 @@ let cfg =
import hashes, strutils
proc runTest(
filename: string, verify: bool = true, sign: bool = true, moreoptions: string = ""
) =
proc runTest(filename: string, moreoptions: string = "") =
var excstr = nimc & " " & lang & " -d:debug " & cfg & " " & flags
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
excstr.add(" " & moreoptions & " ")
if getEnv("CICOV").len > 0:
excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
@@ -61,51 +56,15 @@ task testinterop, "Runs interop tests":
runTest("testinterop")
task testpubsub, "Runs pubsub tests":
runTest(
"pubsub/testgossipinternal",
sign = false,
verify = false,
moreoptions = "-d:pubsub_internal_testing",
)
runTest("pubsub/testpubsub")
runTest("pubsub/testpubsub", sign = false, verify = false)
runTest(
"pubsub/testpubsub",
sign = false,
verify = false,
moreoptions = "-d:libp2p_pubsub_anonymize=true",
)
task testpubsub_slim, "Runs pubsub tests":
runTest(
"pubsub/testgossipinternal",
sign = false,
verify = false,
moreoptions = "-d:pubsub_internal_testing",
)
runTest("pubsub/testpubsub")
task testfilter, "Run PKI filter test":
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1\"")
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519\"")
runTest(
"testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519;ecnist\""
)
runTest("testpkifilter")
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=")
task test, "Runs the test suite":
exec "nimble testnative"
exec "nimble testpubsub"
exec "nimble testdaemon"
exec "nimble testinterop"
runTest("testall")
exec "nimble testfilter"
exec "nimble examples_build"
task test_slim, "Runs the (slimmed down) test suite":
exec "nimble testnative"
exec "nimble testpubsub_slim"
exec "nimble testfilter"
exec "nimble examples_build"
task website, "Build the website":
tutorialToMd("examples/tutorial_1_connect.nim")
@@ -117,18 +76,12 @@ task website, "Build the website":
tutorialToMd("examples/circuitrelay.nim")
exec "mkdocs build"
task examples_build, "Build the samples":
buildSample("directchat")
buildSample("helloworld", true)
buildSample("circuitrelay", true)
buildSample("tutorial_1_connect", true)
buildSample("tutorial_2_customproto", true)
buildSample("tutorial_3_protobuf", true)
buildSample("tutorial_4_gossipsub", true)
buildSample("tutorial_5_discovery", true)
task examples, "Build and run examples":
exec "nimble install -y nimpng"
exec "nimble install -y nico --passNim=--skipParentCfg"
buildSample("tutorial_6_game", false, "--styleCheck:off")
buildSample("examples_build", false, "--styleCheck:off") # build only
buildSample("examples_run", true)
# pin system
# while nimble lockfile

View File

@@ -23,7 +23,7 @@ import
stream/connection,
multiaddress,
crypto/crypto,
transports/[transport, tcptransport],
transports/[transport, tcptransport, quictransport, memorytransport],
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
@@ -37,8 +37,11 @@ import services/wildcardresolverservice
export switch, peerid, peerinfo, connection, multiaddress, crypto, errors
const MemoryAutoAddress* = memorytransport.MemoryAutoAddress
type
TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [].}
TransportProvider* {.public.} =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport {.gcsafe, raises: [].}
SecureProtocol* {.pure.} = enum
Noise
@@ -151,7 +154,7 @@ proc withTransport*(
let switch = SwitchBuilder
.new()
.withTransport(
proc(upgr: Upgrade): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TcpTransport.new(flags, upgr)
)
.build()
@@ -162,10 +165,22 @@ proc withTcpTransport*(
b: SwitchBuilder, flags: set[ServerFlags] = {}
): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TcpTransport.new(flags, upgr)
)
proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
QuicTransport.new(upgr, privateKey)
)
proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
MemoryTransport.new(upgr)
)
proc withRng*(b: SwitchBuilder, rng: ref HmacDrbgContext): SwitchBuilder {.public.} =
b.rng = rng
b
@@ -270,7 +285,7 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
let transports = block:
var transports: seq[Transport]
for tProvider in b.transports:
transports.add(tProvider(muxedUpgrade))
transports.add(tProvider(muxedUpgrade, seckey))
transports
if b.secureManagers.len == 0:

View File

@@ -12,8 +12,8 @@
{.push raises: [].}
import tables, hashes
import multibase, multicodec, multihash, vbuffer, varint
import stew/[base58, results]
import multibase, multicodec, multihash, vbuffer, varint, results
import stew/base58
export results
@@ -41,6 +41,7 @@ const ContentIdsList = [
multiCodec("dag-pb"),
multiCodec("dag-cbor"),
multiCodec("dag-json"),
multiCodec("libp2p-key"),
multiCodec("git-raw"),
multiCodec("eth-block"),
multiCodec("eth-block-list"),

View File

@@ -76,7 +76,7 @@ import nimcrypto/[rijndael, twofish, sha2, hash, hmac]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import ../utility
import stew/results
import results
export results, utility
# This is workaround for Nim's `import` bug

View File

@@ -18,7 +18,7 @@
{.push raises: [].}
import bearssl/[ec, rand]
import stew/results
import results
from stew/assign2 import assign
export results

View File

@@ -21,7 +21,8 @@ import bearssl/[ec, rand, hash]
import nimcrypto/utils as ncrutils
import minasn1
export minasn1.Asn1Error
import stew/[results, ctops]
import stew/ctops
import results
import ../utility

View File

@@ -18,7 +18,8 @@ import constants
import nimcrypto/[hash, sha2]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import stew/[results, ctops]
import results
import stew/ctops
import ../../utility

View File

@@ -11,7 +11,8 @@
{.push raises: [].}
import stew/[endians2, results, ctops]
import stew/[endians2, ctops]
import results
export results
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
@@ -291,28 +292,6 @@ proc asn1EncodeBitString*(
dest[2 + lenlen + bytelen - 1] = lastbyte and mask
res
proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte], value: T): int =
var v = value
if value <= cast[T](0x7F):
if len(dest) >= 1:
dest[0] = cast[byte](value)
1
else:
var s = 0
var res = 0
while v != 0:
v = v shr 7
s += 7
inc(res)
if len(dest) >= res:
var k = 0
while s != 0:
s -= 7
dest[k] = cast[byte](((value shr s) and cast[T](0x7F)) or cast[T](0x80))
inc(k)
dest[k - 1] = dest[k - 1] and 0x7F'u8
res
proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
## Encode array of bytes ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and return
## number of bytes (octets) used.
@@ -665,9 +644,6 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
return ok(field)
else:
return err(Asn1Error.NoSupport)
inclass = false
ttag = 0
else:
return err(Asn1Error.NoSupport)

View File

@@ -17,7 +17,8 @@
import bearssl/[rsa, rand, hash]
import minasn1
import stew/[results, ctops]
import results
import stew/ctops
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import bearssl/rand
import secp256k1, stew/[byteutils, results], nimcrypto/[hash, sha2]
import secp256k1, results, stew/byteutils, nimcrypto/[hash, sha2]
export sha2, results, rand

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import chronos
import stew/results
import results
import peerid, stream/connection, transports/transport
export results
@@ -31,14 +31,14 @@ method connect*(
## a protocol
##
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.connect] abstract method not implemented!")
method connect*(
self: Dial, address: MultiAddress, allowUnknownPeerId = false
): Future[PeerId] {.base, async: (raises: [DialFailedError, CancelledError]).} =
## Connects to a peer and retrieve its PeerId
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.connect] abstract method not implemented!")
method dial*(
self: Dial, peerId: PeerId, protos: seq[string]
@@ -47,7 +47,7 @@ method dial*(
## existing connection
##
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.dial] abstract method not implemented!")
method dial*(
self: Dial,
@@ -60,14 +60,14 @@ method dial*(
## a connection if one doesn't exist already
##
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.dial] abstract method not implemented!")
method addTransport*(self: Dial, transport: Transport) {.base.} =
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.addTransport] abstract method not implemented!")
method tryDial*(
self: Dial, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.
base, async: (raises: [DialFailedError, CancelledError])
.} =
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.tryDial] abstract method not implemented!")

View File

@@ -9,8 +9,7 @@
import std/tables
import stew/results
import pkg/[chronos, chronicles, metrics]
import pkg/[chronos, chronicles, metrics, results]
import
dial,

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import std/sequtils
import chronos, chronicles, stew/results
import chronos, chronicles, results
import ../errors
type
@@ -59,7 +59,7 @@ proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [KeyError].} =
pa{T}.valueOr:
raise newException(KeyError, "Attritute not found")
raise newException(KeyError, "Attribute not found")
proc match*(pa, candidate: PeerAttributes): bool =
for f in pa.attributes:
@@ -86,12 +86,12 @@ type
method request*(
self: DiscoveryInterface, pa: PeerAttributes
) {.base, async: (raises: [DiscoveryError, CancelledError]).} =
doAssert(false, "Not implemented!")
doAssert(false, "[DiscoveryInterface.request] abstract method not implemented!")
method advertise*(
self: DiscoveryInterface
) {.base, async: (raises: [CancelledError, AdvertiseError]).} =
doAssert(false, "Not implemented!")
doAssert(false, "[DiscoveryInterface.advertise] abstract method not implemented!")
type
DiscoveryQuery* = ref object

View File

@@ -26,14 +26,14 @@ proc `==`*(a, b: RdvNamespace): bool {.borrow.}
method request*(
self: RendezVousInterface, pa: PeerAttributes
) {.async: (raises: [DiscoveryError, CancelledError]).} =
var namespace = ""
var namespace = Opt.none(string)
for attr in pa:
if attr.ofType(RdvNamespace):
namespace = string attr.to(RdvNamespace)
namespace = Opt.some(string attr.to(RdvNamespace))
elif attr.ofType(DiscoveryService):
namespace = string attr.to(DiscoveryService)
namespace = Opt.some(string attr.to(DiscoveryService))
elif attr.ofType(PeerId):
namespace = $attr.to(PeerId)
namespace = Opt.some($attr.to(PeerId))
else:
# unhandled type
return
@@ -44,8 +44,8 @@ method request*(
for address in pr.addresses:
peer.add(address.address)
peer.add(DiscoveryService(namespace))
peer.add(RdvNamespace(namespace))
peer.add(DiscoveryService(namespace.get()))
peer.add(RdvNamespace(namespace.get()))
self.onPeerFound(peer)
await sleepAsync(self.timeToRequest)

View File

@@ -171,6 +171,18 @@ proc ip6zoneVB(vb: var VBuffer): bool =
## IPv6 validateBuffer() implementation.
pathValidateBufferNoSlash(vb)
proc memoryStB(s: string, vb: var VBuffer): bool =
## Memory stringToBuffer() implementation.
pathStringToBuffer(s, vb)
proc memoryBtS(vb: var VBuffer, s: var string): bool =
## Memory bufferToString() implementation.
pathBufferToString(vb, s)
proc memoryVB(vb: var VBuffer): bool =
## Memory validateBuffer() implementation.
pathValidateBuffer(vb)
proc portStB(s: string, vb: var VBuffer): bool =
## Port number stringToBuffer() implementation.
var port: array[2, byte]
@@ -355,6 +367,10 @@ const
)
TranscoderDNS* =
Transcoder(stringToBuffer: dnsStB, bufferToString: dnsBtS, validateBuffer: dnsVB)
TranscoderMemory* = Transcoder(
stringToBuffer: memoryStB, bufferToString: memoryBtS, validateBuffer: memoryVB
)
ProtocolsList = [
MAProtocol(mcodec: multiCodec("ip4"), kind: Fixed, size: 4, coder: TranscoderIP4),
MAProtocol(mcodec: multiCodec("tcp"), kind: Fixed, size: 2, coder: TranscoderPort),
@@ -393,6 +409,9 @@ const
MAProtocol(mcodec: multiCodec("p2p-websocket-star"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("p2p-webrtc-star"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("p2p-webrtc-direct"), kind: Marker, size: 0),
MAProtocol(
mcodec: multiCodec("memory"), kind: Path, size: 0, coder: TranscoderMemory
),
]
DNSANY* = mapEq("dns")
@@ -453,6 +472,8 @@ const
CircuitRelay* = mapEq("p2p-circuit")
Memory* = mapEq("memory")
proc initMultiAddressCodeTable(): Table[MultiCodec, MAProtocol] {.compileTime.} =
for item in ProtocolsList:
result[item.mcodec] = item

View File

@@ -16,7 +16,8 @@
{.push raises: [].}
import tables
import stew/[base32, base58, base64, results]
import results
import stew/[base32, base58, base64]
type
MultiBaseStatus* {.pure.} = enum

View File

@@ -13,7 +13,7 @@
import tables, hashes
import vbuffer
import stew/results
import results
export results
## List of officially supported codecs can BE found here
@@ -396,6 +396,7 @@ const MultiCodecList = [
("onion3", 0x01BD),
("p2p-circuit", 0x0122),
("libp2p-peer-record", 0x0301),
("memory", 0x0309),
("dns", 0x35),
("dns4", 0x36),
("dns6", 0x37),
@@ -403,6 +404,7 @@ const MultiCodecList = [
# IPLD formats
("dag-pb", 0x70),
("dag-cbor", 0x71),
("libp2p-key", 0x72),
("dag-json", 0x129),
("git-raw", 0x78),
("eth-block", 0x90),

View File

@@ -27,7 +27,7 @@ import tables
import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
import varint, vbuffer, multicodec, multibase
import stew/base58
import stew/results
import results
export results
# This is workaround for Nim `import` bug.
export sha, sha2, keccak, blake2, hash, utils

View File

@@ -52,7 +52,7 @@ method newStream*(
): Future[Connection] {.
base, async: (raises: [CancelledError, LPStreamError, MuxerError], raw: true)
.} =
raiseAssert("Not implemented!")
raiseAssert("[Muxer.newStream] abstract method not implemented!")
method close*(m: Muxer) {.base, async: (raises: []).} =
if m.connection != nil:
@@ -68,4 +68,4 @@ proc new*(
muxerProvider
method getStreams*(m: Muxer): seq[Connection] {.base, gcsafe.} =
raiseAssert("Not implemented!")
raiseAssert("[Muxer.getStreams] abstract method not implemented!")

View File

@@ -22,7 +22,7 @@ method resolveTxt*(
self: NameResolver, address: string
): Future[seq[string]] {.async: (raises: [CancelledError]), base.} =
## Get TXT record
raiseAssert "Not implemented!"
raiseAssert "[NameResolver.resolveTxt] abstract method not implemented!"
method resolveIp*(
self: NameResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
@@ -30,7 +30,7 @@ method resolveIp*(
async: (raises: [CancelledError, TransportAddressError]), base
.} =
## Resolve the specified address
raiseAssert "Not implemented!"
raiseAssert "[NameResolver.resolveIp] abstract method not implemented!"
proc getHostname*(ma: MultiAddress): string =
let

View File

@@ -14,7 +14,8 @@
import
std/[hashes, strutils],
stew/[base58, results],
stew/base58,
results,
chronicles,
nimcrypto/utils,
utility,

View File

@@ -11,7 +11,7 @@
{.push public.}
import std/sequtils
import pkg/[chronos, chronicles, stew/results]
import pkg/[chronos, chronicles, results]
import peerid, multiaddress, multicodec, crypto/crypto, routing_record, errors, utility
export peerid, multiaddress, crypto, routing_record, errors, results

View File

@@ -160,10 +160,10 @@ proc updatePeerInfo*(
peerStore[KeyBook][info.peerId] = pubkey
info.agentVersion.withValue(agentVersion):
peerStore[AgentBook][info.peerId] = agentVersion.string
peerStore[AgentBook][info.peerId] = agentVersion
info.protoVersion.withValue(protoVersion):
peerStore[ProtoVersionBook][info.peerId] = protoVersion.string
peerStore[ProtoVersionBook][info.peerId] = protoVersion
if info.protos.len > 0:
peerStore[ProtoBook][info.peerId] = info.protos

View File

@@ -11,7 +11,7 @@
{.push raises: [].}
import ../varint, ../utility, stew/[endians2, results]
import ../varint, ../utility, stew/endians2, results
export results, utility
{.push public.}

View File

@@ -96,7 +96,7 @@ method dialMe*(
of ResponseStatus.Ok:
try:
response.ma.tryGet()
except:
except ResultError[void]:
raiseAssert("checked with if")
of ResponseStatus.DialError:
raise newException(

View File

@@ -9,8 +9,8 @@
{.push raises: [].}
import stew/[results, objects]
import chronos, chronicles
import stew/objects
import results, chronos, chronicles
import ../../../multiaddress, ../../../peerid, ../../../errors
import ../../../protobuf/minprotobuf

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import std/[sets, sequtils]
import stew/results
import results
import chronos, chronicles
import
../../protocol,

View File

@@ -10,7 +10,8 @@
{.push raises: [].}
import macros
import stew/[objects, results]
import stew/objects
import results
import ../../../peerinfo, ../../../signed_envelope
import ../../../protobuf/minprotobuf

View File

@@ -13,8 +13,7 @@
{.push raises: [].}
import std/[sequtils, options, strutils, sugar]
import stew/results
import chronos, chronicles
import results, chronos, chronicles
import
../protobuf/minprotobuf,
../peerinfo,

View File

@@ -9,7 +9,7 @@
{.push raises: [].}
import chronos, stew/results
import chronos, results
import ../stream/connection
export results
@@ -66,21 +66,6 @@ template `handler`*(p: LPProtocol, conn: Connection, proto: string): Future[void
func `handler=`*(p: LPProtocol, handler: LPProtoHandler) =
p.handlerImpl = handler
# Callbacks that are annotated with `{.async: (raises).}` explicitly
# document the types of errors that they may raise, but are not compatible
# with `LPProtoHandler` and need to use a custom `proc` type.
# They are internally wrapped into a `LPProtoHandler`, but still allow the
# compiler to check that their `{.async: (raises).}` annotation is correct.
# https://github.com/nim-lang/Nim/issues/23432
func `handler=`*[E](
p: LPProtocol,
handler: proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
) {.deprecated: "Use `LPProtoHandler` that explicitly specifies raised exceptions.".} =
proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
await handler(conn, proto)
p.handlerImpl = wrap
proc new*(
T: type LPProtocol,
codecs: seq[string],
@@ -96,17 +81,3 @@ proc new*(
else:
maxIncomingStreams,
)
proc new*[E](
T: type LPProtocol,
codecs: seq[string],
handler: proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
maxIncomingStreams: Opt[int] | int = Opt.none(int),
): T {.
deprecated:
"Use `new` with `LPProtoHandler` that explicitly specifies raised exceptions."
.} =
proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
await handler(conn, proto)
T.new(codec, wrap, maxIncomingStreams)

View File

@@ -11,7 +11,7 @@
import tables, sequtils, sugar, sets
import metrics except collect
import chronos, chronicles, bearssl/rand, stew/[byteutils, objects, results]
import chronos, chronicles, bearssl/rand, stew/[byteutils, objects]
import
./protocol,
../protobuf/minprotobuf,
@@ -37,6 +37,9 @@ const
RendezVousCodec* = "/rendezvous/1.0.0"
MinimumDuration* = 2.hours
MaximumDuration = 72.hours
MaximumMessageLen = 1 shl 22 # 4MB
MinimumNamespaceLen = 1
MaximumNamespaceLen = 255
RegistrationLimitPerPeer = 1000
DiscoverLimit = 1000'u64
SemaphoreDefaultSize = 5
@@ -61,7 +64,7 @@ type
Cookie = object
offset: uint64
ns: string
ns: Opt[string]
Register = object
ns: string
@@ -77,7 +80,7 @@ type
ns: string
Discover = object
ns: string
ns: Opt[string]
limit: Opt[uint64]
cookie: Opt[seq[byte]]
@@ -98,7 +101,8 @@ type
proc encode(c: Cookie): ProtoBuffer =
result = initProtoBuffer()
result.write(1, c.offset)
result.write(2, c.ns)
if c.ns.isSome():
result.write(2, c.ns.get())
result.finish()
proc encode(r: Register): ProtoBuffer =
@@ -125,7 +129,8 @@ proc encode(u: Unregister): ProtoBuffer =
proc encode(d: Discover): ProtoBuffer =
result = initProtoBuffer()
result.write(1, d.ns)
if d.ns.isSome():
result.write(1, d.ns.get())
d.limit.withValue(limit):
result.write(2, limit)
d.cookie.withValue(cookie):
@@ -159,13 +164,17 @@ proc encode(msg: Message): ProtoBuffer =
result.finish()
proc decode(_: typedesc[Cookie], buf: seq[byte]): Opt[Cookie] =
var c: Cookie
var
c: Cookie
ns: string
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, c.offset)
r2 = pb.getRequiredField(2, c.ns)
r2 = pb.getField(2, ns)
if r1.isErr() or r2.isErr():
return Opt.none(Cookie)
if r2.get(false):
c.ns = Opt.some(ns)
Opt.some(c)
proc decode(_: typedesc[Register], buf: seq[byte]): Opt[Register] =
@@ -217,13 +226,16 @@ proc decode(_: typedesc[Discover], buf: seq[byte]): Opt[Discover] =
d: Discover
limit: uint64
cookie: seq[byte]
ns: string
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, d.ns)
r1 = pb.getField(1, ns)
r2 = pb.getField(2, limit)
r3 = pb.getField(3, cookie)
if r1.isErr() or r2.isErr() or r3.isErr:
return Opt.none(Discover)
if r1.get(false):
d.ns = Opt.some(ns)
if r2.get(false):
d.limit = Opt.some(limit)
if r3.get(false):
@@ -413,10 +425,10 @@ proc save(
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
trace "Received Register", peerId = conn.peerId, ns = r.ns
libp2p_rendezvous_register.inc()
if r.ns.len notin 1 .. 255:
if r.ns.len < MinimumNamespaceLen or r.ns.len > MaximumNamespaceLen:
return conn.sendRegisterResponseError(InvalidNamespace)
let ttl = r.ttl.get(rdv.minTTL)
if ttl notin rdv.minTTL .. rdv.maxTTL:
if ttl < rdv.minTTL or ttl > rdv.maxTTL:
return conn.sendRegisterResponseError(InvalidTTL)
let pr = checkPeerRecord(r.signedPeerRecord, conn.peerId)
if pr.isErr():
@@ -444,7 +456,7 @@ proc discover(
) {.async: (raises: [CancelledError, LPStreamError]).} =
trace "Received Discover", peerId = conn.peerId, ns = d.ns
libp2p_rendezvous_discover.inc()
if d.ns.len notin 0 .. 255:
if d.ns.isSome() and d.ns.get().len > MaximumNamespaceLen:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
var limit = min(DiscoverLimit, d.limit.get(DiscoverLimit))
@@ -457,20 +469,19 @@ proc discover(
return
else:
Cookie(offset: rdv.registered.low().uint64 - 1)
if cookie.ns != d.ns or
cookie.offset notin rdv.registered.low().uint64 .. rdv.registered.high().uint64:
if d.ns.isSome() and cookie.ns.isSome() and cookie.ns.get() != d.ns.get() or
cookie.offset < rdv.registered.low().uint64 or
cookie.offset > rdv.registered.high().uint64:
cookie = Cookie(offset: rdv.registered.low().uint64 - 1)
let
nsSalted = d.ns & rdv.salt
namespaces =
if d.ns != "":
try:
rdv.namespaces[nsSalted]
except KeyError:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
else:
toSeq(cookie.offset.int .. rdv.registered.high())
let namespaces =
if d.ns.isSome():
try:
rdv.namespaces[d.ns.get() & rdv.salt]
except KeyError:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
else:
toSeq(max(cookie.offset.int, rdv.registered.offset) .. rdv.registered.high())
if namespaces.len() == 0:
await conn.sendDiscoverResponse(@[], Cookie())
return
@@ -514,15 +525,15 @@ proc advertisePeer(
rdv.sema.release()
await rdv.sema.acquire()
discard await advertiseWrap().withTimeout(5.seconds)
await advertiseWrap()
proc advertise*(
rdv: RendezVous, ns: string, ttl: Duration, peers: seq[PeerId]
) {.async: (raises: [CancelledError, AdvertiseError]).} =
if ns.len notin 1 .. 255:
if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
raise newException(AdvertiseError, "Invalid namespace")
if ttl notin rdv.minDuration .. rdv.maxDuration:
if ttl < rdv.minDuration or ttl > rdv.maxDuration:
raise newException(AdvertiseError, "Invalid time to live: " & $ttl)
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
@@ -537,7 +548,7 @@ proc advertise*(
let futs = collect(newSeq()):
for peer in peers:
trace "Send Advertise", peerId = peer, ns
rdv.advertisePeer(peer, msg.buffer)
rdv.advertisePeer(peer, msg.buffer).withTimeout(5.seconds)
await allFutures(futs)
@@ -561,7 +572,7 @@ proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
@[]
proc request*(
rdv: RendezVous, ns: string, l: int = DiscoverLimit.int, peers: seq[PeerId]
rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int, peers: seq[PeerId]
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
var
s: Table[PeerId, (PeerRecord, Register)]
@@ -570,7 +581,7 @@ proc request*(
if l <= 0 or l > DiscoverLimit.int:
raise newException(AdvertiseError, "Invalid limit")
if ns.len notin 0 .. 255:
if ns.isSome() and ns.get().len > MaximumNamespaceLen:
raise newException(AdvertiseError, "Invalid namespace")
limit = l.uint64
@@ -582,15 +593,18 @@ proc request*(
await conn.close()
d.limit = Opt.some(limit)
d.cookie =
try:
Opt.some(rdv.cookiesSaved[peer][ns])
except KeyError as exc:
if ns.isSome():
try:
Opt.some(rdv.cookiesSaved[peer][ns.get()])
except KeyError, CatchableError:
Opt.none(seq[byte])
else:
Opt.none(seq[byte])
await conn.writeLp(
encode(Message(msgType: MessageType.Discover, discover: Opt.some(d))).buffer
)
let
buf = await conn.readLp(65536)
buf = await conn.readLp(MaximumMessageLen)
msgRcv = Message.decode(buf).valueOr:
debug "Message undecodable"
return
@@ -604,12 +618,14 @@ proc request*(
trace "Cannot discover", ns, status = resp.status, text = resp.text
return
resp.cookie.withValue(cookie):
if cookie.len() < 1000 and
rdv.cookiesSaved.hasKeyOrPut(peer, {ns: cookie}.toTable()):
try:
rdv.cookiesSaved[peer][ns] = cookie
except KeyError:
raiseAssert "checked with hasKeyOrPut"
if ns.isSome:
let namespace = ns.get()
if cookie.len() < 1000 and
rdv.cookiesSaved.hasKeyOrPut(peer, {namespace: cookie}.toTable()):
try:
rdv.cookiesSaved[peer][namespace] = cookie
except KeyError:
raiseAssert "checked with hasKeyOrPut"
for r in resp.registrations:
if limit == 0:
return
@@ -632,8 +648,9 @@ proc request*(
else:
s[pr.peerId] = (pr, r)
limit.dec()
for (_, r) in s.values():
rdv.save(ns, peer, r, false)
if ns.isSome():
for (_, r) in s.values():
rdv.save(ns.get(), peer, r, false)
for peer in peers:
if limit == 0:
@@ -652,10 +669,15 @@ proc request*(
return toSeq(s.values()).mapIt(it[0])
proc request*(
rdv: RendezVous, ns: string, l: int = DiscoverLimit.int
rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
await rdv.request(ns, l, rdv.peers)
proc request*(
rdv: RendezVous, l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
await rdv.request(Opt.none(string), l, rdv.peers)
proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
let nsSalted = ns & rdv.salt
try:
@@ -668,7 +690,7 @@ proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
proc unsubscribe*(
rdv: RendezVous, ns: string, peerIds: seq[PeerId]
) {.async: (raises: [RendezVousError, CancelledError]).} =
if ns.len notin 1 .. 255:
if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
raise newException(RendezVousError, "Invalid namespace")
let msg = encode(
@@ -688,7 +710,7 @@ proc unsubscribe*(
for peer in peerIds:
unsubscribePeer(peer)
discard await allFutures(futs).withTimeout(5.seconds)
await allFutures(futs)
proc unsubscribe*(
rdv: RendezVous, ns: string
@@ -784,8 +806,10 @@ proc new*(
rdv.setup(switch)
return rdv
proc deletesRegister(rdv: RendezVous) {.async: (raises: [CancelledError]).} =
heartbeat "Register timeout", 1.minutes:
proc deletesRegister(
rdv: RendezVous, interval = 1.minutes
) {.async: (raises: [CancelledError]).} =
heartbeat "Register timeout", interval:
let n = Moment.now()
var total = 0
rdv.registered.flushIfIt(it.expiration < n)

View File

@@ -20,7 +20,6 @@ import ../../peerid
import ../../peerinfo
import ../../protobuf/minprotobuf
import ../../utility
import ../../errors
import secure, ../../crypto/[crypto, chacha20poly1305, curve25519, hkdf]

View File

@@ -11,15 +11,14 @@
{.push raises: [].}
import std/[strformat]
import stew/results
import results
import chronos, chronicles
import
../protocol,
../../stream/streamseq,
../../stream/connection,
../../multiaddress,
../../peerinfo,
../../errors
../../peerinfo
export protocol, results
@@ -82,7 +81,7 @@ method readMessage*(
): Future[seq[byte]] {.
async: (raises: [CancelledError, LPStreamError], raw: true), base
.} =
raiseAssert("Not implemented!")
raiseAssert("[SecureConn.readMessage] abstract method not implemented!")
method getWrapped*(s: SecureConn): Connection =
s.stream
@@ -92,7 +91,7 @@ method handshake*(
): Future[SecureConn] {.
async: (raises: [CancelledError, LPStreamError], raw: true), base
.} =
raiseAssert("Not implemented!")
raiseAssert("[Secure.handshake] abstract method not implemented!")
proc handleConn(
s: Secure, conn: Connection, initiator: bool, peerId: Opt[PeerId]

View File

@@ -12,7 +12,7 @@
{.push raises: [].}
import std/[sequtils, times]
import pkg/stew/results
import pkg/results
import multiaddress, multicodec, peerid, protobuf/minprotobuf, signed_envelope
export peerid, multiaddress, signed_envelope

View File

@@ -10,8 +10,8 @@
{.push raises: [].}
import std/sequtils
import stew/[byteutils, results, endians2]
import chronos, chronos/transports/[osnet, ipnet], chronicles
import stew/endians2
import chronos, chronos/transports/[osnet, ipnet], chronicles, results
import ../[multiaddress, multicodec]
import ../switch
@@ -73,7 +73,6 @@ proc new*(
return T(networkInterfaceProvider: networkInterfaceProvider)
proc getProtocolArgument*(ma: MultiAddress, codec: MultiCodec): MaResult[seq[byte]] =
var buffer: seq[byte]
for item in ma:
let
ritem = ?item

View File

@@ -12,7 +12,7 @@
{.push raises: [].}
import std/sugar
import pkg/stew/[results, byteutils]
import pkg/stew/byteutils, pkg/results
import multicodec, crypto/crypto, protobuf/minprotobuf, vbuffer
export crypto

View File

@@ -0,0 +1,63 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import pkg/chronos
import connection, bufferstream
export connection
type
WriteHandler = proc(data: seq[byte]): Future[void] {.
async: (raises: [CancelledError, LPStreamError])
.}
BridgeStream* = ref object of BufferStream
writeHandler: WriteHandler
closeHandler: proc(): Future[void] {.async: (raises: []).}
method write*(
s: BridgeStream, msg: seq[byte]
): Future[void] {.public, async: (raises: [CancelledError, LPStreamError], raw: true).} =
s.writeHandler(msg)
method closeImpl*(s: BridgeStream): Future[void] {.async: (raises: [], raw: true).} =
if not isNil(s.closeHandler):
discard s.closeHandler()
procCall BufferStream(s).closeImpl()
method getWrapped*(s: BridgeStream): Connection =
nil
proc bridgedConnections*(
closeTogether: bool = true, dirA = Direction.In, dirB = Direction.In
): (BridgeStream, BridgeStream) =
let connA = BridgeStream()
let connB = BridgeStream()
connA.dir = dirA
connB.dir = dirB
connA.initStream()
connB.initStream()
connA.writeHandler = proc(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
connB.pushData(data)
connB.writeHandler = proc(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
connA.pushData(data)
if closeTogether:
connA.closeHandler = proc(): Future[void] {.async: (raises: []).} =
await noCancel connB.close()
connB.closeHandler = proc(): Future[void] {.async: (raises: []).} =
await noCancel connA.close()
return (connA, connB)

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import std/[strformat]
import stew/results
import results
import chronos, chronicles, metrics
import connection
import ../utility

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import std/[hashes, oids, strformat]
import stew/results
import results
import chronicles, chronos, metrics
import lpstream, ../multiaddress, ../peerinfo, ../errors
@@ -124,7 +124,7 @@ proc timeoutMonitor(s: Connection) {.async: (raises: []).} =
return
method getWrapped*(s: Connection): Connection {.base.} =
raiseAssert("Not implemented!")
raiseAssert("[Connection.getWrapped] abstract method not implemented!")
when defined(libp2p_agents_metrics):
proc setShortAgent*(s: Connection, shortAgent: string) =

View File

@@ -133,7 +133,7 @@ method readOnce*(
## Reads whatever is available in the stream,
## up to `nbytes`. Will block if nothing is
## available
raiseAssert("Not implemented!")
raiseAssert("[LPStream.readOnce] abstract method not implemented!")
proc readExactly*(
s: LPStream, pbytes: pointer, nbytes: int
@@ -242,7 +242,7 @@ method write*(
async: (raises: [CancelledError, LPStreamError], raw: true), base, public
.} =
# Write `msg` to stream, waiting for the write to be finished
raiseAssert("Not implemented!")
raiseAssert("[LPStream.write] abstract method not implemented!")
proc writeLp*(
s: LPStream, msg: openArray[byte]

View File

@@ -77,7 +77,7 @@ method setup*(
return true
method run*(self: Service, switch: Switch) {.base, async: (raises: [CancelledError]).} =
doAssert(false, "Not implemented!")
doAssert(false, "[Service.run] abstract method not implemented!")
method stop*(
self: Service, switch: Switch

View File

@@ -0,0 +1,122 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import locks
import tables
import pkg/chronos
import pkg/chronicles
import ./transport
import ../multiaddress
import ../stream/connection
import ../stream/bridgestream
type
MemoryTransportError* = object of transport.TransportError
MemoryTransportAcceptStopped* = object of MemoryTransportError
type MemoryListener* = object
address: string
accept: Future[Connection]
onListenerEnd: proc(address: string) {.closure, gcsafe, raises: [].}
proc init(
_: type[MemoryListener],
address: string,
onListenerEnd: proc(address: string) {.closure, gcsafe, raises: [].},
): MemoryListener =
return MemoryListener(
accept: newFuture[Connection]("MemoryListener.accept"),
address: address,
onListenerEnd: onListenerEnd,
)
proc close*(self: MemoryListener) =
if not (self.accept.finished):
self.accept.fail(newException(MemoryTransportAcceptStopped, "Listener closed"))
self.onListenerEnd(self.address)
proc accept*(
self: MemoryListener
): Future[Connection] {.gcsafe, raises: [CatchableError].} =
return self.accept
proc dial*(
self: MemoryListener
): Future[Connection] {.gcsafe, raises: [CatchableError].} =
let (connA, connB) = bridgedConnections()
self.onListenerEnd(self.address)
self.accept.complete(connA)
let dFut = newFuture[Connection]("MemoryListener.dial")
dFut.complete(connB)
return dFut
type memoryConnManager = ref object
listeners: Table[string, MemoryListener]
connections: Table[string, Connection]
lock: Lock
proc init(_: type[memoryConnManager]): memoryConnManager =
var m = memoryConnManager()
initLock(m.lock)
return m
proc onListenerEnd(
self: memoryConnManager
): proc(address: string) {.closure, gcsafe, raises: [].} =
proc cb(address: string) {.closure, gcsafe, raises: [].} =
acquire(self.lock)
defer:
release(self.lock)
try:
if address in self.listeners:
self.listeners.del(address)
except KeyError:
raiseAssert "checked with if"
return cb
proc accept*(
self: memoryConnManager, address: string
): MemoryListener {.raises: [MemoryTransportError].} =
acquire(self.lock)
defer:
release(self.lock)
if address in self.listeners:
raise newException(MemoryTransportError, "Memory address already in use")
let listener = MemoryListener.init(address, self.onListenerEnd())
self.listeners[address] = listener
return listener
proc dial*(
self: memoryConnManager, address: string
): MemoryListener {.raises: [MemoryTransportError].} =
acquire(self.lock)
defer:
release(self.lock)
if address notin self.listeners:
raise newException(MemoryTransportError, "No memory listener found")
try:
return self.listeners[address]
except KeyError:
raiseAssert "checked with if"
let instance: memoryConnManager = memoryConnManager.init()
proc getInstance*(): memoryConnManager {.gcsafe.} =
{.gcsafe.}:
instance

View File

@@ -0,0 +1,127 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
## Memory transport implementation
import std/sequtils
import pkg/chronos
import pkg/chronicles
import ./transport
import ../multiaddress
import ../stream/connection
import ../crypto/crypto
import ../upgrademngrs/upgrade
import ./memorymanager
export connection
export MemoryTransportError, MemoryTransportAcceptStopped
const MemoryAutoAddress* = "/memory/*"
logScope:
topics = "libp2p memorytransport"
type MemoryTransport* = ref object of Transport
rng: ref HmacDrbgContext
connections: seq[Connection]
listener: Opt[MemoryListener]
proc new*(
T: typedesc[MemoryTransport],
upgrade: Upgrade = Upgrade(),
rng: ref HmacDrbgContext = newRng(),
): T =
T(upgrader: upgrade, rng: rng)
proc listenAddress(self: MemoryTransport, ma: MultiAddress): MultiAddress =
if $ma != MemoryAutoAddress:
return ma
# when special address is used `/memory/*` use any free address.
# here we assume that any random generated address will be free.
var randomBuf: array[10, byte]
hmacDrbgGenerate(self.rng[], randomBuf)
return MultiAddress.init("/memory/" & toHex(randomBuf)).get()
method start*(
self: MemoryTransport, addrs: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError]).} =
if self.running:
return
trace "starting memory transport on addrs", address = $addrs
self.addrs = addrs.mapIt(self.listenAddress(it))
self.running = true
method stop*(self: MemoryTransport) {.async: (raises: []).} =
if not self.running:
return
trace "stopping memory transport", address = $self.addrs
self.running = false
# closing listener will throw interruption error to caller of accept()
let listener = self.listener
if listener.isSome:
listener.get().close()
# end all connections
await noCancel allFutures(self.connections.mapIt(it.close()))
method accept*(
self: MemoryTransport
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
if not self.running:
raise newException(MemoryTransportError, "Transport closed, no more connections!")
var listener: MemoryListener
try:
listener = getInstance().accept($self.addrs[0])
self.listener = Opt.some(listener)
let conn = await listener.accept()
self.connections.add(conn)
self.listener = Opt.none(MemoryListener)
return conn
except CancelledError as e:
listener.close()
raise e
except MemoryTransportError as e:
raise e
except CatchableError:
raiseAssert "should never happen"
method dial*(
self: MemoryTransport,
hostname: string,
ma: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId),
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
try:
let listener = getInstance().dial($ma)
let conn = await listener.dial()
self.connections.add(conn)
return conn
except CancelledError as e:
raise e
except MemoryTransportError as e:
raise e
except CatchableError:
raiseAssert "should never happen"
proc dial*(
self: MemoryTransport, ma: MultiAddress, peerId: Opt[PeerId] = Opt.none(PeerId)
): Future[Connection] {.gcsafe.} =
self.dial("", ma)
method handles*(self: MemoryTransport, ma: MultiAddress): bool {.gcsafe, raises: [].} =
if procCall Transport(self).handles(ma):
if ma.protocols.isOk:
return Memory.match(ma)

View File

@@ -2,6 +2,7 @@ import std/sequtils
import pkg/chronos
import pkg/chronicles
import pkg/quic
import results
import ../multiaddress
import ../multicodec
import ../stream/connection
@@ -9,6 +10,7 @@ import ../wire
import ../muxers/muxer
import ../upgrademngrs/upgrade
import ./transport
import tls/certificate
export multiaddress
export multicodec
@@ -23,6 +25,9 @@ type
QuicConnection = quic.Connection
QuicTransportError* = object of transport.TransportError
QuicTransportDialError* = object of transport.TransportDialError
QuicTransportAcceptStopped* = object of QuicTransportError
const alpn = "libp2p"
# Stream
type QuicStream* = ref object of P2PConnection
@@ -81,15 +86,19 @@ method close*(session: QuicSession) {.async: (raises: []).} =
proc getStream*(
session: QuicSession, direction = Direction.In
): Future[QuicStream] {.async: (raises: [CatchableError]).} =
var stream: Stream
case direction
of Direction.In:
stream = await session.connection.incomingStream()
of Direction.Out:
stream = await session.connection.openStream()
await stream.write(@[]) # QUIC streams do not exist until data is sent
return QuicStream.new(stream, session.observedAddr, session.peerId)
): Future[QuicStream] {.async: (raises: [QuicTransportError]).} =
try:
var stream: Stream
case direction
of Direction.In:
stream = await session.connection.incomingStream()
of Direction.Out:
stream = await session.connection.openStream()
await stream.write(@[]) # QUIC streams do not exist until data is sent
return QuicStream.new(stream, session.observedAddr, session.peerId)
except CatchableError as exc:
# TODO: incomingStream is using {.async.} with no raises
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
method getWrapped*(self: QuicSession): P2PConnection =
nil
@@ -131,19 +140,65 @@ method handle*(m: QuicMuxer): Future[void] {.async: (raises: []).} =
method close*(m: QuicMuxer) {.async: (raises: []).} =
try:
await m.quicSession.close()
m.handleFut.cancel()
m.handleFut.cancelSoon()
except CatchableError as exc:
discard
# Transport
type QuicUpgrade = ref object of Upgrade
type CertGenerator =
proc(kp: KeyPair): CertificateX509 {.gcsafe, raises: [TLSCertificateError].}
type QuicTransport* = ref object of Transport
listener: Listener
client: QuicClient
privateKey: PrivateKey
connections: seq[P2PConnection]
rng: ref HmacDrbgContext
certGenerator: CertGenerator
func new*(_: type QuicTransport, u: Upgrade): QuicTransport =
QuicTransport(upgrader: QuicUpgrade(ms: u.ms))
proc makeCertificateVerifier(): CertificateVerifier =
proc certificateVerifier(serverName: string, certificatesDer: seq[seq[byte]]): bool =
if certificatesDer.len != 1:
trace "CertificateVerifier: expected one certificate in the chain",
cert_count = certificatesDer.len
return false
let cert =
try:
parse(certificatesDer[0])
except CertificateParsingError as e:
trace "CertificateVerifier: failed to parse certificate", msg = e.msg
return false
return cert.verify()
return CustomCertificateVerifier.init(certificateVerifier)
proc defaultCertGenerator(
kp: KeyPair
): CertificateX509 {.gcsafe, raises: [TLSCertificateError].} =
return generateX509(kp, encodingFormat = EncodingFormat.PEM)
proc new*(_: type QuicTransport, u: Upgrade, privateKey: PrivateKey): QuicTransport =
return QuicTransport(
upgrader: QuicUpgrade(ms: u.ms),
privateKey: privateKey,
certGenerator: defaultCertGenerator,
)
proc new*(
_: type QuicTransport,
u: Upgrade,
privateKey: PrivateKey,
certGenerator: CertGenerator,
): QuicTransport =
return QuicTransport(
upgrader: QuicUpgrade(ms: u.ms),
privateKey: privateKey,
certGenerator: certGenerator,
)
method handles*(transport: QuicTransport, address: MultiAddress): bool {.raises: [].} =
if not procCall Transport(transport).handles(address):
@@ -155,12 +210,32 @@ method start*(
) {.async: (raises: [LPError, transport.TransportError]).} =
doAssert self.listener.isNil, "start() already called"
#TODO handle multiple addr
let pubkey = self.privateKey.getPublicKey().valueOr:
doAssert false, "could not obtain public key"
return
try:
self.listener = listen(initTAddress(addrs[0]).tryGet)
if self.rng.isNil:
self.rng = newRng()
let cert = self.certGenerator(KeyPair(seckey: self.privateKey, pubkey: pubkey))
let tlsConfig = TLSConfig.init(
cert.certificate, cert.privateKey, @[alpn], Opt.some(makeCertificateVerifier())
)
self.client = QuicClient.init(tlsConfig, rng = self.rng)
self.listener =
QuicServer.init(tlsConfig, rng = self.rng).listen(initTAddress(addrs[0]).tryGet)
await procCall Transport(self).start(addrs)
self.addrs[0] =
MultiAddress.init(self.listener.localAddress(), IPPROTO_UDP).tryGet() &
MultiAddress.init("/quic-v1").get()
except QuicConfigError as exc:
doAssert false, "invalid quic setup: " & $exc.msg
except TLSCertificateError as exc:
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
except QuicError as exc:
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
except TransportOsError as exc:
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
self.running = true
@@ -174,50 +249,69 @@ method stop*(transport: QuicTransport) {.async: (raises: []).} =
await transport.listener.stop()
except CatchableError as exc:
trace "Error shutting down Quic transport", description = exc.msg
transport.listener.destroy()
transport.running = false
transport.listener = nil
proc wrapConnection(
transport: QuicTransport, connection: QuicConnection
): P2PConnection {.raises: [Defect, TransportOsError, LPError].} =
): QuicSession {.raises: [TransportOsError, MaError].} =
let
remoteAddr = connection.remoteAddress()
observedAddr =
MultiAddress.init(remoteAddr, IPPROTO_UDP).get() &
MultiAddress.init("/quic-v1").get()
conres = QuicSession(connection: connection, observedAddr: Opt.some(observedAddr))
conres.initStream()
session = QuicSession(connection: connection, observedAddr: Opt.some(observedAddr))
session.initStream()
transport.connections.add(session)
transport.connections.add(conres)
proc onClose() {.async: (raises: []).} =
await noCancel conres.join()
transport.connections.keepItIf(it != conres)
await noCancel session.join()
transport.connections.keepItIf(it != session)
trace "Cleaned up client"
asyncSpawn onClose()
return conres
return session
method accept*(
self: QuicTransport
): Future[P2PConnection] {.async: (raises: [transport.TransportError, CancelledError]).} =
): Future[connection.Connection] {.
async: (raises: [transport.TransportError, CancelledError])
.} =
doAssert not self.listener.isNil, "call start() before calling accept()"
if not self.running:
# stop accept only when transport is stopped (not when error occurs)
raise newException(QuicTransportAcceptStopped, "Quic transport stopped")
try:
let connection = await self.listener.accept()
return self.wrapConnection(connection)
except CancelledError as e:
raise e
except CatchableError as e:
raise (ref QuicTransportError)(msg: e.msg, parent: e)
except CancelledError as exc:
raise exc
except QuicError as exc:
debug "Quic Error", description = exc.msg
except MaError as exc:
debug "Multiaddr Error", description = exc.msg
except CatchableError as exc: # TODO: removing this requires async/raises in nim-quic
info "Unexpected error accepting quic connection", description = exc.msg
except TransportOsError as exc:
debug "OS Error", description = exc.msg
method dial*(
self: QuicTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId),
): Future[P2PConnection] {.async: (raises: [transport.TransportError, CancelledError]).} =
): Future[connection.Connection] {.
async: (raises: [transport.TransportError, CancelledError])
.} =
try:
let connection = await dial(initTAddress(address).tryGet)
return self.wrapConnection(connection)
let quicConnection = await self.client.dial(initTAddress(address).tryGet)
return self.wrapConnection(quicConnection)
except CancelledError as e:
raise e
except CatchableError as e:
@@ -227,8 +321,13 @@ method upgrade*(
self: QuicTransport, conn: P2PConnection, peerId: Opt[PeerId]
): Future[Muxer] {.async: (raises: [CancelledError, LPError]).} =
let qs = QuicSession(conn)
if peerId.isSome:
qs.peerId = peerId.get()
qs.peerId =
if peerId.isSome:
peerId.get()
else:
let certificates = qs.connection.certificates()
let cert = parse(certificates[0])
cert.peerId()
let muxer = QuicMuxer(quicSession: qs, connection: conn)
muxer.streamHandler = proc(conn: P2PConnection) {.async: (raises: []).} =

View File

@@ -0,0 +1,962 @@
#include <openssl/bn.h>
#include <openssl/ec.h>
#include <openssl/err.h>
#include <openssl/evp.h>
#include <openssl/objects.h>
#include <openssl/pem.h>
#include <openssl/rand.h>
#include <openssl/x509.h>
#include <openssl/x509v3.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
#include <openssl/core_names.h>
#include <openssl/param_build.h>
#include <openssl/types.h>
#else
#include <openssl/rand_drbg.h>
#endif
#include "certificate.h"
#define LIBP2P_OID "1.3.6.1.4.1.53594.1.1"
struct cert_context_s {
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
OSSL_LIB_CTX *lib_ctx; /* OpenSSL library context */
EVP_RAND_CTX *drbg; /* DRBG context */
#else
RAND_DRBG *drbg;
#endif
};
struct cert_key_s {
EVP_PKEY *pkey; /* OpenSSL EVP_PKEY */
};
// Function to initialize CTR_DRBG
cert_error_t cert_init_drbg(const char *seed, size_t seed_len,
cert_context_t *ctx) {
if (!seed) {
return CERT_ERROR_NULL_PARAM;
}
// Allocate context
struct cert_context_s *c = calloc(1, sizeof(struct cert_context_s));
if (c == NULL) {
return CERT_ERROR_MEMORY;
}
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
EVP_RAND_CTX *drbg = NULL;
EVP_RAND *rand_algo = NULL;
OSSL_LIB_CTX *libctx = OSSL_LIB_CTX_new(); // Create a new library context
if (!libctx) {
return CERT_ERROR_MEMORY;
}
rand_algo = EVP_RAND_fetch(libctx, "CTR-DRBG", NULL);
if (!rand_algo) {
return CERT_ERROR_DRBG_INIT;
}
drbg = EVP_RAND_CTX_new(rand_algo, NULL);
EVP_RAND_free(rand_algo); // Free the algorithm object, no longer needed
if (!drbg) {
return CERT_ERROR_MEMORY;
}
OSSL_PARAM params[2];
params[0] = OSSL_PARAM_construct_utf8_string(OSSL_DRBG_PARAM_CIPHER,
"AES-256-CTR", 0);
params[1] = OSSL_PARAM_construct_end();
if (!EVP_RAND_CTX_set_params(drbg, params)) {
RANDerr(0, RAND_R_ERROR_INITIALISING_DRBG);
EVP_RAND_CTX_free(drbg);
return CERT_ERROR_DRBG_CONFIG;
}
int res = EVP_RAND_instantiate(drbg, 0, 0, (const unsigned char *)seed,
seed_len, NULL);
if (res != 1) {
EVP_RAND_CTX_free(drbg);
return CERT_ERROR_DRBG_SEED;
}
c->lib_ctx = libctx;
c->drbg = drbg;
#else
RAND_DRBG *drbg = RAND_DRBG_new(NID_aes_256_ctr, 0, NULL);
if (!drbg)
return CERT_ERROR_DRBG_INIT;
if (RAND_DRBG_instantiate(drbg, (const unsigned char *)seed, seed_len) != 1) {
RAND_DRBG_free(drbg);
return CERT_ERROR_DRBG_SEED;
}
c->drbg = drbg;
#endif
*ctx = c;
return CERT_SUCCESS;
}
// Function to free CTR_DRBG context resources
void cert_free_ctr_drbg(cert_context_t ctx) {
if (ctx == NULL)
return;
struct cert_context_s *c = (struct cert_context_s *)ctx;
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
EVP_RAND_CTX_free(c->drbg);
OSSL_LIB_CTX_free(c->lib_ctx);
#else
RAND_DRBG_free(c->drbg);
#endif
free(c);
}
// Function to ensure the libp2p OID is registered
//
// Looks up LIBP2P_OID in OpenSSL's object table, registering it on first
// use. Returns the (positive) NID on success, or CERT_ERROR_NID (negative)
// if registration fails — the return value is never 0.
int ensure_libp2p_oid() {
  int nid = OBJ_txt2nid(LIBP2P_OID);
  if (nid != NID_undef)
    return nid; // already registered
  // OID not yet registered, create it
  nid = OBJ_create(LIBP2P_OID, "libp2p_tls", "libp2p TLS extension");
  return (nid == NID_undef) ? CERT_ERROR_NID : nid;
}
// Function to generate a key
//
// Generates a P-256 (secp256r1) EC key pair wrapped in an opaque
// cert_key_t. On the pre-3.0 path the private scalar is derived from bytes
// produced by the context's DRBG. NOTE(review): on the OpenSSL 3.x path
// EVP_PKEY_keygen uses the library's own RNG — the DRBG bytes and priv_bn
// are drawn but not used for the key material itself.
//
// @param ctx context obtained from cert_init_drbg (must not be NULL)
// @param out receives the generated key (free with cert_free_key);
//            set to NULL on failure
// @return CERT_SUCCESS or a CERT_ERROR_* code
cert_error_t cert_generate_key(cert_context_t ctx, cert_key_t *out) {
  unsigned char priv_key_bytes[32]; // 256 bits for secp256r1
  BIGNUM *priv_bn = NULL;
  EVP_PKEY *pkey = NULL;
  cert_error_t ret_code = CERT_SUCCESS;
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
  // BUG FIX: pctx must start as NULL — the error paths below jump to
  // cleanup before it is assigned, and `if (pctx)` would read an
  // indeterminate pointer (undefined behavior).
  EVP_PKEY_CTX *pctx = NULL;
#else
  EC_KEY *ec_key = NULL;
#endif
  if (ctx == NULL || out == NULL) {
    return CERT_ERROR_NULL_PARAM;
  }
  // Allocate key structure
  struct cert_key_s *key = calloc(1, sizeof(struct cert_key_s));
  if (key == NULL) {
    return CERT_ERROR_MEMORY;
  }
  // Generate random bytes for private key using our RNG
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
  if (EVP_RAND_generate(ctx->drbg, priv_key_bytes, sizeof(priv_key_bytes), 0, 0,
                        NULL, 0) <= 0) {
    ret_code = CERT_ERROR_RAND;
    goto cleanup;
  }
#else
  if (RAND_DRBG_bytes(ctx->drbg, priv_key_bytes, sizeof(priv_key_bytes)) != 1) {
    ret_code = CERT_ERROR_RAND;
    goto cleanup;
  }
#endif
  // Convert bytes to BIGNUM for private key
  priv_bn = BN_bin2bn(priv_key_bytes, sizeof(priv_key_bytes), NULL);
  if (!priv_bn) {
    ret_code = CERT_ERROR_BIGNUM_CONV;
    goto cleanup;
  }
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
  pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL);
  if (!pctx) {
    ret_code = CERT_ERROR_KEY_GEN;
    goto cleanup;
  }
  if (EVP_PKEY_keygen_init(pctx) <= 0) {
    ret_code = CERT_ERROR_INIT_KEYGEN;
    goto cleanup;
  }
  if (EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pctx, NID_X9_62_prime256v1) <= 0) {
    fprintf(stderr, "Error setting curve\n");
    ret_code = CERT_ERROR_SET_CURVE;
    goto cleanup;
  }
  // Generate the key pair (OpenSSL's RNG supplies the key material here)
  if (EVP_PKEY_keygen(pctx, &pkey) <= 0) {
    ret_code = CERT_ERROR_KEY_GEN;
    goto cleanup;
  }
#else
  // Create EC key from random bytes
  ec_key = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
  if (!ec_key) {
    ret_code = CERT_ERROR_ECKEY_GEN;
    goto cleanup;
  }
  // Set private key and compute public key
  if (!EC_KEY_set_private_key(ec_key, priv_bn)) {
    ret_code = CERT_ERROR_SET_KEY;
    goto cleanup;
  }
  // Generate the public key from the private key
  if (!EC_KEY_generate_key(ec_key)) {
    ret_code = CERT_ERROR_KEY_GEN;
    goto cleanup;
  }
  // Convert EC_KEY to EVP_PKEY
  pkey = EVP_PKEY_new();
  if (!pkey || !EVP_PKEY_set1_EC_KEY(pkey, ec_key)) {
    ret_code = CERT_ERROR_EVP_PKEY_EC_KEY;
    goto cleanup;
  }
#endif
  key->pkey = pkey;
  *out = key;
cleanup:
  // Always scrub the raw private key bytes from the stack.
  OPENSSL_cleanse(priv_key_bytes, sizeof(priv_key_bytes));
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
  if (pctx)
    EVP_PKEY_CTX_free(pctx);
#else
  if (ec_key)
    EC_KEY_free(ec_key);
#endif
  if (priv_bn)
    BN_free(priv_bn);
  if (ret_code != CERT_SUCCESS) {
    if (pkey)
      EVP_PKEY_free(pkey);
    free(key);
    *out = NULL;
  }
  return ret_code;
}
// Copies `data_len` bytes from src_data into a freshly allocated cert_buffer.
//
// @param buffer   receives the allocated buffer (must not be NULL);
//                 set to NULL on failure
// @param src_data source bytes; may be NULL only when data_len == 0
// @param data_len number of bytes to copy
// @return CERT_SUCCESS or a CERT_ERROR_* code
int init_cert_buffer(cert_buffer **buffer, const unsigned char *src_data,
                     size_t data_len) {
  if (!buffer) {
    return CERT_ERROR_NULL_PARAM;
  }
  // BUG FIX: a NULL src_data with a non-zero length previously crashed in
  // memcpy.
  if (!src_data && data_len > 0) {
    *buffer = NULL;
    return CERT_ERROR_NULL_PARAM;
  }
  *buffer = (cert_buffer *)malloc(sizeof(cert_buffer));
  if (!*buffer) {
    return CERT_ERROR_MEMORY;
  }
  memset(*buffer, 0, sizeof(cert_buffer));
  // malloc(0) may legally return NULL; request at least one byte so an empty
  // input is not misreported as an allocation failure.
  (*buffer)->data = (unsigned char *)malloc(data_len > 0 ? data_len : 1);
  if (!(*buffer)->data) {
    free(*buffer);
    *buffer = NULL;
    return CERT_ERROR_MEMORY;
  }
  if (data_len > 0) {
    memcpy((*buffer)->data, src_data, data_len);
  }
  (*buffer)->len = data_len;
  return CERT_SUCCESS;
}
// Function to generate a self-signed X.509 certificate with custom extension
//
// Builds an X509v3 certificate with: a random 20-byte serial drawn from the
// context's DRBG, subject == issuer == "CN=<cn>", validity [validFrom,
// validTo], the supplied key as the subject public key, and a custom libp2p
// extension whose value is an OCTET STRING wrapping
// SEQUENCE { OCTET STRING ident_pubk, OCTET STRING signature }.
// The certificate is self-signed with `key` using SHA-256 and written to
// *out in DER or PEM form.
//
// NOTE(review): `out`, `signature`, `ident_pubk`, `cn`, `validFrom` and
// `validTo` are dereferenced without NULL checks; furthermore, when
// ctx/key is NULL the cleanup path reads `*out` before this function has
// assigned it — callers must pass valid pointers.
cert_error_t cert_generate(cert_context_t ctx, cert_key_t key,
                           cert_buffer **out, cert_buffer *signature,
                           cert_buffer *ident_pubk, const char *cn,
                           const char *validFrom, const char *validTo,
                           cert_format_t format) {
  X509 *x509 = NULL;
  BIO *bio = NULL;
  BUF_MEM *bptr = NULL;
  X509_EXTENSION *ex = NULL;
  BIGNUM *serial_bn = NULL;
  X509_NAME *name = NULL;
  X509_EXTENSION *ku_ex = NULL;  // only used by the commented-out Key Usage code
  ASN1_BIT_STRING *usage = NULL; // only used by the commented-out Key Usage code
  ASN1_OCTET_STRING *oct_sign = NULL;
  ASN1_OCTET_STRING *oct_pubk = NULL;
  ASN1_OCTET_STRING *ext_oct = NULL;
  ASN1_TIME *start_time = NULL;
  ASN1_TIME *end_time = NULL;
  unsigned char *seq_der = NULL;
  int ret = 0;
  cert_error_t ret_code = CERT_SUCCESS;
  if (ctx == NULL || key == NULL) {
    ret_code = CERT_ERROR_NULL_PARAM;
    goto cleanup;
  }
  // Get the EVP_PKEY from our opaque key structure
  EVP_PKEY *pkey = ((struct cert_key_s *)key)->pkey;
  if (pkey == NULL) {
    ret_code = CERT_ERROR_NULL_PARAM;
    goto cleanup;
  }
  // Allocate result structure
  *out = (cert_buffer *)malloc(sizeof(cert_buffer));
  if (!*out) {
    ret_code = CERT_ERROR_MEMORY;
    goto cleanup;
  }
  memset(*out, 0, sizeof(cert_buffer));
  // Create X509 certificate
  x509 = X509_new();
  if (!x509) {
    ret_code = CERT_ERROR_CERT_GEN;
    goto cleanup;
  }
  // Set version to X509v3 (version field is zero-based, so 2 == v3)
  if (!X509_set_version(x509, 2)) {
    ret_code = CERT_ERROR_X509_VER;
    goto cleanup;
  }
  // Set random serial number
  serial_bn = BN_new();
  if (!serial_bn) {
    ret_code = CERT_ERROR_BIGNUM_GEN;
    goto cleanup;
  }
  unsigned char serial_bytes[20]; // Adjust size as needed
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
  if (EVP_RAND_generate(ctx->drbg, serial_bytes, sizeof(serial_bytes), 0, 0,
                        NULL, 0) <= 0) {
    ret_code = CERT_ERROR_RAND;
    goto cleanup;
  }
#else
  if (RAND_DRBG_bytes(ctx->drbg, serial_bytes, sizeof(serial_bytes)) != 1) {
    ret_code = CERT_ERROR_RAND;
    goto cleanup;
  }
#endif
  if (!BN_bin2bn(serial_bytes, sizeof(serial_bytes), serial_bn)) {
    ret_code = CERT_ERROR_BIGNUM_CONV;
    goto cleanup;
  }
  if (!BN_to_ASN1_INTEGER(serial_bn, X509_get_serialNumber(x509))) {
    ret_code = CERT_ERROR_SERIAL_WRITE;
    goto cleanup;
  }
  // Set subject and issuer using the provided cn (self-signed: same name)
  name = X509_NAME_new();
  if (!name) {
    ret_code = CERT_ERROR_X509_NAME;
    goto cleanup;
  }
  if (!X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC,
                                  (const unsigned char *)cn, -1, -1, 0)) {
    ret_code = CERT_ERROR_X509_CN;
    goto cleanup;
  }
  if (!X509_set_subject_name(x509, name)) {
    ret_code = CERT_ERROR_X509_SUBJECT;
    goto cleanup;
  }
  if (!X509_set_issuer_name(x509, name)) {
    ret_code = CERT_ERROR_X509_ISSUER;
    goto cleanup;
  }
  // Set validity period
  start_time = ASN1_TIME_new();
  end_time = ASN1_TIME_new();
  if (!start_time || !end_time) {
    ret_code = CERT_ERROR_AS1_TIME_GEN;
    goto cleanup;
  }
  // validFrom/validTo must be in a format ASN1_TIME_set_string accepts
  if (!ASN1_TIME_set_string(start_time, validFrom) ||
      !ASN1_TIME_set_string(end_time, validTo)) {
    ret_code = CERT_ERROR_VALIDITY_PERIOD;
    goto cleanup;
  }
  // set1 variants copy, so start_time/end_time stay owned by us
  if (!X509_set1_notBefore(x509, start_time) ||
      !X509_set1_notAfter(x509, end_time)) {
    ret_code = CERT_ERROR_VALIDITY_PERIOD;
    goto cleanup;
  }
  // Set public key
  if (!X509_set_pubkey(x509, pkey)) {
    ret_code = CERT_ERROR_PUBKEY_SET;
    goto cleanup;
  }
  // Add custom extension
  int nid = ensure_libp2p_oid();
  if (nid <= 0) {
    // nid is the negative CERT_ERROR_NID code from ensure_libp2p_oid here
    ret_code = nid;
    goto cleanup;
  }
  unsigned char *p;
  int seq_len, total_len;
  // Allocate and initialize ASN1_OCTET_STRING objects
  oct_pubk = ASN1_OCTET_STRING_new();
  if (!oct_pubk) {
    ret_code = CERT_ERROR_AS1_OCTET;
    goto cleanup;
  }
  oct_sign = ASN1_OCTET_STRING_new();
  if (!oct_sign) {
    ret_code = CERT_ERROR_AS1_OCTET;
    goto cleanup;
  }
  // NOTE(review): ident_pubk and signature are dereferenced here without a
  // NULL check — confirm callers always supply both buffers.
  if (!ASN1_OCTET_STRING_set(oct_pubk, ident_pubk->data, ident_pubk->len)) {
    ret_code = CERT_ERROR_EXTENSION_DATA;
    goto cleanup;
  }
  if (!ASN1_OCTET_STRING_set(oct_sign, signature->data, signature->len)) {
    ret_code = CERT_ERROR_EXTENSION_DATA;
    goto cleanup;
  }
  // Calculate DER-encoded lengths of the OCTET STRINGs (i2d with NULL
  // output pointer only measures)
  int oct_pubk_len = i2d_ASN1_OCTET_STRING(oct_pubk, NULL);
  int oct_sign_len = i2d_ASN1_OCTET_STRING(oct_sign, NULL);
  seq_len = oct_pubk_len + oct_sign_len;
  // Compute the exact required length for the SEQUENCE
  total_len = ASN1_object_size(1, seq_len, V_ASN1_SEQUENCE);
  // Allocate the exact required space
  seq_der = OPENSSL_malloc(total_len);
  if (!seq_der) {
    ret_code = CERT_ERROR_MEMORY;
    goto cleanup;
  }
  // Encode the sequence. p is moved fwd as it is written
  p = seq_der;
  ASN1_put_object(&p, 1, seq_len, V_ASN1_SEQUENCE, V_ASN1_UNIVERSAL);
  i2d_ASN1_OCTET_STRING(oct_pubk, &p);
  i2d_ASN1_OCTET_STRING(oct_sign, &p);
  // Wrap the encoded sequence in an ASN1_OCTET_STRING
  ext_oct = ASN1_OCTET_STRING_new();
  if (!ext_oct) {
    ret_code = CERT_ERROR_AS1_OCTET;
    goto cleanup;
  }
  if (!ASN1_OCTET_STRING_set(ext_oct, seq_der, total_len)) {
    ret_code = CERT_ERROR_EXTENSION_DATA;
    goto cleanup;
  }
  // Create extension with the octet string (non-critical: 0)
  ex = X509_EXTENSION_create_by_NID(NULL, nid, 0, ext_oct);
  if (!ex) {
    ret_code = CERT_ERROR_EXTENSION_GEN;
    goto cleanup;
  }
  // Add extension to certificate (X509_add_ext copies `ex`)
  if (!X509_add_ext(x509, ex, -1)) {
    ret_code = CERT_ERROR_EXTENSION_ADD;
    goto cleanup;
  }
  /*
  // Add Key Usage extension
  usage = ASN1_BIT_STRING_new();
  if (!usage) {
    ret_code = CERT_ERROR_MEMORY;
    goto cleanup;
  }
  // Set bits for DIGITAL_SIGNATURE (bit 0) and KEY_ENCIPHERMENT (bit 2)
  if (!ASN1_BIT_STRING_set_bit(usage, 0, 1) ||
      !ASN1_BIT_STRING_set_bit(usage, 2, 1)) {
    ret_code = CERT_ERROR_EXTENSION_DATA;
    goto cleanup;
  }
  */
  // Create Key Usage extension
  /*ku_ex = X509_EXTENSION_create_by_NID(NULL, NID_key_usage, 1,
                                       usage); // 1 for critical
  if (!ku_ex) {
    ret_code = CERT_ERROR_EXTENSION_GEN;
    goto cleanup;
  }
  // Add extension to certificate
  if (!X509_add_ext(x509, ku_ex, -1)) {
    ret_code = CERT_ERROR_EXTENSION_ADD;
    goto cleanup;
  }*/
  // Sign the certificate with SHA256
  if (!X509_sign(x509, pkey, EVP_sha256())) {
    ret_code = CERT_ERROR_SIGN;
    goto cleanup;
  }
  // Convert to requested format (DER or PEM)
  bio = BIO_new(BIO_s_mem());
  if (!bio) {
    ret_code = CERT_ERROR_BIO_GEN;
    goto cleanup;
  }
  if (format == CERT_FORMAT_DER) {
    ret = i2d_X509_bio(bio, x509);
  } else { // PEM format
    ret = PEM_write_bio_X509(bio, x509);
  }
  if (!ret) {
    ret_code = CERT_ERROR_BIO_WRITE;
    goto cleanup;
  }
  // Copy the BIO's memory buffer into the caller-owned result
  BIO_get_mem_ptr(bio, &bptr);
  (*out)->data = (unsigned char *)malloc(bptr->length);
  if (!(*out)->data) {
    ret_code = CERT_ERROR_MEMORY;
    goto cleanup;
  }
  memcpy((*out)->data, bptr->data, bptr->length);
  (*out)->len = bptr->length;
cleanup:
  if (bio)
    BIO_free(bio);
  if (ex)
    X509_EXTENSION_free(ex);
  // if (usage)
  //   ASN1_BIT_STRING_free(usage);
  if (ku_ex)
    X509_EXTENSION_free(ku_ex);
  if (oct_sign)
    ASN1_OCTET_STRING_free(oct_sign);
  if (oct_pubk)
    ASN1_OCTET_STRING_free(oct_pubk);
  if (ext_oct)
    ASN1_OCTET_STRING_free(ext_oct);
  if (seq_der)
    OPENSSL_free(seq_der);
  if (serial_bn)
    BN_free(serial_bn);
  if (name)
    X509_NAME_free(name);
  if (x509)
    X509_free(x509);
  if (start_time)
    ASN1_TIME_free(start_time);
  if (end_time)
    ASN1_TIME_free(end_time);
  // NOTE(review): if ctx/key was NULL we reach here without ever assigning
  // *out, so this reads whatever the caller passed — see function comment.
  if (ret_code != CERT_SUCCESS && (*out)) {
    if ((*out)->data)
      free((*out)->data);
    free((*out));
    *out = NULL;
  }
  return ret_code;
}
// Function to parse a certificate and extract custom extension and public key
//
// Parses `cert` (DER when format == CERT_FORMAT_DER, PEM otherwise), locates
// the libp2p extension (SEQUENCE of two OCTET STRINGs: identity public key
// and signature), extracts the certificate's SubjectPublicKeyInfo in DER
// form, and renders the notBefore/notAfter times as strings. On success
// *out owns all of these; release with cert_free_parsed().
//
// @param cert   buffer holding the encoded certificate (must not be NULL)
// @param format CERT_FORMAT_DER or CERT_FORMAT_PEM
// @param out    receives the allocated result (must not be NULL);
//               set to NULL on failure
// @return CERT_SUCCESS or a CERT_ERROR_* code
cert_error_t cert_parse(cert_buffer *cert, cert_format_t format,
                        cert_parsed **out) {
  X509 *x509 = NULL;
  BIO *bio = NULL;
  int extension_index;
  X509_EXTENSION *ex = NULL;
  ASN1_OCTET_STRING *ext_data = NULL;
  ASN1_SEQUENCE_ANY *seq = NULL;
  ASN1_OCTET_STRING *oct1 = NULL;
  ASN1_OCTET_STRING *oct2 = NULL;
  EVP_PKEY *pkey = NULL;
  unsigned char *pubkey_buf = NULL;
  // BUG FIX: ret_code was previously uninitialized, so some paths could
  // return an indeterminate value.
  cert_error_t ret_code = CERT_SUCCESS;
  // BUG FIX: validate parameters before dereferencing them.
  if (cert == NULL || cert->data == NULL || out == NULL) {
    return CERT_ERROR_NULL_PARAM;
  }
  // Allocate result structure
  *out = (cert_parsed *)malloc(sizeof(cert_parsed));
  if (!*out) {
    ret_code = CERT_ERROR_MEMORY;
    goto cleanup;
  }
  memset(*out, 0, sizeof(cert_parsed));
  // Create read-only BIO over the input bytes
  bio = BIO_new_mem_buf(cert->data, cert->len);
  if (!bio) {
    ret_code = CERT_ERROR_BIO_GEN;
    goto cleanup;
  }
  // Parse certificate based on format
  if (format == CERT_FORMAT_DER) {
    x509 = d2i_X509_bio(bio, NULL);
  } else { // PEM format
    x509 = PEM_read_bio_X509(bio, NULL, NULL, NULL);
  }
  if (!x509) {
    ret_code = CERT_ERROR_X509_READ;
    goto cleanup;
  }
  // Find custom extension by OID - use the existing OID or create it if needed
  int nid = ensure_libp2p_oid();
  // BUG FIX: ensure_libp2p_oid() returns CERT_ERROR_NID (negative), never 0,
  // on failure — the old `if (!nid)` check was dead code and also jumped to
  // cleanup without setting ret_code.
  if (nid <= 0) {
    ret_code = CERT_ERROR_NID;
    goto cleanup;
  }
  extension_index = X509_get_ext_by_NID(x509, nid, -1);
  if (extension_index < 0) {
    ret_code = CERT_ERROR_EXTENSION_NOT_FOUND;
    goto cleanup;
  }
  // Get extension (owned by x509; do not free)
  ex = X509_get_ext(x509, extension_index);
  if (!ex) {
    ret_code = CERT_ERROR_EXTENSION_GET;
    goto cleanup;
  }
  // Get extension data (owned by the extension; do not free)
  ext_data = X509_EXTENSION_get_data(ex);
  if (!ext_data) {
    ret_code = CERT_ERROR_EXTENSION_DATA;
    goto cleanup;
  }
  // Decode the SEQUENCE { OCTET STRING, OCTET STRING } payload
  const unsigned char *p = ASN1_STRING_get0_data(ext_data);
  seq = d2i_ASN1_SEQUENCE_ANY(NULL, &p, ASN1_STRING_length(ext_data));
  if (!seq) {
    ret_code = CERT_ERROR_DECODE_SEQUENCE;
    goto cleanup;
  }
  // Check if we have exactly two items in the sequence
  if (sk_ASN1_TYPE_num(seq) != 2) {
    ret_code = CERT_ERROR_NOT_ENOUGH_SEQ_ELEMS;
    goto cleanup;
  }
  // First octet string: identity public key
  ASN1_TYPE *type1 = sk_ASN1_TYPE_value(seq, 0);
  if (type1->type != V_ASN1_OCTET_STRING) {
    ret_code = CERT_ERROR_NOT_OCTET_STR;
    goto cleanup;
  }
  oct1 = type1->value.octet_string;
  // Second octet string: signature
  ASN1_TYPE *type2 = sk_ASN1_TYPE_value(seq, 1);
  if (type2->type != V_ASN1_OCTET_STRING) {
    ret_code = CERT_ERROR_NOT_OCTET_STR;
    goto cleanup;
  }
  oct2 = type2->value.octet_string;
  ret_code =
      init_cert_buffer(&((*out)->ident_pubk), ASN1_STRING_get0_data(oct1),
                       ASN1_STRING_length(oct1));
  if (ret_code != CERT_SUCCESS) {
    goto cleanup;
  }
  ret_code = init_cert_buffer(&((*out)->signature), ASN1_STRING_get0_data(oct2),
                              ASN1_STRING_length(oct2));
  if (ret_code != CERT_SUCCESS) {
    goto cleanup;
  }
  // Get public key
  pkey = X509_get_pubkey(x509);
  if (!pkey) {
    ret_code = CERT_ERROR_PUBKEY_GET;
    goto cleanup;
  }
  // Measure, then DER-encode the SubjectPublicKeyInfo
  int pubkey_len = i2d_PUBKEY(pkey, NULL);
  if (pubkey_len <= 0) {
    ret_code = CERT_ERROR_PUBKEY_DER_LEN;
    goto cleanup;
  }
  pubkey_buf = (unsigned char *)malloc(pubkey_len);
  if (!pubkey_buf) {
    ret_code = CERT_ERROR_MEMORY;
    goto cleanup;
  }
  unsigned char *temp = pubkey_buf; // i2d advances the pointer it is given
  if (i2d_PUBKEY(pkey, &temp) <= 0) {
    ret_code = CERT_ERROR_PUBKEY_DER_CONV;
    goto cleanup;
  }
  ret_code = init_cert_buffer(&(*out)->cert_pubkey, pubkey_buf, pubkey_len);
  if (ret_code != CERT_SUCCESS) {
    goto cleanup;
  }
  // Render notBefore as a human-readable string (best-effort: print
  // failures leave valid_from NULL)
  const ASN1_TIME *not_before = X509_get0_notBefore(x509);
  if (not_before) {
    char *not_before_str = NULL;
    BIO *bio_nb = BIO_new(BIO_s_mem());
    if (bio_nb) {
      if (ASN1_TIME_print(bio_nb, not_before)) {
        size_t len = BIO_ctrl_pending(bio_nb);
        not_before_str = malloc(len + 1);
        if (not_before_str) {
          BIO_read(bio_nb, not_before_str, len);
          not_before_str[len] = '\0';
          (*out)->valid_from = not_before_str;
        }
      }
      BIO_free(bio_nb);
    } else {
      ret_code = CERT_ERROR_MEMORY;
      goto cleanup;
    }
  }
  // Render notAfter the same way
  const ASN1_TIME *not_after = X509_get0_notAfter(x509);
  if (not_after) {
    char *not_after_str = NULL;
    BIO *bio_na = BIO_new(BIO_s_mem());
    if (bio_na) {
      if (ASN1_TIME_print(bio_na, not_after)) {
        size_t len = BIO_ctrl_pending(bio_na);
        not_after_str = malloc(len + 1);
        if (not_after_str) {
          BIO_read(bio_na, not_after_str, len);
          not_after_str[len] = '\0';
          (*out)->valid_to = not_after_str;
        }
      }
      BIO_free(bio_na);
    } else {
      // CONSISTENCY FIX: the notBefore branch treated BIO allocation
      // failure as an error, but this branch silently ignored it.
      ret_code = CERT_ERROR_MEMORY;
      goto cleanup;
    }
  }
  ret_code = CERT_SUCCESS;
cleanup:
  // BUG FIX: the decoded SEQUENCE stack was previously leaked. Freeing it
  // also releases oct1/oct2, which point into its elements.
  if (seq)
    sk_ASN1_TYPE_pop_free(seq, ASN1_TYPE_free);
  if (pkey)
    EVP_PKEY_free(pkey);
  if (x509)
    X509_free(x509);
  if (bio)
    BIO_free(bio);
  if (pubkey_buf)
    free(pubkey_buf);
  if (ret_code != CERT_SUCCESS && (*out)) {
    cert_free_parsed(*out);
    // BUG FIX: previously `out = NULL` reset only the local parameter,
    // leaving the caller holding a dangling pointer to freed memory.
    *out = NULL;
  }
  return ret_code;
}
// Serializes the private half of `key` to DER (unencrypted PKCS#8/legacy
// i2d form) or PEM (unencrypted) and returns it in a freshly allocated
// cert_buffer.
//
// @param key    key created by cert_generate_key (must not be NULL)
// @param out    receives the allocated buffer (must not be NULL);
//               set to NULL on failure
// @param format CERT_FORMAT_DER or CERT_FORMAT_PEM
// @return CERT_SUCCESS or a CERT_ERROR_* code
cert_error_t cert_serialize_privk(cert_key_t key, cert_buffer **out,
                                  cert_format_t format) {
  BIO *bio = NULL;
  BUF_MEM *bptr = NULL;
  int ret;
  cert_error_t ret_code = CERT_SUCCESS;
  if (key == NULL || out == NULL) {
    return CERT_ERROR_NULL_PARAM;
  }
  // BUG FIX: initialize *out so the error-cleanup below never frees an
  // indeterminate pointer when we fail before init_cert_buffer runs.
  *out = NULL;
  // Get the EVP_PKEY from our opaque key structure
  EVP_PKEY *pkey = ((struct cert_key_s *)key)->pkey;
  if (!pkey) {
    return CERT_ERROR_NULL_PARAM;
  }
  // Create memory BIO
  bio = BIO_new(BIO_s_mem());
  if (!bio) {
    ret_code = CERT_ERROR_BIO_GEN;
    goto cleanup;
  }
  if (format == CERT_FORMAT_DER) {
    // Write key in DER format to BIO
    ret = i2d_PrivateKey_bio(bio, pkey);
  } else {
    // No encryption is used (NULL cipher, NULL password)
    ret = PEM_write_bio_PrivateKey(bio, pkey, NULL, NULL, 0, NULL, NULL);
  }
  if (!ret) {
    ret_code = CERT_ERROR_BIO_WRITE;
    goto cleanup;
  }
  // Copy the BIO's memory into the caller-owned buffer.
  BIO_get_mem_ptr(bio, &bptr);
  // BUG FIX: the result was previously stored in `ret` while the function
  // returned `ret_code`, so init_cert_buffer failures were reported as
  // success.
  ret_code =
      init_cert_buffer(out, (const unsigned char *)bptr->data, bptr->length);
cleanup:
  if (bio)
    BIO_free(bio);
  if (ret_code != CERT_SUCCESS && *out) {
    if ((*out)->data)
      free((*out)->data);
    free(*out);
    *out = NULL;
  }
  return ret_code;
}
// Serializes the public half of `key` as SubjectPublicKeyInfo in DER or PEM
// form and returns it in a freshly allocated cert_buffer.
//
// @param key    key created by cert_generate_key (must not be NULL)
// @param out    receives the allocated buffer (must not be NULL);
//               set to NULL on failure
// @param format CERT_FORMAT_DER or CERT_FORMAT_PEM
// @return CERT_SUCCESS or a CERT_ERROR_* code
cert_error_t cert_serialize_pubk(cert_key_t key, cert_buffer **out,
                                 cert_format_t format) {
  BIO *bio = NULL;
  BUF_MEM *bptr = NULL;
  int ret;
  cert_error_t ret_code = CERT_SUCCESS;
  if (key == NULL || out == NULL) {
    return CERT_ERROR_NULL_PARAM;
  }
  // BUG FIX: initialize *out so the error-cleanup below never frees an
  // indeterminate pointer when we fail before init_cert_buffer runs.
  *out = NULL;
  // Get the EVP_PKEY from our opaque key structure
  EVP_PKEY *pkey = ((struct cert_key_s *)key)->pkey;
  if (!pkey) {
    return CERT_ERROR_NULL_PARAM;
  }
  // Create memory BIO
  bio = BIO_new(BIO_s_mem());
  if (!bio) {
    ret_code = CERT_ERROR_BIO_GEN;
    goto cleanup;
  }
  if (format == CERT_FORMAT_DER) {
    // Write key in DER format to BIO
    ret = i2d_PUBKEY_bio(bio, pkey);
    if (!ret) {
      // Diagnostics belong on stderr, not stdout (was printf).
      unsigned long err = ERR_get_error();
      fprintf(stderr, "openssl err: %s\n", ERR_error_string(err, NULL));
      ret_code = CERT_ERROR_BIO_WRITE;
      goto cleanup;
    }
  } else {
    ret = PEM_write_bio_PUBKEY(bio, pkey);
    if (!ret) {
      ret_code = CERT_ERROR_BIO_WRITE;
      goto cleanup;
    }
  }
  // Copy the BIO's memory into the caller-owned buffer.
  BIO_get_mem_ptr(bio, &bptr);
  // BUG FIX: the result was previously stored in `ret` while the function
  // returned `ret_code`, so init_cert_buffer failures were reported as
  // success.
  ret_code =
      init_cert_buffer(out, (const unsigned char *)bptr->data, bptr->length);
cleanup:
  if (bio)
    BIO_free(bio);
  if (ret_code != CERT_SUCCESS && *out) {
    if ((*out)->data)
      free((*out)->data);
    free(*out);
    *out = NULL;
  }
  return ret_code;
}
// Releases a cert_buffer and its payload. Safe to call with NULL.
void cert_free_buffer(cert_buffer *buffer) {
  if (buffer == NULL)
    return;
  free(buffer->data); // free(NULL) is a no-op per the C standard
  free(buffer);
}
// Function to free the parsed certificate struct
//
// Releases every buffer and string owned by a cert_parsed obtained from
// cert_parse, then the struct itself. Safe to call with NULL.
void cert_free_parsed(cert_parsed *cert) {
  if (cert == NULL)
    return;
  // cert_free_buffer and free both tolerate NULL members.
  cert_free_buffer(cert->cert_pubkey);
  cert_free_buffer(cert->ident_pubk);
  cert_free_buffer(cert->signature);
  free(cert->valid_from);
  free(cert->valid_to);
  free(cert);
}
// Function to free key resources
//
// Releases the wrapped EVP_PKEY and the key wrapper. Safe to call with NULL.
void cert_free_key(cert_key_t key) {
  struct cert_key_s *wrapper = (struct cert_key_s *)key;
  if (wrapper != NULL) {
    EVP_PKEY_free(wrapper->pkey);
    free(wrapper);
  }
}

View File

@@ -0,0 +1,187 @@
#ifndef LIBP2P_CERT_H
#define LIBP2P_CERT_H
#include <stddef.h>
#include <stdint.h>
typedef struct cert_context_s *cert_context_t;
typedef struct cert_key_s *cert_key_t;
typedef int32_t cert_error_t;
#define CERT_SUCCESS 0
#define CERT_ERROR_NULL_PARAM -1
#define CERT_ERROR_MEMORY -2
#define CERT_ERROR_DRBG_INIT -3
#define CERT_ERROR_DRBG_CONFIG -4
#define CERT_ERROR_DRBG_SEED -5
#define CERT_ERROR_KEY_GEN -6
#define CERT_ERROR_CERT_GEN -7
#define CERT_ERROR_EXTENSION_GEN -8
#define CERT_ERROR_EXTENSION_ADD -9
#define CERT_ERROR_EXTENSION_DATA -10
#define CERT_ERROR_BIO_GEN -11
#define CERT_ERROR_SIGN -12
#define CERT_ERROR_ENCODING -13
#define CERT_ERROR_PARSE -14
#define CERT_ERROR_RAND -15
#define CERT_ERROR_ECKEY_GEN -16
#define CERT_ERROR_BIGNUM_CONV -17
#define CERT_ERROR_SET_KEY -18
#define CERT_ERROR_VALIDITY_PERIOD -19
#define CERT_ERROR_BIO_WRITE -20
#define CERT_ERROR_SERIAL_WRITE -21
#define CERT_ERROR_EVP_PKEY_EC_KEY -22
#define CERT_ERROR_X509_VER -23
#define CERT_ERROR_BIGNUM_GEN -24
#define CERT_ERROR_X509_NAME -25
#define CERT_ERROR_X509_CN -26
#define CERT_ERROR_X509_SUBJECT -27
#define CERT_ERROR_X509_ISSUER -28
#define CERT_ERROR_AS1_TIME_GEN -29
#define CERT_ERROR_PUBKEY_SET -30
#define CERT_ERROR_AS1_OCTET -31
#define CERT_ERROR_X509_READ -32
#define CERT_ERROR_PUBKEY_GET -33
#define CERT_ERROR_EXTENSION_NOT_FOUND -34
#define CERT_ERROR_EXTENSION_GET -35
#define CERT_ERROR_DECODE_SEQUENCE -36
#define CERT_ERROR_NOT_ENOUGH_SEQ_ELEMS -37
#define CERT_ERROR_NOT_OCTET_STR -38
#define CERT_ERROR_NID -39
#define CERT_ERROR_PUBKEY_DER_LEN -40
#define CERT_ERROR_PUBKEY_DER_CONV -41
#define CERT_ERROR_INIT_KEYGEN -42
#define CERT_ERROR_SET_CURVE -43
typedef enum { CERT_FORMAT_DER = 0, CERT_FORMAT_PEM = 1 } cert_format_t;
/* Buffer structure for raw key data */
typedef struct {
unsigned char *data; /* data buffer */
size_t len; /* Length of data */
} cert_buffer;
/* Struct to hold the parsed certificate data */
typedef struct {
cert_buffer *signature;
cert_buffer *ident_pubk;
cert_buffer *cert_pubkey;
char *valid_from;
char *valid_to;
} cert_parsed;
/**
* Initialize the CTR-DRBG for cryptographic operations
* This function creates and initializes a CTR-DRBG context using
* the provided seed for entropy. The DRBG is configured to use
* AES-256-CTR as the underlying cipher.
*
 * @param seed Seed bytes used to seed the DRBG. Must not be NULL (need not
 * be null-terminated).
 * @param seed_len Number of bytes of `seed` to use for seeding.
* @param ctx Pointer to a context pointer that will be allocated and
* initialized. The caller is responsible for eventually freeing this context
* with the appropriate cleanup function.
*
* @return CERT_SUCCESS on successful initialization, an error code otherwise
*/
cert_error_t cert_init_drbg(const char *seed, size_t seed_len,
cert_context_t *ctx);
/**
* Generate an EC key pair for use with certificates
*
* @param ctx Context pointer obtained from `cert_init_drbg`
* @param out Pointer to store the generated key
*
* @return CERT_SUCCESS on successful execution, an error code otherwise
*/
cert_error_t cert_generate_key(cert_context_t ctx, cert_key_t *out);
/**
* Serialize a key's private key to a format
*
* @param key The key to export
* @param out Pointer to a buffer structure that will be populated with the key
* @param format output format
*
* @return CERT_SUCCESS on successful execution, an error code otherwise
*/
cert_error_t cert_serialize_privk(cert_key_t key, cert_buffer **out,
cert_format_t format);
/**
* Serialize a key's public key to a format
*
* @param key The key to export
* @param out Pointer to a buffer structure that will be populated with the key
* @param format output format
*
* @return CERT_SUCCESS on successful execution, an error code otherwise
*/
cert_error_t cert_serialize_pubk(cert_key_t key, cert_buffer **out,
cert_format_t format);
/**
* Generate a self-signed X.509 certificate with libp2p extension
*
* @param ctx Context pointer obtained from `cert_init_drbg`
* @param key Key to use
* @param out Pointer to a buffer that will be populated with a certificate
* @param signature buffer that contains a signature
* @param ident_pubk buffer that contains the bytes of an identity pubk
* @param common_name Common name to use for the certificate subject/issuer
* @param validFrom Date from which certificate is issued
* @param validTo Date to which certificate is issued
* @param format Certificate format
*
* @return CERT_SUCCESS on successful execution, an error code otherwise
*/
cert_error_t cert_generate(cert_context_t ctx, cert_key_t key,
cert_buffer **out, cert_buffer *signature,
cert_buffer *ident_pubk, const char *cn,
const char *validFrom, const char *validTo,
cert_format_t format);
/**
* Parse a certificate to extract the custom extension and public key
*
* @param cert Buffer containing the certificate data
* @param format Certificate format
 * @param out Pointer that receives an allocated structure containing the
 * parsed certificate data; release it with cert_free_parsed().
*
* @return CERT_SUCCESS on successful execution, an error code otherwise
*/
cert_error_t cert_parse(cert_buffer *cert, cert_format_t format,
cert_parsed **out);
/**
* Free all resources associated with a CTR-DRBG context
*
* @param ctx The context to free
*/
void cert_free_ctr_drbg(cert_context_t ctx);
/**
* Free memory allocated for a parsed certificate
*
* @param cert Pointer to the parsed certificate structure
*/
void cert_free_parsed(cert_parsed *cert);
/**
* Free all resources associated with a key
*
* @param key The key to free
*/
void cert_free_key(cert_key_t key);
/**
* Free memory allocated for a buffer
*
* @param buffer Pointer to the buffer structure
*/
void cert_free_buffer(cert_buffer *buffer);
#endif /* LIBP2P_CERT_H */

View File

@@ -7,46 +7,26 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
import std/[sequtils, strutils, exitprocs]
import std/[sequtils, exitprocs]
import strutils
import times
import stew/byteutils
import chronicles
import mbedtls/pk
import mbedtls/ctr_drbg as ctr_drbg_module
import mbedtls/entropy as entropy_module
import mbedtls/ecp
import mbedtls/sha256
import mbedtls/md
import mbedtls/asn1
import mbedtls/asn1write
import mbedtls/x509
import mbedtls/x509_crt
import mbedtls/oid
import mbedtls/debug
import mbedtls/error
import nimcrypto/utils
import ../../crypto/crypto
import ../../errors
import ./certificate_ffi
import ../../../libp2p/peerid
logScope:
topics = "libp2p tls certificate"
# Constants and OIDs
const
P2P_SIGNING_PREFIX = "libp2p-tls-handshake:"
SIGNATURE_ALG = MBEDTLS_MD_SHA256
EC_GROUP_ID = MBEDTLS_ECP_DP_SECP256R1
LIBP2P_EXT_OID_DER: array[10, byte] =
[0x2B, 0x06, 0x01, 0x04, 0x01, 0x83, 0xA2, 0x5A, 0x01, 0x01]
# "1.3.6.1.4.1.53594.1.1"
# Exception types for TLS certificate errors
type
TLSCertificateError* = object of LPError
ASN1EncodingError* = object of TLSCertificateError
KeyGenerationError* = object of TLSCertificateError
CertificateCreationError* = object of TLSCertificateError
CertificatePubKeySerializationError* = object of TLSCertificateError
CertificateParsingError* = object of TLSCertificateError
IdentityPubKeySerializationError* = object of TLSCertificateError
IdentitySigningError* = object of TLSCertificateError
@@ -58,184 +38,129 @@ type
signature*: seq[byte]
P2pCertificate* = object
certificate*: mbedtls_x509_crt
extension*: P2pExtension
pubKeyDer: seq[byte]
validFrom: Time
validTo: Time
CertificateX509* = object
certificate*: seq[byte]
# Complete ASN.1 DER content (certificate, signature algorithm and signature).
privateKey*: seq[byte] # Private key used to sign certificate
type EncodingFormat* = enum
DER
PEM
proc ptrInc*(p: ptr byte, n: uint): ptr byte =
## Utility function to increment a pointer by n bytes.
cast[ptr byte](cast[uint](p) + n)
proc cert_format_t(self: EncodingFormat): cert_format_t =
if self == EncodingFormat.DER: CERT_FORMAT_DER else: CERT_FORMAT_PEM
proc toCertBuffer(self: seq[uint8]): cert_buffer =
cert_buffer(data: self[0].unsafeAddr, length: self.len.csize_t)
proc toSeq(self: ptr cert_buffer): seq[byte] =
toOpenArray(cast[ptr UncheckedArray[byte]](self.data), 0, self.length.int - 1).toSeq()
# Initialize entropy and DRBG contexts at the module level
var
entropy: mbedtls_entropy_context
ctrDrbg: mbedtls_ctr_drbg_context
cert_ctx: cert_context_t = nil
drbgInitialized = false
func publicKey*(cert: P2pCertificate): PublicKey =
return PublicKey.init(cert.extension.publicKey).get()
func peerId*(cert: P2pCertificate): PeerId =
return PeerId.init(cert.publicKey()).tryGet()
proc initializeDRBG() {.raises: [KeyGenerationError].} =
## Function to initialize entropy and DRBG context if not already initialized.
if not drbgInitialized:
mbedtls_entropy_init(addr entropy)
mbedtls_ctr_drbg_init(addr ctrDrbg)
# Seed the random number generator
let personalization = "libp2p_tls"
let ret = mbedtls_ctr_drbg_seed(
addr ctrDrbg,
mbedtls_entropy_func,
addr entropy,
cast[ptr byte](personalization.cstring),
personalization.len.uint,
let ret = cert_init_drbg(
personalization.cstring, personalization.len.csize_t, addr cert_ctx
)
if ret != 0:
if ret != CERT_SUCCESS:
raise newException(KeyGenerationError, "Failed to seed CTR_DRBG")
drbgInitialized = true
proc cleanupDRBG() =
## Function to free entropy and DRBG context.
if drbgInitialized:
mbedtls_ctr_drbg_free(addr ctrDrbg)
mbedtls_entropy_free(addr entropy)
cert_free_ctr_drbg(cert_ctx)
drbgInitialized = false
# Register cleanup function to free entropy and DRBG context
addExitProc(cleanupDRBG)
proc generateSignedKey(
signature: seq[byte], pubKey: seq[byte]
): seq[byte] {.raises: [ASN1EncodingError].} =
## Generates the ASN.1-encoded SignedKey structure.
func makeSignatureMessage(pubKey: seq[byte]): seq[byte] {.inline.} =
## Creates message used for certificate signature.
##
## The SignedKey structure contains the public key and its signature,
## encoded as a SEQUENCE of two OCTET STRINGs.
##
## Parameters:
## - `signature`: The signature bytes.
## - `pubKey`: The public key bytes.
##
## Returns:
## A sequence of bytes representing the ASN.1-encoded SignedKey.
##
## Raises:
## - `ASN1EncodingError` if ASN.1 encoding fails.
const extValueSize = 256 # Buffer size for ASN.1 encoding
var
extValue: array[extValueSize, byte]
extPtr: ptr byte = addr extValue[extValueSize - 1]
# Start at the end of the buffer as mbedtls_asn1_write_octet_string works backwards in data buffer.
startPtr: ptr byte = addr extValue[0]
len = 0
let P2P_SIGNING_PREFIX = "libp2p-tls-handshake:".toBytes()
let prefixLen = P2P_SIGNING_PREFIX.len.int
let msg = newSeq[byte](prefixLen + pubKey.len)
copyMem(msg[0].unsafeAddr, P2P_SIGNING_PREFIX[0].unsafeAddr, prefixLen)
copyMem(msg[prefixLen].unsafeAddr, pubKey[0].unsafeAddr, pubKey.len.int)
# Write signature OCTET STRING
let retSig = mbedtls_asn1_write_octet_string(
addr extPtr, startPtr, unsafeAddr signature[0], signature.len.uint
)
if retSig < 0:
raise newException(ASN1EncodingError, "Failed to write signature OCTET STRING")
len += retSig
return msg
# Write publicKey OCTET STRING
let retPub = mbedtls_asn1_write_octet_string(
addr extPtr, startPtr, unsafeAddr pubKey[0], pubKey.len.uint
)
if retPub < 0:
raise newException(ASN1EncodingError, "Failed to write publicKey OCTET STRING")
len += retPub
func makeIssuerDN(identityKeyPair: KeyPair): string {.inline.} =
let issuerDN =
try:
"CN=" & $(PeerId.init(identityKeyPair.pubkey).tryGet())
except LPError:
raiseAssert "pubkey must be set"
# Total length of the SEQUENCE contents
let contentLen = retSig + retPub
# Write SEQUENCE length
let retLen = mbedtls_asn1_write_len(addr extPtr, startPtr, contentLen.uint)
if retLen < 0:
raise newException(ASN1EncodingError, "Failed to write SEQUENCE length")
len += retLen
return issuerDN
# Write SEQUENCE tag
let retTag = mbedtls_asn1_write_tag(
addr extPtr, startPtr, MBEDTLS_ASN1_CONSTRUCTED or MBEDTLS_ASN1_SEQUENCE
)
if retTag < 0:
raise newException(ASN1EncodingError, "Failed to write SEQUENCE tag")
len += retTag
proc makeASN1Time(time: Time): string {.inline.} =
let str =
try:
let f = initTimeFormat("yyyyMMddhhmmss")
format(time.utc(), f)
except TimeFormatParseError:
raiseAssert "time format is const and checked with test"
# Calculate dataOffset based on the accumulated length
let dataOffset = extValueSize - len - 1
return str & "Z"
# Extract the relevant portion of extValue as a seq[byte]
let extValueSeq = toSeq(extValue[dataOffset ..< extValueSize])
# Return the extension content
return extValueSeq
proc makeLibp2pExtension(
identityKeypair: KeyPair, certificateKeypair: mbedtls_pk_context
): seq[byte] {.
proc makeExtValues(
identityKeypair: KeyPair, certKey: cert_key_t
): tuple[signature: cert_buffer, pubkey: cert_buffer] {.
raises: [
CertificateCreationError, IdentityPubKeySerializationError, IdentitySigningError,
ASN1EncodingError, TLSCertificateError,
CertificatePubKeySerializationError, IdentitySigningError,
IdentityPubKeySerializationError,
]
.} =
## Creates the libp2p extension containing the SignedKey.
##
## The libp2p extension is an ASN.1-encoded structure that includes
## the public key and its signature over the certificate's public key.
## Creates the buffers to be used for writing the libp2p extension
##
## Parameters:
## - `identityKeypair`: The peer's identity key pair.
## - `certificateKeypair`: The key pair used for the certificate.
## - `certificateKey`: The key used for the certificate.
##
## Returns:
## A sequence of bytes representing the libp2p extension.
##
## Raises:
## - `CertificateCreationError` if public key serialization fails.
## - `IdentitySigningError` if signing the message fails.
## - `CertificatePubKeySerializationError` if serialization of certificate public key fails
## - `IdentityPubKeySerializationError` if serialization of identity public key fails.
## - `IdentitySigningError` if signing the hash fails.
## - `ASN1EncodingError` if ASN.1 encoding fails.
# Serialize the Certificate's Public Key
var
certPubKeyDer: array[512, byte]
certPubKeyDerLen: cint
certPubKeyDerLen = mbedtls_pk_write_pubkey_der(
unsafeAddr certificateKeypair, addr certPubKeyDer[0], certPubKeyDer.len.uint
)
if certPubKeyDerLen < 0:
var derCert: ptr cert_buffer = nil
let ret = cert_serialize_pubk(certKey, derCert.addr, DER.cert_format_t())
if ret != CERT_SUCCESS:
raise newException(
CertificateCreationError, "Failed to write certificate public key in DER format"
CertificatePubKeySerializationError, "Failed to serialize the certificate pubkey"
)
# Adjust pointer to the start of the data
let certPubKeyDerPtr = addr certPubKeyDer[certPubKeyDer.len - certPubKeyDerLen]
let certificatePubKeyDer = derCert.toSeq()
# Create the Message to Sign
var msg = newSeq[byte](P2P_SIGNING_PREFIX.len + certPubKeyDerLen.int.int)
let msg = makeSignatureMessage(certificatePubKeyDer)
# Copy the prefix into msg
for i in 0 ..< P2P_SIGNING_PREFIX.len:
msg[i] = byte(P2P_SIGNING_PREFIX[i])
# Copy the public key DER into msg
copyMem(addr msg[P2P_SIGNING_PREFIX.len], certPubKeyDerPtr, certPubKeyDerLen.int)
# Compute SHA-256 hash of the message
var hash: array[32, byte]
let hashRet = mbedtls_sha256(
msg[0].addr, msg.len.uint, addr hash[0], 0 # 0 for SHA-256
)
if hashRet != 0:
# Since hashing failure is critical and unlikely, we can raise a general exception
raise newException(TLSCertificateError, "Failed to compute SHA-256 hash")
# Sign the hash with the Identity Key
let signatureResult = identityKeypair.seckey.sign(hash)
# Sign the message with the Identity Key
let signatureResult = identityKeypair.seckey.sign(msg)
if signatureResult.isErr:
raise newException(
IdentitySigningError, "Failed to sign the hash with the identity key"
IdentitySigningError, "Failed to sign the message with the identity key"
)
let signature = signatureResult.get().data
@@ -247,285 +172,79 @@ proc makeLibp2pExtension(
)
let pubKeyBytes = pubKeyBytesResult.get()
# Generate the SignedKey ASN.1 structure
return generateSignedKey(signature, pubKeyBytes)
return (signature.toCertBuffer(), pubKeyBytes.toCertBuffer())
proc generate*(
identityKeyPair: KeyPair, encodingFormat: EncodingFormat = EncodingFormat.DER
): (seq[byte], seq[byte]) {.
proc generateX509*(
identityKeyPair: KeyPair,
validFrom: Time = fromUnix(157813200),
validTo: Time = fromUnix(67090165200),
encodingFormat: EncodingFormat = EncodingFormat.DER,
): CertificateX509 {.
raises: [
KeyGenerationError, CertificateCreationError, ASN1EncodingError,
IdentityPubKeySerializationError, IdentitySigningError, TLSCertificateError,
KeyGenerationError, IdentitySigningError, IdentityPubKeySerializationError,
CertificateCreationError, CertificatePubKeySerializationError,
]
.} =
## Generates a self-signed X.509 certificate with the libp2p extension.
##
## Parameters:
## - `identityKeyPair`: The peer's identity key pair.
## - `encodingFormat`: The encoding format of generated certificate.
##
## Returns:
## A tuple containing:
## - The certificate.
## - The private key.
## - `raw` - The certificate content (encoded using encodingFormat).
## - `privateKey` - The private key.
##
## Raises:
## - `KeyGenerationError` if key generation fails.
## - `CertificateCreationError` if certificate creation fails.
## - `ASN1EncodingError` if encoding fails.
# Ensure DRBG contexts are initialized
initializeDRBG()
var
crt: mbedtls_x509write_cert
certKey: mbedtls_pk_context
ret: cint
mbedtls_entropy_init(addr entropy)
mbedtls_ctr_drbg_init(addr ctrDrbg)
mbedtls_x509write_crt_init(addr crt)
mbedtls_pk_init(addr certKey)
defer:
mbedtls_entropy_free(addr entropy)
mbedtls_ctr_drbg_free(addr ctrDrbg)
mbedtls_pk_free(addr certKey)
mbedtls_x509write_crt_free(addr crt)
# Seed the random number generator
let personalization = "libp2p_tls"
ret = mbedtls_ctr_drbg_seed(
addr ctrDrbg,
mbedtls_entropy_func,
addr entropy,
cast[ptr byte](personalization.cstring),
personalization.len.uint,
)
if ret != 0:
raise newException(KeyGenerationError, "Failed to seed CTR_DRBG")
# Initialize certificate key
ret = mbedtls_pk_setup(addr certKey, mbedtls_pk_info_from_type(MBEDTLS_PK_ECKEY))
if ret != 0:
raise newException(KeyGenerationError, "Failed to set up certificate key context")
# Generate key pair for the certificate
let G =
try:
mb_pk_ec(certKey)
except MbedTLSError as e:
raise newException(KeyGenerationError, e.msg)
ret = mbedtls_ecp_gen_key(EC_GROUP_ID, G, mbedtls_ctr_drbg_random, addr ctrDrbg)
if ret != 0:
var certKey: cert_key_t
var ret = cert_generate_key(cert_ctx, certKey.addr)
if ret != CERT_SUCCESS:
raise
newException(KeyGenerationError, "Failed to generate EC key pair for certificate")
newException(KeyGenerationError, "Failed to generate certificate key - " & $ret)
## Initialize libp2p extension
let libp2pExtension = makeLibp2pExtension(identityKeyPair, certKey)
let issuerDN = makeIssuerDN(identityKeyPair)
let libp2pExtension = makeExtValues(identityKeyPair, certKey)
let validFromAsn1 = makeASN1Time(validFrom)
let validToAsn1 = makeASN1Time(validTo)
var certificate: ptr cert_buffer = nil
# Set the Subject and Issuer Name (self-signed)
ret = mbedtls_x509write_crt_set_subject_name(addr crt, "CN=libp2p.io")
if ret != 0:
raise newException(CertificateCreationError, "Failed to set subject name")
ret = mbedtls_x509write_crt_set_issuer_name(addr crt, "CN=libp2p.io")
if ret != 0:
raise newException(CertificateCreationError, "Failed to set issuer name")
# Set Validity Period
let notBefore = "19750101000000"
let notAfter = "40960101000000"
ret =
mbedtls_x509write_crt_set_validity(addr crt, notBefore.cstring, notAfter.cstring)
if ret != 0:
raise newException(
CertificateCreationError, "Failed to set certificate validity period"
)
# Assign the Public Key to the Certificate
mbedtls_x509write_crt_set_subject_key(addr crt, addr certKey)
mbedtls_x509write_crt_set_issuer_key(addr crt, addr certKey) # Self-signed
# Add the libp2p Extension
let oid = string.fromBytes(LIBP2P_EXT_OID_DER)
ret = mbedtls_x509write_crt_set_extension(
addr crt,
oid, # OID
oid.len.uint, # OID length
0, # Critical flag
unsafeAddr libp2pExtension[0], # Extension data
libp2pExtension.len.uint, # Extension data length
ret = cert_generate(
cert_ctx, certKey, certificate.addr, libp2pExtension.signature.unsafeAddr,
libp2pExtension.pubkey.unsafeAddr, issuerDN.cstring, validFromAsn1.cstring,
validToAsn1.cstring, encodingFormat.cert_format_t,
)
if ret != 0:
raise newException(
CertificateCreationError, "Failed to set libp2p extension in certificate"
)
if ret != CERT_SUCCESS:
raise
newException(CertificateCreationError, "Failed to generate certificate - " & $ret)
# Set Basic Constraints (optional, e.g., CA:FALSE)
ret = mbedtls_x509write_crt_set_basic_constraints(
addr crt,
0, # is_ca
-1, # max_pathlen (-1 for no limit)
)
if ret != 0:
raise newException(CertificateCreationError, "Failed to set basic constraints")
var privKDer: ptr cert_buffer = nil
ret = cert_serialize_privk(certKey, privKDer.addr, encodingFormat.cert_format_t)
if ret != CERT_SUCCESS:
raise newException(KeyGenerationError, "Failed to serialize privK - " & $ret)
# Set Key Usage
ret = mbedtls_x509write_crt_set_key_usage(
addr crt, MBEDTLS_X509_KU_DIGITAL_SIGNATURE or MBEDTLS_X509_KU_KEY_ENCIPHERMENT
)
if ret != 0:
raise newException(CertificateCreationError, "Failed to set key usage")
let outputCertificate = certificate.toSeq()
let outputPrivateKey = privKDer.toSeq()
# Set the MD algorithm
mbedtls_x509write_crt_set_md_alg(addr crt, SIGNATURE_ALG)
cert_free_buffer(certificate)
cert_free_buffer(privKDer)
# Generate a random serial number
const SERIAL_LEN = 20
var serialBuffer: array[SERIAL_LEN, byte]
ret = mbedtls_ctr_drbg_random(addr ctrDrbg, addr serialBuffer[0], SERIAL_LEN)
if ret != 0:
raise newException(CertificateCreationError, "Failed to generate serial number")
return CertificateX509(certificate: outputCertificate, privateKey: outputPrivateKey)
# Set the serial number
ret = mbedtls_x509write_crt_set_serial_raw(addr crt, addr serialBuffer[0], SERIAL_LEN)
if ret != 0:
raise newException(CertificateCreationError, "Failed to set serial number")
proc parseCertTime*(certTime: string): Time {.raises: [TimeParseError].} =
var timeNoZone = certTime[0 ..^ 5] # removes GMT part
# days with 1 digit have additional space -> strip it
timeNoZone = timeNoZone.replace(" ", " ")
# Prepare Buffer for Certificate Serialization
const CERT_BUFFER_SIZE = 4096
var certBuffer: array[CERT_BUFFER_SIZE, byte]
var outputCertificate: seq[byte]
if encodingFormat == EncodingFormat.DER:
let certLen: cint = mbedtls_x509write_crt_der(
addr crt,
addr certBuffer[0],
CERT_BUFFER_SIZE.uint,
mbedtls_ctr_drbg_random,
addr ctrDrbg,
)
if certLen < 0:
raise newException(
CertificateCreationError, "Failed to write certificate in DER format"
)
# Adjust the buffer to contain only the data
outputCertificate =
toSeq(certBuffer[(CERT_BUFFER_SIZE - certLen) ..< CERT_BUFFER_SIZE])
else:
let ret = mbedtls_x509write_crt_pem(
addr crt,
addr certBuffer[0],
CERT_BUFFER_SIZE.uint,
mbedtls_ctr_drbg_random,
addr ctrDrbg,
)
if ret != 0:
raise newException(
CertificateCreationError, "Failed to write certificate in PEM format"
)
let n = certBuffer.find(0'u8) # Find the index of the first null byte
outputCertificate = certBuffer[0 .. n - 1].toSeq()
# Serialize the Private Key
var privKeyBuffer: array[2048, byte]
var outputPrivateKey: seq[byte]
if encodingFormat == EncodingFormat.DER:
let privKeyLen = mbedtls_pk_write_key_der(
addr certKey, addr privKeyBuffer[0], privKeyBuffer.len.uint
)
if privKeyLen < 0:
raise newException(
CertificateCreationError, "Failed to write private key in DER format"
)
# Adjust the buffer to contain only the data
outputPrivateKey =
toSeq(privKeyBuffer[(privKeyBuffer.len - privKeyLen) ..< privKeyBuffer.len])
else:
let ret = mbedtls_pk_write_key_pem(
addr certKey, addr privKeyBuffer[0], privKeyBuffer.len.uint
)
if ret != 0:
raise newException(
CertificateCreationError, "Failed to write private key in PEM format"
)
let n = privKeyBuffer.find(0'u8) # Find the index of the first null byte
outputPrivateKey = privKeyBuffer[0 .. n - 1].toSeq()
# Return the Serialized Certificate and Private Key
return (outputCertificate, outputPrivateKey)
proc libp2pext(
p_ctx: pointer,
crt: ptr mbedtls_x509_crt,
oid: ptr mbedtls_x509_buf,
critical: cint,
p: ptr byte,
endPtr: ptr byte,
): cint {.cdecl.} =
## Callback function to parse the libp2p extension.
##
## This function is used as a callback by mbedtls during certificate parsing
## to extract the libp2p extension containing the SignedKey.
##
## Parameters:
## - `p_ctx`: Pointer to the P2pExtension object to store the parsed data.
## - `crt`: Pointer to the certificate being parsed.
## - `oid`: Pointer to the OID of the extension.
## - `critical`: Critical flag of the extension.
## - `p`: Pointer to the start of the extension data.
## - `endPtr`: Pointer to the end of the extension data.
##
## Returns:
## - 0 on success, or a negative error code on failure.
# Check if the OID matches the libp2p extension
if oid.len != LIBP2P_EXT_OID_DER.len:
return MBEDTLS_ERR_OID_NOT_FOUND # Extension not handled by this callback
for i in 0 ..< LIBP2P_EXT_OID_DER.len:
if ptrInc(oid.p, i.uint)[] != LIBP2P_EXT_OID_DER[i]:
return MBEDTLS_ERR_OID_NOT_FOUND # Extension not handled by this callback
var parsePtr = p
# Parse SEQUENCE tag and length
var len: uint
if mbedtls_asn1_get_tag(
addr parsePtr, endPtr, addr len, MBEDTLS_ASN1_CONSTRUCTED or MBEDTLS_ASN1_SEQUENCE
) != 0:
debug "Failed to parse SEQUENCE in libp2p extension"
return MBEDTLS_ERR_ASN1_UNEXPECTED_TAG
# Parse publicKey OCTET STRING
var pubKeyLen: uint
if mbedtls_asn1_get_tag(
addr parsePtr, endPtr, addr pubKeyLen, MBEDTLS_ASN1_OCTET_STRING
) != 0:
debug "Failed to parse publicKey OCTET STRING in libp2p extension"
return MBEDTLS_ERR_ASN1_UNEXPECTED_TAG
# Extract publicKey
var publicKey = newSeq[byte](int(pubKeyLen))
copyMem(addr publicKey[0], parsePtr, int(pubKeyLen))
parsePtr = ptrInc(parsePtr, pubKeyLen)
# Parse signature OCTET STRING
var signatureLen: uint
if mbedtls_asn1_get_tag(
addr parsePtr, endPtr, addr signatureLen, MBEDTLS_ASN1_OCTET_STRING
) != 0:
debug "Failed to parse signature OCTET STRING in libp2p extension"
return MBEDTLS_ERR_ASN1_UNEXPECTED_TAG
# Extract signature
var signature = newSeq[byte](int(signatureLen))
copyMem(addr signature[0], parsePtr, int(signatureLen))
# Store the publicKey and signature in the P2pExtension
let extension = cast[ptr P2pExtension](p_ctx)
extension[].publicKey = publicKey
extension[].signature = signature
return 0 # Success
const certTimeFormat = "MMM d hh:mm:ss yyyy"
const f = initTimeFormat(certTimeFormat)
return parse(timeNoZone, f, utc()).toTime()
proc parse*(
certificateDer: seq[byte]
@@ -540,23 +259,54 @@ proc parse*(
##
## Raises:
## - `CertificateParsingError` if certificate parsing fails.
var crt: mbedtls_x509_crt
mbedtls_x509_crt_init(addr crt)
defer:
mbedtls_x509_crt_free(addr crt)
var extension = P2pExtension()
let ret = mbedtls_x509_crt_parse_der_with_ext_cb(
addr crt,
unsafeAddr certificateDer[0],
certificateDer.len.uint,
0,
libp2pext,
addr extension,
)
if ret != 0:
let certDerBuffer = certificateDer.toCertBuffer()
let certParsed: ptr cert_parsed = nil
defer:
cert_free_parsed(certParsed)
let ret =
cert_parse(certDerBuffer.unsafeAddr, DER.cert_format_t(), certParsed.unsafeAddr)
if ret != CERT_SUCCESS:
raise newException(
CertificateParsingError, "Failed to parse certificate, error code: " & $ret
)
return P2pCertificate(certificate: crt, extension: extension)
var validFrom, validTo: Time
try:
validFrom = parseCertTime($certParsed.valid_from)
validTo = parseCertTime($certParsed.valid_to)
except TimeParseError as e:
raise newException(
CertificateParsingError, "Failed to parse certificate validity time, " & $e.msg
)
P2pCertificate(
extension: P2pExtension(
signature: certParsed.signature.toSeq(), publicKey: certParsed.ident_pubk.toSeq()
),
pubKeyDer: certParsed.cert_pbuk.toSeq(),
validFrom: validFrom,
validTo: validTo,
)
proc verify*(self: P2pCertificate): bool =
## Verifies that P2pCertificate has signature that was signed by owner of the certificate.
##
## Parameters:
## - `self`: The P2pCertificate.
##
## Returns:
## `true` if certificate is valid.
let currentTime = now().utc().toTime()
if not (currentTime >= self.validFrom and currentTime < self.validTo):
return false
var sig: Signature
var key: PublicKey
if sig.init(self.extension.signature) and key.init(self.extension.publicKey):
let msg = makeSignatureMessage(self.pubKeyDer)
return sig.verify(msg, key)
return false

View File

@@ -0,0 +1,81 @@
when defined(macosx):
{.passl: "-L/opt/homebrew/opt/openssl@3/lib -lcrypto".}
{.passc: "-I/opt/homebrew/opt/openssl@3/include".}
else:
{.passl: "-lcrypto".}
{.compile: "./certificate.c".}
type
cert_error_t* = int32
cert_format_t* {.size: sizeof(cuint).} = enum
CERT_FORMAT_DER = 0
CERT_FORMAT_PEM = 1
cert_buffer* {.pure, inheritable, bycopy.} = object
data*: ptr uint8
length*: csize_t
cert_parsed* {.pure, inheritable, bycopy.} = object
signature*: ptr cert_buffer
ident_pubk*: ptr cert_buffer
cert_pbuk*: ptr cert_buffer
valid_from*: cstring
valid_to*: cstring
cert_context_s* = object
cert_key_s* = object
cert_context_t* = ptr cert_context_s
cert_key_t* = ptr cert_key_s
const CERT_SUCCESS* = 0
proc cert_init_drbg*(
seed: cstring, seed_len: csize_t, ctx: ptr cert_context_t
): cert_error_t {.cdecl, importc: "cert_init_drbg".}
proc cert_generate_key*(
ctx: cert_context_t, out_arg: ptr cert_key_t
): cert_error_t {.cdecl, importc: "cert_generate_key".}
proc cert_serialize_privk*(
key: cert_key_t, out_arg: ptr ptr cert_buffer, format: cert_format_t
): cert_error_t {.cdecl, importc: "cert_serialize_privk".}
proc cert_serialize_pubk*(
key: cert_key_t, out_arg: ptr ptr cert_buffer, format: cert_format_t
): cert_error_t {.cdecl, importc: "cert_serialize_pubk".}
proc cert_generate*(
ctx: cert_context_t,
key: cert_key_t,
out_arg: ptr ptr cert_buffer,
signature: ptr cert_buffer,
ident_pubk: ptr cert_buffer,
cn: cstring,
validFrom: cstring,
validTo: cstring,
format: cert_format_t,
): cert_error_t {.cdecl, importc: "cert_generate".}
proc cert_parse*(
cert: ptr cert_buffer, format: cert_format_t, out_arg: ptr ptr cert_parsed
): cert_error_t {.cdecl, importc: "cert_parse".}
proc cert_free_ctr_drbg*(
ctx: cert_context_t
): void {.cdecl, importc: "cert_free_ctr_drbg".}
proc cert_free_key*(key: cert_key_t): void {.cdecl, importc: "cert_free_key".}
proc cert_free_buffer*(
buffer: ptr cert_buffer
): void {.cdecl, importc: "cert_free_buffer".}
proc cert_free_parsed*(
cert: ptr cert_parsed
): void {.cdecl, importc: "cert_free_parsed".}

View File

@@ -11,9 +11,8 @@
{.push raises: [].}
import std/strformat
import chronos, chronicles, strutils
import stew/[byteutils, endians2, results, objects]
import chronos, chronicles, strutils, results
import stew/[byteutils, endians2, objects]
import ../multicodec
import
transport,
@@ -302,7 +301,7 @@ proc new*(
flags: set[ServerFlags] = {},
): TorSwitch {.raises: [LPError], public.} =
var builder = SwitchBuilder.new().withRng(rng).withTransport(
proc(upgr: Upgrade): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TorTransport.new(torServer, flags, upgr)
)
if addresses.len != 0:
@@ -325,7 +324,7 @@ proc new*(
return torSwitch
method addTransport*(s: TorSwitch, t: Transport) =
doAssert(false, "not implemented!")
doAssert(false, "[TorSwitch.addTransport ] abstract method not implemented!")
method getTorTransport*(s: TorSwitch): Transport {.base.} =
return s.transports[0]

View File

@@ -66,7 +66,7 @@ method accept*(
## accept incoming connections
##
doAssert(false, "Not implemented!")
doAssert(false, "[Transport.accept] abstract method not implemented!")
method dial*(
self: Transport,
@@ -79,7 +79,7 @@ method dial*(
## dial a peer
##
doAssert(false, "Not implemented!")
doAssert(false, "[Transport.dial] abstract method not implemented!")
proc dial*(
self: Transport, address: MultiAddress, peerId: Opt[PeerId] = Opt.none(PeerId)

View File

@@ -45,7 +45,7 @@ type
method upgrade*(
self: Upgrade, conn: Connection, peerId: Opt[PeerId]
): Future[Muxer] {.async: (raises: [CancelledError, LPError], raw: true), base.} =
raiseAssert("Not implemented!")
raiseAssert("[Upgrade.upgrade] abstract method not implemented!")
proc secure*(
self: Upgrade, conn: Connection, peerId: Opt[PeerId]

View File

@@ -10,7 +10,8 @@
{.push raises: [].}
import std/[sets, options, macros]
import stew/[byteutils, results]
import stew/byteutils
import results
export results

View File

@@ -18,7 +18,8 @@
{.push raises: [].}
import stew/[byteutils, leb128, results]
import stew/[byteutils, leb128]
import results
export leb128, results
type

View File

@@ -20,7 +20,7 @@ when defined(windows): import winlean else: import posix
const
RTRANSPMA* = mapOr(TCP, WebSockets, UNIX)
TRANSPMA* = mapOr(RTRANSPMA, QUIC, UDP)
TRANSPMA* = mapOr(RTRANSPMA, QUIC, QUIC_V1, UDP)
proc initTAddress*(ma: MultiAddress): MaResult[TransportAddress] =
## Initialize ``TransportAddress`` with MultiAddress ``ma``.
@@ -75,7 +75,7 @@ proc connect*(
## ``bufferSize`` is size of internal buffer for transport.
##
if not (RTRANSPMA.match(ma)):
if not (TRANSPMA.match(ma)):
raise newException(MaInvalidAddress, "Incorrect or unsupported address!")
let transportAddress = initTAddress(ma).tryGet()

View File

@@ -1,8 +1,8 @@
site_name: nim-libp2p
repo_url: https://github.com/status-im/nim-libp2p
repo_name: status-im/nim-libp2p
site_url: https://status-im.github.io/nim-libp2p/docs
repo_url: https://github.com/vacp2p/nim-libp2p
repo_name: vacp2p/nim-libp2p
site_url: https://vacp2p.github.io/nim-libp2p/docs
# Can't find a way to point the edit to the .nim instead
# of the .md
edit_uri: ''

View File

@@ -8,7 +8,7 @@ import ../libp2p/protocols/connectivity/relay/[relay, client, utils]
type
SwitchCreator = proc(
ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
prov: TransportProvider = proc(upgr: Upgrade): Transport =
prov: TransportProvider = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TcpTransport.new({}, upgr),
relay: Relay = Relay.new(circuitRelayV1 = true),
): Switch {.gcsafe, raises: [LPError].}
@@ -319,7 +319,7 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
let nativeNode = swCreator(
ma = wsAddress,
prov = proc(upgr: Upgrade): Transport =
prov = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
WsTransport.new(upgr),
)
@@ -359,7 +359,7 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
.withRng(crypto.newRng())
.withMplex()
.withTransport(
proc(upgr: Upgrade): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
WsTransport.new(upgr)
)
.withNoise()

View File

@@ -1,15 +1,9 @@
{.used.}
import chronos, stew/[byteutils, results]
import chronos, results, stew/byteutils
import
../libp2p/
[
stream/connection,
transports/transport,
upgrademngrs/upgrade,
multiaddress,
errors,
]
[stream/connection, transports/transport, upgrademngrs/upgrade, multiaddress]
import ./helpers

View File

@@ -12,10 +12,12 @@ import ../libp2p/stream/chronosstream
import ../libp2p/muxers/mplex/lpchannel
import ../libp2p/protocols/secure/secure
import ../libp2p/switch
import ../libp2p/nameresolving/[nameresolver, mockresolver]
import ../libp2p/nameresolving/mockresolver
import "."/[asyncunit, errorhelpers]
export asyncunit, errorhelpers, mockresolver
import errorhelpers
import utils/async_tests
export async_tests, errorhelpers, mockresolver
const
StreamTransportTrackerName = "stream.transport"
@@ -47,7 +49,7 @@ template checkTrackers*() =
{.push warning[BareExcept]: off.}
try:
GC_fullCollect()
except:
except CatchableError:
discard
when defined(nimHasWarnBareExcept):
{.pop.}
@@ -90,25 +92,6 @@ proc new*(T: typedesc[TestBufferStream], writeHandler: WriteHandler): T =
testBufferStream.initStream()
testBufferStream
proc bridgedConnections*(): (Connection, Connection) =
let
connA = TestBufferStream()
connB = TestBufferStream()
connA.dir = Direction.Out
connB.dir = Direction.In
connA.initStream()
connB.initStream()
connA.writeHandler = proc(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
connB.pushData(data)
connB.writeHandler = proc(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
connA.pushData(data)
return (connA, connB)
macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
## Periodically checks a given condition until it is true or a timeout occurs.
##

View File

@@ -5,9 +5,7 @@ WORKDIR /workspace
COPY .pinned libp2p.nimble nim-libp2p/
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y python python3 python3-pip python3-venv curl
RUN mkdir .venv && python3 -m venv .venv && . .venv/bin/activate
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev
RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
@@ -15,7 +13,7 @@ COPY . nim-libp2p/
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./tests/hole-punching-interop/hole_punching.nim
FROM --platform=linux/amd64 debian:bookworm-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2
FROM --platform=linux/amd64 debian:bullseye-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2 libssl-dev
COPY --from=builder /workspace/nim-libp2p/hole-punching-tests /usr/bin/hole-punch-client
ENV RUST_BACKTRACE=1

View File

@@ -49,13 +49,10 @@ suite "FloodSub":
check topic == "foobar"
completionFut.complete(true)
let
nodes = generateNodes(2)
let nodes = generateNodes(2)
# start switches
nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
await subscribeNodes(nodes)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes[1].subscribe("foobar", handler)
await waitSub(nodes[0], nodes[1], "foobar")
@@ -71,48 +68,33 @@ suite "FloodSub":
agentA == "nim-libp2p"
agentB == "nim-libp2p"
await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())
await allFuturesThrowing(nodesFut.concat())
asyncTest "FloodSub basic publish/subscribe B -> A":
var completionFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
completionFut.complete(true)
let
nodes = generateNodes(2)
let nodes = generateNodes(2)
# start switches
nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
await subscribeNodes(nodes)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes[0].subscribe("foobar", handler)
await waitSub(nodes[1], nodes[0], "foobar")
check (await nodes[1].publish("foobar", "Hello!".toBytes())) > 0
check (await completionFut.wait(5.seconds)) == true
await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())
await allFuturesThrowing(nodesFut)
asyncTest "FloodSub validation should succeed":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
let
nodes = generateNodes(2)
let nodes = generateNodes(2)
# start switches
nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
await subscribeNodes(nodes)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes[1].subscribe("foobar", handler)
await waitSub(nodes[0], nodes[1], "foobar")
@@ -130,21 +112,15 @@ suite "FloodSub":
check (await nodes[0].publish("foobar", "Hello!".toBytes())) > 0
check (await handlerFut) == true
await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())
await allFuturesThrowing(nodesFut)
asyncTest "FloodSub validation should fail":
proc handler(topic: string, data: seq[byte]) {.async.} =
check false # if we get here, it should fail
let
nodes = generateNodes(2)
let nodes = generateNodes(2)
# start switches
nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
await subscribeNodes(nodes)
nodes[1].subscribe("foobar", handler)
await waitSub(nodes[0], nodes[1], "foobar")
@@ -159,23 +135,17 @@ suite "FloodSub":
discard await nodes[0].publish("foobar", "Hello!".toBytes())
await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())
await allFuturesThrowing(nodesFut)
asyncTest "FloodSub validation one fails and one succeeds":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foo"
handlerFut.complete(true)
let
nodes = generateNodes(2)
let nodes = generateNodes(2)
# start switches
nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
await subscribeNodes(nodes)
nodes[1].subscribe("foo", handler)
await waitSub(nodes[0], nodes[1], "foo")
nodes[1].subscribe("bar", handler)
@@ -194,10 +164,6 @@ suite "FloodSub":
check (await nodes[0].publish("foo", "Hello!".toBytes())) > 0
check (await nodes[0].publish("bar", "Hello!".toBytes())) > 0
await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())
await allFuturesThrowing(nodesFut)
asyncTest "FloodSub multiple peers, no self trigger":
var runs = 10
@@ -219,11 +185,10 @@ suite "FloodSub":
counter,
)
let
nodes = generateNodes(runs, triggerSelf = false)
nodesFut = nodes.mapIt(it.switch.start())
let nodes = generateNodes(runs, triggerSelf = false)
await subscribeNodes(nodes)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
for i in 0 ..< runs:
nodes[i].subscribe("foobar", futs[i][1])
@@ -241,9 +206,6 @@ suite "FloodSub":
await allFuturesThrowing(pubs)
await allFuturesThrowing(futs.mapIt(it[0]))
await allFuturesThrowing(nodes.mapIt(allFutures(it.switch.stop())))
await allFuturesThrowing(nodesFut)
asyncTest "FloodSub multiple peers, with self trigger":
var runs = 10
@@ -266,11 +228,10 @@ suite "FloodSub":
counter,
)
let
nodes = generateNodes(runs, triggerSelf = true)
nodesFut = nodes.mapIt(it.switch.start())
let nodes = generateNodes(runs, triggerSelf = true)
await subscribeNodes(nodes)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
for i in 0 ..< runs:
nodes[i].subscribe("foobar", futs[i][1])
@@ -299,10 +260,6 @@ suite "FloodSub":
# remove the topic tho
node.topics.len == 0
await allFuturesThrowing(nodes.mapIt(allFutures(it.switch.stop())))
await allFuturesThrowing(nodesFut)
asyncTest "FloodSub message size validation":
var messageReceived = 0
proc handler(topic: string, data: seq[byte]) {.async.} =
@@ -313,11 +270,9 @@ suite "FloodSub":
bigNode = generateNodes(1)
smallNode = generateNodes(1, maxMessageSize = 200)
# start switches
nodesFut =
await allFinished(bigNode[0].switch.start(), smallNode[0].switch.start())
startNodesAndDeferStop(bigNode & smallNode)
await connectNodesStar(bigNode & smallNode)
await subscribeNodes(bigNode & smallNode)
bigNode[0].subscribe("foo", handler)
smallNode[0].subscribe("foo", handler)
await waitSub(bigNode[0], smallNode[0], "foo")
@@ -337,10 +292,6 @@ suite "FloodSub":
check (await smallNode[0].publish("foo", bigMessage)) > 0
check (await bigNode[0].publish("foo", bigMessage)) > 0
await allFuturesThrowing(smallNode[0].switch.stop(), bigNode[0].switch.stop())
await allFuturesThrowing(nodesFut)
asyncTest "FloodSub message size validation 2":
var messageReceived = 0
proc handler(topic: string, data: seq[byte]) {.async.} =
@@ -350,11 +301,9 @@ suite "FloodSub":
bigNode1 = generateNodes(1, maxMessageSize = 20000000)
bigNode2 = generateNodes(1, maxMessageSize = 20000000)
# start switches
nodesFut =
await allFinished(bigNode1[0].switch.start(), bigNode2[0].switch.start())
startNodesAndDeferStop(bigNode1 & bigNode2)
await connectNodesStar(bigNode1 & bigNode2)
await subscribeNodes(bigNode1 & bigNode2)
bigNode2[0].subscribe("foo", handler)
await waitSub(bigNode1[0], bigNode2[0], "foo")
@@ -364,7 +313,3 @@ suite "FloodSub":
checkUntilTimeout:
messageReceived == 1
await allFuturesThrowing(bigNode1[0].switch.stop(), bigNode2[0].switch.stop())
await allFuturesThrowing(nodesFut)

View File

@@ -1,925 +0,0 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[options, deques, sequtils, enumerate, algorithm]
import stew/byteutils
import ../../libp2p/builders
import ../../libp2p/errors
import ../../libp2p/crypto/crypto
import ../../libp2p/stream/bufferstream
import ../../libp2p/protocols/pubsub/[pubsub, gossipsub, mcache, mcache, peertable]
import ../../libp2p/protocols/pubsub/rpc/[message, messages]
import ../../libp2p/switch
import ../../libp2p/muxers/muxer
import ../../libp2p/protocols/pubsub/rpc/protobuf
import utils
import ../helpers
# Write callback used by the TestBufferStream mocks in this file: it simply
# discards whatever bytes the peer "sends" over the faked connection.
proc noop(data: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
# Message used with `expect()` when a message-id computation must succeed.
const MsgIdSuccess = "msg id gen success"
suite "GossipSub internal":
teardown:
checkTrackers()
# subscribe must populate `topics`/`mesh`; unsubscribeAll must clear them
# while the topic stays in the `gossipsub` table (kept for fanning out).
asyncTest "subscribe/unsubscribeAll":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(topic: string, data: seq[byte]): Future[void] {.gcsafe, raises: [].} =
discard
let topic = "foobar"
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
# Seed 15 mocked peers into the gossipsub table for the topic.
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.sendConn = conn
gossipSub.gossipsub[topic].incl(peer)
# test via dynamic dispatch
gossipSub.PubSub.subscribe(topic, handler)
check:
gossipSub.topics.contains(topic)
gossipSub.gossipsub[topic].len() > 0
gossipSub.mesh[topic].len() > 0
# test via dynamic dispatch
gossipSub.PubSub.unsubscribeAll(topic)
check:
topic notin gossipSub.topics # not in local topics
topic notin gossipSub.mesh # not in mesh
topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# Default TopicParams must pass their own validation.
asyncTest "topic params":
let params = TopicParams.init()
params.validateParameters().tryGet()
# With 15 candidates and an empty mesh, rebalanceMesh must graft exactly
# `parameters.d` peers.
asyncTest "`rebalanceMesh` Degree Lo":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let topic = "foobar"
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.sendConn = conn
gossipSub.gossipsub[topic].incl(peer)
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
check gossipSub.mesh[topic].len == gossipSub.parameters.d
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# Peers with negative score must never be grafted into the mesh.
asyncTest "rebalanceMesh - bad peers":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let topic = "foobar"
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
# Scores run -11 .. 3 in steps of 1; only the 4 peers with score >= 0 qualify.
var scoreLow = -11'f64
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.sendConn = conn
peer.score = scoreLow
gossipSub.gossipsub[topic].incl(peer)
scoreLow += 1.0
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
# low score peers should not be in mesh, that's why the count must be 4
check gossipSub.mesh[topic].len == 4
for peer in gossipSub.mesh[topic]:
check peer.score >= 0.0
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# An over-full mesh (15 > dHigh) must be pruned down to d + dScore
# (the score-protected slots are kept).
asyncTest "`rebalanceMesh` Degree Hi":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let topic = "foobar"
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
gossipSub.grafted(peer, topic)
gossipSub.mesh[topic].incl(peer)
check gossipSub.mesh[topic].len == 15
gossipSub.rebalanceMesh(topic)
check gossipSub.mesh[topic].len ==
gossipSub.parameters.d + gossipSub.parameters.dScore
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# replenishFanout must fill the fanout set up to `parameters.d` peers
# drawn from the gossipsub table.
asyncTest "`replenishFanout` Degree Lo":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
var conns = newSeq[Connection]()
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
var peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
gossipSub.gossipsub[topic].incl(peer)
check gossipSub.gossipsub[topic].len == 15
gossipSub.replenishFanout(topic)
check gossipSub.fanout[topic].len == gossipSub.parameters.d
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# A fanout topic whose lastFanoutPubSub deadline has passed must be
# dropped entirely by dropFanoutPeers.
asyncTest "`dropFanoutPeers` drop expired fanout topics":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
await sleepAsync(5.millis) # allow the topic to expire
var conns = newSeq[Connection]()
for i in 0 ..< 6:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
gossipSub.fanout[topic].incl(peer)
check gossipSub.fanout[topic].len == gossipSub.parameters.d
gossipSub.dropFanoutPeers()
check topic notin gossipSub.fanout
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# Only the expired fanout topic is dropped; the one whose deadline is
# still in the future must survive.
asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic1 = "foobar1"
let topic2 = "foobar2"
gossipSub.topicParams[topic1] = TopicParams.init()
gossipSub.topicParams[topic2] = TopicParams.init()
gossipSub.fanout[topic1] = initHashSet[PubSubPeer]()
gossipSub.fanout[topic2] = initHashSet[PubSubPeer]()
gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
await sleepAsync(5.millis) # allow the topic to expire
var conns = newSeq[Connection]()
for i in 0 ..< 6:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
gossipSub.fanout[topic1].incl(peer)
gossipSub.fanout[topic2].incl(peer)
check gossipSub.fanout[topic1].len == gossipSub.parameters.d
check gossipSub.fanout[topic2].len == gossipSub.parameters.d
gossipSub.dropFanoutPeers()
check topic1 notin gossipSub.fanout
check topic2 in gossipSub.fanout
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# getGossipPeers must select up to `parameters.d` gossip targets that are
# NOT already in the mesh or fanout for the topic.
asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
var conns = newSeq[Connection]()
# generate mesh and fanout peers
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
if i mod 2 == 0:
gossipSub.fanout[topic].incl(peer)
else:
gossipSub.grafted(peer, topic)
gossipSub.mesh[topic].incl(peer)
# generate gossipsub (free standing) peers
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
gossipSub.gossipsub[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
inc seqno
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
check gossipSub.fanout[topic].len == 15
check gossipSub.mesh[topic].len == 15
check gossipSub.gossipsub[topic].len == 15
let peers = gossipSub.getGossipPeers()
check peers.len == gossipSub.parameters.d
# Selected gossip targets must not intersect mesh or fanout.
for p in peers.keys:
check not gossipSub.fanout.hasPeerId(topic, p.peerId)
check not gossipSub.mesh.hasPeerId(topic, p.peerId)
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# Topic present in fanout/gossipsub but absent from mesh: must not crash
# and must still pick d peers.
asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
var conns = newSeq[Connection]()
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
if i mod 2 == 0:
gossipSub.fanout[topic].incl(peer)
else:
gossipSub.gossipsub[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
inc seqno
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
let peers = gossipSub.getGossipPeers()
check peers.len == gossipSub.parameters.d
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# Topic absent from fanout: must not crash and must still pick d peers.
asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
var conns = newSeq[Connection]()
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
if i mod 2 == 0:
gossipSub.mesh[topic].incl(peer)
gossipSub.grafted(peer, topic)
else:
gossipSub.gossipsub[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
inc seqno
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
let peers = gossipSub.getGossipPeers()
check peers.len == gossipSub.parameters.d
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# All peers already in mesh or fanout and none free-standing in the
# gossipsub table: no gossip targets may be selected.
asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
var conns = newSeq[Connection]()
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
if i mod 2 == 0:
gossipSub.mesh[topic].incl(peer)
gossipSub.grafted(peer, topic)
else:
gossipSub.fanout[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
inc seqno
let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
let peers = gossipSub.getGossipPeers()
check peers.len == 0
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# Messages for topics we are not subscribed to must not enter the message
# cache (the handler asserting false guards against accidental delivery).
asyncTest "Drop messages of topics without subscription":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
check false
let topic = "foobar"
var conns = newSeq[Connection]()
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
inc seqno
let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
check gossipSub.mcache.msgs.len == 0
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# With disconnectBadPeers set, peers scored below graylistThreshold must be
# disconnected and removed from peer tables (including peersInIP).
asyncTest "Disconnect bad peers":
let gossipSub = TestGossipSub.init(newStandardSwitch())
gossipSub.parameters.disconnectBadPeers = true
gossipSub.parameters.appSpecificWeight = 1.0
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
check false
let topic = "foobar"
var conns = newSeq[Connection]()
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.sendConn = conn
peer.handler = handler
peer.appScore = gossipSub.parameters.graylistThreshold - 1
gossipSub.gossipsub.mgetOrPut(topic, initHashSet[PubSubPeer]()).incl(peer)
gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))
gossipSub.updateScores()
await sleepAsync(100.millis)
check:
# test our disconnect mechanics
gossipSub.gossipsub.peers(topic) == 0
# also ensure we cleanup properly the peersInIP table
gossipSub.peersInIP.len == 0
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# Subscriptions beyond topicsHigh are ignored and penalised.
asyncTest "subscription limits":
let gossipSub = TestGossipSub.init(newStandardSwitch())
gossipSub.topicsHigh = 10
var tooManyTopics: seq[string]
for i in 0 .. gossipSub.topicsHigh + 10:
tooManyTopics &= "topic" & $i
let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)
let conn = TestBufferStream.new(noop)
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))
check:
gossipSub.gossipsub.len == gossipSub.topicsHigh
peer.behaviourPenalty > 0.0
await conn.close()
await gossipSub.switch.stop()
# Undecodable RPC bytes must raise (and be surfaced to the caller).
asyncTest "invalid message bytes":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let peerId = randomPeerId()
let peer = gossipSub.getPubSubPeer(peerId)
expect(CatchableError):
await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])
await gossipSub.switch.stop()
asyncTest "rebalanceMesh fail due to backoff":
  ## Peers that are backing off for a topic must be refused by `handleGraft`
  ## (a control prune is returned) and must not be grafted back into the
  ## mesh by `rebalanceMesh`.
  let gossipSub = TestGossipSub.init(newStandardSwitch())
  let topic = "foobar"
  gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
  gossipSub.topicParams[topic] = TopicParams.init()
  var conns = newSeq[Connection]()
  gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
  for i in 0 ..< 15:
    let conn = TestBufferStream.new(noop)
    conns &= conn
    let peerId = randomPeerId()
    conn.peerId = peerId
    let peer = gossipSub.getPubSubPeer(peerId)
    peer.sendConn = conn
    gossipSub.gossipsub[topic].incl(peer)
    # Mark the peer as backing off for the next hour. Use `[]=` instead of
    # the deprecated `Table.add`, which can insert duplicate keys.
    gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]())[peerId] =
      Moment.now() + 1.hours
    let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
    # there must be a control prune due to violation of backoff
    check prunes.len != 0
  check gossipSub.peers.len == 15
  gossipSub.rebalanceMesh(topic)
  # expect 0 since they are all backing off
  check gossipSub.mesh[topic].len == 0
  await allFuturesThrowing(conns.mapIt(it.close()))
  await gossipSub.switch.stop()
# After every mesh member sends us a PRUNE (with backoff), the mesh entry
# for the topic must be cleaned up entirely.
asyncTest "rebalanceMesh fail due to backoff - remote":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let topic = "foobar"
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.sendConn = conn
gossipSub.gossipsub[topic].incl(peer)
gossipSub.mesh[topic].incl(peer)
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
check gossipSub.mesh[topic].len != 0
for i in 0 ..< 15:
let peerId = conns[i].peerId
let peer = gossipSub.getPubSubPeer(peerId)
gossipSub.handlePrune(
peer,
@[
ControlPrune(
topicID: topic,
peers: @[],
backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
)
],
)
# expect topic cleaned up since they are all pruned
check topic notin gossipSub.mesh
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# Audit scenario: with 6 inbound high-score and 7 outbound lower-score mesh
# members, pruning must keep the mesh above dLow and preserve at least dOut
# outbound peers.
asyncTest "rebalanceMesh Degree Hi - audit scenario":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let topic = "foobar"
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.parameters.dScore = 4
gossipSub.parameters.d = 6
gossipSub.parameters.dOut = 3
gossipSub.parameters.dHigh = 12
gossipSub.parameters.dLow = 4
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
for i in 0 ..< 6:
let conn = TestBufferStream.new(noop)
conn.transportDir = Direction.In
conns &= conn
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.score = 40.0
peer.sendConn = conn
gossipSub.grafted(peer, topic)
gossipSub.mesh[topic].incl(peer)
for i in 0 ..< 7:
let conn = TestBufferStream.new(noop)
conn.transportDir = Direction.Out
conns &= conn
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.score = 10.0
peer.sendConn = conn
gossipSub.grafted(peer, topic)
gossipSub.mesh[topic].incl(peer)
check gossipSub.mesh[topic].len == 13
gossipSub.rebalanceMesh(topic)
# ensure we are above dlow
check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
var outbound = 0
for peer in gossipSub.mesh[topic]:
if peer.sendConn.transportDir == Direction.Out:
inc outbound
# ensure we give priority and keep at least dOut outbound peers
check outbound >= gossipSub.parameters.dOut
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# IHAVE/IWANT handling: budget enforcement, deduplication of repeated
# message ids, and serving cached messages on IWANT.
asyncTest "handleIHave/Iwant tests":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
check false
proc handler2(topic: string, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.subscribe(topic, handler2)
# Instantiates 30 peers and connects all of them to the previously defined `gossipSub`
for i in 0 ..< 30:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
# Add the connection to `gossipSub`, to their `gossipSub.gossipsub` and `gossipSub.mesh` tables
gossipSub.grafted(peer, topic)
gossipSub.mesh[topic].incl(peer)
# Peers with no budget should not request messages
block:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IHAVE message that contains the same message ID three times
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
# Given the peer has no budget to request messages
peer.iHaveBudget = 0
# When a peer makes an IHAVE request for the a message that `gossipSub` has
let iwants = gossipSub.handleIHave(peer, @[msg])
# Then `gossipSub` should not generate an IWant message for the message,
check:
iwants.messageIDs.len == 0
# Peers with budget should request messages. If ids are repeated, only one request should be generated
block:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
let id = @[0'u8, 1, 2, 3]
# Build an IHAVE message that contains the same message ID three times
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
# Given the budget is not 0 (because it's not been overridden)
# When a peer makes an IHAVE request for the a message that `gossipSub` does not have
let iwants = gossipSub.handleIHave(peer, @[msg])
# Then `gossipSub` should generate an IWant message for the message
check:
iwants.messageIDs.len == 1
# Peers with budget should request messages. If ids are repeated, only one request should be generated
block:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IWANT message that contains the same message ID three times
let msg = ControlIWant(messageIDs: @[id, id, id])
# When a peer makes an IWANT request for the a message that `gossipSub` has
let genmsg = gossipSub.handleIWant(peer, @[msg])
# Then `gossipSub` should return the message
check:
genmsg.len == 1
check gossipSub.mcache.msgs.len == 1
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
# Spins up two connected gossipsub nodes subscribed to "foobar".
# Returns both GossipSub instances plus a shared set that collects every
# payload delivered to node 0's handler.
proc setupTest(): Future[
tuple[
gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]
]
] {.async.} =
let nodes = generateNodes(2, gossip = true, verifySignature = false)
discard await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
await nodes[1].switch.connect(
nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
)
var receivedMessages = new(HashSet[seq[byte]])
# Node 0 records payloads; node 1 just subscribes so the mesh forms.
proc handlerA(topic: string, data: seq[byte]) {.async.} =
receivedMessages[].incl(data)
proc handlerB(topic: string, data: seq[byte]) {.async.} =
discard
nodes[0].subscribe("foobar", handlerA)
nodes[1].subscribe("foobar", handlerB)
await waitSubGraph(nodes, "foobar")
var gossip0: GossipSub = GossipSub(nodes[0])
var gossip1: GossipSub = GossipSub(nodes[1])
return (gossip0, gossip1, receivedMessages)
# Stops both switches created by `setupTest`.
proc teardownTest(gossip0: GossipSub, gossip1: GossipSub) {.async.} =
await allFuturesThrowing(gossip0.switch.stop(), gossip1.switch.stop())
# Creates two messages of the given sizes on gossip1, caches them in its
# mcache and registers their ids as already-IHAVE'd towards gossip0, so a
# later IHAVE broadcast triggers IWANT replies. Returns the ids and payloads.
proc createMessages(
gossip0: GossipSub, gossip1: GossipSub, size1: int, size2: int
): tuple[iwantMessageIds: seq[MessageId], sentMessages: HashSet[seq[byte]]] =
var iwantMessageIds = newSeq[MessageId]()
var sentMessages = initHashSet[seq[byte]]()
for i, size in enumerate([size1, size2]):
let data = newSeqWith(size, i.byte)
sentMessages.incl(data)
let msg =
Message.init(gossip1.peerInfo.peerId, data, "foobar", some(uint64(i + 1)))
let iwantMessageId = gossip1.msgIdProvider(msg).expect(MsgIdSuccess)
iwantMessageIds.add(iwantMessageId)
gossip1.mcache.put(iwantMessageId, msg)
let peer = gossip1.peers[(gossip0.peerInfo.peerId)]
peer.sentIHaves[^1].incl(iwantMessageId)
return (iwantMessageIds, sentMessages)
asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
# This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
# Expected: Both messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let messageSize = gossip1.maxMessageSize div 2 + 1
let (iwantMessageIds, sentMessages) =
createMessages(gossip0, gossip1, messageSize, messageSize)
# Advertise both cached messages via IHAVE; gossip0 will reply with IWANT.
gossip1.broadcast(
gossip1.mesh["foobar"],
RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: iwantMessageIds)]
)
)
),
isHighPriority = false,
)
checkUntilTimeout:
receivedMessages[] == sentMessages
check receivedMessages[].len == 2
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
# This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
# Expected: No messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let messageSize = gossip1.maxMessageSize + 10
let (bigIWantMessageIds, sentMessages) =
createMessages(gossip0, gossip1, messageSize, messageSize)
gossip1.broadcast(
gossip1.mesh["foobar"],
RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
)
)
),
isHighPriority = false,
)
# Give the (unexpected) deliveries a chance to arrive before asserting none did.
await sleepAsync(300.milliseconds)
checkUntilTimeout:
receivedMessages[].len == 0
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
# This test checks if two messages, both below the maxSize, are correctly processed and sent.
# Expected: Both messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let size1 = gossip1.maxMessageSize div 2
let size2 = gossip1.maxMessageSize div 3
let (bigIWantMessageIds, sentMessages) =
createMessages(gossip0, gossip1, size1, size2)
gossip1.broadcast(
gossip1.mesh["foobar"],
RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
)
)
),
isHighPriority = false,
)
checkUntilTimeout:
receivedMessages[] == sentMessages
check receivedMessages[].len == 2
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
# This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
# Expected: Only the smaller message should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let maxSize = gossip1.maxMessageSize
let size1 = maxSize div 2
let size2 = maxSize + 10
let (bigIWantMessageIds, sentMessages) =
createMessages(gossip0, gossip1, size1, size2)
gossip1.broadcast(
gossip1.mesh["foobar"],
RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
)
)
),
isHighPriority = false,
)
# The small message is all 0-bytes, the big one all 1-bytes, so the
# lexicographically smaller payload is also the size-smaller one.
var smallestSet: HashSet[seq[byte]]
let seqs = toSeq(sentMessages)
if seqs[0] < seqs[1]:
smallestSet.incl(seqs[0])
else:
smallestSet.incl(seqs[1])
checkUntilTimeout:
receivedMessages[] == smallestSet
check receivedMessages[].len == 1
await teardownTest(gossip0, gossip1)

File diff suppressed because it is too large Load Diff

View File

@@ -1,387 +0,0 @@
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import sequtils, options, tables, sets
import chronos, stew/byteutils, chronicles
import
utils,
../../libp2p/[
errors,
peerid,
peerinfo,
stream/connection,
stream/bufferstream,
crypto/crypto,
protocols/pubsub/pubsub,
protocols/pubsub/gossipsub,
protocols/pubsub/pubsubpeer,
protocols/pubsub/peertable,
protocols/pubsub/rpc/messages,
]
import ../helpers
# Repeatedly evaluates `call` (an expression yielding the number of peers
# published to), sleeping `wait` between attempts, until at least `require`
# publishes succeeded or `timeout` elapses; asserts on failure.
template tryPublish(
call: untyped, require: int, wait = 10.milliseconds, timeout = 10.seconds
): untyped =
var
expiration = Moment.now() + timeout
pubs = 0
while pubs < require and Moment.now() < expiration:
pubs = pubs + call
await sleepAsync(wait)
doAssert pubs >= require, "Failed to publish!"
suite "GossipSub":
teardown:
checkTrackers()
# With a very sparse mesh (d = 1), a single publish must not reach every
# node directly; the remaining nodes must be reached via IHAVE/IWANT
# control-message gossip.
asyncTest "e2e - GossipSub with multiple peers - control deliver (sparse)":
var runs = 10
let
nodes = generateNodes(runs, gossip = true, triggerSelf = true)
nodesFut = nodes.mapIt(it.switch.start())
await subscribeSparseNodes(nodes)
var seen: Table[string, int]
var seenFut = newFuture[void]()
for i in 0 ..< nodes.len:
let dialer = nodes[i]
let dgossip = GossipSub(dialer)
# Force an extremely sparse mesh so delivery relies on gossip.
dgossip.parameters.dHigh = 2
dgossip.parameters.dLow = 1
dgossip.parameters.d = 1
dgossip.parameters.dOut = 1
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async.} =
seen.mgetOrPut(peerName, 0).inc()
info "seen up", count = seen.len
check topic == "foobar"
if not seenFut.finished() and seen.len >= runs:
seenFut.complete()
dialer.subscribe("foobar", handler)
await waitSub(nodes[0], dialer, "foobar")
# we want to test ping pong deliveries via control Iwant/Ihave, so we publish just in a tap
let publishedTo = nodes[0].publish(
"foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)
).await
check:
publishedTo != 0
publishedTo != runs
await wait(seenFut, 5.minutes)
check:
seen.len >= runs
for k, v in seen.pairs:
check:
v >= 1
await allFuturesThrowing(nodes.mapIt(allFutures(it.switch.stop())))
await allFuturesThrowing(nodesFut)
asyncTest "GossipSub invalid topic subscription":
  ## A subscriptionValidator rejecting "foobar" must fire when the remote
  ## peer subscribes to that topic.
  var handlerFut = newFuture[bool]()
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"
    handlerFut.complete(true)

  let
    nodes = generateNodes(2, gossip = true)
    # start switches
    nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())

  # We must subscribe before setting the validator
  nodes[0].subscribe("foobar", handler)

  var gossip = GossipSub(nodes[0])
  let invalidDetected = newFuture[void]()
  gossip.subscriptionValidator = proc(topic: string): bool =
    if topic == "foobar":
      try:
        invalidDetected.complete()
      except CatchableError:
        # Catch only recoverable errors — a bare `except:` would also
        # swallow defects.
        raise newException(Defect, "Exception during subscriptionValidator")
      false
    else:
      true

  await subscribeNodes(nodes)
  nodes[1].subscribe("foobar", handler)
  await invalidDetected.wait(10.seconds)

  await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())
  await allFuturesThrowing(nodesFut.concat())
asyncTest "GossipSub test directPeers":
  ## Direct peers must connect and exchange subscriptions even without an
  ## explicit `subscribeNodes` call; the rejecting validator proves the
  ## subscription arrived.
  let nodes = generateNodes(2, gossip = true)
  await allFutures(nodes[0].switch.start(), nodes[1].switch.start())

  await GossipSub(nodes[0]).addDirectPeer(
    nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
  )

  let invalidDetected = newFuture[void]()
  GossipSub(nodes[0]).subscriptionValidator = proc(topic: string): bool =
    if topic == "foobar":
      try:
        invalidDetected.complete()
      except CatchableError:
        # Catch only recoverable errors — a bare `except:` would also
        # swallow defects.
        raise newException(Defect, "Exception during subscriptionValidator")
      false
    else:
      true

  # DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
  ### await subscribeNodes(nodes)
  proc handler(topic: string, data: seq[byte]) {.async.} =
    discard

  nodes[1].subscribe("foobar", handler)
  await invalidDetected.wait(10.seconds)

  await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())
# Messages must always be forwarded over direct-peer links even though
# direct peers are never part of the mesh.
asyncTest "GossipSub directPeers: always forward messages":
let
nodes = generateNodes(3, gossip = true)
# start switches
nodesFut = await allFinished(
nodes[0].switch.start(), nodes[1].switch.start(), nodes[2].switch.start()
)
# Chain of direct links: 0 <-> 1 <-> 2.
await GossipSub(nodes[0]).addDirectPeer(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
await GossipSub(nodes[1]).addDirectPeer(
nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
)
await GossipSub(nodes[1]).addDirectPeer(
nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs
)
await GossipSub(nodes[2]).addDirectPeer(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
proc noop(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
nodes[0].subscribe("foobar", noop)
nodes[1].subscribe("foobar", noop)
nodes[2].subscribe("foobar", handler)
tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1
await handlerFut.wait(2.seconds)
# peer shouldn't be in our mesh
check "foobar" notin GossipSub(nodes[0]).mesh
check "foobar" notin GossipSub(nodes[1]).mesh
check "foobar" notin GossipSub(nodes[2]).mesh
await allFuturesThrowing(
nodes[0].switch.stop(), nodes[1].switch.stop(), nodes[2].switch.stop()
)
await allFuturesThrowing(nodesFut.concat())
# A direct peer must keep receiving messages even when its score is below
# graylistThreshold and disconnectBadPeers is enabled.
asyncTest "GossipSub directPeers: don't kick direct peer with low score":
let
nodes = generateNodes(2, gossip = true)
# start switches
nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
await GossipSub(nodes[0]).addDirectPeer(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
await GossipSub(nodes[1]).addDirectPeer(
nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
)
# Impossible-to-meet threshold: every peer scores "bad".
GossipSub(nodes[1]).parameters.disconnectBadPeers = true
GossipSub(nodes[1]).parameters.graylistThreshold = 100000
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
nodes[0].subscribe("foobar", handler)
nodes[1].subscribe("foobar", handler)
tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1
await handlerFut
GossipSub(nodes[1]).updateScores()
# peer shouldn't be in our mesh
check:
GossipSub(nodes[1]).peerStats[nodes[0].switch.peerInfo.peerId].score <
GossipSub(nodes[1]).parameters.graylistThreshold
GossipSub(nodes[1]).updateScores()
handlerFut = newFuture[void]()
tryPublish await nodes[0].publish("foobar", toBytes("hellow2")), 1
# Without directPeers, this would fail
await handlerFut.wait(1.seconds)
await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())
await allFuturesThrowing(nodesFut.concat())
asyncTest "GossipSub peers disconnections mechanics":
var runs = 10
let
nodes = generateNodes(runs, gossip = true, triggerSelf = true)
nodesFut = nodes.mapIt(it.switch.start())
await subscribeNodes(nodes)
var seen: Table[string, int]
var seenFut = newFuture[void]()
for i in 0 ..< nodes.len:
let dialer = nodes[i]
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async.} =
seen.mgetOrPut(peerName, 0).inc()
check topic == "foobar"
if not seenFut.finished() and seen.len >= runs:
seenFut.complete()
dialer.subscribe("foobar", handler)
await waitSubGraph(nodes, "foobar")
# ensure peer stats are stored properly and kept properly
check:
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
tryPublish await wait(
nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
1.minutes,
), 1, 5.seconds, 3.minutes
await wait(seenFut, 5.minutes)
check:
seen.len >= runs
for k, v in seen.pairs:
check:
v >= 1
for node in nodes:
var gossip = GossipSub(node)
check:
"foobar" in gossip.gossipsub
gossip.fanout.len == 0
gossip.mesh["foobar"].len > 0
# Removing some subscriptions
for i in 0 ..< runs:
if i mod 3 != 0:
nodes[i].unsubscribeAll("foobar")
# Waiting 2 heartbeats
for _ in 0 .. 1:
let evnt = newAsyncEvent()
GossipSub(nodes[0]).heartbeatEvents &= evnt
await evnt.wait()
# ensure peer stats are stored properly and kept properly
check:
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
# Adding again subscriptions
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
for i in 0 ..< runs:
if i mod 3 != 0:
nodes[i].subscribe("foobar", handler)
# Waiting 2 heartbeats
for _ in 0 .. 1:
let evnt = newAsyncEvent()
GossipSub(nodes[0]).heartbeatEvents &= evnt
await evnt.wait()
# ensure peer stats are stored properly and kept properly
check:
GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
await allFuturesThrowing(nodes.mapIt(allFutures(it.switch.stop())))
await allFuturesThrowing(nodesFut)
asyncTest "GossipSub scoring - decayInterval":
let nodes = generateNodes(2, gossip = true)
var gossip = GossipSub(nodes[0])
# MacOs has some nasty jitter when sleeping
# (up to 7 ms), so we need some pretty long
# sleeps to be safe here
gossip.parameters.decayInterval = 300.milliseconds
let
# start switches
nodesFut = await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async.} =
handlerFut.complete()
await subscribeNodes(nodes)
nodes[0].subscribe("foobar", handler)
nodes[1].subscribe("foobar", handler)
tryPublish await nodes[0].publish("foobar", toBytes("hello")), 1
await handlerFut
gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries =
100
gossip.topicParams["foobar"].meshMessageDeliveriesDecay = 0.9
await sleepAsync(1500.milliseconds)
# We should have decayed 5 times, though allowing 4..6
check:
gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries in
50.0 .. 66.0
await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())
await allFuturesThrowing(nodesFut.concat())

View File

@@ -0,0 +1,196 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[sequtils]
import stew/byteutils
import chronicles
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../helpers
suite "GossipSub Fanout Management":
teardown:
checkTrackers()
asyncTest "`replenishFanout` Degree Lo":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
gossipSub.topicParams[topic] = TopicParams.init()
var conns = newSeq[Connection]()
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
var peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
gossipSub.gossipsub[topic].incl(peer)
check gossipSub.gossipsub[topic].len == 15
gossipSub.replenishFanout(topic)
check gossipSub.fanout[topic].len == gossipSub.parameters.d
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "`dropFanoutPeers` drop expired fanout topics":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
await sleepAsync(5.millis) # allow the topic to expire
var conns = newSeq[Connection]()
for i in 0 ..< 6:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
gossipSub.fanout[topic].incl(peer)
check gossipSub.fanout[topic].len == gossipSub.parameters.d
gossipSub.dropFanoutPeers()
check topic notin gossipSub.fanout
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic1 = "foobar1"
let topic2 = "foobar2"
gossipSub.topicParams[topic1] = TopicParams.init()
gossipSub.topicParams[topic2] = TopicParams.init()
gossipSub.fanout[topic1] = initHashSet[PubSubPeer]()
gossipSub.fanout[topic2] = initHashSet[PubSubPeer]()
gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
await sleepAsync(5.millis) # allow the topic to expire
var conns = newSeq[Connection]()
for i in 0 ..< 6:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
gossipSub.fanout[topic1].incl(peer)
gossipSub.fanout[topic2].incl(peer)
check gossipSub.fanout[topic1].len == gossipSub.parameters.d
check gossipSub.fanout[topic2].len == gossipSub.parameters.d
gossipSub.dropFanoutPeers()
check topic1 notin gossipSub.fanout
check topic2 in gossipSub.fanout
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "e2e - GossipSub send over fanout A -> B":
var passed = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete()
let nodes = generateNodes(2, gossip = true)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes[1].subscribe("foobar", handler)
await waitSub(nodes[0], nodes[1], "foobar")
var observed = 0
let
obs1 = PubSubObserver(
onRecv: proc(peer: PubSubPeer, msgs: var RPCMsg) =
inc observed
)
obs2 = PubSubObserver(
onSend: proc(peer: PubSubPeer, msgs: var RPCMsg) =
inc observed
)
nodes[1].addObserver(obs1)
nodes[0].addObserver(obs2)
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
var gossip1: GossipSub = GossipSub(nodes[0])
var gossip2: GossipSub = GossipSub(nodes[1])
check:
"foobar" in gossip1.gossipsub
gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
await passed.wait(2.seconds)
check observed == 2
asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
var passed = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete()
let nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 10.minutes)
startNodesAndDeferStop(nodes)
GossipSub(nodes[1]).parameters.d = 0
GossipSub(nodes[1]).parameters.dHigh = 0
GossipSub(nodes[1]).parameters.dLow = 0
await connectNodesStar(nodes)
nodes[0].subscribe("foobar", handler)
nodes[1].subscribe("foobar", handler)
let gsNode = GossipSub(nodes[1])
checkUntilTimeout:
gsNode.mesh.getOrDefault("foobar").len == 0
GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0
(
GossipSub(nodes[0]).gossipsub.getOrDefault("foobar").len == 1 or
GossipSub(nodes[0]).fanout.getOrDefault("foobar").len == 1
)
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
check:
GossipSub(nodes[0]).fanout.getOrDefault("foobar").len > 0
GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0
await passed.wait(2.seconds)
trace "test done, stopping..."

View File

@@ -0,0 +1,717 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[sequtils]
import stew/byteutils
import chronicles
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../libp2p/protocols/pubsub/rpc/[message]
import ../helpers, ../utils/[futures]
const MsgIdSuccess = "msg id gen success"
suite "GossipSub Gossip Protocol":
teardown:
checkTrackers()
asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
var conns = newSeq[Connection]()
# generate mesh and fanout peers
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
if i mod 2 == 0:
gossipSub.fanout[topic].incl(peer)
else:
gossipSub.grafted(peer, topic)
gossipSub.mesh[topic].incl(peer)
# generate gossipsub (free standing) peers
for i in 0 ..< 15:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
gossipSub.gossipsub[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
inc seqno
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
check gossipSub.fanout[topic].len == 15
check gossipSub.mesh[topic].len == 15
check gossipSub.gossipsub[topic].len == 15
let peers = gossipSub.getGossipPeers()
check peers.len == gossipSub.parameters.d
for p in peers.keys:
check not gossipSub.fanout.hasPeerId(topic, p.peerId)
check not gossipSub.mesh.hasPeerId(topic, p.peerId)
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
var conns = newSeq[Connection]()
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
if i mod 2 == 0:
gossipSub.fanout[topic].incl(peer)
else:
gossipSub.gossipsub[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
inc seqno
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
let peers = gossipSub.getGossipPeers()
check peers.len == gossipSub.parameters.d
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
var conns = newSeq[Connection]()
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
if i mod 2 == 0:
gossipSub.mesh[topic].incl(peer)
gossipSub.grafted(peer, topic)
else:
gossipSub.gossipsub[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
inc seqno
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
let peers = gossipSub.getGossipPeers()
check peers.len == gossipSub.parameters.d
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
discard
let topic = "foobar"
gossipSub.topicParams[topic] = TopicParams.init()
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
var conns = newSeq[Connection]()
for i in 0 ..< 30:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
if i mod 2 == 0:
gossipSub.mesh[topic].incl(peer)
gossipSub.grafted(peer, topic)
else:
gossipSub.fanout[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
inc seqno
let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
let peers = gossipSub.getGossipPeers()
check peers.len == 0
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "handleIHave/Iwant tests":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
check false
proc handler2(topic: string, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
gossipSub.subscribe(topic, handler2)
# Instantiates 30 peers and connects all of them to the previously defined `gossipSub`
for i in 0 ..< 30:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
# Add the connection to `gossipSub`, to their `gossipSub.gossipsub` and `gossipSub.mesh` tables
gossipSub.grafted(peer, topic)
gossipSub.mesh[topic].incl(peer)
# Peers with no budget should not request messages
block:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IHAVE message that contains the same message ID three times
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
# Given the peer has no budget to request messages
peer.iHaveBudget = 0
# When a peer makes an IHAVE request for the a message that `gossipSub` has
let iwants = gossipSub.handleIHave(peer, @[msg])
# Then `gossipSub` should not generate an IWant message for the message,
check:
iwants.messageIDs.len == 0
# Peers with budget should request messages. If ids are repeated, only one request should be generated
block:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
let id = @[0'u8, 1, 2, 3]
# Build an IHAVE message that contains the same message ID three times
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
# Given the budget is not 0 (because it's not been overridden)
# When a peer makes an IHAVE request for the a message that `gossipSub` does not have
let iwants = gossipSub.handleIHave(peer, @[msg])
# Then `gossipSub` should generate an IWant message for the message
check:
iwants.messageIDs.len == 1
# Peers with budget should request messages. If ids are repeated, only one request should be generated
block:
# Define a new connection
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IWANT message that contains the same message ID three times
let msg = ControlIWant(messageIDs: @[id, id, id])
# When a peer makes an IWANT request for the a message that `gossipSub` has
let genmsg = gossipSub.handleIWant(peer, @[msg])
# Then `gossipSub` should return the message
check:
genmsg.len == 1
check gossipSub.mcache.msgs.len == 1
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "messages sent to peers not in the mesh are propagated via gossip":
let
numberOfNodes = 5
topic = "foobar"
dValues = DValues(dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1))
nodes = generateNodes(numberOfNodes, gossip = true, dValues = some(dValues))
startNodesAndDeferStop(nodes)
# All nodes are checking for iHave messages
var receivedIHavesRef = new seq[int]
addIHaveObservers(nodes, topic, receivedIHavesRef)
# And are interconnected
await connectNodesStar(nodes)
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForPeersInTable(
nodes, topic, newSeqWith(numberOfNodes, 4), PeerTableType.Gossipsub
)
# When node 0 sends a message
check (await nodes[0].publish(topic, "Hello!".toBytes())) > 0
await waitForHeartbeat()
# At least one of the nodes should have received an iHave message
# The check is made this way because the mesh structure changes from run to run
let receivedIHaves = receivedIHavesRef[]
check:
anyIt(receivedIHaves, it > 0)
asyncTest "adaptive gossip dissemination, dLazy and gossipFactor to 0":
let
numberOfNodes = 20
topic = "foobar"
dValues = DValues(
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(0)
)
nodes = generateNodes(
numberOfNodes,
gossip = true,
dValues = some(dValues),
gossipFactor = some(0.float),
)
startNodesAndDeferStop(nodes)
# All nodes are checking for iHave messages
var receivedIHavesRef = new seq[int]
addIHaveObservers(nodes, topic, receivedIHavesRef)
# And are connected to node 0
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When node 0 sends a message
check (await nodes[0].publish(topic, "Hello!".toBytes())) == 3
await waitForHeartbeat()
# None of the nodes should have received an iHave message
let receivedIHaves = receivedIHavesRef[]
check:
filterIt(receivedIHaves, it > 0).len == 0
asyncTest "adaptive gossip dissemination, with gossipFactor priority":
let
numberOfNodes = 20
topic = "foobar"
dValues = DValues(
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(4)
)
nodes = generateNodes(
numberOfNodes, gossip = true, dValues = some(dValues), gossipFactor = some(0.5)
)
startNodesAndDeferStop(nodes)
# All nodes are checking for iHave messages
var receivedIHavesRef = new seq[int]
addIHaveObservers(nodes, topic, receivedIHavesRef)
# And are connected to node 0
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForPeersInTable(@[nodes[0]], topic, @[19], PeerTableType.Gossipsub)
# When node 0 sends a message
check (await nodes[0].publish(topic, "Hello!".toBytes())) in 2 .. 3
await waitForHeartbeat(2)
# At least 8 of the nodes should have received an iHave message
# That's because the gossip factor is 0.5 over 16 available nodes
let receivedIHaves = receivedIHavesRef[]
check:
filterIt(receivedIHaves, it > 0).len >= 8
asyncTest "adaptive gossip dissemination, with dLazy priority":
let
numberOfNodes = 20
topic = "foobar"
dValues = DValues(
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(6)
)
nodes = generateNodes(
numberOfNodes,
gossip = true,
dValues = some(dValues),
gossipFactor = some(0.float),
)
startNodesAndDeferStop(nodes)
# All nodes are checking for iHave messages
var receivedIHavesRef = new seq[int]
addIHaveObservers(nodes, topic, receivedIHavesRef)
# And are connected to node 0
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForPeersInTable(@[nodes[0]], topic, @[19], PeerTableType.Gossipsub)
# When node 0 sends a message
check (await nodes[0].publish(topic, "Hello!".toBytes())) in 2 .. 3
await waitForHeartbeat(2)
# At least 6 of the nodes should have received an iHave message
# That's because the dLazy is 6
let receivedIHaves = receivedIHavesRef[]
check:
filterIt(receivedIHaves, it > 0).len >= dValues.dLazy.get()
asyncTest "iDontWant messages are broadcast immediately after receiving the first message instance":
let
numberOfNodes = 3
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true)
startNodesAndDeferStop(nodes)
# All nodes are checking for iDontWant messages
var receivedIDontWantsRef = new seq[int]
addIDontWantObservers(nodes, receivedIDontWantsRef)
# And are connected in a line
await connectNodes(nodes[0], nodes[1])
await connectNodes(nodes[1], nodes[2])
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForPeersInTable(nodes, topic, @[1, 2, 1], PeerTableType.Gossipsub)
# When node 0 sends a large message
let largeMsg = newSeq[byte](1000)
check (await nodes[0].publish(topic, largeMsg)) == 1
await waitForHeartbeat()
# Only node 2 should have received the iDontWant message
let receivedIDontWants = receivedIDontWantsRef[]
check:
receivedIDontWants[0] == 0
receivedIDontWants[1] == 0
receivedIDontWants[2] == 1
asyncTest "e2e - GossipSub peer exchange":
# A, B & C are subscribed to something
# B unsubcribe from it, it should send
# PX to A & C
#
# C sent his SPR, not A
proc handler(topic: string, data: seq[byte]) {.async.} =
discard # not used in this test
let nodes =
generateNodes(2, gossip = true, enablePX = true) &
generateNodes(1, gossip = true, sendSignedPeerRecord = true)
startNodesAndDeferStop(nodes)
var
gossip0 = GossipSub(nodes[0])
gossip1 = GossipSub(nodes[1])
gossip2 = GossipSub(nodes[2])
await connectNodesStar(nodes)
nodes[0].subscribe("foobar", handler)
nodes[1].subscribe("foobar", handler)
nodes[2].subscribe("foobar", handler)
for x in 0 ..< 3:
for y in 0 ..< 3:
if x != y:
await waitSub(nodes[x], nodes[y], "foobar")
# Setup record handlers for all nodes
var
passed0: Future[void] = newFuture[void]()
passed2: Future[void] = newFuture[void]()
gossip0.routingRecordsHandler.add(
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
check:
tag == "foobar"
peers.len == 2
peers[0].record.isSome() xor peers[1].record.isSome()
passed0.complete()
)
gossip1.routingRecordsHandler.add(
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
raiseAssert "should not get here"
)
gossip2.routingRecordsHandler.add(
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
check:
tag == "foobar"
peers.len == 2
peers[0].record.isSome() xor peers[1].record.isSome()
passed2.complete()
)
# Unsubscribe from the topic
nodes[1].unsubscribe("foobar", handler)
# Then verify what nodes receive the PX
let results = await waitForStates(@[passed0, passed2], HEARTBEAT_TIMEOUT)
check:
results[0].isCompleted()
results[1].isCompleted()
asyncTest "e2e - iDontWant":
# 3 nodes: A <=> B <=> C
# (A & C are NOT connected). We pre-emptively send a dontwant from C to B,
# and check that B doesn't relay the message to C.
# We also check that B sends IDONTWANT to C, but not A
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
ok(newSeq[byte](10))
let nodes = generateNodes(3, gossip = true, msgIdProvider = dumbMsgIdProvider)
startNodesAndDeferStop(nodes)
await nodes[0].switch.connect(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
await nodes[1].switch.connect(
nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs
)
let bFinished = newFuture[void]()
proc handlerA(topic: string, data: seq[byte]) {.async.} =
discard
proc handlerB(topic: string, data: seq[byte]) {.async.} =
bFinished.complete()
proc handlerC(topic: string, data: seq[byte]) {.async.} =
doAssert false
nodes[0].subscribe("foobar", handlerA)
nodes[1].subscribe("foobar", handlerB)
nodes[2].subscribe("foobar", handlerB)
await waitSubGraph(nodes, "foobar")
var gossip1: GossipSub = GossipSub(nodes[0])
var gossip2: GossipSub = GossipSub(nodes[1])
var gossip3: GossipSub = GossipSub(nodes[2])
check:
gossip3.mesh.peers("foobar") == 1
gossip3.broadcast(
gossip3.mesh["foobar"],
RPCMsg(
control: some(
ControlMessage(idontwant: @[ControlIWant(messageIDs: @[newSeq[byte](10)])])
)
),
isHighPriority = true,
)
checkUntilTimeout:
gossip2.mesh.getOrDefault("foobar").anyIt(it.iDontWants[^1].len == 1)
tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
await bFinished
checkUntilTimeout:
toSeq(gossip3.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 1)
check:
toSeq(gossip1.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
asyncTest "e2e - iDontWant is broadcasted on publish":
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
ok(newSeq[byte](10))
let nodes = generateNodes(
2, gossip = true, msgIdProvider = dumbMsgIdProvider, sendIDontWantOnPublish = true
)
startNodesAndDeferStop(nodes)
await nodes[0].switch.connect(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
proc handlerA(topic: string, data: seq[byte]) {.async.} =
discard
proc handlerB(topic: string, data: seq[byte]) {.async.} =
discard
nodes[0].subscribe("foobar", handlerA)
nodes[1].subscribe("foobar", handlerB)
await waitSubGraph(nodes, "foobar")
var gossip2: GossipSub = GossipSub(nodes[1])
tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
checkUntilTimeout:
gossip2.mesh.getOrDefault("foobar").anyIt(it.iDontWants[^1].len == 1)
asyncTest "e2e - iDontWant is sent only for 1.2":
# 3 nodes: A <=> B <=> C
# (A & C are NOT connected). We pre-emptively send a dontwant from C to B,
# and check that B doesn't relay the message to C.
# We also check that B sends IDONTWANT to C, but not A
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
ok(newSeq[byte](10))
let
nodeA = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
nodeB = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
nodeC = generateNodes(
1,
gossip = true,
msgIdProvider = dumbMsgIdProvider,
gossipSubVersion = GossipSubCodec_11,
)[0]
startNodesAndDeferStop(@[nodeA, nodeB, nodeC])
await nodeA.switch.connect(
nodeB.switch.peerInfo.peerId, nodeB.switch.peerInfo.addrs
)
await nodeB.switch.connect(
nodeC.switch.peerInfo.peerId, nodeC.switch.peerInfo.addrs
)
let bFinished = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
proc handlerB(topic: string, data: seq[byte]) {.async.} =
bFinished.complete()
nodeA.subscribe("foobar", handler)
nodeB.subscribe("foobar", handlerB)
nodeC.subscribe("foobar", handler)
await waitSubGraph(@[nodeA, nodeB, nodeC], "foobar")
var gossipA: GossipSub = GossipSub(nodeA)
var gossipB: GossipSub = GossipSub(nodeB)
var gossipC: GossipSub = GossipSub(nodeC)
check:
gossipC.mesh.peers("foobar") == 1
tryPublish await nodeA.publish("foobar", newSeq[byte](10000)), 1
await bFinished
# "check" alone isn't suitable for testing that a condition is true after some time has passed. Below we verify that
# peers A and C haven't received an IDONTWANT message from B, but we need wait some time for potential in flight messages to arrive.
await waitForHeartbeat()
check:
toSeq(gossipC.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
toSeq(gossipA.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
asyncTest "Peer must send right gosspipsub version":
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
ok(newSeq[byte](10))
let node0 = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
let node1 = generateNodes(
1,
gossip = true,
msgIdProvider = dumbMsgIdProvider,
gossipSubVersion = GossipSubCodec_10,
)[0]
startNodesAndDeferStop(@[node0, node1])
await node0.switch.connect(
node1.switch.peerInfo.peerId, node1.switch.peerInfo.addrs
)
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
node0.subscribe("foobar", handler)
node1.subscribe("foobar", handler)
await waitSubGraph(@[node0, node1], "foobar")
var gossip0: GossipSub = GossipSub(node0)
var gossip1: GossipSub = GossipSub(node1)
checkUntilTimeout:
gossip0.mesh.getOrDefault("foobar").toSeq[0].codec == GossipSubCodec_10
checkUntilTimeout:
gossip1.mesh.getOrDefault("foobar").toSeq[0].codec == GossipSubCodec_10

View File

@@ -0,0 +1,522 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[sequtils]
import stew/byteutils
import utils
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../helpers, ../utils/[futures]
suite "GossipSub Mesh Management":
  teardown:
    checkTrackers()

  asyncTest "topic params":
    # The default topic parameters must pass their own validation.
    let params = TopicParams.init()
    params.validateParameters().tryGet()

  asyncTest "subscribe/unsubscribeAll":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(topic: string, data: seq[byte]): Future[void] {.gcsafe, raises: [].} =
      discard

    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    # Register 15 known peers for the topic so subscribe has graft candidates.
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      gossipSub.gossipsub[topic].incl(peer)

    # test via dynamic dispatch
    gossipSub.PubSub.subscribe(topic, handler)

    check:
      gossipSub.topics.contains(topic)
      gossipSub.gossipsub[topic].len() > 0
      gossipSub.mesh[topic].len() > 0

    # test via dynamic dispatch
    gossipSub.PubSub.unsubscribeAll(topic)

    check:
      topic notin gossipSub.topics # not in local topics
      topic notin gossipSub.mesh # not in mesh
      topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()
  asyncTest "`rebalanceMesh` Degree Lo":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    # 15 candidate peers: more than enough to fill the mesh up to D.
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      gossipSub.gossipsub[topic].incl(peer)

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    # Starting from an empty mesh, rebalancing grafts exactly D peers.
    check gossipSub.mesh[topic].len == gossipSub.parameters.d

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "rebalanceMesh - bad peers":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    # Scores run from -11 up to +3; only the four non-negative peers are
    # acceptable graft candidates.
    var scoreLow = -11'f64
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      peer.score = scoreLow
      gossipSub.gossipsub[topic].incl(peer)
      scoreLow += 1.0

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    # low score peers should not be in mesh, that's why the count must be 4
    check gossipSub.mesh[topic].len == 4
    for peer in gossipSub.mesh[topic]:
      check peer.score >= 0.0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()
  asyncTest "`rebalanceMesh` Degree Hi":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    # Graft 15 peers directly into the mesh: above dHigh, so rebalancing
    # must prune back down.
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      gossipSub.grafted(peer, topic)
      gossipSub.mesh[topic].incl(peer)

    check gossipSub.mesh[topic].len == 15
    gossipSub.rebalanceMesh(topic)
    # Pruning keeps D peers plus dScore score-protected ones.
    check gossipSub.mesh[topic].len ==
      gossipSub.parameters.d + gossipSub.parameters.dScore

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "rebalanceMesh fail due to backoff":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      gossipSub.gossipsub[topic].incl(peer)
      # Put every peer on a one-hour backoff for the topic.
      gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
        peerId, Moment.now() + 1.hours
      )
      let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
      # there must be a control prune due to violation of backoff
      check prunes.len != 0

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    # expect 0 since they are all backing off
    check gossipSub.mesh[topic].len == 0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()
  asyncTest "rebalanceMesh fail due to backoff - remote":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      gossipSub.gossipsub[topic].incl(peer)
      gossipSub.mesh[topic].incl(peer)

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    check gossipSub.mesh[topic].len != 0

    # Every mesh member sends us a PRUNE carrying its own backoff period.
    for i in 0 ..< 15:
      let peerId = conns[i].peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      gossipSub.handlePrune(
        peer,
        @[
          ControlPrune(
            topicID: topic,
            peers: @[],
            backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
          )
        ],
      )

    # expect topic cleaned up since they are all pruned
    check topic notin gossipSub.mesh

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "rebalanceMesh Degree Hi - audit scenario":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()
    # Custom parameters: 13 mesh members (below) exceed dHigh = 12, forcing
    # a prune that must still respect dOut outbound slots.
    gossipSub.parameters.dScore = 4
    gossipSub.parameters.d = 6
    gossipSub.parameters.dOut = 3
    gossipSub.parameters.dHigh = 12
    gossipSub.parameters.dLow = 4

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    # 6 high-score inbound peers...
    for i in 0 ..< 6:
      let conn = TestBufferStream.new(noop)
      conn.transportDir = Direction.In
      conns &= conn
      let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.score = 40.0
      peer.sendConn = conn
      gossipSub.grafted(peer, topic)
      gossipSub.mesh[topic].incl(peer)

    # ...and 7 lower-score outbound peers.
    for i in 0 ..< 7:
      let conn = TestBufferStream.new(noop)
      conn.transportDir = Direction.Out
      conns &= conn
      let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.score = 10.0
      peer.sendConn = conn
      gossipSub.grafted(peer, topic)
      gossipSub.mesh[topic].incl(peer)

    check gossipSub.mesh[topic].len == 13
    gossipSub.rebalanceMesh(topic)
    # ensure we are above dlow
    check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
    var outbound = 0
    for peer in gossipSub.mesh[topic]:
      if peer.sendConn.transportDir == Direction.Out:
        inc outbound
    # ensure we give priority and keep at least dOut outbound peers
    check outbound >= gossipSub.parameters.dOut

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()
  asyncTest "dont prune peers if mesh len is less than d_high":
    let
      numberOfNodes = 5
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)
    subscribeAllNodes(nodes, topic, voidTopicHandler)

    let expectedNumberOfPeers = numberOfNodes - 1
    await waitForPeersInTable(
      nodes,
      topic,
      newSeqWith(numberOfNodes, expectedNumberOfPeers),
      PeerTableType.Gossipsub,
    )

    # With only 4 peers per node (below dHigh) no pruning may happen:
    # every peer stays in both gossipsub and mesh, fanout stays empty.
    for i in 0 ..< numberOfNodes:
      var gossip = GossipSub(nodes[i])
      check:
        gossip.gossipsub[topic].len == expectedNumberOfPeers
        gossip.mesh[topic].len == expectedNumberOfPeers
        gossip.fanout.len == 0

  asyncTest "prune peers if mesh len is higher than d_high":
    let
      numberOfNodes = 15
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)
    subscribeAllNodes(nodes, topic, voidTopicHandler)

    # Default gossipsub degree bounds the mesh is expected to respect.
    let
      expectedNumberOfPeers = numberOfNodes - 1
      dHigh = 12
      d = 6
      dLow = 4

    await waitForPeersInTable(
      nodes,
      topic,
      newSeqWith(numberOfNodes, expectedNumberOfPeers),
      PeerTableType.Gossipsub,
    )

    # 14 known peers exceed dHigh, so each mesh must be pruned into [dLow, dHigh].
    for i in 0 ..< numberOfNodes:
      var gossip = GossipSub(nodes[i])

      check:
        gossip.gossipsub[topic].len == expectedNumberOfPeers
        gossip.mesh[topic].len >= dLow and gossip.mesh[topic].len <= dHigh
        gossip.fanout.len == 0
  asyncTest "GossipSub unsub - resub faster than backoff":
    # For this test to work we'd require a way to disable fanout.
    # There's not a way to toggle it, and mocking it didn't work as there's not a reliable mock available.
    skip()
    return

    # Instantiate handlers and validators
    var handlerFut0 = newFuture[bool]()
    proc handler0(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      handlerFut0.complete(true)

    var handlerFut1 = newFuture[bool]()
    proc handler1(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      handlerFut1.complete(true)

    var validatorFut = newFuture[bool]()
    proc validator(
        topic: string, message: Message
    ): Future[ValidationResult] {.async.} =
      check topic == "foobar"
      validatorFut.complete(true)
      result = ValidationResult.Accept

    # Setup nodes and start switches
    let
      nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 5.seconds)
      topic = "foobar"

    # Connect nodes
    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    # Subscribe both nodes to the topic and node1 (receiver) to the validator
    nodes[0].subscribe(topic, handler0)
    nodes[1].subscribe(topic, handler1)
    nodes[1].addValidator("foobar", validator)
    await sleepAsync(DURATION_TIMEOUT)

    # Wait for both nodes to verify others' subscription
    var subs: seq[Future[void]]
    subs &= waitSub(nodes[1], nodes[0], topic)
    subs &= waitSub(nodes[0], nodes[1], topic)
    await allFuturesThrowing(subs)

    # When unsubscribing and resubscribing in a short time frame, the backoff period should be triggered
    nodes[1].unsubscribe(topic, handler1)
    await sleepAsync(DURATION_TIMEOUT)
    nodes[1].subscribe(topic, handler1)
    await sleepAsync(DURATION_TIMEOUT)

    # Backoff is set to 5 seconds, and the amount of sleeping time since the unsubscribe until now is 3-4s~
    # Meaning, the subscription shouldn't have been processed yet because it's still in backoff period
    # When publishing under this condition
    discard await nodes[0].publish("foobar", "Hello!".toBytes())
    await sleepAsync(DURATION_TIMEOUT)

    # Then the message should not be received:
    check:
      validatorFut.toState().isPending()
      handlerFut1.toState().isPending()
      handlerFut0.toState().isPending()

    validatorFut.reset()
    handlerFut0.reset()
    handlerFut1.reset()

    # If we wait backoff period to end, around 1-2s
    await waitForMesh(nodes[0], nodes[1], topic, 3.seconds)

    discard await nodes[0].publish("foobar", "Hello!".toBytes())
    await sleepAsync(DURATION_TIMEOUT)

    # Then the message should be received
    check:
      validatorFut.toState().isCompleted()
      handlerFut1.toState().isCompleted()
      handlerFut0.toState().isPending()
  asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
    proc handler(topic: string, data: seq[byte]) {.async.} =
      discard

    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    # Only node1 subscribes; node0 must still learn about the subscription.
    nodes[1].subscribe("foobar", handler)

    let gossip1 = GossipSub(nodes[0])
    let gossip2 = GossipSub(nodes[1])

    checkUntilTimeout:
      "foobar" in gossip2.topics
      "foobar" in gossip1.gossipsub
      gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)

  asyncTest "e2e - GossipSub should add remote peer topic subscriptions if both peers are subscribed":
    proc handler(topic: string, data: seq[byte]) {.async.} =
      discard

    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    var subs: seq[Future[void]]
    subs &= waitSub(nodes[1], nodes[0], "foobar")
    subs &= waitSub(nodes[0], nodes[1], "foobar")

    await allFuturesThrowing(subs)

    let
      gossip1 = GossipSub(nodes[0])
      gossip2 = GossipSub(nodes[1])

    check:
      "foobar" in gossip1.topics
      "foobar" in gossip2.topics

      "foobar" in gossip1.gossipsub
      "foobar" in gossip2.gossipsub

      # The peer may be known via gossipsub or already grafted into the mesh.
      gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId) or
        gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)

      gossip2.gossipsub.hasPeerId("foobar", gossip1.peerInfo.peerId) or
        gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)

  asyncTest "GossipSub invalid topic subscription":
    var handlerFut = newFuture[bool]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      handlerFut.complete(true)

    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)

    # We must subscribe before setting the validator
    nodes[0].subscribe("foobar", handler)

    var gossip = GossipSub(nodes[0])
    let invalidDetected = newFuture[void]()
    # Reject "foobar" subscriptions coming from remote peers; the future
    # completing proves the validator was invoked.
    gossip.subscriptionValidator = proc(topic: string): bool =
      if topic == "foobar":
        try:
          invalidDetected.complete()
        except:
          raise newException(Defect, "Exception during subscriptionValidator")
        false
      else:
        true

    await connectNodesStar(nodes)

    nodes[1].subscribe("foobar", handler)

    await invalidDetected.wait(10.seconds)

  asyncTest "GossipSub test directPeers":
    let nodes = generateNodes(2, gossip = true)
    startNodesAndDeferStop(nodes)

    await GossipSub(nodes[0]).addDirectPeer(
      nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
    )

    let invalidDetected = newFuture[void]()
    GossipSub(nodes[0]).subscriptionValidator = proc(topic: string): bool =
      if topic == "foobar":
        try:
          invalidDetected.complete()
        except:
          raise newException(Defect, "Exception during subscriptionValidator")
        false
      else:
        true

    # DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
    ### await connectNodesStar(nodes)

    proc handler(topic: string, data: seq[byte]) {.async.} =
      discard

    nodes[1].subscribe("foobar", handler)

    await invalidDetected.wait(10.seconds)

View File

@@ -0,0 +1,861 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[sequtils, enumerate]
import stew/byteutils
import utils
import sugar
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, timedcache]
import ../../libp2p/protocols/pubsub/rpc/[message, protobuf]
import ../../libp2p/muxers/muxer
import ../helpers, ../utils/[futures]
const MsgIdSuccess = "msg id gen success"

proc setupTest(): Future[
    tuple[
      gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]
    ]
] {.async.} =
  ## Spins up two connected gossipsub nodes subscribed to "foobar".
  ## gossip0 records every payload it receives into receivedMessages;
  ## gossip1's handler discards everything.
  let nodes = generateNodes(2, gossip = true, verifySignature = false)
  discard await allFinished(nodes[0].switch.start(), nodes[1].switch.start())

  await nodes[1].switch.connect(
    nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
  )

  var receivedMessages = new(HashSet[seq[byte]])

  proc handlerA(topic: string, data: seq[byte]) {.async.} =
    receivedMessages[].incl(data)

  proc handlerB(topic: string, data: seq[byte]) {.async.} =
    discard

  nodes[0].subscribe("foobar", handlerA)
  nodes[1].subscribe("foobar", handlerB)
  await waitSubGraph(nodes, "foobar")

  var gossip0: GossipSub = GossipSub(nodes[0])
  var gossip1: GossipSub = GossipSub(nodes[1])

  return (gossip0, gossip1, receivedMessages)
proc teardownTest(gossip0: GossipSub, gossip1: GossipSub) {.async.} =
  ## Tears down the node pair created by `setupTest`, stopping both
  ## switches concurrently and propagating any failure.
  await allFuturesThrowing(gossip1.switch.stop(), gossip0.switch.stop())
proc createMessages(
    gossip0: GossipSub, gossip1: GossipSub, size1: int, size2: int
): tuple[iwantMessageIds: seq[MessageId], sentMessages: HashSet[seq[byte]]] =
  ## Caches two messages of the given sizes in gossip1's mcache and marks
  ## their ids as IHAVEs already advertised to gossip0, so a later IWANT
  ## from gossip0 is honoured.
  var iwantMessageIds = newSeq[MessageId]()
  var sentMessages = initHashSet[seq[byte]]()

  for i, size in enumerate([size1, size2]):
    # Payload filled with the message index so the two payloads differ.
    let data = newSeqWith(size, i.byte)
    sentMessages.incl(data)

    let msg = Message.init(gossip1.peerInfo.peerId, data, "foobar", some(uint64(i + 1)))
    let iwantMessageId = gossip1.msgIdProvider(msg).expect(MsgIdSuccess)
    iwantMessageIds.add(iwantMessageId)
    gossip1.mcache.put(iwantMessageId, msg)

    let peer = gossip1.peers[(gossip0.peerInfo.peerId)]
    peer.sentIHaves[^1].incl(iwantMessageId)

  return (iwantMessageIds, sentMessages)
suite "GossipSub Message Handling":
  teardown:
    checkTrackers()

  asyncTest "Drop messages of topics without subscription":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      check false

    let topic = "foobar"
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      inc seqno
      let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
      await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    # Nothing may be cached: the node is not subscribed to the topic.
    check gossipSub.mcache.msgs.len == 0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "subscription limits":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    gossipSub.topicsHigh = 10

    var tooManyTopics: seq[string]
    for i in 0 .. gossipSub.topicsHigh + 10:
      tooManyTopics &= "topic" & $i
    let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)

    let conn = TestBufferStream.new(noop)
    let peerId = randomPeerId()
    conn.peerId = peerId
    let peer = gossipSub.getPubSubPeer(peerId)

    await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))

    # Subscriptions above topicsHigh are dropped and the sender is penalized.
    check:
      gossipSub.gossipsub.len == gossipSub.topicsHigh
      peer.behaviourPenalty > 0.0

    await conn.close()
    await gossipSub.switch.stop()

  asyncTest "invalid message bytes":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let peerId = randomPeerId()
    let peer = gossipSub.getPubSubPeer(peerId)

    # Undecodable RPC payloads must raise rather than being silently ignored.
    expect(CatchableError):
      await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])

    await gossipSub.switch.stop()
  asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
    # This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
    # Expected: Both messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()

    let messageSize = gossip1.maxMessageSize div 2 + 1
    let (iwantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, messageSize, messageSize)

    # Advertise both ids via IHAVE; gossip0 responds with IWANT.
    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: iwantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    checkUntilTimeout:
      receivedMessages[] == sentMessages
    check receivedMessages[].len == 2

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
    # This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
    # Expected: No messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()

    let messageSize = gossip1.maxMessageSize + 10
    let (bigIWantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, messageSize, messageSize)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    # Give any in-flight replies time to arrive before asserting absence.
    await sleepAsync(300.milliseconds)
    checkUntilTimeout:
      receivedMessages[].len == 0

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
    # This test checks if two messages, both below the maxSize, are correctly processed and sent.
    # Expected: Both messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
    let size1 = gossip1.maxMessageSize div 2
    let size2 = gossip1.maxMessageSize div 3
    let (bigIWantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, size1, size2)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    checkUntilTimeout:
      receivedMessages[] == sentMessages
    check receivedMessages[].len == 2

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
    # This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
    # Expected: Only the smaller message should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
    let maxSize = gossip1.maxMessageSize
    let size1 = maxSize div 2
    let size2 = maxSize + 10
    let (bigIWantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, size1, size2)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    # The shorter of the two payloads is the only one expected through.
    var smallestSet: HashSet[seq[byte]]
    let seqs = toSeq(sentMessages)
    if seqs[0] < seqs[1]:
      smallestSet.incl(seqs[0])
    else:
      smallestSet.incl(seqs[1])

    checkUntilTimeout:
      receivedMessages[] == smallestSet
    check receivedMessages[].len == 1

    await teardownTest(gossip0, gossip1)
  asyncTest "messages are not sent back to source or forwarding peer":
    let
      numberOfNodes = 3
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true)

    startNodesAndDeferStop(nodes)

    let (handlerFut0, handler0) = createCompleteHandler()
    let (handlerFut1, handler1) = createCompleteHandler()
    let (handlerFut2, handler2) = createCompleteHandler()

    # Nodes are connected in a ring
    await connectNodes(nodes[0], nodes[1])
    await connectNodes(nodes[1], nodes[2])
    await connectNodes(nodes[2], nodes[0])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, @[handler0, handler1, handler2])
    await waitForPeersInTable(
      nodes, topic, newSeqWith(numberOfNodes, 2), PeerTableType.Mesh
    )

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) == 2
    await waitForHeartbeat()

    # Nodes 1 and 2 should receive the message, but node 0 shouldn't receive it back
    let results =
      await waitForStates(@[handlerFut0, handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
    check:
      results[0].isPending()
      results[1].isCompleted()
      results[2].isCompleted()

  asyncTest "GossipSub validation should succeed":
    var handlerFut = newFuture[bool]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      handlerFut.complete(true)

    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    var subs: seq[Future[void]]
    subs &= waitSub(nodes[1], nodes[0], "foobar")
    subs &= waitSub(nodes[0], nodes[1], "foobar")

    await allFuturesThrowing(subs)

    var validatorFut = newFuture[bool]()
    proc validator(
        topic: string, message: Message
    ): Future[ValidationResult] {.async.} =
      check topic == "foobar"
      validatorFut.complete(true)
      result = ValidationResult.Accept

    nodes[1].addValidator("foobar", validator)
    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1

    # Accepted message must reach both the validator and the handler.
    check (await validatorFut) and (await handlerFut)
  asyncTest "GossipSub validation should fail (reject)":
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check false # if we get here, it should fail

    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    await waitSubGraph(nodes, "foobar")

    let gossip1 = GossipSub(nodes[0])
    let gossip2 = GossipSub(nodes[1])

    check:
      gossip1.mesh["foobar"].len == 1 and "foobar" notin gossip1.fanout
      gossip2.mesh["foobar"].len == 1 and "foobar" notin gossip2.fanout

    var validatorFut = newFuture[bool]()
    proc validator(
        topic: string, message: Message
    ): Future[ValidationResult] {.async.} =
      # Rejecting keeps the message from ever reaching the handler above.
      result = ValidationResult.Reject
      validatorFut.complete(true)

    nodes[1].addValidator("foobar", validator)
    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1

    check (await validatorFut) == true

  asyncTest "GossipSub validation should fail (ignore)":
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check false # if we get here, it should fail

    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    await waitSubGraph(nodes, "foobar")

    let gossip1 = GossipSub(nodes[0])
    let gossip2 = GossipSub(nodes[1])

    check:
      gossip1.mesh["foobar"].len == 1 and "foobar" notin gossip1.fanout
      gossip2.mesh["foobar"].len == 1 and "foobar" notin gossip2.fanout

    var validatorFut = newFuture[bool]()
    proc validator(
        topic: string, message: Message
    ): Future[ValidationResult] {.async.} =
      # Ignore also suppresses delivery, but without the scoring penalty.
      result = ValidationResult.Ignore
      validatorFut.complete(true)

    nodes[1].addValidator("foobar", validator)
    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1

    check (await validatorFut) == true
  asyncTest "GossipSub validation one fails and one succeeds":
    var handlerFut = newFuture[bool]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foo"
      handlerFut.complete(true)

    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes[1].subscribe("foo", handler)
    nodes[1].subscribe("bar", handler)

    # NOTE(review): each future is completed exactly once below, so these
    # must be two distinct futures (Nim evaluates the initializer per
    # variable) — confirm on compiler upgrades.
    var passed, failed: Future[bool] = newFuture[bool]()
    proc validator(
        topic: string, message: Message
    ): Future[ValidationResult] {.async.} =
      result =
        if topic == "foo":
          passed.complete(true)
          ValidationResult.Accept
        else:
          failed.complete(true)
          ValidationResult.Reject

    nodes[1].addValidator("foo", "bar", validator)
    tryPublish await nodes[0].publish("foo", "Hello!".toBytes()), 1
    tryPublish await nodes[0].publish("bar", "Hello!".toBytes()), 1

    check ((await passed) and (await failed) and (await handlerFut))

    let gossip1 = GossipSub(nodes[0])
    let gossip2 = GossipSub(nodes[1])

    # Publisher is not subscribed, so its topics live in fanout, not mesh.
    check:
      "foo" notin gossip1.mesh and gossip1.fanout["foo"].len == 1
      "foo" notin gossip2.mesh and "foo" notin gossip2.fanout
      "bar" notin gossip1.mesh and gossip1.fanout["bar"].len == 1
      "bar" notin gossip2.mesh and "bar" notin gossip2.fanout

  asyncTest "GossipSub's observers should run after message is sent, received and validated":
    var
      recvCounter = 0
      sendCounter = 0
      validatedCounter = 0

    proc handler(topic: string, data: seq[byte]) {.async.} =
      discard

    proc onRecv(peer: PubSubPeer, msgs: var RPCMsg) =
      inc recvCounter

    proc onSend(peer: PubSubPeer, msgs: var RPCMsg) =
      inc sendCounter

    proc onValidated(peer: PubSubPeer, msg: Message, msgId: MessageId) =
      inc validatedCounter

    let obs0 = PubSubObserver(onSend: onSend)
    let obs1 = PubSubObserver(onRecv: onRecv, onValidated: onValidated)

    let nodes = generateNodes(2, gossip = true)
    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes[0].addObserver(obs0)
    nodes[1].addObserver(obs1)
    nodes[1].subscribe("foo", handler)
    nodes[1].subscribe("bar", handler)

    proc validator(
        topic: string, message: Message
    ): Future[ValidationResult] {.async.} =
      result = if topic == "foo": ValidationResult.Accept else: ValidationResult.Reject

    nodes[1].addValidator("foo", "bar", validator)

    # Send message that will be accepted by the receiver's validator
    tryPublish await nodes[0].publish("foo", "Hello!".toBytes()), 1

    check:
      recvCounter == 1
      validatedCounter == 1
      sendCounter == 1

    # Send message that will be rejected by the receiver's validator
    tryPublish await nodes[0].publish("bar", "Hello!".toBytes()), 1

    # onRecv/onSend fire regardless; onValidated only for accepted messages.
    check:
      recvCounter == 2
      validatedCounter == 1
      sendCounter == 2
  asyncTest "e2e - GossipSub send over mesh A -> B":
    var passed: Future[bool] = newFuture[bool]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      passed.complete(true)

    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)
    await waitSub(nodes[0], nodes[1], "foobar")

    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1

    check await passed

    var gossip1: GossipSub = GossipSub(nodes[0])
    var gossip2: GossipSub = GossipSub(nodes[1])

    # Delivery happened over the mesh (grafted), not fanout.
    check:
      "foobar" in gossip1.gossipsub
      "foobar" in gossip2.gossipsub
      gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
      not gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
      gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
      not gossip2.fanout.hasPeerId("foobar", gossip1.peerInfo.peerId)

  asyncTest "e2e - GossipSub should not send to source & peers who already seen":
    # 3 nodes: A, B, C
    # A publishes, C relays, B is having a long validation
    # so B should not send to anyone
    let nodes = generateNodes(3, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    var cRelayed: Future[void] = newFuture[void]()
    var bFinished: Future[void] = newFuture[void]()
    var
      aReceived = 0
      cReceived = 0

    proc handlerA(topic: string, data: seq[byte]) {.async.} =
      inc aReceived
      check aReceived < 2

    proc handlerB(topic: string, data: seq[byte]) {.async.} =
      discard

    proc handlerC(topic: string, data: seq[byte]) {.async.} =
      inc cReceived
      check cReceived < 2
      cRelayed.complete()

    nodes[0].subscribe("foobar", handlerA)
    nodes[1].subscribe("foobar", handlerB)
    nodes[2].subscribe("foobar", handlerC)
    await waitSubGraph(nodes, "foobar")

    var gossip1: GossipSub = GossipSub(nodes[0])
    var gossip2: GossipSub = GossipSub(nodes[1])
    var gossip3: GossipSub = GossipSub(nodes[2])

    proc slowValidator(
        topic: string, message: Message
    ): Future[ValidationResult] {.async.} =
      try:
        # B finishes validating only after C has already relayed the message.
        await cRelayed
        # Empty A & C caches to detect duplicates
        gossip1.seen = TimedCache[SaltedId].init()
        gossip3.seen = TimedCache[SaltedId].init()
        let msgId = toSeq(gossip2.validationSeen.keys)[0]
        checkUntilTimeout(
          try:
            gossip2.validationSeen[msgId].len > 0
          except KeyError:
            false
        )
        result = ValidationResult.Accept
        bFinished.complete()
      except CatchableError:
        raiseAssert "err on slowValidator"

    nodes[1].addValidator("foobar", slowValidator)

    checkUntilTimeout:
      gossip1.mesh.getOrDefault("foobar").len == 2
      gossip2.mesh.getOrDefault("foobar").len == 2
      gossip3.mesh.getOrDefault("foobar").len == 2

    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 2

    await bFinished
  asyncTest "e2e - GossipSub send over floodPublish A -> B":
    var passed: Future[bool] = newFuture[bool]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      passed.complete(true)

    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)

    var gossip1: GossipSub = GossipSub(nodes[0])
    gossip1.parameters.floodPublish = true
    var gossip2: GossipSub = GossipSub(nodes[1])
    gossip2.parameters.floodPublish = true

    await connectNodesStar(nodes)

    # Publisher stays unsubscribed: floodPublish must deliver anyway.
    # nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)
    await waitSub(nodes[0], nodes[1], "foobar")

    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1

    check await passed.wait(10.seconds)

    check:
      "foobar" in gossip1.gossipsub
      "foobar" notin gossip2.gossipsub
      not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)

  asyncTest "e2e - GossipSub floodPublish limit":
    let
      nodes = setupNodes(20)
      gossip1 = GossipSub(nodes[0])

    gossip1.parameters.floodPublish = true
    gossip1.parameters.heartbeatInterval = milliseconds(700)

    startNodesAndDeferStop(nodes)
    await connectNodes(nodes[1 ..^ 1], nodes[0])
    # With bandwidth estimation active, flood publishing is capped.
    await baseTestProcedure(nodes, gossip1, gossip1.parameters.dLow, 17)

  asyncTest "e2e - GossipSub floodPublish limit with bandwidthEstimatebps = 0":
    let
      nodes = setupNodes(20)
      gossip1 = GossipSub(nodes[0])

    gossip1.parameters.floodPublish = true
    gossip1.parameters.heartbeatInterval = milliseconds(700)
    # Disabling bandwidth estimation lifts the cap: publish to all peers.
    gossip1.parameters.bandwidthEstimatebps = 0

    startNodesAndDeferStop(nodes)
    await connectNodes(nodes[1 ..^ 1], nodes[0])
    await baseTestProcedure(nodes, gossip1, nodes.len - 1, nodes.len - 1)
asyncTest "e2e - GossipSub with multiple peers":
  # Full star of 10 self-delivering nodes: a single publish must be seen
  # by every node (triggerSelf includes the publisher itself).
  var runs = 10

  let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
  startNodesAndDeferStop(nodes)
  await connectNodesStar(nodes)

  var seen: Table[string, int]
  var seenFut = newFuture[void]()

  for i in 0 ..< nodes.len:
    let dialer = nodes[i]
    var handler: TopicHandler
    closureScope:
      # capture a fresh per-node name so each handler counts into its own slot
      var peerName = $dialer.peerInfo.peerId
      handler = proc(topic: string, data: seq[byte]) {.async.} =
        seen.mgetOrPut(peerName, 0).inc()
        check topic == "foobar"
        if not seenFut.finished() and seen.len >= runs:
          seenFut.complete()

    dialer.subscribe("foobar", handler)
  await waitSubGraph(nodes, "foobar")

  tryPublish await wait(
    nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
    1.minutes,
  ), 1

  await wait(seenFut, 1.minutes)
  check:
    seen.len >= runs
  for k, v in seen.pairs:
    check:
      v >= 1

  for node in nodes:
    var gossip = GossipSub(node)
    check:
      "foobar" in gossip.gossipsub
asyncTest "e2e - GossipSub with multiple peers (sparse)":
  # Same delivery guarantee as the full-star test, but over a sparsely
  # connected graph, so messages must traverse multiple hops.
  var runs = 10

  let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
  startNodesAndDeferStop(nodes)
  await connectNodesSparse(nodes)

  var seen: Table[string, int]
  var seenFut = newFuture[void]()

  for i in 0 ..< nodes.len:
    let dialer = nodes[i]
    var handler: TopicHandler
    capture dialer, i:
      # per-iteration capture so each closure uses its own peer name
      var peerName = $dialer.peerInfo.peerId
      handler = proc(topic: string, data: seq[byte]) {.async.} =
        try:
          if peerName notin seen:
            seen[peerName] = 0
          seen[peerName].inc
        except KeyError:
          raiseAssert "seen checked before"
        check topic == "foobar"
        if not seenFut.finished() and seen.len >= runs:
          seenFut.complete()

    dialer.subscribe("foobar", handler)
  await waitSubGraph(nodes, "foobar")

  tryPublish await wait(
    nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
    1.minutes,
  ), 1

  await wait(seenFut, 60.seconds)
  check:
    seen.len >= runs
  for k, v in seen.pairs:
    check:
      v >= 1

  for node in nodes:
    var gossip = GossipSub(node)
    check:
      # all traffic went through meshes (no fanout entries linger)
      "foobar" in gossip.gossipsub
      gossip.fanout.len == 0
      gossip.mesh["foobar"].len > 0
asyncTest "e2e - GossipSub with multiple peers - control deliver (sparse)":
  # Shrink the D parameters so meshes are tiny and deliveries have to go
  # through IHAVE/IWANT control exchanges instead of eager push.
  var runs = 10

  let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
  startNodesAndDeferStop(nodes)
  await connectNodesSparse(nodes)

  var seen: Table[string, int]
  var seenFut = newFuture[void]()
  for i in 0 ..< nodes.len:
    let dialer = nodes[i]
    let dgossip = GossipSub(dialer)
    dgossip.parameters.dHigh = 2
    dgossip.parameters.dLow = 1
    dgossip.parameters.d = 1
    dgossip.parameters.dOut = 1
    var handler: TopicHandler
    closureScope:
      # per-node capture so each handler counts into its own slot
      var peerName = $dialer.peerInfo.peerId
      handler = proc(topic: string, data: seq[byte]) {.async.} =
        seen.mgetOrPut(peerName, 0).inc()
        info "seen up", count = seen.len
        check topic == "foobar"
        if not seenFut.finished() and seen.len >= runs:
          seenFut.complete()

    dialer.subscribe("foobar", handler)
    await waitSub(nodes[0], dialer, "foobar")

  # we want to test ping pong deliveries via control Iwant/Ihave, so we publish just in a tap
  let publishedTo = nodes[0].publish(
    "foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)
  ).await
  check:
    # the initial publish must reach some peers but not all of them
    publishedTo != 0
    publishedTo != runs

  await wait(seenFut, 5.minutes)
  check:
    seen.len >= runs
  for k, v in seen.pairs:
    check:
      v >= 1
asyncTest "GossipSub directPeers: always forward messages":
  # Chain of direct peers 0 <-> 1 <-> 2: messages are forwarded along
  # direct-peer links even though no mesh is ever formed.
  let nodes = generateNodes(3, gossip = true)
  startNodesAndDeferStop(nodes)

  await GossipSub(nodes[0]).addDirectPeer(
    nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
  )
  await GossipSub(nodes[1]).addDirectPeer(
    nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
  )
  await GossipSub(nodes[1]).addDirectPeer(
    nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs
  )
  await GossipSub(nodes[2]).addDirectPeer(
    nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
  )

  var handlerFut = newFuture[void]()
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"
    handlerFut.complete()

  proc noop(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"

  nodes[0].subscribe("foobar", noop)
  nodes[1].subscribe("foobar", noop)
  # only the end of the chain completes the future
  nodes[2].subscribe("foobar", handler)

  tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1

  await handlerFut.wait(2.seconds)

  # peer shouldn't be in our mesh
  check "foobar" notin GossipSub(nodes[0]).mesh
  check "foobar" notin GossipSub(nodes[1]).mesh
  check "foobar" notin GossipSub(nodes[2]).mesh
asyncTest "GossipSub directPeers: send message to unsubscribed direct peer":
  # A direct-peer link alone must not deliver topic messages when
  # neither side subscribed to the topic.
  # Given 2 nodes
  let
    numberOfNodes = 2
    nodes = generateNodes(numberOfNodes, gossip = true)
    node0 = nodes[0]
    node1 = nodes[1]
    g0 = GossipSub(node0)
    g1 = GossipSub(node1)

  startNodesAndDeferStop(nodes)

  # With message observers
  var
    messageReceived0 = newFuture[bool]()
    messageReceived1 = newFuture[bool]()

  proc observer0(peer: PubSubPeer, msgs: var RPCMsg) =
    for message in msgs.messages:
      if message.topic == "foobar":
        messageReceived0.complete(true)

  proc observer1(peer: PubSubPeer, msgs: var RPCMsg) =
    for message in msgs.messages:
      if message.topic == "foobar":
        messageReceived1.complete(true)

  node0.addObserver(PubSubObserver(onRecv: observer0))
  node1.addObserver(PubSubObserver(onRecv: observer1))

  # Connect them as direct peers
  await g0.addDirectPeer(node1.peerInfo.peerId, node1.peerInfo.addrs)
  await g1.addDirectPeer(node0.peerInfo.peerId, node0.peerInfo.addrs)

  # When node 0 sends a message
  let message = "Hello!".toBytes()
  let publishResult = await node0.publish("foobar", message)

  # None should receive the message as they are not subscribed to the topic
  let results = await waitForStates(@[messageReceived0, messageReceived1])
  check:
    publishResult == 0
    results[0].isPending()
    results[1].isPending()

View File

@@ -0,0 +1,418 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[sequtils]
import stew/byteutils
import metrics
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../../libp2p/muxers/muxer
import ../helpers, ../utils/[futures]
suite "GossipSub Scoring":
teardown:
checkTrackers()
asyncTest "Disconnect bad peers":
  # Peers whose app score falls below graylistThreshold must be dropped
  # when disconnectBadPeers is on, and cleaned out of peersInIP.
  let gossipSub = TestGossipSub.init(newStandardSwitch())
  gossipSub.parameters.disconnectBadPeers = true
  gossipSub.parameters.appSpecificWeight = 1.0

  proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
    # no peer should deliver anything once disconnected
    check false

  let topic = "foobar"
  var conns = newSeq[Connection]()
  for i in 0 ..< 30:
    let conn = TestBufferStream.new(noop)
    conns &= conn
    let peerId = randomPeerId()
    conn.peerId = peerId
    let peer = gossipSub.getPubSubPeer(peerId)
    peer.sendConn = conn
    peer.handler = handler
    # below the threshold -> candidate for disconnection on next scoring pass
    peer.appScore = gossipSub.parameters.graylistThreshold - 1
    gossipSub.gossipsub.mgetOrPut(topic, initHashSet[PubSubPeer]()).incl(peer)
    gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))

  gossipSub.updateScores()
  await sleepAsync(100.millis)

  check:
    # test our disconnect mechanics
    gossipSub.gossipsub.peers(topic) == 0
    # also ensure we cleanup properly the peersInIP table
    gossipSub.peersInIP.len == 0

  await allFuturesThrowing(conns.mapIt(it.close()))
  await gossipSub.switch.stop()
asyncTest "flood publish to all peers with score above threshold, regardless of subscription":
  # floodPublish must skip peers scored below publishThreshold even if
  # they are subscribed.
  let
    numberOfNodes = 3
    topic = "foobar"
    nodes = generateNodes(numberOfNodes, gossip = true, floodPublish = true)
    g0 = GossipSub(nodes[0])

  startNodesAndDeferStop(nodes)

  # Nodes 1 and 2 are connected to node 0
  await connectNodes(nodes[0], nodes[1])
  await connectNodes(nodes[0], nodes[2])

  let (handlerFut1, handler1) = createCompleteHandler()
  let (handlerFut2, handler2) = createCompleteHandler()

  # Nodes are subscribed to the same topic
  nodes[1].subscribe(topic, handler1)
  nodes[2].subscribe(topic, handler2)
  await waitForHeartbeat()

  # Given node 2's score is below the threshold
  for peer in g0.gossipsub.getOrDefault(topic):
    if peer.peerId == nodes[2].peerInfo.peerId:
      peer.score = (g0.parameters.publishThreshold - 1)

  # When node 0 publishes a message to topic "foo"
  let message = "Hello!".toBytes()
  check (await nodes[0].publish(topic, message)) == 1
  await waitForHeartbeat(2)

  # Then only node 1 should receive the message
  let results = await waitForStates(@[handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
  check:
    results[0].isCompleted(true)
    results[1].isPending()
proc initializeGossipTest(): Future[(seq[PubSub], GossipSub, GossipSub)] {.async.} =
  ## Spin up two connected gossipsub nodes with a tight overhead rate
  ## limit (20 bytes per 1 ms), both subscribed to "foobar".
  ## Signature verification is disabled so oversized test messages are
  ## judged by the rate limiter rather than dropped for bad signatures.
  let nodes =
    generateNodes(2, gossip = true, overheadRateLimit = Opt.some((20, 1.millis)))
  await startNodes(nodes)
  await connectNodesStar(nodes)

  proc handle(topic: string, data: seq[byte]) {.async.} =
    discard

  let gossip0 = GossipSub(nodes[0])
  let gossip1 = GossipSub(nodes[1])
  gossip0.subscribe("foobar", handle)
  gossip1.subscribe("foobar", handle)
  await waitSubGraph(nodes, "foobar")

  # Avoid being disconnected by failing signature verification
  gossip0.verifySignature = false
  gossip1.verifySignature = false

  return (nodes, gossip0, gossip1)
proc currentRateLimitHits(): float64 =
  ## Current value of the gossipsub rate-limit-hits counter for the
  ## "nim-libp2p" agent label; 0 when the metric is not registered yet.
  result =
    try:
      libp2p_gossipsub_peers_rate_limit_hits.valueByName(
        "libp2p_gossipsub_peers_rate_limit_hits_total", @["nim-libp2p"]
      )
    except KeyError:
      0.0
asyncTest "e2e - GossipSub should not rate limit decodable messages below the size allowed":
  # Messages within the overhead budget must not register rate-limit
  # hits nor trigger disconnection, even with the kill switch enabled.
  let rateLimitHits = currentRateLimitHits()
  let (nodes, gossip0, gossip1) = await initializeGossipTest()

  gossip0.broadcast(
    gossip0.mesh["foobar"],
    RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](10))]),
    isHighPriority = true,
  )
  await waitForHeartbeat()

  check currentRateLimitHits() == rateLimitHits
  check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

  # Disconnect peer when rate limiting is enabled
  gossip1.parameters.disconnectPeerAboveRateLimit = true
  gossip0.broadcast(
    gossip0.mesh["foobar"],
    RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](12))]),
    isHighPriority = true,
  )
  await waitForHeartbeat()

  check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
  check currentRateLimitHits() == rateLimitHits

  await stopNodes(nodes)
asyncTest "e2e - GossipSub should rate limit undecodable messages above the size allowed":
  # Raw garbage bytes above the budget count as a rate-limit hit; with
  # disconnectPeerAboveRateLimit the offending peer is dropped.
  let rateLimitHits = currentRateLimitHits()
  let (nodes, gossip0, gossip1) = await initializeGossipTest()

  # Simulate sending an undecodable message
  await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(
    newSeqWith(33, 1.byte), isHighPriority = true
  )
  await waitForHeartbeat()

  check currentRateLimitHits() == rateLimitHits + 1
  check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

  # Disconnect peer when rate limiting is enabled
  gossip1.parameters.disconnectPeerAboveRateLimit = true
  await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(
    newSeqWith(35, 1.byte), isHighPriority = true
  )

  checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
  check currentRateLimitHits() == rateLimitHits + 2

  await stopNodes(nodes)
asyncTest "e2e - GossipSub should rate limit decodable messages above the size allowed":
  # Even a well-formed control message counts against the overhead
  # budget once its encoded size exceeds the limit.
  let rateLimitHits = currentRateLimitHits()
  let (nodes, gossip0, gossip1) = await initializeGossipTest()

  let msg = RPCMsg(
    control: some(
      ControlMessage(
        prune:
          @[
            ControlPrune(
              topicID: "foobar",
              peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))],
              backoff: 123'u64,
            )
          ]
      )
    )
  )
  gossip0.broadcast(gossip0.mesh["foobar"], msg, isHighPriority = true)
  await waitForHeartbeat()

  check currentRateLimitHits() == rateLimitHits + 1
  check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

  # Disconnect peer when rate limiting is enabled
  gossip1.parameters.disconnectPeerAboveRateLimit = true
  let msg2 = RPCMsg(
    control: some(
      ControlMessage(
        prune:
          @[
            ControlPrune(
              topicID: "foobar",
              peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))],
              backoff: 123'u64,
            )
          ]
      )
    )
  )
  gossip0.broadcast(gossip0.mesh["foobar"], msg2, isHighPriority = true)

  checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
  check currentRateLimitHits() == rateLimitHits + 2

  await stopNodes(nodes)
asyncTest "e2e - GossipSub should rate limit invalid messages above the size allowed":
  # Messages rejected by a validator still count against the overhead
  # budget once they exceed the allowed size.
  let rateLimitHits = currentRateLimitHits()
  let (nodes, gossip0, gossip1) = await initializeGossipTest()
  let topic = "foobar"

  proc execValidator(
      topic: string, message: messages.Message
  ): Future[ValidationResult] {.async: (raw: true).} =
    # reject everything so each delivery is "invalid"
    let res = newFuture[ValidationResult]()
    res.complete(ValidationResult.Reject)
    res

  gossip0.addValidator(topic, execValidator)
  gossip1.addValidator(topic, execValidator)

  let msg = RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](40))])
  gossip0.broadcast(gossip0.mesh[topic], msg, isHighPriority = true)
  await waitForHeartbeat()

  check currentRateLimitHits() == rateLimitHits + 1
  check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

  # Disconnect peer when rate limiting is enabled
  gossip1.parameters.disconnectPeerAboveRateLimit = true
  gossip0.broadcast(
    gossip0.mesh[topic],
    RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](35))]),
    isHighPriority = true,
  )

  checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
  check currentRateLimitHits() == rateLimitHits + 2

  await stopNodes(nodes)
asyncTest "GossipSub directPeers: don't kick direct peer with low score":
  # Direct peers keep receiving messages even when their score falls
  # below the graylist threshold.
  let nodes = generateNodes(2, gossip = true)
  startNodesAndDeferStop(nodes)

  await GossipSub(nodes[0]).addDirectPeer(
    nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
  )
  await GossipSub(nodes[1]).addDirectPeer(
    nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
  )

  # an absurdly high threshold guarantees node 0 scores below it
  GossipSub(nodes[1]).parameters.disconnectBadPeers = true
  GossipSub(nodes[1]).parameters.graylistThreshold = 100000

  var handlerFut = newFuture[void]()
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"
    handlerFut.complete()

  nodes[0].subscribe("foobar", handler)
  nodes[1].subscribe("foobar", handler)

  tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1
  await handlerFut

  GossipSub(nodes[1]).updateScores()
  # peer shouldn't be in our mesh
  check:
    GossipSub(nodes[1]).peerStats[nodes[0].switch.peerInfo.peerId].score <
      GossipSub(nodes[1]).parameters.graylistThreshold
  GossipSub(nodes[1]).updateScores()

  handlerFut = newFuture[void]()
  tryPublish await nodes[0].publish("foobar", toBytes("hellow2")), 1

  # Without directPeers, this would fail
  await handlerFut.wait(1.seconds)
asyncTest "GossipSub peers disconnections mechanics":
  # peerStats must survive subscription churn: entries for connected
  # peers are kept across unsubscribe/resubscribe cycles.
  var runs = 10

  let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
  startNodesAndDeferStop(nodes)
  await connectNodesStar(nodes)

  var seen: Table[string, int]
  var seenFut = newFuture[void]()
  for i in 0 ..< nodes.len:
    let dialer = nodes[i]
    var handler: TopicHandler
    closureScope:
      # per-node capture so each handler counts into its own slot
      var peerName = $dialer.peerInfo.peerId
      handler = proc(topic: string, data: seq[byte]) {.async.} =
        seen.mgetOrPut(peerName, 0).inc()
        check topic == "foobar"
        if not seenFut.finished() and seen.len >= runs:
          seenFut.complete()

    dialer.subscribe("foobar", handler)
  await waitSubGraph(nodes, "foobar")

  # ensure peer stats are stored properly and kept properly
  check:
    GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

  tryPublish await wait(
    nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
    1.minutes,
  ), 1, 5.seconds, 3.minutes

  await wait(seenFut, 5.minutes)
  check:
    seen.len >= runs
  for k, v in seen.pairs:
    check:
      v >= 1

  for node in nodes:
    var gossip = GossipSub(node)
    check:
      "foobar" in gossip.gossipsub
      gossip.fanout.len == 0
      gossip.mesh["foobar"].len > 0

  # Removing some subscriptions
  for i in 0 ..< runs:
    if i mod 3 != 0:
      nodes[i].unsubscribeAll("foobar")

  # Waiting 2 heartbeats
  for _ in 0 .. 1:
    let evnt = newAsyncEvent()
    GossipSub(nodes[0]).heartbeatEvents &= evnt
    await evnt.wait()

  # ensure peer stats are stored properly and kept properly
  check:
    GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

  # Adding again subscriptions
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"

  for i in 0 ..< runs:
    if i mod 3 != 0:
      nodes[i].subscribe("foobar", handler)

  # Waiting 2 heartbeats
  for _ in 0 .. 1:
    let evnt = newAsyncEvent()
    GossipSub(nodes[0]).heartbeatEvents &= evnt
    await evnt.wait()

  # ensure peer stats are stored properly and kept properly
  check:
    GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self
asyncTest "GossipSub scoring - decayInterval":
  # meshMessageDeliveries must decay once per decayInterval: starting at
  # 100 with decay 0.9, ~5 intervals land the value in 50..66
  # (roughly 0.9^4 .. 0.9^6 of 100, tolerating timing jitter).
  let nodes = generateNodes(2, gossip = true)
  var gossip = GossipSub(nodes[0])
  const testDecayInterval = 50.milliseconds
  gossip.parameters.decayInterval = testDecayInterval

  startNodesAndDeferStop(nodes)

  var handlerFut = newFuture[void]()
  proc handler(topic: string, data: seq[byte]) {.async.} =
    handlerFut.complete()

  await connectNodesStar(nodes)

  nodes[0].subscribe("foobar", handler)
  nodes[1].subscribe("foobar", handler)

  tryPublish await nodes[0].publish("foobar", toBytes("hello")), 1
  await handlerFut

  gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries =
    100
  gossip.topicParams["foobar"].meshMessageDeliveriesDecay = 0.9

  # We should have decayed 5 times, though allowing 4..6
  await sleepAsync(testDecayInterval * 5)
  check:
    gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries in
      50.0 .. 66.0

View File

@@ -1,4 +1,6 @@
{.used.}
import
testfloodsub, testgossipsub, testgossipsub2, testmcache, testtimedcache, testmessage
testgossipsubfanout, testgossipsubgossip, testgossipsubmeshmanagement,
testgossipsubmessagehandling, testgossipsubscoring, testfloodsub, testmcache,
testtimedcache, testmessage

View File

@@ -4,7 +4,7 @@ const
libp2p_pubsub_verify {.booldefine.} = true
libp2p_pubsub_anonymize {.booldefine.} = false
import hashes, random, tables, sets, sequtils
import hashes, random, tables, sets, sequtils, sugar
import chronos, stew/[byteutils, results], chronos/ratelimit
import
../../libp2p/[
@@ -12,19 +12,35 @@ import
protocols/pubsub/errors,
protocols/pubsub/pubsub,
protocols/pubsub/pubsubpeer,
protocols/pubsub/peertable,
protocols/pubsub/gossipsub,
protocols/pubsub/floodsub,
protocols/pubsub/rpc/messages,
protocols/secure/secure,
]
import ../helpers
import ../helpers, ../utils/futures
import chronicles
export builders
randomize()
type TestGossipSub* = ref object of GossipSub
const TEST_GOSSIPSUB_HEARTBEAT_INTERVAL* = 60.milliseconds
const HEARTBEAT_TIMEOUT* = # TEST_GOSSIPSUB_HEARTBEAT_INTERVAL + 20%
int64(float64(TEST_GOSSIPSUB_HEARTBEAT_INTERVAL.milliseconds) * 1.2).milliseconds
proc waitForHeartbeat*(multiplier: int = 1) {.async.} =
  ## Sleep long enough for `multiplier` gossipsub heartbeats to fire
  ## (test heartbeat interval plus 20% slack per heartbeat).
  await sleepAsync(HEARTBEAT_TIMEOUT * multiplier)
type
TestGossipSub* = ref object of GossipSub
DValues* = object
d*: Option[int]
dLow*: Option[int]
dHigh*: Option[int]
dScore*: Option[int]
dOut*: Option[int]
dLazy*: Option[int]
proc getPubSubPeer*(p: TestGossipSub, peerId: PeerId): PubSubPeer =
proc getConn(): Future[Connection] {.
@@ -62,6 +78,24 @@ func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
$m.data.hash & $m.topic.hash
ok mid.toBytes()
proc applyDValues(parameters: var GossipSubParams, dValues: Option[DValues]) =
  ## Copy every D-parameter present in `dValues` onto `parameters`;
  ## absent fields keep their current value.
  if dValues.isNone:
    return
  let v = dValues.get

  # apply a single optional field onto the matching parameter
  template override(field: untyped) =
    if v.field.isSome:
      parameters.field = v.field.get

  override(d)
  override(dLow)
  override(dHigh)
  override(dScore)
  override(dOut)
  override(dLazy)
proc generateNodes*(
num: Natural,
secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
@@ -79,6 +113,10 @@ proc generateNodes*(
Opt.none(tuple[bytes: int, interval: Duration]),
gossipSubVersion: string = "",
sendIDontWantOnPublish: bool = false,
heartbeatInterval: Duration = TEST_GOSSIPSUB_HEARTBEAT_INTERVAL,
floodPublish: bool = false,
dValues: Option[DValues] = DValues.none(),
gossipFactor: Option[float] = float.none(),
): seq[PubSub] =
for i in 0 ..< num:
let switch = newStandardSwitch(
@@ -96,13 +134,16 @@ proc generateNodes*(
maxMessageSize = maxMessageSize,
parameters = (
var p = GossipSubParams.init()
p.floodPublish = false
p.heartbeatInterval = heartbeatInterval
p.floodPublish = floodPublish
p.historyLength = 20
p.historyGossip = 20
p.unsubscribeBackoff = unsubscribeBackoff
p.enablePX = enablePX
p.overheadRateLimit = overheadRateLimit
p.sendIDontWantOnPublish = sendIDontWantOnPublish
if gossipFactor.isSome: p.gossipFactor = gossipFactor.get
applyDValues(p, dValues)
p
),
)
@@ -127,13 +168,18 @@ proc generateNodes*(
switch.mount(pubsub)
result.add(pubsub)
proc subscribeNodes*(nodes: seq[PubSub]) {.async.} =
proc connectNodes*(dialer: PubSub, target: PubSub) {.async.} =
  ## Dial `target`'s switch from `dialer`'s switch. Self-dial is a test bug.
  let
    dialerId = dialer.switch.peerInfo.peerId
    targetId = target.switch.peerInfo.peerId
  doAssert dialerId != targetId, "Could not connect same peer"
  await dialer.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)
proc connectNodesStar*(nodes: seq[PubSub]) {.async.} =
for dialer in nodes:
for node in nodes:
if dialer.switch.peerInfo.peerId != node.switch.peerInfo.peerId:
await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
await connectNodes(dialer, node)
proc subscribeSparseNodes*(nodes: seq[PubSub], degree: int = 2) {.async.} =
proc connectNodesSparse*(nodes: seq[PubSub], degree: int = 2) {.async.} =
if nodes.len < degree:
raise
(ref CatchableError)(msg: "nodes count needs to be greater or equal to degree!")
@@ -143,18 +189,14 @@ proc subscribeSparseNodes*(nodes: seq[PubSub], degree: int = 2) {.async.} =
continue
for node in nodes:
if dialer.switch.peerInfo.peerId != node.peerInfo.peerId:
await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
if dialer.switch.peerInfo.peerId != node.switch.peerInfo.peerId:
await connectNodes(dialer, node)
proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
for dialer in nodes:
var dialed: seq[PeerId]
while dialed.len < nodes.len - 1:
let node = sample(nodes)
if node.peerInfo.peerId notin dialed:
if dialer.peerInfo.peerId != node.peerInfo.peerId:
await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
dialed.add(node.peerInfo.peerId)
proc activeWait(
    interval: Duration, maximum: Moment, timeoutErrorMessage = "Timeout on activeWait"
) {.async.} =
  ## One polling step: sleep `interval`, then fail the test if the
  ## deadline `maximum` has passed. Intended to be called inside a loop.
  await sleepAsync(interval)
  doAssert Moment.now() < maximum, timeoutErrorMessage
proc waitSub*(sender, receiver: auto, key: string) {.async.} =
if sender == receiver:
@@ -177,10 +219,14 @@ proc waitSub*(sender, receiver: auto, key: string) {.async.} =
)
:
trace "waitSub sleeping..."
await activeWait(5.milliseconds, timeout, "waitSub timeout!")
# await
await sleepAsync(5.milliseconds)
doAssert Moment.now() < timeout, "waitSub timeout!"
proc waitSubAllNodes*(nodes: seq[auto], topic: string) {.async.} =
  ## Wait until every node observes every other node's subscription to
  ## `topic` (all ordered pairs, excluding self-pairs).
  let count = nodes.len
  for senderIdx in 0 ..< count:
    for receiverIdx in 0 ..< count:
      if senderIdx != receiverIdx:
        await waitSub(nodes[senderIdx], nodes[receiverIdx], topic)
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
let timeout = Moment.now() + 5.seconds
@@ -207,6 +253,198 @@ proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
if ok == nodes.len:
return
trace "waitSubGraph sleeping..."
await activeWait(5.milliseconds, timeout, "waitSubGraph timeout!")
await sleepAsync(5.milliseconds)
doAssert Moment.now() < timeout, "waitSubGraph timeout!"
proc waitForMesh*(
    sender: auto, receiver: auto, key: string, timeoutDuration = 5.seconds
) {.async.} =
  ## Poll until `receiver` appears in `sender`'s mesh for topic `key`;
  ## fails the test after `timeoutDuration`. No-op when sender == receiver.
  if sender == receiver:
    return

  let
    timeoutMoment = Moment.now() + timeoutDuration
    gossipsubSender = GossipSub(sender)
    receiverPeerId = receiver.peerInfo.peerId

  while not gossipsubSender.mesh.hasPeerId(key, receiverPeerId):
    trace "waitForMesh sleeping..."
    await activeWait(5.milliseconds, timeoutMoment, "waitForMesh timeout!")
type PeerTableType* {.pure.} = enum
  ## Selects which gossipsub peer table a test helper should inspect.
  Gossipsub = "gossipsub"
  Mesh = "mesh"
  Fanout = "fanout"
proc waitForPeersInTable*(
    nodes: seq[auto],
    topic: string,
    peerCounts: seq[int],
    table: PeerTableType,
    timeout = 5.seconds,
) {.async.} =
  ## Wait until each node in `nodes` has at least the corresponding number of peers from `peerCounts`
  ## in the specified table (mesh, gossipsub, or fanout) for the given topic.
  ## Fails the test (via `activeWait`'s doAssert) when `timeout` elapses first.
  doAssert nodes.len == peerCounts.len, "Node count must match peer count expectations"

  # Helper proc to check current state and update satisfaction status.
  # `satisfied[i]` is sticky: once node i reaches its target it stays
  # satisfied even if its table later shrinks.
  proc checkState(
      nodes: seq[auto],
      topic: string,
      peerCounts: seq[int],
      table: PeerTableType,
      satisfied: var seq[bool],
  ): bool =
    for i in 0 ..< nodes.len:
      if not satisfied[i]:
        let fsub = GossipSub(nodes[i])
        let currentCount =
          case table
          of PeerTableType.Mesh:
            fsub.mesh.getOrDefault(topic).len
          of PeerTableType.Gossipsub:
            fsub.gossipsub.getOrDefault(topic).len
          of PeerTableType.Fanout:
            fsub.fanout.getOrDefault(topic).len
        satisfied[i] = currentCount >= peerCounts[i]
    return satisfied.allIt(it)

  let timeoutMoment = Moment.now() + timeout
  var satisfied = newSeq[bool](nodes.len)
  # Initial check (fixes the previous redundant `allSatisfied = false`
  # followed by immediate reassignment), then poll until all
  # requirements are met or timeout.
  var allSatisfied = checkState(nodes, topic, peerCounts, table, satisfied)
  while not allSatisfied:
    await activeWait(
      5.milliseconds,
      timeoutMoment,
      "Timeout waiting for peer counts in " & $table & " for topic " & topic,
    )
    allSatisfied = checkState(nodes, topic, peerCounts, table, satisfied)
proc startNodes*(nodes: seq[PubSub]) {.async.} =
  ## Start every node's switch; fails if any start fails.
  await allFuturesThrowing(nodes.mapIt(it.switch.start()))

proc stopNodes*(nodes: seq[PubSub]) {.async.} =
  ## Stop every node's switch; fails if any stop fails.
  await allFuturesThrowing(nodes.mapIt(it.switch.stop()))

template startNodesAndDeferStop*(nodes: seq[PubSub]): untyped =
  ## Start `nodes` and schedule the matching stop at scope exit
  ## (template so the `defer` binds in the caller's scope).
  await startNodes(nodes)
  defer:
    await stopNodes(nodes)
proc subscribeAllNodes*(nodes: seq[PubSub], topic: string, topicHandler: TopicHandler) =
  ## Subscribe every node to `topic` with the same handler.
  for node in nodes:
    node.subscribe(topic, topicHandler)

proc subscribeAllNodes*(
    nodes: seq[PubSub], topic: string, topicHandlers: seq[TopicHandler]
) =
  ## Subscribe each node to `topic` with its positionally-matched handler.
  if nodes.len != topicHandlers.len:
    raise (ref CatchableError)(msg: "nodes and topicHandlers count needs to match!")
  for i in 0 ..< nodes.len:
    nodes[i].subscribe(topic, topicHandlers[i])
template tryPublish*(
    call: untyped, require: int, wait = 10.milliseconds, timeout = 10.seconds
): untyped =
  ## Repeat `call` (a publish expression returning the peer count it
  ## reached) until the accumulated count is at least `require`, or fail
  ## the test after `timeout`.
  var
    expiration = Moment.now() + timeout
    pubs = 0
  while pubs < require and Moment.now() < expiration:
    pubs = pubs + call
    await sleepAsync(wait)

  doAssert pubs >= require, "Failed to publish!"
proc noop*(data: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Stream handler that drops all incoming data.
  discard

proc voidTopicHandler*(topic: string, data: seq[byte]) {.async.} =
  ## Topic handler that ignores every message.
  discard

proc createCompleteHandler*(): (
  Future[bool], proc(topic: string, data: seq[byte]) {.async.}
) =
  ## Return a future together with a topic handler that completes it
  ## (with `true`) on the first message received.
  var fut = newFuture[bool]()
  proc handler(topic: string, data: seq[byte]) {.async.} =
    fut.complete(true)

  return (fut, handler)
proc addIHaveObservers*(nodes: seq[auto], topic: string, receivedIHaves: ref seq[int]) =
  ## Attach an observer to every node counting incoming IHAVE control
  ## entries for `topic` into `receivedIHaves[i]` (index = node index).
  let numberOfNodes = nodes.len
  receivedIHaves[] = repeat(0, numberOfNodes)

  for i in 0 ..< numberOfNodes:
    var pubsubObserver: PubSubObserver
    capture i:
      # per-iteration capture so each closure writes its own slot
      let checkForIhaves = proc(peer: PubSubPeer, msgs: var RPCMsg) =
        if msgs.control.isSome:
          let iHave = msgs.control.get.ihave
          if iHave.len > 0:
            for msg in iHave:
              if msg.topicID == topic:
                receivedIHaves[i] += 1

      pubsubObserver = PubSubObserver(onRecv: checkForIhaves)

    nodes[i].addObserver(pubsubObserver)
proc addIDontWantObservers*(nodes: seq[auto], receivedIDontWants: ref seq[int]) =
  ## Attach an observer to every node counting incoming IDONTWANT
  ## control messages into `receivedIDontWants[i]` (index = node index).
  let numberOfNodes = nodes.len
  receivedIDontWants[] = repeat(0, numberOfNodes)

  for i in 0 ..< numberOfNodes:
    var pubsubObserver: PubSubObserver
    capture i:
      # per-iteration capture so each closure writes its own slot
      let checkForIDontWant = proc(peer: PubSubPeer, msgs: var RPCMsg) =
        if msgs.control.isSome:
          let iDontWant = msgs.control.get.idontwant
          if iDontWant.len > 0:
            receivedIDontWants[i] += 1

      pubsubObserver = PubSubObserver(onRecv: checkForIDontWant)

    nodes[i].addObserver(pubsubObserver)
# TODO: refactor helper methods from testgossipsub.nim
proc setupNodes*(count: int): seq[PubSub] =
  ## Shorthand: create `count` gossipsub nodes with default test params.
  generateNodes(count, gossip = true)
proc connectNodes*(
    nodes: seq[PubSub], target: PubSub, topic: string = "foobar"
) {.async.} =
  ## Subscribe every node in `nodes` to `topic` and connect it to
  ## `target`, forming a star centered on `target`.
  ## Generalized: `topic` was previously hard-coded to "foobar"; the
  ## default keeps all existing call sites working unchanged.
  proc handler(topicName: string, data: seq[byte]) {.async.} =
    check topicName == topic

  for node in nodes:
    node.subscribe(topic, handler)
    await node.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)
proc baseTestProcedure*(
    nodes: seq[PubSub],
    gossip1: GossipSub,
    numPeersFirstMsg: int,
    numPeersSecondMsg: int,
) {.async.} =
  ## Shared body for the floodPublish-limit tests: publishes oversized
  ## messages from nodes[0] and checks how many peers each one reaches,
  ## first without and then with a mesh on the publisher.
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"

  block setup:
    # warm up until a small publish reaches all 19 connected peers
    # (20-node star), otherwise fail the test
    for i in 0 ..< 50:
      if (await nodes[0].publish("foobar", ("Hello!" & $i).toBytes())) == 19:
        break setup
      await sleepAsync(10.milliseconds)
    check false

  check (await nodes[0].publish("foobar", newSeq[byte](2_500_000))) == numPeersFirstMsg
  check (await nodes[0].publish("foobar", newSeq[byte](500_001))) == numPeersSecondMsg

  # Now try with a mesh
  gossip1.subscribe("foobar", handler)
  checkUntilTimeout:
    gossip1.mesh.peers("foobar") > 5

  # use a different length so that the message is not equal to the last
  check (await nodes[0].publish("foobar", newSeq[byte](500_000))) == numPeersSecondMsg

proc `$`*(peer: PubSubPeer): string =
  ## Render peers via their short log form for readable test output.
  shortLog(peer)

3
tests/testall.nim Normal file
View File

@@ -0,0 +1,3 @@
{.used.}
import testnative, testdaemon, ./pubsub/testpubsub, testinterop

View File

@@ -0,0 +1,45 @@
{.used.}
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import ./helpers
import stew/byteutils
import ../libp2p/stream/bridgestream
suite "BridgeStream":
  asyncTest "send-receive":
    # Bytes written on either end of the bridge are readable on the
    # other end, in both directions.
    let (c1, c2) = bridgedConnections()
    var msg: array[8, byte]

    # c1 -> c2
    await c1.write("hello c2")
    await c2.readExactly(addr msg, msg.len)
    check string.fromBytes(msg) == "hello c2"

    # c2 -> c1
    await c2.write("hello c1")
    await c1.readExactly(addr msg, msg.len)
    check string.fromBytes(msg) == "hello c1"

    await c1.close()
    await c2.close()

  asyncTest "closing":
    # closing c1, should also close c2
    var (c1, c2) = bridgedConnections()
    await c1.close()
    expect LPStreamEOFError:
      await c2.write("hello c1")

    # closing c2, should also close c1
    (c1, c2) = bridgedConnections()
    await c2.close()
    expect LPStreamEOFError:
      await c1.write("hello c2")

View File

@@ -18,14 +18,14 @@ import
discovery/discoverymngr,
discovery/rendezvousinterface,
]
import ./helpers
import ./helpers, ./utils/[futures, async_tests]
proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
SwitchBuilder
.new()
.withRng(newRng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withTcpTransport()
.withAddresses(@[MultiAddress.init(MemoryAutoAddress).tryGet()])
.withMemoryTransport()
.withMplex()
.withNoise()
.withRendezVous(rdv)

View File

@@ -12,7 +12,7 @@
import unittest2
import nimcrypto/utils
import ../libp2p/crypto/[crypto, ecnist]
import stew/results
import results
const
TestsCount = 10 # number of random tests

View File

@@ -4,7 +4,7 @@ import ../libp2p/crypto/crypto, ../libp2p/protocols/connectivity/relay/[relay, c
proc switchMplexCreator(
ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
prov: TransportProvider = proc(upgr: Upgrade): Transport =
prov: TransportProvider = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TcpTransport.new({}, upgr),
relay: Relay = Relay.new(circuitRelayV1 = true),
): Switch {.raises: [LPError].} =
@@ -27,7 +27,7 @@ proc switchMplexCreator(
proc switchYamuxCreator(
ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
prov: TransportProvider = proc(upgr: Upgrade): Transport =
prov: TransportProvider = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TcpTransport.new({}, upgr),
relay: Relay = Relay.new(circuitRelayV1 = true),
): Switch {.raises: [LPError].} =

View File

@@ -0,0 +1,149 @@
{.used.}
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import stew/byteutils
import ../libp2p/[transports/memorytransport, multiaddress]
import ./helpers
suite "Memory transport":
  teardown:
    checkTrackers()

  asyncTest "memory multiaddress":
    # A /memory/<id> multiaddress round-trips through init and `$`.
    let ma = MultiAddress.init("/memory/addr-1").get()
    check $ma == "/memory/addr-1"

  asyncTest "can handle local address":
    let ma = @[MultiAddress.init("/memory/addr-1").get()]
    let transport: MemoryTransport = MemoryTransport.new()
    await transport.start(ma)
    check transport.handles(transport.addrs[0])
    await transport.stop()

  asyncTest "send receive":
    # Full-duplex exchange: client writes "client", server answers "server".
    let ma = @[MultiAddress.init("/memory/addr-1").get()]
    let server = MemoryTransport.new()
    await server.start(ma)

    proc runClient() {.async.} =
      let client = MemoryTransport.new()
      let conn = await client.dial("", ma[0])
      await conn.write("client")
      var resp: array[6, byte]
      await conn.readExactly(addr resp, resp.len)
      await conn.close()
      check string.fromBytes(resp) == "server"
      await client.stop()

    proc serverAcceptHandler() {.async.} =
      let conn = await server.accept()
      var resp: array[6, byte]
      await conn.readExactly(addr resp, resp.len)
      check string.fromBytes(resp) == "client"
      await conn.write("server")
      await conn.close()
      await server.stop()

    asyncSpawn serverAcceptHandler()
    await runClient()

  asyncTest "server already started":
    # Only one transport may accept on a given memory address at a time.
    let ma = @[MultiAddress.init("/memory/addr-1").get()]
    let server = MemoryTransport.new()
    await server.start(ma)

    proc serverAcceptHandler() {.async.} =
      let conn = await server.accept()
      await conn.close()

    asyncSpawn serverAcceptHandler()

    # accept by server2 should not succeed
    let server2 = MemoryTransport.new()
    await server2.start(ma)
    expect MemoryTransportError:
      discard await server2.accept()

    # dial to pass through server.accept()
    let conn = await server2.dial("", ma[0])
    await conn.close()

    await server.stop()
    await server2.stop()

  asyncTest "server stopping - should drop accept":
    let ma = @[MultiAddress.init("/memory/addr-1").get()]
    let server = MemoryTransport.new()
    await server.start(ma)

    proc serverAcceptHandler() {.async.} =
      # should throw error when stopped
      expect MemoryTransportAcceptStopped:
        discard await server.accept()

    asyncSpawn serverAcceptHandler()
    await server.stop()

  asyncTest "server conn close propagated to client":
    # Server closes its end after reading; the client's pending read must
    # observe LPStreamEOFError.
    let ma = @[MultiAddress.init("/memory/addr-1").get()]
    let server = MemoryTransport.new()
    await server.start(ma)

    proc serverAcceptHandler() {.async.} =
      let conn = await server.accept()
      var resp: array[6, byte]
      await conn.readExactly(addr resp, resp.len)
      check string.fromBytes(resp) == "client"
      await conn.close()
      await server.stop()

    proc runClient() {.async.} =
      let client = MemoryTransport.new()
      let conn = await client.dial("", ma[0])
      await conn.write("client")
      var resp: array[6, byte]
      expect LPStreamEOFError:
        await conn.readExactly(addr resp, resp.len)
      await conn.close() # already closed
      await client.stop()

    asyncSpawn serverAcceptHandler()
    await runClient()

  asyncTest "client conn close propagated to server":
    # Client closes immediately after dialing; the server's write must
    # observe LPStreamEOFError.
    let ma = @[MultiAddress.init("/memory/addr-1").get()]
    let server = MemoryTransport.new()
    await server.start(ma)

    proc serverAcceptHandler() {.async.} =
      let conn = await server.accept()
      expect LPStreamEOFError:
        await conn.write("server") # already closed
      await conn.close()
      await server.stop()

    proc runClient() {.async.} =
      let client = MemoryTransport.new()
      let conn = await client.dial("", ma[0])
      await conn.close()
      await client.stop()

    asyncSpawn serverAcceptHandler()
    await runClient()

View File

@@ -12,7 +12,7 @@
import unittest2
import ../libp2p/protobuf/minprotobuf
import ../libp2p/varint
import stew/byteutils, strutils, sequtils
import stew/byteutils, strutils
suite "MinProtobuf test suite":
const VarintVectors = [

View File

@@ -11,7 +11,7 @@
import unittest2
import ../libp2p/multibase
import stew/results
import results
const GoTestVectors = [
["identity", "\x00Decentralize everything!!!", "Decentralize everything!!!"],

View File

@@ -10,8 +10,8 @@
# those terms.
import
testvarint, testconnection, testminprotobuf, teststreamseq, testsemaphore,
testheartbeat, testfuture
testvarint, testconnection, testbridgestream, testminprotobuf, teststreamseq,
testsemaphore, testheartbeat, testfuture
import testminasn1, testrsa, testecnist, tested25519, testsecp256k1, testcrypto
@@ -20,9 +20,16 @@ import
testsigned_envelope, testrouting_record
import
testtcptransport, testtortransport, testnameresolve, testwstransport, testmultistream,
testbufferstream, testidentify, testobservedaddrmanager, testconnmngr, testswitch,
testnoise, testpeerinfo, testpeerstore, testping, testmplex, testrelayv1, testrelayv2,
testrendezvous, testdiscovery, testyamux, testautonat, testautonatservice,
testautorelay, testdcutr, testhpservice, testutility, testhelpers,
testwildcardresolverservice
testtcptransport,
testtortransport,
testwstransport,
testquic,
testmemorytransport,
transports/tls/testcertificate
import
testnameresolve, testmultistream, testbufferstream, testidentify,
testobservedaddrmanager, testconnmngr, testswitch, testnoise, testpeerinfo,
testpeerstore, testping, testmplex, testrelayv1, testrelayv2, testrendezvous,
testdiscovery, testyamux, testautonat, testautonatservice, testautorelay, testdcutr,
testhpservice, testutility, testhelpers, testwildcardresolverservice

View File

@@ -1,24 +1,117 @@
{.used.}
import sequtils
import chronos, stew/byteutils
import
../libp2p/[
stream/connection,
transports/transport,
transports/quictransport,
transports/tls/certificate,
upgrademngrs/upgrade,
multiaddress,
errors,
wire,
]
import ./helpers
import ./helpers, ./commontransport
proc createServerAcceptConn(
    server: QuicTransport
): proc(): Future[void] {.
    async: (raises: [transport.TransportError, LPStreamError, CancelledError])
.} =
  ## Returns a handler that accepts one connection on `server`, reads the
  ## 6-byte "client" greeting from an inbound stream and replies "server".
  ## A stopped transport (QuicTransportAcceptStopped) is treated as a
  ## normal shutdown, not a failure.
  proc handler() {.
      async: (raises: [transport.TransportError, LPStreamError, CancelledError])
  .} =
    try:
      let conn = await server.accept()
      if conn == nil:
        return
      let stream = await getStream(QuicSession(conn), Direction.In)
      var resp: array[6, byte]
      await stream.readExactly(addr resp, 6)
      check string.fromBytes(resp) == "client"
      await stream.write("server")
      await stream.close()
    except QuicTransportAcceptStopped:
      discard # Transport is stopped

  return handler
proc invalidCertGenerator(
    kp: KeyPair
): CertificateX509 {.gcsafe, raises: [TLSCertificateError].} =
  ## Builds an X.509 certificate from a deliberately mismatched keypair:
  ## `kp.seckey` paired with a freshly generated, unrelated public key.
  ## Used to exercise the handshake's certificate-rejection paths.
  try:
    let keyNew = PrivateKey.random(ECDSA, (newRng())[]).get()
    let pubkey = keyNew.getPublicKey().get()
    # invalidKp has pubkey that does not match seckey
    let invalidKp = KeyPair(seckey: kp.seckey, pubkey: pubkey)
    return generateX509(invalidKp, encodingFormat = EncodingFormat.PEM)
  except ResultError[crypto.CryptoError]:
    raiseAssert "private key should be set"
proc createTransport(withInvalidCert: bool = false): Future[QuicTransport] {.async.} =
  ## Starts a QuicTransport on an ephemeral localhost UDP port with a fresh
  ## ECDSA identity key. When `withInvalidCert` is set, the transport uses
  ## `invalidCertGenerator` so its certificate fails verification.
  let ma = @[MultiAddress.init("/ip4/127.0.0.1/udp/0/quic-v1").tryGet()]
  let privateKey = PrivateKey.random(ECDSA, (newRng())[]).tryGet()
  let trans =
    if withInvalidCert:
      QuicTransport.new(Upgrade(), privateKey, invalidCertGenerator)
    else:
      QuicTransport.new(Upgrade(), privateKey)
  await trans.start(ma)
  return trans
suite "Quic transport":
  teardown:
    checkTrackers()

  # NOTE(review): the rendered diff interleaved the pre-change body of the
  # first test (zero-arg QuicTransport.new()); only the post-change lines
  # using createTransport are kept here.
  asyncTest "can handle local address":
    let trans = await createTransport()
    check trans.handles(trans.addrs[0])
    await trans.stop()

  asyncTest "transport e2e":
    # Round trip: client sends "client", server (spawned handler) replies
    # "server".
    let server = await createTransport()
    asyncSpawn createServerAcceptConn(server)()

    proc runClient() {.async.} =
      let client = await createTransport()
      let conn = await client.dial("", server.addrs[0])
      let stream = await getStream(QuicSession(conn), Direction.Out)
      await stream.write("client")
      var resp: array[6, byte]
      await stream.readExactly(addr resp, 6)
      await stream.close()
      check string.fromBytes(resp) == "server"
      await client.stop()

    await runClient()
    await server.stop()

  asyncTest "transport e2e - invalid cert - server":
    # Dialing a server whose cert fails verification must raise.
    let server = await createTransport(true)
    asyncSpawn createServerAcceptConn(server)()

    proc runClient() {.async.} =
      let client = await createTransport()
      expect QuicTransportDialError:
        discard await client.dial("", server.addrs[0])
      await client.stop()

    await runClient()
    await server.stop()

  asyncTest "transport e2e - invalid cert - client":
    # A client presenting an invalid cert must fail the dial as well.
    let server = await createTransport()
    asyncSpawn createServerAcceptConn(server)()

    proc runClient() {.async.} =
      let client = await createTransport(true)
      expect QuicTransportDialError:
        discard await client.dial("", server.addrs[0])
      await client.stop()

    await runClient()
    await server.stop()

View File

@@ -58,18 +58,22 @@ suite "RendezVous":
await client.start()
await remoteSwitch.start()
await client.connect(remoteSwitch.peerInfo.peerId, remoteSwitch.peerInfo.addrs)
let res0 = await rdv.request("empty")
let res0 = await rdv.request(Opt.some("empty"))
check res0.len == 0
await rdv.advertise("foo")
let res1 = await rdv.request("foo")
let res1 = await rdv.request(Opt.some("foo"))
check:
res1.len == 1
res1[0] == client.peerInfo.signedPeerRecord.data
let res2 = await rdv.request("bar")
let res2 = await rdv.request(Opt.some("bar"))
check res2.len == 0
await rdv.unsubscribe("foo")
let res3 = await rdv.request("foo")
let res3 = await rdv.request(Opt.some("foo"))
check res3.len == 0
await allFutures(client.stop(), remoteSwitch.stop())
asyncTest "Harder remote test":
@@ -88,17 +92,21 @@ suite "RendezVous":
)
await allFutures(rdvSeq.mapIt(it.advertise("foo")))
var data = clientSeq.mapIt(it.peerInfo.signedPeerRecord.data)
let res1 = await rdvSeq[0].request("foo", 5)
let res1 = await rdvSeq[0].request(Opt.some("foo"), 5)
check res1.len == 5
for d in res1:
check d in data
data.keepItIf(it notin res1)
let res2 = await rdvSeq[0].request("foo")
let res2 = await rdvSeq[0].request(Opt.some("foo"))
check res2.len == 5
for d in res2:
check d in data
let res3 = await rdvSeq[0].request("foo")
let res3 = await rdvSeq[0].request(Opt.some("foo"))
check res3.len == 0
let res4 = await rdvSeq[0].request()
check res4.len == 11
let res5 = await rdvSeq[0].request(Opt.none(string))
check res5.len == 11
await remoteSwitch.stop()
await allFutures(clientSeq.mapIt(it.stop()))
@@ -116,9 +124,9 @@ suite "RendezVous":
await clientA.connect(remoteSwitch.peerInfo.peerId, remoteSwitch.peerInfo.addrs)
await clientB.connect(remoteSwitch.peerInfo.peerId, remoteSwitch.peerInfo.addrs)
await rdvA.advertise("foo")
let res1 = await rdvA.request("foo")
let res1 = await rdvA.request(Opt.some("foo"))
await rdvB.advertise("foo")
let res2 = await rdvA.request("foo")
let res2 = await rdvA.request(Opt.some("foo"))
check:
res2.len == 1
res2[0] == clientB.peerInfo.signedPeerRecord.data
@@ -129,11 +137,11 @@ suite "RendezVous":
rdv = RendezVous.new(minDuration = 1.minutes, maxDuration = 72.hours)
switch = createSwitch(rdv)
expect AdvertiseError:
discard await rdv.request("A".repeat(300))
discard await rdv.request(Opt.some("A".repeat(300)))
expect AdvertiseError:
discard await rdv.request("A", -1)
discard await rdv.request(Opt.some("A"), -1)
expect AdvertiseError:
discard await rdv.request("A", 3000)
discard await rdv.request(Opt.some("A"), 3000)
expect AdvertiseError:
await rdv.advertise("A".repeat(300))
expect AdvertiseError:

View File

@@ -994,7 +994,7 @@ suite "Switch":
.withRng(crypto.newRng())
.withMplex()
.withTransport(
proc(upgr: Upgrade): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
WsTransport.new(upgr)
)
.withNameResolver(resolver)
@@ -1007,7 +1007,7 @@ suite "Switch":
.withRng(crypto.newRng())
.withMplex()
.withTransport(
proc(upgr: Upgrade): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
WsTransport.new(upgr)
)
.withTcpTransport()
@@ -1046,10 +1046,7 @@ suite "Switch":
.new()
.withAddress(quicAddress1)
.withRng(crypto.newRng())
.withTransport(
proc(upgr: Upgrade): Transport =
QuicTransport.new(upgr)
)
.withQuicTransport()
.withNoise()
.build()
@@ -1057,10 +1054,7 @@ suite "Switch":
.new()
.withAddress(quicAddress2)
.withRng(crypto.newRng())
.withTransport(
proc(upgr: Upgrade): Transport =
QuicTransport.new(upgr)
)
.withQuicTransport()
.withNoise()
.build()

View File

@@ -11,7 +11,7 @@
import sugar
import chronos
import ../libp2p/[stream/connection, muxers/yamux/yamux], ./helpers
import ../libp2p/[stream/connection, stream/bridgestream, muxers/yamux/yamux], ./helpers
proc newBlockerFut(): Future[void] {.async: (raises: [], raw: true).} =
newFuture[void]()
@@ -27,7 +27,9 @@ suite "Yamux":
) {.inject.} =
#TODO in a template to avoid threadvar
let
(conna {.inject.}, connb {.inject.}) = bridgedConnections()
(conna {.inject.}, connb {.inject.}) = bridgedConnections(
closeTogether = false, dirA = Direction.Out, dirB = Direction.In
)
yamuxa {.inject.} =
Yamux.new(conna, windowSize = ws, inTimeout = inTo, outTimeout = outTo)
yamuxb {.inject.} =

View File

@@ -3,11 +3,9 @@ FROM nimlang/nim:1.6.16 as builder
WORKDIR /app
COPY .pinned libp2p.nimble nim-libp2p/
COPY .pinned libp2p.nimble nim-libp2p/
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y python python3 python3-pip python3-venv curl
RUN mkdir .venv && python3 -m venv .venv && . .venv/bin/activate
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev
RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240dbf11c544011dd0e033d3c3acca56af" -y

View File

@@ -45,7 +45,7 @@ proc main() {.async.} =
of "ws":
discard switchBuilder
.withTransport(
proc(upgr: Upgrade): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
WsTransport.new(upgr)
)
.withAddress(MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet())

View File

@@ -1,20 +1,116 @@
import unittest2
import times
import ../../../libp2p/transports/tls/certificate
import ../../../libp2p/crypto/crypto
import ../../../libp2p/peerid
suite "Certificate Tests":
test "sanity check":
suite "Certificate roundtrip tests":
test "generate then parse with DER ecoding":
let schemes = @[Ed25519, Secp256k1, ECDSA]
for scheme in schemes:
var rng = newRng()
let keypair = KeyPair.random(scheme, rng[]).tryGet()
let peerId = PeerId.init(keypair.pubkey).tryGet()
let certX509 = generateX509(keypair, encodingFormat = EncodingFormat.DER)
let cert = parse(certX509.certificate)
check peerId == cert.peerId()
check cert.publicKey().scheme == scheme
check cert.verify()
test "gnerate with invalid validity time":
var rng = newRng()
let keypair = KeyPair.random(Ed25519, rng[]).tryGet()
# Generate an Ed25519 keypair
let keypair = KeyPair.random(Secp256k1, rng[]).tryGet()
let peerId = PeerId.init(keypair.pubkey).tryGet()
# past
var validFrom = (now() - 3.days).toTime()
var validTo = (now() - 1.days).toTime()
var certX509 = generateX509(keypair, validFrom, validTo)
var cert = parse(certX509.certificate)
check not cert.verify()
let (certBytes, _) = generate(keypair, EncodingFormat.DER)
let cert = parse(certBytes)
let ext = cert.extension
# future
validFrom = (now() + 1.days).toTime()
validTo = (now() + 3.days).toTime()
certX509 = generateX509(keypair, validFrom, validTo)
cert = parse(certX509.certificate)
check not cert.verify()
let parsedPeerId = PeerId.init(PublicKey.init(ext.publicKey).tryGet).tryGet()
check parsedPeerId == peerId
# twisted from-to
validFrom = (now() + 3.days).toTime()
validTo = (now() - 3.days).toTime()
certX509 = generateX509(keypair, validFrom, validTo)
cert = parse(certX509.certificate)
check not cert.verify()
## Test vectors from https://github.com/libp2p/specs/blob/master/tls/tls.md#test-vectors.
suite "Test vectors":
  test "ECDSA Peer ID":
    let certBytesHex =
      "308201f63082019da0030201020204499602d2300a06082a8648ce3d040302302031123010060355040a13096c69627032702e696f310a300806035504051301313020170d3735303130313133303030305a180f34303936303130313133303030305a302031123010060355040a13096c69627032702e696f310a300806035504051301313059301306072a8648ce3d020106082a8648ce3d030107034200040c901d423c831ca85e27c73c263ba132721bb9d7a84c4f0380b2a6756fd601331c8870234dec878504c174144fa4b14b66a651691606d8173e55bd37e381569ea381c23081bf3081bc060a2b0601040183a25a01010481ad3081aa045f0803125b3059301306072a8648ce3d020106082a8648ce3d03010703420004bf30511f909414ebdd3242178fd290f093a551cf75c973155de0bb5a96fedf6cb5d52da7563e794b512f66e60c7f55ba8a3acf3dd72a801980d205e8a1ad29f2044730450220064ea8124774caf8f50e57f436aa62350ce652418c019df5d98a3ac666c9386a022100aa59d704a931b5f72fb9222cb6cc51f954d04a4e2e5450f8805fe8918f71eaae300a06082a8648ce3d04030203470030440220799395b0b6c1e940a7e4484705f610ab51ed376f19ff9d7c16757cfbf61b8d4302206205c03fbb0f95205c779be86581d3e31c01871ad5d1f3435bcf375cb0e5088a"
    let cert = parse(fromHex(certBytesHex))
    check $cert.peerId() == "QmfXbAwNjJLXfesgztEHe8HwgVDCMMpZ9Eax1HYq6hn9uE"
    check cert.publicKey().scheme == PKScheme.ECDSA
    check cert.verify()

  test "RSA Peer ID":
    let certBytesHex =
      "308203863082032ba0030201020204499602d2300a06082a8648ce3d040302302031123010060355040a13096c69627032702e696f310a300806035504051301313020170d3735303130313133303030305a180f34303936303130313133303030305a302031123010060355040a13096c69627032702e696f310a300806035504051301313059301306072a8648ce3d020106082a8648ce3d030107034200040c901d423c831ca85e27c73c263ba132721bb9d7a84c4f0380b2a6756fd601331c8870234dec878504c174144fa4b14b66a651691606d8173e55bd37e381569ea382024f3082024b30820247060a2b0601040183a25a010104820237308202330482012b080012a60230820122300d06092a864886f70d01010105000382010f003082010a0282010100c6423f0fa8757d15b9e9332126339f32395b3f5d16e639b9d030e0507e60e68c973607dad6a2994a5b5f80456de271f21faee9051807e846ade5b396c661eef046e1f8f2279182df845f8962040cf08f6cfadcfde9c4592a0d11b92edab459e9099535db595834c8db762136a164f159bb01a5545a24e0f453df420e6633a9cbc123454b68c11966bc9851993608875e804cfe65604ac60f357b226ba57de0c191039935f7c0c85f1d3de7c2aeb7e6a1520f7201542b949784feb85d53d99f034a55218e6c4fae870cddf7dbb43583cd9eb1bc9e5111c0e7cf62aafef1188711ba205b87c8c95a4ccf154881a49e8b155c795fc1c7621b3b95b01ce4af48a6a7020301000104820100ab0ed7f294e3ec740cb5842ec3d0fc5a0458e00b60bb6c9ee0da8626d0cc1a50cbeb4cc1d61e9a487b327627600b7474e29f5d6c2f66c24966b3524c87e9edc42a3461fb183ce92127b160cf1be599f04a68a22cb463c266181b87e265d418ebff1bcf255bd5e5c1db783ff5909db37c183af5532563c847f104b540855727af53c0412d181f0893e150d5a28e0e9562ff301ff1278264a724152abf0e79537d52cddc205a0f5205490f18218b28ec8051b887033572ace045c24bfe3c0c72d18171148fac43f8b3a494a4ba90d27e554f64c17dc8513597078409e813791d79ee225bf6a2d8dc0893304f74b949230dad4ee4e1cfa62fa1c9ff0439bcda79c3300a06082a8648ce3d0403020349003046022100b3f1961bb314db110f9aae02e1ca0db9cbfda089635a6b99f257661db41a913d022100c04cad46601322e10cf092360a5a290148dfca40fdf7050c977863c57b407144"
    let cert = parse(fromHex(certBytesHex))
    check $cert.peerId() == "QmXsmtNnfvVdbDaPK415Zw3sjcS49aNfE33PtrQPtoyUfa"
    check cert.publicKey().scheme == PKScheme.RSA
    check cert.verify()

  test "Ed25519 Peer ID":
    let certBytesHex =
      "308201ae30820156a0030201020204499602d2300a06082a8648ce3d040302302031123010060355040a13096c69627032702e696f310a300806035504051301313020170d3735303130313133303030305a180f34303936303130313133303030305a302031123010060355040a13096c69627032702e696f310a300806035504051301313059301306072a8648ce3d020106082a8648ce3d030107034200040c901d423c831ca85e27c73c263ba132721bb9d7a84c4f0380b2a6756fd601331c8870234dec878504c174144fa4b14b66a651691606d8173e55bd37e381569ea37c307a3078060a2b0601040183a25a0101046a3068042408011220a77f1d92fedb59dddaea5a1c4abd1ac2fbde7d7b879ed364501809923d7c11b90440d90d2769db992d5e6195dbb08e706b6651e024fda6cfb8846694a435519941cac215a8207792e42849cccc6cd8136c6e4bde92a58c5e08cfd4206eb5fe0bf909300a06082a8648ce3d0403020346003043021f50f6b6c52711a881778718238f650c9fb48943ae6ee6d28427dc6071ae55e702203625f116a7a454db9c56986c82a25682f7248ea1cb764d322ea983ed36a31b77"
    let cert = parse(fromHex(certBytesHex))
    check $cert.peerId() == "12D3KooWM6CgA9iBFZmcYAHA6A2qvbAxqfkmrYiRQuz3XEsk4Ksv"
    check cert.publicKey().scheme == PKScheme.Ed25519
    check cert.verify()

  test "Secp256k1 Peer ID":
    let certBytesHex =
      "308201ba3082015fa0030201020204499602d2300a06082a8648ce3d040302302031123010060355040a13096c69627032702e696f310a300806035504051301313020170d3735303130313133303030305a180f34303936303130313133303030305a302031123010060355040a13096c69627032702e696f310a300806035504051301313059301306072a8648ce3d020106082a8648ce3d030107034200040c901d423c831ca85e27c73c263ba132721bb9d7a84c4f0380b2a6756fd601331c8870234dec878504c174144fa4b14b66a651691606d8173e55bd37e381569ea38184308181307f060a2b0601040183a25a01010471306f0425080212210206dc6968726765b820f050263ececf7f71e4955892776c0970542efd689d2382044630440220145e15a991961f0d08cd15425bb95ec93f6ffa03c5a385eedc34ecf464c7a8ab022026b3109b8a3f40ef833169777eb2aa337cfb6282f188de0666d1bcec2a4690dd300a06082a8648ce3d0403020349003046022100e1a217eeef9ec9204b3f774a08b70849646b6a1e6b8b27f93dc00ed58545d9fe022100b00dafa549d0f03547878338c7b15e7502888f6d45db387e5ae6b5d46899cef0"
    let cert = parse(fromHex(certBytesHex))
    check $cert.peerId() == "16Uiu2HAkutTMoTzDw1tCvSRtu6YoixJwS46S1ZFxW8hSx9fWHiPs"
    check cert.publicKey().scheme == PKScheme.Secp256k1
    check cert.verify()

  test "Invalid certificate signature":
    let certBytesHex =
      "308201f73082019da0030201020204499602d2300a06082a8648ce3d040302302031123010060355040a13096c69627032702e696f310a300806035504051301313020170d3735303130313133303030305a180f34303936303130313133303030305a302031123010060355040a13096c69627032702e696f310a300806035504051301313059301306072a8648ce3d020106082a8648ce3d030107034200040c901d423c831ca85e27c73c263ba132721bb9d7a84c4f0380b2a6756fd601331c8870234dec878504c174144fa4b14b66a651691606d8173e55bd37e381569ea381c23081bf3081bc060a2b0601040183a25a01010481ad3081aa045f0803125b3059301306072a8648ce3d020106082a8648ce3d03010703420004bf30511f909414ebdd3242178fd290f093a551cf75c973155de0bb5a96fedf6cb5d52da7563e794b512f66e60c7f55ba8a3acf3dd72a801980d205e8a1ad29f204473045022100bb6e03577b7cc7a3cd1558df0da2b117dfdcc0399bc2504ebe7de6f65cade72802206de96e2a5be9b6202adba24ee0362e490641ac45c240db71fe955f2c5cf8df6e300a06082a8648ce3d0403020348003045022100e847f267f43717358f850355bdcabbefb2cfbf8a3c043b203a14788a092fe8db022027c1d04a2d41fd6b57a7e8b3989e470325de4406e52e084e34a3fd56eef0d0df"
    let cert = parse(fromHex(certBytesHex)) # should parse correctly
    # should have key
    check $cert.peerId() == "QmfXbAwNjJLXfesgztEHe8HwgVDCMMpZ9Eax1HYq6hn9uE"
    check cert.publicKey().scheme == PKScheme.ECDSA
    # should not verify
    check not cert.verify()

  test "Expired certificate":
    let certBytesHex =
      "30820214308201BBA003020102021412A974B8DE545B54729BF8393EFFFB00AEF69FB5300A06082A8648CE3D04030230423140303E06035504030C37434E3D313244334B6F6F574A7A564657566869746861656A395172374E6A4A4642626A44447942475351737146317361734342635841483022180F32303234313232343038303030345A180F32303235303232343038303030345A30423140303E06035504030C37434E3D313244334B6F6F574A7A564657566869746861656A395172374E6A4A4642626A44447942475351737146317361734342635841483059301306072A8648CE3D020106082A8648CE3D03010703420004C1DB5D2F5D4697386B723993D5499DB50E80E3F970135381B25FDBCA0660797F79FCE818EDDEFF6D27F56C6505F3F3F439E5D78F355293A5215718FA91DA0263A3818A3081873078060A2B0601040183A25A0101046A30680424080112208850FF8CF0E751088411ECF49A34DDBB50EEE0584F1B8CA6F9BBC93752FD6D4A04401ACF48FCFC7CA932C316E21DC986B366531D158E1499194D8601AE9FEF65D6E41198D4FC14B5AEA6BC67B06D28AFA13B759477048FF887CC26C0F197FB227B0F300B0603551D0F0101FF0401A0300A06082A8648CE3D040302034700304402206B1C01813E0A3CF0777D564A8090386B324660E703F6120E7C387DF23F94323B0220206E6BCC1213EF2A15E01B16F30DF45653B84AE6CEA87271A9301E055DB65FCB"
    let cert = parse(fromHex(certBytesHex))
    # should have valid key
    check $cert.peerId() == "12D3KooWJzVFWVhithaej9Qr7NjJFBbjDDyBGSQsqF1sasCBcXAH"
    check cert.publicKey().scheme == PKScheme.Ed25519
    # should not verify
    check not cert.verify()
suite "utilities test":
  test "parseCertTime":
    # OpenSSL-style "Mon DD HH:MM:SS YYYY GMT" timestamps must map to the
    # expected Unix epoch seconds.
    var dt = parseCertTime("Mar 19 11:54:31 2025 GMT")
    check 1742385271 == dt.toUnix()
    dt = parseCertTime("Jan 1 00:00:00 1975 GMT")
    check 157766400 == dt.toUnix()

61
tests/utils/futures.nim Normal file
View File

@@ -0,0 +1,61 @@
import chronos/futures, stew/results, chronos, sequtils
const
  DURATION_TIMEOUT* = 1.seconds
  DURATION_TIMEOUT_EXTENDED* = 1500.milliseconds

type FutureStateWrapper*[T] = object
  ## Point-in-time snapshot of a Future's state, plus its value when it
  ## completed successfully (non-void T only).
  future: Future[T]
  state: FutureState
  when T is void:
    discard
  else:
    value: T

proc isPending*(wrapper: FutureStateWrapper): bool =
  ## True when the future had not settled at snapshot time.
  wrapper.state == Pending

proc isCompleted*(wrapper: FutureStateWrapper): bool =
  ## True when the future completed successfully.
  wrapper.state == Completed

proc isCompleted*[T](wrapper: FutureStateWrapper[T], expectedValue: T): bool =
  ## True when the future completed and (for non-void T) produced
  ## `expectedValue`.
  when T is void:
    wrapper.state == Completed
  else:
    wrapper.state == Completed and wrapper.value == expectedValue

proc isCancelled*(wrapper: FutureStateWrapper): bool =
  ## True when the future was cancelled.
  wrapper.state == Cancelled

proc isFailed*(wrapper: FutureStateWrapper): bool =
  ## True when the future finished with an exception.
  wrapper.state == Failed

proc toState*[T](future: Future[T]): FutureStateWrapper[T] =
  ## Capture `future`'s current state. The value is read only for
  ## successfully-completed non-void futures, so `read()` can never raise
  ## here.
  var wrapper: FutureStateWrapper[T]
  wrapper.future = future
  if future.cancelled():
    wrapper.state = Cancelled
  elif future.finished():
    if future.failed():
      wrapper.state = Failed
    else:
      wrapper.state = Completed
      when T isnot void:
        wrapper.value = future.read()
  else:
    wrapper.state = Pending
  return wrapper

proc waitForState*[T](
    future: Future[T], timeout = DURATION_TIMEOUT
): Future[FutureStateWrapper[T]] {.async.} =
  ## Give `future` up to `timeout` to settle, then snapshot whatever state
  ## it is in; the boolean result of `withTimeout` is intentionally
  ## discarded because the snapshot itself records the outcome.
  discard await future.withTimeout(timeout)
  return future.toState()

proc waitForStates*[T](
    futures: seq[Future[T]], timeout = DURATION_TIMEOUT
): Future[seq[FutureStateWrapper[T]]] {.async.} =
  ## Sleep for `timeout`, then snapshot the state of every future.
  await sleepAsync(timeout)
  return futures.mapIt(it.toState())