Compare commits

...

36 Commits

Author SHA1 Message Date
Radosław Kamiński
5f6b8e86a5 test(rendezvous): error cases (#1683) 2025-09-22 15:53:47 +01:00
richΛrd
11b98b7a3f fix: add missing import (#1707) 2025-09-22 13:38:32 +00:00
richΛrd
647f76341e feat(mix): mix_protocol and entry connection (#1703) 2025-09-22 13:09:18 +00:00
Radosław Kamiński
fbf96bb2ce chore(readme): Update chat code example link (#1709) 2025-09-22 13:28:03 +01:00
richΛrd
f0aaecb743 feat(mix): mixnode (#1702) 2025-09-21 16:46:48 +00:00
richΛrd
8d3076ea99 fix: add missing import for linux/amd64 daily job (#1706) 2025-09-20 18:39:12 -04:00
richΛrd
70b7d61436 feat(mix): SURBs and fragmentation (#1700) 2025-09-19 15:56:26 -04:00
Gabriel Cruz
37bae0986c chore: remove go daemon (#1705) 2025-09-19 15:21:23 -04:00
Gabriel Cruz
b34ddab10c chore(autonat-v2): add interop tests (#1695) 2025-09-18 14:30:56 +00:00
richΛrd
e09457da12 feat(mix): sphinx (#1691) 2025-09-18 09:04:44 -04:00
vladopajic
94ad1dcbc8 chore: nimble config tidy (#1696) 2025-09-17 18:10:07 +00:00
Gabriel Cruz
5b9f2cba6f fix: autonatV2 request addresses (#1698) 2025-09-17 13:38:03 -03:00
richΛrd
59e7069c15 feat: v1.13.0 (#1673) 2025-09-17 08:18:53 -04:00
richΛrd
18a0e9c2d1 feat(mix): message (#1690) 2025-09-16 10:49:49 -04:00
richΛrd
34a9a03b73 feat(mix): serialization (#1689) 2025-09-16 14:12:19 +00:00
Gabriel Cruz
788109b4f4 fix(autonat-v2): service setting up correctly (#1694) 2025-09-16 09:10:12 +00:00
vladopajic
44aab92b3e chore(quic): better error handling in stream.write() (#1693) 2025-09-16 08:35:57 +00:00
richΛrd
ad0812b40b feat(mix): sequence number generator and tag manager (#1688) 2025-09-15 16:49:34 -03:00
richΛrd
0751f240a2 feat(mix): crypto (#1687) 2025-09-15 16:46:28 +00:00
richΛrd
d8ecf8a135 chore: add missing import (#1692) 2025-09-15 10:04:07 -04:00
Gabriel Cruz
bab863859c chore: add autonatv2 service to builder (#1686) 2025-09-12 21:23:03 +02:00
Gabriel Cruz
73d04def6f feat: add autonat v2 service (#1684) 2025-09-12 08:26:33 -04:00
richΛrd
4509ade75c feat: add skipIDontWant and skipPreamble (#1681) 2025-09-10 16:27:54 -04:00
Gabriel Cruz
b64c0f6d85 chore: rename withAutonatV2 to withAutonatV2Server (#1680) 2025-09-10 15:20:01 -04:00
vladopajic
582ba7e650 fix(pubsub): use custom conn when message is sent as lower-priority (#1679) 2025-09-10 18:34:51 +02:00
Gabriel Cruz
31ae734aff feat(autonat-v2): add client and tests (#1659) 2025-09-10 13:11:03 +00:00
Radosław Kamiński
f26ff88e6c test(rendezvous): Simplify test setup (#1677) 2025-09-10 10:21:22 +01:00
vladopajic
4fbf59ece8 fix(builders): transport param not assigned in newStandardSwitch (#1676) 2025-09-09 11:34:41 -03:00
Radosław Kamiński
62388a7a20 refactor(rendezvous): Split Rendezvous Protobuf and add tests (#1671) 2025-09-09 09:12:43 +01:00
vladopajic
27051164db chore(tests): utilize quic transport in pubsub tests (#1667) 2025-09-08 21:30:21 +00:00
Gabriel Cruz
f41009461b fix: revert reviewdog workaround (#1672) 2025-09-08 16:35:47 -04:00
vladopajic
c3faabf522 chore(quic): add tests from common interop (#1662) 2025-09-08 16:06:22 +00:00
Gabriel Cruz
10f7f5c68a chore(autonat-v2): add server config (#1669) 2025-09-08 15:23:23 +00:00
Gabriel Cruz
f345026900 fix(linters): use workaround for reviewdog bug (#1668) 2025-09-08 14:48:03 +00:00
vladopajic
5d6578a06f chore: splitRPCMsg improvements (#1665) 2025-09-08 11:06:55 -03:00
Gabriel Cruz
871a5d047f feat(autonat-v2): add server (#1658) 2025-09-04 13:27:49 -04:00
87 changed files with 7236 additions and 4177 deletions

View File

@@ -72,15 +72,6 @@ jobs:
shell: ${{ matrix.shell }}
nim_ref: ${{ matrix.nim.ref }}
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '~1.16.0' # That's the minimum Go version that works with arm.
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v3

View File

@@ -69,16 +69,6 @@ jobs:
nim_ref: ${{ matrix.nim.ref }}
cpu: ${{ matrix.cpu }}
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '~1.16.0'
cache: false
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Install dependencies (pinned)
if: ${{ inputs.pinned_deps }}
run: |

View File

@@ -60,3 +60,30 @@ jobs:
# s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
# s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
# aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
run-autonatv2-interop:
name: Run AutoNATv2 interoperability tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.25"
- name: Set up Nim
uses: jiro4989/setup-nim-action@v1
with:
nim-version: "stable"
- name: Run Go and Nim together
run: |
nimble install
cd interop/autonatv2/go-peer
git clone https://github.com/libp2p/go-libp2p
cd go-libp2p
git apply ../disable-filtering-of-private-ip-addresses.patch
cd ..
go run testautonatv2.go &
cd ../nim-peer
nim r src/nim_peer.nim $(cat ../go-peer/peer.id)

View File

@@ -8,7 +8,7 @@ json_serialization;https://github.com/status-im/nim-json-serialization@#2b1c5eb1
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
nimcrypto;https://github.com/cheatfate/nimcrypto@#19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
quic;https://github.com/vacp2p/nim-quic@#cae13c2d22ba2730c979486cf89b88927045c3ae
quic;https://github.com/vacp2p/nim-quic@#9370190ded18d78a5a9990f57aa8cbbf947f3891
results;https://github.com/arnetheduck/nim-results@#df8113dda4c2d74d460a8fa98252b0b771bf1f27
secp256k1;https://github.com/status-im/nim-secp256k1@#f808ed5e7a7bfc42204ec7830f14b7a42b63c284
serialization;https://github.com/status-im/nim-serialization@#548d0adc9797a10b2db7f788b804330306293088

View File

@@ -47,7 +47,7 @@ nimble install libp2p
You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/). See [examples](./examples) for simple usage patterns.
## Getting Started
Try out the chat example. For this you'll need to have [`go-libp2p-daemon`](examples/go-daemon/daemonapi.md) running. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
Try out the chat example. Full code can be found [here](https://github.com/vacp2p/nim-libp2p/blob/master/examples/directchat.nim):
```bash
nim c -r --threads:on examples/directchat.nim
@@ -81,12 +81,6 @@ Run unit tests:
# run all the unit tests
nimble test
```
**Obs:** Running all tests requires the [`go-libp2p-daemon` to be installed and running](examples/go-daemon/daemonapi.md).
If you only want to run tests that don't require `go-libp2p-daemon`, use:
```
nimble testnative
```
For a list of all available test suites, use:
```
@@ -155,8 +149,6 @@ List of packages modules implemented in nim-libp2p:
| [connmanager](libp2p/connmanager.nim) | Connection manager |
| [identify / push identify](libp2p/protocols/identify.nim) | [Identify](https://docs.libp2p.io/concepts/fundamentals/protocols/#identify) protocol |
| [ping](libp2p/protocols/ping.nim) | [Ping](https://docs.libp2p.io/concepts/fundamentals/protocols/#ping) protocol |
| [libp2p-daemon-client](libp2p/daemon/daemonapi.nim) | [go-daemon](https://github.com/libp2p/go-libp2p-daemon) nim wrapper |
| [interop-libp2p](tests/testinterop.nim) | Interop tests |
| **Transports** | |
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |

View File

@@ -5,12 +5,13 @@ if dirExists("nimbledeps/pkgs2"):
switch("NimblePath", "nimbledeps/pkgs2")
switch("warningAsError", "UnusedImport:on")
switch("warningAsError", "UseBase:on")
switch("warning", "CaseTransition:off")
switch("warning", "ObservableStores:off")
switch("warning", "LockLevel:off")
--styleCheck:
usages
switch("warningAsError", "UseBase:on")
--styleCheck:
error
--mm:
@@ -23,7 +24,7 @@ if defined(windows) and not defined(vcc):
--define:
nimRawSetjmp
# begin Nimble config (version 1)
when fileExists("nimble.paths"):
# begin Nimble config (version 2)
when withDir(thisDir(), system.fileExists("nimble.paths")):
include "nimble.paths"
# end Nimble config

View File

@@ -1,54 +0,0 @@
import chronos, nimcrypto, strutils
import ../../libp2p/daemon/daemonapi
import ../hexdump
const PubSubTopic = "test-net"
proc dumpSubscribedPeers(api: DaemonAPI) {.async.} =
var peers = await api.pubsubListPeers(PubSubTopic)
echo "= List of connected and subscribed peers:"
for item in peers:
echo item.pretty()
proc dumpAllPeers(api: DaemonAPI) {.async.} =
var peers = await api.listPeers()
echo "Current connected peers count = ", len(peers)
for item in peers:
echo item.peer.pretty()
proc monitor(api: DaemonAPI) {.async.} =
while true:
echo "Dumping all peers"
await dumpAllPeers(api)
await sleepAsync(5000)
proc main() {.async.} =
echo "= Starting P2P bootnode"
var api = await newDaemonApi({DHTFull, PSGossipSub})
var id = await api.identity()
echo "= P2P bootnode ", id.peer.pretty(), " started."
let mcip4 = multiCodec("ip4")
let mcip6 = multiCodec("ip6")
echo "= You can use one of this addresses to bootstrap your nodes:"
for item in id.addresses:
if item.protoCode() == mcip4 or item.protoCode() == mcip6:
echo $item & "/ipfs/" & id.peer.pretty()
asyncSpawn monitor(api)
proc pubsubLogger(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async.} =
let msglen = len(message.data)
echo "= Recieved pubsub message with length ",
msglen, " bytes from peer ", message.peer.pretty()
echo dumpHex(message.data)
await api.dumpSubscribedPeers()
result = true
var ticket = await api.pubsubSubscribe(PubSubTopic, pubsubLogger)
when isMainModule:
waitFor(main())
while true:
poll()

View File

@@ -1,132 +0,0 @@
import chronos, nimcrypto, strutils
import ../../libp2p/daemon/daemonapi
## nim c -r --threads:on chat.nim
when not (compileOption("threads")):
{.fatal: "Please, compile this program with the --threads:on option!".}
const ServerProtocols = @["/test-chat-stream"]
type CustomData = ref object
api: DaemonAPI
remotes: seq[StreamTransport]
consoleFd: AsyncFD
serveFut: Future[void]
proc threadMain(wfd: AsyncFD) {.thread.} =
## This procedure performs reading from `stdin` and sends data over
## pipe to main thread.
var transp = fromPipe(wfd)
while true:
var line = stdin.readLine()
let res = waitFor transp.write(line & "\r\n")
proc serveThread(udata: CustomData) {.async.} =
## This procedure perform reading on pipe and sends data to remote clients.
var transp = fromPipe(udata.consoleFd)
proc remoteReader(transp: StreamTransport) {.async.} =
while true:
var line = await transp.readLine()
if len(line) == 0:
break
echo ">> ", line
while true:
try:
var line = await transp.readLine()
if line.startsWith("/connect"):
var parts = line.split(" ")
if len(parts) == 2:
var peerId = PeerId.init(parts[1])
var address = MultiAddress.init(multiCodec("p2p-circuit"))
address &= MultiAddress.init(multiCodec("p2p"), peerId)
echo "= Searching for peer ", peerId.pretty()
var id = await udata.api.dhtFindPeer(peerId)
echo "= Peer " & parts[1] & " found at addresses:"
for item in id.addresses:
echo $item
echo "= Connecting to peer ", $address
await udata.api.connect(peerId, @[address], 30)
echo "= Opening stream to peer chat ", parts[1]
var stream = await udata.api.openStream(peerId, ServerProtocols)
udata.remotes.add(stream.transp)
echo "= Connected to peer chat ", parts[1]
asyncSpawn remoteReader(stream.transp)
elif line.startsWith("/search"):
var parts = line.split(" ")
if len(parts) == 2:
var peerId = PeerId.init(parts[1])
echo "= Searching for peer ", peerId.pretty()
var id = await udata.api.dhtFindPeer(peerId)
echo "= Peer " & parts[1] & " found at addresses:"
for item in id.addresses:
echo $item
elif line.startsWith("/consearch"):
var parts = line.split(" ")
if len(parts) == 2:
var peerId = PeerId.init(parts[1])
echo "= Searching for peers connected to peer ", parts[1]
var peers = await udata.api.dhtFindPeersConnectedToPeer(peerId)
echo "= Found ", len(peers), " connected to peer ", parts[1]
for item in peers:
var peer = item.peer
var addresses = newSeq[string]()
var relay = false
for a in item.addresses:
addresses.add($a)
if a.protoName() == "/p2p-circuit":
relay = true
break
if relay:
echo peer.pretty(), " * ", " [", addresses.join(", "), "]"
else:
echo peer.pretty(), " [", addresses.join(", "), "]"
elif line.startsWith("/exit"):
break
else:
var msg = line & "\r\n"
echo "<< ", line
var pending = newSeq[Future[int]]()
for item in udata.remotes:
pending.add(item.write(msg))
if len(pending) > 0:
var results = await all(pending)
except CatchableError as err:
echo err.msg
proc main() {.async.} =
var data = new CustomData
data.remotes = newSeq[StreamTransport]()
var (rfd, wfd) = createAsyncPipe()
if rfd == asyncInvalidPipe or wfd == asyncInvalidPipe:
raise newException(ValueError, "Could not initialize pipe!")
data.consoleFd = rfd
data.serveFut = serveThread(data)
var thread: Thread[AsyncFD]
thread.createThread(threadMain, wfd)
echo "= Starting P2P node"
data.api = await newDaemonApi({DHTFull, Bootstrap})
await sleepAsync(3000)
var id = await data.api.identity()
proc streamHandler(api: DaemonAPI, stream: P2PStream) {.async.} =
echo "= Peer ", stream.peer.pretty(), " joined chat"
data.remotes.add(stream.transp)
while true:
var line = await stream.transp.readLine()
if len(line) == 0:
break
echo ">> ", line
await data.api.addHandler(ServerProtocols, streamHandler)
echo "= Your PeerId is ", id.peer.pretty()
await data.serveFut
when isMainModule:
waitFor(main())

View File

@@ -1,43 +0,0 @@
# Table of Contents
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Script](#script)
- [Examples](#examples)
# Introduction
This is a libp2p-backed daemon wrapping the functionalities of go-libp2p for use in Nim. <br>
For more information about the go daemon, check out [this repository](https://github.com/libp2p/go-libp2p-daemon).
> **Required only** for running the tests.
# Prerequisites
Go with version `1.16.0`
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.
# Installation
Run the build script while having the `go` command pointing to the correct Go version.
```sh
./scripts/build_p2pd.sh
```
`build_p2pd.sh` will not rebuild unless needed. If you already have the newest binary and you want to force the rebuild, use:
```sh
./scripts/build_p2pd.sh -f
```
Or:
```sh
./scripts/build_p2pd.sh --force
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the `$GOPATH/bin` directory.
If you're having issues, head into [our discord](https://discord.com/channels/864066763682218004/1115526869769535629) and ask for assistance.
After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
```sh
export PATH="$PATH:$HOME/go/bin"
```
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.
# Examples
Examples can be found in the [examples folder](https://github.com/status-im/nim-libp2p/tree/readme/examples/go-daemon)

View File

@@ -1,46 +0,0 @@
import chronos, nimcrypto, strutils, os
import ../../libp2p/daemon/daemonapi
const PubSubTopic = "test-net"
proc main(bn: string) {.async.} =
echo "= Starting P2P node"
var bootnodes = bn.split(",")
var api = await newDaemonApi(
{DHTFull, PSGossipSub, WaitBootstrap}, bootstrapNodes = bootnodes, peersRequired = 1
)
var id = await api.identity()
echo "= P2P node ", id.peer.pretty(), " started:"
for item in id.addresses:
echo item
proc pubsubLogger(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async.} =
let msglen = len(message.data)
echo "= Recieved pubsub message with length ",
msglen, " bytes from peer ", message.peer.pretty(), ": "
var strdata = cast[string](message.data)
echo strdata
result = true
var ticket = await api.pubsubSubscribe(PubSubTopic, pubsubLogger)
# Waiting for gossipsub interval
while true:
var peers = await api.pubsubListPeers(PubSubTopic)
if len(peers) > 0:
break
await sleepAsync(1000)
var data = "HELLO\r\n"
var msgData = cast[seq[byte]](data)
await api.pubsubPublish(PubSubTopic, msgData)
when isMainModule:
if paramCount() != 1:
echo "Please supply bootnodes!"
else:
waitFor(main(paramStr(1)))
while true:
poll()

View File

@@ -0,0 +1,76 @@
From 29bac4cd8f28abfb9efb481d800b7c2e855d9b03 Mon Sep 17 00:00:00 2001
From: Gabriel Cruz <gabe@gmelodie.com>
Date: Wed, 17 Sep 2025 10:42:14 -0300
Subject: [PATCH] disable filtering of private ip addresses
---
p2p/protocol/autonatv2/autonat.go | 24 +-----------------------
p2p/protocol/autonatv2/server.go | 9 ++++++---
2 files changed, 7 insertions(+), 26 deletions(-)
diff --git a/p2p/protocol/autonatv2/autonat.go b/p2p/protocol/autonatv2/autonat.go
index 24883052..00a6211f 100644
--- a/p2p/protocol/autonatv2/autonat.go
+++ b/p2p/protocol/autonatv2/autonat.go
@@ -16,7 +16,6 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
logging "github.com/libp2p/go-libp2p/gologshim"
ma "github.com/multiformats/go-multiaddr"
- manet "github.com/multiformats/go-multiaddr/net"
)
const (
@@ -180,21 +179,7 @@ func (an *AutoNAT) Close() {
// GetReachability makes a single dial request for checking reachability for requested addresses
func (an *AutoNAT) GetReachability(ctx context.Context, reqs []Request) (Result, error) {
var filteredReqs []Request
- if !an.allowPrivateAddrs {
- filteredReqs = make([]Request, 0, len(reqs))
- for _, r := range reqs {
- if manet.IsPublicAddr(r.Addr) {
- filteredReqs = append(filteredReqs, r)
- } else {
- log.Error("private address in reachability check", "address", r.Addr)
- }
- }
- if len(filteredReqs) == 0 {
- return Result{}, ErrPrivateAddrs
- }
- } else {
- filteredReqs = reqs
- }
+ filteredReqs = reqs
an.mx.Lock()
now := time.Now()
var p peer.ID
@@ -215,13 +200,6 @@ func (an *AutoNAT) GetReachability(ctx context.Context, reqs []Request) (Result,
log.Debug("reachability check failed", "peer", p, "err", err)
return res, fmt.Errorf("reachability check with %s failed: %w", p, err)
}
- // restore the correct index in case we'd filtered private addresses
- for i, r := range reqs {
- if r.Addr.Equal(res.Addr) {
- res.Idx = i
- break
- }
- }
log.Debug("reachability check successful", "peer", p)
return res, nil
}
diff --git a/p2p/protocol/autonatv2/server.go b/p2p/protocol/autonatv2/server.go
index 167d3d8e..e6d1e492 100644
--- a/p2p/protocol/autonatv2/server.go
+++ b/p2p/protocol/autonatv2/server.go
@@ -196,9 +197,6 @@ func (as *server) serveDialRequest(s network.Stream) EventDialRequestCompleted {
if err != nil {
continue
}
- if !as.allowPrivateAddrs && !manet.IsPublicAddr(a) {
- continue
- }
if !as.dialerHost.Network().CanDial(p, a) {
continue
}
--
2.51.0

View File

@@ -0,0 +1,97 @@
module go-peer
go 1.25.1
require github.com/libp2p/go-libp2p v0.43.0
replace github.com/libp2p/go-libp2p => ./go-libp2p
require (
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
github.com/ipfs/go-log/v2 v2.6.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/koron/go-ssdp v0.0.6 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.2.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-netroute v0.2.2 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/miekg/dns v1.1.66 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr v0.16.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.9.1 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.6.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pion/datachannel v1.5.10 // indirect
github.com/pion/dtls/v2 v2.2.12 // indirect
github.com/pion/dtls/v3 v3.0.6 // indirect
github.com/pion/ice/v4 v4.0.10 // indirect
github.com/pion/interceptor v0.1.40 // indirect
github.com/pion/logging v0.2.3 // indirect
github.com/pion/mdns/v2 v2.0.7 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/pion/rtcp v1.2.15 // indirect
github.com/pion/rtp v1.8.19 // indirect
github.com/pion/sctp v1.8.39 // indirect
github.com/pion/sdp/v3 v3.0.13 // indirect
github.com/pion/srtp/v3 v3.0.6 // indirect
github.com/pion/stun v0.6.1 // indirect
github.com/pion/stun/v3 v3.0.0 // indirect
github.com/pion/transport/v2 v2.2.10 // indirect
github.com/pion/transport/v3 v3.0.7 // indirect
github.com/pion/turn/v4 v4.0.2 // indirect
github.com/pion/webrtc/v4 v4.1.2 // indirect
github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.64.0 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.54.0 // indirect
github.com/quic-go/webtransport-go v0.9.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/wlynxg/anet v0.0.5 // indirect
go.uber.org/dig v1.19.0 // indirect
go.uber.org/fx v1.24.0 // indirect
go.uber.org/mock v0.5.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.39.0 // indirect
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
golang.org/x/mod v0.25.0 // indirect
golang.org/x/net v0.41.0 // indirect
golang.org/x/sync v0.15.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/text v0.26.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.34.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
)

View File

@@ -0,0 +1,441 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg=
github.com/ipfs/go-log/v2 v2.6.0/go.mod h1:p+Efr3qaY5YXpx9TX7MoLCSEZX5boSWj9wh86P5HJa8=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU=
github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=

View File

@@ -0,0 +1 @@
12D3KooWSnUDxXeeEnerD1Wf35R5b8bjTMzdAz838aDUUY8GJPGa

View File

@@ -0,0 +1,2 @@
@i
(>%ËÁø‡®PM”ܘXE~§|# õ“ýºØ®ü\íÇØ¬åsqzïÔDSݺvöLË(± Úð…•(×

View File

@@ -0,0 +1,97 @@
package main
import (
"crypto/rand"
"fmt"
"io/ioutil"
"log"
"os"
libp2p "github.com/libp2p/go-libp2p"
crypto "github.com/libp2p/go-libp2p/core/crypto"
peer "github.com/libp2p/go-libp2p/core/peer"
)
const (
privKeyFile = "peer.key"
peerIDFile = "peer.id"
)
// loadOrCreateIdentity returns a persistent libp2p identity.
//
// If peer.key exists, the private key is loaded from it and peer.id is read
// and decoded; the decoded ID is additionally cross-checked against the ID
// derived from the loaded key, so a stale or hand-edited peer.id file is
// reported as an error instead of being silently used. Otherwise a fresh
// Ed25519 keypair is generated and persisted (key mode 0600, id mode 0644).
//
// NOTE(review): io/ioutil is deprecated since Go 1.16; os.ReadFile and
// os.WriteFile are drop-in replacements once the file's import block can be
// updated together with this function.
func loadOrCreateIdentity() (crypto.PrivKey, peer.ID, error) {
	if _, err := os.Stat(privKeyFile); err == nil {
		// Load private key
		data, err := ioutil.ReadFile(privKeyFile)
		if err != nil {
			return nil, "", fmt.Errorf("failed to read private key: %w", err)
		}
		priv, err := crypto.UnmarshalPrivateKey(data)
		if err != nil {
			return nil, "", fmt.Errorf("failed to unmarshal private key: %w", err)
		}
		// Load peer ID as string
		peerData, err := ioutil.ReadFile(peerIDFile)
		if err != nil {
			return nil, "", fmt.Errorf("failed to read peer ID: %w", err)
		}
		pid, err := peer.Decode(string(peerData))
		if err != nil {
			return nil, "", fmt.Errorf("failed to decode peer ID: %w", err)
		}
		// Fix: guard against peer.id diverging from peer.key (e.g. one of
		// the two files was regenerated or edited by hand). The original
		// trusted the stored ID blindly.
		derived, err := peer.IDFromPublicKey(priv.GetPublic())
		if err != nil {
			return nil, "", fmt.Errorf("failed to derive peer ID: %w", err)
		}
		if derived != pid {
			return nil, "", fmt.Errorf("stored peer ID %s does not match key-derived ID %s", pid, derived)
		}
		return priv, pid, nil
	}
	// Create new keypair
	priv, pub, err := crypto.GenerateEd25519Key(rand.Reader)
	if err != nil {
		return nil, "", fmt.Errorf("failed to generate keypair: %w", err)
	}
	pid, err := peer.IDFromPublicKey(pub)
	if err != nil {
		return nil, "", fmt.Errorf("failed to derive peer ID: %w", err)
	}
	// Save private key
	privBytes, err := crypto.MarshalPrivateKey(priv)
	if err != nil {
		return nil, "", fmt.Errorf("failed to marshal private key: %w", err)
	}
	if err := ioutil.WriteFile(privKeyFile, privBytes, 0600); err != nil {
		return nil, "", fmt.Errorf("failed to write private key: %w", err)
	}
	// Save peer ID in canonical string form
	if err := ioutil.WriteFile(peerIDFile, []byte(pid.String()), 0644); err != nil {
		return nil, "", fmt.Errorf("failed to write peer ID: %w", err)
	}
	return priv, pid, nil
}
// main wires up a libp2p host with a persistent on-disk identity and
// AutoNATv2 enabled, prints the host's identity and listen addresses,
// then blocks forever serving as the interop-test counterpart.
func main() {
	priv, pid, err := loadOrCreateIdentity()
	if err != nil {
		log.Fatalf("Identity setup failed: %v", err)
	}
	// Listen on all interfaces, IPv4 and IPv6, on the fixed port 4040 so
	// the peer on the other side of the interop test can reach us.
	h, err := libp2p.New(
		libp2p.Identity(priv),
		libp2p.EnableAutoNATv2(),
		libp2p.ListenAddrStrings(
			"/ip4/0.0.0.0/tcp/4040",
			"/ip6/::/tcp/4040",
		),
	)
	if err != nil {
		log.Fatalf("Failed to create host: %v", err)
	}
	defer h.Close()
	fmt.Println("Peer ID:", pid.String())
	fmt.Println("Listen addresses:", h.Addrs())
	fmt.Println("AutoNATv2 client started.")
	// Block forever; the host serves until the process is killed.
	select {}
}

View File

@@ -0,0 +1,4 @@
# begin Nimble config (version 2)
when withDir(thisDir(), system.fileExists("nimble.paths")):
include "nimble.paths"
# end Nimble config

View File

@@ -0,0 +1,10 @@
version = "0.1.0"
author = "Status Research & Development GmbH"
description = "AutoNATv2 peer for interop testing"
license = "MIT"
srcDir = "src"
bin = @["nim_peer"]
# Dependencies
requires "nim >= 2.3.1", "libp2p"

View File

@@ -0,0 +1,64 @@
import net, os, chronos, libp2p
import libp2p/protocols/connectivity/autonatv2/service
import libp2p/protocols/connectivity/autonatv2/types
proc waitForService(
    host: string, port: Port, retries: int = 20, delay: Duration = 500.milliseconds
): Future[bool] {.async.} =
  ## Polls `host:port` with a plain TCP connect until it accepts a
  ## connection or `retries` attempts have been made, sleeping `delay`
  ## between attempts. Returns true once a connect succeeds, false on
  ## exhausting all retries.
  ##
  ## Fix: the original never closed the socket when `connect` raised
  ## OSError, leaking one socket per failed probe; the socket is now
  ## closed on every path via `finally`.
  ##
  ## NOTE(review): std/net `connect` is a blocking call, so each probe
  ## stalls the chronos event loop for up to the OS connect timeout —
  ## acceptable for this interop helper, not for general async code.
  for _ in 0 ..< retries:
    let probe = newSocket()
    try:
      probe.connect(host, port)
      return true
    except OSError:
      discard
    finally:
      probe.close()
    await sleepAsync(delay)
  return false
proc main() {.async.} =
  ## Interop test driver: builds a local switch with an AutoNATv2 server
  ## and service, connects to the Go peer whose PeerId is given as the
  ## only command-line argument (listening on 127.0.0.1:4040), waits for
  ## a reachability verdict with confidence >= 0.3, then prints it.
  if paramCount() != 1:
    quit("Usage: nim r src/nim_peer.nim <peerid>", 1)
  # ensure go peer is started
  await sleepAsync(3.seconds)
  let dstPeerId = PeerId.init(paramStr(1)).get()
  var src = SwitchBuilder
    .new()
    .withRng(newRng())
    .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/3030").tryGet()])
    .withAutonatV2Server()
    .withAutonatV2(
      serviceConfig = AutonatV2ServiceConfig.new(scheduleInterval = Opt.some(1.seconds))
    )
    .withTcpTransport()
    .withYamux()
    .withNoise()
    .build()
  # Completed by the handler below once the service settles on a verdict.
  let awaiter = newFuture[void]()
  proc statusAndConfidenceHandler(
      networkReachability: NetworkReachability, confidence: Opt[float]
  ) {.async: (raises: [CancelledError]).} =
    # Any non-Unknown verdict with confidence >= 0.3 counts as settled.
    if networkReachability != NetworkReachability.Unknown and confidence.isSome() and
        confidence.get() >= 0.3:
      if not awaiter.finished:
        awaiter.complete()
  # NOTE(review): assumes the AutonatV2Service was registered at index 1
  # of the switch's services by the builder — confirm against the builder's
  # service-registration order if it ever changes.
  let service = cast[AutonatV2Service](src.services[1])
  service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
  await src.start()
  await src.connect(dstPeerId, @[MultiAddress.init("/ip4/127.0.0.1/tcp/4040").get()])
  await awaiter
  echo service.networkReachability
when isMainModule:
  ## Entry point: bail out early if the Go peer's port never comes up,
  ## otherwise run the interop driver.
  if not waitFor(waitForService("127.0.0.1", Port(4040))):
    quit("timeout waiting for service", 1)
  waitFor(main())

View File

@@ -1,7 +1,7 @@
mode = ScriptMode.Verbose
packageName = "libp2p"
version = "1.12.0"
version = "1.13.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
@@ -10,7 +10,7 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
requires "nim >= 2.0.0",
"nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
"chronicles >= 0.11.0 & < 0.12.0", "chronos >= 4.0.4", "metrics", "secp256k1",
"stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.15",
"stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.16",
"https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
@@ -49,12 +49,6 @@ proc tutorialToMd(filename: string) =
task testnative, "Runs libp2p native tests":
runTest("testnative")
task testdaemon, "Runs daemon tests":
runTest("testdaemon")
task testinterop, "Runs interop tests":
runTest("testinterop")
task testpubsub, "Runs pubsub tests":
runTest("pubsub/testpubsub", "-d:libp2p_gossipsub_1_4")

View File

@@ -26,7 +26,15 @@ import
transports/[transport, tcptransport, wstransport, memorytransport],
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
protocols/connectivity/[
autonat/server,
autonatv2/server,
autonatv2/service,
autonatv2/client,
relay/relay,
relay/client,
relay/rtransport,
],
connmanager,
upgrademngrs/muxedupgrade,
observedaddrmanager,
@@ -74,6 +82,9 @@ type
nameResolver: NameResolver
peerStoreCapacity: Opt[int]
autonat: bool
autonatV2ServerConfig: Opt[AutonatV2Config]
autonatV2Client: AutonatV2Client
autonatV2ServiceConfig: AutonatV2ServiceConfig
autotls: AutotlsService
circuitRelay: Relay
rdv: RendezVous
@@ -280,6 +291,19 @@ proc withAutonat*(b: SwitchBuilder): SwitchBuilder =
b.autonat = true
b
proc withAutonatV2Server*(
b: SwitchBuilder, config: AutonatV2Config = AutonatV2Config.new()
): SwitchBuilder =
b.autonatV2ServerConfig = Opt.some(config)
b
proc withAutonatV2*(
b: SwitchBuilder, serviceConfig = AutonatV2ServiceConfig.new()
): SwitchBuilder =
b.autonatV2Client = AutonatV2Client.new(b.rng)
b.autonatV2ServiceConfig = serviceConfig
b
when defined(libp2p_autotls_support):
proc withAutotls*(
b: SwitchBuilder, config: AutotlsConfig = AutotlsConfig.new()
@@ -366,6 +390,13 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
if b.enableWildcardResolver:
b.services.insert(WildcardAddressResolverService.new(), 0)
if not isNil(b.autonatV2Client):
b.services.add(
AutonatV2Service.new(
b.rng, client = b.autonatV2Client, config = b.autonatV2ServiceConfig
)
)
let switch = newSwitch(
peerInfo = peerInfo,
transports = transports,
@@ -379,9 +410,15 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
switch.mount(identify)
if not isNil(b.autonatV2Client):
b.autonatV2Client.setup(switch)
switch.mount(b.autonatV2Client)
b.autonatV2ServerConfig.withValue(config):
switch.mount(AutonatV2.new(switch, config = config))
if b.autonat:
let autonat = Autonat.new(switch)
switch.mount(autonat)
switch.mount(Autonat.new(switch))
if not isNil(b.circuitRelay):
if b.circuitRelay of RelayClient:
@@ -395,13 +432,78 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
return switch
proc newStandardSwitch*(
type TransportType* {.pure.} = enum
QUIC
TCP
Memory
proc newStandardSwitchBuilder*(
privKey = none(PrivateKey),
addrs: MultiAddress | seq[MultiAddress] =
MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("valid address"),
secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
addrs: MultiAddress | seq[MultiAddress] = newSeq[MultiAddress](),
transport: TransportType = TransportType.TCP,
transportFlags: set[ServerFlags] = {},
rng = newRng(),
secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes,
maxConnections = MaxConnections,
maxIn = -1,
maxOut = -1,
maxConnsPerPeer = MaxConnectionsPerPeer,
nameResolver: NameResolver = nil,
sendSignedPeerRecord = false,
peerStoreCapacity = 1000,
): SwitchBuilder {.raises: [LPError], public.} =
## Helper for common switch configurations.
var b = SwitchBuilder
.new()
.withRng(rng)
.withSignedPeerRecord(sendSignedPeerRecord)
.withMaxConnections(maxConnections)
.withMaxIn(maxIn)
.withMaxOut(maxOut)
.withMaxConnsPerPeer(maxConnsPerPeer)
.withPeerStore(capacity = peerStoreCapacity)
.withNameResolver(nameResolver)
.withNoise()
var addrs =
when addrs is MultiAddress:
@[addrs]
else:
addrs
case transport
of TransportType.QUIC:
when defined(libp2p_quic_support):
if addrs.len == 0:
addrs = @[MultiAddress.init("/ip4/0.0.0.0/udp/0/quic-v1").tryGet()]
b = b.withQuicTransport().withAddresses(addrs)
else:
raiseAssert "QUIC not supported in this build"
of TransportType.TCP:
if addrs.len == 0:
addrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()]
b = b.withTcpTransport(transportFlags).withAddresses(addrs).withMplex(
inTimeout, outTimeout
)
of TransportType.Memory:
if addrs.len == 0:
addrs = @[MultiAddress.init(MemoryAutoAddress).tryGet()]
b = b.withMemoryTransport().withAddresses(addrs).withMplex(inTimeout, outTimeout)
privKey.withValue(pkey):
b = b.withPrivateKey(pkey)
b
proc newStandardSwitch*(
privKey = none(PrivateKey),
addrs: MultiAddress | seq[MultiAddress] = newSeq[MultiAddress](),
transport: TransportType = TransportType.TCP,
transportFlags: set[ServerFlags] = {},
rng = newRng(),
secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes,
maxConnections = MaxConnections,
@@ -412,28 +514,21 @@ proc newStandardSwitch*(
sendSignedPeerRecord = false,
peerStoreCapacity = 1000,
): Switch {.raises: [LPError], public.} =
## Helper for common switch configurations.
let addrs =
when addrs is MultiAddress:
@[addrs]
else:
addrs
var b = SwitchBuilder
.new()
.withAddresses(addrs)
.withRng(rng)
.withSignedPeerRecord(sendSignedPeerRecord)
.withMaxConnections(maxConnections)
.withMaxIn(maxIn)
.withMaxOut(maxOut)
.withMaxConnsPerPeer(maxConnsPerPeer)
.withPeerStore(capacity = peerStoreCapacity)
.withMplex(inTimeout, outTimeout)
.withTcpTransport(transportFlags)
.withNameResolver(nameResolver)
.withNoise()
privKey.withValue(pkey):
b = b.withPrivateKey(pkey)
b.build()
newStandardSwitchBuilder(
privKey = privKey,
addrs = addrs,
transport = transport,
transportFlags = transportFlags,
rng = rng,
secureManagers = secureManagers,
inTimeout = inTimeout,
outTimeout = outTimeout,
maxConnections = maxConnections,
maxIn = maxIn,
maxOut = maxOut,
maxConnsPerPeer = maxConnsPerPeer,
nameResolver = nameResolver,
sendSignedPeerRecord = sendSignedPeerRecord,
peerStoreCapacity = peerStoreCapacity,
)
.build()

File diff suppressed because it is too large Load Diff

View File

@@ -1,156 +0,0 @@
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
## This module implements Pool of StreamTransport.
import chronos
const DefaultPoolSize* = 8 ## Default pool size

type
  ConnectionFlags = enum
    ## Per-transport state bits kept in ``PoolItem.flags``.
    None
    Busy ## transport is currently checked out via acquire()

  PoolItem = object
    ## One pooled connection together with its bookkeeping flags.
    transp*: StreamTransport
    flags*: set[ConnectionFlags]

  PoolState = enum
    ## Lifecycle state of the pool as a whole.
    Connecting
    Connected
    Closing
    Closed

  TransportPool* = ref object ## Transports pool object
    transports: seq[PoolItem] # all pooled connections, busy or not
    busyCount: int # number of items currently flagged Busy
    state: PoolState
    bufferSize: int # stream buffer size used when dialing
    event: AsyncEvent # fired on release to wake acquire()/join() waiters

  TransportPoolError* = object of AsyncError
proc waitAll[T](futs: seq[Future[T]]): Future[void] =
  ## Returns a future that completes once every future in ``futs`` has
  ## finished, regardless of whether it succeeded or failed.
  var counter = len(futs)
  var retFuture = newFuture[void]("connpool.waitAllConnections")
  if counter == 0:
    # Fix: with an empty seq no callback ever fires, so the original
    # returned a future that never completed (await hung forever).
    retFuture.complete()
    return retFuture
  proc cb(udata: pointer) =
    dec(counter)
    if counter == 0:
      retFuture.complete()
  for fut in futs:
    fut.addCallback(cb)
  return retFuture
proc newPool*(
    address: TransportAddress,
    poolsize: int = DefaultPoolSize,
    bufferSize = DefaultStreamBufferSize,
): Future[TransportPool] {.async: (raises: [CancelledError]).} =
  ## Establish pool of connections to address ``address`` with size
  ## ``poolsize``.
  ##
  ## All ``poolsize`` dials run concurrently; the pool only becomes
  ## ``Connected`` after every dial has finished and succeeded.
  ##
  ## NOTE(review): if any dial fails, its error is re-raised but the dials
  ## that *did* succeed are never closed here — those transports leak.
  ## Also, re-raising ``conns[i].error`` may surface error types not
  ## covered by the ``raises: [CancelledError]`` annotation — confirm
  ## against chronos' exception-tracking rules.
  var pool = new TransportPool
  pool.bufferSize = bufferSize
  pool.transports = newSeq[PoolItem](poolsize)
  var conns = newSeq[Future[StreamTransport]](poolsize)
  pool.state = Connecting
  for i in 0 ..< poolsize:
    conns[i] = connect(address, bufferSize)
  # Waiting for all connections to be established.
  await waitAll(conns)
  # Checking connections and preparing pool.
  for i in 0 ..< poolsize:
    if conns[i].failed:
      raise conns[i].error
    else:
      let transp = conns[i].read()
      let item = PoolItem(transp: transp)
      pool.transports[i] = item
  # Setup available connections event
  pool.event = newAsyncEvent()
  pool.state = Connected
  result = pool
proc acquire*(
    pool: TransportPool
): Future[StreamTransport] {.async: (raises: [CancelledError, TransportPoolError]).} =
  ## Acquire non-busy connection from pool ``pool``.
  ##
  ## If every transport is busy, waits on the pool event until a release
  ## fires it, then rescans. Raises ``TransportPoolError`` unless the pool
  ## is in the ``Connected`` state.
  var transp: StreamTransport
  if pool.state in {Connected}:
    while true:
      if pool.busyCount < len(pool.transports):
        # At least one transport is free: claim the first unflagged one.
        for conn in pool.transports.mitems():
          if Busy notin conn.flags:
            conn.flags.incl(Busy)
            inc(pool.busyCount)
            transp = conn.transp
            break
      else:
        # Everything busy: sleep until release() signals availability.
        await pool.event.wait()
        pool.event.clear()
      # transp stays nil if we woke but lost the race; loop and retry.
      if not isNil(transp):
        break
  else:
    raise newException(TransportPoolError, "Pool is not ready!")
  result = transp
proc release*(
    pool: TransportPool, transp: StreamTransport
) {.async: (raises: [TransportPoolError]).} =
  ## Release connection ``transp`` back to pool ``pool``.
  ##
  ## Fix: the busy flag is cleared and ``busyCount`` decremented only when
  ## the transport is actually marked ``Busy`` — the original decremented
  ## unconditionally, so releasing the same transport twice corrupted the
  ## counter (it could go negative and wake waiters spuriously).
  ##
  ## Raises ``TransportPoolError`` if the pool is not ``Connected`` or
  ## ``Closing``, or if ``transp`` does not belong to this pool.
  if pool.state notin {Connected, Closing}:
    raise newException(TransportPoolError, "Pool is not ready!")
  var found = false
  for item in pool.transports.mitems():
    if item.transp == transp:
      found = true
      if Busy in item.flags:
        item.flags.excl(Busy)
        dec(pool.busyCount)
        # Wake any acquire()/join() waiters blocked on the event.
        pool.event.fire()
      break
  if not found:
    raise newException(TransportPoolError, "Transport not bound to pool!")
proc join*(
    pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
  ## Block until no transport in the pool is marked busy.
  ##
  ## Raises ``TransportPoolError`` while the pool is still ``Connecting``;
  ## returns immediately when it is already ``Closed``.
  if pool.state == Connecting:
    raise newException(TransportPoolError, "Pool is not ready!")
  if pool.state notin {Connected, Closing}:
    return
  while pool.busyCount != 0:
    await pool.event.wait()
    pool.event.clear()
proc close*(
    pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
  ## Closes transports pool ``pool`` and release all resources.
  ##
  ## Only acts when the pool is ``Connected``: it first drains (waits for
  ## every transport to be released), then closes all transports and waits
  ## for the closes to finish before marking the pool ``Closed``. Calls in
  ## any other state are silently ignored.
  if pool.state == Connected:
    # Entering Closing blocks new acquisitions while join() drains.
    pool.state = Closing
    # Waiting for all transports to become available.
    await pool.join()
    # Closing all transports
    var pending = newSeq[Future[void]](len(pool.transports))
    for i in 0 ..< len(pool.transports):
      let transp = pool.transports[i].transp
      transp.close()
      pending[i] = transp.join()
    # Waiting for all transports to be closed
    await waitAll(pending)
    # Mark pool as `Closed`.
    pool.state = Closed

View File

@@ -11,7 +11,7 @@
import chronos
import results
import peerid, stream/connection, transports/transport
import peerid, stream/connection, transports/transport, muxers/muxer
export results
@@ -49,6 +49,22 @@ method dial*(
doAssert(false, "[Dial.dial] abstract method not implemented!")
method dialAndUpgrade*(
self: Dial,
peerId: Opt[PeerId],
hostname: string,
addrs: MultiAddress,
dir = Direction.Out,
): Future[Muxer] {.base, async: (raises: [CancelledError]).} =
doAssert(false, "[Dial.dialAndUpgrade] abstract method not implemented!")
method dialAndUpgrade*(
self: Dial, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
): Future[Muxer] {.
base, async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
.} =
doAssert(false, "[Dial.dialAndUpgrade] abstract method not implemented!")
method dial*(
self: Dial,
peerId: PeerId,
@@ -65,6 +81,11 @@ method dial*(
method addTransport*(self: Dial, transport: Transport) {.base.} =
doAssert(false, "[Dial.addTransport] abstract method not implemented!")
method negotiateStream*(
self: Dial, conn: Connection, protos: seq[string]
): Future[Connection] {.base, async: (raises: [CatchableError]).} =
doAssert(false, "[Dial.negotiateStream] abstract method not implemented!")
method tryDial*(
self: Dial, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.

View File

@@ -43,20 +43,20 @@ type Dialer* = ref object of Dial
peerStore: PeerStore
nameResolver: NameResolver
proc dialAndUpgrade(
method dialAndUpgrade*(
self: Dialer,
peerId: Opt[PeerId],
hostname: string,
address: MultiAddress,
addrs: MultiAddress,
dir = Direction.Out,
): Future[Muxer] {.async: (raises: [CancelledError]).} =
for transport in self.transports: # for each transport
if transport.handles(address): # check if it can dial it
trace "Dialing address", address, peerId = peerId.get(default(PeerId)), hostname
if transport.handles(addrs): # check if it can dial it
trace "Dialing address", addrs, peerId = peerId.get(default(PeerId)), hostname
let dialed =
try:
libp2p_total_dial_attempts.inc()
await transport.dial(hostname, address, peerId)
await transport.dial(hostname, addrs, peerId)
except CancelledError as exc:
trace "Dialing canceled",
description = exc.msg, peerId = peerId.get(default(PeerId))
@@ -139,7 +139,7 @@ proc expandDnsAddr(
else:
result.add((resolvedAddress, peerId))
proc dialAndUpgrade(
method dialAndUpgrade*(
self: Dialer, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
): Future[Muxer] {.
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
@@ -284,7 +284,7 @@ method connect*(
return
(await self.internalConnect(Opt.none(PeerId), @[address], false)).connection.peerId
proc negotiateStream(
method negotiateStream*(
self: Dialer, conn: Connection, protos: seq[string]
): Future[Connection] {.async: (raises: [CatchableError]).} =
trace "Negotiating stream", conn, protos
@@ -292,7 +292,6 @@ proc negotiateStream(
if not protos.contains(selected):
await conn.closeWithEOF()
raise newException(DialFailedError, "Unable to select sub-protocol: " & $protos)
return conn
method tryDial*(

View File

@@ -93,6 +93,10 @@ proc parseFullAddress*(ma: MultiAddress): MaResult[(PeerId, MultiAddress)] =
proc parseFullAddress*(ma: string | seq[byte]): MaResult[(PeerId, MultiAddress)] =
parseFullAddress(?MultiAddress.init(ma))
proc toFullAddress*(peerId: PeerId, ma: MultiAddress): MaResult[MultiAddress] =
  ## Builds a fully-qualified address by appending the ``/p2p/<peer-id>``
  ## component to ``ma``; fails if the component cannot be constructed.
  concat(ma, ?MultiAddress.init(multiCodec("p2p"), peerId.data))
proc new*(
p: typedesc[PeerInfo],
key: PrivateKey,

View File

@@ -58,6 +58,9 @@ type
NotReachable
Reachable
proc isReachable*(self: NetworkReachability): bool =
  ## True only for ``NetworkReachability.Reachable``; every other verdict
  ## (including ``Unknown``) counts as not reachable.
  case self
  of NetworkReachability.Reachable: true
  else: false
proc encode(p: AutonatPeerInfo): ProtoBuffer =
result = initProtoBuffer()
p.id.withValue(id):

View File

@@ -0,0 +1,202 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import results
import chronos, chronicles, tables
import
../../protocol,
../../../switch,
../../../multiaddress,
../../../multicodec,
../../../peerid,
../../../protobuf/minprotobuf,
./types,
./utils
logScope:
topics = "libp2p autonat v2 client"
const
MaxAcceptedDialDataRequest* = 100 * 1024 # 100 KB
MaxDialDataResponsePayload* = 1024
DefaultDialBackTimeout* = 5.seconds
type AutonatV2Client* = ref object of LPProtocol
dialer*: Dial
dialBackTimeout: Duration
rng: ref HmacDrbgContext
expectedNonces: Table[Nonce, Opt[MultiAddress]]
proc handleDialBack(
    self: AutonatV2Client, conn: Connection, dialBack: DialBack
) {.async: (raises: [CancelledError, AutonatV2Error, LPStreamError]).} =
  ## Handles an incoming DialBack from a server. If the nonce is one this
  ## client registered (see sendDialRequest), records the local address the
  ## dial-back arrived on — used later to validate the server's claimed
  ## address — and acknowledges with ``DialBackResponse(Ok)``. Unknown
  ## nonces, or connections without a local address, are logged and dropped
  ## without a response.
  debug "Handling DialBack",
    conn = conn, localAddr = conn.localAddr, observedAddr = conn.observedAddr
  if not self.expectedNonces.hasKey(dialBack.nonce):
    error "Not expecting this nonce", nonce = dialBack.nonce
    return
  conn.localAddr.withValue(localAddr):
    debug "Setting expectedNonces",
      nonce = dialBack.nonce, localAddr = Opt.some(localAddr)
    self.expectedNonces[dialBack.nonce] = Opt.some(localAddr)
  else:
    error "Unable to get localAddr from connection"
    return
  trace "Sending DialBackResponse"
  await conn.writeLp(DialBackResponse(status: DialBackStatus.Ok).encode().buffer)
proc new*(
    T: typedesc[AutonatV2Client],
    rng: ref HmacDrbgContext,
    dialBackTimeout: Duration = DefaultDialBackTimeout,
): T =
  ## Creates an AutonatV2Client mounted on the DialBack codec. Its stream
  ## handler decodes a DialBack message and processes it via handleDialBack,
  ## bounded by ``dialBackTimeout``; stream errors and remote closes are
  ## downgraded to debug logs so a misbehaving peer cannot crash the node.
  let client = T(rng: rng, dialBackTimeout: dialBackTimeout)
  # handler for DialBack messages
  proc handleStream(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    try:
      # Undecodable messages are dropped silently (trace only).
      let dialBack = DialBack.decode(initProtoBuffer(await conn.readLp(DialBackLpSize))).valueOr:
        trace "Unable to decode DialBack"
        return
      if not await client.handleDialBack(conn, dialBack).withTimeout(
        client.dialBackTimeout
      ):
        trace "Sending DialBackResponse timed out"
    except CancelledError as exc:
      # Cancellation must propagate per the handler's raises contract.
      raise exc
    except LPStreamRemoteClosedError as exc:
      debug "Connection closed by peer", description = exc.msg, peer = conn.peerId
    except LPStreamError as exc:
      debug "Connection closed by peer", description = exc.msg, peer = conn.peerId
  client.handler = handleStream
  client.codec = $AutonatV2Codec.DialBack
  client
proc setup*(self: AutonatV2Client, switch: Switch) =
  ## Late-binds the client to its host switch's dialer. Must be called
  ## before sendDialRequest, which dials through ``self.dialer``.
  self.dialer = switch.dialer
proc handleDialDataRequest*(
    conn: Connection, req: DialDataRequest
): Future[DialResponse] {.
    async: (raises: [CancelledError, AutonatV2Error, LPStreamError])
.} =
  ## Answers a server's DialDataRequest (anti-amplification proof): streams
  ## back at least ``req.numBytes`` worth of zero-filled DialDataResponse
  ## messages in chunks of ``MaxDialDataResponsePayload``, then reads and
  ## returns the server's DialResponse. Raises ``AutonatV2Error`` if the
  ## request exceeds ``MaxAcceptedDialDataRequest``, the reply cannot be
  ## decoded, or the reply is not a DialResponse.
  debug "Received DialDataRequest",
    numBytes = req.numBytes, maxAcceptedNumBytes = MaxAcceptedDialDataRequest
  if req.numBytes > MaxAcceptedDialDataRequest:
    raise newException(
      AutonatV2Error, "Rejecting DialDataRequest: numBytes is greater than the maximum"
    )
  # send required data
  var msg = AutonatV2Msg(
    msgType: MsgType.DialDataResponse,
    dialDataResp: DialDataResponse(data: newSeq[byte](MaxDialDataResponsePayload)),
  )
  # Ceiling division: enough whole chunks to cover req.numBytes.
  let messagesToSend =
    (req.numBytes + MaxDialDataResponsePayload - 1) div MaxDialDataResponsePayload
  for i in 0 ..< messagesToSend:
    await conn.writeLp(msg.encode().buffer)
    debug "Sending DialDataResponse", i = i, messagesToSend = messagesToSend
  # get DialResponse
  msg = AutonatV2Msg.decode(initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))).valueOr:
    raise newException(AutonatV2Error, "Unable to decode AutonatV2Msg")
  debug "Received message", msgType = msg.msgType
  if msg.msgType != MsgType.DialResponse:
    raise
      newException(AutonatV2Error, "Expecting DialResponse, but got " & $msg.msgType)
  return msg.dialResp
proc checkAddrIdx(
    self: AutonatV2Client, addrIdx: AddrIdx, testAddrs: seq[MultiAddress], nonce: Nonce
): bool {.raises: [AutonatV2Error].} =
  ## Validates the server-reported `addrIdx`: it must be within range of
  ## `testAddrs` and consistent with the address on which we actually
  ## received the DialBack (recorded under `nonce`).
  debug "checking addrs", addrIdx = addrIdx, testAddrs = testAddrs, nonce = nonce
  # getOrDefault yields Opt.none both for unknown nonces and for nonces
  # whose dial-back has not arrived yet; both cases are rejected here.
  let dialBackAddrs = self.expectedNonces.getOrDefault(nonce).valueOr:
    debug "Not expecting this nonce",
      nonce = nonce, expectedNonces = self.expectedNonces
    return false
  if addrIdx.int >= testAddrs.len:
    debug "addrIdx outside of testAddrs range",
      addrIdx = addrIdx, testAddrs = testAddrs, testAddrsLen = testAddrs.len
    return false
  let dialRespAddrs = testAddrs[addrIdx]
  if not areAddrsConsistent(dialRespAddrs, dialBackAddrs):
    debug "Invalid addrIdx: got DialBack in another address",
      addrIdx = addrIdx, dialBackAddrs = dialBackAddrs, dialRespAddrs = dialRespAddrs
    return false
  true
method sendDialRequest*(
    self: AutonatV2Client, pid: PeerId, testAddrs: seq[MultiAddress]
): Future[AutonatV2Response] {.
    base,
    async: (raises: [AutonatV2Error, CancelledError, DialFailedError, LPStreamError])
.} =
  ## Dials peer with `pid` and requests that it tries connecting to `testAddrs`
  # Register the nonce before dialing so the concurrent DialBack handler
  # can recognize it; the address is recorded when the dial-back arrives.
  let nonce = self.rng[].generate(Nonce)
  self.expectedNonces[nonce] = Opt.none(MultiAddress)
  var dialResp: DialResponse
  try:
    let conn = await self.dialer.dial(pid, @[$AutonatV2Codec.DialRequest])
    defer:
      await conn.close()
    # send dialRequest
    await conn.writeLp(
      AutonatV2Msg(
        msgType: MsgType.DialRequest,
        dialReq: DialRequest(addrs: testAddrs, nonce: nonce),
      ).encode().buffer
    )
    let msg = AutonatV2Msg.decode(
      initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))
    ).valueOr:
      raise newException(AutonatV2Error, "Unable to decode AutonatV2Msg")
    dialResp =
      case msg.msgType
      of MsgType.DialResponse:
        msg.dialResp
      of MsgType.DialDataRequest:
        # Server wants data first (amplification-attack prevention);
        # satisfy it and wait for the final response.
        await conn.handleDialDataRequest(msg.dialDataReq)
      else:
        raise newException(
          AutonatV2Error,
          "Expecting DialResponse or DialDataRequest, but got " & $msg.msgType,
        )
    debug "Received DialResponse", dialResp = dialResp
    # Cross-check the server's claimed address index against the address
    # the DialBack actually arrived on.
    dialResp.dialStatus.withValue(dialStatus):
      if dialStatus == DialStatus.Ok:
        dialResp.addrIdx.withValue(addrIdx):
          if not self.checkAddrIdx(addrIdx, testAddrs, nonce):
            raise newException(
              AutonatV2Error, "Invalid addrIdx " & $addrIdx & " in DialResponse"
            )
  except LPStreamRemoteClosedError as exc:
    # NOTE(review): on remote close we fall through and return a
    # default-initialized DialResponse — confirm this is intended.
    error "Stream reset by server", description = exc.msg, peer = pid
  finally:
    # rollback any changes
    self.expectedNonces.del(nonce)
  return dialResp.asAutonatV2Response(testAddrs)

View File

@@ -0,0 +1,65 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import results
import chronos, chronicles
import
../../../../libp2p/
[
switch,
muxers/muxer,
dialer,
multiaddress,
multicodec,
peerid,
protobuf/minprotobuf,
],
../../protocol,
./types,
./server
type AutonatV2Mock* = ref object of LPProtocol
  config*: AutonatV2Config
  response*: ProtoBuffer # pre-encoded message returned to every DialRequest

proc new*(
    T: typedesc[AutonatV2Mock], config: AutonatV2Config = AutonatV2Config.new()
): T =
  ## Test double for the AutonatV2 server: accepts a DialRequest and replies
  ## with the pre-set `response` buffer, performing no actual dial-back.
  let autonatV2 = T(config: config)
  proc handleStream(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    defer:
      await conn.close()
    try:
      let msg = AutonatV2Msg.decode(
        initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))
      ).valueOr:
        return
      # Only DialRequest messages are answered; others are silently dropped.
      if msg.msgType != MsgType.DialRequest:
        return
    except LPStreamError:
      return
    try:
      # return mocked message
      await conn.writeLp(autonatV2.response.buffer)
    except CancelledError as exc:
      raise exc
    except LPStreamRemoteClosedError:
      discard
    except LPStreamError:
      discard
  autonatV2.handler = handleStream
  autonatV2.codec = $AutonatV2Codec.DialRequest
  autonatV2

View File

@@ -0,0 +1,45 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import chronos
import ../../../peerid, ../../../multiaddress, ../../../switch
import ./client, ./types
type AutonatV2ClientMock* = ref object of AutonatV2Client
  response*: AutonatV2Response # canned answer returned by sendDialRequest
  dials*: int # number of sendDialRequest calls observed so far
  expectedDials: int # dial count after which `finished` completes
  finished*: Future[void] # completes once expectedDials is reached

proc new*(
    T: typedesc[AutonatV2ClientMock], response: AutonatV2Response, expectedDials: int
): T =
  ## Creates a client mock that always answers with `response` and signals
  ## `finished` after `expectedDials` calls.
  return T(
    dials: 0,
    expectedDials: expectedDials,
    finished: newFuture[void](),
    response: response,
  )
method sendDialRequest*(
    self: AutonatV2ClientMock, pid: PeerId, testAddrs: seq[MultiAddress]
): Future[AutonatV2Response] {.
    async: (raises: [AutonatV2Error, CancelledError, DialFailedError, LPStreamError])
.} =
  ## Mock override: records the call and returns the canned `response`
  ## without any network I/O.
  self.dials += 1
  # Signal waiting tests once the expected number of dials is reached.
  if self.dials == self.expectedDials:
    self.finished.complete()
  var ans = self.response
  # Mirror the real client: resolve addrIdx to the concrete tested address.
  ans.dialResp.addrIdx.withValue(addrIdx):
    ans.addrs = Opt.some(testAddrs[addrIdx])
  ans

View File

@@ -0,0 +1,65 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import results
import chronos, chronicles
import
../../../../libp2p/
[
switch,
muxers/muxer,
dialer,
multiaddress,
multicodec,
peerid,
protobuf/minprotobuf,
],
../../protocol,
./types,
./server
type AutonatV2Mock* = ref object of LPProtocol
  config*: AutonatV2Config
  response*: ProtoBuffer # pre-encoded message returned to every DialRequest

proc new*(
    T: typedesc[AutonatV2Mock], config: AutonatV2Config = AutonatV2Config.new()
): T =
  ## Test double for the AutonatV2 server: accepts a DialRequest and replies
  ## with the pre-set `response` buffer, performing no actual dial-back.
  let autonatV2 = T(config: config)
  proc handleStream(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    defer:
      await conn.close()
    try:
      let msg = AutonatV2Msg.decode(
        initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))
      ).valueOr:
        return
      # Only DialRequest messages are answered; others are silently dropped.
      if msg.msgType != MsgType.DialRequest:
        return
    except LPStreamError:
      return
    try:
      # return mocked message
      await conn.writeLp(autonatV2.response.buffer)
    except CancelledError as exc:
      raise exc
    except LPStreamRemoteClosedError:
      discard
    except LPStreamError:
      discard
  autonatV2.handler = handleStream
  autonatV2.codec = $AutonatV2Codec.DialRequest
  autonatV2

View File

@@ -0,0 +1,309 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import results
import chronos, chronicles
import
../../../../libp2p/[
switch,
muxers/muxer,
dialer,
multiaddress,
transports/transport,
multicodec,
peerid,
protobuf/minprotobuf,
utils/ipaddr,
],
../../protocol,
./types
logScope:
topics = "libp2p autonat v2 server"
type AutonatV2Config* = object
  dialTimeout: Duration # max time for the dial-back dial attempt
  dialDataSize: uint64 # bytes the client must send for amplification-attack prevention
  amplificationAttackTimeout: Duration # max time to wait for that data
  allowPrivateAddresses: bool # whether private IPs may be dialed back

type AutonatV2* = ref object of LPProtocol
  switch*: Switch
  config: AutonatV2Config
proc new*(
    T: typedesc[AutonatV2Config],
    dialTimeout: Duration = DefaultDialTimeout,
    dialDataSize: uint64 = DefaultDialDataSize,
    amplificationAttackTimeout: Duration = DefaultAmplificationAttackDialTimeout,
    allowPrivateAddresses: bool = false,
): T =
  ## Builds an `AutonatV2Config`; every knob has a sensible default so
  ## callers may override only what they need.
  result = T(
    allowPrivateAddresses: allowPrivateAddresses,
    amplificationAttackTimeout: amplificationAttackTimeout,
    dialDataSize: dialDataSize,
    dialTimeout: dialTimeout,
  )
proc sendDialResponse(
    conn: Connection,
    status: ResponseStatus,
    addrIdx: Opt[AddrIdx] = Opt.none(AddrIdx),
    dialStatus: Opt[DialStatus] = Opt.none(DialStatus),
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Encodes and sends a DialResponse on `conn`; `addrIdx` and `dialStatus`
  ## are only meaningful for successful responses and default to none.
  await conn.writeLp(
    AutonatV2Msg(
      msgType: MsgType.DialResponse,
      dialResp: DialResponse(status: status, addrIdx: addrIdx, dialStatus: dialStatus),
    ).encode().buffer
  )
proc findObservedIPAddr*(
    conn: Connection, req: DialRequest
): Future[Opt[MultiAddress]] {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Extracts the client's observed IP component from `conn`. Sends an error
  ## DialResponse and returns none when the address is missing, invalid or
  ## relayed.
  ## NOTE(review): `req` is not used here — confirm whether it can be
  ## dropped from the signature.
  let observedAddr = conn.observedAddr.valueOr:
    await conn.sendDialResponse(ResponseStatus.EInternalError)
    return Opt.none(MultiAddress)
  # Relayed addresses cannot be used for a direct dial-back.
  let isRelayed = observedAddr.contains(multiCodec("p2p-circuit")).valueOr:
    error "Invalid observed address"
    await conn.sendDialResponse(ResponseStatus.EDialRefused)
    return Opt.none(MultiAddress)
  if isRelayed:
    error "Invalid observed address: relayed address"
    await conn.sendDialResponse(ResponseStatus.EDialRefused)
    return Opt.none(MultiAddress)
  # First multiaddress component is the host IP part.
  let hostIp = observedAddr[0].valueOr:
    error "Invalid observed address"
    await conn.sendDialResponse(ResponseStatus.EInternalError)
    return Opt.none(MultiAddress)
  return Opt.some(hostIp)
proc dialBack(
    conn: Connection, nonce: Nonce
): Future[DialStatus] {.
    async: (raises: [CancelledError, DialFailedError, LPStreamError])
.} =
  ## Performs the dial-back exchange on an already-established `conn`:
  ## sends the nonce and waits for the client's DialBackResponse.
  try:
    # send dial back
    await conn.writeLp(DialBack(nonce: nonce).encode().buffer)
    # receive DialBackResponse
    # Only decodability matters here; the response content is not inspected.
    let dialBackResp = DialBackResponse.decode(
      initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))
    ).valueOr:
      trace "DialBack failed, could not decode DialBackResponse"
      return DialStatus.EDialBackError
  except LPStreamRemoteClosedError as exc:
    # failed because of nonce error (remote reset the stream): EDialBackError
    debug "DialBack failed, remote closed the connection", description = exc.msg
    return DialStatus.EDialBackError
  # TODO: failed because of client or server resources: EDialError
  trace "DialBack successful"
  return DialStatus.Ok
proc handleDialDataResponses(
    self: AutonatV2, conn: Connection
) {.async: (raises: [CancelledError, AutonatV2Error, LPStreamError]).} =
  ## Reads DialDataResponse messages until the configured `dialDataSize`
  ## has been received; raises AutonatV2Error on malformed or unexpected
  ## messages.
  var dataReceived: uint64 = 0
  while dataReceived < self.config.dialDataSize:
    let msg = AutonatV2Msg.decode(
      initProtoBuffer(await conn.readLp(DialDataResponseLpSize))
    ).valueOr:
      raise newException(AutonatV2Error, "Received malformed message")
    debug "Received message", msgType = $msg.msgType
    if msg.msgType != MsgType.DialDataResponse:
      raise
        newException(AutonatV2Error, "Expecting DialDataResponse, got " & $msg.msgType)
    let resp = msg.dialDataResp
    dataReceived += resp.data.len.uint64
    debug "received data",
      dataReceived = resp.data.len.uint64, totalDataReceived = dataReceived
proc amplificationAttackPrevention(
    self: AutonatV2, conn: Connection, addrIdx: AddrIdx
): Future[bool] {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Asks the client to transmit `dialDataSize` bytes before we dial back,
  ## proving the request is not a traffic-amplification attempt.
  ## Returns false when the client misbehaves.
  # send DialDataRequest
  await conn.writeLp(
    AutonatV2Msg(
      msgType: MsgType.DialDataRequest,
      dialDataReq: DialDataRequest(addrIdx: addrIdx, numBytes: self.config.dialDataSize),
    ).encode().buffer
  )
  # receive DialDataResponses until we're satisfied
  try:
    await self.handleDialDataResponses(conn)
  except AutonatV2Error as exc:
    error "Amplification attack prevention failed", description = exc.msg
    return false
  return true
proc canDial(self: AutonatV2, addrs: MultiAddress): bool =
  ## True when `addrs` is a dialable candidate: its IP family is one we
  ## listen on, it is not private (unless configured to allow that), and
  ## one of our transports can handle it.
  let (ipv4Support, ipv6Support) = self.switch.peerInfo.listenAddrs.ipSupport()
  addrs[0].withValue(addrIp):
    if IP4.match(addrIp) and not ipv4Support:
      return false
    if IP6.match(addrIp) and not ipv6Support:
      return false
    try:
      if not self.config.allowPrivateAddresses and isPrivate($addrIp):
        return false
    except ValueError:
      warn "Unable to parse IP address, skipping", addrs = $addrIp
      return false
  # Finally, some transport must actually be able to dial this address.
  for t in self.switch.transports:
    if t.handles(addrs):
      return true
  return false
proc forceNewConnection(
    self: AutonatV2, pid: PeerId, addrs: seq[MultiAddress]
): Future[Opt[Connection]] {.async: (raises: [CancelledError]).} =
  ## Bypasses connManager to force a new connection to ``pid``
  ## instead of reusing a preexistent one
  try:
    let mux = await self.switch.dialer.dialAndUpgrade(Opt.some(pid), addrs)
    if mux.isNil():
      return Opt.none(Connection)
    return Opt.some(
      await self.switch.dialer.negotiateStream(
        await mux.newStream(), @[$AutonatV2Codec.DialBack]
      )
    )
  except CancelledError as exc:
    raise exc
  except CatchableError:
    # Any dial/upgrade/negotiation failure maps to "no connection".
    return Opt.none(Connection)
proc chooseDialAddr(
    self: AutonatV2, pid: PeerId, addrs: seq[MultiAddress]
): Future[(Opt[Connection], Opt[AddrIdx])] {.async: (raises: [CancelledError]).} =
  ## Tries `addrs` in order and returns the first successful dial-back
  ## connection together with the index of the address used.
  ## On timeout the index is still returned (so the caller can report a
  ## dial error for that address); on other failures both are none.
  for i, ma in addrs:
    if self.canDial(ma):
      debug "Trying to dial", chosenAddrs = ma, addrIdx = i
      let conn =
        try:
          (await (self.forceNewConnection(pid, @[ma]).wait(self.config.dialTimeout))).valueOr:
            return (Opt.none(Connection), Opt.none(AddrIdx))
        except AsyncTimeoutError:
          trace "Dial timed out"
          return (Opt.none(Connection), Opt.some(i.AddrIdx))
      return (Opt.some(conn), Opt.some(i.AddrIdx))
  return (Opt.none(Connection), Opt.none(AddrIdx))
proc handleDialRequest(
    self: AutonatV2, conn: Connection, req: DialRequest
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Full server-side handling of one DialRequest: validate the observed
  ## address, pick a dialable candidate, optionally run amplification-attack
  ## prevention, perform the dial-back and report the outcome to the client.
  let observedIPAddr = (await conn.findObservedIPAddr(req)).valueOr:
    trace "Could not find observed IP address"
    await conn.sendDialResponse(ResponseStatus.ERequestRejected)
    return
  let (dialBackConnOpt, addrIdxOpt) = await self.chooseDialAddr(conn.peerId, req.addrs)
  let addrIdx = addrIdxOpt.valueOr:
    trace "No dialable addresses found"
    await conn.sendDialResponse(ResponseStatus.EDialRefused)
    return
  let dialBackConn = dialBackConnOpt.valueOr:
    trace "Dial failed"
    await conn.sendDialResponse(
      ResponseStatus.Ok,
      addrIdx = Opt.some(addrIdx),
      dialStatus = Opt.some(DialStatus.EDialError),
    )
    return
  defer:
    await dialBackConn.close()
  # if observed address for peer is not in address list to try
  # then we perform Amplification Attack Prevention
  if not ipAddrMatches(observedIPAddr, req.addrs):
    debug "Starting amplification attack prevention",
      observedIPAddr = observedIPAddr, testAddr = req.addrs[addrIdx]
    # send DialDataRequest and wait until dataReceived is enough
    # NOTE(review): withTimeout only reports completion in time; the bool
    # returned by amplificationAttackPrevention itself is not checked here.
    if not await self.amplificationAttackPrevention(conn, addrIdx).withTimeout(
      self.config.amplificationAttackTimeout
    ):
      debug "Amplification attack prevention timeout",
        timeout = self.config.amplificationAttackTimeout, peer = conn.peerId
      await conn.sendDialResponse(ResponseStatus.EDialRefused)
      return
  debug "Sending DialBack",
    nonce = req.nonce, addrIdx = addrIdx, addr = req.addrs[addrIdx]
  try:
    let dialStatus =
      await dialBackConn.dialBack(req.nonce).wait(self.config.dialTimeout)
    await conn.sendDialResponse(
      ResponseStatus.Ok, addrIdx = Opt.some(addrIdx), dialStatus = Opt.some(dialStatus)
    )
  except DialFailedError as exc:
    debug "DialBack failed", description = exc.msg
    await conn.sendDialResponse(
      ResponseStatus.Ok,
      addrIdx = Opt.some(addrIdx),
      dialStatus = Opt.some(DialStatus.EDialBackError),
    )
  except AsyncTimeoutError:
    debug "DialBack timeout", timeout = self.config.dialTimeout
    await conn.sendDialResponse(
      ResponseStatus.Ok,
      addrIdx = Opt.some(addrIdx),
      dialStatus = Opt.some(DialStatus.EDialBackError),
    )
proc new*(
    T: typedesc[AutonatV2],
    switch: Switch,
    config: AutonatV2Config = AutonatV2Config.new(),
): T =
  ## Creates the AutonatV2 server protocol and installs its handler for
  ## incoming dial-request streams.
  let autonatV2 = T(switch: switch, config: config)
  proc handleStream(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    defer:
      await conn.close()
    let msg =
      try:
        AutonatV2Msg.decode(initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))).valueOr:
          trace "Unable to decode AutonatV2Msg"
          return
      except LPStreamError as exc:
        debug "Could not receive AutonatV2Msg", description = exc.msg
        return
    debug "Received message", msgType = $msg.msgType
    # Only DialRequest is valid as the opening message on this protocol.
    if msg.msgType != MsgType.DialRequest:
      debug "Expecting DialRequest", receivedMsgType = msg.msgType
      return
    try:
      await autonatV2.handleDialRequest(conn, msg.dialReq)
    except CancelledError as exc:
      raise exc
    except LPStreamRemoteClosedError as exc:
      debug "Connection closed by peer", description = exc.msg, peer = conn.peerId
    except LPStreamError as exc:
      debug "Stream Error", description = exc.msg
  autonatV2.handler = handleStream
  autonatV2.codec = $AutonatV2Codec.DialRequest
  autonatV2

View File

@@ -0,0 +1,278 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import std/[deques, sequtils]
import chronos, chronicles, metrics, results
import
../../protocol,
../../../switch,
../../../multiaddress,
../../../multicodec,
../../../peerid,
../../../protobuf/minprotobuf,
../../../wire,
../../../utils/heartbeat,
../../../crypto/crypto,
../autonat/types,
./types,
./client
declarePublicGauge(
libp2p_autonat_v2_reachability_confidence,
"autonat v2 reachability confidence",
labels = ["reachability"],
)
logScope:
topics = "libp2p autonatv2 service"
# needed because nim 2.0 can't do proper type assertions
const noneDuration: Opt[Duration] = Opt.none(Duration)

type
  AutonatV2ServiceConfig* = object
    scheduleInterval: Opt[Duration] # when set, re-probe peers periodically
    askNewConnectedPeers: bool # probe each newly connected peer
    numPeersToAsk: int # max peers queried per probing run
    maxQueueSize: int # size of the sliding window of answers
    minConfidence: float # fraction of answers required to adopt a status
    enableAddressMapper: bool # rewrite listen addrs based on reachability
  AutonatV2Service* = ref object of Service
    config*: AutonatV2ServiceConfig
    confidence: Opt[float] # confidence in the current reachability status
    newConnectedPeerHandler: PeerEventHandler
    statusAndConfidenceHandler: StatusAndConfidenceHandler
    addressMapper: AddressMapper
    scheduleHandle: Future[void] # periodic probe loop, when scheduled
    networkReachability*: NetworkReachability
    answers: Deque[NetworkReachability] # sliding window of peer answers
    client*: AutonatV2Client
    rng: ref HmacDrbgContext
  StatusAndConfidenceHandler* = proc(
    networkReachability: NetworkReachability, confidence: Opt[float]
  ): Future[void] {.gcsafe, async: (raises: [CancelledError]).}
proc new*(
    T: typedesc[AutonatV2ServiceConfig],
    scheduleInterval: Opt[Duration] = noneDuration,
    askNewConnectedPeers = true,
    numPeersToAsk: int = 5,
    maxQueueSize: int = 10,
    minConfidence: float = 0.3,
    enableAddressMapper = true,
): T =
  ## Assembles a service configuration; every knob has a default so callers
  ## only override what they need.
  result = T(
    enableAddressMapper: enableAddressMapper,
    minConfidence: minConfidence,
    maxQueueSize: maxQueueSize,
    numPeersToAsk: numPeersToAsk,
    askNewConnectedPeers: askNewConnectedPeers,
    scheduleInterval: scheduleInterval,
  )
proc new*(
    T: typedesc[AutonatV2Service],
    rng: ref HmacDrbgContext,
    client: AutonatV2Client = AutonatV2Client.new(),
    config: AutonatV2ServiceConfig = AutonatV2ServiceConfig.new(),
): T =
  ## Creates the reachability-probing service starting in the Unknown state.
  ## NOTE(review): the default `AutonatV2Client.new()` call assumes a client
  ## constructor overload taking no rng — confirm it exists.
  return T(
    config: config,
    confidence: Opt.none(float),
    networkReachability: Unknown,
    answers: initDeque[NetworkReachability](),
    client: client,
    rng: rng,
  )
proc callHandler(self: AutonatV2Service) {.async: (raises: [CancelledError]).} =
  ## Invokes the user-registered status handler, if one is set.
  if not isNil(self.statusAndConfidenceHandler):
    await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
proc hasEnoughIncomingSlots(switch: Switch): bool =
  ## True when at least two incoming connection slots remain.
  # we leave some margin instead of comparing to 0 as a peer could connect to us while we are asking for the dial back
  return switch.connManager.slotsAvailable(In) >= 2
proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =
  ## True when `peerId` already has an inbound muxer to us — such a peer
  ## cannot tell us anything new about our reachability.
  return switch.connManager.selectMuxer(peerId, In) != nil
proc handleAnswer(
    self: AutonatV2Service, ans: NetworkReachability
): Future[bool] {.async: (raises: [CancelledError]).} =
  ## Folds one reachability answer into the sliding window and recomputes
  ## the aggregate status and confidence. Returns true when either changed.
  if ans == Unknown:
    return # Unknown carries no signal; implicit result stays false
  let oldNetworkReachability = self.networkReachability
  let oldConfidence = self.confidence
  # Keep at most maxQueueSize answers (FIFO sliding window).
  if self.answers.len == self.config.maxQueueSize:
    self.answers.popFirst()
  self.answers.addLast(ans)
  self.networkReachability = Unknown
  self.confidence = Opt.none(float)
  const reachabilityPriority = [Reachable, NotReachable]
  for reachability in reachabilityPriority:
    # Confidence is computed against maxQueueSize (not answers.len), so it
    # stays conservative until the window fills up.
    let confidence = self.answers.countIt(it == reachability) / self.config.maxQueueSize
    libp2p_autonat_v2_reachability_confidence.set(
      value = confidence, labelValues = [$reachability]
    )
    # First status in priority order to clear the threshold wins.
    if self.confidence.isNone and confidence >= self.config.minConfidence:
      self.networkReachability = reachability
      self.confidence = Opt.some(confidence)
  debug "Current status",
    currentStats = $self.networkReachability,
    confidence = $self.confidence,
    answers = self.answers
  # Return whether anything has changed
  return
    self.networkReachability != oldNetworkReachability or
    self.confidence != oldConfidence
proc askPeer(
    self: AutonatV2Service, switch: Switch, peerId: PeerId
): Future[NetworkReachability] {.async: (raises: [CancelledError]).} =
  ## Asks a single peer to test our reachability, folds its answer into the
  ## service state and returns the answer (Unknown when skipped or failed).
  logScope:
    peerId = $peerId
  # A peer that already connects inbound to us proves nothing new.
  if doesPeerHaveIncomingConn(switch, peerId):
    return Unknown
  if not hasEnoughIncomingSlots(switch):
    debug "No incoming slots available, not asking peer",
      incomingSlotsAvailable = switch.connManager.slotsAvailable(In)
    return Unknown
  trace "Asking peer for reachability"
  let ans =
    try:
      let reqAddrs = switch.peerInfo.addrs
      let autonatV2Resp = await self.client.sendDialRequest(peerId, reqAddrs)
      debug "AutonatV2Response", autonatV2Resp = autonatV2Resp
      autonatV2Resp.reachability
    except CancelledError as exc:
      raise exc
    except LPStreamError as exc:
      debug "DialRequest stream error", description = exc.msg
      Unknown
    except DialFailedError as exc:
      debug "DialRequest dial failed", description = exc.msg
      Unknown
    except AutonatV2Error as exc:
      debug "DialRequest error", description = exc.msg
      Unknown
  let hasReachabilityOrConfidenceChanged = await self.handleAnswer(ans)
  if hasReachabilityOrConfidenceChanged:
    await self.callHandler()
  # Re-run address mappers so peerInfo reflects the (possibly new) status.
  await switch.peerInfo.update()
  return ans
proc askConnectedPeers(
    self: AutonatV2Service, switch: Switch
) {.async: (raises: [CancelledError]).} =
  ## Queries up to `numPeersToAsk` randomly-ordered outgoing peers for our
  ## reachability, stopping early when incoming slots run low.
  trace "Asking peers for reachability"
  var peers = switch.connectedPeers(Direction.Out)
  self.rng.shuffle(peers)
  var answersFromPeers = 0
  for peer in peers:
    if answersFromPeers >= self.config.numPeersToAsk:
      break
    if not hasEnoughIncomingSlots(switch):
      debug "No incoming slots available, not asking peers",
        incomingSlotsAvailable = switch.connManager.slotsAvailable(In)
      break
    # Only answers carrying an actual signal count toward the quota.
    if (await askPeer(self, switch, peer)) != Unknown:
      answersFromPeers.inc()
proc schedule(
    service: AutonatV2Service, switch: Switch, interval: Duration
) {.async: (raises: [CancelledError]).} =
  ## Runs the service every `interval` until the future is cancelled.
  heartbeat "Scheduling AutonatV2Service run", interval:
    await service.run(switch)
proc addressMapper(
    self: AutonatV2Service, peerStore: PeerStore, listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
  ## Maps listen addresses to externally dialable ones: public addresses are
  ## kept as-is, others are replaced with the peer store's best guess.
  ## Addresses are returned unchanged while our reachability is unknown.
  if not self.networkReachability.isReachable():
    return listenAddrs
  var addrs = newSeq[MultiAddress]()
  for listenAddr in listenAddrs:
    # The early return above guarantees we are reachable here, so the only
    # remaining question is whether the address is already public.
    # (The original also re-checked reachability per address — dead code.)
    if listenAddr.isPublicMA():
      addrs.add(listenAddr)
    else:
      addrs.add(peerStore.guessDialableAddr(listenAddr))
  return addrs
method setup*(
    self: AutonatV2Service, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
  ## Installs the service on `switch`: address mapper, new-peer probing and
  ## the optional periodic schedule. Returns false when already set up.
  self.addressMapper = proc(
      listenAddrs: seq[MultiAddress]
  ): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
    return await addressMapper(self, switch.peerStore, listenAddrs)
  trace "Setting up AutonatV2Service"
  let hasBeenSetup = await procCall Service(self).setup(switch)
  if not hasBeenSetup:
    return hasBeenSetup
  if self.config.askNewConnectedPeers:
    self.newConnectedPeerHandler = proc(
        peerId: PeerId, event: PeerEvent
    ): Future[void] {.async: (raises: [CancelledError]).} =
      # Fire-and-forget probe of the newly connected peer.
      discard askPeer(self, switch, peerId)
    switch.connManager.addPeerEventHandler(
      self.newConnectedPeerHandler, PeerEventKind.Joined
    )
  self.config.scheduleInterval.withValue(interval):
    self.scheduleHandle = schedule(self, switch, interval)
  if self.config.enableAddressMapper:
    switch.peerInfo.addressMappers.add(self.addressMapper)
  return hasBeenSetup
method run*(
    self: AutonatV2Service, switch: Switch
) {.public, async: (raises: [CancelledError]).} =
  ## One probing round: asks currently connected peers about reachability.
  trace "Running AutonatV2Service"
  await askConnectedPeers(self, switch)
method stop*(
    self: AutonatV2Service, switch: Switch
): Future[bool] {.public, async: (raises: [CancelledError]).} =
  ## Tears the service down: cancels the schedule, removes the peer-event
  ## handler and the address mapper. Returns false when already stopped.
  trace "Stopping AutonatV2Service"
  let hasBeenStopped = await procCall Service(self).stop(switch)
  if not hasBeenStopped:
    return hasBeenStopped
  if not isNil(self.scheduleHandle):
    self.scheduleHandle.cancelSoon()
    self.scheduleHandle = nil
  if not isNil(self.newConnectedPeerHandler):
    switch.connManager.removePeerEventHandler(
      self.newConnectedPeerHandler, PeerEventKind.Joined
    )
  if self.config.enableAddressMapper:
    switch.peerInfo.addressMappers.keepItIf(it != self.addressMapper)
    # Re-publish addresses without our mapper applied.
    await switch.peerInfo.update()
  return hasBeenStopped
proc setStatusAndConfidenceHandler*(
    self: AutonatV2Service, statusAndConfidenceHandler: StatusAndConfidenceHandler
) =
  ## Registers a callback fired whenever reachability or confidence changes.
  self.statusAndConfidenceHandler = statusAndConfidenceHandler

View File

@@ -14,6 +14,17 @@ import
../../../multiaddress, ../../../peerid, ../../../protobuf/minprotobuf, ../../../switch
from ../autonat/types import NetworkReachability
export NetworkReachability
const
  DefaultDialTimeout*: Duration = 15.seconds
  DefaultAmplificationAttackDialTimeout*: Duration = 3.seconds
  DefaultDialDataSize*: uint64 = 50 * 1024 # 50 KiB (binary), slightly more than 50 KB
  AutonatV2MsgLpSize*: int = 1024
  DialBackLpSize*: int = 1024
  # readLp needs to receive more than 4096 bytes (since it's a DialDataResponse) + overhead
  DialDataResponseLpSize*: int = 5000
type
AutonatV2Codec* {.pure.} = enum
DialRequest = "/libp2p/autonat/2/dial-request"

View File

@@ -9,7 +9,6 @@ import
../../../multicodec,
../../../peerid,
../../../protobuf/minprotobuf,
../autonat/service,
./types
proc asNetworkReachability*(self: DialResponse): NetworkReachability =

View File

@@ -0,0 +1,53 @@
import endians, nimcrypto
proc aes_ctr*(key, iv, data: openArray[byte]): seq[byte] =
  ## Processes 'data' using AES in CTR mode.
  ## For CTR mode, the same function handles both encryption and decryption.
  doAssert key.len == 16, "Key must be 16 bytes for AES-128"
  doAssert iv.len == 16, "IV must be 16 bytes for AES-128"
  var
    ctx: CTR[aes128]
    output = newSeq[byte](data.len)
  ctx.init(key, iv)
  ctx.encrypt(data, output)
  # Wipe key material from the cipher context before returning.
  ctx.clear()
  output
proc advance_ctr*(iv: var openArray[byte], blocks: uint64) =
  ## Advances the counter in the AES-CTR IV by a specified number of blocks.
  ## The counter occupies iv[8..15], big-endian encoded; the addition wraps
  ## modulo 2^64 (unsigned overflow semantics).
  var value: uint64 = 0
  # Decode the big-endian counter from the last 8 bytes of the IV.
  for idx in 8 ..< 16:
    value = (value shl 8) or uint64(iv[idx])
  value += blocks
  # Re-encode the updated counter back into iv[8..15].
  for idx in countdown(15, 8):
    iv[idx] = byte(value and 0xFF)
    value = value shr 8
proc aes_ctr_start_index*(key, iv, data: openArray[byte], startIndex: int): seq[byte] =
  ## Encrypts 'data' using AES in CTR mode from startIndex, without processing all preceding data.
  ## For CTR mode, the same function handles both encryption and decryption.
  doAssert key.len == 16, "Key must be 16 bytes for AES-128"
  doAssert iv.len == 16, "IV must be 16 bytes for AES-128"
  doAssert startIndex mod 16 == 0, "Start index must be a multiple of 16"
  # Work on a copy so the caller's IV is left untouched.
  var advIV = @iv
  # Advance the counter to the start index
  let blocksToAdvance = startIndex div 16
  advance_ctr(advIV, blocksToAdvance.uint64)
  return aes_ctr(key, advIV, data)
proc sha256_hash*(data: openArray[byte]): array[32, byte] =
  ## Hashes 'data' using SHA-256 and returns the raw 32-byte digest.
  return sha256.digest(data).data
proc kdf*(key: openArray[byte]): seq[byte] =
  ## Returns the hash of 'key' truncated to 16 bytes.
  ## 16 bytes matches the AES-128 key size used by `aes_ctr` above.
  let hash = sha256_hash(key)
  return hash[0 .. 15]
proc hmac*(key, data: openArray[byte]): seq[byte] =
  ## Computes a HMAC for 'data' using given 'key'.
  ## NOTE: the tag is HMAC-SHA256 truncated to its first 16 bytes.
  let hmac = sha256.hmac(key, data).data
  return hmac[0 .. 15]

View File

@@ -0,0 +1,52 @@
import results
import bearssl/rand
import ../../crypto/curve25519
const FieldElementSize* = Curve25519KeySize
type FieldElement* = Curve25519Key
proc bytesToFieldElement*(bytes: openArray[byte]): Result[FieldElement, string] =
  ## Convert bytes to FieldElement
  ## Errors unless exactly FieldElementSize (32) bytes are supplied.
  if bytes.len != FieldElementSize:
    return err("Field element size must be 32 bytes")
  ok(intoCurve25519Key(bytes))
proc fieldElementToBytes*(fe: FieldElement): seq[byte] =
  ## Convert FieldElement to bytes (32-byte little-endian key encoding).
  fe.getBytes()
# Generate a random FieldElement
proc generateRandomFieldElement*(): Result[FieldElement, string] =
  ## Draws a random Curve25519 key from a freshly created system-seeded RNG.
  let rng = HmacDrbgContext.new()
  if rng.isNil:
    return err("Failed to create HmacDrbgContext with system randomness")
  ok(Curve25519Key.random(rng[]))
# Generate a key pair (private key and public key are both FieldElements)
proc generateKeyPair*(): Result[tuple[privateKey, publicKey: FieldElement], string] =
  ## Generates an X25519 key pair; the public key is derived from the
  ## private key via `public` (base-point multiplication).
  let privateKey = generateRandomFieldElement().valueOr:
    return err("Error in private key generation: " & error)
  let publicKey = public(privateKey)
  ok((privateKey, publicKey))
proc multiplyPointWithScalars*(
    point: FieldElement, scalars: openArray[FieldElement]
): FieldElement =
  ## Multiply a given Curve25519 point with a set of scalars
  ## Applies each scalar in turn; an empty `scalars` returns `point` as-is.
  var res = point
  for scalar in scalars:
    Curve25519.mul(res, scalar)
  res
proc multiplyBasePointWithScalars*(
    scalars: openArray[FieldElement]
): Result[FieldElement, string] =
  ## Multiply the Curve25519 base point with a set of scalars.
  ## Errors when `scalars` is empty, since there is no product to compute.
  if scalars.len == 0: # len is never negative, so == 0 is the real condition
    return err("At least one scalar must be provided")
  # public() multiplies the predefined base point by the first scalar.
  var res: FieldElement = public(scalars[0])
  for i in 1 ..< scalars.len:
    Curve25519.mul(res, scalars[i]) # Fold in each remaining scalar
  ok(res)

View File

@@ -0,0 +1,133 @@
import hashes, chronos, stew/byteutils, results, chronicles
import ../../stream/connection
import ../../varint
import ../../utils/sequninit
import ./mix_protocol
from fragmentation import DataSize
type MixDialer* = proc(
  msg: seq[byte], codec: string, destination: MixDestination
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).}

type MixEntryConnection* = ref object of Connection
  destination: MixDestination # final recipient of anonymized messages
  codec: string # protocol codec spoken with the destination
  mixDialer: MixDialer # sends a message through the mix network
func shortLog*(conn: MixEntryConnection): string =
  ## Compact, nil-safe representation used by chronicles log output.
  if conn == nil:
    "MixEntryConnection(nil)"
  else:
    "MixEntryConnection(" & $conn.destination & ")"

chronicles.formatIt(MixEntryConnection):
  shortLog(it)
method readOnce*(
    s: MixEntryConnection, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]), public.} =
  # TODO: implement — reads are not supported yet; raise EOF unconditionally
  raise newLPStreamEOFError()

method readExactly*(
    s: MixEntryConnection, pbytes: pointer, nbytes: int
): Future[void] {.async: (raises: [CancelledError, LPStreamError]), public.} =
  # TODO: implement
  raise newLPStreamEOFError()

method readLine*(
    s: MixEntryConnection, limit = 0, sep = "\r\n"
): Future[string] {.async: (raises: [CancelledError, LPStreamError]), public.} =
  # TODO: implement
  raise newLPStreamEOFError()

method readVarint*(
    conn: MixEntryConnection
): Future[uint64] {.async: (raises: [CancelledError, LPStreamError]), public.} =
  # TODO: implement
  raise newLPStreamEOFError()

method readLp*(
    s: MixEntryConnection, maxSize: int
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]), public.} =
  # TODO: implement
  raise newLPStreamEOFError()
method write*(
    self: MixEntryConnection, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
  ## Sends `msg` to the destination through the mix dialer.
  self.mixDialer(msg, self.codec, self.destination)

proc write*(
    self: MixEntryConnection, msg: string
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
  ## String convenience overload; encodes to bytes and delegates.
  self.write(msg.toBytes())

method writeLp*(
    self: MixEntryConnection, msg: openArray[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
  if msg.len() > DataSize:
    # This is a raw async proc, so failures are reported by failing the
    # returned future rather than raising.
    let fut = newFuture[void]()
    fut.fail(
      newException(LPStreamError, "exceeds max msg size of " & $DataSize & " bytes")
    )
    return fut
  ## Write `msg` with a varint-encoded length prefix
  let vbytes = PB.toBytes(msg.len().uint64)
  var buf = newSeqUninit[byte](msg.len() + vbytes.len)
  buf[0 ..< vbytes.len] = vbytes.toOpenArray()
  buf[vbytes.len ..< buf.len] = msg
  self.write(buf)

method writeLp*(
    self: MixEntryConnection, msg: string
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
  ## String overload of `writeLp`.
  self.writeLp(msg.toOpenArrayByte(0, msg.high))
proc shortLog*(self: MixEntryConnection): string {.raises: [].} =
"[MixEntryConnection] Destination: " & $self.destination
method closeImpl*(
    self: MixEntryConnection
): Future[void] {.async: (raises: [], raw: true).} =
  ## Nothing to tear down: hand back an already-completed future.
  let done = newFuture[void]()
  done.complete()
  return done
func hash*(self: MixEntryConnection): Hash =
  ## Hashes the connection by the textual form of its destination.
  let key = $self.destination
  hash(key)
when defined(libp2p_agents_metrics):
  proc setShortAgent*(self: MixEntryConnection, shortAgent: string) =
    ## No-op: a mix entry connection has no underlying transport agent
    ## to attribute metrics to.
    discard
proc new*(
    T: typedesc[MixEntryConnection],
    srcMix: MixProtocol,
    destination: MixDestination,
    codec: string,
): T {.raises: [].} =
  ## Creates an entry connection whose writes are anonymized by sending
  ## them through `srcMix` towards `destination` using `codec`.
  var instance = T()
  instance.destination = destination
  instance.codec = codec
  # The dialer closure captures `srcMix` so each write becomes one
  # anonymized send over a freshly sampled mix path.
  instance.mixDialer = proc(
      msg: seq[byte], codec: string, dest: MixDestination
  ): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
    await srcMix.anonymizeLocalProtocolSend(
      nil, msg, codec, dest, 0 # TODO: set incoming queue for replies and surbs
    )
  when defined(libp2p_agents_metrics):
    # Fix: the previous code read `connection.shortAgent`, but no
    # `connection` identifier exists in this scope, breaking compilation
    # under this flag. There is no underlying connection to copy the
    # agent from, so leave it empty.
    instance.shortAgent = ""
  instance
proc toConnection*(
    srcMix: MixProtocol, destination: MixDestination, codec: string
): Result[Connection, string] {.gcsafe, raises: [].} =
  ## Create a stream to send and optionally receive responses.
  ## Under the hood it will wrap the message in a sphinx packet
  ## and send it via a random mix path.
  # Construction is infallible today; the Result type leaves room for
  # future validation of the destination.
  ok(MixEntryConnection.new(srcMix, destination, codec))

View File

@@ -0,0 +1,36 @@
import chronicles, chronos, metrics
import ../../builders
import ../../stream/connection
import ./mix_metrics
type ExitLayer* = object
  ## Final stage of the mix pipeline: delivers fully-unwrapped messages
  ## to their destination peers.
  switch: Switch # used to dial the final destination
proc init*(T: typedesc[ExitLayer], switch: Switch): T =
  ## Builds an exit layer bound to the given switch.
  T(switch: switch)
proc onMessage*(
    self: ExitLayer,
    codec: string,
    message: seq[byte],
    destAddr: MultiAddress,
    destPeerId: PeerId,
) {.async: (raises: [CancelledError]).} =
  ## Delivers an unwrapped mix message to its final destination by dialing
  ## `destPeerId` at `destAddr` with `codec` and writing `message`.
  # If dialing destination fails, no response is returned to
  # the sender, so, flow can just end here. Only log errors
  # for now
  # https://github.com/vacp2p/mix/issues/86
  try:
    let destConn = await self.switch.dial(destPeerId, @[destAddr], codec)
    defer:
      # Runs on both success and stream failure below.
      await destConn.close()
    await destConn.write(message)
  except LPStreamError as exc:
    error "Stream error while writing to next hop: ", err = exc.msg
    mix_messages_error.inc(labelValues = ["ExitLayer", "LPSTREAM_ERR"])
  except DialFailedError as exc:
    error "Failed to dial next hop: ", err = exc.msg
    mix_messages_error.inc(labelValues = ["ExitLayer", "DIAL_FAILED"])
  except CancelledError as exc:
    # Cancellation must propagate to the caller.
    raise exc

View File

@@ -0,0 +1,95 @@
import ./[serialization, seqno_generator]
import results, stew/endians2
import ../../peerid
const PaddingLengthSize* = 2 # bytes encoding the padding length (uint16 BE)
const SeqNoSize* = 4 # bytes encoding the sequence number (uint32 BE)
const DataSize* = MessageSize - PaddingLengthSize - SeqNoSize # payload capacity per chunk
# Unpadding and reassembling messages will be handled by the top-level applications.
# Although padding and splitting messages could also be managed at that level, we
# implement it here to clarify the sender's logic.
# This is crucial as the sender is responsible for wrapping messages in Sphinx packets.
type MessageChunk* = object
  ## One fixed-size fragment of a (possibly larger) application message.
  paddingLength: uint16 # number of zero bytes prepended to the payload
  data: seq[byte] # padded payload, expected to be exactly DataSize bytes
  seqNo: uint32 # ordering key used for reassembly
proc init*(
    T: typedesc[MessageChunk], paddingLength: uint16, data: seq[byte], seqNo: uint32
): T =
  ## Value constructor for `MessageChunk`.
  result = T(paddingLength: paddingLength, data: data, seqNo: seqNo)
proc get*(msgChunk: MessageChunk): (uint16, seq[byte], uint32) =
  ## Returns the tuple (paddingLength, data, seqNo).
  (msgChunk.paddingLength, msgChunk.data, msgChunk.seqNo)
proc serialize*(msgChunk: MessageChunk): seq[byte] =
  ## Wire layout: paddingLength (2B BE) | data (DataSize B) | seqNo (4B BE).
  doAssert msgChunk.data.len == DataSize,
    "Padded data must be exactly " & $DataSize & " bytes"
  var buf: seq[byte] = @[]
  buf.add msgChunk.paddingLength.toBytesBE()
  buf.add msgChunk.data
  buf.add msgChunk.seqNo.toBytesBE()
  buf
proc deserialize*(T: typedesc[MessageChunk], data: openArray[byte]): Result[T, string] =
  ## Parses the fixed layout produced by `serialize`:
  ## paddingLength (2B BE) | data (DataSize B) | seqNo (4B BE).
  if data.len != MessageSize:
    return err("Data must be exactly " & $MessageSize & " bytes")
  let
    paddingLength = uint16.fromBytesBE(data[0 .. PaddingLengthSize - 1])
    chunk = data[PaddingLengthSize .. (PaddingLengthSize + DataSize - 1)]
    seqNo = uint32.fromBytesBE(data[PaddingLengthSize + DataSize ..^ 1])
  ok(T(paddingLength: paddingLength, data: chunk, seqNo: seqNo))
proc ceilDiv*(a, b: int): int =
  ## Integer division of `a` by `b`, rounded up (for non-negative `a`
  ## and positive `b`).
  let shifted = a + b - 1
  shifted div b
proc addPadding*(messageBytes: seq[byte], seqNo: SeqNo): MessageChunk =
  ## Pads messages smaller than DataSize with a zero-byte prefix so that
  ## the chunk data is exactly DataSize bytes; the prefix length is
  ## recorded so it can be stripped by `removePadding`.
  # Fix: oversize input previously underflowed the uint16 conversion and
  # died with an uninformative RangeDefect; make the precondition explicit.
  doAssert messageBytes.len <= DataSize,
    "Message must be at most " & $DataSize & " bytes"
  let paddingLength = uint16(DataSize - messageBytes.len)
  let paddedData =
    if paddingLength > 0:
      let paddingBytes = newSeq[byte](paddingLength)
      paddingBytes & messageBytes
    else:
      messageBytes
  MessageChunk(paddingLength: paddingLength, data: paddedData, seqNo: seqNo)
proc addPadding*(messageBytes: seq[byte], peerId: PeerId): MessageChunk =
  ## Pads messages smaller than DataSize, deriving the chunk's sequence
  ## number from `peerId`, the message bytes and the current time.
  var seqNoGen = SeqNo.init(peerId)
  seqNoGen.generate(messageBytes)
  messageBytes.addPadding(seqNoGen)
proc removePadding*(msgChunk: MessageChunk): Result[seq[byte], string] =
  ## Strips the zero-byte padding prefix recorded in `paddingLength`.
  ## Fails if the recorded padding exceeds the chunk's data length.
  let padLen = int(msgChunk.paddingLength)
  if padLen > msgChunk.data.len:
    return err("Invalid padding length")
  ok(msgChunk.data[padLen ..^ 1])
proc padAndChunkMessage*(messageBytes: seq[byte], peerId: PeerId): seq[MessageChunk] =
  ## Splits `messageBytes` into DataSize-sized chunks — at least one chunk
  ## even for an empty message — padding the final chunk and assigning
  ## consecutive sequence numbers seeded from `peerId` and the message.
  var seqNoGen = SeqNo.init(peerId)
  seqNoGen.generate(messageBytes)
  var chunks: seq[MessageChunk] = @[]
  # Split to chunks
  let totalChunks = max(1, ceilDiv(messageBytes.len, DataSize))
  # Ensure at least one chunk is generated
  for i in 0 ..< totalChunks:
    let
      startIdx = i * DataSize
      endIdx = min(startIdx + DataSize, messageBytes.len)
      # For an empty message this is `[0 .. -1]`, i.e. an empty slice.
      chunkData = messageBytes[startIdx .. endIdx - 1]
      msgChunk = chunkData.addPadding(seqNoGen)
    chunks.add(msgChunk)
    seqNoGen.inc()
  return chunks

View File

@@ -0,0 +1,47 @@
import chronicles, results
import stew/[byteutils, leb128]
import ../../protobuf/minprotobuf
import ../../utils/sequninit
type MixMessage* = object
  ## Application payload together with the protocol id (codec) it should
  ## be delivered to at the exit.
  message*: seq[byte] # opaque payload bytes
  codec*: string # destination protocol id
proc init*(T: typedesc[MixMessage], message: openArray[byte], codec: string): T =
  ## Builds a `MixMessage`, copying `message` into owned storage.
  result = T(message: @message, codec: codec)
proc serialize*(mixMsg: MixMessage): seq[byte] =
  ## Wire layout: LEB128 varint(codec length) | codec bytes | message bytes.
  let vbytes = toBytes(mixMsg.codec.len.uint64, Leb128)
  doAssert vbytes.len <= 2, "serialization failed: codec length exceeds 2 bytes"
  var buf = newSeqUninit[byte](vbytes.len + mixMsg.codec.len + mixMsg.message.len)
  buf[0 ..< vbytes.len] = vbytes.toOpenArray()
  # Fix: the codec slice must start at `vbytes.len` and span `codec.len`
  # bytes. The previous upper bound (`..< mixMsg.codec.len`) produced a
  # mismatched slice assignment, corrupting the buffer layout that
  # `deserialize` expects.
  buf[vbytes.len ..< vbytes.len + mixMsg.codec.len] = mixMsg.codec.toBytes()
  buf[vbytes.len + mixMsg.codec.len ..< buf.len] = mixMsg.message
  buf
proc deserialize*(
    T: typedesc[MixMessage], data: openArray[byte]
): Result[MixMessage, string] =
  ## Parses the varint codec length, the codec string and the trailing
  ## message bytes produced by `serialize`.
  if data.len == 0:
    return err("deserialization failed: data is empty")
  var codecLen: int
  var varintLen: int
  for i in 0 ..< min(data.len, 2):
    # Fix: feed the first `i + 1` bytes to the LEB128 decoder. The previous
    # `data[0 ..< i]` parsed an empty slice on the first pass and never
    # handed the decoder a complete 2-byte varint, so codec lengths >= 128
    # could not be decoded.
    let parsed = uint16.fromBytes(data[0 ..< i + 1], Leb128)
    if parsed.len < 0 or (i == 1 and parsed.len == 0):
      return err("deserialization failed: invalid codec length")
    varintLen = parsed.len
    codecLen = parsed.val.int
    if parsed.len > 0:
      break # complete varint decoded
  if varintLen == 0:
    # Single available byte had its continuation bit set: truncated varint.
    return err("deserialization failed: invalid codec length")
  if data.len < varintLen + codecLen:
    return err("deserialization failed: not enough data")
  ok(
    T(
      codec: string.fromBytes(data[varintLen ..< varintLen + codecLen]),
      message: data[varintLen + codecLen ..< data.len],
    )
  )

View File

@@ -0,0 +1,13 @@
{.push raises: [].}
import metrics
# "type" label values seen in this codebase: Entry, Intermediate, Exit,
# Intermediate/Exit, ExitLayer.
declarePublicCounter mix_messages_recvd, "number of mix messages received", ["type"]
declarePublicCounter mix_messages_forwarded,
  "number of mix messages forwarded", ["type"]
declarePublicCounter mix_messages_error,
  "number of mix messages failed processing", ["type", "error"]
declarePublicGauge mix_pool_size, "number of nodes in the pool"

View File

@@ -0,0 +1,318 @@
import os, results, strformat, sugar, sequtils
import std/streams
import ../../crypto/[crypto, curve25519, secp]
import ../../[multiaddress, multicodec, peerid, peerinfo]
import ./[serialization, curve25519, multiaddr]
# Fixed serialized sizes, derived from the sphinx address size and the
# curve25519/secp256k1 key sizes.
const MixNodeInfoSize* =
  AddrSize + (2 * FieldElementSize) + (SkRawPublicKeySize + SkRawPrivateKeySize)
const MixPubInfoSize* = AddrSize + FieldElementSize + SkRawPublicKeySize
type MixNodeInfo* = object
  ## Full (private) description of a mix node: identity, transport
  ## address, mix keypair and libp2p keypair.
  peerId*: PeerId
  multiAddr*: MultiAddress
  mixPubKey*: FieldElement # curve25519 public key for sphinx processing
  mixPrivKey*: FieldElement # curve25519 private key for sphinx processing
  libp2pPubKey*: SkPublicKey
  libp2pPrivKey*: SkPrivateKey
proc initMixNodeInfo*(
    peerId: PeerId,
    multiAddr: MultiAddress,
    mixPubKey, mixPrivKey: FieldElement,
    libp2pPubKey: SkPublicKey,
    libp2pPrivKey: SkPrivateKey,
): MixNodeInfo =
  ## Bundles a node's identity, transport address and key material.
  result = MixNodeInfo(
    peerId: peerId,
    multiAddr: multiAddr,
    mixPubKey: mixPubKey,
    mixPrivKey: mixPrivKey,
    libp2pPubKey: libp2pPubKey,
    libp2pPrivKey: libp2pPrivKey,
  )
proc get*(
    info: MixNodeInfo
): (PeerId, MultiAddress, FieldElement, FieldElement, SkPublicKey, SkPrivateKey) =
  ## Returns all fields as a tuple, in declaration order.
  (
    info.peerId, info.multiAddr, info.mixPubKey, info.mixPrivKey, info.libp2pPubKey,
    info.libp2pPrivKey,
  )
proc serialize*(nodeInfo: MixNodeInfo): Result[seq[byte], string] =
  ## Serializes as: addr bytes | mix pub key | mix priv key |
  ## libp2p pub key | libp2p priv key (MixNodeInfoSize bytes total).
  let addrBytes = multiAddrToBytes(nodeInfo.peerId, nodeInfo.multiAddr).valueOr:
    return err("Error in multiaddress conversion to bytes: " & error)
  let
    mixPubKeyBytes = fieldElementToBytes(nodeInfo.mixPubKey)
    mixPrivKeyBytes = fieldElementToBytes(nodeInfo.mixPrivKey)
    libp2pPubKeyBytes = nodeInfo.libp2pPubKey.getBytes()
    libp2pPrivKeyBytes = nodeInfo.libp2pPrivKey.getBytes()
  return ok(
    addrBytes & mixPubKeyBytes & mixPrivKeyBytes & libp2pPubKeyBytes & libp2pPrivKeyBytes
  )
proc deserialize*(T: typedesc[MixNodeInfo], data: openArray[byte]): Result[T, string] =
  ## Inverse of `serialize`; expects exactly MixNodeInfoSize bytes laid
  ## out as: addr | mix pub key | mix priv key | libp2p pub | libp2p priv.
  if len(data) != MixNodeInfoSize:
    return
      err("Serialized Mix node info must be exactly " & $MixNodeInfoSize & " bytes")
  let (peerId, multiAddr) = bytesToMultiAddr(data[0 .. AddrSize - 1]).valueOr:
    return err("Error in multiaddress conversion to bytes: " & error)
  let mixPubKey = bytesToFieldElement(
    data[AddrSize .. (AddrSize + FieldElementSize - 1)]
  ).valueOr:
    return err("Mix public key deserialize error: " & error)
  let mixPrivKey = bytesToFieldElement(
    data[(AddrSize + FieldElementSize) .. (AddrSize + (2 * FieldElementSize) - 1)]
  ).valueOr:
    return err("Mix private key deserialize error: " & error)
  let libp2pPubKey = SkPublicKey.init(
    data[
      AddrSize + (2 * FieldElementSize) ..
        AddrSize + (2 * FieldElementSize) + SkRawPublicKeySize - 1
    ]
  ).valueOr:
    return err("Failed to initialize libp2p public key")
  # Everything after the fixed offsets is the private key.
  let libp2pPrivKey = SkPrivateKey.init(
    data[AddrSize + (2 * FieldElementSize) + SkRawPublicKeySize ..^ 1]
  ).valueOr:
    return err("Failed to initialize libp2p private key")
  ok(
    T(
      peerId: peerId,
      multiAddr: multiAddr,
      mixPubKey: mixPubKey,
      mixPrivKey: mixPrivKey,
      libp2pPubKey: libp2pPubKey,
      libp2pPrivKey: libp2pPrivKey,
    )
  )
proc writeToFile*(
    node: MixNodeInfo, index: int, nodeInfoFolderPath: string = "./nodeInfo"
): Result[void, string] =
  ## Serializes `node` and writes it to `<folder>/mixNode_<index>`,
  ## creating the folder if needed.
  ## Fix: `createDir`/`writeData` can raise IOError/OSError, which escaped
  ## the Result-based error contract; report them as errors instead.
  try:
    if not dirExists(nodeInfoFolderPath):
      createDir(nodeInfoFolderPath)
    let filename = nodeInfoFolderPath / fmt"mixNode_{index}"
    var file = newFileStream(filename, fmWrite)
    if file == nil:
      return err("Failed to create file stream for " & filename)
    defer:
      file.close()
    let serializedData = node.serialize().valueOr:
      return err("Failed to serialize mix node info: " & error)
    file.writeData(addr serializedData[0], serializedData.len)
    return ok()
  except CatchableError as exc:
    return err("Failed to write mix node info: " & exc.msg)
proc readFromFile*(
    T: typedesc[MixNodeInfo], index: int, nodeInfoFolderPath: string = "./nodeInfo"
): Result[T, string] =
  ## Reads and deserializes a `MixNodeInfo` from `<folder>/mixNode_<index>`.
  let filename = nodeInfoFolderPath / fmt"mixNode_{index}"
  if not fileExists(filename):
    return err("File does not exist")
  var file = newFileStream(filename, fmRead)
  if file == nil:
    return err(
      "Failed to open file: " & filename &
        ". Check permissions or if the path is correct."
    )
  defer:
    file.close()
  let data = ?file.readAll().catch().mapErr(x => "File read error: " & x.msg)
  if data.len != MixNodeInfoSize:
    return err(
      "Invalid data size for MixNodeInfo: expected " & $MixNodeInfoSize &
        " bytes, but got " & $(data.len) & " bytes."
    )
  # NOTE(review): `cast` reinterprets the string buffer as seq[byte]
  # without copying — relies on string/seq layout compatibility; confirm
  # this matches the supported Nim memory model.
  let dMixNodeInfo = MixNodeInfo.deserialize(cast[seq[byte]](data)).valueOr:
    return err("Mix node info deserialize error: " & error)
  return ok(dMixNodeInfo)
proc deleteNodeInfoFolder*(nodeInfoFolderPath: string = "./nodeInfo") =
  ## Deletes the folder that stores serialized mix node info files
  ## along with all its contents, if the folder exists.
  ## NOTE(review): `removeDir` may raise OSError on permission failures —
  ## this propagates to the caller.
  if dirExists(nodeInfoFolderPath):
    removeDir(nodeInfoFolderPath)
type MixPubInfo* = object
  ## Publicly shareable subset of a mix node's info (no private keys).
  peerId*: PeerId
  multiAddr*: MultiAddress
  mixPubKey*: FieldElement # curve25519 public key used for sphinx wrapping
  libp2pPubKey*: SkPublicKey
proc init*(
    T: typedesc[MixPubInfo],
    peerId: PeerId,
    multiAddr: MultiAddress,
    mixPubKey: FieldElement,
    libp2pPubKey: SkPublicKey,
): T =
  ## Value constructor for `MixPubInfo`.
  T(
    # Fix: the field must be initialized from the `peerId` parameter; the
    # previous code referenced the `PeerId` *type* (Nim identifiers are
    # case-sensitive in the first character), which is not a valid value.
    peerId: peerId,
    multiAddr: multiAddr,
    mixPubKey: mixPubKey,
    libp2pPubKey: libp2pPubKey,
  )
proc get*(info: MixPubInfo): (PeerId, MultiAddress, FieldElement, SkPublicKey) =
  ## Returns all fields as a tuple, in declaration order.
  (info.peerId, info.multiAddr, info.mixPubKey, info.libp2pPubKey)
proc serialize*(nodeInfo: MixPubInfo): Result[seq[byte], string] =
  ## Serializes as: addr bytes | mix pub key | libp2p pub key
  ## (MixPubInfoSize bytes total).
  let addrBytes = multiAddrToBytes(nodeInfo.peerId, nodeInfo.multiAddr).valueOr:
    return err("Error in multiaddress conversion to bytes: " & error)
  let
    mixPubKeyBytes = fieldElementToBytes(nodeInfo.mixPubKey)
    libp2pPubKeyBytes = nodeInfo.libp2pPubKey.getBytes()
  return ok(addrBytes & mixPubKeyBytes & libp2pPubKeyBytes)
proc deserialize*(T: typedesc[MixPubInfo], data: openArray[byte]): Result[T, string] =
  ## Inverse of `serialize`: addr bytes | mix pub key | libp2p pub key.
  if len(data) != MixPubInfoSize:
    return
      err("Serialized mix public info must be exactly " & $MixPubInfoSize & " bytes")
  let (peerId, multiAddr) = bytesToMultiAddr(data[0 .. AddrSize - 1]).valueOr:
    return err("Error in bytes to multiaddress conversion: " & error)
  let mixPubKey = bytesToFieldElement(
    data[AddrSize .. (AddrSize + FieldElementSize - 1)]
  ).valueOr:
    return err("Mix public key deserialize error: " & error)
  let libp2pPubKey = SkPublicKey.init(data[(AddrSize + FieldElementSize) ..^ 1]).valueOr:
    # Fix: dropped the dangling ": " that previously ended this message.
    return err("Failed to initialize libp2p public key")
  ok(
    # Use `T(...)` for consistency with the other `deserialize` overloads.
    T(
      peerId: peerId,
      multiAddr: multiAddr,
      mixPubKey: mixPubKey,
      libp2pPubKey: libp2pPubKey,
    )
  )
proc writeToFile*(
    node: MixPubInfo, index: int, pubInfoFolderPath: string = "./pubInfo"
): Result[void, string] =
  ## Serializes `node` and writes it to `<folder>/mixNode_<index>`,
  ## creating the folder if needed.
  ## Fix: `createDir`/`writeData` can raise IOError/OSError, which escaped
  ## the Result-based error contract; report them as errors instead.
  try:
    if not dirExists(pubInfoFolderPath):
      createDir(pubInfoFolderPath)
    let filename = pubInfoFolderPath / fmt"mixNode_{index}"
    var file = newFileStream(filename, fmWrite)
    if file == nil:
      return err("Failed to create file stream for " & filename)
    defer:
      file.close()
    let serializedData = node.serialize().valueOr:
      return err("Failed to serialize mix pub info: " & error)
    file.writeData(unsafeAddr serializedData[0], serializedData.len)
    return ok()
  except CatchableError as exc:
    return err("Failed to write mix pub info: " & exc.msg)
proc readFromFile*(
    T: typedesc[MixPubInfo], index: int, pubInfoFolderPath: string = "./pubInfo"
): Result[T, string] =
  ## Reads and deserializes a `MixPubInfo` from `<folder>/mixNode_<index>`.
  let filename = pubInfoFolderPath / fmt"mixNode_{index}"
  if not fileExists(filename):
    return err("File does not exist")
  var file = newFileStream(filename, fmRead)
  if file == nil:
    return err(
      "Failed to open file: " & filename &
        ". Check permissions or if the path is correct."
    )
  defer:
    file.close()
  let data = ?file.readAll().catch().mapErr(x => "File read error: " & x.msg)
  if data.len != MixPubInfoSize:
    # Fix: the message previously reported "MixNodeInfo" and
    # MixNodeInfoSize even though this proc validates MixPubInfoSize.
    return err(
      "Invalid data size for MixPubInfo: expected " & $MixPubInfoSize &
        " bytes, but got " & $(data.len) & " bytes."
    )
  let dMixPubInfo = MixPubInfo.deserialize(cast[seq[byte]](data)).valueOr:
    return err("Mix pub info deserialize error: " & error)
  return ok(dMixPubInfo)
proc deletePubInfoFolder*(pubInfoFolderPath: string = "./pubInfo") =
  ## Deletes the folder containing serialized public mix node info
  ## and all files inside it, if the folder exists.
  ## NOTE(review): `removeDir` may raise OSError on permission failures —
  ## this propagates to the caller.
  if dirExists(pubInfoFolderPath):
    removeDir(pubInfoFolderPath)
type MixNodes* = seq[MixNodeInfo] ## Ordered collection of generated mix nodes.
proc getMixPubInfoByIndex*(self: MixNodes, index: int): Result[MixPubInfo, string] =
  ## Returns the public (shareable) view of the node at `index`.
  if index < 0 or index >= self.len:
    # Fix: valid indices are 0 .. len-1; the old message reported `len`
    # itself as an allowed value.
    return err("Index must be between 0 and " & $(self.len - 1))
  ok(
    MixPubInfo(
      peerId: self[index].peerId,
      multiAddr: self[index].multiAddr,
      mixPubKey: self[index].mixPubKey,
      libp2pPubKey: self[index].libp2pPubKey,
    )
  )
proc generateMixNodes(
    count: int, basePort: int = 4242, rng: ref HmacDrbgContext = newRng()
): Result[MixNodes, string] =
  ## Generates `count` mix nodes with fresh key material, listening on
  ## consecutive TCP ports starting at `basePort`.
  var nodes = newSeq[MixNodeInfo](count)
  for i in 0 ..< count:
    let keyPairResult = generateKeyPair()
    if keyPairResult.isErr:
      return err("Generate key pair error: " & $keyPairResult.error)
    let (mixPrivKey, mixPubKey) = keyPairResult.get()
    let
      # Fix: use the caller-provided `rng`. The previous code shadowed the
      # parameter with a fresh `newRng()` on every iteration, making the
      # parameter dead and the generation impossible to seed.
      keyPair = SkKeyPair.random(rng[])
      pubKeyProto = PublicKey(scheme: Secp256k1, skkey: keyPair.pubkey)
      peerId = PeerId.init(pubKeyProto).get()
      multiAddr =
        ?MultiAddress.init(fmt"/ip4/0.0.0.0/tcp/{basePort + i}").tryGet().catch().mapErr(
          x => x.msg
        )
    nodes[i] = MixNodeInfo(
      peerId: peerId,
      multiAddr: multiAddr,
      mixPubKey: mixPubKey,
      mixPrivKey: mixPrivKey,
      libp2pPubKey: keyPair.pubkey,
      libp2pPrivKey: keyPair.seckey,
    )
  ok(nodes)
proc initializeMixNodes*(count: int, basePort: int = 4242): Result[MixNodes, string] =
  ## Creates and initializes a set of mix nodes.
  generateMixNodes(count, basePort).mapErr(
    x => "Mix node initialization error: " & x
  )
proc findByPeerId*(self: MixNodes, peerId: PeerId): Result[MixNodeInfo, string] =
  ## Returns the first node matching `peerId`.
  # Improvement: early-exit linear scan instead of filtering the entire
  # sequence (the old filterIt allocated a full intermediate seq).
  for node in self:
    if node.peerId == peerId:
      return ok(node)
  err("No node with peer id: " & $peerId)
proc initMixMultiAddrByIndex*(
    self: var MixNodes, index: int, peerId: PeerId, multiAddr: MultiAddress
): Result[void, string] =
  ## Overwrites the identity and address of the node at `index`.
  if index < 0 or index >= self.len:
    # NOTE(review): valid indices are 0 .. len-1; message overstates by one.
    return err("Index must be between 0 and " & $(self.len))
  self[index].multiAddr = multiAddr
  self[index].peerId = peerId
  ok()

View File

@@ -0,0 +1,392 @@
import chronicles, chronos, sequtils, strutils, os, results
import std/[strformat, tables], metrics
import
./[
curve25519, fragmentation, mix_message, mix_node, sphinx, serialization,
tag_manager, mix_metrics, exit_layer, multiaddr,
]
import stew/endians2
import ../protocol
import ../../stream/[connection, lpstream]
import ../../[switch, multicodec, peerinfo]
const MixProtocolID* = "/mix/1.0.0" # codec id used when dialing mix nodes
## Mix Protocol defines a decentralized anonymous message routing layer for libp2p networks.
## It enables sender anonymity by routing each message through a decentralized mix overlay
## network composed of participating libp2p nodes, known as mix nodes. Each message is
## routed independently in a stateless manner, allowing other libp2p protocols to selectively
## anonymize messages without modifying their core protocol behavior.
type MixProtocol* = ref object of LPProtocol
  mixNodeInfo: MixNodeInfo # this node's own identity and key material
  pubNodeInfo: Table[PeerId, MixPubInfo] # public info of other known mix nodes
  switch: Switch # used to dial next hops and destinations
  tagManager: TagManager # tracks seen packet tags (duplicate detection)
  exitLayer: ExitLayer # delivers fully-unwrapped messages
  rng: ref HmacDrbgContext # CSPRNG for path selection and delays
proc loadAllButIndexMixPubInfo*(
    index, numNodes: int, pubInfoFolderPath: string = "./pubInfo"
): Result[Table[PeerId, MixPubInfo], string] =
  ## Loads public node info for every node except the one at `index`,
  ## keyed by peer id.
  var loaded = initTable[PeerId, MixPubInfo]()
  for nodeIdx in 0 ..< numNodes:
    if nodeIdx == index:
      continue # skip our own entry
    let pubInfo = MixPubInfo.readFromFile(nodeIdx, pubInfoFolderPath).valueOr:
      return err("Failed to load pub info from file: " & error)
    loaded[pubInfo.peerId] = pubInfo
  ok(loaded)
proc cryptoRandomInt(rng: ref HmacDrbgContext, max: int): Result[int, string] =
  ## Random int in [0, max) drawn from the DRBG.
  ## NOTE: `mod` introduces a small modulo bias unless `max` divides 2^64 —
  ## acceptable for path selection, not for key material.
  # Fix: a negative `max` previously hit a RangeDefect in the uint64
  # conversion instead of returning an error.
  if max <= 0:
    return err("Max cannot be zero.")
  let res = rng[].generate(uint64) mod uint64(max)
  ok(res.int)
proc handleMixNodeConnection(
    mixProto: MixProtocol, conn: Connection
) {.async: (raises: [LPStreamError, CancelledError]).} =
  ## Handles one inbound mix stream: reads a single Sphinx packet, peels
  ## one layer, then either forwards it to the next hop (Intermediate) or
  ## delivers the decoded message via the exit layer (Exit).
  let receivedBytes =
    try:
      await conn.readLp(PacketSize)
    except CancelledError as exc:
      raise exc
    finally:
      # One packet per stream: always close, whatever the outcome.
      await conn.close()
  if receivedBytes.len == 0:
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "NO_DATA"])
    return # No data, end of stream
  # Process the packet
  let (peerId, multiAddr, _, mixPrivKey, _, _) = mixProto.mixNodeInfo.get()
  let sphinxPacket = SphinxPacket.deserialize(receivedBytes).valueOr:
    error "Sphinx packet deserialization error", err = error
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "INVALID_SPHINX"])
    return
  # Peels one onion layer; the tag manager flags replays (Duplicate).
  let processedSP = processSphinxPacket(sphinxPacket, mixPrivKey, mixProto.tagManager).valueOr:
    error "Failed to process Sphinx packet", err = error
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "INVALID_SPHINX"])
    return
  case processedSP.status
  of Exit:
    mix_messages_recvd.inc(labelValues = ["Exit"])
    # This is the exit node, forward to destination
    let msgChunk = MessageChunk.deserialize(processedSP.messageChunk).valueOr:
      error "Deserialization failed", err = error
      mix_messages_error.inc(labelValues = ["Exit", "INVALID_SPHINX"])
      return
    let unpaddedMsg = msgChunk.removePadding().valueOr:
      error "Unpadding message failed", err = error
      mix_messages_error.inc(labelValues = ["Exit", "INVALID_SPHINX"])
      return
    let deserialized = MixMessage.deserialize(unpaddedMsg).valueOr:
      error "Deserialization failed", err = error
      mix_messages_error.inc(labelValues = ["Exit", "INVALID_SPHINX"])
      return
    if processedSP.destination == Hop():
      error "no destination available"
      mix_messages_error.inc(labelValues = ["Exit", "NO_DESTINATION"])
      return
    let destBytes = processedSP.destination.get()
    let (destPeerId, destAddr) = bytesToMultiAddr(destBytes).valueOr:
      error "Failed to convert bytes to multiaddress", err = error
      mix_messages_error.inc(labelValues = ["Exit", "INVALID_DEST"])
      return
    trace "Exit node - Received mix message",
      peerId,
      message = deserialized.message,
      codec = deserialized.codec,
      to = destPeerId
    await mixProto.exitLayer.onMessage(
      deserialized.codec, deserialized.message, destAddr, destPeerId
    )
    mix_messages_forwarded.inc(labelValues = ["Exit"])
  of Reply:
    # TODO: implement
    discard
  of Intermediate:
    trace "# Intermediate: ", peerId, multiAddr
    # Add delay
    mix_messages_recvd.inc(labelValues = ["Intermediate"])
    await sleepAsync(milliseconds(processedSP.delayMs))
    # Forward to next hop
    let nextHopBytes = processedSP.nextHop.get()
    let (nextPeerId, nextAddr) = bytesToMultiAddr(nextHopBytes).valueOr:
      error "Failed to convert bytes to multiaddress", err = error
      mix_messages_error.inc(labelValues = ["Intermediate", "INVALID_DEST"])
      return
    try:
      let nextHopConn =
        await mixProto.switch.dial(nextPeerId, @[nextAddr], MixProtocolID)
      defer:
        await nextHopConn.close()
      await nextHopConn.writeLp(processedSP.serializedSphinxPacket)
      mix_messages_forwarded.inc(labelValues = ["Intermediate"])
    except CancelledError as exc:
      raise exc
    except DialFailedError as exc:
      error "Failed to dial next hop: ", err = exc.msg
      mix_messages_error.inc(labelValues = ["Intermediate", "DIAL_FAILED"])
    except LPStreamError as exc:
      error "Failed to write to next hop: ", err = exc.msg
      # NOTE(review): a write failure is labelled DIAL_FAILED — confirm
      # whether a distinct error label is intended here.
      mix_messages_error.inc(labelValues = ["Intermediate", "DIAL_FAILED"])
  of Duplicate:
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "DUPLICATE"])
  of InvalidMAC:
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "INVALID_MAC"])
proc getMaxMessageSizeForCodec*(
    codec: string, numberOfSurbs: uint8 = 0
): Result[int, string] =
  ## Computes the maximum payload size (in bytes) available for a message when encoded
  ## with the given `codec`.
  ## Returns an error if the codec length alone would exceed the data capacity.
  ## NOTE(review): `numberOfSurbs` is currently unused — presumably SURB
  ## overhead should also be subtracted; confirm before relying on it.
  let serializedMsg = MixMessage.init(@[], codec).serialize()
  if serializedMsg.len > DataSize:
    return err("cannot encode messages for this codec")
  return ok(DataSize - serializedMsg.len)
proc sendPacket(
    mixProto: MixProtocol,
    multiAddrs: MultiAddress,
    sphinxPacket: seq[byte],
    label: string,
) {.async: (raises: [CancelledError]).} =
  ## Send the wrapped message to the first mix node in the selected path.
  ## Failures are logged and counted under `label`; they do not raise.
  let (firstMixPeerId, firstMixAddr) = parseFullAddress(multiAddrs).valueOr:
    error "Invalid multiaddress", err = error
    mix_messages_error.inc(labelValues = [label, "NON_RECOVERABLE"])
    return
  try:
    let nextHopConn =
      await mixProto.switch.dial(firstMixPeerId, @[firstMixAddr], @[MixProtocolID])
    defer:
      await nextHopConn.close()
    await nextHopConn.writeLp(sphinxPacket)
    # Fix: only count the message as forwarded after a successful write,
    # and attribute it to the caller-supplied `label`. Previously the
    # counter was bumped unconditionally after the try block with a
    # hardcoded "Entry" label, even when dialing or writing failed.
    mix_messages_forwarded.inc(labelValues = [label])
  except DialFailedError as exc:
    error "Failed to dial next hop: ",
      peerId = firstMixPeerId, address = firstMixAddr, err = exc.msg
    mix_messages_error.inc(labelValues = [label, "SEND_FAILED"])
  except LPStreamError as exc:
    error "Failed to write to next hop: ",
      peerId = firstMixPeerId, address = firstMixAddr, err = exc.msg
    mix_messages_error.inc(labelValues = [label, "SEND_FAILED"])
  except CancelledError as exc:
    raise exc
proc buildMessage(
    msg: seq[byte], codec: string, peerId: PeerId
): Result[Message, (string, string)] =
  ## Wraps `msg` + `codec` in a `MixMessage`, pads it to the fixed chunk
  ## size and serializes it.
  ## The error tuple is (human-readable message, metrics error label).
  let
    mixMsg = MixMessage.init(msg, codec)
    serialized = mixMsg.serialize()
  if serialized.len > DataSize:
    return err(("message size exceeds maximum payload size", "INVALID_SIZE"))
  let
    paddedMsg = addPadding(serialized, peerId)
    serializedMsgChunk = paddedMsg.serialize()
  ok(serializedMsgChunk)
## Represents the final target of a mixnet message.
## Contains the peer id and multiaddress of the destination node.
type MixDestination* = object
  peerId: PeerId # final recipient
  address: MultiAddress # transport address of the recipient
proc init*(T: typedesc[MixDestination], peerId: PeerId, address: MultiAddress): T =
  ## Initializes a destination object with the given peer id and multiaddress.
  T(peerId: peerId, address: address)
proc `$`*(d: MixDestination): string =
  ## Renders as `<address>/p2p/<peerId>`.
  $d.address & "/p2p/" & $d.peerId
proc anonymizeLocalProtocolSend*(
    mixProto: MixProtocol,
    incoming: AsyncQueue[seq[byte]],
    msg: seq[byte],
    codec: string,
    destination: MixDestination,
    numSurbs: uint8,
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Wraps `msg` in a Sphinx packet routed over `PathLength` randomly
  ## selected mix nodes and sends it towards `destination`.
  ## NOTE(review): `incoming` (reply queue) and `numSurbs` are accepted
  ## but not used in this body — reply/SURB support is pending; confirm.
  mix_messages_recvd.inc(labelValues = ["Entry"])
  var
    multiAddrs: seq[MultiAddress] = @[]
    publicKeys: seq[FieldElement] = @[]
    hop: seq[Hop] = @[]
    delay: seq[seq[byte]] = @[]
    exitPeerId: PeerId # NOTE(review): recorded but not read afterwards
  # Select L mix nodes at random
  let numMixNodes = mixProto.pubNodeInfo.len
  var numAvailableNodes = numMixNodes
  debug "Destination data", destination
  if mixProto.pubNodeInfo.hasKey(destination.peerId):
    # The destination itself must not be part of the path.
    numAvailableNodes = numMixNodes - 1
  if numAvailableNodes < PathLength:
    error "No. of public mix nodes less than path length.",
      numMixNodes = numAvailableNodes, pathLength = PathLength
    mix_messages_error.inc(labelValues = ["Entry", "LOW_MIX_POOL"])
    return
  # Skip the destination peer
  var pubNodeInfoKeys =
    mixProto.pubNodeInfo.keys.toSeq().filterIt(it != destination.peerId)
  var availableIndices = toSeq(0 ..< pubNodeInfoKeys.len)
  var i = 0
  while i < PathLength:
    # Sample without replacement from the remaining candidate indices.
    let randomIndexPosition = cryptoRandomInt(mixProto.rng, availableIndices.len).valueOr:
      error "Failed to generate random number", err = error
      mix_messages_error.inc(labelValues = ["Entry", "NON_RECOVERABLE"])
      return
    let selectedIndex = availableIndices[randomIndexPosition]
    let randPeerId = pubNodeInfoKeys[selectedIndex]
    availableIndices.del(randomIndexPosition)
    # Last hop will be the exit node that will forward the request
    if i == PathLength - 1:
      exitPeerId = randPeerId
    debug "Selected mix node: ", indexInPath = i, peerId = randPeerId
    # Extract multiaddress, mix public key, and hop
    let (peerId, multiAddr, mixPubKey, _) =
      mixProto.pubNodeInfo.getOrDefault(randPeerId).get()
    multiAddrs.add(multiAddr)
    publicKeys.add(mixPubKey)
    let multiAddrBytes = multiAddrToBytes(peerId, multiAddr).valueOr:
      error "Failed to convert multiaddress to bytes", err = error
      mix_messages_error.inc(labelValues = ["Entry", "INVALID_MIX_INFO"])
      #TODO: should we skip and pick a different node here??
      return
    hop.add(Hop.init(multiAddrBytes))
    # Compute delay
    let delayMillisec =
      if i != PathLength - 1:
        # NOTE(review): per-hop delay is sampled from 0..2 — confirm the
        # intended delay range/unit.
        cryptoRandomInt(mixProto.rng, 3).valueOr:
          error "Failed to generate random number", err = error
          mix_messages_error.inc(labelValues = ["Entry", "NON_RECOVERABLE"])
          return
      else:
        0 # Last hop does not require a delay
    delay.add(@(delayMillisec.uint16.toBytesBE()))
    i = i + 1
  # Encode destination
  let destAddrBytes = multiAddrToBytes(destination.peerId, destination.address).valueOr:
    error "Failed to convert multiaddress to bytes", err = error
    mix_messages_error.inc(labelValues = ["Entry", "INVALID_DEST"])
    return
  let destHop = Hop.init(destAddrBytes)
  let message = buildMessage(msg, codec, mixProto.mixNodeInfo.peerId).valueOr:
    error "Error building message", err = error[0]
    mix_messages_error.inc(labelValues = ["Entry", error[1]])
    return
  # Wrap in Sphinx packet
  let sphinxPacket = wrapInSphinxPacket(message, publicKeys, delay, hop, destHop).valueOr:
    error "Failed to wrap in sphinx packet", err = error
    mix_messages_error.inc(labelValues = ["Entry", "NON_RECOVERABLE"])
    return
  # Send the wrapped message to the first mix node in the selected path
  await mixProto.sendPacket(multiAddrs[0], sphinxPacket, "Entry")
proc init*(
    mixProto: MixProtocol,
    mixNodeInfo: MixNodeInfo,
    pubNodeInfo: Table[PeerId, MixPubInfo],
    switch: Switch,
    tagManager: TagManager = TagManager.new(),
    rng: ref HmacDrbgContext = newRng(),
) =
  ## In-place initialization of all `MixProtocol` state; installs the
  ## stream handler that processes inbound mix packets.
  mixProto.mixNodeInfo = mixNodeInfo
  mixProto.pubNodeInfo = pubNodeInfo
  mixProto.switch = switch
  mixProto.tagManager = tagManager
  mixProto.exitLayer = ExitLayer.init(switch)
  mixProto.codecs = @[MixProtocolID]
  mixProto.rng = rng
  mixProto.handler = proc(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    try:
      await mixProto.handleMixNodeConnection(conn)
    except LPStreamError as e:
      # Stream failures on inbound packets are non-fatal: log and move on.
      debug "Stream error", conn = conn, err = e.msg
proc new*(
    T: typedesc[MixProtocol],
    mixNodeInfo: MixNodeInfo,
    pubNodeInfo: Table[PeerId, MixPubInfo],
    switch: Switch,
    tagManager: TagManager = TagManager.new(),
    rng: ref HmacDrbgContext = newRng(),
): T =
  ## Allocates and initializes a `MixProtocol`.
  let mixProto = new(T)
  # Fix: forward `tagManager` and `rng` to `init`. They were previously
  # dropped, so callers passing a custom tag manager or RNG silently got
  # freshly constructed defaults instead.
  mixProto.init(mixNodeInfo, pubNodeInfo, switch, tagManager, rng)
  mixProto
proc new*(
    T: typedesc[MixProtocol],
    index, numNodes: int,
    switch: Switch,
    nodeFolderInfoPath: string = ".",
    rng: ref HmacDrbgContext = newRng(),
): Result[T, string] =
  ## Constructs a new `MixProtocol` instance for the mix node at `index`,
  ## loading its private info from `nodeInfo` and the public info of all other nodes from `pubInfo`.
  let mixNodeInfo = MixNodeInfo.readFromFile(index, nodeFolderInfoPath / fmt"nodeInfo").valueOr:
    return err("Failed to load mix node info for index " & $index & " - err: " & error)
  let pubNodeInfo = loadAllButIndexMixPubInfo(
    index, numNodes, nodeFolderInfoPath / fmt"pubInfo"
  ).valueOr:
    return err("Failed to load mix pub info for index " & $index & " - err: " & error)
  let mixProto =
    MixProtocol.new(mixNodeInfo, pubNodeInfo, switch, TagManager.new(), rng)
  return ok(mixProto)
proc setNodePool*(
    mixProtocol: MixProtocol, mixNodeTable: Table[PeerId, MixPubInfo]
) {.gcsafe, raises: [].} =
  ## Replaces the protocol's public-node table wholesale.
  mixProtocol.pubNodeInfo = mixNodeTable
proc getNodePoolSize*(mixProtocol: MixProtocol): int {.gcsafe, raises: [].} =
  ## Number of known public mix nodes.
  mixProtocol.pubNodeInfo.len

View File

@@ -0,0 +1,95 @@
import results, sugar, sequtils, strutils
import ./serialization
import stew/endians2
import ../../[multicodec, multiaddress, peerid]
const
  PeerIdByteLen = 39 # ed25519 and secp256k1 multihash length
  MinMultiAddrComponentLen = 2 # e.g. /ip4/<ip>/tcp/<port>
  MaxMultiAddrComponentLen = 5 # quic + circuit relay
  # TODO: Add support for ipv6, dns, dns4, ws/wss/sni support
proc multiAddrToBytes*(
    peerId: PeerId, multiAddr: MultiAddress
): Result[seq[byte], string] {.raises: [].} =
  ## Encodes (peerId, multiAddr) into the fixed AddrSize-byte wire form:
  ## ip4 (4B) | proto flag (1B: 0 = tcp, 1 = quic-v1) | port (2B BE) |
  ## [relay PeerId (39B) when p2p-circuit] | PeerId (39B) | zero padding.
  var ma = multiAddr
  let sma = multiAddr.items().toSeq()
  var res: seq[byte] = @[]
  if not (sma.len >= MinMultiAddrComponentLen and sma.len <= MaxMultiAddrComponentLen):
    return err("Invalid multiaddress format")
  # Only IPV4 is supported
  let isCircuitRelay = ?ma.contains(multiCodec("p2p-circuit"))
  let baseP2PEndIdx = if isCircuitRelay: 3 else: 1
  let baseAddr =
    try:
      if sma.len - 1 - baseP2PEndIdx < 0:
        return err("Invalid multiaddress format")
      # Concatenate the transport components (ip/port[/quic]) only.
      sma[0 .. sma.len - baseP2PEndIdx].mapIt(it.tryGet()).foldl(a & b)
    except LPError as exc:
      return err("Could not obtain base address: " & exc.msg)
  let isQuic = QUIC_V1_IP.match(baseAddr)
  let isTCP = TCP_IP.match(baseAddr)
  if not (isTCP or isQuic):
    return err("Unsupported protocol")
  # 4 bytes for the IP
  let ip = ?ma.getPart(multiCodec("ip4")).value().protoArgument()
  res.add(ip)
  var port: string
  if isQuic:
    res.add(1.byte) # Protocol byte
    let udpPortPart = ma.getPart(multiCodec("udp")).value()
    port = $udpPortPart
  elif isTCP:
    res.add(0.byte) # Protocol byte
    let tcpPortPart = ma.getPart(multiCodec("tcp")).value()
    port = $tcpPortPart
  # The textual part looks like "/tcp/1234" — take the numeric component.
  let portNum = ?catch(port.split('/')[2].parseInt()).mapErr(x => x.msg)
  res.add(portNum.uint16.toBytesBE())
  if isCircuitRelay:
    let relayIdPart = ?ma.getPart(multiCodec("p2p"))
    let relayId = ?PeerId.init(?relayIdPart.protoArgument()).mapErr(x => $x)
    if relayId.data.len != PeerIdByteLen:
      return err("unsupported PeerId key type")
    res.add(relayId.data)
  # PeerID (39 bytes)
  res.add(peerId.data)
  if res.len > AddrSize:
    return err("Address must be <= " & $AddrSize & " bytes")
  # Right-pad with zeros up to the fixed AddrSize.
  return ok(res & newSeq[byte](AddrSize - res.len))
proc bytesToMultiAddr*(bytes: openArray[byte]): MaResult[(PeerId, MultiAddress)] =
  ## Decodes the fixed-size address form produced by `multiAddrToBytes`.
  if bytes.len != AddrSize:
    return err("Address must be exactly " & $AddrSize & " bytes")
  let
    ip = bytes[0 .. 3].mapIt($it).join(".")
    protocol = if bytes[4] == 0: "tcp" else: "udp"
    quic = if bytes[4] == 1: "/quic-v1" else: ""
    port = uint16.fromBytesBE(bytes[5 .. 6])
    # peerId1 represents the circuit relay server addr if p2p-circuit addr, otherwise it's the node's actual peerId
    peerId1Bytes = bytes[7 ..< 46]
    peerId2Bytes = bytes[7 + PeerIdByteLen ..< 7 + (PeerIdByteLen * 2)]
  let ma = ?MultiAddress.init("/ip4/" & ip & "/" & protocol & "/" & $port & quic)
  return
    if peerId2Bytes != newSeq[byte](PeerIdByteLen):
      # Has circuit relay address
      let relayIdMa = ?MultiAddress.init(multiCodec("p2p"), peerId1Bytes)
      let p2pCircuitMa = ?MultiAddress.init(multiCodec("p2p-circuit"))
      let peerId = ?PeerId.init(peerId2Bytes).mapErr(x => $x)
      ok((peerId, ?(ma & relayIdMa & p2pCircuitMa).catch().mapErr(x => x.msg)))
    else:
      # A zeroed second slot means no relay: slot 1 holds the node's own id.
      let peerId = ?PeerId.init(peerId1Bytes).mapErr(x => $x)
      ok((peerId, ma))

View File

@@ -0,0 +1,29 @@
import std/endians, times
import ../../peerid
import ./crypto
import ../../utils/sequninit
type SeqNo* = uint32
proc init*(T: typedesc[SeqNo], data: seq[byte]): T =
  ## Derives an initial sequence number from `data`: the first four bytes of
  ## its SHA-256 digest interpreted as a big-endian uint32.
  let digest = sha256_hash(data)
  var value: uint32 = 0
  for b in digest[0 .. 3]:
    value = (value shl 8) or uint32(b)
  value
proc init*(T: typedesc[SeqNo], peerId: PeerId): T =
  ## Derives an initial sequence number from a peer's identity bytes.
  SeqNo.init(peerId.data)
proc generate*(seqNo: var SeqNo, messageBytes: seq[byte]) =
  ## Advances `seqNo` by a step derived from `messageBytes` and the current
  ## wall-clock time in milliseconds since the Unix epoch.
  let currentTime = getTime().toUnix() * 1000
  # BUG FIX: the destination buffer is written through its address by
  # bigEndian64, so it must be `var`; it was previously a `let` mutated via
  # `unsafeAddr`, which defeats the compiler's mutability guarantees.
  var currentTimeBytes = newSeqUninit[byte](8)
  bigEndian64(addr currentTimeBytes[0], unsafeAddr currentTime)
  let step = SeqNo.init(messageBytes & currentTimeBytes)
  # NOTE(review): uint32 addition already wraps; `mod high(uint32)` also maps
  # high(uint32) itself to 0 — kept as-is for behavioral compatibility.
  seqNo = (seqNo + step) mod high(uint32)
proc inc*(seqNo: var SeqNo) =
  ## Advances the sequence number by one. The modulus wraps before
  ## high(uint32), so that exact value is never produced.
  seqNo = (seqNo + 1) mod high(uint32)

# TODO: Manage sequence no. overflow in a way that it does not affect re-assembly

View File

@@ -0,0 +1,236 @@
import results
import std/sequtils
import ../../utility
const
  k* = 16 # Security parameter in bytes (key / block / MAC-truncation size)
  r* = 5 # Maximum path length
  t* = 6 # t.k - combined length of next hop address and delay
  AlphaSize* = 32 # Group element
  BetaSize* = ((r * (t + 1)) + 1) * k # bytes
  GammaSize* = 16 # Output of HMAC-SHA-256, truncated to 16 bytes
  HeaderSize* = AlphaSize + BetaSize + GammaSize # Total header size
  DelaySize* = 2 # Delay size
  AddrSize* = (t * k) - DelaySize # Address size
  PacketSize* = 4608 # Total packet size (from spec)
  MessageSize* = PacketSize - HeaderSize - k # Size of the message itself
  PayloadSize* = MessageSize + k # Total payload size (message + k-byte zero prefix)
  SurbSize* = HeaderSize + k + AddrSize
    # Size of a surb packet inside the message payload
  SurbLenSize* = 1 # Size of the field storing the number of surbs
  SurbIdLen* = k # Size of the identifier used when sending a message with surb
  DefaultSurbs* = uint8(4) # Default number of SURBs to send
type Header* = object
  ## Sphinx packet header: alpha (group element), beta (nested encrypted
  ## routing info) and gamma (MAC over beta).
  Alpha*: seq[byte]
  Beta*: seq[byte]
  Gamma*: seq[byte]

proc init*(
    T: typedesc[Header], alpha: seq[byte], beta: seq[byte], gamma: seq[byte]
): T =
  ## Builds a header from its three components; sizes are checked later, in
  ## `serialize`.
  return T(Alpha: alpha, Beta: beta, Gamma: gamma)

proc get*(header: Header): (seq[byte], seq[byte], seq[byte]) =
  ## Returns (alpha, beta, gamma).
  (header.Alpha, header.Beta, header.Gamma)
proc serialize*(header: Header): seq[byte] =
  ## Flattens the header to alpha || beta || gamma (HeaderSize bytes).
  ## Component sizes are enforced with doAssert, as elsewhere in this module.
  doAssert header.Alpha.len == AlphaSize,
    "Alpha must be exactly " & $AlphaSize & " bytes"
  doAssert header.Beta.len == BetaSize, "Beta must be exactly " & $BetaSize & " bytes"
  doAssert header.Gamma.len == GammaSize,
    "Gamma must be exactly " & $GammaSize & " bytes"
  var buf = newSeqOfCap[byte](HeaderSize)
  buf.add(header.Alpha)
  buf.add(header.Beta)
  buf.add(header.Gamma)
  buf
proc deserialize*(
    T: typedesc[Header], serializedHeader: openArray[byte]
): Result[T, string] =
  ## Splits the first HeaderSize bytes of `serializedHeader` into a Header.
  ## Longer inputs are accepted on purpose: SphinxPacket.deserialize passes
  ## the whole packet and only the header prefix is consumed here.
  if len(serializedHeader) < HeaderSize:
    # BUG FIX: the check is `<` (at least), but the message claimed "exactly".
    return err("Serialized header must be at least " & $HeaderSize & " bytes")
  let header = Header(
    Alpha: serializedHeader[0 .. (AlphaSize - 1)],
    Beta: serializedHeader[AlphaSize .. (AlphaSize + BetaSize - 1)],
    Gamma: serializedHeader[(AlphaSize + BetaSize) .. (HeaderSize - 1)],
  )
  ok(header)
type Message* = seq[byte]

proc serialize*(message: Message): seq[byte] =
  ## Returns the message prefixed with k zero bytes; exit nodes verify this
  ## zero prefix after the final decryption.
  doAssert message.len() == MessageSize,
    "Message must be exactly " & $(MessageSize) & " bytes"
  newSeq[byte](k) & message
proc deserialize*(
    T: typedesc[Message], serializedMessage: openArray[byte]
): Result[T, string] =
  ## Strips the k-byte zero prefix from a PayloadSize-byte serialized message.
  if len(serializedMessage) != PayloadSize:
    return err("Serialized message must be exactly " & $PayloadSize & " bytes")
  return ok(serializedMessage[k ..^ 1])

type Hop* = object
  ## A hop address. An empty `MultiAddress` means "no address"; on the wire
  ## it is represented as AddrSize zero bytes.
  MultiAddress: seq[byte]

proc init*(T: typedesc[Hop], multiAddress: seq[byte]): T =
  ## Normalizes an all-zero AddrSize buffer to the empty address.
  T(
    MultiAddress:
      if multiAddress == newSeq[byte](AddrSize):
        @[]
      else:
        multiAddress
  )

proc get*(hop: Hop): seq[byte] =
  ## Returns the raw address bytes (empty when no address is set).
  return hop.MultiAddress
proc serialize*(hop: Hop): seq[byte] =
  ## Encodes the hop as exactly AddrSize bytes; the empty address becomes all
  ## zeros (the inverse of `Hop.deserialize`).
  if hop.MultiAddress.len == 0:
    return newSeq[byte](AddrSize)
  doAssert len(hop.MultiAddress) == AddrSize,
    "MultiAddress must be exactly " & $AddrSize & " bytes"
  # BUG FIX: the non-empty branch previously fell off the end of the proc and
  # returned the implicit zero-initialized `result` (an empty seq) instead of
  # the address bytes.
  return hop.MultiAddress
proc deserialize*(T: typedesc[Hop], data: openArray[byte]): Result[T, string] =
  ## Parses an AddrSize-byte buffer into a Hop; an all-zero buffer denotes
  ## the empty ("no address") hop.
  if len(data) != AddrSize:
    return err("MultiAddress must be exactly " & $AddrSize & " bytes")
  let address =
    if data.allIt(it == 0.byte):
      newSeq[byte](0)
    else:
      @data
  ok(T(MultiAddress: address))
type RoutingInfo* = object
  ## Per-hop routing block carried inside beta: next-hop address, forwarding
  ## delay, next-hop MAC (Gamma) and the remaining encrypted beta.
  Addr: Hop
  Delay: seq[byte]
  Gamma: seq[byte]
  Beta: seq[byte]

proc init*(
    T: typedesc[RoutingInfo],
    address: Hop,
    delay: seq[byte],
    gamma: seq[byte],
    beta: seq[byte],
): T =
  ## Builds a routing-info block; sizes are validated in `serialize`.
  return T(Addr: address, Delay: delay, Gamma: gamma, Beta: beta)

proc getRoutingInfo*(info: RoutingInfo): (Hop, seq[byte], seq[byte], seq[byte]) =
  ## Returns (address, delay, gamma, beta).
  (info.Addr, info.Delay, info.Gamma, info.Beta)

proc serialize*(info: RoutingInfo): seq[byte] =
  ## Concatenates addr || delay || gamma || beta, where beta is the truncated
  ## ((r * (t + 1)) - t) * k form.
  doAssert info.Delay.len() == DelaySize,
    "Delay must be exactly " & $DelaySize & " bytes"
  doAssert info.Gamma.len() == GammaSize,
    "Gamma must be exactly " & $GammaSize & " bytes"
  let expectedBetaLen = ((r * (t + 1)) - t) * k
  doAssert info.Beta.len() == expectedBetaLen,
    "Beta must be exactly " & $expectedBetaLen & " bytes"
  let addrBytes = info.Addr.serialize()
  return addrBytes & info.Delay & info.Gamma & info.Beta

proc readBytes(
    data: openArray[byte], offset: var int, readSize: Opt[int] = Opt.none(int)
): Result[seq[byte], string] =
  ## Reads `readSize` bytes from `data` at `offset` and advances `offset`;
  ## with no size given, consumes everything that remains.
  if data.len < offset:
    return err("not enough data")
  readSize.withValue(size):
    if data.len < offset + size:
      return err("not enough data")
    let slice = data[offset ..< offset + size]
    offset += size
    return ok(slice)
  let slice = data[offset .. ^1]
  offset = data.len
  return ok(slice)

proc deserialize*(T: typedesc[RoutingInfo], data: openArray[byte]): Result[T, string] =
  ## Parses a decrypted beta that has been extended by (t + 1) * k zero bytes
  ## (see the zero padding appended before decryption in packet processing).
  if len(data) != BetaSize + ((t + 1) * k):
    return err("Data must be exactly " & $(BetaSize + ((t + 1) * k)) & " bytes")
  let hop = Hop.deserialize(data[0 .. AddrSize - 1]).valueOr:
    return err("Deserialize hop error: " & error)
  var offset: int = AddrSize
  return ok(
    RoutingInfo(
      Addr: hop,
      Delay: ?data.readBytes(offset, Opt.some(DelaySize)),
      Gamma: ?data.readBytes(offset, Opt.some(GammaSize)),
      Beta: ?data.readBytes(offset, Opt.some(BetaSize)),
    )
  )
type SphinxPacket* = object
  ## A complete Sphinx packet: header plus onion-encrypted payload.
  Hdr*: Header
  Payload*: seq[byte]

proc init*(T: typedesc[SphinxPacket], header: Header, payload: seq[byte]): T =
  T(Hdr: header, Payload: payload)

proc get*(packet: SphinxPacket): (Header, seq[byte]) =
  ## Returns (header, payload).
  (packet.Hdr, packet.Payload)

proc serialize*(packet: SphinxPacket): seq[byte] =
  ## header || payload.
  let headerBytes = packet.Hdr.serialize()
  return headerBytes & packet.Payload

proc deserialize*(T: typedesc[SphinxPacket], data: openArray[byte]): Result[T, string] =
  ## Splits a PacketSize-byte buffer into header and payload. The full buffer
  ## is handed to Header.deserialize, which only reads the header prefix.
  if len(data) != PacketSize:
    return err("Sphinx packet size must be exactly " & $PacketSize & " bytes")
  let header = ?Header.deserialize(data)
  return ok(SphinxPacket(Hdr: header, Payload: data[HeaderSize ..^ 1]))

type
  Secret* = seq[seq[byte]] # per-hop shared secrets
  Key* = seq[byte]
  SURBIdentifier* = array[SurbIdLen, byte]
  SURB* = object
    ## Single-use reply block: first-hop address, pre-built header, payload
    ## key and — on the creator's side only — the per-hop secrets needed to
    ## decrypt the reply.
    hop*: Hop
    header*: Header
    key*: Key
    secret*: Opt[Secret]
proc serializeMessageWithSURBs*(
    msg: seq[byte], surbs: seq[SURB]
): Result[seq[byte], string] =
  ## Prepends a one-byte SURB count and each serialized (hop, header, key)
  ## triple to `msg`; fails when the SURBs would not fit in a message.
  const maxSurbs = (MessageSize - SurbLenSize - 1) div SurbSize
  if surbs.len > maxSurbs:
    return err("too many SURBs")
  var packed: seq[byte] = @[byte(surbs.len)]
  for surb in surbs:
    packed.add(surb.hop.serialize())
    packed.add(surb.header.serialize())
    packed.add(surb.key)
  ok(packed & msg)
proc extractSURBs*(msg: seq[byte]): Result[(seq[SURB], seq[byte]), string] =
  ## Inverse of `serializeMessageWithSURBs`: reads the one-byte SURB count,
  ## then each (hop, header, key) triple, and returns the remaining bytes as
  ## the message proper.
  var offset = 0
  let surbsLenBytes = ?readBytes(msg, offset, Opt.some(1))
  let surbsLen = int(surbsLenBytes[0])
  if surbsLen > (MessageSize - SurbLenSize - 1) div SurbSize:
    return err("too many SURBs")
  var surbs: seq[SURB] = newSeq[SURB](surbsLen)
  for i in 0 ..< surbsLen:
    let hopBytes = ?readBytes(msg, offset, Opt.some(AddrSize))
    let headerBytes = ?readBytes(msg, offset, Opt.some(HeaderSize))
    surbs[i].hop = ?Hop.deserialize(hopBytes)
    surbs[i].header = ?Header.deserialize(headerBytes)
    surbs[i].key = ?readBytes(msg, offset, Opt.some(k))
    # `secret` stays at its default (none): secrets never travel on the wire.
  # Shadows the parameter on purpose: everything after the SURBs is the payload.
  let msg = ?readBytes(msg, offset)
  return ok((surbs, msg))

View File

@@ -0,0 +1,371 @@
import results, sequtils, stew/endians2
import ./[crypto, curve25519, serialization, tag_manager]
import ../../crypto/crypto
import ../../utils/sequninit
const PathLength* = 3 # Path length (L)
const PaddingLength = (((t + 1) * (r - PathLength)) + 1) * k
  # Zero padding expected at the final hop; depends on the r - L unused hops.

type ProcessingStatus* = enum
  ## Outcome of processing one Sphinx packet at a node.
  Exit # this node is the final hop; payload fully decrypted
  Intermediate # unwrap one layer and forward to the next hop
  Reply # packet returned via a SURB this node created
  Duplicate # tag already seen; drop (replay protection)
  InvalidMAC # header MAC check failed; drop
proc computeAlpha(
    publicKeys: openArray[FieldElement]
): Result[(seq[byte], seq[seq[byte]]), string] =
  ## Compute alpha, an ephemeral public value. Each mix node uses its private key and
  ## alpha to derive a shared session key for that hop.
  ## This session key is used to decrypt and process one layer of the packet.
  ## Returns (alpha_0, s): the first-hop alpha plus one shared secret per hop.
  if publicKeys.len == 0:
    return err("No public keys provided")
  var
    s: seq[seq[byte]] = newSeq[seq[byte]](publicKeys.len)
    alpha_0: seq[byte]
    alpha: FieldElement
    secret: FieldElement
    blinders: seq[FieldElement] = @[]
  # x is the session's random scalar; subsequent blinders chain the hops.
  let x = generateRandomFieldElement().valueOr:
    return err("Generate field element error: " & error)
  blinders.add(x)
  for i in 0 ..< publicKeys.len:
    if publicKeys[i].len != FieldElementSize:
      return err("Invalid public key size: " & $i)
    # Compute alpha, shared secret, and blinder
    if i == 0:
      alpha = multiplyBasePointWithScalars([blinders[i]]).valueOr:
        return err("Multiply base point with scalars error: " & error)
      alpha_0 = fieldElementToBytes(alpha)
    else:
      alpha = multiplyPointWithScalars(alpha, [blinders[i]])
    # TODO: Optimize point multiplication by multiplying scalars first
    secret = multiplyPointWithScalars(publicKeys[i], blinders)
    # Next blinder is H(alpha || secret), feeding into the following hop.
    let blinder = bytesToFieldElement(
      sha256_hash(fieldElementToBytes(alpha) & fieldElementToBytes(secret))
    ).valueOr:
      return err("Error in bytes to field element conversion: " & error)
    blinders.add(blinder)
    s[i] = fieldElementToBytes(secret)
  return ok((alpha_0, s))
proc deriveKeyMaterial(keyName: string, s: seq[byte]): seq[byte] =
  ## Prefixes the shared secret `s` with the ASCII bytes of `keyName`,
  ## yielding the input for the KDF (domain separation per key purpose).
  var material = newSeqOfCap[byte](keyName.len + s.len)
  for ch in keyName:
    material.add(byte(ch))
  material.add(s)
  material
proc computeFillerStrings(s: seq[seq[byte]]): Result[seq[byte], string] =
  ## Builds the filler that pads beta so every hop observes a constant-size
  ## header; one (t + 1) * k chunk is folded in per hop after the first.
  var filler: seq[byte] = @[] # Start with an empty filler string
  for i in 1 ..< s.len:
    # Derive AES key and IV
    let
      aes_key = deriveKeyMaterial("aes_key", s[i - 1]).kdf()
      iv = deriveKeyMaterial("iv", s[i - 1]).kdf()
    # Compute filler string
    let
      fillerLength = (t + 1) * k
      zeroPadding = newSeq[byte](fillerLength)
    filler = aes_ctr_start_index(
      aes_key, iv, filler & zeroPadding, (((t + 1) * (r - i)) + t + 2) * k
    )
  return ok(filler)

proc computeBetaGamma(
    s: seq[seq[byte]],
    hop: openArray[Hop],
    delay: openArray[seq[byte]],
    destHop: Hop,
    id: SURBIdentifier,
): Result[tuple[beta: seq[byte], gamma: seq[byte]], string] =
  ## Calculates the following elements:
  ## - Beta: The nested encrypted routing information. It encodes the next hop address, the forwarding delay, integrity check Gamma for the next hop, and the Beta for subsequent hops.
  ## - Gamma: A message authentication code computed over Beta using the session key derived from Alpha. It ensures header integrity at each hop.
  let sLen = s.len
  var
    beta: seq[byte]
    gamma: seq[byte]
  # Compute filler strings
  let filler = computeFillerStrings(s).valueOr:
    return err("Error in filler generation: " & error)
  # Work backwards from the final hop so each layer wraps the next.
  for i in countdown(sLen - 1, 0):
    # Derive AES key, MAC key, and IV
    let
      beta_aes_key = deriveKeyMaterial("aes_key", s[i]).kdf()
      mac_key = deriveKeyMaterial("mac_key", s[i]).kdf()
      beta_iv = deriveKeyMaterial("iv", s[i]).kdf()
    # Compute Beta and Gamma
    if i == sLen - 1:
      # Innermost layer: destination, last delay, SURB id and zero padding.
      let destBytes = destHop.serialize()
      let destPadding = destBytes & delay[i] & @id & newSeq[byte](PaddingLength)
      let aes = aes_ctr(beta_aes_key, beta_iv, destPadding)
      beta = aes & filler
    else:
      # Outer layers: next hop, its delay, its gamma and the truncated beta.
      let routingInfo = RoutingInfo.init(
        hop[i + 1], delay[i], gamma, beta[0 .. (((r * (t + 1)) - t) * k) - 1]
      )
      let serializedRoutingInfo = routingInfo.serialize()
      beta = aes_ctr(beta_aes_key, beta_iv, serializedRoutingInfo)
    gamma = hmac(mac_key, beta).toSeq()
  return ok((beta: beta, gamma: gamma))

proc computeDelta(s: seq[seq[byte]], msg: Message): Result[seq[byte], string] =
  ## Onion-encrypts the serialized message once per hop, innermost first.
  let sLen = s.len
  var delta: seq[byte]
  for i in countdown(sLen - 1, 0):
    # Derive AES key and IV
    let
      delta_aes_key = deriveKeyMaterial("delta_aes_key", s[i]).kdf()
      delta_iv = deriveKeyMaterial("delta_iv", s[i]).kdf()
    # Compute Delta
    if i == sLen - 1:
      let serializedMsg = msg.serialize()
      delta = aes_ctr(delta_aes_key, delta_iv, serializedMsg)
    else:
      delta = aes_ctr(delta_aes_key, delta_iv, delta)
  return ok(delta)
proc createSURB*(
    publicKeys: openArray[FieldElement],
    delay: openArray[seq[byte]],
    hops: openArray[Hop],
    id: SURBIdentifier,
    rng: ref HmacDrbgContext = newRng(),
): Result[SURB, string] =
  ## Builds a single-use reply block for the given return path. The caller
  ## keeps the returned secrets/key to decrypt a reply in `processReply`.
  if id == default(SURBIdentifier):
    return err("id should be initialized")
  # Compute alpha and shared secrets
  let (alpha_0, s) = computeAlpha(publicKeys).valueOr:
    return err("Error in alpha generation: " & error)
  # Compute beta and gamma; the empty Hop() marks this header as a reply.
  let (beta_0, gamma_0) = computeBetaGamma(s, hops, delay, Hop(), id).valueOr:
    return err("Error in beta and gamma generation: " & error)
  # Generate key
  var key = newSeqUninit[byte](k)
  rng[].generate(key)
  return ok(
    SURB(
      hop: hops[0],
      header: Header.init(alpha_0, beta_0, gamma_0),
      secret: Opt.some(s),
      key: key,
    )
  )

proc useSURB*(surb: SURB, msg: Message): SphinxPacket =
  ## Encrypts `msg` with the SURB's payload key and attaches the pre-built
  ## header; the result is sent towards the SURB's first hop.
  # Derive AES key and IV
  let
    delta_aes_key = deriveKeyMaterial("delta_aes_key", surb.key).kdf()
    delta_iv = deriveKeyMaterial("delta_iv", surb.key).kdf()
  # Compute Delta
  let serializedMsg = msg.serialize()
  let delta = aes_ctr(delta_aes_key, delta_iv, serializedMsg)
  return SphinxPacket.init(surb.header, delta)

proc processReply*(
    key: seq[byte], s: seq[seq[byte]], delta_prime: seq[byte]
): Result[seq[byte], string] =
  ## Decrypts a reply that travelled back along a SURB: once with the SURB's
  ## payload `key`, then once per stored hop secret, then strips the prefix.
  var delta = delta_prime[0 ..^ 1]
  var key_prime = key
  # NOTE(review): the loop runs s.len + 1 times (i = 0 uses `key`, then one
  # pass per secret) — presumably one pass per encryption layer added on the
  # return path; confirm against useSURB and the per-hop processing.
  for i in 0 .. s.len:
    if i != 0:
      key_prime = s[i - 1]
    let
      delta_aes_key = deriveKeyMaterial("delta_aes_key", key_prime).kdf()
      delta_iv = deriveKeyMaterial("delta_iv", key_prime).kdf()
    delta = aes_ctr(delta_aes_key, delta_iv, delta)
  let deserializeMsg = Message.deserialize(delta).valueOr:
    return err("Message deserialization error: " & error)
  return ok(deserializeMsg)
proc wrapInSphinxPacket*(
    msg: Message,
    publicKeys: openArray[FieldElement],
    delay: openArray[seq[byte]],
    hop: openArray[Hop],
    destHop: Hop,
): Result[seq[byte], string] =
  ## Onion-encrypts `msg` for the given path and returns the serialized
  ## PacketSize-byte packet. A default SURBIdentifier marks a forward message.
  # Compute alpha and shared secrets
  let (alpha_0, s) = computeAlpha(publicKeys).valueOr:
    return err("Error in alpha generation: " & error)
  # Compute beta and gamma
  let (beta_0, gamma_0) = computeBetaGamma(
    s, hop, delay, destHop, default(SURBIdentifier)
  ).valueOr:
    return err("Error in beta and gamma generation: " & error)
  # Compute delta
  let delta_0 = computeDelta(s, msg).valueOr:
    return err("Error in delta generation: " & error)
  # Serialize sphinx packet
  let sphinxPacket = SphinxPacket.init(Header.init(alpha_0, beta_0, gamma_0), delta_0)
  let serialized = sphinxPacket.serialize()
  return ok(serialized)

type ProcessedSphinxPacket* = object
  ## Result of processing one packet; the populated fields depend on `status`.
  case status*: ProcessingStatus
  of ProcessingStatus.Exit:
    destination*: Hop
    messageChunk*: seq[byte]
  of ProcessingStatus.Intermediate:
    nextHop*: Hop
    delayMs*: int
    serializedSphinxPacket*: seq[byte]
  of ProcessingStatus.Reply:
    id*: SURBIdentifier
    delta_prime*: seq[byte]
  else:
    # Duplicate / InvalidMAC carry no extra data.
    discard
proc isZeros(data: seq[byte], startIdx: int, endIdx: int): bool =
  ## True when every byte in the inclusive range [startIdx, endIdx] is zero.
  ## Bounds are programmer invariants, enforced with doAssert.
  doAssert 0 <= startIdx and endIdx < data.len and startIdx <= endIdx
  data[startIdx .. endIdx].allIt(it == 0.byte)
template extractSurbId(data: seq[byte]): SURBIdentifier =
  ## Copies the SurbIdLen-byte SURB identifier that sits immediately after
  ## the t * k routing prefix of the decrypted beta.
  const startIndex = t * k
  const endIndex = startIndex + SurbIdLen - 1
  doAssert data.len > startIndex and endIndex < data.len
  var id: SURBIdentifier
  copyMem(addr id[0], addr data[startIndex], SurbIdLen)
  id
proc processSphinxPacket*(
    sphinxPacket: SphinxPacket, privateKey: FieldElement, tm: var TagManager
): Result[ProcessedSphinxPacket, string] =
  ## Processes one Sphinx packet at a node: derives the shared secret from
  ## alpha and `privateKey`, replay-checks the tag, verifies the header MAC,
  ## then classifies the packet as Exit, Reply or Intermediate.
  let
    (header, payload) = sphinxPacket.get()
    (alpha, beta, gamma) = header.get()
  # Compute shared secret
  let alphaFE = bytesToFieldElement(alpha).valueOr:
    return err("Error in bytes to field element conversion: " & error)
  let
    s = multiplyPointWithScalars(alphaFE, [privateKey])
    sBytes = fieldElementToBytes(s)
  # Check if the tag has been seen
  if isTagSeen(tm, s):
    return ok(ProcessedSphinxPacket(status: Duplicate))
  # Compute MAC
  let mac_key = deriveKeyMaterial("mac_key", sBytes).kdf()
  if not (hmac(mac_key, beta).toSeq() == gamma):
    # If MAC not verified
    return ok(ProcessedSphinxPacket(status: InvalidMAC))
  # Store the tag as seen
  addTag(tm, s)
  # Derive AES key and IV
  let
    beta_aes_key = deriveKeyMaterial("aes_key", sBytes).kdf()
    beta_iv = deriveKeyMaterial("iv", sBytes).kdf()
    delta_aes_key = deriveKeyMaterial("delta_aes_key", sBytes).kdf()
    delta_iv = deriveKeyMaterial("delta_iv", sBytes).kdf()
  # Compute delta
  let delta_prime = aes_ctr(delta_aes_key, delta_iv, payload)
  # Compute B
  let zeroPadding = newSeq[byte]((t + 1) * k)
  let B = aes_ctr(beta_aes_key, beta_iv, beta & zeroPadding)
  # Check if B has the required prefix for the original message
  if B.isZeros((t + 1) * k, ((t + 1) * k) + PaddingLength - 1):
    # Final layer: either an exit delivery or a SURB reply.
    let hop = Hop.deserialize(B[0 .. AddrSize - 1]).valueOr:
      return err(error)
    if B.isZeros(AddrSize, ((t + 1) * k) - 1):
      if delta_prime.isZeros(0, k - 1):
        # Zero prefix intact: payload fully decrypted; deliver to destination.
        let msg = Message.deserialize(delta_prime).valueOr:
          return err("Message deserialization error: " & error)
        return ok(
          ProcessedSphinxPacket(
            status: Exit, destination: hop, messageChunk: msg[0 .. MessageSize - 1]
          )
        )
      else:
        return err("delta_prime should be all zeros")
    elif B.isZeros(0, (t * k) - 1):
      # All-zero address prefix: this is a reply to a SURB we issued.
      return ok(
        ProcessedSphinxPacket(
          status: Reply, id: B.extractSurbId(), delta_prime: delta_prime
        )
      )
    # NOTE(review): if neither branch matches, the proc falls through and
    # returns the zero-initialized result (status Exit, empty fields) —
    # confirm this is intended rather than an error.
  else:
    # Extract routing information from B
    let routingInfo = RoutingInfo.deserialize(B).valueOr:
      return err("Routing info deserialization error: " & error)
    let (address, delay, gamma_prime, beta_prime) = routingInfo.getRoutingInfo()
    # Compute alpha for the next hop using the chained blinder H(alpha || s).
    let blinder = bytesToFieldElement(sha256_hash(alpha & sBytes)).valueOr:
      return err("Error in bytes to field element conversion: " & error)
    let alphaFE = bytesToFieldElement(alpha).valueOr:
      return err("Error in bytes to field element conversion: " & error)
    let alpha_prime = multiplyPointWithScalars(alphaFE, [blinder])
    # Serialize sphinx packet
    let sphinxPkt = SphinxPacket.init(
      Header.init(fieldElementToBytes(alpha_prime), beta_prime, gamma_prime),
      delta_prime,
    )
    return ok(
      ProcessedSphinxPacket(
        status: Intermediate,
        nextHop: address,
        delayMs: uint16.fromBytes(delay).int,
        serializedSphinxPacket: sphinxPkt.serialize(),
      )
    )

View File

@@ -0,0 +1,28 @@
import tables, locks
import ./curve25519
type TagManager* = ref object
  ## Lock-protected record of Sphinx tags (shared secrets) already processed;
  ## used for duplicate/replay detection.
  lock: Lock
  # NOTE(review): values are always `true`; a HashSet[FieldElement] would fit.
  seenTags: Table[FieldElement, bool]

proc new*(T: typedesc[TagManager]): T =
  ## Creates a TagManager with an initialized lock and empty tag table.
  let tm = T()
  tm.seenTags = initTable[FieldElement, bool]()
  initLock(tm.lock)
  return tm

proc addTag*(tm: TagManager, tag: FieldElement) {.gcsafe.} =
  ## Marks `tag` as seen.
  withLock tm.lock:
    tm.seenTags[tag] = true

proc isTagSeen*(tm: TagManager, tag: FieldElement): bool {.gcsafe.} =
  ## True when `tag` was previously added (and not removed).
  withLock tm.lock:
    return tm.seenTags.contains(tag)

proc removeTag*(tm: TagManager, tag: FieldElement) {.gcsafe.} =
  ## Forgets `tag`; no-op when absent.
  withLock tm.lock:
    tm.seenTags.del(tag)

proc clearTags*(tm: TagManager) {.gcsafe.} =
  ## Drops all recorded tags.
  withLock tm.lock:
    tm.seenTags.clear()

View File

@@ -932,11 +932,12 @@ method publish*(
g.mcache.put(msgId, msg)
if g.parameters.sendIDontWantOnPublish:
if isLargeMessage(msg, msgId):
if not pubParams.skipIDontWant and isLargeMessage(msg, msgId):
g.sendIDontWant(msg, msgId, peers)
when defined(libp2p_gossipsub_1_4):
g.sendPreamble(msg, msgId, peers)
if not pubParams.skipPreamble:
g.sendPreamble(msg, msgId, peers)
g.broadcast(
peers,

View File

@@ -146,9 +146,24 @@ type
## This callback can be used to reject topic we're not interested in
PublishParams* = object
## Used to indicate whether a message will be broadcasted using a custom connection
## defined when instantiating Pubsub, or if it will use the normal connection
useCustomConn*: bool
## Can be used to avoid having the node reply to IWANT messages when initially
## broadcasting a message, it's only after relaying its own message that the
## node will reply to IWANTs
skipMCache*: bool
## Determines whether an IDontWant message will be sent for the current message
## when it is published if it's a large message.
skipIDontWant*: bool
when defined(libp2p_gossipsub_1_4):
## Determines whether a Preamble message will be sent for the current message
## when it is published if it's a large message
skipPreamble*: bool
PubSub* {.public.} = ref object of LPProtocol
switch*: Switch # the switch used to dial/connect to peers
peerInfo*: PeerInfo # this peer's info

View File

@@ -92,11 +92,17 @@ type
# have to pass peer as it's unknown during init
OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [].}
QueuedMessage = object
# Messages sent as lower-priority are queued and sent in other location
# in which we need to keep arguments like `useCustomConn`.
data: seq[byte]
useCustomConn: bool
RpcMessageQueue* = ref object
# Tracks async tasks for sending high-priority peer-published messages.
sendPriorityQueue: Deque[Future[void]]
# Queue for lower-priority messages, like "IWANT" replies and relay messages.
nonPriorityQueue: AsyncQueue[seq[byte]]
nonPriorityQueue: AsyncQueue[QueuedMessage]
# Task for processing non-priority message queue.
sendNonPriorityTask: Future[void]
@@ -465,7 +471,9 @@ proc sendEncoded*(
else:
Future[void].completed()
else:
let f = p.rpcmessagequeue.nonPriorityQueue.addLast(msg)
let f = p.rpcmessagequeue.nonPriorityQueue.addLast(
QueuedMessage(data: msg, useCustomConn: useCustomConn)
)
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_non_priority_queue_size.inc(labelValues = [$p.peerId])
f
@@ -478,19 +486,16 @@ iterator splitRPCMsg(
## exceeds the `maxSize` when trying to fit into an empty `RPCMsg`, the latter is skipped as too large to send.
## Every constructed `RPCMsg` is then encoded, optionally anonymized, and yielded as a sequence of bytes.
var currentRPCMsg = rpcMsg
currentRPCMsg.messages = newSeq[Message]()
var currentSize = byteSize(currentRPCMsg)
var currentRPCMsg = RPCMsg()
var currentSize = 0
for msg in rpcMsg.messages:
let msgSize = byteSize(msg)
# Check if adding the next message will exceed maxSize
if float(currentSize + msgSize) * 1.1 > float(maxSize):
# Guessing 10% protobuf overhead
if currentRPCMsg.messages.len == 0:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
if currentSize + msgSize > maxSize:
if msgSize > maxSize:
warn "message too big to sent", peer, rpcMsg = shortLog(msg)
continue # Skip this message
trace "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
@@ -502,11 +507,9 @@ iterator splitRPCMsg(
currentSize += msgSize
# Check if there is a non-empty currentRPCMsg left to be added
if currentSize > 0 and currentRPCMsg.messages.len > 0:
if currentRPCMsg.messages.len > 0:
trace "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
yield encodeRpcMsg(currentRPCMsg, anonymize)
else:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
proc send*(
p: PubSubPeer,
@@ -542,8 +545,11 @@ proc send*(
sendMetrics(msg)
encodeRpcMsg(msg, anonymize)
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
# Messages should not exceed 90% of maxMessageSize. Guessing 10% protobuf overhead.
let maxEncodedMsgSize = (p.maxMessageSize * 90) div 100
if encoded.len > maxEncodedMsgSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, maxEncodedMsgSize, anonymize):
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority, useCustomConn)
else:
# If the message size is within limits, send it as is
@@ -573,7 +579,7 @@ proc sendNonPriorityTask(p: PubSubPeer) {.async: (raises: [CancelledError]).} =
discard await race(p.rpcmessagequeue.sendPriorityQueue[^1])
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_non_priority_queue_size.dec(labelValues = [$p.peerId])
await p.sendMsg(msg)
await p.sendMsg(msg.data, msg.useCustomConn)
proc startSendNonPriorityTask(p: PubSubPeer) =
debug "starting sendNonPriorityTask", p
@@ -595,7 +601,7 @@ proc stopSendNonPriorityTask*(p: PubSubPeer) =
proc new(T: typedesc[RpcMessageQueue]): T =
return T(
sendPriorityQueue: initDeque[Future[void]](),
nonPriorityQueue: newAsyncQueue[seq[byte]](),
nonPriorityQueue: newAsyncQueue[QueuedMessage](),
)
proc new*(

View File

@@ -1,851 +1,3 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import ./rendezvous/rendezvous
{.push raises: [].}
import tables, sequtils, sugar, sets
import metrics except collect
import chronos, chronicles, bearssl/rand, stew/[byteutils, objects]
import
./protocol,
../protobuf/minprotobuf,
../switch,
../routing_record,
../utils/heartbeat,
../stream/connection,
../utils/offsettedseq,
../utils/semaphore,
../discovery/discoverymngr
export chronicles
logScope:
  topics = "libp2p discovery rendezvous"

declareCounter(libp2p_rendezvous_register, "number of advertise requests")
declareCounter(libp2p_rendezvous_discover, "number of discovery requests")
declareGauge(libp2p_rendezvous_registered, "number of registered peers")
declareGauge(libp2p_rendezvous_namespaces, "number of registered namespaces")

const
  RendezVousCodec* = "/rendezvous/1.0.0" # protocol id string
  # Default minimum TTL per libp2p spec
  MinimumDuration* = 2.hours
  # Lower validation limit to accommodate Waku requirements
  MinimumAcceptedDuration = 1.minutes
  MaximumDuration = 72.hours
  MaximumMessageLen = 1 shl 22 # 4MB
  MinimumNamespaceLen = 1
  MaximumNamespaceLen = 255
  RegistrationLimitPerPeer* = 1000
  DiscoverLimit = 1000'u64
  SemaphoreDefaultSize = 5
type
  MessageType {.pure.} = enum
    Register = 0
    RegisterResponse = 1
    Unregister = 2
    Discover = 3
    DiscoverResponse = 4

  ResponseStatus = enum
    Ok = 0
    InvalidNamespace = 100
    InvalidSignedPeerRecord = 101
    InvalidTTL = 102
    InvalidCookie = 103
    NotAuthorized = 200
    InternalError = 300
    Unavailable = 400

  Cookie = object
    ## Pagination state echoed back by the client on subsequent discovers.
    offset: uint64
    ns: Opt[string]

  Register = object
    ns: string
    signedPeerRecord: seq[byte]
    ttl*: Opt[uint64] # in seconds

  RegisterResponse = object
    status: ResponseStatus
    text: Opt[string]
    ttl: Opt[uint64] # in seconds

  Unregister = object
    ns: string

  Discover = object
    ns: Opt[string]
    limit: Opt[uint64]
    cookie: Opt[seq[byte]]

  DiscoverResponse = object
    registrations: seq[Register]
    cookie: Opt[seq[byte]]
    status: ResponseStatus
    text: Opt[string]

  Message = object
    ## Top-level protocol envelope: a type tag plus at most one sub-message.
    msgType: MessageType
    register: Opt[Register]
    registerResponse: Opt[RegisterResponse]
    unregister: Opt[Unregister]
    discover: Opt[Discover]
    discoverResponse: Opt[DiscoverResponse]
proc encode(c: Cookie): ProtoBuffer =
  ## Protobuf-encodes a Cookie: field 1 = offset, field 2 = optional namespace.
  var pb = initProtoBuffer()
  pb.write(1, c.offset)
  c.ns.withValue(ns):
    pb.write(2, ns)
  pb.finish()
  pb
proc encode(r: Register): ProtoBuffer =
  ## ns (1), signedPeerRecord (2), optional ttl (3).
  result = initProtoBuffer()
  result.write(1, r.ns)
  result.write(2, r.signedPeerRecord)
  r.ttl.withValue(ttl):
    result.write(3, ttl)
  result.finish()

proc encode(rr: RegisterResponse): ProtoBuffer =
  ## status (1), optional text (2), optional ttl (3).
  result = initProtoBuffer()
  result.write(1, rr.status.uint)
  rr.text.withValue(text):
    result.write(2, text)
  rr.ttl.withValue(ttl):
    result.write(3, ttl)
  result.finish()

proc encode(u: Unregister): ProtoBuffer =
  ## ns (1).
  result = initProtoBuffer()
  result.write(1, u.ns)
  result.finish()

proc encode(d: Discover): ProtoBuffer =
  ## optional ns (1), optional limit (2), optional cookie (3).
  result = initProtoBuffer()
  if d.ns.isSome():
    result.write(1, d.ns.get())
  d.limit.withValue(limit):
    result.write(2, limit)
  d.cookie.withValue(cookie):
    result.write(3, cookie)
  result.finish()

proc encode(dr: DiscoverResponse): ProtoBuffer =
  ## repeated registrations (1), optional cookie (2), status (3), optional text (4).
  result = initProtoBuffer()
  for reg in dr.registrations:
    result.write(1, reg.encode())
  dr.cookie.withValue(cookie):
    result.write(2, cookie)
  result.write(3, dr.status.uint)
  dr.text.withValue(text):
    result.write(4, text)
  result.finish()

proc encode(msg: Message): ProtoBuffer =
  ## Envelope: msgType (1) plus whichever sub-messages are present (2-6).
  result = initProtoBuffer()
  result.write(1, msg.msgType.uint)
  msg.register.withValue(register):
    result.write(2, register.encode())
  msg.registerResponse.withValue(registerResponse):
    result.write(3, registerResponse.encode())
  msg.unregister.withValue(unregister):
    result.write(4, unregister.encode())
  msg.discover.withValue(discover):
    result.write(5, discover.encode())
  msg.discoverResponse.withValue(discoverResponse):
    result.write(6, discoverResponse.encode())
  result.finish()
proc decode(_: typedesc[Cookie], buf: seq[byte]): Opt[Cookie] =
  ## Field 1 (offset) required; field 2 (namespace) optional.
  var
    c: Cookie
    ns: string
  let
    pb = initProtoBuffer(buf)
    r1 = pb.getRequiredField(1, c.offset)
    r2 = pb.getField(2, ns)
  if r1.isErr() or r2.isErr():
    return Opt.none(Cookie)
  if r2.get(false):
    c.ns = Opt.some(ns)
  Opt.some(c)

proc decode(_: typedesc[Register], buf: seq[byte]): Opt[Register] =
  ## Fields 1 (ns) and 2 (signedPeerRecord) required; 3 (ttl) optional.
  var
    r: Register
    ttl: uint64
  let
    pb = initProtoBuffer(buf)
    r1 = pb.getRequiredField(1, r.ns)
    r2 = pb.getRequiredField(2, r.signedPeerRecord)
    r3 = pb.getField(3, ttl)
  if r1.isErr() or r2.isErr() or r3.isErr():
    return Opt.none(Register)
  if r3.get(false):
    r.ttl = Opt.some(ttl)
  Opt.some(r)

proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Opt[RegisterResponse] =
  ## Requires a known status ordinal; text and ttl are optional.
  var
    rr: RegisterResponse
    statusOrd: uint
    text: string
    ttl: uint64
  let
    pb = initProtoBuffer(buf)
    r1 = pb.getRequiredField(1, statusOrd)
    r2 = pb.getField(2, text)
    r3 = pb.getField(3, ttl)
  if r1.isErr() or r2.isErr() or r3.isErr() or
      not checkedEnumAssign(rr.status, statusOrd):
    return Opt.none(RegisterResponse)
  if r2.get(false):
    rr.text = Opt.some(text)
  if r3.get(false):
    rr.ttl = Opt.some(ttl)
  Opt.some(rr)

proc decode(_: typedesc[Unregister], buf: seq[byte]): Opt[Unregister] =
  ## Field 1 (ns) required.
  var u: Unregister
  let
    pb = initProtoBuffer(buf)
    r1 = pb.getRequiredField(1, u.ns)
  if r1.isErr():
    return Opt.none(Unregister)
  Opt.some(u)

proc decode(_: typedesc[Discover], buf: seq[byte]): Opt[Discover] =
  ## All fields (ns, limit, cookie) optional.
  var
    d: Discover
    limit: uint64
    cookie: seq[byte]
    ns: string
  let
    pb = initProtoBuffer(buf)
    r1 = pb.getField(1, ns)
    r2 = pb.getField(2, limit)
    r3 = pb.getField(3, cookie)
  if r1.isErr() or r2.isErr() or r3.isErr:
    return Opt.none(Discover)
  if r1.get(false):
    d.ns = Opt.some(ns)
  if r2.get(false):
    d.limit = Opt.some(limit)
  if r3.get(false):
    d.cookie = Opt.some(cookie)
  Opt.some(d)
proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Opt[DiscoverResponse] =
  ## Decodes a DiscoverResponse: repeated registrations (1), optional cookie
  ## (2), required status (3), optional text (4). Yields Opt.none on any
  ## malformed field, unknown status, or undecodable embedded registration.
  var
    dr: DiscoverResponse
    registrations: seq[seq[byte]]
    cookie: seq[byte]
    statusOrd: uint
    text: string
  let
    pb = initProtoBuffer(buf)
    r1 = pb.getRepeatedField(1, registrations)
    r2 = pb.getField(2, cookie)
    r3 = pb.getRequiredField(3, statusOrd)
    r4 = pb.getField(4, text)
  # Normalized `r3.isErr()` call style; removed an unused `var r: Register`.
  if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr() or
      not checkedEnumAssign(dr.status, statusOrd):
    return Opt.none(DiscoverResponse)
  for reg in registrations:
    # The bare `return` yields the default Opt, i.e. Opt.none.
    let decoded = Register.decode(reg).valueOr:
      return
    dr.registrations.add(decoded)
  if r2.get(false):
    dr.cookie = Opt.some(cookie)
  if r4.get(false):
    dr.text = Opt.some(text)
  Opt.some(dr)
proc decode(_: typedesc[Message], buf: seq[byte]): Opt[Message] =
  ## Decodes the top-level envelope: required message type (1), then at most
  ## one sub-message per field (2-6); any undecodable sub-message fails the
  ## whole decode.
  var
    msg: Message
    statusOrd: uint
    pbr, pbrr, pbu, pbd, pbdr: ProtoBuffer
  let pb = initProtoBuffer(buf)
  ?pb.getRequiredField(1, statusOrd).toOpt
  if not checkedEnumAssign(msg.msgType, statusOrd):
    return Opt.none(Message)
  if ?pb.getField(2, pbr).optValue:
    msg.register = Register.decode(pbr.buffer)
    if msg.register.isNone():
      return Opt.none(Message)
  if ?pb.getField(3, pbrr).optValue:
    msg.registerResponse = RegisterResponse.decode(pbrr.buffer)
    if msg.registerResponse.isNone():
      return Opt.none(Message)
  if ?pb.getField(4, pbu).optValue:
    msg.unregister = Unregister.decode(pbu.buffer)
    if msg.unregister.isNone():
      return Opt.none(Message)
  if ?pb.getField(5, pbd).optValue:
    msg.discover = Discover.decode(pbd.buffer)
    if msg.discover.isNone():
      return Opt.none(Message)
  if ?pb.getField(6, pbdr).optValue:
    msg.discoverResponse = DiscoverResponse.decode(pbdr.buffer)
    if msg.discoverResponse.isNone():
      return Opt.none(Message)
  Opt.some(msg)
type
  RendezVousError* = object of DiscoveryError
  RegisteredData = object
    ## One stored registration with its computed expiry time.
    expiration*: Moment
    peerId*: PeerId
    data*: Register

  RendezVous* = ref object of LPProtocol
    ## Rendezvous protocol state (acts as both point and client).
    # Registered needs to be an offsetted sequence
    # because we need stable index for the cookies.
    registered*: OffsettedSeq[RegisteredData]
    # Namespaces is a table whose key is a salted namespace and
    # the value is the index sequence corresponding to this
    # namespace in the offsettedqueue.
    namespaces*: Table[string, seq[int]]
    rng: ref HmacDrbgContext
    salt: string
    expiredDT: Moment
    registerDeletionLoop: Future[void]
    #registerEvent: AsyncEvent # TODO: to raise during the heartbeat
    # + make the heartbeat sleep duration "smarter"
    sema: AsyncSemaphore
    peers: seq[PeerId]
    cookiesSaved*: Table[PeerId, Table[string, seq[byte]]]
    switch: Switch
    minDuration: Duration
    maxDuration: Duration
    minTTL: uint64
    maxTTL: uint64
proc checkPeerRecord(spr: seq[byte], peerId: PeerId): Result[void, string] =
  ## Verify that `spr` decodes to a SignedPeerRecord whose embedded peer id
  ## matches `peerId`; returns a descriptive error string otherwise.
  if spr.len == 0:
    return err("Empty peer record")
  let envelope = ?SignedPeerRecord.decode(spr).mapErr(x => $x)
  if envelope.data.peerId == peerId:
    ok()
  else:
    err("Bad Peer ID")
proc sendRegisterResponse(
    conn: Connection, ttl: uint64
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Reply to a successful Register with status Ok and the granted TTL.
  let response = RegisterResponse(status: Ok, ttl: Opt.some(ttl))
  let msg = Message(
    msgType: MessageType.RegisterResponse, registerResponse: Opt.some(response)
  )
  await conn.writeLp(encode(msg).buffer)

proc sendRegisterResponseError(
    conn: Connection, status: ResponseStatus, text: string = ""
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Reply to a rejected Register with an error `status` and optional `text`.
  let response = RegisterResponse(status: status, text: Opt.some(text))
  let msg = Message(
    msgType: MessageType.RegisterResponse, registerResponse: Opt.some(response)
  )
  await conn.writeLp(encode(msg).buffer)

proc sendDiscoverResponse(
    conn: Connection, s: seq[Register], cookie: Cookie
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Reply to a Discover with the matching registrations and a pagination
  ## cookie for the next page.
  let response = DiscoverResponse(
    status: Ok, registrations: s, cookie: Opt.some(cookie.encode().buffer)
  )
  let msg = Message(
    msgType: MessageType.DiscoverResponse, discoverResponse: Opt.some(response)
  )
  await conn.writeLp(encode(msg).buffer)

proc sendDiscoverResponseError(
    conn: Connection, status: ResponseStatus, text: string = ""
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Reply to a rejected Discover with an error `status` and optional `text`.
  let response = DiscoverResponse(status: status, text: Opt.some(text))
  let msg = Message(
    msgType: MessageType.DiscoverResponse, discoverResponse: Opt.some(response)
  )
  await conn.writeLp(encode(msg).buffer)
proc countRegister(rdv: RendezVous, peerId: PeerId): int =
  ## Count the entries in the registration queue belonging to `peerId`
  ## (expiration is not checked here).
  var count = 0
  for entry in rdv.registered:
    if entry.peerId == peerId:
      inc count
  count
proc save(
    rdv: RendezVous, ns: string, peerId: PeerId, r: Register, update: bool = true
) =
  ## Store registration `r` for `peerId` under namespace `ns`.
  ## An existing entry of the same peer in the namespace is marked expired and
  ## a new entry is appended — entries are never removed in place, so indices
  ## referenced by outstanding cookies stay stable. With `update == false`, an
  ## existing entry is kept untouched and the new registration is dropped.
  let nsSalted = ns & rdv.salt
  discard rdv.namespaces.hasKeyOrPut(nsSalted, newSeq[int]())
  try:
    for index in rdv.namespaces[nsSalted]:
      if rdv.registered[index].peerId == peerId:
        if update == false:
          return
        # Expire rather than delete, to preserve index stability.
        rdv.registered[index].expiration = rdv.expiredDT
    rdv.registered.add(
      RegisteredData(
        peerId: peerId,
        expiration: Moment.now() + r.ttl.get(rdv.minTTL).int64.seconds,
        data: r,
      )
    )
    rdv.namespaces[nsSalted].add(rdv.registered.high)
    # rdv.registerEvent.fire()
  except KeyError as e:
    doAssert false, "Should have key: " & e.msg
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
  ## Handle an inbound Register: validate namespace length, TTL range, signed
  ## peer record and the per-peer registration quota; on success store the
  ## registration and reply Ok with the granted TTL. Returns the future of the
  ## response write (this proc is itself not async).
  trace "Received Register", peerId = conn.peerId, ns = r.ns
  libp2p_rendezvous_register.inc()
  if r.ns.len < MinimumNamespaceLen or r.ns.len > MaximumNamespaceLen:
    return conn.sendRegisterResponseError(InvalidNamespace)
  let ttl = r.ttl.get(rdv.minTTL)
  if ttl < rdv.minTTL or ttl > rdv.maxTTL:
    return conn.sendRegisterResponseError(InvalidTTL)
  let pr = checkPeerRecord(r.signedPeerRecord, conn.peerId)
  if pr.isErr():
    return conn.sendRegisterResponseError(InvalidSignedPeerRecord, pr.error())
  if rdv.countRegister(conn.peerId) >= RegistrationLimitPerPeer:
    return conn.sendRegisterResponseError(NotAuthorized, "Registration limit reached")
  rdv.save(r.ns, conn.peerId, r)
  libp2p_rendezvous_registered.inc()
  libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
  conn.sendRegisterResponse(ttl)
proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
  ## Handle an inbound Unregister: expire every registration of the requesting
  ## peer in the given namespace. Unknown namespaces are silently ignored.
  trace "Received Unregister", peerId = conn.peerId, ns = u.ns
  let nsSalted = u.ns & rdv.salt
  try:
    for index in rdv.namespaces[nsSalted]:
      if rdv.registered[index].peerId == conn.peerId:
        rdv.registered[index].expiration = rdv.expiredDT
        # NOTE(review): already-expired entries are decremented again here —
        # verify the gauge cannot drift on repeated Unregister requests.
        libp2p_rendezvous_registered.dec()
  except KeyError:
    return
proc discover(
    rdv: RendezVous, conn: Connection, d: Discover
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Handle an inbound Discover: page through the registrations for the
  ## requested namespace (or all of them), starting at the offset carried by
  ## the request cookie, and reply with up to `limit` live entries plus a new
  ## cookie pointing just past the last entry returned.
  trace "Received Discover", peerId = conn.peerId, ns = d.ns
  libp2p_rendezvous_discover.inc()
  if d.ns.isSome() and d.ns.get().len > MaximumNamespaceLen:
    await conn.sendDiscoverResponseError(InvalidNamespace)
    return
  # Page size is capped at DiscoverLimit regardless of the requested limit.
  var limit = min(DiscoverLimit, d.limit.get(DiscoverLimit))
  var cookie =
    if d.cookie.isSome():
      try:
        Cookie.decode(d.cookie.tryGet()).tryGet()
      except CatchableError:
        await conn.sendDiscoverResponseError(InvalidCookie)
        return
    else:
      # Start from the current lowest index (inclusive)
      Cookie(offset: rdv.registered.low().uint64)
  if d.ns.isSome() and cookie.ns.isSome() and cookie.ns.get() != d.ns.get():
    # Namespace changed: start from the beginning of that namespace
    cookie = Cookie(offset: rdv.registered.low().uint64)
  elif cookie.offset < rdv.registered.low().uint64:
    # Cookie behind available range: reset to current low
    cookie.offset = rdv.registered.low().uint64
  elif cookie.offset > (rdv.registered.high() + 1).uint64:
    # Cookie ahead of available range: reset to one past current high (empty page)
    cookie.offset = (rdv.registered.high() + 1).uint64
  let namespaces =
    if d.ns.isSome():
      try:
        rdv.namespaces[d.ns.get() & rdv.salt]
      except KeyError:
        await conn.sendDiscoverResponseError(InvalidNamespace)
        return
    else:
      # No namespace filter: walk every index still present in the queue.
      toSeq(max(cookie.offset.int, rdv.registered.offset) .. rdv.registered.high())
  if namespaces.len() == 0:
    await conn.sendDiscoverResponse(@[], Cookie())
    return
  var nextOffset = cookie.offset
  let n = Moment.now()
  var s = collect(newSeq()):
    for index in namespaces:
      var reg = rdv.registered[index]
      if limit == 0:
        break
      # Skip expired entries and entries before the cookie offset.
      if reg.expiration < n or index.uint64 < cookie.offset:
        continue
      limit.dec()
      nextOffset = index.uint64 + 1
      # Report the TTL actually remaining for this entry, not the original.
      reg.data.ttl = Opt.some((reg.expiration - Moment.now()).seconds.uint64)
      reg.data
  rdv.rng.shuffle(s)
  await conn.sendDiscoverResponse(s, Cookie(offset: nextOffset, ns: d.ns))
proc advertisePeer(
    rdv: RendezVous, peer: PeerId, msg: seq[byte]
) {.async: (raises: [CancelledError]).} =
  ## Send the pre-encoded Register message `msg` to `peer` and trace the
  ## outcome. Concurrency across peers is bounded by `rdv.sema`.
  proc advertiseWrap() {.async: (raises: []).} =
    try:
      let conn = await rdv.switch.dial(peer, RendezVousCodec)
      defer:
        await conn.close()
      await conn.writeLp(msg)
      let
        buf = await conn.readLp(4096)
        msgRecv = Message.decode(buf).tryGet()
      if msgRecv.msgType != MessageType.RegisterResponse:
        trace "Unexpected register response", peer, msgType = msgRecv.msgType
      elif msgRecv.registerResponse.tryGet().status != ResponseStatus.Ok:
        trace "Refuse to register", peer, response = msgRecv.registerResponse
      else:
        trace "Successfully registered", peer, response = msgRecv.registerResponse
    except CatchableError as exc:
      # Best effort: a failure against one peer is only traced.
      trace "exception in the advertise", description = exc.msg
    finally:
      # Always free the semaphore slot, even on failure.
      rdv.sema.release()

  await rdv.sema.acquire()
  await advertiseWrap()
proc advertise*(
    rdv: RendezVous, ns: string, ttl: Duration, peers: seq[PeerId]
) {.async: (raises: [CancelledError, AdvertiseError]).} =
  ## Register this node under namespace `ns` with time-to-live `ttl`, locally
  ## and on every peer in `peers`; each remote registration gets a 5-second
  ## timeout. Raises AdvertiseError for an invalid namespace, an out-of-range
  ## ttl, or when the local signed peer record cannot be encoded.
  if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
    raise newException(AdvertiseError, "Invalid namespace")
  if ttl < rdv.minDuration or ttl > rdv.maxDuration:
    raise newException(AdvertiseError, "Invalid time to live: " & $ttl)
  let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
    raise newException(AdvertiseError, "Wrong Signed Peer Record")
  let
    r = Register(ns: ns, signedPeerRecord: sprBuff, ttl: Opt.some(ttl.seconds.uint64))
    msg = encode(Message(msgType: MessageType.Register, register: Opt.some(r)))
  rdv.save(ns, rdv.switch.peerInfo.peerId, r)
  let futs = collect(newSeq()):
    for peer in peers:
      trace "Send Advertise", peerId = peer, ns
      rdv.advertisePeer(peer, msg.buffer).withTimeout(5.seconds)
  await allFutures(futs)

method advertise*(
    rdv: RendezVous, ns: string, ttl: Duration = rdv.minDuration
) {.base, async: (raises: [CancelledError, AdvertiseError]).} =
  ## Advertise `ns` to all currently known rendezvous peers.
  await rdv.advertise(ns, ttl, rdv.peers)
proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
  ## Return the peer records currently registered (and not yet expired) under
  ## namespace `ns`, using only local state — no network requests are made.
  ## Yields an empty seq for unknown namespaces; undecodable records are
  ## skipped silently.
  let
    nsSalted = ns & rdv.salt
    n = Moment.now()
  try:
    collect(newSeq()):
      for index in rdv.namespaces[nsSalted]:
        if rdv.registered[index].expiration > n:
          let res = SignedPeerRecord.decode(rdv.registered[index].data.signedPeerRecord).valueOr:
            continue
          res.data
  except KeyError:
    # Unknown namespace: nothing registered locally.
    @[]
proc request*(
    rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int, peers: seq[PeerId]
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
  ## Query the peers in `peers`, in order, for registrations under `ns`
  ## (or all namespaces when none), until `l` records have been gathered.
  ## Pagination cookies are remembered per peer/namespace and replayed on the
  ## next request. Results are deduplicated per peer id, keeping the freshest
  ## record (highest seqNo; on a tie, the longer remaining ttl).
  ## Raises AdvertiseError for an invalid limit or namespace.
  var
    s: Table[PeerId, (PeerRecord, Register)]
    limit: uint64
    d = Discover(ns: ns)
  if l <= 0 or l > DiscoverLimit.int:
    raise newException(AdvertiseError, "Invalid limit")
  if ns.isSome() and ns.get().len > MaximumNamespaceLen:
    raise newException(AdvertiseError, "Invalid namespace")
  limit = l.uint64

  proc requestPeer(
      peer: PeerId
  ) {.async: (raises: [CancelledError, DialFailedError, LPStreamError]).} =
    ## Run one Discover round-trip against `peer`, folding results into `s`.
    let conn = await rdv.switch.dial(peer, RendezVousCodec)
    defer:
      await conn.close()
    d.limit = Opt.some(limit)
    d.cookie =
      if ns.isSome():
        try:
          # Resume from the cookie saved for this peer/namespace, if any.
          Opt.some(rdv.cookiesSaved[peer][ns.get()])
        except KeyError, CatchableError:
          Opt.none(seq[byte])
      else:
        Opt.none(seq[byte])
    await conn.writeLp(
      encode(Message(msgType: MessageType.Discover, discover: Opt.some(d))).buffer
    )
    let
      buf = await conn.readLp(MaximumMessageLen)
      msgRcv = Message.decode(buf).valueOr:
        debug "Message undecodable"
        return
    if msgRcv.msgType != MessageType.DiscoverResponse:
      debug "Unexpected discover response", msgType = msgRcv.msgType
      return
    let resp = msgRcv.discoverResponse.valueOr:
      debug "Discover response is empty"
      return
    if resp.status != ResponseStatus.Ok:
      trace "Cannot discover", ns, status = resp.status, text = resp.text
      return
    resp.cookie.withValue(cookie):
      if ns.isSome:
        let namespace = ns.get()
        # Only remember reasonably-sized cookies (< 1000 bytes).
        if cookie.len() < 1000 and
            rdv.cookiesSaved.hasKeyOrPut(peer, {namespace: cookie}.toTable()):
          try:
            rdv.cookiesSaved[peer][namespace] = cookie
          except KeyError:
            raiseAssert "checked with hasKeyOrPut"
    for r in resp.registrations:
      if limit == 0:
        return
      # Registrations without a ttl, or with one above our maximum, are dropped.
      let ttl = r.ttl.get(rdv.maxTTL + 1)
      if ttl > rdv.maxTTL:
        continue
      let
        spr = SignedPeerRecord.decode(r.signedPeerRecord).valueOr:
          continue
        pr = spr.data
      if s.hasKey(pr.peerId):
        let (prSaved, rSaved) =
          try:
            s[pr.peerId]
          except KeyError:
            raiseAssert "checked with hasKey"
        # Keep the freshest: higher seqNo wins; same seqNo, longer ttl wins.
        if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get(rdv.maxTTL) < ttl) or
            prSaved.seqNo < pr.seqNo:
          s[pr.peerId] = (pr, r)
      else:
        s[pr.peerId] = (pr, r)
      limit.dec()
    if ns.isSome():
      # Mirror remote results into the local registry (no overwrite on update).
      for (_, r) in s.values():
        rdv.save(ns.get(), peer, r, false)

  for peer in peers:
    if limit == 0:
      break
    if RendezVousCodec notin rdv.switch.peerStore[ProtoBook][peer]:
      continue
    try:
      trace "Send Request", peerId = peer, ns
      await peer.requestPeer()
    except CancelledError as e:
      raise e
    except DialFailedError as e:
      trace "failed to dial a peer", description = e.msg
    except LPStreamError as e:
      trace "failed to communicate with a peer", description = e.msg
  return toSeq(s.values()).mapIt(it[0])
proc request*(
    rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
  ## Discover up to `l` peers under `ns` using all known rendezvous peers.
  await rdv.request(ns, l, rdv.peers)

proc request*(
    rdv: RendezVous, l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
  ## Discover up to `l` peers with no namespace filter, using all known peers.
  await rdv.request(Opt.none(string), l, rdv.peers)
proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
  ## Expire every registration made by the local peer under `ns`.
  ## Unknown namespaces are ignored.
  let nsSalted = ns & rdv.salt
  try:
    for index in rdv.namespaces[nsSalted]:
      if rdv.registered[index].peerId == rdv.switch.peerInfo.peerId:
        rdv.registered[index].expiration = rdv.expiredDT
  except KeyError:
    return

proc unsubscribe*(
    rdv: RendezVous, ns: string, peerIds: seq[PeerId]
) {.async: (raises: [RendezVousError, CancelledError]).} =
  ## Send an Unregister for `ns` to every peer in `peerIds`. Best effort:
  ## per-peer failures are only traced. Raises RendezVousError when the
  ## namespace length is out of bounds.
  if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
    raise newException(RendezVousError, "Invalid namespace")
  let msg = encode(
    Message(msgType: MessageType.Unregister, unregister: Opt.some(Unregister(ns: ns)))
  )

  proc unsubscribePeer(peerId: PeerId) {.async: (raises: []).} =
    try:
      let conn = await rdv.switch.dial(peerId, RendezVousCodec)
      defer:
        await conn.close()
      await conn.writeLp(msg.buffer)
    except CatchableError as exc:
      trace "exception while unsubscribing", description = exc.msg

  let futs = collect(newSeq()):
    for peer in peerIds:
      unsubscribePeer(peer)
  await allFutures(futs)

proc unsubscribe*(
    rdv: RendezVous, ns: string
) {.async: (raises: [RendezVousError, CancelledError]).} =
  ## Unsubscribe from `ns` locally and on all known rendezvous peers.
  rdv.unsubscribeLocally(ns)
  await rdv.unsubscribe(ns, rdv.peers)
proc setup*(rdv: RendezVous, switch: Switch) =
  ## Attach `rdv` to `switch` and install peer event handlers so that
  ## `rdv.peers` tracks currently joined peers.
  rdv.switch = switch
  proc handlePeer(
      peerId: PeerId, event: PeerEvent
  ) {.async: (raises: [CancelledError]).} =
    if event.kind == PeerEventKind.Joined:
      rdv.peers.add(peerId)
    elif event.kind == PeerEventKind.Left:
      rdv.peers.keepItIf(it != peerId)

  rdv.switch.addPeerEventHandler(handlePeer, Joined)
  rdv.switch.addPeerEventHandler(handlePeer, Left)
proc new*(
    T: typedesc[RendezVous],
    rng: ref HmacDrbgContext = newRng(),
    minDuration = MinimumDuration,
    maxDuration = MaximumDuration,
): T {.raises: [RendezVousError].} =
  ## Create a RendezVous instance and install its stream handler.
  ## `minDuration`/`maxDuration` bound the registration TTLs this node
  ## accepts. Raises RendezVousError when the bounds are out of range or
  ## inverted.
  if minDuration < MinimumAcceptedDuration:
    raise newException(RendezVousError, "TTL too short: 1 minute minimum")
  if maxDuration > MaximumDuration:
    raise newException(RendezVousError, "TTL too long: 72 hours maximum")
  if minDuration >= maxDuration:
    raise newException(RendezVousError, "Minimum TTL longer than maximum")
  let
    minTTL = minDuration.seconds.uint64
    maxTTL = maxDuration.seconds.uint64
  let rdv = T(
    rng: rng,
    # Random salt so namespace keys are specific to this instance.
    salt: string.fromBytes(generateBytes(rng[], 8)),
    registered: initOffsettedSeq[RegisteredData](),
    # Sentinel moment safely in the past, used to mark entries expired.
    expiredDT: Moment.now() - 1.days,
    #registerEvent: newAsyncEvent(),
    sema: newAsyncSemaphore(SemaphoreDefaultSize),
    minDuration: minDuration,
    maxDuration: maxDuration,
    minTTL: minTTL,
    maxTTL: maxTTL,
  )
  logScope:
    topics = "libp2p discovery rendezvous"
  proc handleStream(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    ## Read one inbound message and dispatch it to the matching handler.
    try:
      let
        buf = await conn.readLp(4096)
        msg = Message.decode(buf).tryGet()
      case msg.msgType
      of MessageType.Register:
        await rdv.register(conn, msg.register.tryGet())
      of MessageType.RegisterResponse:
        trace "Got an unexpected Register Response", response = msg.registerResponse
      of MessageType.Unregister:
        rdv.unregister(conn, msg.unregister.tryGet())
      of MessageType.Discover:
        await rdv.discover(conn, msg.discover.tryGet())
      of MessageType.DiscoverResponse:
        trace "Got an unexpected Discover Response", response = msg.discoverResponse
    except CancelledError as exc:
      trace "cancelled rendezvous handler"
      raise exc
    except CatchableError as exc:
      trace "exception in rendezvous handler", description = exc.msg
    finally:
      await conn.close()

  rdv.handler = handleStream
  rdv.codec = RendezVousCodec
  return rdv
proc new*(
    T: typedesc[RendezVous],
    switch: Switch,
    rng: ref HmacDrbgContext = newRng(),
    minDuration = MinimumDuration,
    maxDuration = MaximumDuration,
): T {.raises: [RendezVousError].} =
  ## Convenience constructor that also wires the new instance to `switch`.
  let rdv = T.new(rng, minDuration, maxDuration)
  rdv.setup(switch)
  return rdv
proc deletesRegister*(
    rdv: RendezVous, interval = 1.minutes
) {.async: (raises: [CancelledError]).} =
  ## Periodic cleanup loop: every `interval`, flush expired registrations from
  ## the offsetted queue, drop the now-dangling indices from each namespace
  ## and refresh the metrics gauges.
  heartbeat "Register timeout", interval:
    let n = Moment.now()
    var total = 0
    rdv.registered.flushIfIt(it.expiration < n)
    for data in rdv.namespaces.mvalues():
      # Indices below the new queue offset were flushed away.
      data.keepItIf(it >= rdv.registered.offset)
      total += data.len
    libp2p_rendezvous_registered.set(int64(total))
    libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
method start*(
    rdv: RendezVous
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
  ## Start the background registration-cleanup loop.
  ## Idempotent: a second start only warns.
  let fut = newFuture[void]()
  fut.complete()
  if not rdv.registerDeletionLoop.isNil:
    warn "Starting rendezvous twice"
    return fut
  rdv.registerDeletionLoop = rdv.deletesRegister()
  rdv.started = true
  fut

method stop*(rdv: RendezVous): Future[void] {.async: (raises: [], raw: true).} =
  ## Stop the background registration-cleanup loop.
  ## Idempotent: stopping when not started only warns.
  let fut = newFuture[void]()
  fut.complete()
  if rdv.registerDeletionLoop.isNil:
    warn "Stopping rendezvous without starting it"
    return fut
  rdv.started = false
  rdv.registerDeletionLoop.cancelSoon()
  rdv.registerDeletionLoop = nil
  fut
export rendezvous

View File

@@ -0,0 +1,275 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import results
import stew/objects
import ../../protobuf/minprotobuf
type
  # Wire message kinds of the libp2p rendezvous protocol.
  MessageType* {.pure.} = enum
    Register = 0
    RegisterResponse = 1
    Unregister = 2
    Discover = 3
    DiscoverResponse = 4

  # Status codes carried by RegisterResponse / DiscoverResponse.
  ResponseStatus* = enum
    Ok = 0
    InvalidNamespace = 100
    InvalidSignedPeerRecord = 101
    InvalidTTL = 102
    InvalidCookie = 103
    NotAuthorized = 200
    InternalError = 300
    Unavailable = 400

  # Pagination token handed back by DiscoverResponse and replayed by Discover.
  Cookie* = object
    offset*: uint64 # next index to serve in the registration queue
    ns*: Opt[string] # namespace this cookie is bound to, if any

  Register* = object
    ns*: string
    signedPeerRecord*: seq[byte]
    ttl*: Opt[uint64] # in seconds

  RegisterResponse* = object
    status*: ResponseStatus
    text*: Opt[string] # human-readable detail for error statuses
    ttl*: Opt[uint64] # in seconds

  Unregister* = object
    ns*: string

  Discover* = object
    ns*: Opt[string]
    limit*: Opt[uint64]
    cookie*: Opt[seq[byte]]

  DiscoverResponse* = object
    registrations*: seq[Register]
    cookie*: Opt[seq[byte]]
    status*: ResponseStatus
    text*: Opt[string]

  # Envelope: msgType selects which of the payload fields is populated.
  Message* = object
    msgType*: MessageType
    register*: Opt[Register]
    registerResponse*: Opt[RegisterResponse]
    unregister*: Opt[Unregister]
    discover*: Opt[Discover]
    discoverResponse*: Opt[DiscoverResponse]
proc encode*(c: Cookie): ProtoBuffer =
  ## Serialize a pagination cookie: 1 = offset, 2 = optional namespace.
  var pb = initProtoBuffer()
  pb.write(1, c.offset)
  c.ns.withValue(ns):
    pb.write(2, ns)
  pb.finish()
  pb

proc encode*(r: Register): ProtoBuffer =
  ## Serialize a Register: 1 = ns, 2 = signed peer record, 3 = optional ttl.
  var pb = initProtoBuffer()
  pb.write(1, r.ns)
  pb.write(2, r.signedPeerRecord)
  if r.ttl.isSome():
    pb.write(3, r.ttl.get())
  pb.finish()
  pb

proc encode*(rr: RegisterResponse): ProtoBuffer =
  ## Serialize a RegisterResponse: 1 = status, 2 = optional text,
  ## 3 = optional ttl.
  var pb = initProtoBuffer()
  pb.write(1, rr.status.uint)
  if rr.text.isSome():
    pb.write(2, rr.text.get())
  if rr.ttl.isSome():
    pb.write(3, rr.ttl.get())
  pb.finish()
  pb

proc encode*(u: Unregister): ProtoBuffer =
  ## Serialize an Unregister: field 1 = namespace.
  var pb = initProtoBuffer()
  pb.write(1, u.ns)
  pb.finish()
  pb

proc encode*(d: Discover): ProtoBuffer =
  ## Serialize a Discover: 1 = optional ns, 2 = optional limit,
  ## 3 = optional cookie.
  var pb = initProtoBuffer()
  d.ns.withValue(ns):
    pb.write(1, ns)
  d.limit.withValue(limit):
    pb.write(2, limit)
  d.cookie.withValue(cookie):
    pb.write(3, cookie)
  pb.finish()
  pb

proc encode*(dr: DiscoverResponse): ProtoBuffer =
  ## Serialize a DiscoverResponse: 1 = repeated registrations, 2 = cookie,
  ## 3 = status, 4 = text.
  var pb = initProtoBuffer()
  for reg in dr.registrations:
    pb.write(1, reg.encode())
  if dr.cookie.isSome():
    pb.write(2, dr.cookie.get())
  pb.write(3, dr.status.uint)
  if dr.text.isSome():
    pb.write(4, dr.text.get())
  pb.finish()
  pb

proc encode*(msg: Message): ProtoBuffer =
  ## Serialize a Message envelope: 1 = type tag, 2..6 = the nested payloads
  ## that are present.
  var pb = initProtoBuffer()
  pb.write(1, msg.msgType.uint)
  if msg.register.isSome():
    pb.write(2, msg.register.get().encode())
  if msg.registerResponse.isSome():
    pb.write(3, msg.registerResponse.get().encode())
  if msg.unregister.isSome():
    pb.write(4, msg.unregister.get().encode())
  if msg.discover.isSome():
    pb.write(5, msg.discover.get().encode())
  if msg.discoverResponse.isSome():
    pb.write(6, msg.discoverResponse.get().encode())
  pb.finish()
  pb
proc decode*(_: typedesc[Cookie], buf: seq[byte]): Opt[Cookie] =
  ## Parse a Cookie; field 1 (offset) is mandatory, field 2 (ns) optional.
  var
    cookie: Cookie
    namespace: string
  let pb = initProtoBuffer(buf)
  let
    offsetRes = pb.getRequiredField(1, cookie.offset)
    nsRes = pb.getField(2, namespace)
  if offsetRes.isErr() or nsRes.isErr():
    return Opt.none(Cookie)
  if nsRes.get(false):
    cookie.ns = Opt.some(namespace)
  Opt.some(cookie)

proc decode*(_: typedesc[Register], buf: seq[byte]): Opt[Register] =
  ## Parse a Register; ns and signedPeerRecord are mandatory, ttl optional.
  var
    reg: Register
    ttlVal: uint64
  let pb = initProtoBuffer(buf)
  let
    nsRes = pb.getRequiredField(1, reg.ns)
    sprRes = pb.getRequiredField(2, reg.signedPeerRecord)
    ttlRes = pb.getField(3, ttlVal)
  if nsRes.isErr() or sprRes.isErr() or ttlRes.isErr():
    return Opt.none(Register)
  if ttlRes.get(false):
    reg.ttl = Opt.some(ttlVal)
  Opt.some(reg)

proc decode*(_: typedesc[RegisterResponse], buf: seq[byte]): Opt[RegisterResponse] =
  ## Parse a RegisterResponse; status (field 1) is mandatory and must map to
  ## a known ResponseStatus; text and ttl are optional.
  var
    resp: RegisterResponse
    statusOrd: uint
    textVal: string
    ttlVal: uint64
  let pb = initProtoBuffer(buf)
  let
    statusRes = pb.getRequiredField(1, statusOrd)
    textRes = pb.getField(2, textVal)
    ttlRes = pb.getField(3, ttlVal)
  if statusRes.isErr() or textRes.isErr() or ttlRes.isErr() or
      not checkedEnumAssign(resp.status, statusOrd):
    return Opt.none(RegisterResponse)
  if textRes.get(false):
    resp.text = Opt.some(textVal)
  if ttlRes.get(false):
    resp.ttl = Opt.some(ttlVal)
  Opt.some(resp)

proc decode*(_: typedesc[Unregister], buf: seq[byte]): Opt[Unregister] =
  ## Parse an Unregister; the namespace (field 1) is mandatory.
  var u: Unregister
  if initProtoBuffer(buf).getRequiredField(1, u.ns).isErr():
    return Opt.none(Unregister)
  Opt.some(u)

proc decode*(_: typedesc[Discover], buf: seq[byte]): Opt[Discover] =
  ## Parse a Discover; all three fields (ns, limit, cookie) are optional.
  var
    disc: Discover
    limitVal: uint64
    cookieVal: seq[byte]
    nsVal: string
  let pb = initProtoBuffer(buf)
  let
    nsRes = pb.getField(1, nsVal)
    limitRes = pb.getField(2, limitVal)
    cookieRes = pb.getField(3, cookieVal)
  if nsRes.isErr() or limitRes.isErr() or cookieRes.isErr():
    return Opt.none(Discover)
  if nsRes.get(false):
    disc.ns = Opt.some(nsVal)
  if limitRes.get(false):
    disc.limit = Opt.some(limitVal)
  if cookieRes.get(false):
    disc.cookie = Opt.some(cookieVal)
  Opt.some(disc)
proc decode*(_: typedesc[DiscoverResponse], buf: seq[byte]): Opt[DiscoverResponse] =
  ## Parse a DiscoverResponse; status (field 3) is mandatory and must map to a
  ## known ResponseStatus; registrations, cookie and text are optional.
  ## Returns none on any malformed field, including an undecodable nested
  ## Register.
  var
    dr: DiscoverResponse
    registrations: seq[seq[byte]]
    cookie: seq[byte]
    statusOrd: uint
    text: string
  let
    pb = initProtoBuffer(buf)
    r1 = pb.getRepeatedField(1, registrations)
    r2 = pb.getField(2, cookie)
    r3 = pb.getRequiredField(3, statusOrd)
    r4 = pb.getField(4, text)
  if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr() or
      not checkedEnumAssign(dr.status, statusOrd):
    return Opt.none(DiscoverResponse)
  for reg in registrations:
    # A nested Register that fails to decode invalidates the whole response.
    let decoded = Register.decode(reg).valueOr:
      return Opt.none(DiscoverResponse)
    dr.registrations.add(decoded)
  if r2.get(false):
    dr.cookie = Opt.some(cookie)
  if r4.get(false):
    dr.text = Opt.some(text)
  Opt.some(dr)
proc decode*(_: typedesc[Message], buf: seq[byte]): Opt[Message] =
var
msg: Message
statusOrd: uint
pbr, pbrr, pbu, pbd, pbdr: ProtoBuffer
let pb = initProtoBuffer(buf)
?pb.getRequiredField(1, statusOrd).toOpt
if not checkedEnumAssign(msg.msgType, statusOrd):
return Opt.none(Message)
if ?pb.getField(2, pbr).optValue:
msg.register = Register.decode(pbr.buffer)
if msg.register.isNone():
return Opt.none(Message)
if ?pb.getField(3, pbrr).optValue:
msg.registerResponse = RegisterResponse.decode(pbrr.buffer)
if msg.registerResponse.isNone():
return Opt.none(Message)
if ?pb.getField(4, pbu).optValue:
msg.unregister = Unregister.decode(pbu.buffer)
if msg.unregister.isNone():
return Opt.none(Message)
if ?pb.getField(5, pbd).optValue:
msg.discover = Discover.decode(pbd.buffer)
if msg.discover.isNone():
return Opt.none(Message)
if ?pb.getField(6, pbdr).optValue:
msg.discoverResponse = DiscoverResponse.decode(pbdr.buffer)
if msg.discoverResponse.isNone():
return Opt.none(Message)
Opt.some(msg)

View File

@@ -0,0 +1,589 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import tables, sequtils, sugar, sets
import metrics except collect
import chronos, chronicles, bearssl/rand, stew/[byteutils, objects]
import
./protobuf,
../protocol,
../../protobuf/minprotobuf,
../../switch,
../../routing_record,
../../utils/heartbeat,
../../stream/connection,
../../utils/offsettedseq,
../../utils/semaphore,
../../discovery/discoverymngr
export chronicles
logScope:
topics = "libp2p discovery rendezvous"
declareCounter(libp2p_rendezvous_register, "number of advertise requests")
declareCounter(libp2p_rendezvous_discover, "number of discovery requests")
declareGauge(libp2p_rendezvous_registered, "number of registered peers")
declareGauge(libp2p_rendezvous_namespaces, "number of registered namespaces")
const
  RendezVousCodec* = "/rendezvous/1.0.0"
  # Default minimum TTL per libp2p spec
  MinimumDuration* = 2.hours
  # Lower validation limit to accommodate Waku requirements
  MinimumAcceptedDuration = 1.minutes
  MaximumDuration = 72.hours
  MaximumMessageLen = 1 shl 22 # 4MB
  MinimumNamespaceLen = 1
  MaximumNamespaceLen = 255
  # Maximum number of simultaneous registrations accepted from a single peer.
  RegistrationLimitPerPeer* = 1000
  # Maximum number of registrations returned per Discover page.
  DiscoverLimit = 1000'u64
  # Concurrent outbound advertise dials allowed by the semaphore.
  SemaphoreDefaultSize = 5
type
  RendezVousError* = object of DiscoveryError

  RegisteredData = object
    expiration*: Moment # instant after which this entry is no longer served
    peerId*: PeerId # peer that made the registration
    data*: Register # original Register message (ns, SPR, ttl)

  RendezVous* = ref object of LPProtocol
    # Registered needs to be an offsetted sequence
    # because we need stable index for the cookies.
    registered*: OffsettedSeq[RegisteredData]
    # Namespaces is a table whose key is a salted namespace and
    # the value is the index sequence corresponding to this
    # namespace in the offsettedqueue.
    namespaces*: Table[string, seq[int]]
    rng: ref HmacDrbgContext # randomness for the salt and response shuffling
    salt: string # per-instance salt appended to namespace keys
    expiredDT: Moment # sentinel moment in the past used to mark entries expired
    registerDeletionLoop: Future[void] # cleanup heartbeat, started in `start`
    #registerEvent: AsyncEvent # TODO: to raise during the heartbeat
    # + make the heartbeat sleep duration "smarter"
    sema: AsyncSemaphore # bounds concurrent outbound advertise dials
    peers: seq[PeerId] # currently joined peers, maintained by event handlers
    cookiesSaved*: Table[PeerId, Table[string, seq[byte]]] # cookie per peer/ns
    switch*: Switch
    minDuration: Duration # lower bound for accepted registration TTLs
    maxDuration: Duration # upper bound for accepted registration TTLs
    minTTL: uint64 # minDuration in seconds
    maxTTL: uint64 # maxDuration in seconds
proc checkPeerRecord(spr: seq[byte], peerId: PeerId): Result[void, string] =
  ## Verify that `spr` decodes to a SignedPeerRecord whose embedded peer id
  ## matches `peerId`; returns a descriptive error string otherwise.
  if spr.len == 0:
    return err("Empty peer record")
  let envelope = ?SignedPeerRecord.decode(spr).mapErr(x => $x)
  if envelope.data.peerId == peerId:
    ok()
  else:
    err("Bad Peer ID")
proc sendRegisterResponse(
    conn: Connection, ttl: uint64
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Reply to a successful Register with status Ok and the granted TTL.
  let response = RegisterResponse(status: Ok, ttl: Opt.some(ttl))
  let msg = Message(
    msgType: MessageType.RegisterResponse, registerResponse: Opt.some(response)
  )
  await conn.writeLp(encode(msg).buffer)

proc sendRegisterResponseError(
    conn: Connection, status: ResponseStatus, text: string = ""
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Reply to a rejected Register with an error `status` and optional `text`.
  let response = RegisterResponse(status: status, text: Opt.some(text))
  let msg = Message(
    msgType: MessageType.RegisterResponse, registerResponse: Opt.some(response)
  )
  await conn.writeLp(encode(msg).buffer)

proc sendDiscoverResponse(
    conn: Connection, s: seq[Register], cookie: Cookie
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Reply to a Discover with the matching registrations and a pagination
  ## cookie for the next page.
  let response = DiscoverResponse(
    status: Ok, registrations: s, cookie: Opt.some(cookie.encode().buffer)
  )
  let msg = Message(
    msgType: MessageType.DiscoverResponse, discoverResponse: Opt.some(response)
  )
  await conn.writeLp(encode(msg).buffer)

proc sendDiscoverResponseError(
    conn: Connection, status: ResponseStatus, text: string = ""
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Reply to a rejected Discover with an error `status` and optional `text`.
  let response = DiscoverResponse(status: status, text: Opt.some(text))
  let msg = Message(
    msgType: MessageType.DiscoverResponse, discoverResponse: Opt.some(response)
  )
  await conn.writeLp(encode(msg).buffer)
proc countRegister(rdv: RendezVous, peerId: PeerId): int =
  ## Count the entries in the registration queue belonging to `peerId`
  ## (expiration is not checked here).
  var count = 0
  for entry in rdv.registered:
    if entry.peerId == peerId:
      inc count
  count
proc save(
    rdv: RendezVous, ns: string, peerId: PeerId, r: Register, update: bool = true
) =
  ## Store registration `r` for `peerId` under namespace `ns`.
  ## An existing entry of the same peer in the namespace is marked expired and
  ## a new entry is appended — entries are never removed in place, so indices
  ## referenced by outstanding cookies stay stable. With `update == false`, an
  ## existing entry is kept untouched and the new registration is dropped.
  let nsSalted = ns & rdv.salt
  discard rdv.namespaces.hasKeyOrPut(nsSalted, newSeq[int]())
  try:
    for index in rdv.namespaces[nsSalted]:
      if rdv.registered[index].peerId == peerId:
        if update == false:
          return
        # Expire rather than delete, to preserve index stability.
        rdv.registered[index].expiration = rdv.expiredDT
    rdv.registered.add(
      RegisteredData(
        peerId: peerId,
        expiration: Moment.now() + r.ttl.get(rdv.minTTL).int64.seconds,
        data: r,
      )
    )
    rdv.namespaces[nsSalted].add(rdv.registered.high)
    # rdv.registerEvent.fire()
  except KeyError as e:
    doAssert false, "Should have key: " & e.msg
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
  ## Handle an inbound Register: validate namespace length, TTL range, signed
  ## peer record and the per-peer registration quota; on success store the
  ## registration and reply Ok with the granted TTL. Returns the future of the
  ## response write (this proc is itself not async).
  trace "Received Register", peerId = conn.peerId, ns = r.ns
  libp2p_rendezvous_register.inc()
  if r.ns.len < MinimumNamespaceLen or r.ns.len > MaximumNamespaceLen:
    return conn.sendRegisterResponseError(InvalidNamespace)
  let ttl = r.ttl.get(rdv.minTTL)
  if ttl < rdv.minTTL or ttl > rdv.maxTTL:
    return conn.sendRegisterResponseError(InvalidTTL)
  let pr = checkPeerRecord(r.signedPeerRecord, conn.peerId)
  if pr.isErr():
    return conn.sendRegisterResponseError(InvalidSignedPeerRecord, pr.error())
  if rdv.countRegister(conn.peerId) >= RegistrationLimitPerPeer:
    return conn.sendRegisterResponseError(NotAuthorized, "Registration limit reached")
  rdv.save(r.ns, conn.peerId, r)
  libp2p_rendezvous_registered.inc()
  libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
  conn.sendRegisterResponse(ttl)
proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
  ## Handle an inbound Unregister: expire every registration of the requesting
  ## peer in the given namespace. Unknown namespaces are silently ignored.
  trace "Received Unregister", peerId = conn.peerId, ns = u.ns
  let nsSalted = u.ns & rdv.salt
  try:
    for index in rdv.namespaces[nsSalted]:
      if rdv.registered[index].peerId == conn.peerId:
        rdv.registered[index].expiration = rdv.expiredDT
        # NOTE(review): already-expired entries are decremented again here —
        # verify the gauge cannot drift on repeated Unregister requests.
        libp2p_rendezvous_registered.dec()
  except KeyError:
    return
proc discover(
    rdv: RendezVous, conn: Connection, d: Discover
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Handle an inbound Discover: page through the registrations for the
  ## requested namespace (or all of them), starting at the offset carried by
  ## the request cookie, and reply with up to `limit` live entries plus a new
  ## cookie pointing just past the last entry returned.
  trace "Received Discover", peerId = conn.peerId, ns = d.ns
  libp2p_rendezvous_discover.inc()
  if d.ns.isSome() and d.ns.get().len > MaximumNamespaceLen:
    await conn.sendDiscoverResponseError(InvalidNamespace)
    return
  # Page size is capped at DiscoverLimit regardless of the requested limit.
  var limit = min(DiscoverLimit, d.limit.get(DiscoverLimit))
  var cookie =
    if d.cookie.isSome():
      try:
        Cookie.decode(d.cookie.tryGet()).tryGet()
      except CatchableError:
        await conn.sendDiscoverResponseError(InvalidCookie)
        return
    else:
      # Start from the current lowest index (inclusive)
      Cookie(offset: rdv.registered.low().uint64)
  if d.ns.isSome() and cookie.ns.isSome() and cookie.ns.get() != d.ns.get():
    # Namespace changed: start from the beginning of that namespace
    cookie = Cookie(offset: rdv.registered.low().uint64)
  elif cookie.offset < rdv.registered.low().uint64:
    # Cookie behind available range: reset to current low
    cookie.offset = rdv.registered.low().uint64
  elif cookie.offset > (rdv.registered.high() + 1).uint64:
    # Cookie ahead of available range: reset to one past current high (empty page)
    cookie.offset = (rdv.registered.high() + 1).uint64
  let namespaces =
    if d.ns.isSome():
      try:
        rdv.namespaces[d.ns.get() & rdv.salt]
      except KeyError:
        await conn.sendDiscoverResponseError(InvalidNamespace)
        return
    else:
      # No namespace filter: walk every index still present in the queue.
      toSeq(max(cookie.offset.int, rdv.registered.offset) .. rdv.registered.high())
  if namespaces.len() == 0:
    await conn.sendDiscoverResponse(@[], Cookie())
    return
  var nextOffset = cookie.offset
  let n = Moment.now()
  var s = collect(newSeq()):
    for index in namespaces:
      var reg = rdv.registered[index]
      if limit == 0:
        break
      # Skip expired entries and entries before the cookie offset.
      if reg.expiration < n or index.uint64 < cookie.offset:
        continue
      limit.dec()
      nextOffset = index.uint64 + 1
      # Report the TTL actually remaining for this entry, not the original.
      reg.data.ttl = Opt.some((reg.expiration - Moment.now()).seconds.uint64)
      reg.data
  rdv.rng.shuffle(s)
  await conn.sendDiscoverResponse(s, Cookie(offset: nextOffset, ns: d.ns))
proc advertisePeer(
    rdv: RendezVous, peer: PeerId, msg: seq[byte]
) {.async: (raises: [CancelledError]).} =
  ## Send the pre-encoded Register message `msg` to `peer` and trace the
  ## outcome. Concurrency across peers is bounded by `rdv.sema`.
  proc advertiseWrap() {.async: (raises: []).} =
    try:
      let conn = await rdv.switch.dial(peer, RendezVousCodec)
      defer:
        await conn.close()
      await conn.writeLp(msg)
      let
        buf = await conn.readLp(4096)
        msgRecv = Message.decode(buf).tryGet()
      if msgRecv.msgType != MessageType.RegisterResponse:
        trace "Unexpected register response", peer, msgType = msgRecv.msgType
      elif msgRecv.registerResponse.tryGet().status != ResponseStatus.Ok:
        trace "Refuse to register", peer, response = msgRecv.registerResponse
      else:
        trace "Successfully registered", peer, response = msgRecv.registerResponse
    except CatchableError as exc:
      # Best effort: a failure against one peer is only traced.
      trace "exception in the advertise", description = exc.msg
    finally:
      # Always free the semaphore slot, even on failure.
      rdv.sema.release()

  await rdv.sema.acquire()
  await advertiseWrap()
proc advertise*(
    rdv: RendezVous, ns: string, ttl: Duration, peers: seq[PeerId]
) {.async: (raises: [CancelledError, AdvertiseError]).} =
  ## Register ourselves under namespace `ns` with the given `ttl`, both
  ## locally and on every peer in `peers` (each attempt capped at 5 seconds).
  ## Raises AdvertiseError on invalid namespace/ttl or when our signed peer
  ## record cannot be encoded.
  if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
    raise newException(AdvertiseError, "Invalid namespace")
  if ttl < rdv.minDuration or ttl > rdv.maxDuration:
    raise newException(AdvertiseError, "Invalid time to live: " & $ttl)

  let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
    raise newException(AdvertiseError, "Wrong Signed Peer Record")
  let
    reg = Register(ns: ns, signedPeerRecord: sprBuff, ttl: Opt.some(ttl.seconds.uint64))
    payload = encode(Message(msgType: MessageType.Register, register: Opt.some(reg)))

  # Record our own registration locally before notifying remote peers.
  rdv.save(ns, rdv.switch.peerInfo.peerId, reg)

  var pending: seq[Future[bool]]
  for peer in peers:
    trace "Send Advertise", peerId = peer, ns
    pending.add(rdv.advertisePeer(peer, payload.buffer).withTimeout(5.seconds))
  await allFutures(pending)
method advertise*(
    rdv: RendezVous, ns: string, ttl: Duration = rdv.minDuration
) {.base, async: (raises: [CancelledError, AdvertiseError]).} =
  ## Advertise namespace `ns` to every currently connected rendezvous peer,
  ## using `ttl` (defaults to the configured minimum duration).
  await rdv.advertise(ns, ttl, rdv.peers)
proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
  ## Return the peer records registered locally under namespace `ns` whose
  ## registration has not yet expired. Records that fail signed-peer-record
  ## decoding are skipped. Returns an empty sequence for unknown namespaces.
  let
    nsSalted = ns & rdv.salt
    n = Moment.now()
  try:
    collect(newSeq()):
      for index in rdv.namespaces[nsSalted]:
        if rdv.registered[index].expiration > n:
          let res = SignedPeerRecord.decode(rdv.registered[index].data.signedPeerRecord).valueOr:
            continue
          res.data
  except KeyError:
    # Fix: the exception binding (`as exc`) was unused and only produced a
    # compiler warning; the namespace simply has no registrations.
    @[]
proc request*(
    rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int, peers: seq[PeerId]
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
  ## Send a Discover request to each of `peers`, collecting up to `l` peer
  ## records registered under namespace `ns` (all namespaces when none).
  ## Results are deduplicated by peer id, keeping the freshest record
  ## (higher seqNo wins; equal seqNo falls back to the longer TTL).
  var
    # Accumulator keyed by peer id; the Register is kept so it can be saved.
    s: Table[PeerId, (PeerRecord, Register)]
    # Remaining records wanted, decremented across all queried peers.
    limit: uint64
    d = Discover(ns: ns)
  if l <= 0 or l > DiscoverLimit.int:
    raise newException(AdvertiseError, "Invalid limit")
  if ns.isSome() and ns.get().len > MaximumNamespaceLen:
    raise newException(AdvertiseError, "Invalid namespace")
  limit = l.uint64
  proc requestPeer(
      peer: PeerId
  ) {.async: (raises: [CancelledError, DialFailedError, LPStreamError]).} =
    let conn = await rdv.switch.dial(peer, RendezVousCodec)
    defer:
      await conn.close()
    d.limit = Opt.some(limit)
    # Resume pagination with the cookie previously returned by this peer
    # for this namespace, when we have one saved.
    d.cookie =
      if ns.isSome():
        try:
          Opt.some(rdv.cookiesSaved[peer][ns.get()])
        except KeyError, CatchableError:
          Opt.none(seq[byte])
      else:
        Opt.none(seq[byte])
    await conn.writeLp(
      encode(Message(msgType: MessageType.Discover, discover: Opt.some(d))).buffer
    )
    let
      buf = await conn.readLp(MaximumMessageLen)
      msgRcv = Message.decode(buf).valueOr:
        debug "Message undecodable"
        return
    if msgRcv.msgType != MessageType.DiscoverResponse:
      debug "Unexpected discover response", msgType = msgRcv.msgType
      return
    let resp = msgRcv.discoverResponse.valueOr:
      debug "Discover response is empty"
      return
    if resp.status != ResponseStatus.Ok:
      trace "Cannot discover", ns, status = resp.status, text = resp.text
      return
    # Save the pagination cookie for the next request. Note: oversized
    # cookies (>= 1000 bytes) short-circuit the `and` and are dropped;
    # hasKeyOrPut inserts the fresh table when the peer was unknown and
    # returns true only when an entry already existed (then we overwrite).
    resp.cookie.withValue(cookie):
      if ns.isSome:
        let namespace = ns.get()
        if cookie.len() < 1000 and
            rdv.cookiesSaved.hasKeyOrPut(peer, {namespace: cookie}.toTable()):
          try:
            rdv.cookiesSaved[peer][namespace] = cookie
          except KeyError:
            raiseAssert "checked with hasKeyOrPut"
    for r in resp.registrations:
      if limit == 0:
        return
      # A missing TTL defaults past maxTTL, so such records are skipped.
      let ttl = r.ttl.get(rdv.maxTTL + 1)
      if ttl > rdv.maxTTL:
        continue
      let
        spr = SignedPeerRecord.decode(r.signedPeerRecord).valueOr:
          continue
        pr = spr.data
      if s.hasKey(pr.peerId):
        let (prSaved, rSaved) =
          try:
            s[pr.peerId]
          except KeyError:
            raiseAssert "checked with hasKey"
        # Replace only with a strictly newer record, or an equally new one
        # carrying a longer TTL.
        if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get(rdv.maxTTL) < ttl) or
            prSaved.seqNo < pr.seqNo:
          s[pr.peerId] = (pr, r)
      else:
        s[pr.peerId] = (pr, r)
      limit.dec()
    if ns.isSome():
      # Cache the discovered registrations locally (no re-broadcast).
      for (_, r) in s.values():
        rdv.save(ns.get(), peer, r, false)
  for peer in peers:
    if limit == 0:
      break
    # Only query peers that actually speak the rendezvous protocol.
    if RendezVousCodec notin rdv.switch.peerStore[ProtoBook][peer]:
      continue
    try:
      trace "Send Request", peerId = peer, ns
      await peer.requestPeer()
    except CancelledError as e:
      raise e
    except DialFailedError as e:
      trace "failed to dial a peer", description = e.msg
    except LPStreamError as e:
      trace "failed to communicate with a peer", description = e.msg
  return toSeq(s.values()).mapIt(it[0])
proc request*(
    rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
  ## Convenience overload: query all currently connected rendezvous peers.
  await rdv.request(ns, l, rdv.peers)
proc request*(
    rdv: RendezVous, l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
  ## Convenience overload: query all connected peers across all namespaces.
  await rdv.request(Opt.none(string), l, rdv.peers)
proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
  ## Expire every registration of our own peer stored locally under `ns`.
  ## No-op when the namespace is unknown.
  let
    saltedNs = ns & rdv.salt
    localPeer = rdv.switch.peerInfo.peerId
  try:
    for idx in rdv.namespaces[saltedNs]:
      if rdv.registered[idx].peerId == localPeer:
        # Backdating the expiration marks the entry for garbage collection.
        rdv.registered[idx].expiration = rdv.expiredDT
  except KeyError:
    discard # nothing registered under this namespace
proc unsubscribe*(
    rdv: RendezVous, ns: string, peerIds: seq[PeerId]
) {.async: (raises: [RendezVousError, CancelledError]).} =
  ## Send an Unregister message for namespace `ns` to each peer in
  ## `peerIds`. Per-peer failures are logged and swallowed.
  if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
    raise newException(RendezVousError, "Invalid namespace")

  let unregisterMsg = encode(
    Message(msgType: MessageType.Unregister, unregister: Opt.some(Unregister(ns: ns)))
  )

  proc notifyPeer(peerId: PeerId) {.async: (raises: []).} =
    try:
      let conn = await rdv.switch.dial(peerId, RendezVousCodec)
      defer:
        await conn.close()
      await conn.writeLp(unregisterMsg.buffer)
    except CatchableError as exc:
      trace "exception while unsubscribing", description = exc.msg

  await allFutures(peerIds.mapIt(notifyPeer(it)))
proc unsubscribe*(
    rdv: RendezVous, ns: string
) {.async: (raises: [RendezVousError, CancelledError]).} =
  ## Unsubscribe from `ns` locally, then notify all connected peers.
  rdv.unsubscribeLocally(ns)
  await rdv.unsubscribe(ns, rdv.peers)
proc setup*(rdv: RendezVous, switch: Switch) =
  ## Attach `rdv` to `switch` and keep `rdv.peers` in sync with the set of
  ## currently connected peers via join/leave event handlers.
  rdv.switch = switch

  proc onPeerEvent(
      peerId: PeerId, event: PeerEvent
  ) {.async: (raises: [CancelledError]).} =
    case event.kind
    of PeerEventKind.Joined:
      rdv.peers.add(peerId)
    of PeerEventKind.Left:
      rdv.peers.keepItIf(it != peerId)
    else:
      discard # other event kinds are irrelevant here

  rdv.switch.addPeerEventHandler(onPeerEvent, Joined)
  rdv.switch.addPeerEventHandler(onPeerEvent, Left)
proc new*(
    T: typedesc[RendezVous],
    rng: ref HmacDrbgContext = newRng(),
    minDuration = MinimumDuration,
    maxDuration = MaximumDuration,
): T {.raises: [RendezVousError].} =
  ## Create a RendezVous protocol instance (not yet attached to a switch —
  ## see `setup`). Validates the configured TTL window and installs the
  ## stream handler that dispatches incoming rendezvous messages.
  ## Raises RendezVousError when the TTL bounds are out of range.
  if minDuration < MinimumAcceptedDuration:
    raise newException(RendezVousError, "TTL too short: 1 minute minimum")
  if maxDuration > MaximumDuration:
    raise newException(RendezVousError, "TTL too long: 72 hours maximum")
  if minDuration >= maxDuration:
    raise newException(RendezVousError, "Minimum TTL longer than maximum")
  let
    minTTL = minDuration.seconds.uint64
    maxTTL = maxDuration.seconds.uint64
  let rdv = T(
    rng: rng,
    # Random salt namespaces this node's local registration keys.
    salt: string.fromBytes(generateBytes(rng[], 8)),
    registered: initOffsettedSeq[RegisteredData](),
    # Sentinel timestamp in the past, used to force-expire registrations.
    expiredDT: Moment.now() - 1.days,
    #registerEvent: newAsyncEvent(),
    sema: newAsyncSemaphore(SemaphoreDefaultSize),
    minDuration: minDuration,
    maxDuration: maxDuration,
    minTTL: minTTL,
    maxTTL: maxTTL,
  )
  logScope:
    topics = "libp2p discovery rendezvous"
  # Incoming-stream dispatcher: decodes one message and routes it to the
  # matching handler; response-type messages are unexpected on the server
  # side and only logged.
  proc handleStream(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    try:
      let
        buf = await conn.readLp(4096)
        msg = Message.decode(buf).tryGet()
      case msg.msgType
      of MessageType.Register:
        await rdv.register(conn, msg.register.tryGet())
      of MessageType.RegisterResponse:
        trace "Got an unexpected Register Response", response = msg.registerResponse
      of MessageType.Unregister:
        rdv.unregister(conn, msg.unregister.tryGet())
      of MessageType.Discover:
        await rdv.discover(conn, msg.discover.tryGet())
      of MessageType.DiscoverResponse:
        trace "Got an unexpected Discover Response", response = msg.discoverResponse
    except CancelledError as exc:
      trace "cancelled rendezvous handler"
      raise exc
    except CatchableError as exc:
      # tryGet/decode failures land here; the stream is simply dropped.
      trace "exception in rendezvous handler", description = exc.msg
    finally:
      await conn.close()

  rdv.handler = handleStream
  rdv.codec = RendezVousCodec
  return rdv
proc new*(
    T: typedesc[RendezVous],
    switch: Switch,
    rng: ref HmacDrbgContext = newRng(),
    minDuration = MinimumDuration,
    maxDuration = MaximumDuration,
): T {.raises: [RendezVousError].} =
  ## Convenience constructor: create a RendezVous instance and attach it to
  ## `switch` in one step.
  result = T.new(rng, minDuration, maxDuration)
  result.setup(switch)
proc deletesRegister*(
    rdv: RendezVous, interval = 1.minutes
) {.async: (raises: [CancelledError]).} =
  ## Background loop: every `interval`, drop expired registrations and
  ## refresh the rendezvous metrics gauges.
  heartbeat "Register timeout", interval:
    let currentTime = Moment.now()
    var registeredCount = 0
    # Flush every registration whose expiration lies in the past.
    rdv.registered.flushIfIt(it.expiration < currentTime)
    for indices in rdv.namespaces.mvalues():
      # Flushing advances the offset; drop indices now pointing below it.
      indices.keepItIf(it >= rdv.registered.offset)
      registeredCount += indices.len
    libp2p_rendezvous_registered.set(int64(registeredCount))
    libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
method start*(
    rdv: RendezVous
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
  ## Start the periodic registration-cleanup loop. Calling start twice
  ## only logs a warning. Returns an already-completed future.
  let done = newFuture[void]()
  done.complete()
  if rdv.registerDeletionLoop.isNil:
    rdv.registerDeletionLoop = rdv.deletesRegister()
    rdv.started = true
  else:
    warn "Starting rendezvous twice"
  done
method stop*(rdv: RendezVous): Future[void] {.async: (raises: [], raw: true).} =
  ## Stop the registration-cleanup loop. Stopping before start only logs a
  ## warning. Returns an already-completed future.
  let done = newFuture[void]()
  done.complete()
  if rdv.registerDeletionLoop.isNil:
    warn "Stopping rendezvous without starting it"
  else:
    rdv.started = false
    rdv.registerDeletionLoop.cancelSoon()
    rdv.registerDeletionLoop = nil
  done

View File

@@ -50,14 +50,6 @@ proc new(
method getWrapped*(self: QuicStream): P2PConnection =
self
template mapExceptions(body: untyped) =
try:
body
except QuicError:
raise newLPStreamEOFError()
except CatchableError:
raise newLPStreamEOFError()
method readOnce*(
stream: QuicStream, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
@@ -83,8 +75,16 @@ method readOnce*(
method write*(
stream: QuicStream, bytes: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
mapExceptions(await stream.stream.write(bytes))
libp2p_network_bytes.inc(bytes.len.int64, labelValues = ["out"])
try:
await stream.stream.write(bytes)
libp2p_network_bytes.inc(bytes.len.int64, labelValues = ["out"])
except QuicError:
raise newLPStreamEOFError()
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise
(ref LPStreamError)(msg: "error in quic stream write: " & exc.msg, parent: exc)
{.pop.}
@@ -158,6 +158,8 @@ method newStream*(
.} =
try:
return await m.quicSession.getStream(Direction.Out)
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise newException(MuxerError, "error in newStream: " & exc.msg, exc)
@@ -195,7 +197,7 @@ type CertGenerator =
type QuicTransport* = ref object of Transport
listener: Listener
client: QuicClient
client: Opt[QuicClient]
privateKey: PrivateKey
connections: seq[P2PConnection]
rng: ref HmacDrbgContext
@@ -248,27 +250,33 @@ method handles*(transport: QuicTransport, address: MultiAddress): bool {.raises:
return false
QUIC_V1.match(address)
method start*(
self: QuicTransport, addrs: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError, CancelledError]).} =
doAssert self.listener.isNil, "start() already called"
#TODO handle multiple addr
proc makeConfig(self: QuicTransport): TLSConfig =
let pubkey = self.privateKey.getPublicKey().valueOr:
doAssert false, "could not obtain public key"
return
try:
if self.rng.isNil:
self.rng = newRng()
let cert = self.certGenerator(KeyPair(seckey: self.privateKey, pubkey: pubkey))
let tlsConfig = TLSConfig.init(
cert.certificate, cert.privateKey, @[alpn], Opt.some(makeCertificateVerifier())
)
return tlsConfig
let cert = self.certGenerator(KeyPair(seckey: self.privateKey, pubkey: pubkey))
let tlsConfig = TLSConfig.init(
cert.certificate, cert.privateKey, @[alpn], Opt.some(makeCertificateVerifier())
)
self.client = QuicClient.init(tlsConfig, rng = self.rng)
self.listener =
QuicServer.init(tlsConfig, rng = self.rng).listen(initTAddress(addrs[0]).tryGet)
proc getRng(self: QuicTransport): ref HmacDrbgContext =
if self.rng.isNil:
self.rng = newRng()
return self.rng
method start*(
self: QuicTransport, addrs: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError, CancelledError]).} =
doAssert self.listener.isNil, "start() already called"
# TODO(#1663): handle multiple addr
try:
self.listener = QuicServer.init(self.makeConfig(), rng = self.getRng()).listen(
initTAddress(addrs[0]).tryGet
)
await procCall Transport(self).start(addrs)
self.addrs[0] =
MultiAddress.init(self.listener.localAddress(), IPPROTO_UDP).tryGet() &
@@ -289,19 +297,21 @@ method start*(
self.running = true
method stop*(transport: QuicTransport) {.async: (raises: []).} =
if transport.running:
let conns = transport.connections[0 .. ^1]
for c in conns:
await c.close()
await procCall Transport(transport).stop()
let conns = transport.connections[0 .. ^1]
for c in conns:
await c.close()
if not transport.listener.isNil:
try:
await transport.listener.stop()
except CatchableError as exc:
trace "Error shutting down Quic transport", description = exc.msg
transport.listener.destroy()
transport.running = false
transport.listener = nil
transport.client = Opt.none(QuicClient)
await procCall Transport(transport).stop()
proc wrapConnection(
transport: QuicTransport, connection: QuicConnection
): QuicSession {.raises: [TransportOsError, MaError].} =
@@ -365,7 +375,11 @@ method dial*(
async: (raises: [transport.TransportError, CancelledError])
.} =
try:
let quicConnection = await self.client.dial(initTAddress(address).tryGet)
if not self.client.isSome:
self.client = Opt.some(QuicClient.init(self.makeConfig(), rng = self.getRng()))
let client = self.client.get()
let quicConnection = await client.dial(initTAddress(address).tryGet)
return self.wrapConnection(quicConnection)
except CancelledError as e:
raise e

View File

@@ -150,57 +150,6 @@ proc createStreamServer*[T](
except CatchableError as exc:
raise newException(LPError, "failed simpler createStreamServer: " & exc.msg, exc)
proc createAsyncSocket*(ma: MultiAddress): AsyncFD {.raises: [ValueError, LPError].} =
## Create new asynchronous socket using MultiAddress' ``ma`` socket type and
## protocol information.
##
## Returns ``asyncInvalidSocket`` on error.
##
## Note: This procedure only used in `go-libp2p-daemon` wrapper.
##
var
socktype: SockType = SockType.SOCK_STREAM
protocol: Protocol = Protocol.IPPROTO_TCP
let address = initTAddress(ma).tryGet()
if address.family in {AddressFamily.IPv4, AddressFamily.IPv6}:
if ma[1].tryGet().protoCode().tryGet() == multiCodec("udp"):
socktype = SockType.SOCK_DGRAM
protocol = Protocol.IPPROTO_UDP
elif ma[1].tryGet().protoCode().tryGet() == multiCodec("tcp"):
socktype = SockType.SOCK_STREAM
protocol = Protocol.IPPROTO_TCP
elif address.family in {AddressFamily.Unix}:
socktype = SockType.SOCK_STREAM
protocol = cast[Protocol](0)
else:
return asyncInvalidSocket
try:
createAsyncSocket(address.getDomain(), socktype, protocol)
except CatchableError as exc:
raise newException(
LPError, "Convert exception to LPError in createAsyncSocket: " & exc.msg, exc
)
proc bindAsyncSocket*(sock: AsyncFD, ma: MultiAddress): bool {.raises: [LPError].} =
## Bind socket ``sock`` to MultiAddress ``ma``.
##
## Note: This procedure only used in `go-libp2p-daemon` wrapper.
##
var
saddr: Sockaddr_storage
slen: SockLen
let address = initTAddress(ma).tryGet()
toSAddr(address, saddr, slen)
if bindSocket(SocketHandle(sock), cast[ptr SockAddr](addr saddr), slen) == 0:
result = true
else:
result = false
proc getLocalAddress*(sock: AsyncFD): TransportAddress =
## Retrieve local socket ``sock`` address.
##

View File

@@ -1,123 +0,0 @@
#!/bin/bash
# Copyright (c) 2018-2020 Status Research & Development GmbH. Licensed under
# either of:
# - Apache License, version 2.0
# - MIT license
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
set -e
force=false
verbose=false
CACHE_DIR=""
LIBP2P_COMMIT="124530a3"
while [[ "$#" -gt 0 ]]; do
case "$1" in
-f|--force) force=true ;;
-v|--verbose) verbose=true ;;
-h|--help)
echo "Usage: $0 [-f|--force] [-v|--verbose] [CACHE_DIR] [COMMIT]"
exit 0
;;
*)
# First non-option is CACHE_DIR, second is LIBP2P_COMMIT
if [[ -z "$CACHE_DIR" ]]; then
CACHE_DIR="$1"
elif [[ "$LIBP2P_COMMIT" == "124530a3" ]]; then
LIBP2P_COMMIT="$1"
else
echo "Unknown argument: $1"
exit 1
fi
;;
esac
shift
done
SUBREPO_DIR="vendor/go/src/github.com/libp2p/go-libp2p-daemon"
if [[ ! -e "$SUBREPO_DIR" ]]; then
SUBREPO_DIR="go-libp2p-daemon"
rm -rf "$SUBREPO_DIR"
git clone -q https://github.com/libp2p/go-libp2p-daemon
cd "$SUBREPO_DIR"
git checkout -q "$LIBP2P_COMMIT"
cd ..
fi
## env vars
[[ -z "$BUILD_MSG" ]] && BUILD_MSG="Building p2pd ${LIBP2P_COMMIT}"
# Windows detection
if uname | grep -qiE "mingw|msys"; then
EXE_SUFFIX=".exe"
# otherwise it fails in AppVeyor due to https://github.com/git-for-windows/git/issues/2495
GIT_TIMESTAMP_ARG="--date=unix" # available since Git 2.9.4
else
EXE_SUFFIX=""
GIT_TIMESTAMP_ARG="--date=format-local:%s" # available since Git 2.7.0
fi
TARGET_DIR="$(go env GOPATH)/bin"
TARGET_BINARY="${TARGET_DIR}/p2pd${EXE_SUFFIX}"
target_needs_rebuilding() {
REBUILD=0
NO_REBUILD=1
if [[ -n "$CACHE_DIR" && -e "${CACHE_DIR}/p2pd${EXE_SUFFIX}" ]]; then
mkdir -p "${TARGET_DIR}"
cp -a "$CACHE_DIR"/* "${TARGET_DIR}/"
fi
# compare the built commit's timestamp to the date of the last commit (keep in mind that Git doesn't preserve file timestamps)
if [[ -e "${TARGET_DIR}/timestamp" && $(cat "${TARGET_DIR}/timestamp") -eq $(cd "$SUBREPO_DIR"; git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG}) ]]; then
return $NO_REBUILD
else
return $REBUILD
fi
}
build_target() {
echo -e "$BUILD_MSG"
pushd "$SUBREPO_DIR"
# Go module downloads can fail randomly in CI VMs, so retry them a few times
MAX_RETRIES=5
CURR=0
while [[ $CURR -lt $MAX_RETRIES ]]; do
FAILED=0
go get ./... && break || FAILED=1
CURR=$(( CURR + 1 ))
if $verbose; then
echo "retry #${CURR}"
fi
done
if [[ $FAILED == 1 ]]; then
echo "Error: still fails after retrying ${MAX_RETRIES} times."
exit 1
fi
go install ./...
# record the last commit's timestamp
git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG} > "${TARGET_DIR}/timestamp"
popd
# update the CI cache
if [[ -n "$CACHE_DIR" ]]; then
rm -rf "$CACHE_DIR"
mkdir "$CACHE_DIR"
cp -a "$TARGET_DIR"/* "$CACHE_DIR"/
fi
echo "Binary built successfully: $TARGET_BINARY"
}
if $force || target_needs_rebuilding; then
build_target
else
echo "No rebuild needed."
fi

View File

@@ -1,665 +0,0 @@
import chronos, chronicles, stew/byteutils
import helpers
import ../libp2p
import ../libp2p/[daemon/daemonapi, varint, transports/wstransport, crypto/crypto]
import ../libp2p/protocols/connectivity/relay/[relay, client, utils]
type
SwitchCreator = proc(
ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
prov = proc(config: TransportConfig): Transport =
TcpTransport.new({}, config.upgr),
relay: Relay = Relay.new(circuitRelayV1 = true),
): Switch {.gcsafe, raises: [LPError].}
DaemonPeerInfo = daemonapi.PeerInfo
proc writeLp(s: StreamTransport, msg: string | seq[byte]): Future[int] {.gcsafe.} =
## write lenght prefixed
var buf = initVBuffer()
buf.writeSeq(msg)
buf.finish()
result = s.write(buf.buffer)
proc readLp(s: StreamTransport): Future[seq[byte]] {.async.} =
## read length prefixed msg
var
size: uint
length: int
res: VarintResult[void]
result = newSeq[byte](10)
for i in 0 ..< len(result):
await s.readExactly(addr result[i], 1)
res = LP.getUVarint(result.toOpenArray(0, i), length, size)
if res.isOk():
break
res.expect("Valid varint")
result.setLen(size)
if size > 0.uint:
await s.readExactly(addr result[0], int(size))
proc testPubSubDaemonPublish(
gossip: bool = false, count: int = 1, swCreator: SwitchCreator
) {.async.} =
var pubsubData = "TEST MESSAGE"
var testTopic = "test-topic"
var msgData = pubsubData.toBytes()
var flags = {PSFloodSub}
if gossip:
flags = {PSGossipSub}
let daemonNode = await newDaemonApi(flags)
let daemonPeer = await daemonNode.identity()
let nativeNode = swCreator()
let pubsub =
if gossip:
GossipSub.init(switch = nativeNode).PubSub
else:
FloodSub.init(switch = nativeNode).PubSub
nativeNode.mount(pubsub)
await nativeNode.start()
await pubsub.start()
let nativePeer = nativeNode.peerInfo
var finished = false
var times = 0
proc nativeHandler(topic: string, data: seq[byte]) {.async.} =
let smsg = string.fromBytes(data)
check smsg == pubsubData
times.inc()
if times >= count and not finished:
finished = true
await nativeNode.connect(daemonPeer.peer, daemonPeer.addresses)
await sleepAsync(500.millis)
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)
proc pubsubHandler(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async: (raises: [CatchableError]).} =
result = true # don't cancel subscription
asyncDiscard daemonNode.pubsubSubscribe(testTopic, pubsubHandler)
pubsub.subscribe(testTopic, nativeHandler)
await sleepAsync(3.seconds)
proc publisher() {.async.} =
while not finished:
await daemonNode.pubsubPublish(testTopic, msgData)
await sleepAsync(250.millis)
await wait(publisher(), 5.minutes) # should be plenty of time
await nativeNode.stop()
await pubsub.stop()
await daemonNode.close()
proc testPubSubNodePublish(
gossip: bool = false, count: int = 1, swCreator: SwitchCreator
) {.async.} =
var pubsubData = "TEST MESSAGE"
var testTopic = "test-topic"
var msgData = pubsubData.toBytes()
var flags = {PSFloodSub}
if gossip:
flags = {PSGossipSub}
let daemonNode = await newDaemonApi(flags)
let daemonPeer = await daemonNode.identity()
let nativeNode = swCreator()
let pubsub =
if gossip:
GossipSub.init(switch = nativeNode).PubSub
else:
FloodSub.init(switch = nativeNode).PubSub
nativeNode.mount(pubsub)
await nativeNode.start()
await pubsub.start()
let nativePeer = nativeNode.peerInfo
await nativeNode.connect(daemonPeer.peer, daemonPeer.addresses)
await sleepAsync(500.millis)
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)
var times = 0
var finished = false
proc pubsubHandler(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async: (raises: [CatchableError]).} =
let smsg = string.fromBytes(message.data)
check smsg == pubsubData
times.inc()
if times >= count and not finished:
finished = true
result = true # don't cancel subscription
discard await daemonNode.pubsubSubscribe(testTopic, pubsubHandler)
proc nativeHandler(topic: string, data: seq[byte]) {.async.} =
discard
pubsub.subscribe(testTopic, nativeHandler)
await sleepAsync(3.seconds)
proc publisher() {.async.} =
while not finished:
discard await pubsub.publish(testTopic, msgData)
await sleepAsync(250.millis)
await wait(publisher(), 5.minutes) # should be plenty of time
check finished
await nativeNode.stop()
await pubsub.stop()
await daemonNode.close()
proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
suite "Interop using " & name:
# TODO: chronos transports are leaking,
# but those are tracked for both the daemon
# and libp2p, so not sure which one it is,
# need to investigate more
# teardown:
# checkTrackers()
# TODO: this test is failing sometimes on windows
# For some reason we receive EOF before test 4 sometimes
asyncTest "native -> daemon multiple reads and writes":
var protos = @["/test-stream"]
let nativeNode = swCreator()
await nativeNode.start()
let daemonNode = await newDaemonApi()
let daemonPeer = await daemonNode.identity()
var testFuture = newFuture[void]("test.future")
proc daemonHandler(
api: DaemonAPI, stream: P2PStream
) {.async: (raises: [CatchableError]).} =
check string.fromBytes(await stream.transp.readLp()) == "test 1"
discard await stream.transp.writeLp("test 2")
check string.fromBytes(await stream.transp.readLp()) == "test 3"
discard await stream.transp.writeLp("test 4")
testFuture.complete()
await daemonNode.addHandler(protos, daemonHandler)
let conn = await nativeNode.dial(daemonPeer.peer, daemonPeer.addresses, protos[0])
await conn.writeLp("test 1")
check "test 2" == string.fromBytes((await conn.readLp(1024)))
await conn.writeLp("test 3")
check "test 4" == string.fromBytes((await conn.readLp(1024)))
await wait(testFuture, 10.secs)
await nativeNode.stop()
await daemonNode.close()
await sleepAsync(500.millis)
asyncTest "native -> daemon connection":
var protos = @["/test-stream"]
var test = "TEST STRING"
# We are preparing expect string, which should be prefixed with varint
# length and do not have `\r\n` suffix, because we going to use
# readLine().
var buffer = initVBuffer()
buffer.writeSeq(test & "\r\n")
buffer.finish()
var expect = newString(len(buffer) - 2)
copyMem(addr expect[0], addr buffer.buffer[0], len(expect))
let nativeNode = swCreator()
await nativeNode.start()
let daemonNode = await newDaemonApi()
let daemonPeer = await daemonNode.identity()
var testFuture = newFuture[string]("test.future")
proc daemonHandler(
api: DaemonAPI, stream: P2PStream
) {.async: (raises: [CatchableError]).} =
# We should perform `readLp()` instead of `readLine()`. `readLine()`
# here reads actually length prefixed string.
var line = await stream.transp.readLine()
check line == expect
testFuture.complete(line)
await stream.close()
await daemonNode.addHandler(protos, daemonHandler)
let conn = await nativeNode.dial(daemonPeer.peer, daemonPeer.addresses, protos[0])
await conn.writeLp(test & "\r\n")
check expect == (await wait(testFuture, 10.secs))
await conn.close()
await nativeNode.stop()
await daemonNode.close()
asyncTest "daemon -> native connection":
var protos = @["/test-stream"]
var test = "TEST STRING"
var testFuture = newFuture[string]("test.future")
proc nativeHandler(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
var line = string.fromBytes(await conn.readLp(1024))
check line == test
testFuture.complete(line)
except CancelledError as e:
raise e
except CatchableError:
check false # should not be here
finally:
await conn.close()
# custom proto
var proto = new LPProtocol
proto.handler = nativeHandler
proto.codec = protos[0] # codec
let nativeNode = swCreator()
nativeNode.mount(proto)
await nativeNode.start()
let nativePeer = nativeNode.peerInfo
let daemonNode = await newDaemonApi()
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)
var stream = await daemonNode.openStream(nativePeer.peerId, protos)
discard await stream.transp.writeLp(test)
check test == (await wait(testFuture, 10.secs))
await stream.close()
await nativeNode.stop()
await daemonNode.close()
await sleepAsync(500.millis)
asyncTest "native -> daemon websocket connection":
var protos = @["/test-stream"]
var test = "TEST STRING"
var testFuture = newFuture[string]("test.future")
proc nativeHandler(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
var line = string.fromBytes(await conn.readLp(1024))
check line == test
testFuture.complete(line)
except CancelledError as e:
raise e
except CatchableError:
check false # should not be here
finally:
await conn.close()
# custom proto
var proto = new LPProtocol
proto.handler = nativeHandler
proto.codec = protos[0] # codec
let wsAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0/ws").tryGet()
let nativeNode = swCreator(
ma = wsAddress,
prov = proc(config: TransportConfig): Transport =
WsTransport.new(config.upgr),
)
nativeNode.mount(proto)
await nativeNode.start()
let nativePeer = nativeNode.peerInfo
let daemonNode = await newDaemonApi(hostAddresses = @[wsAddress])
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)
var stream = await daemonNode.openStream(nativePeer.peerId, protos)
discard await stream.transp.writeLp(test)
check test == (await wait(testFuture, 10.secs))
await stream.close()
await nativeNode.stop()
await daemonNode.close()
await sleepAsync(500.millis)
asyncTest "daemon -> native websocket connection":
var protos = @["/test-stream"]
var test = "TEST STRING"
# We are preparing expect string, which should be prefixed with varint
# length and do not have `\r\n` suffix, because we going to use
# readLine().
var buffer = initVBuffer()
buffer.writeSeq(test & "\r\n")
buffer.finish()
var expect = newString(len(buffer) - 2)
copyMem(addr expect[0], addr buffer.buffer[0], len(expect))
let wsAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0/ws").tryGet()
let nativeNode = SwitchBuilder
.new()
.withAddress(wsAddress)
.withRng(crypto.newRng())
.withMplex()
.withWsTransport()
.withNoise()
.build()
await nativeNode.start()
let daemonNode = await newDaemonApi(hostAddresses = @[wsAddress])
let daemonPeer = await daemonNode.identity()
var testFuture = newFuture[string]("test.future")
proc daemonHandler(
api: DaemonAPI, stream: P2PStream
) {.async: (raises: [CatchableError]).} =
# We should perform `readLp()` instead of `readLine()`. `readLine()`
# here reads actually length prefixed string.
var line = await stream.transp.readLine()
check line == expect
testFuture.complete(line)
await stream.close()
await daemonNode.addHandler(protos, daemonHandler)
let conn = await nativeNode.dial(daemonPeer.peer, daemonPeer.addresses, protos[0])
await conn.writeLp(test & "\r\n")
check expect == (await wait(testFuture, 10.secs))
await conn.close()
await nativeNode.stop()
await daemonNode.close()
asyncTest "daemon -> multiple reads and writes":
var protos = @["/test-stream"]
var testFuture = newFuture[void]("test.future")
proc nativeHandler(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
check "test 1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test 2".toBytes())
check "test 3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test 4".toBytes())
testFuture.complete()
except CancelledError as e:
raise e
except CatchableError:
check false # should not be here
finally:
await conn.close()
# custom proto
var proto = new LPProtocol
proto.handler = nativeHandler
proto.codec = protos[0] # codec
let nativeNode = swCreator()
nativeNode.mount(proto)
await nativeNode.start()
let nativePeer = nativeNode.peerInfo
let daemonNode = await newDaemonApi()
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)
var stream = await daemonNode.openStream(nativePeer.peerId, protos)
asyncDiscard stream.transp.writeLp("test 1")
check "test 2" == string.fromBytes(await stream.transp.readLp())
asyncDiscard stream.transp.writeLp("test 3")
check "test 4" == string.fromBytes(await stream.transp.readLp())
await wait(testFuture, 10.secs)
await stream.close()
await nativeNode.stop()
await daemonNode.close()
asyncTest "read write multiple":
var protos = @["/test-stream"]
var test = "TEST STRING"
var count = 0
var testFuture = newFuture[int]("test.future")
proc nativeHandler(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
while count < 10:
var line = string.fromBytes(await conn.readLp(1024))
check line == test
await conn.writeLp(test.toBytes())
count.inc()
testFuture.complete(count)
except CancelledError as e:
raise e
except CatchableError:
check false # should not be here
finally:
await conn.close()
# custom proto
var proto = new LPProtocol
proto.handler = nativeHandler
proto.codec = protos[0] # codec
let nativeNode = swCreator()
nativeNode.mount(proto)
await nativeNode.start()
let nativePeer = nativeNode.peerInfo
let daemonNode = await newDaemonApi()
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)
var stream = await daemonNode.openStream(nativePeer.peerId, protos)
var count2 = 0
while count2 < 10:
discard await stream.transp.writeLp(test)
let line = await stream.transp.readLp()
check test == string.fromBytes(line)
inc(count2)
check 10 == (await wait(testFuture, 1.minutes))
await stream.close()
await nativeNode.stop()
await daemonNode.close()
asyncTest "floodsub: daemon publish one":
await testPubSubDaemonPublish(swCreator = swCreator)
asyncTest "floodsub: daemon publish many":
await testPubSubDaemonPublish(count = 10, swCreator = swCreator)
asyncTest "gossipsub: daemon publish one":
await testPubSubDaemonPublish(gossip = true, swCreator = swCreator)
asyncTest "gossipsub: daemon publish many":
await testPubSubDaemonPublish(gossip = true, count = 10, swCreator = swCreator)
asyncTest "floodsub: node publish one":
await testPubSubNodePublish(swCreator = swCreator)
asyncTest "floodsub: node publish many":
await testPubSubNodePublish(count = 10, swCreator = swCreator)
asyncTest "gossipsub: node publish one":
await testPubSubNodePublish(gossip = true, swCreator = swCreator)
asyncTest "gossipsub: node publish many":
await testPubSubNodePublish(gossip = true, count = 10, swCreator = swCreator)
proc relayInteropTests*(name: string, relayCreator: SwitchCreator) =
  ## Registers an interop test suite exercising circuit relay v1 between
  ## native libp2p switches (built via `relayCreator`) and the go-libp2p
  ## daemon, covering all three role combinations: native source, native
  ## relay and native destination, with the daemon playing the remaining
  ## role in each test.
  suite "Interop relay using " & name:
    asyncTest "NativeSrc -> NativeRelay -> DaemonDst":
      let closeBlocker = newFuture[void]()
      let daemonFinished = newFuture[void]()
      # TODO: This Future blocks the daemonHandler after sending the last message.
      # It exists because there's a strange behavior where stream.close sends
      # a Rst instead of Fin. We should investigate this at some point.
      proc daemonHandler(
          api: DaemonAPI, stream: P2PStream
      ) {.async: (raises: [CatchableError]).} =
        # Daemon side of the exchange: expects "line1"/"line3" from the
        # native source and answers "line2"/"line4".
        check "line1" == string.fromBytes(await stream.transp.readLp())
        discard await stream.transp.writeLp("line2")
        check "line3" == string.fromBytes(await stream.transp.readLp())
        discard await stream.transp.writeLp("line4")
        # Hold the stream open until the native side has read every reply
        # (see TODO above), then close and signal completion.
        await closeBlocker
        await stream.close()
        daemonFinished.complete()

      let
        maSrc = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
        maRel = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
        # Source needs a relay client speaking circuit relay v1 to dial
        # through the native relay node.
        src = relayCreator(maSrc, relay = RelayClient.new(circuitRelayV1 = true))
        rel = relayCreator(maRel)

      await src.start()
      await rel.start()
      let daemonNode = await newDaemonApi()
      let daemonPeer = await daemonNode.identity()
      # p2p-circuit address: reach the daemon via the native relay.
      let maStr =
        $rel.peerInfo.addrs[0] & "/p2p/" & $rel.peerInfo.peerId & "/p2p-circuit"
      let maddr = MultiAddress.init(maStr).tryGet()
      # Pre-connect both hops so the relayed dial can succeed.
      await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
      await rel.connect(daemonPeer.peer, daemonPeer.addresses)

      await daemonNode.addHandler(@["/testCustom"], daemonHandler)
      let conn = await src.dial(daemonPeer.peer, @[maddr], @["/testCustom"])
      await conn.writeLp("line1")
      check string.fromBytes(await conn.readLp(1024)) == "line2"
      await conn.writeLp("line3")
      check string.fromBytes(await conn.readLp(1024)) == "line4"
      # All replies received: release the daemon handler and wait for it
      # to finish before tearing everything down.
      closeBlocker.complete()
      await daemonFinished
      await conn.close()
      await allFutures(src.stop(), rel.stop())
      try:
        await daemonNode.close()
      except CatchableError as e:
        when defined(windows):
          # On Windows, daemon close may fail due to socket race condition
          # This is expected behavior and can be safely ignored
          discard
        else:
          raise e

    asyncTest "DaemonSrc -> NativeRelay -> NativeDst":
      proc customHandler(
          conn: Connection, proto: string
      ) {.async: (raises: [CancelledError]).} =
        # Native destination side: expects "line1"/"line3", answers
        # "line2"/"line4"; any non-cancellation error fails the test.
        try:
          check "line1" == string.fromBytes(await conn.readLp(1024))
          await conn.writeLp("line2")
          check "line3" == string.fromBytes(await conn.readLp(1024))
          await conn.writeLp("line4")
        except CancelledError as e:
          raise e
        except CatchableError:
          check false # should not be here
        finally:
          await conn.close()

      # Advertise both the custom protocol and the relay v1 codec so the
      # daemon can negotiate either over the opened stream.
      let protos = @["/customProto", RelayV1Codec]
      var customProto = new LPProtocol
      customProto.handler = customHandler
      customProto.codec = protos[0]
      let
        maRel = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
        maDst = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
        rel = relayCreator(maRel)
        dst = relayCreator(maDst, relay = RelayClient.new())
      dst.mount(customProto)
      await rel.start()
      await dst.start()
      let daemonNode = await newDaemonApi()
      let daemonPeer = await daemonNode.identity()
      # p2p-circuit address: the daemon reaches dst through the native relay.
      let maStr =
        $rel.peerInfo.addrs[0] & "/p2p/" & $rel.peerInfo.peerId & "/p2p-circuit"
      let maddr = MultiAddress.init(maStr).tryGet()
      await daemonNode.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
      await rel.connect(dst.peerInfo.peerId, dst.peerInfo.addrs)
      await daemonNode.connect(dst.peerInfo.peerId, @[maddr])
      var stream = await daemonNode.openStream(dst.peerInfo.peerId, protos)
      discard await stream.transp.writeLp("line1")
      check string.fromBytes(await stream.transp.readLp()) == "line2"
      discard await stream.transp.writeLp("line3")
      check string.fromBytes(await stream.transp.readLp()) == "line4"
      await allFutures(dst.stop(), rel.stop())
      await daemonNode.close()

    asyncTest "NativeSrc -> DaemonRelay -> NativeDst":
      proc customHandler(
          conn: Connection, proto: string
      ) {.async: (raises: [CancelledError]).} =
        # Same echo contract as above: expects "line1"/"line3", answers
        # "line2"/"line4".
        try:
          check "line1" == string.fromBytes(await conn.readLp(1024))
          await conn.writeLp("line2")
          check "line3" == string.fromBytes(await conn.readLp(1024))
          await conn.writeLp("line4")
        except CancelledError as e:
          raise e
        except CatchableError:
          check false # should not be here
        finally:
          await conn.close()

      let protos = @["/customProto", RelayV1Codec]
      var customProto = new LPProtocol
      customProto.handler = customHandler
      customProto.codec = protos[0]
      let
        maSrc = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
        maDst = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
        src = relayCreator(maSrc, relay = RelayClient.new())
        dst = relayCreator(maDst, relay = RelayClient.new())
      dst.mount(customProto)
      await src.start()
      await dst.start()
      # The daemon itself acts as the relay hop here.
      let daemonNode = await newDaemonApi({RelayHop})
      let daemonPeer = await daemonNode.identity()
      # p2p-circuit address: src reaches dst through the daemon relay.
      let maStr = $daemonPeer.addresses[0] & "/p2p/" & $daemonPeer.peer & "/p2p-circuit"
      let maddr = MultiAddress.init(maStr).tryGet()
      await src.connect(daemonPeer.peer, daemonPeer.addresses)
      await daemonNode.connect(dst.peerInfo.peerId, dst.peerInfo.addrs)
      let conn = await src.dial(dst.peerInfo.peerId, @[maddr], protos[0])
      await conn.writeLp("line1")
      check string.fromBytes(await conn.readLp(1024)) == "line2"
      await conn.writeLp("line3")
      check string.fromBytes(await conn.readLp(1024)) == "line4"
      await allFutures(src.stop(), dst.stop())
      await daemonNode.close()

View File

@@ -1,3 +1,5 @@
{.used.}
import testdiscoverymngr, testrendezvous, testrendezvousinterface
import
testdiscoverymngr, testrendezvous, testrendezvouserrors, testrendezvousprotobuf,
testrendezvousinterface

View File

@@ -9,9 +9,17 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
import sequtils, strutils
import sequtils
import chronos
import ../../libp2p/[protocols/rendezvous, switch]
import
../../libp2p/[
protocols/rendezvous,
protocols/rendezvous/protobuf,
peerinfo,
switch,
routing_record,
crypto/crypto,
]
import ../../libp2p/discovery/discoverymngr
import ../../libp2p/utils/offsettedseq
import ../helpers
@@ -22,118 +30,143 @@ suite "RendezVous":
checkTrackers()
asyncTest "Request locally returns 0 for empty namespace":
let (nodes, rdvs) = setupNodes(1)
let nodes = setupNodes(1)
nodes.startAndDeferStop()
const namespace = ""
check rdvs[0].requestLocally(namespace).len == 0
check nodes[0].requestLocally(namespace).len == 0
asyncTest "Request locally returns registered peers":
let (nodes, rdvs) = setupNodes(1)
let nodes = setupNodes(1)
nodes.startAndDeferStop()
const namespace = "foo"
await rdvs[0].advertise(namespace)
let peerRecords = rdvs[0].requestLocally(namespace)
await nodes[0].advertise(namespace)
let peerRecords = nodes[0].requestLocally(namespace)
check:
peerRecords.len == 1
peerRecords[0] == nodes[0].peerInfo.signedPeerRecord.data
peerRecords[0] == nodes[0].switch.peerInfo.signedPeerRecord.data
asyncTest "Unsubscribe Locally removes registered peer":
let (nodes, rdvs) = setupNodes(1)
let nodes = setupNodes(1)
nodes.startAndDeferStop()
const namespace = "foo"
await rdvs[0].advertise(namespace)
check rdvs[0].requestLocally(namespace).len == 1
await nodes[0].advertise(namespace)
check nodes[0].requestLocally(namespace).len == 1
rdvs[0].unsubscribeLocally(namespace)
check rdvs[0].requestLocally(namespace).len == 0
nodes[0].unsubscribeLocally(namespace)
check nodes[0].requestLocally(namespace).len == 0
asyncTest "Request returns 0 for empty namespace from remote":
let (rendezvousNode, peerNodes, peerRdvs, _) = setupRendezvousNodeWithPeerNodes(1)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
const namespace = "empty"
check (await peerRdvs[0].request(Opt.some(namespace))).len == 0
check (await peerNodes[0].request(Opt.some(namespace))).len == 0
asyncTest "Request returns registered peers from remote":
let (rendezvousNode, peerNodes, peerRdvs, _) = setupRendezvousNodeWithPeerNodes(1)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
const namespace = "foo"
await peerRdvs[0].advertise(namespace)
let peerRecords = await peerRdvs[0].request(Opt.some(namespace))
await peerNodes[0].advertise(namespace)
let peerRecords = await peerNodes[0].request(Opt.some(namespace))
check:
peerRecords.len == 1
peerRecords[0] == peerNodes[0].peerInfo.signedPeerRecord.data
peerRecords[0] == peerNodes[0].switch.peerInfo.signedPeerRecord.data
asyncTest "Peer is not registered when peer record validation fails":
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
peerNodes[0].switch.peerInfo.signedPeerRecord =
createCorruptedSignedPeerRecord(peerNodes[0].switch.peerInfo.peerId)
const namespace = "foo"
await peerNodes[0].advertise(namespace)
check rendezvousNode.registered.s.len == 0
asyncTest "Unsubscribe removes registered peer from remote":
let (rendezvousNode, peerNodes, peerRdvs, _) = setupRendezvousNodeWithPeerNodes(1)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
const namespace = "foo"
await peerRdvs[0].advertise(namespace)
await peerNodes[0].advertise(namespace)
check (await peerRdvs[0].request(Opt.some(namespace))).len == 1
check (await peerNodes[0].request(Opt.some(namespace))).len == 1
await peerRdvs[0].unsubscribe(namespace)
check (await peerRdvs[0].request(Opt.some(namespace))).len == 0
await peerNodes[0].unsubscribe(namespace)
check (await peerNodes[0].request(Opt.some(namespace))).len == 0
asyncTest "Unsubscribe for not registered namespace is ignored":
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
await peerNodes[0].advertise("foo")
await peerNodes[0].unsubscribe("bar")
check rendezvousNode.registered.s.len == 1
asyncTest "Consecutive requests with namespace returns peers with pagination":
let (rendezvousNode, peerNodes, peerRdvs, _) = setupRendezvousNodeWithPeerNodes(11)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(11)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodesToRendezvousNode(peerNodes, rendezvousNode)
const namespace = "foo"
await allFutures(peerRdvs.mapIt(it.advertise(namespace)))
await allFutures(peerNodes.mapIt(it.advertise(namespace)))
var data = peerNodes.mapIt(it.peerInfo.signedPeerRecord.data)
var peerRecords = await peerRdvs[0].request(Opt.some(namespace), 5)
var data = peerNodes.mapIt(it.switch.peerInfo.signedPeerRecord.data)
var peerRecords = await peerNodes[0].request(Opt.some(namespace), 5)
check:
peerRecords.len == 5
peerRecords.allIt(it in data)
data.keepItIf(it notin peerRecords)
peerRecords = await peerRdvs[0].request(Opt.some(namespace))
peerRecords = await peerNodes[0].request(Opt.some(namespace))
check:
peerRecords.len == 6
peerRecords.allIt(it in data)
check (await peerRdvs[0].request(Opt.some(namespace))).len == 0
check (await peerNodes[0].request(Opt.some(namespace))).len == 0
asyncTest "Request without namespace returns all registered peers":
let (rendezvousNode, peerNodes, peerRdvs, _) = setupRendezvousNodeWithPeerNodes(10)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(10)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodesToRendezvousNode(peerNodes, rendezvousNode)
const namespaceFoo = "foo"
const namespaceBar = "Bar"
await allFutures(peerRdvs[0 ..< 5].mapIt(it.advertise(namespaceFoo)))
await allFutures(peerRdvs[5 ..< 10].mapIt(it.advertise(namespaceBar)))
await allFutures(peerNodes[0 ..< 5].mapIt(it.advertise(namespaceFoo)))
await allFutures(peerNodes[5 ..< 10].mapIt(it.advertise(namespaceBar)))
check (await peerRdvs[0].request()).len == 10
check (await peerNodes[0].request()).len == 10
check (await peerRdvs[0].request(Opt.none(string))).len == 10
check (await peerNodes[0].request(Opt.none(string))).len == 10
asyncTest "Consecutive requests with namespace keep cookie and retun only new peers":
let (rendezvousNode, peerNodes, peerRdvs, _) = setupRendezvousNodeWithPeerNodes(2)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(2)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodesToRendezvousNode(peerNodes, rendezvousNode)
let
rdv0 = peerRdvs[0]
rdv1 = peerRdvs[1]
rdv0 = peerNodes[0]
rdv1 = peerNodes[1]
const namespace = "foo"
await rdv0.advertise(namespace)
@@ -144,30 +177,30 @@ suite "RendezVous":
check:
peerRecords.len == 1
peerRecords[0] == peerNodes[1].peerInfo.signedPeerRecord.data
peerRecords[0] == peerNodes[1].switch.peerInfo.signedPeerRecord.data
asyncTest "Request with namespace pagination with multiple namespaces":
let (rendezvousNode, peerNodes, peerRdvs, _) = setupRendezvousNodeWithPeerNodes(30)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(30)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodesToRendezvousNode(peerNodes, rendezvousNode)
let rdv = peerRdvs[0]
let rdv = peerNodes[0]
# Register peers in different namespaces in mixed order
const
namespaceFoo = "foo"
namespaceBar = "bar"
await allFutures(peerRdvs[0 ..< 5].mapIt(it.advertise(namespaceFoo)))
await allFutures(peerRdvs[5 ..< 10].mapIt(it.advertise(namespaceBar)))
await allFutures(peerRdvs[10 ..< 15].mapIt(it.advertise(namespaceFoo)))
await allFutures(peerRdvs[15 ..< 20].mapIt(it.advertise(namespaceBar)))
await allFutures(peerNodes[0 ..< 5].mapIt(it.advertise(namespaceFoo)))
await allFutures(peerNodes[5 ..< 10].mapIt(it.advertise(namespaceBar)))
await allFutures(peerNodes[10 ..< 15].mapIt(it.advertise(namespaceFoo)))
await allFutures(peerNodes[15 ..< 20].mapIt(it.advertise(namespaceBar)))
var fooRecords = peerNodes[0 ..< 5].concat(peerNodes[10 ..< 15]).mapIt(
it.peerInfo.signedPeerRecord.data
it.switch.peerInfo.signedPeerRecord.data
)
var barRecords = peerNodes[5 ..< 10].concat(peerNodes[15 ..< 20]).mapIt(
it.peerInfo.signedPeerRecord.data
it.switch.peerInfo.signedPeerRecord.data
)
# Foo Page 1 with limit
@@ -205,15 +238,15 @@ suite "RendezVous":
check barRecords.len == 0
# Register new peers
await allFutures(peerRdvs[20 ..< 25].mapIt(it.advertise(namespaceFoo)))
await allFutures(peerRdvs[25 ..< 30].mapIt(it.advertise(namespaceBar)))
await allFutures(peerNodes[20 ..< 25].mapIt(it.advertise(namespaceFoo)))
await allFutures(peerNodes[25 ..< 30].mapIt(it.advertise(namespaceBar)))
# Foo Page 5 only new peers
peerRecords = await rdv.request(Opt.some(namespaceFoo))
check:
peerRecords.len == 5
peerRecords.allIt(
it in peerNodes[20 ..< 25].mapIt(it.peerInfo.signedPeerRecord.data)
it in peerNodes[20 ..< 25].mapIt(it.switch.peerInfo.signedPeerRecord.data)
)
# Bar Page 2 only new peers
@@ -221,7 +254,7 @@ suite "RendezVous":
check:
peerRecords.len == 5
peerRecords.allIt(
it in peerNodes[25 ..< 30].mapIt(it.peerInfo.signedPeerRecord.data)
it in peerNodes[25 ..< 30].mapIt(it.switch.peerInfo.signedPeerRecord.data)
)
# All records
@@ -229,8 +262,7 @@ suite "RendezVous":
check peerRecords.len == 30
asyncTest "Request with namespace with expired peers":
let (rendezvousNode, peerNodes, peerRdvs, rdv) =
setupRendezvousNodeWithPeerNodes(20)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(20)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodesToRendezvousNode(peerNodes, rendezvousNode)
@@ -239,135 +271,139 @@ suite "RendezVous":
const
namespaceFoo = "foo"
namespaceBar = "bar"
await allFutures(peerRdvs[0 ..< 5].mapIt(it.advertise(namespaceFoo)))
await allFutures(peerRdvs[5 ..< 10].mapIt(it.advertise(namespaceBar)))
await allFutures(peerNodes[0 ..< 5].mapIt(it.advertise(namespaceFoo)))
await allFutures(peerNodes[5 ..< 10].mapIt(it.advertise(namespaceBar)))
check:
(await peerRdvs[0].request(Opt.some(namespaceFoo))).len == 5
(await peerRdvs[0].request(Opt.some(namespaceBar))).len == 5
(await peerNodes[0].request(Opt.some(namespaceFoo))).len == 5
(await peerNodes[0].request(Opt.some(namespaceBar))).len == 5
# Overwrite register timeout loop interval
discard rdv.deletesRegister(1.seconds)
discard rendezvousNode.deletesRegister(1.seconds)
# Overwrite expiration times
let now = Moment.now()
for reg in rdv.registered.s.mitems:
for reg in rendezvousNode.registered.s.mitems:
reg.expiration = now
# Wait for the deletion
checkUntilTimeout:
rdv.registered.offset == 10
rdv.registered.s.len == 0
(await peerRdvs[0].request(Opt.some(namespaceFoo))).len == 0
(await peerRdvs[0].request(Opt.some(namespaceBar))).len == 0
rendezvousNode.registered.offset == 10
rendezvousNode.registered.s.len == 0
(await peerNodes[0].request(Opt.some(namespaceFoo))).len == 0
(await peerNodes[0].request(Opt.some(namespaceBar))).len == 0
# Advertise new peers
await allFutures(peerRdvs[10 ..< 15].mapIt(it.advertise(namespaceFoo)))
await allFutures(peerRdvs[15 ..< 20].mapIt(it.advertise(namespaceBar)))
await allFutures(peerNodes[10 ..< 15].mapIt(it.advertise(namespaceFoo)))
await allFutures(peerNodes[15 ..< 20].mapIt(it.advertise(namespaceBar)))
check:
rdv.registered.offset == 10
rdv.registered.s.len == 10
(await peerRdvs[0].request(Opt.some(namespaceFoo))).len == 5
(await peerRdvs[0].request(Opt.some(namespaceBar))).len == 5
rendezvousNode.registered.offset == 10
rendezvousNode.registered.s.len == 10
(await peerNodes[0].request(Opt.some(namespaceFoo))).len == 5
(await peerNodes[0].request(Opt.some(namespaceBar))).len == 5
asyncTest "Cookie offset is reset to end (returns empty) then new peers are discoverable":
let (rendezvousNode, peerNodes, peerRdvs, rdv) = setupRendezvousNodeWithPeerNodes(3)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(3)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodesToRendezvousNode(peerNodes, rendezvousNode)
const namespace = "foo"
# Advertise two peers initially
await allFutures(peerRdvs[0 ..< 2].mapIt(it.advertise(namespace)))
await allFutures(peerNodes[0 ..< 2].mapIt(it.advertise(namespace)))
# Build and inject overflow cookie: offset past current high()+1
let offset = (rdv.registered.high + 1000).uint64
let offset = (rendezvousNode.registered.high + 1000).uint64
let cookie = buildProtobufCookie(offset, namespace)
peerRdvs[0].injectCookieForPeer(rendezvousNode.peerInfo.peerId, namespace, cookie)
peerNodes[0].injectCookieForPeer(
rendezvousNode.switch.peerInfo.peerId, namespace, cookie
)
# First request should return empty due to clamping to high()+1
check (await peerRdvs[0].request(Opt.some(namespace))).len == 0
check (await peerNodes[0].request(Opt.some(namespace))).len == 0
# Advertise a new peer, next request should return only the new one
await peerRdvs[2].advertise(namespace)
let peerRecords = await peerRdvs[0].request(Opt.some(namespace))
await peerNodes[2].advertise(namespace)
let peerRecords = await peerNodes[0].request(Opt.some(namespace))
check:
peerRecords.len == 1
peerRecords[0] == peerNodes[2].peerInfo.signedPeerRecord.data
peerRecords[0] == peerNodes[2].switch.peerInfo.signedPeerRecord.data
asyncTest "Cookie offset is reset to low after flush (returns current entries)":
let (rendezvousNode, peerNodes, peerRdvs, rdv) = setupRendezvousNodeWithPeerNodes(8)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(8)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodesToRendezvousNode(peerNodes, rendezvousNode)
const namespace = "foo"
# Advertise 4 peers in namespace
await allFutures(peerRdvs[0 ..< 4].mapIt(it.advertise(namespace)))
await allFutures(peerNodes[0 ..< 4].mapIt(it.advertise(namespace)))
# Expire all and flush to advance registered.offset
discard rdv.deletesRegister(1.seconds)
discard rendezvousNode.deletesRegister(1.seconds)
let now = Moment.now()
for reg in rdv.registered.s.mitems:
for reg in rendezvousNode.registered.s.mitems:
reg.expiration = now
checkUntilTimeout:
rdv.registered.s.len == 0
rdv.registered.offset == 4
rendezvousNode.registered.s.len == 0
rendezvousNode.registered.offset == 4
# Advertise 4 new peers
await allFutures(peerRdvs[4 ..< 8].mapIt(it.advertise(namespace)))
await allFutures(peerNodes[4 ..< 8].mapIt(it.advertise(namespace)))
# Build and inject underflow cookie: offset behind current low
let offset = 0'u64
let cookie = buildProtobufCookie(offset, namespace)
peerRdvs[0].injectCookieForPeer(rendezvousNode.peerInfo.peerId, namespace, cookie)
peerNodes[0].injectCookieForPeer(
rendezvousNode.switch.peerInfo.peerId, namespace, cookie
)
check (await peerRdvs[0].request(Opt.some(namespace))).len == 4
check (await peerNodes[0].request(Opt.some(namespace))).len == 4
asyncTest "Cookie namespace mismatch resets to low (returns peers despite offset)":
let (rendezvousNode, peerNodes, peerRdvs, rdv) = setupRendezvousNodeWithPeerNodes(3)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(3)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodesToRendezvousNode(peerNodes, rendezvousNode)
const namespace = "foo"
await allFutures(peerRdvs.mapIt(it.advertise(namespace)))
await allFutures(peerNodes.mapIt(it.advertise(namespace)))
# Build and inject cookie with wrong namespace
let offset = 10.uint64
let cookie = buildProtobufCookie(offset, "other")
peerRdvs[0].injectCookieForPeer(rendezvousNode.peerInfo.peerId, namespace, cookie)
peerNodes[0].injectCookieForPeer(
rendezvousNode.switch.peerInfo.peerId, namespace, cookie
)
check (await peerRdvs[0].request(Opt.some(namespace))).len == 3
check (await peerNodes[0].request(Opt.some(namespace))).len == 3
asyncTest "Peer default TTL is saved when advertised":
let (rendezvousNode, peerNodes, peerRdvs, rendezvousRdv) =
setupRendezvousNodeWithPeerNodes(1)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
const namespace = "foo"
let timeBefore = Moment.now()
await peerRdvs[0].advertise(namespace)
await peerNodes[0].advertise(namespace)
let timeAfter = Moment.now()
# expiration within [timeBefore + 2hours, timeAfter + 2hours]
check:
# Peer Node side
peerRdvs[0].registered.s[0].data.ttl.get == MinimumDuration.seconds.uint64
peerRdvs[0].registered.s[0].expiration >= timeBefore + MinimumDuration
peerRdvs[0].registered.s[0].expiration <= timeAfter + MinimumDuration
peerNodes[0].registered.s[0].data.ttl.get == MinimumDuration.seconds.uint64
peerNodes[0].registered.s[0].expiration >= timeBefore + MinimumDuration
peerNodes[0].registered.s[0].expiration <= timeAfter + MinimumDuration
# Rendezvous Node side
rendezvousRdv.registered.s[0].data.ttl.get == MinimumDuration.seconds.uint64
rendezvousRdv.registered.s[0].expiration >= timeBefore + MinimumDuration
rendezvousRdv.registered.s[0].expiration <= timeAfter + MinimumDuration
rendezvousNode.registered.s[0].data.ttl.get == MinimumDuration.seconds.uint64
rendezvousNode.registered.s[0].expiration >= timeBefore + MinimumDuration
rendezvousNode.registered.s[0].expiration <= timeAfter + MinimumDuration
asyncTest "Peer TTL is saved when advertised with TTL":
let (rendezvousNode, peerNodes, peerRdvs, rendezvousRdv) =
setupRendezvousNodeWithPeerNodes(1)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
@@ -376,23 +412,22 @@ suite "RendezVous":
namespace = "foo"
ttl = 3.hours
let timeBefore = Moment.now()
await peerRdvs[0].advertise(namespace, ttl)
await peerNodes[0].advertise(namespace, ttl)
let timeAfter = Moment.now()
# expiration within [timeBefore + ttl, timeAfter + ttl]
check:
# Peer Node side
peerRdvs[0].registered.s[0].data.ttl.get == ttl.seconds.uint64
peerRdvs[0].registered.s[0].expiration >= timeBefore + ttl
peerRdvs[0].registered.s[0].expiration <= timeAfter + ttl
peerNodes[0].registered.s[0].data.ttl.get == ttl.seconds.uint64
peerNodes[0].registered.s[0].expiration >= timeBefore + ttl
peerNodes[0].registered.s[0].expiration <= timeAfter + ttl
# Rendezvous Node side
rendezvousRdv.registered.s[0].data.ttl.get == ttl.seconds.uint64
rendezvousRdv.registered.s[0].expiration >= timeBefore + ttl
rendezvousRdv.registered.s[0].expiration <= timeAfter + ttl
rendezvousNode.registered.s[0].data.ttl.get == ttl.seconds.uint64
rendezvousNode.registered.s[0].expiration >= timeBefore + ttl
rendezvousNode.registered.s[0].expiration <= timeAfter + ttl
asyncTest "Peer can reregister to update its TTL before previous TTL expires":
let (rendezvousNode, peerNodes, peerRdvs, rendezvousRdv) =
setupRendezvousNodeWithPeerNodes(1)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
@@ -400,72 +435,48 @@ suite "RendezVous":
const namespace = "foo"
let now = Moment.now()
await peerRdvs[0].advertise(namespace)
await peerNodes[0].advertise(namespace)
check:
# Peer Node side
peerRdvs[0].registered.s.len == 1
peerRdvs[0].registered.s[0].expiration > now
peerNodes[0].registered.s.len == 1
peerNodes[0].registered.s[0].expiration > now
# Rendezvous Node side
rendezvousRdv.registered.s.len == 1
rendezvousRdv.registered.s[0].expiration > now
rendezvousNode.registered.s.len == 1
rendezvousNode.registered.s[0].expiration > now
await peerRdvs[0].advertise(namespace, 5.hours)
await peerNodes[0].advertise(namespace, 5.hours)
check:
# Added 2nd registration
# Updated expiration of the 1st one to the past
# Will be deleted on deletion heartbeat
# Peer Node side
peerRdvs[0].registered.s.len == 2
peerRdvs[0].registered.s[0].expiration < now
peerNodes[0].registered.s.len == 2
peerNodes[0].registered.s[0].expiration < now
# Rendezvous Node side
rendezvousRdv.registered.s.len == 2
rendezvousRdv.registered.s[0].expiration < now
rendezvousNode.registered.s.len == 2
rendezvousNode.registered.s[0].expiration < now
# Returns only one record
check (await peerRdvs[0].request(Opt.some(namespace))).len == 1
check (await peerNodes[0].request(Opt.some(namespace))).len == 1
asyncTest "Peer registration is ignored if limit of 1000 registrations is reached":
let (rendezvousNode, peerNodes, peerRdvs, rendezvousRdv) =
setupRendezvousNodeWithPeerNodes(1)
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
const namespace = "foo"
let peerRdv = peerRdvs[0]
let peerRdv = peerNodes[0]
# Create 999 registrations
await populatePeerRegistrations(
peerRdv, rendezvousRdv, namespace, RegistrationLimitPerPeer - 1
peerRdv, rendezvousNode, namespace, RegistrationLimitPerPeer - 1
)
# 1000th registration allowed
await peerRdv.advertise(namespace)
check rendezvousRdv.registered.s.len == RegistrationLimitPerPeer
check rendezvousNode.registered.s.len == RegistrationLimitPerPeer
# 1001st registration ignored, limit reached
await peerRdv.advertise(namespace)
check rendezvousRdv.registered.s.len == RegistrationLimitPerPeer
asyncTest "Various local error":
let rdv = RendezVous.new(minDuration = 1.minutes, maxDuration = 72.hours)
expect AdvertiseError:
discard await rdv.request(Opt.some("A".repeat(300)))
expect AdvertiseError:
discard await rdv.request(Opt.some("A"), -1)
expect AdvertiseError:
discard await rdv.request(Opt.some("A"), 3000)
expect AdvertiseError:
await rdv.advertise("A".repeat(300))
expect AdvertiseError:
await rdv.advertise("A", 73.hours)
expect AdvertiseError:
await rdv.advertise("A", 30.seconds)
test "Various config error":
expect RendezVousError:
discard RendezVous.new(minDuration = 30.seconds)
expect RendezVousError:
discard RendezVous.new(maxDuration = 73.hours)
expect RendezVousError:
discard RendezVous.new(minDuration = 15.minutes, maxDuration = 10.minutes)
check rendezvousNode.registered.s.len == RegistrationLimitPerPeer

View File

@@ -0,0 +1,153 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import sequtils
import strformat
import strutils
import chronos
import
../../libp2p/[
protocols/rendezvous,
protocols/rendezvous/protobuf,
peerinfo,
switch,
routing_record,
crypto/crypto,
]
import ../../libp2p/discovery/discoverymngr
import ../helpers
import ./utils
suite "RendezVous Errors":
teardown:
checkTrackers()
asyncTest "Various local error":
let rdv = RendezVous.new(minDuration = 1.minutes, maxDuration = 72.hours)
expect AdvertiseError:
discard await rdv.request(Opt.some("A".repeat(300)))
expect AdvertiseError:
discard await rdv.request(Opt.some("A"), -1)
expect AdvertiseError:
discard await rdv.request(Opt.some("A"), 3000)
expect AdvertiseError:
await rdv.advertise("A".repeat(300))
expect AdvertiseError:
await rdv.advertise("A", 73.hours)
expect AdvertiseError:
await rdv.advertise("A", 30.seconds)
test "Various config error":
expect RendezVousError:
discard RendezVous.new(minDuration = 30.seconds)
expect RendezVousError:
discard RendezVous.new(maxDuration = 73.hours)
expect RendezVousError:
discard RendezVous.new(minDuration = 15.minutes, maxDuration = 10.minutes)
let testCases =
@[
(
"Register - Invalid Namespace",
(
proc(node: RendezVous): Message =
prepareRegisterMessage(
"A".repeat(300),
node.switch.peerInfo.signedPeerRecord.encode().get,
2.hours,
)
),
ResponseStatus.InvalidNamespace,
),
(
"Register - Invalid Signed Peer Record",
(
proc(node: RendezVous): Message =
# Malformed SPR - empty bytes will fail validation
prepareRegisterMessage("namespace", newSeq[byte](), 2.hours)
),
ResponseStatus.InvalidSignedPeerRecord,
),
(
"Register - Invalid TTL",
(
proc(node: RendezVous): Message =
prepareRegisterMessage(
"namespace", node.switch.peerInfo.signedPeerRecord.encode().get, 73.hours
)
),
ResponseStatus.InvalidTTL,
),
(
"Discover - Invalid Namespace",
(
proc(node: RendezVous): Message =
prepareDiscoverMessage(ns = Opt.some("A".repeat(300)))
),
ResponseStatus.InvalidNamespace,
),
(
"Discover - Invalid Cookie",
(
proc(node: RendezVous): Message =
# Empty buffer will fail Cookie.decode().tryGet() and yield InvalidCookie
prepareDiscoverMessage(cookie = Opt.some(newSeq[byte]()))
),
ResponseStatus.InvalidCookie,
),
]
for test in testCases:
let (testName, getMessage, expectedStatus) = test
asyncTest &"Node returns ERROR_CODE for invalid message - {testName}":
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
let
peerNode = peerNodes[0]
messageBuf = encode(getMessage(peerNode)).buffer
let
responseBuf = await sendRdvMessage(peerNode, rendezvousNode, messageBuf)
responseMessage = Message.decode(responseBuf).tryGet()
actualStatus =
if responseMessage.registerResponse.isSome():
responseMessage.registerResponse.get.status
else:
responseMessage.discoverResponse.get.status
check actualStatus == expectedStatus
asyncTest "Node returns NotAuthorized when Register exceeding peer limit":
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
# Pre-populate registrations up to the limit for this peer under the same namespace
let namespace = "namespaceNA"
await populatePeerRegistrations(
peerNodes[0], rendezvousNode, namespace, RegistrationLimitPerPeer
)
# Attempt one more registration which should be rejected with NotAuthorized
let messageBuf = encode(
prepareRegisterMessage(
namespace, peerNodes[0].switch.peerInfo.signedPeerRecord.encode().get, 2.hours
)
).buffer
let responseBuf = await sendRdvMessage(peerNodes[0], rendezvousNode, messageBuf)
let responseMessage = Message.decode(responseBuf).tryGet()
check responseMessage.registerResponse.get.status == ResponseStatus.NotAuthorized

View File

@@ -0,0 +1,256 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import ../../libp2p/[protocols/rendezvous/protobuf]
import ../../libp2p/protobuf/minprotobuf
import ../helpers
# Wire-format tests for the rendezvous protobuf messages: every message kind
# gets an encode/decode roundtrip check plus decode-failure checks for
# missing required fields and out-of-range enum values.
suite "RendezVous Protobuf":
teardown:
checkTrackers()
const namespace = "ns"
# --- Cookie (field 1: offset, required; field 2: ns, optional) ---
test "Cookie roundtrip with namespace":
let originalCookie = Cookie(offset: 42'u64, ns: Opt.some(namespace))
let decodeResult = Cookie.decode(originalCookie.encode().buffer)
check decodeResult.isSome()
let decodedCookie = decodeResult.get()
check:
decodedCookie.offset == originalCookie.offset
decodedCookie.ns.get() == originalCookie.ns.get()
decodedCookie.encode().buffer == originalCookie.encode().buffer # roundtrip again
test "Cookie roundtrip without namespace":
let originalCookie = Cookie(offset: 7'u64)
let decodeResult = Cookie.decode(originalCookie.encode().buffer)
check decodeResult.isSome()
let decodedCookie = decodeResult.get()
check:
decodedCookie.offset == originalCookie.offset
decodedCookie.ns.isNone()
decodedCookie.encode().buffer == originalCookie.encode().buffer
test "Cookie decode fails when missing offset":
var emptyCookieBuffer = initProtoBuffer()
check Cookie.decode(emptyCookieBuffer.buffer).isNone()
# --- Register (ns and signedPeerRecord required; ttl optional) ---
test "Register roundtrip with ttl":
let originalRegister =
Register(ns: namespace, signedPeerRecord: @[byte 1, 2, 3], ttl: Opt.some(60'u64))
let decodeResult = Register.decode(originalRegister.encode().buffer)
check decodeResult.isSome()
let decodedRegister = decodeResult.get()
check:
decodedRegister.ns == originalRegister.ns
decodedRegister.signedPeerRecord == originalRegister.signedPeerRecord
decodedRegister.ttl.get() == originalRegister.ttl.get()
decodedRegister.encode().buffer == originalRegister.encode().buffer
test "Register roundtrip without ttl":
let originalRegister = Register(ns: namespace, signedPeerRecord: @[byte 4, 5])
let decodeResult = Register.decode(originalRegister.encode().buffer)
check decodeResult.isSome()
let decodedRegister = decodeResult.get()
check:
decodedRegister.ns == originalRegister.ns
decodedRegister.signedPeerRecord == originalRegister.signedPeerRecord
decodedRegister.ttl.isNone()
decodedRegister.encode().buffer == originalRegister.encode().buffer
test "Register decode fails when missing namespace":
var bufferMissingNamespace = initProtoBuffer()
bufferMissingNamespace.write(2, @[byte 1])
check Register.decode(bufferMissingNamespace.buffer).isNone()
test "Register decode fails when missing signedPeerRecord":
var bufferMissingSignedPeerRecord = initProtoBuffer()
bufferMissingSignedPeerRecord.write(1, namespace)
check Register.decode(bufferMissingSignedPeerRecord.buffer).isNone()
# --- RegisterResponse (status required; text and ttl optional) ---
test "RegisterResponse roundtrip successful":
let originalResponse = RegisterResponse(
status: ResponseStatus.Ok, text: Opt.some("ok"), ttl: Opt.some(10'u64)
)
let decodeResult = RegisterResponse.decode(originalResponse.encode().buffer)
check decodeResult.isSome()
let decodedResponse = decodeResult.get()
check:
decodedResponse.status == originalResponse.status
decodedResponse.text.get() == originalResponse.text.get()
decodedResponse.ttl.get() == originalResponse.ttl.get()
decodedResponse.encode().buffer == originalResponse.encode().buffer
test "RegisterResponse roundtrip failed":
let originalResponse = RegisterResponse(status: ResponseStatus.InvalidNamespace)
let decodeResult = RegisterResponse.decode(originalResponse.encode().buffer)
check decodeResult.isSome()
let decodedResponse = decodeResult.get()
check:
decodedResponse.status == originalResponse.status
decodedResponse.text.isNone()
decodedResponse.ttl.isNone()
decodedResponse.encode().buffer == originalResponse.encode().buffer
test "RegisterResponse decode fails invalid status enum":
# 999 is outside the ResponseStatus enum range
var bufferInvalidStatusValue = initProtoBuffer()
bufferInvalidStatusValue.write(1, uint(999))
check RegisterResponse.decode(bufferInvalidStatusValue.buffer).isNone()
test "RegisterResponse decode fails missing status":
var bufferMissingStatusField = initProtoBuffer()
bufferMissingStatusField.write(2, "msg")
check RegisterResponse.decode(bufferMissingStatusField.buffer).isNone()
# --- Unregister (ns required) ---
test "Unregister roundtrip":
let originalUnregister = Unregister(ns: namespace)
let decodeResult = Unregister.decode(originalUnregister.encode().buffer)
check decodeResult.isSome()
let decodedUnregister = decodeResult.get()
check:
decodedUnregister.ns == originalUnregister.ns
decodedUnregister.encode().buffer == originalUnregister.encode().buffer
test "Unregister decode fails when missing namespace":
var bufferMissingUnregisterNamespace = initProtoBuffer()
check Unregister.decode(bufferMissingUnregisterNamespace.buffer).isNone()
# --- Discover (all fields optional; empty buffer is a valid message) ---
test "Discover roundtrip with all optional fields":
let originalDiscover = Discover(
ns: Opt.some(namespace), limit: Opt.some(5'u64), cookie: Opt.some(@[byte 1, 2, 3])
)
let decodeResult = Discover.decode(originalDiscover.encode().buffer)
check decodeResult.isSome()
let decodedDiscover = decodeResult.get()
check:
decodedDiscover.ns.get() == originalDiscover.ns.get()
decodedDiscover.limit.get() == originalDiscover.limit.get()
decodedDiscover.cookie.get() == originalDiscover.cookie.get()
decodedDiscover.encode().buffer == originalDiscover.encode().buffer
test "Discover decode empty buffer yields empty object":
var emptyDiscoverBuffer: seq[byte] = @[]
let decodeResult = Discover.decode(emptyDiscoverBuffer)
check decodeResult.isSome()
let decodedDiscover = decodeResult.get()
check:
decodedDiscover.ns.isNone()
decodedDiscover.limit.isNone()
decodedDiscover.cookie.isNone()
# --- DiscoverResponse (status required; registrations, cookie, text optional) ---
test "DiscoverResponse roundtrip with registration":
let registrationEntry = Register(ns: namespace, signedPeerRecord: @[byte 9])
let originalResponse = DiscoverResponse(
registrations: @[registrationEntry],
cookie: Opt.some(@[byte 0xAA]),
status: ResponseStatus.Ok,
text: Opt.some("t"),
)
let decodeResult = DiscoverResponse.decode(originalResponse.encode().buffer)
check decodeResult.isSome()
let decodedResponse = decodeResult.get()
check:
decodedResponse.status == originalResponse.status
decodedResponse.registrations.len == 1
decodedResponse.registrations[0].ns == registrationEntry.ns
decodedResponse.cookie.get() == originalResponse.cookie.get()
decodedResponse.text.get() == originalResponse.text.get()
decodedResponse.encode().buffer == originalResponse.encode().buffer
test "DiscoverResponse roundtrip failed":
let originalResponse = DiscoverResponse(status: ResponseStatus.InternalError)
let decodeResult = DiscoverResponse.decode(originalResponse.encode().buffer)
check decodeResult.isSome()
let decodedResponse = decodeResult.get()
check:
decodedResponse.status == originalResponse.status
decodedResponse.registrations.len == 0
decodedResponse.cookie.isNone()
decodedResponse.text.isNone()
decodedResponse.encode().buffer == originalResponse.encode().buffer
test "DiscoverResponse decode fails with invalid registration":
# Field 1 carries bytes that are not a decodable Register message.
var bufferInvalidRegistration = initProtoBuffer()
bufferInvalidRegistration.write(1, @[byte 0x00, 0xFF])
bufferInvalidRegistration.write(3, ResponseStatus.Ok.uint)
check DiscoverResponse.decode(bufferInvalidRegistration.buffer).isNone()
test "DiscoverResponse decode fails missing status":
var bufferMissingDiscoverResponseStatus = initProtoBuffer()
check DiscoverResponse.decode(bufferMissingDiscoverResponseStatus.buffer).isNone()
# --- Message envelope: one variant per MessageType ---
test "Message roundtrip Register variant":
let registerPayload = Register(ns: namespace, signedPeerRecord: @[byte 1])
let originalMessage =
Message(msgType: MessageType.Register, register: Opt.some(registerPayload))
let decodeResult = Message.decode(originalMessage.encode().buffer)
check decodeResult.isSome()
let decodedMessage = decodeResult.get()
check:
decodedMessage.msgType == originalMessage.msgType
decodedMessage.register.get().ns == registerPayload.ns
test "Message roundtrip RegisterResponse variant":
let registerResponsePayload = RegisterResponse(status: ResponseStatus.Ok)
let originalMessage = Message(
msgType: MessageType.RegisterResponse,
registerResponse: Opt.some(registerResponsePayload),
)
let decodeResult = Message.decode(originalMessage.encode().buffer)
check decodeResult.isSome()
let decodedMessage = decodeResult.get()
check:
decodedMessage.msgType == originalMessage.msgType
decodedMessage.registerResponse.get().status == registerResponsePayload.status
test "Message roundtrip Unregister variant":
let unregisterPayload = Unregister(ns: namespace)
let originalMessage =
Message(msgType: MessageType.Unregister, unregister: Opt.some(unregisterPayload))
let decodeResult = Message.decode(originalMessage.encode().buffer)
check decodeResult.isSome()
let decodedMessage = decodeResult.get()
check:
decodedMessage.unregister.get().ns == unregisterPayload.ns
test "Message roundtrip Discover variant":
let discoverPayload = Discover(limit: Opt.some(1'u64))
let originalMessage =
Message(msgType: MessageType.Discover, discover: Opt.some(discoverPayload))
let decodeResult = Message.decode(originalMessage.encode().buffer)
check decodeResult.isSome()
let decodedMessage = decodeResult.get()
check:
decodedMessage.discover.get().limit.get() == discoverPayload.limit.get()
test "Message roundtrip DiscoverResponse variant":
let discoverResponsePayload = DiscoverResponse(status: ResponseStatus.Unavailable)
let originalMessage = Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: Opt.some(discoverResponsePayload),
)
let decodeResult = Message.decode(originalMessage.encode().buffer)
check decodeResult.isSome()
let decodedMessage = decodeResult.get()
check:
decodedMessage.discoverResponse.get().status == discoverResponsePayload.status
test "Message decode header only":
# A Message with only msgType set must decode with an empty payload slot.
var headerOnlyMessage = Message(msgType: MessageType.Register)
let decodeResult = Message.decode(headerOnlyMessage.encode().buffer)
check decodeResult.isSome()
check:
decodeResult.get().register.isNone()
test "Message decode fails invalid msgType":
var bufferInvalidMessageType = initProtoBuffer()
bufferInvalidMessageType.write(1, uint(999))
check Message.decode(bufferInvalidMessageType.buffer).isNone()

View File

@@ -1,6 +1,16 @@
import sequtils
import chronos
import ../../libp2p/[protobuf/minprotobuf, protocols/rendezvous, switch, builders]
import sequtils
import
../../libp2p/[
builders,
crypto/crypto,
peerid,
protobuf/minprotobuf,
protocols/rendezvous,
protocols/rendezvous/protobuf,
routing_record,
switch,
]
proc createSwitch*(rdv: RendezVous = RendezVous.new()): Switch =
SwitchBuilder
@@ -13,42 +23,37 @@ proc createSwitch*(rdv: RendezVous = RendezVous.new()): Switch =
.withRendezVous(rdv)
.build()
proc setupNodes*(count: int): (seq[Switch], seq[RendezVous]) =
proc setupNodes*(count: int): seq[RendezVous] =
doAssert(count > 0, "Count must be greater than 0")
var
nodes: seq[Switch] = @[]
rdvs: seq[RendezVous] = @[]
var rdvs: seq[RendezVous] = @[]
for x in 0 ..< count:
let rdv = RendezVous.new()
let node = createSwitch(rdv)
nodes.add(node)
rdvs.add(rdv)
return (nodes, rdvs)
return rdvs
proc setupRendezvousNodeWithPeerNodes*(
count: int
): (Switch, seq[Switch], seq[RendezVous], RendezVous) =
proc setupRendezvousNodeWithPeerNodes*(count: int): (RendezVous, seq[RendezVous]) =
let
(nodes, rdvs) = setupNodes(count + 1)
rendezvousNode = nodes[0]
rdvs = setupNodes(count + 1)
rendezvousRdv = rdvs[0]
peerNodes = nodes[1 ..^ 1]
peerRdvs = rdvs[1 ..^ 1]
return (rendezvousNode, peerNodes, peerRdvs, rendezvousRdv)
return (rendezvousRdv, peerRdvs)
template startAndDeferStop*(nodes: seq[Switch]) =
await allFutures(nodes.mapIt(it.start()))
template startAndDeferStop*(nodes: seq[RendezVous]) =
await allFutures(nodes.mapIt(it.switch.start()))
defer:
await allFutures(nodes.mapIt(it.stop()))
await allFutures(nodes.mapIt(it.switch.stop()))
proc connectNodes*[T: Switch](dialer: T, target: T) {.async.} =
await dialer.connect(target.peerInfo.peerId, target.peerInfo.addrs)
proc connectNodes*[T: RendezVous](dialer: T, target: T) {.async.} =
await dialer.switch.connect(
target.switch.peerInfo.peerId, target.switch.peerInfo.addrs
)
proc connectNodesToRendezvousNode*[T: Switch](
proc connectNodesToRendezvousNode*[T: RendezVous](
nodes: seq[T], rendezvousNode: T
) {.async.} =
for node in nodes:
@@ -81,3 +86,41 @@ proc populatePeerRegistrations*(
let record = targetRdv.registered.s[0]
for i in 0 ..< count - 1:
targetRdv.registered.s.add(record)
proc createCorruptedSignedPeerRecord*(peerId: PeerId): SignedPeerRecord =
let rng = newRng()
let wrongPrivKey = PrivateKey.random(rng[]).tryGet()
let record = PeerRecord.init(peerId, @[])
SignedPeerRecord.init(wrongPrivKey, record).tryGet()
proc sendRdvMessage*(
node: RendezVous, target: RendezVous, buffer: seq[byte]
): Future[seq[byte]] {.async.} =
let conn = await node.switch.dial(target.switch.peerInfo.peerId, RendezVousCodec)
defer:
await conn.close()
await conn.writeLp(buffer)
let response = await conn.readLp(4096)
response
proc prepareRegisterMessage*(
namespace: string, spr: seq[byte], ttl: Duration
): Message =
Message(
msgType: MessageType.Register,
register: Opt.some(
Register(ns: namespace, signedPeerRecord: spr, ttl: Opt.some(ttl.seconds.uint64))
),
)
proc prepareDiscoverMessage*(
ns: Opt[string] = Opt.none(string),
limit: Opt[uint64] = Opt.none(uint64),
cookie: Opt[seq[byte]] = Opt.none(seq[byte]),
): Message =
Message(
msgType: MessageType.Discover,
discover: Opt.some(Discover(ns: ns, limit: limit, cookie: cookie)),
)

125
tests/mix/testcrypto.nim Normal file
View File

@@ -0,0 +1,125 @@
{.used.}
import nimcrypto, results, unittest
import ../../libp2p/protocols/mix/crypto
# Known-answer and roundtrip tests for the mix crypto primitives
# (aes_ctr, sha256_hash, kdf, hmac, aes_ctr_start_index).
# NOTE(review): `cast[array[16, byte]]("...")` on a string value reinterprets
# the string's in-memory header (length + payload pointer), not its character
# bytes — likely not the intended key/IV content. The roundtrip checks still
# pass because the same bytes are used for encrypt and decrypt, but this
# should be confirmed and probably replaced with an explicit byte copy.
suite "cryptographic_functions_tests":
test "aes_ctr_encrypt_decrypt":
let
key = cast[array[16, byte]]("thisis16byteskey")
iv = cast[array[16, byte]]("thisis16bytesiv!")
data: seq[byte] = cast[seq[byte]]("thisisdata")
let encrypted = aes_ctr(key, iv, data)
let decrypted = aes_ctr(key, iv, encrypted)
check:
data == decrypted
data != encrypted
test "sha256_hash_computation":
let
data: seq[byte] = cast[seq[byte]]("thisisdata")
expectedHashHex =
"b53a20ecf0814267a83be82f941778ffda4b85fbf93a07847539f645ff5f1b9b"
expectedHash = fromHex(expectedHashHex)
hash = sha256_hash(data)
check hash == expectedHash
test "kdf_computation":
let
key: seq[byte] = cast[seq[byte]]("thisiskey")
expectedKdfHex = "37c9842d37dc404854428a0a3554dcaa"
expectedKdf = fromHex(expectedKdfHex)
derivedKey = kdf(key)
check derivedKey == expectedKdf
test "hmac_computation":
let
key: seq[byte] = cast[seq[byte]]("thisiskey")
data: seq[byte] = cast[seq[byte]]("thisisdata")
expectedHmacHex = "b075dd302655e085d35e8cef5dfdf101"
expectedHmac = fromHex(expectedHmacHex)
hmacResult = hmac(key, data)
check hmacResult == expectedHmac
# Empty-input edge cases: each primitive must accept zero-length data.
test "aes_ctr_empty_data":
let
key = cast[array[16, byte]]("thisis16byteskey")
iv = cast[array[16, byte]]("thisis16bytesiv!")
emptyData: array[0, byte] = []
let encrypted = aes_ctr(key, iv, emptyData)
let decrypted = aes_ctr(key, iv, encrypted)
check:
emptyData == decrypted
emptyData == encrypted
test "sha256_hash_empty_data":
let
emptyData: array[0, byte] = []
expectedHashHex =
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
expectedHash = fromHex(expectedHashHex)
hash = sha256_hash(emptyData)
check hash == expectedHash
test "kdf_empty_key":
let
emptyKey: array[0, byte] = []
expectedKdfHex = "e3b0c44298fc1c149afbf4c8996fb924"
expectedKdf = fromHex(expectedKdfHex)
derivedKey = kdf(emptyKey)
check derivedKey == expectedKdf
test "hmac_empty_key_and_data":
let
emptyKey: array[0, byte] = []
emptyData: array[0, byte] = []
expectedHmacHex = "b613679a0814d9ec772f95d778c35fc5"
expectedHmac = fromHex(expectedHmacHex)
hmacResult = hmac(emptyKey, emptyData)
check hmacResult == expectedHmac
# aes_ctr_start_index must agree with plain aes_ctr when starting at 0,
# and encrypting a suffix from a block-aligned index must match the
# corresponding slice of a full encryption.
test "aes_ctr_start_index_zero_index":
let
key = cast[array[16, byte]]("thisis16byteskey")
iv = cast[array[16, byte]]("thisis16bytesiv!")
data: seq[byte] = cast[seq[byte]]("thisisdata")
startIndex = 0
let encrypted = aes_ctr_start_index(key, iv, data, startIndex)
let expected = aes_ctr(key, iv, data)
check encrypted == expected
test "aes_ctr_start_index_empty_data":
let
key = cast[array[16, byte]]("thisis16byteskey")
iv = cast[array[16, byte]]("thisis16bytesiv!")
emptyData: array[0, byte] = []
startIndex = 0
let encrypted = aes_ctr_start_index(key, iv, emptyData, startIndex)
check emptyData == encrypted
test "aes_ctr_start_index_middle":
let
key = cast[array[16, byte]]("thisis16byteskey")
iv = cast[array[16, byte]]("thisis16bytesiv!")
data: seq[byte] = cast[seq[byte]]("thisisverylongdata")
startIndex = 16
let encrypted2 = aes_ctr_start_index(key, iv, data[startIndex ..^ 1], startIndex)
let encrypted1 = aes_ctr(key, iv, data[0 .. startIndex - 1])
let expected = aes_ctr(key, iv, data)
check encrypted1 & encrypted2 == expected

View File

@@ -0,0 +1,45 @@
{.used.}
import results, unittest
import ../../libp2p/crypto/curve25519
import ../../libp2p/protocols/mix/curve25519
proc isNotZero(key: FieldElement): bool =
  ## Returns true when at least one byte of `key` is non-zero,
  ## i.e. the key is not the all-zero field element.
  result = false
  for keyByte in key:
    if keyByte != 0:
      return true
# Curve25519 wrapper tests: keypair generation sanity and commutativity of
# repeated scalar multiplication.
suite "curve25519_tests":
test "generate_key_pair":
let (privateKey, publicKey) = generateKeyPair().expect("generate keypair error")
check:
fieldElementToBytes(privateKey).len == FieldElementSize
fieldElementToBytes(publicKey).len == FieldElementSize
privateKey.isNotZero()
publicKey.isNotZero()
# The public key must equal the base point multiplied by the private scalar.
let derivedPublicKey = multiplyBasePointWithScalars(@[privateKey]).expect(
"multiply base point with scalar error"
)
check publicKey == derivedPublicKey
test "commutativity":
# x1*(x2*G) == x2*(x1*G) == (x1*x2)*G regardless of scalar order.
let
x1 = generateRandomFieldElement().expect("generate random field element error")
x2 = generateRandomFieldElement().expect("generate random field element error")
res1 = multiplyBasePointWithScalars(@[x1, x2]).expect(
"multiply base point with scalar errors"
)
res2 = multiplyBasePointWithScalars(@[x2, x1]).expect(
"multiply base point with scalar errors"
)
res3 = multiplyPointWithScalars(public(x2), @[x1])
res4 = multiplyPointWithScalars(public(x1), @[x2])
check:
res1 == res2
res1 == res3
res1 == res4

View File

@@ -0,0 +1,101 @@
{.used.}
import results, unittest
import ../../libp2p/peerid
import ../../libp2p/protocols/mix/[serialization, fragmentation]
# Tests for mix message padding/chunking: messages are padded to DataSize
# per chunk and split into consecutive, reconstructable chunks.
suite "Fragmentation":
let peerId =
PeerId.init("16Uiu2HAmFkwLVsVh6gGPmSm9R3X4scJ5thVdKfWYeJsKeVrbcgVC").get()
test "serialize and deserialize message chunk":
let
message = newSeq[byte](DataSize)
chunks = padAndChunkMessage(message, peerId)
serialized = chunks[0].serialize()
deserialized =
MessageChunk.deserialize(serialized).expect("Deserialization error")
check chunks[0] == deserialized
test "pad and unpad small message":
let
message = cast[seq[byte]]("Hello, World!")
messageBytesLen = len(message)
paddedMsg = addPadding(message, peerId)
unpaddedMessage = removePadding(paddedMsg).expect("Unpad error")
# Chunk tuple layout: (paddingLength, data, seqNo).
let (paddingLength, data, _) = paddedMsg.get()
check:
paddingLength == uint16(DataSize - messageBytesLen)
data.len == DataSize
unpaddedMessage.len == messageBytesLen
test "pad and chunk large message":
let
message = newSeq[byte](MessageSize * 2 + (MessageSize - 1))
messageBytesLen = len(message)
chunks = padAndChunkMessage(message, peerId)
totalChunks = max(1, ceilDiv(messageBytesLen, DataSize))
check chunks.len == totalChunks
for i in 0 ..< totalChunks:
let (paddingLength, data, _) = chunks[i].get()
# Only the final chunk carries padding; earlier chunks are full.
if i != totalChunks - 1:
check paddingLength == 0
else:
let chunkSize = messageBytesLen mod DataSize
check paddingLength == uint16(DataSize - chunkSize)
check data.len == DataSize
test "chunk sequence numbers are consecutive":
let
message = newSeq[byte](MessageSize * 3)
messageBytesLen = len(message)
chunks = padAndChunkMessage(message, peerId)
totalChunks = max(1, ceilDiv(messageBytesLen, DataSize))
check chunks.len == totalChunks
let (_, _, firstSeqNo) = chunks[0].get()
for i in 1 ..< totalChunks:
let (_, _, seqNo) = chunks[i].get()
check seqNo == firstSeqNo + uint32(i)
test "chunk data reconstructs original message":
let
message = cast[seq[byte]]("This is a test message that will be split into multiple chunks.")
chunks = padAndChunkMessage(message, peerId)
var reconstructed: seq[byte]
for chunk in chunks:
# Strip each chunk's leading padding before reassembly.
let (paddingLength, data, _) = chunk.get()
reconstructed.add(data[paddingLength.int ..^ 1])
check reconstructed == message
test "empty message handling":
# An empty message still yields one fully-padded chunk.
let
message = cast[seq[byte]]("")
chunks = padAndChunkMessage(message, peerId)
check chunks.len == 1
let (paddingLength, _, _) = chunks[0].get()
check paddingLength == uint16(DataSize)
test "message size equal to chunk size":
let
message = newSeq[byte](DataSize)
chunks = padAndChunkMessage(message, peerId)
check chunks.len == 1
let (paddingLength, _, _) = chunks[0].get()
check paddingLength == 0

View File

@@ -0,0 +1,34 @@
{.used.}
import results, unittest
import ../../libp2p/protocols/mix/mix_message
import stew/byteutils
# Define test cases
# MixMessage serialization roundtrip tests (payload bytes + codec string).
suite "mix_message_tests":
test "serialize_and_deserialize_mix_message":
let
message = "Hello World!"
codec = "/test/codec/1.0.0"
mixMsg = MixMessage.init(message.toBytes(), codec)
let serialized = mixMsg.serialize()
let deserializedMsg =
MixMessage.deserialize(serialized).expect("deserialization failed")
check:
message == string.fromBytes(deserializedMsg.message)
codec == deserializedMsg.codec
test "serialize_empty_mix_message":
# A zero-length payload must roundtrip, preserving the codec.
let
emptyMessage = ""
codec = "/test/codec/1.0.0"
mixMsg = MixMessage.init(emptyMessage.toBytes(), codec)
let serialized = mixMsg.serialize()
let dMixMsg = MixMessage.deserialize(serialized).expect("deserialization failed")
check:
emptyMessage == string.fromBytes(dMixMsg.message)
codec == dMixMsg.codec

84
tests/mix/testmixnode.nim Normal file
View File

@@ -0,0 +1,84 @@
{.used.}
import strformat, results, unittest2
import ../../libp2p/[crypto/crypto, crypto/secp, multiaddress, multicodec, peerid]
import ../../libp2p/protocols/mix/[curve25519, mix_node]
# Mix node bootstrap tests: node generation, lookup by peer id, and
# read/write of node/public info files.
suite "Mix Node Tests":
# NOTE(review): this threadvar appears to be shadowed by the `let mixNodes`
# declared in `setup` below, leaving the threadvar unused — confirm intent.
var mixNodes {.threadvar.}: MixNodes
setup:
const count = 5
let mixNodes = initializeMixNodes(count).expect("could not generate mix nodes")
check:
mixNodes.len == count
teardown:
# Remove the on-disk node/pub info written by the file-roundtrip tests.
deleteNodeInfoFolder()
deletePubInfoFolder()
test "get mix node by index":
for i in 0 ..< count:
let node = mixNodes[i]
let
(peerId, multiAddr, mixPubKey, mixPrivKey, libp2pPubKey, libp2pPrivKey) =
node.get()
pubKeyProto = PublicKey(scheme: Secp256k1, skkey: libp2pPubKey)
expectedPeerId = PeerId.init(pubKeyProto).get()
# Default port allocation starts at 4242 and increments per node.
expectedMA = MultiAddress.init(fmt"/ip4/0.0.0.0/tcp/{4242 + i}").tryGet()
check:
peerId == expectedPeerId
multiAddr == expectedMA
fieldElementToBytes(mixPubKey).len == FieldElementSize
fieldElementToBytes(mixPrivKey).len == FieldElementSize
libp2pPubKey.getBytes().len == SkRawPublicKeySize
libp2pPrivKey.getBytes().len == SkRawPrivateKeySize
test "find mixnode by peerid":
for i in 0 ..< count:
let
node = mixNodes[i]
foundNode = mixNodes.findByPeerId(node.peerId).expect("find mix node error")
check:
foundNode == node
test "invalid_peer_id_lookup":
let peerId = PeerId.random().expect("could not generate peerId")
check:
mixNodes.findByPeerId(peerId).isErr()
test "write and read mixnodeinfo":
for i in 0 ..< count:
let node = mixNodes[i]
node.writeToFile(i).expect("File write error")
let readNode = MixNodeInfo.readFromFile(i).expect("Read node error")
check:
readNode == node
test "write and read mixpubinfo":
for i in 0 ..< count:
let mixPubInfo =
mixNodes.getMixPubInfoByIndex(i).expect("couldnt obtain mixpubinfo")
mixPubInfo.writeToFile(i).expect("File write error")
let readNode = MixPubInfo.readFromFile(i).expect("File read error")
check:
mixPubInfo == readNode
test "read nonexistent mixnodeinfo":
check:
MixNodeInfo.readFromFile(999).isErr()
test "generate mix nodes with different ports":
# Shadows the fixture's `count`/`mixNodes` to exercise a custom base port.
const count = 3
let basePort = 5000
let mixNodes =
initializeMixNodes(count, basePort).expect("could not generate mix nodes")
for i in 0 ..< count:
let tcpPort = mixNodes[i].multiAddr.getPart(multiCodec("tcp")).value()
let expectedPort = MultiAddress.init(fmt"/tcp/{basePort + i}").tryGet()
check:
tcpPort == expectedPort

View File

@@ -0,0 +1,57 @@
{.used.}
import results, unittest
import ../../libp2p/protocols/mix/[serialization, multiaddr]
import ../../libp2p/[peerid, multiaddress]
# Parses a multiaddress literal, raising via tryGet on malformed input.
template maddr(ma: string): MultiAddress =
MultiAddress.init(ma).tryGet()
proc maddrConversionShouldFail(ma: string, msg: string) =
  ## Registers a unittest case named `msg` asserting that serializing the
  ## multiaddress `ma` (paired with a freshly generated peer id) fails.
  test msg:
    let
      randomPeer = PeerId.random().expect("could not generate peerId")
      parsedAddr = MultiAddress.init(ma).expect("could not initialize multiaddr")
    check:
      multiAddrToBytes(randomPeer, parsedAddr).isErr
# Multiaddress <-> fixed-size byte encoding tests, including rejection of
# unsupported protocols and malformed circuit-relay addresses.
suite "Utils tests":
test "multiaddress conversion":
# Each supported address form must roundtrip through the AddrSize encoding.
let multiAddrs = [
"/ip4/0.0.0.0/tcp/4242", "/ip4/10.0.0.1/tcp/1234",
"/ip4/192.168.1.1/udp/8080/quic-v1",
"/ip4/10.0.0.1/tcp/1234/p2p/16Uiu2HAmDHw4mwBdEjxjJPhrt8Eq1kvDjXAuwkqCmhNiz363AFV2/p2p-circuit",
"/ip4/10.0.0.1/udp/1234/quic-v1/p2p/16Uiu2HAmDHw4mwBdEjxjJPhrt8Eq1kvDjXAuwkqCmhNiz363AFV2/p2p-circuit",
]
for multiAddr in multiAddrs:
let
ma = maddr(multiAddr)
peerId = PeerId.random().expect("could not generate peerId")
multiAddrBytes = multiAddrToBytes(peerId, ma).expect("conversion failed")
check multiAddrBytes.len == AddrSize
let (dPeerId, deserializedMa) =
bytesToMultiAddr(multiAddrBytes).expect("conversion failed")
check:
deserializedMa == ma
dPeerId == peerId
# Negative cases: each registers its own test via the helper.
maddrConversionShouldFail("/ip4/0.0.0.0/tcp/4242/quic-v1/", "invalid protocol")
maddrConversionShouldFail("/ip4/0.0.0.0", "invalid multiaddress format")
maddrConversionShouldFail(
"/ip4/0.0.0.0/tcp/4242/p2p-circuit", "invalid multiaddress format circuit relay"
)
maddrConversionShouldFail(
"/ip4/0.0.0.0/tcp/4242/p2p/QmcycySVeRSftFQGM392xCqDh6UUbhSU9ykNpxrFBPX3gJ/p2p-circuit",
"invalid peerId in circuit relay addr",
)
test "invalid address length":
let invalidBytes = newSeq[byte](AddrSize - 1)
check:
bytesToMultiAddr(invalidBytes).isErr

View File

@@ -0,0 +1,94 @@
{.used.}
import chronicles, sets, unittest
import std/[os, times]
import ../../libp2p/peerid
include ../../libp2p/protocols/mix/seqno_generator
# One second, in milliseconds, for the sleep-based time-dependence test below.
const second = 1000
# Tests for the sequence-number generator pulled in via `include` above, so
# internal symbols of seqno_generator are directly visible here.
suite "Sequence Number Generator":
test "init_seq_no_from_peer_id":
let
peerId =
PeerId.init("16Uiu2HAmFkwLVsVh6gGPmSm9R3X4scJ5thVdKfWYeJsKeVrbcgVC").get()
seqNo = SeqNo.init(peerId)
check seqNo != 0
test "generate_seq_nos_for_different_messages":
let
peerId =
PeerId.init("16Uiu2HAmFkwLVsVh6gGPmSm9R3X4scJ5thVdKfWYeJsKeVrbcgVC").get()
msg1 = @[byte 1, 2, 3]
msg2 = @[byte 4, 5, 6]
var seqNo = SeqNo.init(peerId)
seqNo.generate(msg1)
let seqNo1 = seqNo
seqNo.generate(msg2)
let seqNo2 = seqNo
check seqNo1 != seqNo2
test "generate_seq_nos_for_same_message":
# Same message, different wall-clock time -> different sequence number.
let
peerId =
PeerId.init("16Uiu2HAmFkwLVsVh6gGPmSm9R3X4scJ5thVdKfWYeJsKeVrbcgVC").get()
msg = @[byte 1, 2, 3]
var seqNo = SeqNo.init(peerId)
seqNo.generate(msg)
let seqNo1 = seqNo
sleep(second)
seqNo.generate(msg)
let seqNo2 = seqNo
check seqNo1 != seqNo2
test "generate_seq_nos_for_different_peer_ids":
let
peerId1 =
PeerId.init("16Uiu2HAmFkwLVsVh6gGPmSm9R3X4scJ5thVdKfWYeJsKeVrbcgVC").get()
peerId2 =
PeerId.init("16Uiu2HAm6WNzw8AssyPscYYi8x1bY5wXyQrGTShRH75bh5dPCjBQ").get()
var
seqNo1 = SeqNo.init(peerId1)
seqNo2 = SeqNo.init(peerId2)
check seqNo1 != seqNo2
test "increment_seq_no":
let peerId =
PeerId.init("16Uiu2HAmFkwLVsVh6gGPmSm9R3X4scJ5thVdKfWYeJsKeVrbcgVC").get()
var seqNo: SeqNo = SeqNo.init(peerId)
let initialCounter = seqNo
seqNo.inc()
check seqNo == initialCounter + 1
test "seq_no_wraps_around_at_max_value":
# NOTE(review): expects high(uint32) - 1 to wrap to 0 after one inc(), which
# only holds if the included seqno_generator supplies a custom wrapping
# `inc` (system.inc would yield high(uint32)) — confirm against the module.
var seqNo = high(uint32) - 1
seqNo.inc()
check seqNo == 0
test "generate_seq_no_uses_entire_uint32_range":
# Statistical spread check: 10k generations should produce >9k distinct values.
let peerId =
PeerId.init("16Uiu2HAmFkwLVsVh6gGPmSm9R3X4scJ5thVdKfWYeJsKeVrbcgVC").get()
var
seqNo = SeqNo.init(peerId)
seenValues = initHashSet[uint32]()
for i in 0 ..< 10000:
seqNo.generate(@[i.uint8])
seenValues.incl(seqNo)
check seenValues.len > 9000

View File

@@ -0,0 +1,71 @@
{.used.}
import results, unittest
import ../../libp2p/protocols/mix/serialization
# Define test cases
# Sphinx packet component serialization tests (Header, Message, Hop,
# RoutingInfo, SphinxPacket). Sizes r, t, k come from the serialization module.
suite "serialization_tests":
test "serialize_and_deserialize_header":
let header = Header.init(
newSeq[byte](AlphaSize), newSeq[byte](BetaSize), newSeq[byte](GammaSize)
)
let serialized = header.serialize()
check serialized.len() == HeaderSize
test "serialize_and_deserialize_message":
let message = Message(newSeq[byte](MessageSize))
let serialized = message.serialize()
let deserialized =
Message.deserialize(serialized).expect("Failed to deserialize message")
check message == deserialized
test "serialize_and_deserialize_hop":
let hop = Hop.init(newSeq[byte](AddrSize))
let serialized = hop.serialize()
let deserialized = Hop.deserialize(serialized).expect("Failed to deserialize hop")
check hop.get() == deserialized.get()
test "serialize_and_deserialize_routing_info":
let routingInfo = RoutingInfo.init(
Hop.init(newSeq[byte](AddrSize)),
newSeq[byte](DelaySize),
newSeq[byte](GammaSize),
newSeq[byte](((r * (t + 1)) - t) * k),
)
let serialized = routingInfo.serialize()
# Deserialization expects beta extended by a (t + 1) * k zero suffix.
let suffixLength = (t + 1) * k
let suffix = newSeq[byte](suffixLength)
let deserialized = RoutingInfo.deserialize(serialized & suffix).expect(
"Failed to deserialize routing info"
)
let
(hop, delay, gamma, beta) = getRoutingInfo(routingInfo)
(dHop, dDelay, dGamma, dBeta) = getRoutingInfo(deserialized)
check:
hop.get() == dHop.get()
delay == dDelay
gamma == dGamma
# Only the original beta length is compared; the suffix is padding.
beta == dBeta[0 .. (((r * (t + 1)) - t) * k) - 1]
test "serialize_and_deserialize_sphinx_packet":
let
header = Header.init(
newSeq[byte](AlphaSize), newSeq[byte](BetaSize), newSeq[byte](GammaSize)
)
payload = newSeq[byte](PayloadSize)
packet = SphinxPacket.init(header, payload)
let serialized = packet.serialize()
let deserializedSP =
SphinxPacket.deserialize(serialized).expect("Failed to deserialize sphinx packet")
check:
header.Alpha == deserializedSP.Hdr.Alpha
header.Beta == deserializedSP.Hdr.Beta
header.Gamma == deserializedSP.Hdr.Gamma
payload == deserializedSP.Payload

335
tests/mix/testsphinx.nim Normal file
View File

@@ -0,0 +1,335 @@
{.used.}
import random, results, unittest, chronicles
import ../../libp2p/crypto/crypto
import ../../libp2p/protocols/mix/[curve25519, serialization, sphinx, tag_manager]
import bearssl/rand
# Helper function to pad/truncate message
proc addPadding(message: openArray[byte], size: int): seq[byte] =
  ## Fits `message` into exactly `size` bytes: input longer than `size` is
  ## truncated, shorter input is extended with trailing zero bytes.
  if message.len >= size:
    message[0 ..< size]
  else:
    @message & newSeq[byte](size - message.len)
# Helper function to create dummy data
# Builds a 3-hop Sphinx test fixture: one Curve25519 keypair per hop,
# zero-filled delays and hop addresses, an all-zero message of MessageSize,
# and a zero-filled destination hop.
proc createDummyData(): (
Message, seq[FieldElement], seq[FieldElement], seq[seq[byte]], seq[Hop], Hop
) =
let (privateKey1, publicKey1) = generateKeyPair().expect("generate keypair error")
let (privateKey2, publicKey2) = generateKeyPair().expect("generate keypair error")
let (privateKey3, publicKey3) = generateKeyPair().expect("generate keypair error")
let
privateKeys = @[privateKey1, privateKey2, privateKey3]
publicKeys = @[publicKey1, publicKey2, publicKey3]
delay = @[newSeq[byte](DelaySize), newSeq[byte](DelaySize), newSeq[byte](DelaySize)]
hops =
@[
Hop.init(newSeq[byte](AddrSize)),
Hop.init(newSeq[byte](AddrSize)),
Hop.init(newSeq[byte](AddrSize)),
]
message = newSeq[byte](MessageSize)
dest = Hop.init(newSeq[byte](AddrSize))
return (message, privateKeys, publicKeys, delay, hops, dest)
# Draws a fresh random SURBIdentifier from a newly created RNG instance.
template randomI(): SURBIdentifier =
newRng()[].generate(SURBIdentifier)
# Unit tests for sphinx.nim

proc processIntermediateHop(
    packet: SphinxPacket, privateKey: FieldElement, tm: TagManager
): SphinxPacket =
  ## Processes one intermediate hop: asserts the result is an Intermediate
  ## packet of full PacketSize, then deserializes it for the next hop.
  ## Extracted because this sequence was duplicated across four tests.
  let processed =
    processSphinxPacket(packet, privateKey, tm).expect("Sphinx processing error")
  check:
    processed.status == Intermediate
    processed.serializedSphinxPacket.len == PacketSize
  SphinxPacket.deserialize(processed.serializedSphinxPacket).expect(
    "Sphinx deserialization error"
  )

suite "Sphinx Tests":
  var tm: TagManager

  setup:
    tm = TagManager.new()

  teardown:
    clearTags(tm)

  test "sphinx wrap and process":
    # Wrap a message for a 3-hop route, then peel one layer per hop.
    let (message, privateKeys, publicKeys, delay, hops, dest) = createDummyData()
    let packetBytes = wrapInSphinxPacket(message, publicKeys, delay, hops, dest).expect(
      "sphinx wrap error"
    )
    check packetBytes.len == PacketSize
    var packet =
      SphinxPacket.deserialize(packetBytes).expect("Sphinx deserialization error")
    packet = processIntermediateHop(packet, privateKeys[0], tm)
    packet = processIntermediateHop(packet, privateKeys[1], tm)
    # The exit hop must recover the original message.
    let processedSP3 =
      processSphinxPacket(packet, privateKeys[2], tm).expect("Sphinx processing error")
    check:
      processedSP3.status == Exit
      processedSP3.messageChunk == message

  test "sphinx wrap empty public keys":
    # Wrapping with an empty route must fail.
    let (message, _, _, delay, _, dest) = createDummyData()
    check wrapInSphinxPacket(message, @[], delay, @[], dest).isErr

  test "sphinx_process_invalid_mac":
    let (message, privateKeys, publicKeys, delay, hops, dest) = createDummyData()
    let packetBytes = wrapInSphinxPacket(message, publicKeys, delay, hops, dest).expect(
      "Sphinx wrap error"
    )
    check packetBytes.len == PacketSize
    # Corrupt the MAC for testing
    var tamperedPacketBytes = packetBytes
    tamperedPacketBytes[0] = packetBytes[0] xor 0x01
    let tamperedPacket = SphinxPacket.deserialize(tamperedPacketBytes).expect(
      "Sphinx deserialization error"
    )
    let invalidMacPkt = processSphinxPacket(tamperedPacket, privateKeys[0], tm).expect(
      "Sphinx processing error"
    )
    check invalidMacPkt.status == InvalidMAC

  test "sphinx process duplicate tag":
    let (message, privateKeys, publicKeys, delay, hops, dest) = createDummyData()
    let packetBytes = wrapInSphinxPacket(message, publicKeys, delay, hops, dest).expect(
      "Sphinx wrap error"
    )
    check packetBytes.len == PacketSize
    let packet =
      SphinxPacket.deserialize(packetBytes).expect("Sphinx deserialization error")
    # Process the packet twice to test duplicate tag handling
    let processedSP1 =
      processSphinxPacket(packet, privateKeys[0], tm).expect("Sphinx processing error")
    check processedSP1.status == Intermediate
    let processedSP2 =
      processSphinxPacket(packet, privateKeys[0], tm).expect("Sphinx processing error")
    check processedSP2.status == Duplicate

  test "sphinx wrap and process message sizes":
    # Exercise a range of payload sizes; each payload is padded to MessageSize.
    let messageSizes = @[32, 64, 128, 256, 512]
    for size in messageSizes:
      let (_, privateKeys, publicKeys, delay, hops, dest) = createDummyData()
      var message = newSeq[byte](size)
      randomize()
      for i in 0 ..< size:
        message[i] = byte(rand(256))
      let paddedMessage = addPadding(message, MessageSize)
      let packetBytes = wrapInSphinxPacket(paddedMessage, publicKeys, delay, hops, dest)
        .expect("Sphinx wrap error")
      check packetBytes.len == PacketSize
      var packet =
        SphinxPacket.deserialize(packetBytes).expect("Sphinx deserialization error")
      packet = processIntermediateHop(packet, privateKeys[0], tm)
      packet = processIntermediateHop(packet, privateKeys[1], tm)
      let processedSP3 = processSphinxPacket(packet, privateKeys[2], tm).expect(
        "Sphinx processing error"
      )
      check:
        processedSP3.status == Exit
        processedSP3.messageChunk == paddedMessage

  test "create and use surb":
    # A SURB built for a 3-hop route must deliver a reply that decrypts
    # back to the original message with the SURB's stored secrets.
    let (message, privateKeys, publicKeys, delay, hops, _) = createDummyData()
    let surb =
      createSURB(publicKeys, delay, hops, randomI()).expect("Create SURB error")
    let packetBytes = useSURB(surb, message).serialize()
    check packetBytes.len == PacketSize
    var packet =
      SphinxPacket.deserialize(packetBytes).expect("Sphinx deserialization error")
    packet = processIntermediateHop(packet, privateKeys[0], tm)
    packet = processIntermediateHop(packet, privateKeys[1], tm)
    let processedSP3 =
      processSphinxPacket(packet, privateKeys[2], tm).expect("Sphinx processing error")
    check processedSP3.status == Reply
    let msg = processReply(surb.key, surb.secret.get(), processedSP3.delta_prime).expect(
      "Reply processing failed"
    )
    check msg == message

  test "create surb empty public keys":
    # Building a SURB with an empty route must fail.
    let (_, _, _, delay, _, _) = createDummyData()
    check createSURB(@[], delay, @[], randomI()).isErr()

  test "surb sphinx process invalid mac":
    let (message, privateKeys, publicKeys, delay, hops, _) = createDummyData()
    let surb =
      createSURB(publicKeys, delay, hops, randomI()).expect("Create SURB error")
    let packetBytes = useSURB(surb, message).serialize()
    check packetBytes.len == PacketSize
    # Corrupt the MAC for testing
    var tamperedPacketBytes = packetBytes
    tamperedPacketBytes[0] = packetBytes[0] xor 0x01
    let tamperedPacket = SphinxPacket.deserialize(tamperedPacketBytes).expect(
      "Sphinx deserialization error"
    )
    let processedSP1 = processSphinxPacket(tamperedPacket, privateKeys[0], tm).expect(
      "Sphinx processing error"
    )
    check processedSP1.status == InvalidMAC

  test "surb sphinx process duplicate tag":
    let (message, privateKeys, publicKeys, delay, hops, _) = createDummyData()
    let surb =
      createSURB(publicKeys, delay, hops, randomI()).expect("Create SURB error")
    let packetBytes = useSURB(surb, message).serialize()
    check packetBytes.len == PacketSize
    let packet =
      SphinxPacket.deserialize(packetBytes).expect("Sphinx deserialization error")
    # Process the packet twice to test duplicate tag handling
    let processedSP1 =
      processSphinxPacket(packet, privateKeys[0], tm).expect("Sphinx processing error")
    check processedSP1.status == Intermediate
    let processedSP2 =
      processSphinxPacket(packet, privateKeys[0], tm).expect("Sphinx processing error")
    check processedSP2.status == Duplicate

  test "create and use surb message sizes":
    let messageSizes = @[32, 64, 128, 256, 512]
    for size in messageSizes:
      let (_, privateKeys, publicKeys, delay, hops, _) = createDummyData()
      var message = newSeq[byte](size)
      randomize()
      for i in 0 ..< size:
        message[i] = byte(rand(256))
      let paddedMessage = addPadding(message, MessageSize)
      let surb =
        createSURB(publicKeys, delay, hops, randomI()).expect("Create SURB error")
      let packetBytes = useSURB(surb, Message(paddedMessage)).serialize()
      check packetBytes.len == PacketSize
      var packet =
        SphinxPacket.deserialize(packetBytes).expect("Sphinx deserialization error")
      packet = processIntermediateHop(packet, privateKeys[0], tm)
      packet = processIntermediateHop(packet, privateKeys[1], tm)
      let processedSP3 = processSphinxPacket(packet, privateKeys[2], tm).expect(
        "Sphinx processing error"
      )
      check processedSP3.status == Reply
      let msg = processReply(surb.key, surb.secret.get(), processedSP3.delta_prime)
        .expect("Reply processing failed")
      check paddedMessage == msg

View File

@@ -0,0 +1,61 @@
{.used.}
import chronicles, results, unittest
import ../../libp2p/protocols/mix/[curve25519, tag_manager]
suite "tag_manager_tests":
  # Each test gets a fresh manager; teardown wipes any remaining tags.
  var manager: TagManager

  setup:
    manager = TagManager.new()

  teardown:
    manager.clearTags()

  test "add_and_check_tag":
    # A stored tag is reported as seen; an unrelated tag is not.
    let
      storedTag = generateRandomFieldElement().expect("should generate FE")
      unknownTag = generateRandomFieldElement().expect("should generate FE")
    manager.addTag(storedTag)
    check:
      manager.isTagSeen(storedTag)
      not manager.isTagSeen(unknownTag)

  test "remove_tag":
    # Removing a tag makes it unseen again.
    let tag = generateRandomFieldElement().expect("should generate FE")
    manager.addTag(tag)
    check manager.isTagSeen(tag)
    manager.removeTag(tag)
    check not manager.isTagSeen(tag)

  test "check_tag_presence":
    # Full lifecycle: unseen -> added -> removed.
    let tag = generateRandomFieldElement().expect("should generate FE")
    check not manager.isTagSeen(tag)
    manager.addTag(tag)
    check manager.isTagSeen(tag)
    manager.removeTag(tag)
    check not manager.isTagSeen(tag)

  test "handle_multiple_tags":
    # Tags are tracked independently of one another.
    let
      firstTag = generateRandomFieldElement().expect("should generate FE")
      secondTag = generateRandomFieldElement().expect("should generate FE")
    manager.addTag(firstTag)
    manager.addTag(secondTag)
    check:
      manager.isTagSeen(firstTag)
      manager.isTagSeen(secondTag)
    manager.removeTag(firstTag)
    manager.removeTag(secondTag)
    check:
      not manager.isTagSeen(firstTag)
      not manager.isTagSeen(secondTag)

View File

@@ -311,7 +311,8 @@ suite "GossipSub":
check gossipSub.mcache.msgs.len == 0
asyncTest "rpcHandler - subscription limits":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let gossipSub =
TestGossipSub.init(newStandardSwitch(transport = TransportType.QUIC))
gossipSub.topicsHigh = 10
var tooManyTopics: seq[string]
@@ -333,7 +334,8 @@ suite "GossipSub":
await conn.close()
asyncTest "rpcHandler - invalid message bytes":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let gossipSub =
TestGossipSub.init(newStandardSwitch(transport = TransportType.QUIC))
let peerId = randomPeerId()
let peer = gossipSub.getPubSubPeer(peerId)

View File

@@ -87,7 +87,7 @@ proc setupGossipSubWithPeers*(
populateMesh: bool = false,
populateFanout: bool = false,
): (TestGossipSub, seq[Connection], seq[PubSubPeer]) =
let gossipSub = TestGossipSub.init(newStandardSwitch())
let gossipSub = TestGossipSub.init(newStandardSwitch(transport = TransportType.QUIC))
for topic in topics:
gossipSub.subscribe(topic, voidTopicHandler)

View File

@@ -1,3 +1,3 @@
{.used.}
import testnative, ./pubsub/testpubsub, testinterop, testdaemon
import testnative, ./pubsub/testpubsub

View File

@@ -9,7 +9,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
import std/options
import std/options, net
import chronos
import
../libp2p/[
@@ -18,10 +18,36 @@ import
upgrademngrs/upgrade,
builders,
protocols/connectivity/autonatv2/types,
protocols/connectivity/autonatv2/server,
protocols/connectivity/autonatv2/utils,
protocols/connectivity/autonatv2/client,
protocols/connectivity/autonatv2/mockserver,
],
./helpers
proc setupAutonat(
    srcAddrs: seq[MultiAddress] = newSeq[MultiAddress](),
    config: AutonatV2Config = AutonatV2Config.new(),
): Future[(Switch, Switch, AutonatV2Client)] {.async.} =
  # Builds a client switch (src) with an AutonatV2 client mounted and a
  # server switch (dst) running the AutonatV2 server with `config`, starts
  # both, connects src -> dst, and returns all three handles.
  let src = newStandardSwitchBuilder(addrs = srcAddrs).build()
  let dst = newStandardSwitchBuilder().withAutonatV2Server(config = config).build()
  let client = AutonatV2Client.new(newRng())
  client.setup(src)
  src.mount(client)
  await src.start()
  await dst.start()
  await src.connect(dst.peerInfo.peerId, dst.peerInfo.addrs)
  (src, dst, client)
proc checkedGetIPAddress(): string =
  ## Returns the host's primary IP address, or "" when it cannot be
  ## determined (e.g. no usable route/interface).
  try:
    $getPrimaryIPAddr()
  except CatchableError:
    # Only swallow recoverable errors; `except Exception` also hid Defects
    # (programming bugs), which should propagate instead.
    ""
proc checkEncodeDecode[T](msg: T) =
# this would be equivalent of doing the following (e.g. for DialBack)
# check msg == DialBack.decode(msg.encode()).get()
@@ -149,3 +175,146 @@ suite "AutonatV2":
AutonatV2Response(
reachability: Reachable, dialResp: correctDialResp, addrs: Opt.some(addrs[0])
)
asyncTest "Instantiate server":
  # Smoke test: a switch with the AutonatV2 server mounted starts and stops.
  let serverSwitch = newStandardSwitchBuilder().withAutonatV2Server().build()
  await serverSwitch.start()
  await serverSwitch.stop()

asyncTest "Instantiate server with config":
  # Same smoke test, but with a non-default server configuration.
  let serverSwitch = newStandardSwitchBuilder()
    .withAutonatV2Server(config = AutonatV2Config.new(allowPrivateAddresses = true))
    .build()
  await serverSwitch.start()
  await serverSwitch.stop()

asyncTest "Successful DialRequest":
  # Private addresses must be allowed because both peers run on localhost.
  let (src, dst, client) =
    await setupAutonat(config = AutonatV2Config.new(allowPrivateAddresses = true))
  defer:
    await allFutures(src.stop(), dst.stop())
  # The server dials us back successfully: verdict Reachable, addrIdx 0.
  check (await client.sendDialRequest(dst.peerInfo.peerId, src.peerInfo.addrs)) ==
    AutonatV2Response(
      reachability: Reachable,
      dialResp: DialResponse(
        status: ResponseStatus.Ok,
        dialStatus: Opt.some(DialStatus.Ok),
        addrIdx: Opt.some(0.AddrIdx),
      ),
      addrs: Opt.some(src.peerInfo.addrs[0]),
    )

asyncTest "Successful DialRequest with amplification attack prevention":
  # use ip address other than 127.0.0.1 for client
  # NOTE(review): checkedGetIPAddress() may return "" on hosts without a
  # primary address, which would make MultiAddress.init(...).get() fail here
  # with an unrelated error — TODO confirm CI environment always has one.
  let
    listenAddrs =
      @[
        MultiAddress.init("/ip4/" & checkedGetIPAddress() & "/tcp/4040").get(),
        MultiAddress.init("/ip4/127.0.0.1/tcp/4040").get(),
      ]
    reqAddrs = @[listenAddrs[0]]
    (src, dst, client) = await setupAutonat(srcAddrs = listenAddrs)
  defer:
    await allFutures(src.stop(), dst.stop())
  check (await client.sendDialRequest(dst.peerInfo.peerId, reqAddrs)) ==
    AutonatV2Response(
      reachability: Reachable,
      dialResp: DialResponse(
        status: ResponseStatus.Ok,
        dialStatus: Opt.some(DialStatus.Ok),
        addrIdx: Opt.some(0.AddrIdx),
      ),
      addrs: Opt.some(src.peerInfo.addrs[0]),
    )

asyncTest "Failed DialRequest":
  # Short dial timeout so dialing the unreachable address fails quickly.
  let (src, dst, client) =
    await setupAutonat(config = AutonatV2Config.new(dialTimeout = 1.seconds))
  defer:
    await allFutures(src.stop(), dst.stop())
  # 1.1.1.1:4040 is not ours, so the dial-back must fail: NotReachable.
  check (
    await client.sendDialRequest(
      dst.peerInfo.peerId, @[MultiAddress.init("/ip4/1.1.1.1/tcp/4040").get()]
    )
  ) ==
    AutonatV2Response(
      reachability: NotReachable,
      dialResp: DialResponse(
        status: ResponseStatus.Ok,
        dialStatus: Opt.some(DialStatus.EDialError),
        addrIdx: Opt.some(0.AddrIdx),
      ),
      addrs: Opt.some(MultiAddress.init("/ip4/1.1.1.1/tcp/4040").get()),
    )

asyncTest "Failed DialRequest with amplification attack prevention":
  # use ip address other than 127.0.0.1 for client
  # NOTE(review): same checkedGetIPAddress() == "" caveat as above.
  let
    listenAddrs =
      @[
        MultiAddress.init("/ip4/" & checkedGetIPAddress() & "/tcp/4040").get(),
        MultiAddress.init("/ip4/127.0.0.1/tcp/4040").get(),
      ]
    reqAddrs = @[MultiAddress.init("/ip4/1.1.1.1/tcp/4040").get()]
    (src, dst, client) = await setupAutonat(
      srcAddrs = listenAddrs, config = AutonatV2Config.new(dialTimeout = 1.seconds)
    )
  defer:
    await allFutures(src.stop(), dst.stop())
  check (await client.sendDialRequest(dst.peerInfo.peerId, reqAddrs)) ==
    AutonatV2Response(
      reachability: NotReachable,
      dialResp: DialResponse(
        status: ResponseStatus.Ok,
        dialStatus: Opt.some(DialStatus.EDialError),
        addrIdx: Opt.some(0.AddrIdx),
      ),
      addrs: Opt.some(reqAddrs[0]),
    )

asyncTest "Server responding with invalid messages":
  # A mock server lets us script malformed responses and assert the client
  # rejects each of them with AutonatV2Error.
  let
    src = newStandardSwitchBuilder().build()
    dst = newStandardSwitchBuilder().build()
    client = AutonatV2Client.new(newRng())
    autonatV2Mock = AutonatV2Mock.new()
    reqAddrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/4040").get()]
  client.setup(src)
  dst.mount(autonatV2Mock)
  src.mount(client)
  await src.start()
  await dst.start()
  defer:
    await allFutures(src.stop(), dst.stop())
  await src.connect(dst.peerInfo.peerId, dst.peerInfo.addrs)
  # 1. invalid autonatv2msg
  autonatV2Mock.response = DialBackResponse(status: DialBackStatus.Ok).encode()
  expect(AutonatV2Error):
    discard await client.sendDialRequest(dst.peerInfo.peerId, reqAddrs)
  # 2. msg that is not DialResponse or DialDataRequest
  autonatV2Mock.response = AutonatV2Msg(
    msgType: MsgType.DialRequest, dialReq: DialRequest(addrs: @[], nonce: 0)
  ).encode()
  expect(AutonatV2Error):
    discard await client.sendDialRequest(dst.peerInfo.peerId, reqAddrs)
  # 3. invalid addrIdx (e.g. 1000 when only 1 is present)
  autonatV2Mock.response = AutonatV2Msg(
    msgType: MsgType.DialResponse,
    dialResp: DialResponse(
      status: ResponseStatus.Ok,
      addrIdx: Opt.some(1000.AddrIdx),
      dialStatus: Opt.some(DialStatus.Ok),
    ),
  ).encode()
  expect(AutonatV2Error):
    discard await client.sendDialRequest(dst.peerInfo.peerId, reqAddrs)

View File

@@ -0,0 +1,528 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import std/sequtils
import chronos, metrics
import unittest2
import
../libp2p/[
builders,
switch,
protocols/connectivity/autonatv2/types,
protocols/connectivity/autonatv2/service,
protocols/connectivity/autonatv2/mockclient,
]
import ../libp2p/nameresolving/[nameresolver, mockresolver]
import ./helpers
proc createSwitch(
    autonatSvc: Service = nil,
    withAutonat = true,
    maxConnsPerPeer = 1,
    maxConns = 100,
    nameResolver: NameResolver = nil,
): Switch =
  # Builds a TCP + Yamux + Noise switch listening on an ephemeral port;
  # optionally mounts AutonatV2, an extra service, and a name resolver.
  var builder = SwitchBuilder
    .new()
    .withRng(newRng())
    .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()], false)
    .withTcpTransport()
    .withMaxConnsPerPeer(maxConnsPerPeer)
    .withMaxConnections(maxConns)
    .withYamux()
    .withNoise()
  if withAutonat:
    builder = builder.withAutonatV2()
  if autonatSvc != nil:
    builder = builder.withServices(@[autonatSvc])
  if nameResolver != nil:
    builder = builder.withNameResolver(nameResolver)
  builder.build()
proc createSwitches(n: int): seq[Switch] =
  # Returns `n` freshly built default switches.
  for _ in 0 ..< n:
    result.add(createSwitch())
proc startAll(switches: seq[Switch]) {.async.} =
  # Starts every switch concurrently, propagating any failure.
  let startFuts = switches.mapIt(it.start())
  await allFuturesThrowing(startFuts)

proc stopAll(switches: seq[Switch]) {.async.} =
  # Stops every switch concurrently, propagating any failure.
  let stopFuts = switches.mapIt(it.stop())
  await allFuturesThrowing(stopFuts)
proc startAndConnect(switch: Switch, switches: seq[Switch]) {.async.} =
  # Starts `switch`, then starts each remote peer and dials it from `switch`
  # (sequentially, so connection order is deterministic).
  await switch.start()
  for remote in switches:
    await remote.start()
    await switch.connect(remote.peerInfo.peerId, remote.peerInfo.addrs)
proc newService(
    reachability: NetworkReachability,
    expectedDials = 3,
    config: AutonatV2ServiceConfig = AutonatV2ServiceConfig.new(),
): (AutonatV2Service, AutonatV2ClientMock) =
  ## Builds an AutonatV2 service backed by a mock client that always answers
  ## with the requested `reachability` verdict. The three near-identical
  ## case branches of the original were collapsed: the response only ever
  ## differed in `reachability` and `dialStatus` (Ok when Reachable,
  ## EDialError for NotReachable and Unknown alike).
  let dialStatus =
    if reachability == Reachable:
      DialStatus.Ok
    else:
      DialStatus.EDialError
  let client = AutonatV2ClientMock.new(
    AutonatV2Response(
      reachability: reachability,
      dialResp: DialResponse(
        status: ResponseStatus.Ok,
        dialStatus: Opt.some(dialStatus),
        addrIdx: Opt.some(0.AddrIdx),
      ),
    ),
    expectedDials = expectedDials,
  )
  (AutonatV2Service.new(newRng(), client = client, config = config), client)
suite "AutonatV2 Service":
  teardown:
    checkTrackers()

  asyncTest "Reachability unknown before starting switch":
    # Before the service runs, no verdict must be reported.
    let
      (service, client) = newService(NetworkReachability.Reachable)
      switch = createSwitch(service)
    check service.networkReachability == NetworkReachability.Unknown

  asyncTest "Peer must be reachable":
    let
      (service, client) = newService(NetworkReachability.Reachable)
      switch = createSwitch(service)
    var switches = createSwitches(3)
    let awaiter = newFuture[void]()
    proc statusAndConfidenceHandler(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      # Completes once confidence over "Reachable" reaches 0.3.
      if networkReachability == NetworkReachability.Reachable and confidence.isSome() and
          confidence.get() >= 0.3:
        if not awaiter.finished:
          awaiter.complete()
    service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
    await switch.startAndConnect(switches)
    await awaiter
    check service.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_v2_reachability_confidence.value(["Reachable"]) == 0.3
    await switch.stop()
    await switches.stopAll()

  asyncTest "Peer must be not reachable":
    let
      (service, client) = newService(NetworkReachability.NotReachable)
      switch = createSwitch(service)
    var switches = createSwitches(3)
    await switch.startAndConnect(switches)
    # Wait until the mock client has served all expected dials.
    await client.finished
    check service.networkReachability == NetworkReachability.NotReachable
    check libp2p_autonat_v2_reachability_confidence.value(["NotReachable"]) == 0.3
    await switch.stop()
    await switches.stopAll()

  asyncTest "Peer must be not reachable and then reachable":
    let (service, client) = newService(
      NetworkReachability.NotReachable,
      expectedDials = 6,
      config = AutonatV2ServiceConfig.new(scheduleInterval = Opt.some(1.seconds)),
    )
    let switch = createSwitch(service)
    var switches = createSwitches(3)
    let awaiter = newFuture[void]()
    proc statusAndConfidenceHandler(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      # Once NotReachable is confirmed, flip the mock to Reachable so the
      # second round of dials changes the verdict.
      if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and
          confidence.get() >= 0.3:
        if not awaiter.finished:
          client.response = AutonatV2Response(
            reachability: Reachable,
            dialResp: DialResponse(
              status: ResponseStatus.Ok,
              dialStatus: Opt.some(DialStatus.Ok),
              addrIdx: Opt.some(0.AddrIdx),
            ),
            # addrs: Opt.none(MultiAddress), # this will be inferred from sendDialRequest
          )
          awaiter.complete()
    service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
    await switch.startAndConnect(switches)
    await awaiter
    check service.networkReachability == NetworkReachability.NotReachable
    check libp2p_autonat_v2_reachability_confidence.value(["NotReachable"]) == 0.3
    await client.finished
    check service.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_v2_reachability_confidence.value(["Reachable"]) == 0.3
    await switch.stop()
    await switches.stopAll()

  asyncTest "Peer must be reachable when one connected peer has autonat disabled":
    let (service, client) = newService(
      NetworkReachability.Reachable,
      expectedDials = 3,
      config = AutonatV2ServiceConfig.new(
        scheduleInterval = Opt.some(1.seconds), maxQueueSize = 2
      ),
    )
    let switch = createSwitch(service)
    var switches = createSwitches(2)
    # The third peer does not speak AutonatV2 and must simply be skipped.
    switches.add(createSwitch(withAutonat = false))
    let awaiter = newFuture[void]()
    proc statusAndConfidenceHandler(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.Reachable and confidence.isSome() and
          confidence.get() == 1:
        if not awaiter.finished:
          awaiter.complete()
    service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
    await switch.startAndConnect(switches)
    await awaiter
    check service.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_v2_reachability_confidence.value(["Reachable"]) == 1
    await switch.stop()
    await switches.stopAll()

  asyncTest "Unknown answers must be ignored":
    let (service, client) = newService(
      NetworkReachability.NotReachable,
      expectedDials = 6,
      config = AutonatV2ServiceConfig.new(scheduleInterval = Opt.some(1.seconds)),
    )
    let switch = createSwitch(service)
    var switches = createSwitches(3)
    let awaiter = newFuture[void]()
    proc statusAndConfidenceHandler(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      # After NotReachable is confirmed, switch the mock to Unknown answers;
      # the verdict and confidence must stay unchanged.
      if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and
          confidence.get() >= 0.3:
        if not awaiter.finished:
          client.response = AutonatV2Response(
            reachability: Unknown,
            dialResp: DialResponse(
              status: ResponseStatus.Ok,
              dialStatus: Opt.some(DialStatus.EDialError),
              addrIdx: Opt.some(0.AddrIdx),
            ),
          )
          awaiter.complete()
    service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
    await switch.startAndConnect(switches)
    await awaiter
    check service.networkReachability == NetworkReachability.NotReachable
    check libp2p_autonat_v2_reachability_confidence.value(["NotReachable"]) == 0.3
    await client.finished
    check service.networkReachability == NetworkReachability.NotReachable
    check libp2p_autonat_v2_reachability_confidence.value(["NotReachable"]) == 0.3
    await switch.stop()
    await switches.stopAll()

  asyncTest "Calling setup and stop twice must work":
    # setup/stop are idempotent: the second call reports "already done".
    let (service, client) = newService(
      NetworkReachability.NotReachable,
      config = AutonatV2ServiceConfig.new(scheduleInterval = Opt.some(1.seconds)),
    )
    let switch = createSwitch()
    check (await service.setup(switch)) == true
    check (await service.setup(switch)) == false
    check (await service.stop(switch)) == true
    check (await service.stop(switch)) == false
    await switch.stop()

  asyncTest "Must bypass maxConnectionsPerPeer limit":
    let (service, client) = newService(
      NetworkReachability.Reachable,
      config = AutonatV2ServiceConfig.new(
        scheduleInterval = Opt.some(1.seconds), maxQueueSize = 1
      ),
    )
    let switch1 = createSwitch(service, maxConnsPerPeer = 0)
    let switch2 =
      createSwitch(maxConnsPerPeer = 0, nameResolver = MockResolver.default())
    let awaiter = newFuture[void]()
    proc statusAndConfidenceHandler(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.Reachable and confidence.isSome() and
          confidence.get() == 1:
        if not awaiter.finished:
          awaiter.complete()
    service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
    await switch1.start()
    # Advertise a dns4 address that resolves back to switch1's TCP port.
    switch1.peerInfo.addrs.add(
      [
        MultiAddress.init("/dns4/localhost/").tryGet() &
          switch1.peerInfo.addrs[0][1].tryGet()
      ]
    )
    await switch2.start()
    await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)
    await awaiter
    check service.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_v2_reachability_confidence.value(["Reachable"]) == 1
    await allFuturesThrowing(switch1.stop(), switch2.stop())

  asyncTest "Must work when peers ask each other at the same time with max 1 conn per peer":
    let
      (service1, client1) = newService(
        NetworkReachability.Reachable,
        config = AutonatV2ServiceConfig.new(
          scheduleInterval = Opt.some(500.millis), maxQueueSize = 3
        ),
      )
      (service2, client2) = newService(
        NetworkReachability.Reachable,
        config = AutonatV2ServiceConfig.new(
          scheduleInterval = Opt.some(500.millis), maxQueueSize = 3
        ),
      )
      (service3, client3) = newService(
        NetworkReachability.Reachable,
        config = AutonatV2ServiceConfig.new(
          scheduleInterval = Opt.some(500.millis), maxQueueSize = 3
        ),
      )
      switch1 = createSwitch(service1, maxConnsPerPeer = 0)
      switch2 = createSwitch(service2, maxConnsPerPeer = 0)
      # Bug fix: switch3 previously reused service2; it must run service3,
      # which was created for it (and was otherwise unused).
      switch3 = createSwitch(service3, maxConnsPerPeer = 0)
      awaiter1 = newFuture[void]()
      awaiter2 = newFuture[void]()
    proc statusAndConfidenceHandler1(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.Reachable and confidence.isSome() and
          confidence.get() == 1:
        if not awaiter1.finished:
          awaiter1.complete()
    proc statusAndConfidenceHandler2(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.Reachable and confidence.isSome() and
          confidence.get() == 1:
        if not awaiter2.finished:
          awaiter2.complete()
    # Only services 1 and 2 are asserted on; switch3 just adds concurrency.
    service1.setStatusAndConfidenceHandler(statusAndConfidenceHandler1)
    service2.setStatusAndConfidenceHandler(statusAndConfidenceHandler2)
    await switch1.start()
    await switch2.start()
    await switch3.start()
    await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)
    await switch2.connect(switch1.peerInfo.peerId, switch1.peerInfo.addrs)
    await switch2.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs)
    await awaiter1
    await awaiter2
    check service1.networkReachability == NetworkReachability.Reachable
    check service2.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_v2_reachability_confidence.value(["Reachable"]) == 1
    await allFuturesThrowing(switch1.stop(), switch2.stop(), switch3.stop())

  asyncTest "Must work for one peer when two peers ask each other at the same time with max 1 conn per peer":
    let
      (service1, client1) = newService(
        NetworkReachability.Reachable,
        config = AutonatV2ServiceConfig.new(
          scheduleInterval = Opt.some(500.millis), maxQueueSize = 3
        ),
      )
      (service2, client2) = newService(
        NetworkReachability.Reachable,
        config = AutonatV2ServiceConfig.new(
          scheduleInterval = Opt.some(500.millis), maxQueueSize = 3
        ),
      )
    let switch1 = createSwitch(service1, maxConnsPerPeer = 0)
    let switch2 = createSwitch(service2, maxConnsPerPeer = 0)
    let awaiter1 = newFuture[void]()
    proc statusAndConfidenceHandler1(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.Reachable and confidence.isSome() and
          confidence.get() == 1:
        if not awaiter1.finished:
          awaiter1.complete()
    service1.setStatusAndConfidenceHandler(statusAndConfidenceHandler1)
    await switch1.start()
    await switch2.start()
    await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)
    try:
      # We allow a temp conn for the peer to dial us. It could use this conn to just connect to us and not dial.
      # We don't care if it fails at this point or not. But this conn must be closed eventually.
      # Bellow we check that there's only one connection between the peers
      await switch2.connect(
        switch1.peerInfo.peerId, switch1.peerInfo.addrs, reuseConnection = false
      )
    except CatchableError:
      discard
    await awaiter1
    check service1.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_v2_reachability_confidence.value(["Reachable"]) == 1
    # Make sure remote peer can't create a connection to us
    check switch1.connManager.connCount(switch2.peerInfo.peerId) == 1
    await allFuturesThrowing(switch1.stop(), switch2.stop())

  asyncTest "Must work with low maxConnections":
    let (service, client) = newService(
      NetworkReachability.Reachable,
      config = AutonatV2ServiceConfig.new(
        scheduleInterval = Opt.some(1.seconds), maxQueueSize = 1
      ),
    )
    let switch = createSwitch(service, maxConns = 4)
    var switches = createSwitches(4)
    var awaiter = newFuture[void]()
    proc statusAndConfidenceHandler(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.Reachable and confidence.isSome() and
          confidence.get() == 1:
        if not awaiter.finished:
          awaiter.complete()
    service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
    await switch.start()
    await switches.startAll()
    await switch.connect(switches[0].peerInfo.peerId, switches[0].peerInfo.addrs)
    await awaiter
    await switch.connect(switches[2].peerInfo.peerId, switches[2].peerInfo.addrs)
    await switch.connect(switches[3].peerInfo.peerId, switches[3].peerInfo.addrs)
    await switches[3].connect(switch.peerInfo.peerId, switch.peerInfo.addrs)
    # switch1 is now full, should stick to last observation
    awaiter = newFuture[void]()
    await service.run(switch)
    await sleepAsync(100.millis)
    check service.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_v2_reachability_confidence.value(["Reachable"]) == 1
    await switch.stop()
    await switches.stopAll()

  asyncTest "Peer must not ask an incoming peer":
    # switch2 dials switch1; switch1's service must never probe an inbound
    # peer, so the handler firing at all is a failure.
    let (service, client) = newService(
      NetworkReachability.Reachable,
      config = AutonatV2ServiceConfig.new(scheduleInterval = Opt.some(1.seconds)),
    )
    let switch1 = createSwitch(service)
    let switch2 = createSwitch()
    proc statusAndConfidenceHandler(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      fail()
    service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
    await switch1.start()
    await switch2.start()
    await switch2.connect(switch1.peerInfo.peerId, switch1.peerInfo.addrs)
    await sleepAsync(250.milliseconds)
    await allFuturesThrowing(switch1.stop(), switch2.stop())

View File

@@ -31,6 +31,8 @@ import ./helpers
when defined(linux) and defined(amd64):
{.used.}
import ../libp2p/utils/ipaddr
suite "AutoTLS Integration":
asyncTeardown:
checkTrackers()

View File

@@ -1,123 +0,0 @@
import chronos, unittest2, helpers
import
../libp2p/daemon/daemonapi,
../libp2p/multiaddress,
../libp2p/multicodec,
../libp2p/cid,
../libp2p/multihash,
../libp2p/peerid
when defined(nimHasUsed):
{.used.}
proc identitySpawnTest(): Future[bool] {.async.} =
  # Spawns a daemon, queries its identity, and shuts it down.
  var api = await newDaemonApi()
  var data = await api.identity() # result unused — presumably only the call succeeding is tested
  await api.close()
  result = true
proc connectStreamTest(): Future[bool] {.async.} =
  # Two daemons: api2 registers a stream handler for "/test-stream",
  # api1 connects, opens a stream, and writes a CRLF-terminated line that
  # must arrive intact at api2's handler.
  var api1 = await newDaemonApi()
  var api2 = await newDaemonApi()
  var id1 = await api1.identity()
  var id2 = await api2.identity()
  var protos = @["/test-stream"]
  var test = "TEST STRING"
  var testFuture = newFuture[string]("test.future")
  proc streamHandler(
      api: DaemonAPI, stream: P2PStream
  ) {.async: (raises: [CatchableError]).} =
    # Receives a single line from the peer and completes the future with it.
    var line = await stream.transp.readLine()
    testFuture.complete(line)
  await api2.addHandler(protos, streamHandler)
  await api1.connect(id2.peer, id2.addresses)
  # echo await api1.listPeers()
  var stream = await api1.openStream(id2.peer, protos)
  let sent = await stream.transp.write(test & "\r\n")
  doAssert(sent == len(test) + 2) # payload plus the trailing CRLF
  doAssert((await wait(testFuture, 10.seconds)) == test)
  await stream.close()
  await api1.close()
  await api2.close()
  result = true
proc pubsubTest(f: set[P2PDaemonFlags]): Future[bool] {.async.} =
## End-to-end pubsub test: two daemons started with flags `f` subscribe to
## the same topic, one publishes a message, and both subscribers must see
## it. Returns true only when both handlers received the payload.
var pubsubData = "TEST MESSAGE"
var msgData = cast[seq[byte]](pubsubData)
var api1, api2: DaemonAPI
api1 = await newDaemonApi(f)
api2 = await newDaemonApi(f)
var id1 = await api1.identity()
var id2 = await api2.identity()
# Each handler bumps this count and completes its future on a match;
# success requires it to reach 2 (one delivery per subscriber).
var resultsCount = 0
var handlerFuture1 = newFuture[void]()
var handlerFuture2 = newFuture[void]()
proc pubsubHandler1(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async: (raises: [CatchableError]).} =
let smsg = cast[string](message.data)
if smsg == pubsubData:
inc(resultsCount)
handlerFuture1.complete()
# Callback must return `false` to close subscription channel.
result = false
proc pubsubHandler2(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async: (raises: [CatchableError]).} =
let smsg = cast[string](message.data)
if smsg == pubsubData:
inc(resultsCount)
handlerFuture2.complete()
# Callback must return `false` to close subscription channel.
result = false
# Connect the two daemons in both directions before subscribing.
await api1.connect(id2.peer, id2.addresses)
await api2.connect(id1.peer, id1.addresses)
var ticket1 = await api1.pubsubSubscribe("test-topic", pubsubHandler1)
var ticket2 = await api2.pubsubSubscribe("test-topic", pubsubHandler2)
# Give the subscriptions time to propagate through the mesh.
await sleepAsync(1.seconds)
var topics1 = await api1.pubsubGetTopics()
var topics2 = await api2.pubsubGetTopics()
# Only publish once each side sees the topic and one peer on it;
# otherwise fall through and return false.
if len(topics1) == 1 and len(topics2) == 1:
var peers1 = await api1.pubsubListPeers("test-topic")
var peers2 = await api2.pubsubListPeers("test-topic")
if len(peers1) == 1 and len(peers2) == 1:
# Publish test data via api1.
await sleepAsync(250.milliseconds)
await api1.pubsubPublish("test-topic", msgData)
# Wait at most 5 seconds for both deliveries; `res` itself is unused —
# the success criterion is resultsCount below.
var res =
await one(allFutures(handlerFuture1, handlerFuture2), sleepAsync(5.seconds))
await api1.close()
await api2.close()
if resultsCount == 2:
result = true
# Tests driving the libp2p daemon through the Nim DaemonAPI bindings.
suite "libp2p-daemon test suite":
test "Simple spawn and get identity test":
check:
waitFor(identitySpawnTest()) == true
test "Connect/Accept peer/stream test":
check:
waitFor(connectStreamTest()) == true
# Pubsub meshes can take a moment to form, so retry until the timeout.
asyncTest "GossipSub test":
checkUntilTimeoutCustom(10.seconds, 100.milliseconds):
(await pubsubTest({PSGossipSub}))
asyncTest "FloodSub test":
checkUntilTimeoutCustom(10.seconds, 100.milliseconds):
(await pubsubTest({PSFloodSub}))

View File

@@ -1,58 +0,0 @@
{.used.}
import helpers, commoninterop
import ../libp2p
import ../libp2p/crypto/crypto, ../libp2p/protocols/connectivity/relay/relay
proc switchMplexCreator(
    ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
    prov = proc(config: TransportConfig): Transport =
      TcpTransport.new({}, config.upgr),
    relay: Relay = Relay.new(circuitRelayV1 = true),
): Switch {.raises: [LPError].} =
  ## Builds the TCP switch used by the interop tests with mplex muxing,
  ## Noise security and circuit relay v1 support.
  var b = SwitchBuilder.new()
  b = b.withSignedPeerRecord(false)
  b = b.withMaxConnections(MaxConnections)
  b = b.withRng(crypto.newRng())
  b = b.withAddresses(@[ma])
  b = b.withMaxIn(-1)
  b = b.withMaxOut(-1)
  b = b.withTransport(prov)
  b = b.withMplex()
  b = b.withMaxConnsPerPeer(MaxConnectionsPerPeer)
  b = b.withPeerStore(capacity = 1000)
  b = b.withNoise()
  b = b.withCircuitRelay(relay)
  b = b.withNameResolver(nil)
  b.build()
proc switchYamuxCreator(
    ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
    prov = proc(config: TransportConfig): Transport =
      TcpTransport.new({}, config.upgr),
    relay: Relay = Relay.new(circuitRelayV1 = true),
): Switch {.raises: [LPError].} =
  ## Builds the TCP switch used by the interop tests with yamux muxing,
  ## Noise security and circuit relay v1 support.
  var b = SwitchBuilder.new()
  b = b.withSignedPeerRecord(false)
  b = b.withMaxConnections(MaxConnections)
  b = b.withRng(crypto.newRng())
  b = b.withAddresses(@[ma])
  b = b.withMaxIn(-1)
  b = b.withMaxOut(-1)
  b = b.withTransport(prov)
  b = b.withYamux()
  b = b.withMaxConnsPerPeer(MaxConnectionsPerPeer)
  b = b.withPeerStore(capacity = 1000)
  b = b.withNoise()
  b = b.withCircuitRelay(relay)
  b = b.withNameResolver(nil)
  b.build()
suite "Tests interop":
# Run the shared interop test batteries once per stream muxer.
commonInteropTests("mplex", switchMplexCreator)
relayInteropTests("mplex", switchMplexCreator)
commonInteropTests("yamux", switchYamuxCreator)
relayInteropTests("yamux", switchYamuxCreator)

View File

@@ -123,7 +123,7 @@ const
"/ip4/127.0.0.1/ipfs", "/ip4/127.0.0.1/ipfs/tcp", "/p2p-circuit/50",
]
PathVectors = ["/unix/tmp/p2pd.sock", "/unix/a/b/c/d/e/f/g/h/i.sock"]
PathVectors = ["/unix/a/b/c/d/e/f/g/h/i.sock"]
PathExpects = [
"90030E2F746D702F703270642E736F636B",

View File

@@ -31,9 +31,9 @@ import
testnameresolve, testmultistream, testbufferstream, testidentify,
testobservedaddrmanager, testconnmngr, testswitch, testnoise, testpeerinfo,
testpeerstore, testping, testmplex, testrelayv1, testrelayv2, testyamux,
testyamuxheader, testautonat, testautonatservice, testautonatv2, testautorelay,
testdcutr, testhpservice, testutility, testhelpers, testwildcardresolverservice,
testperf
testyamuxheader, testautonat, testautonatservice, testautonatv2, testautonatv2service,
testautorelay, testdcutr, testhpservice, testutility, testhelpers,
testwildcardresolverservice, testperf
import discovery/testdiscovery
@@ -41,3 +41,9 @@ import kademlia/[testencoding, testroutingtable, testfindnode, testputval]
when defined(libp2p_autotls_support):
import testautotls
import
mix/[
testcrypto, testcurve25519, testtagmanager, testseqnogenerator, testserialization,
testmixmessage, testsphinx, testmultiaddr, testfragmentation, testmixnode,
]

View File

@@ -60,15 +60,19 @@ proc invalidCertGenerator(
except ResultError[crypto.CryptoError]:
raiseAssert "private key should be set"
proc createTransport(withInvalidCert: bool = false): Future[QuicTransport] {.async.} =
let ma = @[MultiAddress.init("/ip4/127.0.0.1/udp/0/quic-v1").tryGet()]
proc createTransport(
isServer: bool = false, withInvalidCert: bool = false
): Future[QuicTransport] {.async.} =
let privateKey = PrivateKey.random(ECDSA, (newRng())[]).tryGet()
let trans =
if withInvalidCert:
QuicTransport.new(Upgrade(), privateKey, invalidCertGenerator)
else:
QuicTransport.new(Upgrade(), privateKey)
await trans.start(ma)
if isServer: # servers are started because they need to listen
let ma = @[MultiAddress.init("/ip4/127.0.0.1/udp/0/quic-v1").tryGet()]
await trans.start(ma)
return trans
@@ -77,12 +81,46 @@ suite "Quic transport":
checkTrackers()
asyncTest "can handle local address":
let trans = await createTransport()
check trans.handles(trans.addrs[0])
await trans.stop()
let server = await createTransport(isServer = true)
check server.handles(server.addrs[0])
await server.stop()
asyncTest "handle accept cancellation":
# Cancelling a pending accept() must mark the future cancelled and
# leave the transport stoppable without error.
let server = await createTransport(isServer = true)
let acceptFut = server.accept()
await acceptFut.cancelAndWait()
check acceptFut.cancelled
await server.stop()
asyncTest "handle dial cancellation":
# Cancelling an in-flight dial must not wedge either transport;
# both must still stop cleanly afterwards.
let server = await createTransport(isServer = true)
let client = await createTransport(isServer = false)
let connFut = client.dial(server.addrs[0])
await connFut.cancelAndWait()
check connFut.cancelled
await client.stop()
await server.stop()
asyncTest "stopping transport kills connections":
# Stopping the transports must close the connections they produced,
# on the dialing side as well as the accepting side.
let server = await createTransport(isServer = true)
let client = await createTransport(isServer = false)
let acceptFut = server.accept()
let conn = await client.dial(server.addrs[0])
let serverConn = await acceptFut
await client.stop()
await server.stop()
check serverConn.closed()
check conn.closed()
asyncTest "transport e2e":
let server = await createTransport()
let server = await createTransport(isServer = true)
asyncSpawn createServerAcceptConn(server)()
defer:
await server.stop()
@@ -101,7 +139,7 @@ suite "Quic transport":
await runClient()
asyncTest "transport e2e - invalid cert - server":
let server = await createTransport(true)
let server = await createTransport(isServer = true, withInvalidCert = true)
asyncSpawn createServerAcceptConn(server)()
defer:
await server.stop()
@@ -115,22 +153,27 @@ suite "Quic transport":
await runClient()
asyncTest "transport e2e - invalid cert - client":
let server = await createTransport()
let server = await createTransport(isServer = true)
asyncSpawn createServerAcceptConn(server)()
defer:
await server.stop()
proc runClient() {.async.} =
let client = await createTransport(true)
let client = await createTransport(withInvalidCert = true)
expect QuicTransportDialError:
discard await client.dial("", server.addrs[0])
await client.stop()
await runClient()
asyncTest "should allow multiple local addresses":
# TODO(#1663): handle multiple addr
# See test example in commonTransportTest
return
asyncTest "server not accepting":
let server = await createTransport()
# intentionally not calling createServerAcceptConn as server should not accept
let server = await createTransport(isServer = true)
# intentionally not calling createServerAcceptConn as server should not accept
defer:
await server.stop()
@@ -145,7 +188,7 @@ suite "Quic transport":
await runClient()
asyncTest "closing session should close all streams":
let server = await createTransport()
let server = await createTransport(isServer = true)
# because some clients will not write full message,
# it is expected for server to receive eof
asyncSpawn createServerAcceptConn(server, true)()
@@ -201,7 +244,7 @@ suite "Quic transport":
check (await stream.readLp(100)) == fromHex("5678")
await client.stop()
let server = await createTransport()
let server = await createTransport(isServer = true)
asyncSpawn serverHandler(server)
defer:
await server.stop()

View File

@@ -28,7 +28,6 @@ import
builders,
upgrademngrs/upgrade,
varint,
daemon/daemonapi,
]
import ./helpers

View File

@@ -25,6 +25,7 @@ import
builders,
protocols/ping,
wire,
utils/ipaddr,
]
from ./helpers import suite, asyncTest, asyncTeardown, checkTrackers, skip, check