Compare commits

...

22 Commits

Author SHA1 Message Date
Radosław Kamiński
5f6b8e86a5 test(rendezvous): error cases (#1683) 2025-09-22 15:53:47 +01:00
richΛrd
11b98b7a3f fix: add missing import (#1707) 2025-09-22 13:38:32 +00:00
richΛrd
647f76341e feat(mix): mix_protocol and entry connection (#1703) 2025-09-22 13:09:18 +00:00
Radosław Kamiński
fbf96bb2ce chore(readme): Update chat code example link (#1709) 2025-09-22 13:28:03 +01:00
richΛrd
f0aaecb743 feat(mix): mixnode (#1702) 2025-09-21 16:46:48 +00:00
richΛrd
8d3076ea99 fix: add missing import for linux/amd64 daily job (#1706) 2025-09-20 18:39:12 -04:00
richΛrd
70b7d61436 feat(mix): SURBs and fragmentation (#1700) 2025-09-19 15:56:26 -04:00
Gabriel Cruz
37bae0986c chore: remove go daemon (#1705) 2025-09-19 15:21:23 -04:00
Gabriel Cruz
b34ddab10c chore(autonat-v2): add interop tests (#1695) 2025-09-18 14:30:56 +00:00
richΛrd
e09457da12 feat(mix): sphinx (#1691) 2025-09-18 09:04:44 -04:00
vladopajic
94ad1dcbc8 chore: nimble config tidy (#1696) 2025-09-17 18:10:07 +00:00
Gabriel Cruz
5b9f2cba6f fix: autonatV2 request addresses (#1698) 2025-09-17 13:38:03 -03:00
richΛrd
59e7069c15 feat: v1.13.0 (#1673) 2025-09-17 08:18:53 -04:00
richΛrd
18a0e9c2d1 feat(mix): message (#1690) 2025-09-16 10:49:49 -04:00
richΛrd
34a9a03b73 feat(mix): serialization (#1689) 2025-09-16 14:12:19 +00:00
Gabriel Cruz
788109b4f4 fix(autonat-v2): service setting up correctly (#1694) 2025-09-16 09:10:12 +00:00
vladopajic
44aab92b3e chore(quic): better error handling in stream.write() (#1693) 2025-09-16 08:35:57 +00:00
richΛrd
ad0812b40b feat(mix): sequence number generator and tag manager (#1688) 2025-09-15 16:49:34 -03:00
richΛrd
0751f240a2 feat(mix): crypto (#1687) 2025-09-15 16:46:28 +00:00
richΛrd
d8ecf8a135 chore: add missing import (#1692) 2025-09-15 10:04:07 -04:00
Gabriel Cruz
bab863859c chore: add autonatv2 service to builder (#1686) 2025-09-12 21:23:03 +02:00
Gabriel Cruz
73d04def6f feat: add autonat v2 service (#1684) 2025-09-12 08:26:33 -04:00
72 changed files with 4961 additions and 3118 deletions

View File

@@ -72,15 +72,6 @@ jobs:
shell: ${{ matrix.shell }}
nim_ref: ${{ matrix.nim.ref }}
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '~1.16.0' # That's the minimum Go version that works with arm.
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v3

View File

@@ -69,16 +69,6 @@ jobs:
nim_ref: ${{ matrix.nim.ref }}
cpu: ${{ matrix.cpu }}
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '~1.16.0'
cache: false
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Install dependencies (pinned)
if: ${{ inputs.pinned_deps }}
run: |

View File

@@ -60,3 +60,30 @@ jobs:
# s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
# s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
# aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
run-autonatv2-interop:
name: Run AutoNATv2 interoperability tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.25"
- name: Set up Nim
uses: jiro4989/setup-nim-action@v1
with:
nim-version: "stable"
- name: Run Go and Nim together
run: |
nimble install
cd interop/autonatv2/go-peer
git clone https://github.com/libp2p/go-libp2p
cd go-libp2p
git apply ../disable-filtering-of-private-ip-addresses.patch
cd ..
go run testautonatv2.go &
cd ../nim-peer
nim r src/nim_peer.nim $(cat ../go-peer/peer.id)

View File

@@ -47,7 +47,7 @@ nimble install libp2p
You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/). See [examples](./examples) for simple usage patterns.
## Getting Started
Try out the chat example. For this you'll need to have [`go-libp2p-daemon`](examples/go-daemon/daemonapi.md) running. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
Try out the chat example. Full code can be found [here](https://github.com/vacp2p/nim-libp2p/blob/master/examples/directchat.nim):
```bash
nim c -r --threads:on examples/directchat.nim
@@ -81,12 +81,6 @@ Run unit tests:
# run all the unit tests
nimble test
```
**Obs:** Running all tests requires the [`go-libp2p-daemon` to be installed and running](examples/go-daemon/daemonapi.md).
If you only want to run tests that don't require `go-libp2p-daemon`, use:
```
nimble testnative
```
For a list of all available test suites, use:
```
@@ -155,8 +149,6 @@ List of packages and modules implemented in nim-libp2p:
| [connmanager](libp2p/connmanager.nim) | Connection manager |
| [identify / push identify](libp2p/protocols/identify.nim) | [Identify](https://docs.libp2p.io/concepts/fundamentals/protocols/#identify) protocol |
| [ping](libp2p/protocols/ping.nim) | [Ping](https://docs.libp2p.io/concepts/fundamentals/protocols/#ping) protocol |
| [libp2p-daemon-client](libp2p/daemon/daemonapi.nim) | [go-daemon](https://github.com/libp2p/go-libp2p-daemon) nim wrapper |
| [interop-libp2p](tests/testinterop.nim) | Interop tests |
| **Transports** | |
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |

View File

@@ -5,12 +5,13 @@ if dirExists("nimbledeps/pkgs2"):
switch("NimblePath", "nimbledeps/pkgs2")
switch("warningAsError", "UnusedImport:on")
switch("warningAsError", "UseBase:on")
switch("warning", "CaseTransition:off")
switch("warning", "ObservableStores:off")
switch("warning", "LockLevel:off")
--styleCheck:
usages
switch("warningAsError", "UseBase:on")
--styleCheck:
error
--mm:
@@ -23,7 +24,7 @@ if defined(windows) and not defined(vcc):
--define:
nimRawSetjmp
# begin Nimble config (version 1)
when fileExists("nimble.paths"):
# begin Nimble config (version 2)
when withDir(thisDir(), system.fileExists("nimble.paths")):
include "nimble.paths"
# end Nimble config

View File

@@ -1,54 +0,0 @@
import chronos, nimcrypto, strutils
import ../../libp2p/daemon/daemonapi
import ../hexdump
const PubSubTopic = "test-net"
proc dumpSubscribedPeers(api: DaemonAPI) {.async.} =
var peers = await api.pubsubListPeers(PubSubTopic)
echo "= List of connected and subscribed peers:"
for item in peers:
echo item.pretty()
proc dumpAllPeers(api: DaemonAPI) {.async.} =
var peers = await api.listPeers()
echo "Current connected peers count = ", len(peers)
for item in peers:
echo item.peer.pretty()
proc monitor(api: DaemonAPI) {.async.} =
while true:
echo "Dumping all peers"
await dumpAllPeers(api)
await sleepAsync(5000)
proc main() {.async.} =
echo "= Starting P2P bootnode"
var api = await newDaemonApi({DHTFull, PSGossipSub})
var id = await api.identity()
echo "= P2P bootnode ", id.peer.pretty(), " started."
let mcip4 = multiCodec("ip4")
let mcip6 = multiCodec("ip6")
echo "= You can use one of this addresses to bootstrap your nodes:"
for item in id.addresses:
if item.protoCode() == mcip4 or item.protoCode() == mcip6:
echo $item & "/ipfs/" & id.peer.pretty()
asyncSpawn monitor(api)
proc pubsubLogger(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async.} =
let msglen = len(message.data)
echo "= Recieved pubsub message with length ",
msglen, " bytes from peer ", message.peer.pretty()
echo dumpHex(message.data)
await api.dumpSubscribedPeers()
result = true
var ticket = await api.pubsubSubscribe(PubSubTopic, pubsubLogger)
when isMainModule:
waitFor(main())
while true:
poll()

View File

@@ -1,132 +0,0 @@
import chronos, nimcrypto, strutils
import ../../libp2p/daemon/daemonapi
## nim c -r --threads:on chat.nim
when not (compileOption("threads")):
{.fatal: "Please, compile this program with the --threads:on option!".}
const ServerProtocols = @["/test-chat-stream"]
type CustomData = ref object
api: DaemonAPI
remotes: seq[StreamTransport]
consoleFd: AsyncFD
serveFut: Future[void]
proc threadMain(wfd: AsyncFD) {.thread.} =
## This procedure performs reading from `stdin` and sends data over
## pipe to main thread.
var transp = fromPipe(wfd)
while true:
var line = stdin.readLine()
let res = waitFor transp.write(line & "\r\n")
proc serveThread(udata: CustomData) {.async.} =
## This procedure perform reading on pipe and sends data to remote clients.
var transp = fromPipe(udata.consoleFd)
proc remoteReader(transp: StreamTransport) {.async.} =
while true:
var line = await transp.readLine()
if len(line) == 0:
break
echo ">> ", line
while true:
try:
var line = await transp.readLine()
if line.startsWith("/connect"):
var parts = line.split(" ")
if len(parts) == 2:
var peerId = PeerId.init(parts[1])
var address = MultiAddress.init(multiCodec("p2p-circuit"))
address &= MultiAddress.init(multiCodec("p2p"), peerId)
echo "= Searching for peer ", peerId.pretty()
var id = await udata.api.dhtFindPeer(peerId)
echo "= Peer " & parts[1] & " found at addresses:"
for item in id.addresses:
echo $item
echo "= Connecting to peer ", $address
await udata.api.connect(peerId, @[address], 30)
echo "= Opening stream to peer chat ", parts[1]
var stream = await udata.api.openStream(peerId, ServerProtocols)
udata.remotes.add(stream.transp)
echo "= Connected to peer chat ", parts[1]
asyncSpawn remoteReader(stream.transp)
elif line.startsWith("/search"):
var parts = line.split(" ")
if len(parts) == 2:
var peerId = PeerId.init(parts[1])
echo "= Searching for peer ", peerId.pretty()
var id = await udata.api.dhtFindPeer(peerId)
echo "= Peer " & parts[1] & " found at addresses:"
for item in id.addresses:
echo $item
elif line.startsWith("/consearch"):
var parts = line.split(" ")
if len(parts) == 2:
var peerId = PeerId.init(parts[1])
echo "= Searching for peers connected to peer ", parts[1]
var peers = await udata.api.dhtFindPeersConnectedToPeer(peerId)
echo "= Found ", len(peers), " connected to peer ", parts[1]
for item in peers:
var peer = item.peer
var addresses = newSeq[string]()
var relay = false
for a in item.addresses:
addresses.add($a)
if a.protoName() == "/p2p-circuit":
relay = true
break
if relay:
echo peer.pretty(), " * ", " [", addresses.join(", "), "]"
else:
echo peer.pretty(), " [", addresses.join(", "), "]"
elif line.startsWith("/exit"):
break
else:
var msg = line & "\r\n"
echo "<< ", line
var pending = newSeq[Future[int]]()
for item in udata.remotes:
pending.add(item.write(msg))
if len(pending) > 0:
var results = await all(pending)
except CatchableError as err:
echo err.msg
proc main() {.async.} =
var data = new CustomData
data.remotes = newSeq[StreamTransport]()
var (rfd, wfd) = createAsyncPipe()
if rfd == asyncInvalidPipe or wfd == asyncInvalidPipe:
raise newException(ValueError, "Could not initialize pipe!")
data.consoleFd = rfd
data.serveFut = serveThread(data)
var thread: Thread[AsyncFD]
thread.createThread(threadMain, wfd)
echo "= Starting P2P node"
data.api = await newDaemonApi({DHTFull, Bootstrap})
await sleepAsync(3000)
var id = await data.api.identity()
proc streamHandler(api: DaemonAPI, stream: P2PStream) {.async.} =
echo "= Peer ", stream.peer.pretty(), " joined chat"
data.remotes.add(stream.transp)
while true:
var line = await stream.transp.readLine()
if len(line) == 0:
break
echo ">> ", line
await data.api.addHandler(ServerProtocols, streamHandler)
echo "= Your PeerId is ", id.peer.pretty()
await data.serveFut
when isMainModule:
waitFor(main())

View File

@@ -1,43 +0,0 @@
# Table of Contents
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Script](#script)
- [Examples](#examples)
# Introduction
This is a libp2p-backed daemon wrapping the functionalities of go-libp2p for use in Nim. <br>
For more information about the go daemon, check out [this repository](https://github.com/libp2p/go-libp2p-daemon).
> **Required only** for running the tests.
# Prerequisites
Go version `1.16.0`
> You will *likely* be able to build `go-libp2p-daemon` with other Go versions, but **those versions haven't been tested**.
# Installation
Run the build script while having the `go` command pointing to the correct Go version.
```sh
./scripts/build_p2pd.sh
```
`build_p2pd.sh` will not rebuild unless needed. If you already have the newest binary and you want to force the rebuild, use:
```sh
./scripts/build_p2pd.sh -f
```
Or:
```sh
./scripts/build_p2pd.sh --force
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the `$GOPATH/bin` directory.
If you're having issues, head into [our discord](https://discord.com/channels/864066763682218004/1115526869769535629) and ask for assistance.
After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
```sh
export PATH="$PATH:$HOME/go/bin"
```
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.
# Examples
Examples can be found in the [examples folder](https://github.com/status-im/nim-libp2p/tree/readme/examples/go-daemon)

View File

@@ -1,46 +0,0 @@
import chronos, nimcrypto, strutils, os
import ../../libp2p/daemon/daemonapi
const PubSubTopic = "test-net"
proc main(bn: string) {.async.} =
echo "= Starting P2P node"
var bootnodes = bn.split(",")
var api = await newDaemonApi(
{DHTFull, PSGossipSub, WaitBootstrap}, bootstrapNodes = bootnodes, peersRequired = 1
)
var id = await api.identity()
echo "= P2P node ", id.peer.pretty(), " started:"
for item in id.addresses:
echo item
proc pubsubLogger(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async.} =
let msglen = len(message.data)
echo "= Recieved pubsub message with length ",
msglen, " bytes from peer ", message.peer.pretty(), ": "
var strdata = cast[string](message.data)
echo strdata
result = true
var ticket = await api.pubsubSubscribe(PubSubTopic, pubsubLogger)
# Waiting for gossipsub interval
while true:
var peers = await api.pubsubListPeers(PubSubTopic)
if len(peers) > 0:
break
await sleepAsync(1000)
var data = "HELLO\r\n"
var msgData = cast[seq[byte]](data)
await api.pubsubPublish(PubSubTopic, msgData)
when isMainModule:
if paramCount() != 1:
echo "Please supply bootnodes!"
else:
waitFor(main(paramStr(1)))
while true:
poll()

View File

@@ -0,0 +1,76 @@
From 29bac4cd8f28abfb9efb481d800b7c2e855d9b03 Mon Sep 17 00:00:00 2001
From: Gabriel Cruz <gabe@gmelodie.com>
Date: Wed, 17 Sep 2025 10:42:14 -0300
Subject: [PATCH] disable filtering of private ip addresses
---
p2p/protocol/autonatv2/autonat.go | 24 +-----------------------
p2p/protocol/autonatv2/server.go | 9 ++++++---
2 files changed, 7 insertions(+), 26 deletions(-)
diff --git a/p2p/protocol/autonatv2/autonat.go b/p2p/protocol/autonatv2/autonat.go
index 24883052..00a6211f 100644
--- a/p2p/protocol/autonatv2/autonat.go
+++ b/p2p/protocol/autonatv2/autonat.go
@@ -16,7 +16,6 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
logging "github.com/libp2p/go-libp2p/gologshim"
ma "github.com/multiformats/go-multiaddr"
- manet "github.com/multiformats/go-multiaddr/net"
)
const (
@@ -180,21 +179,7 @@ func (an *AutoNAT) Close() {
// GetReachability makes a single dial request for checking reachability for requested addresses
func (an *AutoNAT) GetReachability(ctx context.Context, reqs []Request) (Result, error) {
var filteredReqs []Request
- if !an.allowPrivateAddrs {
- filteredReqs = make([]Request, 0, len(reqs))
- for _, r := range reqs {
- if manet.IsPublicAddr(r.Addr) {
- filteredReqs = append(filteredReqs, r)
- } else {
- log.Error("private address in reachability check", "address", r.Addr)
- }
- }
- if len(filteredReqs) == 0 {
- return Result{}, ErrPrivateAddrs
- }
- } else {
- filteredReqs = reqs
- }
+ filteredReqs = reqs
an.mx.Lock()
now := time.Now()
var p peer.ID
@@ -215,13 +200,6 @@ func (an *AutoNAT) GetReachability(ctx context.Context, reqs []Request) (Result,
log.Debug("reachability check failed", "peer", p, "err", err)
return res, fmt.Errorf("reachability check with %s failed: %w", p, err)
}
- // restore the correct index in case we'd filtered private addresses
- for i, r := range reqs {
- if r.Addr.Equal(res.Addr) {
- res.Idx = i
- break
- }
- }
log.Debug("reachability check successful", "peer", p)
return res, nil
}
diff --git a/p2p/protocol/autonatv2/server.go b/p2p/protocol/autonatv2/server.go
index 167d3d8e..e6d1e492 100644
--- a/p2p/protocol/autonatv2/server.go
+++ b/p2p/protocol/autonatv2/server.go
@@ -196,9 +197,6 @@ func (as *server) serveDialRequest(s network.Stream) EventDialRequestCompleted {
if err != nil {
continue
}
- if !as.allowPrivateAddrs && !manet.IsPublicAddr(a) {
- continue
- }
if !as.dialerHost.Network().CanDial(p, a) {
continue
}
--
2.51.0

View File

@@ -0,0 +1,97 @@
module go-peer
go 1.25.1
require github.com/libp2p/go-libp2p v0.43.0
replace github.com/libp2p/go-libp2p => ./go-libp2p
require (
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
github.com/ipfs/go-log/v2 v2.6.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/koron/go-ssdp v0.0.6 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.2.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-netroute v0.2.2 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/miekg/dns v1.1.66 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr v0.16.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.9.1 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.6.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pion/datachannel v1.5.10 // indirect
github.com/pion/dtls/v2 v2.2.12 // indirect
github.com/pion/dtls/v3 v3.0.6 // indirect
github.com/pion/ice/v4 v4.0.10 // indirect
github.com/pion/interceptor v0.1.40 // indirect
github.com/pion/logging v0.2.3 // indirect
github.com/pion/mdns/v2 v2.0.7 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/pion/rtcp v1.2.15 // indirect
github.com/pion/rtp v1.8.19 // indirect
github.com/pion/sctp v1.8.39 // indirect
github.com/pion/sdp/v3 v3.0.13 // indirect
github.com/pion/srtp/v3 v3.0.6 // indirect
github.com/pion/stun v0.6.1 // indirect
github.com/pion/stun/v3 v3.0.0 // indirect
github.com/pion/transport/v2 v2.2.10 // indirect
github.com/pion/transport/v3 v3.0.7 // indirect
github.com/pion/turn/v4 v4.0.2 // indirect
github.com/pion/webrtc/v4 v4.1.2 // indirect
github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.64.0 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.54.0 // indirect
github.com/quic-go/webtransport-go v0.9.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/wlynxg/anet v0.0.5 // indirect
go.uber.org/dig v1.19.0 // indirect
go.uber.org/fx v1.24.0 // indirect
go.uber.org/mock v0.5.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.39.0 // indirect
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
golang.org/x/mod v0.25.0 // indirect
golang.org/x/net v0.41.0 // indirect
golang.org/x/sync v0.15.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/text v0.26.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.34.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
)

View File

@@ -0,0 +1,441 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg=
github.com/ipfs/go-log/v2 v2.6.0/go.mod h1:p+Efr3qaY5YXpx9TX7MoLCSEZX5boSWj9wh86P5HJa8=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU=
github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=

View File

@@ -0,0 +1 @@
12D3KooWSnUDxXeeEnerD1Wf35R5b8bjTMzdAz838aDUUY8GJPGa

View File

@@ -0,0 +1,2 @@
@i
(>%ËÁø‡®PM”ܘXE~§|# õ“ýºØ®ü\íÇØ¬åsqzïÔDSݺvöLË(± Úð…•(×

View File

@@ -0,0 +1,97 @@
package main
import (
"crypto/rand"
"fmt"
"io/ioutil"
"log"
"os"
libp2p "github.com/libp2p/go-libp2p"
crypto "github.com/libp2p/go-libp2p/core/crypto"
peer "github.com/libp2p/go-libp2p/core/peer"
)
const (
// privKeyFile holds the marshalled libp2p private key (secret material, written 0600).
privKeyFile = "peer.key"
// peerIDFile holds the peer ID in its canonical string form.
peerIDFile = "peer.id"
)
// loadOrCreateIdentity returns the node's libp2p identity, persisted on disk
// so the peer keeps a stable peer ID across runs.
//
// When privKeyFile exists the key is loaded and the peer ID is re-derived from
// its public half; the key is the single source of truth, so a deleted, stale,
// or hand-edited peer.id file can no longer break startup or desync the
// identity (the original trusted peerIDFile blindly). Otherwise a fresh
// Ed25519 keypair is generated and both files are written.
func loadOrCreateIdentity() (crypto.PrivKey, peer.ID, error) {
	if _, err := os.Stat(privKeyFile); err == nil {
		// Load private key
		data, err := ioutil.ReadFile(privKeyFile)
		if err != nil {
			return nil, "", fmt.Errorf("failed to read private key: %w", err)
		}
		priv, err := crypto.UnmarshalPrivateKey(data)
		if err != nil {
			return nil, "", fmt.Errorf("failed to unmarshal private key: %w", err)
		}
		// Derive the peer ID from the key instead of reading peerIDFile:
		// this also works when peer.id is missing or inconsistent.
		pid, err := peer.IDFromPublicKey(priv.GetPublic())
		if err != nil {
			return nil, "", fmt.Errorf("failed to derive peer ID: %w", err)
		}
		return priv, pid, nil
	}
	// Create new keypair
	priv, pub, err := crypto.GenerateEd25519Key(rand.Reader)
	if err != nil {
		return nil, "", fmt.Errorf("failed to generate keypair: %w", err)
	}
	pid, err := peer.IDFromPublicKey(pub)
	if err != nil {
		return nil, "", fmt.Errorf("failed to derive peer ID: %w", err)
	}
	// Save private key (0600: secret material)
	privBytes, err := crypto.MarshalPrivateKey(priv)
	if err != nil {
		return nil, "", fmt.Errorf("failed to marshal private key: %w", err)
	}
	if err := ioutil.WriteFile(privKeyFile, privBytes, 0600); err != nil {
		return nil, "", fmt.Errorf("failed to write private key: %w", err)
	}
	// Save peer ID in canonical string form (informational; the key is authoritative)
	if err := ioutil.WriteFile(peerIDFile, []byte(pid.String()), 0644); err != nil {
		return nil, "", fmt.Errorf("failed to write peer ID: %w", err)
	}
	return priv, pid, nil
}
// main loads (or creates) a persistent identity, starts a libp2p host with
// AutoNATv2 enabled listening on TCP port 4040 (IPv4 and IPv6), prints its
// details, and then blocks forever.
func main() {
	priv, pid, err := loadOrCreateIdentity()
	if err != nil {
		log.Fatalf("Identity setup failed: %v", err)
	}

	host, err := libp2p.New(
		libp2p.Identity(priv),
		libp2p.EnableAutoNATv2(),
		libp2p.ListenAddrStrings(
			"/ip4/0.0.0.0/tcp/4040",
			"/ip6/::/tcp/4040",
		),
	)
	if err != nil {
		log.Fatalf("Failed to create host: %v", err)
	}
	defer host.Close()

	fmt.Println("Peer ID:", pid.String())
	fmt.Println("Listen addresses:", host.Addrs())
	fmt.Println("AutoNATv2 client started.")

	// Park the main goroutine; the host keeps serving in the background.
	select {}
}

View File

@@ -0,0 +1,4 @@
# begin Nimble config (version 2)
when withDir(thisDir(), system.fileExists("nimble.paths")):
include "nimble.paths"
# end Nimble config

View File

@@ -0,0 +1,10 @@
# Package definition for the AutoNATv2 interop test peer.
version = "0.1.0"
# Fix: author string was missing the trailing "H" of "GmbH"
# (matches the company name used elsewhere in the repository).
author = "Status Research & Development GmbH"
description = "AutoNATv2 peer for interop testing"
license = "MIT"
srcDir = "src"
bin = @["nim_peer"]
# Dependencies
requires "nim >= 2.3.1", "libp2p"

View File

@@ -0,0 +1,64 @@
import net, os, chronos, libp2p
import libp2p/protocols/connectivity/autonatv2/service
import libp2p/protocols/connectivity/autonatv2/types
proc waitForService(
    host: string, port: Port, retries: int = 20, delay: Duration = 500.milliseconds
): Future[bool] {.async.} =
  ## Polls `host:port` with a plain TCP connect until the service accepts a
  ## connection, retrying up to `retries` times with `delay` between attempts.
  ## Returns true once a connect succeeds, false if all retries are exhausted.
  for i in 0 ..< retries:
    let s = newSocket()
    try:
      s.connect(host, port)
      return true
    except OSError:
      # Service not up yet; fall through and retry after the delay.
      discard
    finally:
      # Always release the socket: the original leaked it whenever
      # connect raised OSError.
      s.close()
    await sleepAsync(delay)
  return false
proc main() {.async.} =
## Entry point: dials the Go peer, enables AutoNATv2 client+server+service,
## then waits until a reachability verdict with confidence >= 0.3 is reported
## and prints it.
if paramCount() != 1:
quit("Usage: nim r src/nim_peer.nim <peerid>", 1)
# ensure go peer is started
await sleepAsync(3.seconds)
let dstPeerId = PeerId.init(paramStr(1)).get()
# 1-second schedule interval so the service probes quickly during the test.
var src = SwitchBuilder
.new()
.withRng(newRng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/3030").tryGet()])
.withAutonatV2Server()
.withAutonatV2(
serviceConfig = AutonatV2ServiceConfig.new(scheduleInterval = Opt.some(1.seconds))
)
.withTcpTransport()
.withYamux()
.withNoise()
.build()
let awaiter = newFuture[void]()
# Completes `awaiter` once a non-Unknown verdict reaches 0.3 confidence
# (0.3 is the service's default minConfidence).
proc statusAndConfidenceHandler(
networkReachability: NetworkReachability, confidence: Opt[float]
) {.async: (raises: [CancelledError]).} =
if networkReachability != NetworkReachability.Unknown and confidence.isSome() and
confidence.get() >= 0.3:
if not awaiter.finished:
awaiter.complete()
# NOTE(review): assumes AutonatV2Service is the second registered service
# (index 1) — fragile if the builder's service ordering changes; confirm.
let service = cast[AutonatV2Service](src.services[1])
service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
await src.start()
await src.connect(dstPeerId, @[MultiAddress.init("/ip4/127.0.0.1/tcp/4040").get()])
await awaiter
echo service.networkReachability
when isMainModule:
# Wait for the Go peer's listener on 127.0.0.1:4040 before starting, so the
# dial in main() does not race the other process's startup.
if waitFor(waitForService("127.0.0.1", Port(4040))):
waitFor(main())
else:
quit("timeout waiting for service", 1)

View File

@@ -1,7 +1,7 @@
mode = ScriptMode.Verbose
packageName = "libp2p"
version = "1.12.0"
version = "1.13.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
@@ -49,12 +49,6 @@ proc tutorialToMd(filename: string) =
task testnative, "Runs libp2p native tests":
runTest("testnative")
task testdaemon, "Runs daemon tests":
runTest("testdaemon")
task testinterop, "Runs interop tests":
runTest("testinterop")
task testpubsub, "Runs pubsub tests":
runTest("pubsub/testpubsub", "-d:libp2p_gossipsub_1_4")

View File

@@ -29,6 +29,7 @@ import
protocols/connectivity/[
autonat/server,
autonatv2/server,
autonatv2/service,
autonatv2/client,
relay/relay,
relay/client,
@@ -82,7 +83,8 @@ type
peerStoreCapacity: Opt[int]
autonat: bool
autonatV2ServerConfig: Opt[AutonatV2Config]
autonatV2Client: bool
autonatV2Client: AutonatV2Client
autonatV2ServiceConfig: AutonatV2ServiceConfig
autotls: AutotlsService
circuitRelay: Relay
rdv: RendezVous
@@ -295,8 +297,11 @@ proc withAutonatV2Server*(
b.autonatV2ServerConfig = Opt.some(config)
b
proc withAutonatV2*(b: SwitchBuilder): SwitchBuilder =
b.autonatV2Client = true
proc withAutonatV2*(
b: SwitchBuilder, serviceConfig = AutonatV2ServiceConfig.new()
): SwitchBuilder =
b.autonatV2Client = AutonatV2Client.new(b.rng)
b.autonatV2ServiceConfig = serviceConfig
b
when defined(libp2p_autotls_support):
@@ -385,6 +390,13 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
if b.enableWildcardResolver:
b.services.insert(WildcardAddressResolverService.new(), 0)
if not isNil(b.autonatV2Client):
b.services.add(
AutonatV2Service.new(
b.rng, client = b.autonatV2Client, config = b.autonatV2ServiceConfig
)
)
let switch = newSwitch(
peerInfo = peerInfo,
transports = transports,
@@ -398,9 +410,9 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
switch.mount(identify)
if b.autonatV2Client:
let autonatV2Client = AutonatV2Client.new(switch.dialer, b.rng)
switch.mount(autonatV2Client)
if not isNil(b.autonatV2Client):
b.autonatV2Client.setup(switch)
switch.mount(b.autonatV2Client)
b.autonatV2ServerConfig.withValue(config):
switch.mount(AutonatV2.new(switch, config = config))

File diff suppressed because it is too large Load Diff

View File

@@ -1,156 +0,0 @@
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
## This module implements Pool of StreamTransport.
import chronos
const DefaultPoolSize* = 8 ## Default pool size
type
ConnectionFlags = enum
None
Busy # transport currently checked out via acquire()
PoolItem = object
transp*: StreamTransport
flags*: set[ConnectionFlags]
PoolState = enum
Connecting
Connected
Closing
Closed
TransportPool* = ref object ## Transports pool object
transports: seq[PoolItem]
busyCount: int # number of items currently flagged Busy
state: PoolState
bufferSize: int
event: AsyncEvent # fired when a transport is released
TransportPoolError* = object of AsyncError
proc waitAll[T](futs: seq[Future[T]]): Future[void] =
  ## Returns a future that completes once every future in ``futs`` has
  ## finished (regardless of success or failure).
  var counter = len(futs)
  var retFuture = newFuture[void]("connpool.waitAllConnections")
  proc cb(udata: pointer) =
    dec(counter)
    if counter == 0:
      retFuture.complete()
  if counter == 0:
    # Fix: with an empty seq no callback ever fires, so the original
    # future never completed and callers hung. Complete immediately.
    retFuture.complete()
  else:
    for fut in futs:
      fut.addCallback(cb)
  return retFuture
proc newPool*(
address: TransportAddress,
poolsize: int = DefaultPoolSize,
bufferSize = DefaultStreamBufferSize,
): Future[TransportPool] {.async: (raises: [CancelledError]).} =
## Establish pool of connections to address ``address`` with size
## ``poolsize``.
var pool = new TransportPool
pool.bufferSize = bufferSize
pool.transports = newSeq[PoolItem](poolsize)
var conns = newSeq[Future[StreamTransport]](poolsize)
pool.state = Connecting
for i in 0 ..< poolsize:
conns[i] = connect(address, bufferSize)
# Waiting for all connections to be established.
await waitAll(conns)
# Checking connections and preparing pool.
for i in 0 ..< poolsize:
if conns[i].failed:
# NOTE(review): re-raising the connect error here is broader than the
# declared raises list ([CancelledError]) suggests — confirm intent.
raise conns[i].error
else:
let transp = conns[i].read()
let item = PoolItem(transp: transp)
pool.transports[i] = item
# Setup available connections event
pool.event = newAsyncEvent()
pool.state = Connected
result = pool
proc acquire*(
pool: TransportPool
): Future[StreamTransport] {.async: (raises: [CancelledError, TransportPoolError]).} =
## Acquire non-busy connection from pool ``pool``.
## Blocks (awaiting the pool event) until a transport is released when all
## are busy; raises TransportPoolError unless the pool is Connected.
var transp: StreamTransport
if pool.state in {Connected}:
while true:
if pool.busyCount < len(pool.transports):
# NOTE(review): if busyCount < len but every item is flagged Busy
# (counter drift), this loop would spin without awaiting — relies on
# busyCount always equalling the number of Busy flags; confirm.
for conn in pool.transports.mitems():
if Busy notin conn.flags:
conn.flags.incl(Busy)
inc(pool.busyCount)
transp = conn.transp
break
else:
await pool.event.wait()
pool.event.clear()
if not isNil(transp):
break
else:
raise newException(TransportPoolError, "Pool is not ready!")
result = transp
proc release*(
pool: TransportPool, transp: StreamTransport
) {.async: (raises: [TransportPoolError]).} =
## Release connection ``transp`` back to pool ``pool``.
## Clears the Busy flag and fires the pool event so a waiting acquire()/join()
## can proceed; raises if the transport is unknown or the pool is not
## Connected/Closing.
if pool.state in {Connected, Closing}:
var found = false
for conn in pool.transports.mitems():
if conn.transp == transp:
conn.flags.excl(Busy)
dec(pool.busyCount)
pool.event.fire()
found = true
break
if not found:
raise newException(TransportPoolError, "Transport not bound to pool!")
else:
raise newException(TransportPoolError, "Pool is not ready!")
proc join*(
pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
## Waiting for all connection to become available.
## Only Connecting raises; a Closed pool returns immediately without error.
if pool.state in {Connected, Closing}:
while true:
if pool.busyCount == 0:
break
else:
# Wait for a release() to fire the event, then re-check.
await pool.event.wait()
pool.event.clear()
elif pool.state == Connecting:
raise newException(TransportPoolError, "Pool is not ready!")
proc close*(
pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
## Closes transports pool ``pool`` and release all resources.
## No-op unless the pool is currently Connected.
if pool.state == Connected:
pool.state = Closing
# Waiting for all transports to become available.
await pool.join()
# Closing all transports
var pending = newSeq[Future[void]](len(pool.transports))
for i in 0 ..< len(pool.transports):
let transp = pool.transports[i].transp
transp.close()
pending[i] = transp.join()
# Waiting for all transports to be closed
await waitAll(pending)
# Mark pool as `Closed`.
pool.state = Closed

View File

@@ -93,6 +93,10 @@ proc parseFullAddress*(ma: MultiAddress): MaResult[(PeerId, MultiAddress)] =
proc parseFullAddress*(ma: string | seq[byte]): MaResult[(PeerId, MultiAddress)] =
parseFullAddress(?MultiAddress.init(ma))
proc toFullAddress*(peerId: PeerId, ma: MultiAddress): MaResult[MultiAddress] =
let peerIdPart = ?MultiAddress.init(multiCodec("p2p"), peerId.data)
concat(ma, peerIdPart)
proc new*(
p: typedesc[PeerInfo],
key: PrivateKey,

View File

@@ -58,6 +58,9 @@ type
NotReachable
Reachable
proc isReachable*(self: NetworkReachability): bool =
self == NetworkReachability.Reachable
proc encode(p: AutonatPeerInfo): ProtoBuffer =
result = initProtoBuffer()
p.id.withValue(id):

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import results
import chronos, chronicles
import chronos, chronicles, tables
import
../../protocol,
../../../switch,
@@ -58,11 +58,10 @@ proc handleDialBack(
proc new*(
T: typedesc[AutonatV2Client],
dialer: Dial,
rng: ref HmacDrbgContext,
dialBackTimeout: Duration = DefaultDialBackTimeout,
): T =
let client = T(dialer: dialer, rng: rng, dialBackTimeout: dialBackTimeout)
let client = T(rng: rng, dialBackTimeout: dialBackTimeout)
# handler for DialBack messages
proc handleStream(
@@ -87,6 +86,9 @@ proc new*(
client.codec = $AutonatV2Codec.DialBack
client
proc setup*(self: AutonatV2Client, switch: Switch) =
self.dialer = switch.dialer
proc handleDialDataRequest*(
conn: Connection, req: DialDataRequest
): Future[DialResponse] {.
@@ -143,22 +145,20 @@ proc checkAddrIdx(
return false
true
proc sendDialRequest*(
self: AutonatV2Client,
pid: PeerId,
peerAddrs: seq[MultiAddress],
testAddrs: seq[MultiAddress],
method sendDialRequest*(
self: AutonatV2Client, pid: PeerId, testAddrs: seq[MultiAddress]
): Future[AutonatV2Response] {.
base,
async: (raises: [AutonatV2Error, CancelledError, DialFailedError, LPStreamError])
.} =
## Dials peer with `pid` at `peerAddrs` and requests that it tries connecting to `testAddrs`
## Dials peer with `pid` and requests that it tries connecting to `testAddrs`
let nonce = self.rng[].generate(Nonce)
self.expectedNonces[nonce] = Opt.none(MultiAddress)
var dialResp: DialResponse
try:
let conn = await self.dialer.dial(pid, peerAddrs, @[$AutonatV2Codec.DialRequest])
let conn = await self.dialer.dial(pid, @[$AutonatV2Codec.DialRequest])
defer:
await conn.close()

View File

@@ -0,0 +1,45 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import chronos
import ../../../peerid, ../../../multiaddress, ../../../switch
import ./client, ./types
type AutonatV2ClientMock* = ref object of AutonatV2Client
response*: AutonatV2Response # canned response returned by every sendDialRequest
dials*: int # number of sendDialRequest calls so far
expectedDials: int # call count at which `finished` completes
finished*: Future[void] # completed once expectedDials calls were made
proc new*(
T: typedesc[AutonatV2ClientMock], response: AutonatV2Response, expectedDials: int
): T =
## Creates a mock client that always answers `response` and completes its
## `finished` future on the `expectedDials`-th sendDialRequest call.
return T(
dials: 0,
expectedDials: expectedDials,
finished: newFuture[void](),
response: response,
)
method sendDialRequest*(
self: AutonatV2ClientMock, pid: PeerId, testAddrs: seq[MultiAddress]
): Future[AutonatV2Response] {.
async: (raises: [AutonatV2Error, CancelledError, DialFailedError, LPStreamError])
.} =
## Mock override: counts the call, completes `finished` exactly on the
## `expectedDials`-th call, and returns the canned `response`, filling in
## `addrs` from `testAddrs` when the canned DialResponse carries an addrIdx.
self.dials += 1
if self.dials == self.expectedDials:
self.finished.complete()
var ans = self.response
ans.dialResp.addrIdx.withValue(addrIdx):
# Echo back the test address the canned response points at.
ans.addrs = Opt.some(testAddrs[addrIdx])
ans

View File

@@ -0,0 +1,65 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import results
import chronos, chronicles
import
../../../../libp2p/
[
switch,
muxers/muxer,
dialer,
multiaddress,
multicodec,
peerid,
protobuf/minprotobuf,
],
../../protocol,
./types,
./server
type AutonatV2Mock* = ref object of LPProtocol
config*: AutonatV2Config # stored server config (not consulted by the canned-reply handler)
response*: ProtoBuffer # raw protobuf written back for every DialRequest
proc new*(
T: typedesc[AutonatV2Mock], config: AutonatV2Config = AutonatV2Config.new()
): T =
## Creates a mock AutoNATv2 server that answers any valid DialRequest with
## the pre-set `response` buffer, ignoring the request's content otherwise.
let autonatV2 = T(config: config)
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
defer:
await conn.close()
try:
# Drop the stream silently on undecodable or non-DialRequest messages.
let msg = AutonatV2Msg.decode(
initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))
).valueOr:
return
if msg.msgType != MsgType.DialRequest:
return
except LPStreamError:
return
try:
# return mocked message
await conn.writeLp(autonatV2.response.buffer)
except CancelledError as exc:
raise exc
except LPStreamRemoteClosedError:
# Peer went away first; nothing to do for a mock.
discard
except LPStreamError:
discard
autonatV2.handler = handleStream
autonatV2.codec = $AutonatV2Codec.DialRequest
autonatV2

View File

@@ -0,0 +1,278 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import std/[deques, sequtils]
import chronos, chronicles, metrics, results
import
../../protocol,
../../../switch,
../../../multiaddress,
../../../multicodec,
../../../peerid,
../../../protobuf/minprotobuf,
../../../wire,
../../../utils/heartbeat,
../../../crypto/crypto,
../autonat/types,
./types,
./client
declarePublicGauge(
libp2p_autonat_v2_reachability_confidence,
"autonat v2 reachability confidence",
labels = ["reachability"],
)
logScope:
topics = "libp2p autonatv2 service"
# needed because nim 2.0 can't do proper type assertions
const noneDuration: Opt[Duration] = Opt.none(Duration)
type
AutonatV2ServiceConfig* = object
scheduleInterval: Opt[Duration] # how often schedule() re-runs; none = no periodic runs
askNewConnectedPeers: bool # probe peers as soon as they connect
numPeersToAsk: int # max successful answers gathered per run
maxQueueSize: int # sliding-window size for reachability answers
minConfidence: float # threshold for accepting a reachability verdict
enableAddressMapper: bool # install the addressMapper on the peer info
AutonatV2Service* = ref object of Service
config*: AutonatV2ServiceConfig
confidence: Opt[float] # confidence of the current verdict, none while Unknown
newConnectedPeerHandler: PeerEventHandler
statusAndConfidenceHandler: StatusAndConfidenceHandler
addressMapper: AddressMapper
scheduleHandle: Future[void] # periodic runner, nil unless scheduleInterval set
networkReachability*: NetworkReachability
answers: Deque[NetworkReachability] # recent answers, capped at maxQueueSize
client*: AutonatV2Client
rng: ref HmacDrbgContext
StatusAndConfidenceHandler* = proc(
networkReachability: NetworkReachability, confidence: Opt[float]
): Future[void] {.gcsafe, async: (raises: [CancelledError]).}
proc new*(
T: typedesc[AutonatV2ServiceConfig],
scheduleInterval: Opt[Duration] = noneDuration,
askNewConnectedPeers = true,
numPeersToAsk: int = 5,
maxQueueSize: int = 10,
minConfidence: float = 0.3,
enableAddressMapper = true,
): T =
## Creates a service config. Defaults: no periodic schedule, probe newly
## connected peers, ask up to 5 peers, keep the last 10 answers, accept a
## verdict at 0.3 confidence, and enable address mapping.
return T(
scheduleInterval: scheduleInterval,
askNewConnectedPeers: askNewConnectedPeers,
numPeersToAsk: numPeersToAsk,
maxQueueSize: maxQueueSize,
minConfidence: minConfidence,
enableAddressMapper: enableAddressMapper,
)
proc new*(
T: typedesc[AutonatV2Service],
rng: ref HmacDrbgContext,
client: AutonatV2Client = AutonatV2Client.new(),
config: AutonatV2ServiceConfig = AutonatV2ServiceConfig.new(),
): T =
## Creates the service in the Unknown state with an empty answer window;
## `client` performs the actual dial-back requests and `rng` shuffles the
## peer list when probing.
return T(
config: config,
confidence: Opt.none(float),
networkReachability: Unknown,
answers: initDeque[NetworkReachability](),
client: client,
rng: rng,
)
proc callHandler(self: AutonatV2Service) {.async: (raises: [CancelledError]).} =
## Notifies the registered statusAndConfidenceHandler (if any) with the
## current verdict and confidence.
if not isNil(self.statusAndConfidenceHandler):
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
proc hasEnoughIncomingSlots(switch: Switch): bool =
## True when at least 2 incoming connection slots are free.
# we leave some margin instead of comparing to 0 as a peer could connect to us while we are asking for the dial back
return switch.connManager.slotsAvailable(In) >= 2
proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =
## True when `peerId` already has an inbound muxed connection to us; such a
## peer is skipped by askPeer.
return switch.connManager.selectMuxer(peerId, In) != nil
proc handleAnswer(
self: AutonatV2Service, ans: NetworkReachability
): Future[bool] {.async: (raises: [CancelledError]).} =
## Folds one reachability answer into the sliding window and recomputes the
## aggregate verdict/confidence. Returns true when either changed.
if ans == Unknown:
# Unknown answers are not recorded; bare `return` yields the default
# result (false: nothing changed).
return
let oldNetworkReachability = self.networkReachability
let oldConfidence = self.confidence
# Evict the oldest answer once the window is full.
if self.answers.len == self.config.maxQueueSize:
self.answers.popFirst()
self.answers.addLast(ans)
self.networkReachability = Unknown
self.confidence = Opt.none(float)
# Reachable wins over NotReachable when both clear minConfidence, since the
# first verdict accepted below stops further updates (confidence.isNone).
const reachabilityPriority = [Reachable, NotReachable]
for reachability in reachabilityPriority:
# Confidence is the answer's share of the full window size, so it ramps
# up slowly until maxQueueSize answers were collected.
let confidence = self.answers.countIt(it == reachability) / self.config.maxQueueSize
libp2p_autonat_v2_reachability_confidence.set(
value = confidence, labelValues = [$reachability]
)
if self.confidence.isNone and confidence >= self.config.minConfidence:
self.networkReachability = reachability
self.confidence = Opt.some(confidence)
debug "Current status",
currentStats = $self.networkReachability,
confidence = $self.confidence,
answers = self.answers
# Return whether anything has changed
return
self.networkReachability != oldNetworkReachability or
self.confidence != oldConfidence
proc askPeer(
self: AutonatV2Service, switch: Switch, peerId: PeerId
): Future[NetworkReachability] {.async: (raises: [CancelledError]).} =
## Asks `peerId` to dial us back on our listen addresses and returns the
## resulting reachability answer (Unknown on any failure or when the peer is
## skipped). Notifies the status handler and refreshes our peer info when
## the aggregate verdict or confidence changed.
logScope:
peerId = $peerId
# A peer that already reached us inbound proves nothing new; skip it.
if doesPeerHaveIncomingConn(switch, peerId):
return Unknown
if not hasEnoughIncomingSlots(switch):
debug "No incoming slots available, not asking peer",
incomingSlotsAvailable = switch.connManager.slotsAvailable(In)
return Unknown
trace "Asking peer for reachability"
let ans =
try:
let reqAddrs = switch.peerInfo.addrs
let autonatV2Resp = await self.client.sendDialRequest(peerId, reqAddrs)
debug "AutonatV2Response", autonatV2Resp = autonatV2Resp
autonatV2Resp.reachability
except CancelledError as exc:
raise exc
# All request failures degrade to Unknown (ignored by handleAnswer).
except LPStreamError as exc:
debug "DialRequest stream error", description = exc.msg
Unknown
except DialFailedError as exc:
debug "DialRequest dial failed", description = exc.msg
Unknown
except AutonatV2Error as exc:
debug "DialRequest error", description = exc.msg
Unknown
let hasReachabilityOrConfidenceChanged = await self.handleAnswer(ans)
if hasReachabilityOrConfidenceChanged:
await self.callHandler()
# Re-run address mappers so advertised addresses reflect the new verdict.
await switch.peerInfo.update()
return ans
proc askConnectedPeers(
self: AutonatV2Service, switch: Switch
) {.async: (raises: [CancelledError]).} =
## Probes outbound-connected peers in random order until numPeersToAsk
## usable (non-Unknown) answers were collected or slots/peers run out.
trace "Asking peers for reachability"
var peers = switch.connectedPeers(Direction.Out)
# Shuffle so repeated runs don't always query the same peers first.
self.rng.shuffle(peers)
var answersFromPeers = 0
for peer in peers:
if answersFromPeers >= self.config.numPeersToAsk:
break
if not hasEnoughIncomingSlots(switch):
debug "No incoming slots available, not asking peers",
incomingSlotsAvailable = switch.connManager.slotsAvailable(In)
break
if (await askPeer(self, switch, peer)) != Unknown:
answersFromPeers.inc()
proc schedule(
service: AutonatV2Service, switch: Switch, interval: Duration
) {.async: (raises: [CancelledError]).} =
## Runs the service every `interval` until cancelled (see stop()).
heartbeat "Scheduling AutonatV2Service run", interval:
await service.run(switch)
proc addressMapper(
self: AutonatV2Service, peerStore: PeerStore, listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
## Maps listen addresses to addresses believed dialable by remote peers.
## Until reachability is confirmed, addresses are returned untouched;
## afterwards private addresses are replaced by the peer store's best guess.
if not self.networkReachability.isReachable():
return listenAddrs
var addrs = newSeq[MultiAddress]()
for listenAddr in listenAddrs:
# We are reachable here (guaranteed by the early return above); the
# original re-checked `not isReachable()` in this condition, which was
# always false at this point — removed as dead code.
if listenAddr.isPublicMA():
addrs.add(listenAddr)
else:
addrs.add(peerStore.guessDialableAddr(listenAddr))
return addrs
method setup*(
    self: AutonatV2Service, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
  ## One-time service setup: installs the address mapper, optionally hooks
  ## newly-connected peers, and starts the periodic schedule when configured.
  ## Returns false when the base service was already set up.
  self.addressMapper = proc(
      listenAddrs: seq[MultiAddress]
  ): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
    return await addressMapper(self, switch.peerStore, listenAddrs)
  trace "Setting up AutonatV2Service"
  let hasBeenSetup = await procCall Service(self).setup(switch)
  if not hasBeenSetup:
    return hasBeenSetup
  if self.config.askNewConnectedPeers:
    self.newConnectedPeerHandler = proc(
        peerId: PeerId, event: PeerEvent
    ): Future[void] {.async: (raises: [CancelledError]).} =
      # NOTE(review): the askPeer future is discarded, so it runs untracked;
      # it only raises CancelledError, but confirm this is intentional.
      discard askPeer(self, switch, peerId)
    switch.connManager.addPeerEventHandler(
      self.newConnectedPeerHandler, PeerEventKind.Joined
    )
  # Start the periodic probe only when an interval was configured.
  self.config.scheduleInterval.withValue(interval):
    self.scheduleHandle = schedule(self, switch, interval)
  if self.config.enableAddressMapper:
    switch.peerInfo.addressMappers.add(self.addressMapper)
  return hasBeenSetup
method run*(
    self: AutonatV2Service, switch: Switch
) {.public, async: (raises: [CancelledError]).} =
  ## Single service iteration: asks connected peers for our reachability.
  trace "Running AutonatV2Service"
  await askConnectedPeers(self, switch)

method stop*(
    self: AutonatV2Service, switch: Switch
): Future[bool] {.public, async: (raises: [CancelledError]).} =
  ## Tears down everything `setup` installed: the periodic schedule, the
  ## peer-joined handler, and the address mapper. Returns false when the base
  ## service was not running.
  trace "Stopping AutonatV2Service"
  let hasBeenStopped = await procCall Service(self).stop(switch)
  if not hasBeenStopped:
    return hasBeenStopped
  if not isNil(self.scheduleHandle):
    self.scheduleHandle.cancelSoon()
    self.scheduleHandle = nil
  if not isNil(self.newConnectedPeerHandler):
    switch.connManager.removePeerEventHandler(
      self.newConnectedPeerHandler, PeerEventKind.Joined
    )
  if self.config.enableAddressMapper:
    switch.peerInfo.addressMappers.keepItIf(it != self.addressMapper)
    # Re-publish addresses now that the mapper no longer applies.
    await switch.peerInfo.update()
  return hasBeenStopped

proc setStatusAndConfidenceHandler*(
    self: AutonatV2Service, statusAndConfidenceHandler: StatusAndConfidenceHandler
) =
  ## Registers the callback invoked when reachability or confidence changes.
  self.statusAndConfidenceHandler = statusAndConfidenceHandler

View File

@@ -14,6 +14,8 @@ import
../../../multiaddress, ../../../peerid, ../../../protobuf/minprotobuf, ../../../switch
from ../autonat/types import NetworkReachability
export NetworkReachability
const
  ## Default time allowed for a dial-back attempt before giving up.
  DefaultDialTimeout*: Duration = 15.seconds
  ## Shorter timeout used for the amplification-attack-prevention dial
  ## (assumption: matches the stricter autonat v2 spec dial — confirm).
  DefaultAmplificationAttackDialTimeout*: Duration = 3.seconds

View File

@@ -9,7 +9,6 @@ import
../../../multicodec,
../../../peerid,
../../../protobuf/minprotobuf,
../autonat/service,
./types
proc asNetworkReachability*(self: DialResponse): NetworkReachability =

View File

@@ -0,0 +1,53 @@
import endians, nimcrypto
proc aes_ctr*(key, iv, data: openArray[byte]): seq[byte] =
  ## Runs AES-128 in CTR mode over `data` with a 16-byte key and IV.
  ## CTR is symmetric, so this single proc both encrypts and decrypts.
  doAssert key.len == 16, "Key must be 16 bytes for AES-128"
  doAssert iv.len == 16, "IV must be 16 bytes for AES-128"
  var cipher: CTR[aes128]
  cipher.init(key, iv)
  var processed = newSeq[byte](data.len)
  cipher.encrypt(data, processed)
  # Wipe key material held inside the cipher context.
  cipher.clear()
  processed
proc advance_ctr*(iv: var openArray[byte], blocks: uint64) =
  ## Advances the AES-CTR counter held in the IV by `blocks`.
  ## The counter is read as the last 8 IV bytes in big-endian order.
  ## NOTE(review): only the low 64 bits are incremented; a wrap-around does
  ## not carry into iv[0..7] — confirm this matches the CTR implementation.
  var counter: uint64
  bigEndian64(addr counter, addr iv[8])
  counter += blocks
  bigEndian64(addr iv[8], addr counter)
proc aes_ctr_start_index*(key, iv, data: openArray[byte], startIndex: int): seq[byte] =
  ## AES-128-CTR starting at byte offset `startIndex` (block aligned): the
  ## keystream for all preceding blocks is skipped rather than computed.
  ## Works for both encryption and decryption.
  doAssert key.len == 16, "Key must be 16 bytes for AES-128"
  doAssert iv.len == 16, "IV must be 16 bytes for AES-128"
  doAssert startIndex mod 16 == 0, "Start index must be a multiple of 16"
  # Copy the IV and move its counter forward to the requested block.
  var shiftedIV = @iv
  advance_ctr(shiftedIV, uint64(startIndex div 16))
  aes_ctr(key, shiftedIV, data)
proc sha256_hash*(data: openArray[byte]): array[32, byte] =
  ## Returns the SHA-256 digest of `data`.
  sha256.digest(data).data

proc kdf*(key: openArray[byte]): seq[byte] =
  ## Derives a 16-byte key: SHA-256 of `key` truncated to its first half.
  let digest = sha256_hash(key)
  digest[0 .. 15]

proc hmac*(key, data: openArray[byte]): seq[byte] =
  ## HMAC-SHA-256 of `data` under `key`, truncated to 16 bytes.
  let tag = sha256.hmac(key, data).data
  tag[0 .. 15]

View File

@@ -0,0 +1,52 @@
import results
import bearssl/rand
import ../../crypto/curve25519
const FieldElementSize* = Curve25519KeySize
type FieldElement* = Curve25519Key
proc bytesToFieldElement*(bytes: openArray[byte]): Result[FieldElement, string] =
  ## Converts raw bytes into a FieldElement; input must be exactly 32 bytes.
  if bytes.len == FieldElementSize:
    ok(intoCurve25519Key(bytes))
  else:
    err("Field element size must be 32 bytes")

proc fieldElementToBytes*(fe: FieldElement): seq[byte] =
  ## Serializes a FieldElement to its raw byte representation.
  return fe.getBytes()
proc generateRandomFieldElement*(): Result[FieldElement, string] =
  ## Generates a random FieldElement from system randomness.
  ## Fails only when the DRBG context cannot be created.
  let rng = HmacDrbgContext.new()
  if rng.isNil:
    return err("Failed to create HmacDrbgContext with system randomness")
  ok(Curve25519Key.random(rng[]))

proc generateKeyPair*(): Result[tuple[privateKey, publicKey: FieldElement], string] =
  ## Generates a Curve25519 key pair; both halves are FieldElements.
  let privateKey = generateRandomFieldElement().valueOr:
    return err("Error in private key generation: " & error)
  let publicKey = public(privateKey)
  ok((privateKey, publicKey))
proc multiplyPointWithScalars*(
    point: FieldElement, scalars: openArray[FieldElement]
): FieldElement =
  ## Multiplies a Curve25519 point by each scalar in turn.
  result = point
  for s in scalars:
    Curve25519.mul(result, s)
proc multiplyBasePointWithScalars*(
    scalars: openArray[FieldElement]
): Result[FieldElement, string] =
  ## Multiplies the Curve25519 base point by a set of scalars: the first
  ## scalar maps the base point, the remaining ones are applied in order.
  ## Requires at least one scalar.
  if scalars.len == 0: # lengths are never negative; == 0 is the real guard
    # FIX: corrected "Atleast" typo in the error message.
    return err("At least one scalar must be provided")
  var acc: FieldElement = public(scalars[0]) # public() multiplies the base point
  for i in 1 ..< scalars.len:
    Curve25519.mul(acc, scalars[i]) # Multiply with each scalar
  ok(acc)

View File

@@ -0,0 +1,133 @@
import hashes, chronos, stew/byteutils, results, chronicles
import ../../stream/connection
import ../../varint
import ../../utils/sequninit
import ./mix_protocol
from fragmentation import DataSize
# Callback used by MixEntryConnection to send `msg` for `codec` to
# `destination` through the mix network.
type MixDialer* = proc(
  msg: seq[byte], codec: string, destination: MixDestination
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).}

# Connection-like wrapper: writes are anonymized via the mix network.
# Reads are not implemented yet (all read* methods raise EOF).
type MixEntryConnection* = ref object of Connection
  destination: MixDestination # final target of every write
  codec: string # protocol codec expected by the destination
  mixDialer: MixDialer # send hook installed by `new`
func shortLog*(conn: MixEntryConnection): string =
  ## Compact textual form for log output.
  if conn.isNil:
    "MixEntryConnection(nil)"
  else:
    "MixEntryConnection(" & $conn.destination & ")"
chronicles.formatIt(MixEntryConnection):
shortLog(it)
method readOnce*(
    s: MixEntryConnection, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]), public.} =
  ## Reading is not supported yet; always raises EOF.
  # TODO: implement
  raise newLPStreamEOFError()

method readExactly*(
    s: MixEntryConnection, pbytes: pointer, nbytes: int
): Future[void] {.async: (raises: [CancelledError, LPStreamError]), public.} =
  ## Reading is not supported yet; always raises EOF.
  # TODO: implement
  raise newLPStreamEOFError()

method readLine*(
    s: MixEntryConnection, limit = 0, sep = "\r\n"
): Future[string] {.async: (raises: [CancelledError, LPStreamError]), public.} =
  ## Reading is not supported yet; always raises EOF.
  # TODO: implement
  raise newLPStreamEOFError()

method readVarint*(
    conn: MixEntryConnection
): Future[uint64] {.async: (raises: [CancelledError, LPStreamError]), public.} =
  ## Reading is not supported yet; always raises EOF.
  # TODO: implement
  raise newLPStreamEOFError()

method readLp*(
    s: MixEntryConnection, maxSize: int
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]), public.} =
  ## Reading is not supported yet; always raises EOF.
  # TODO: implement
  raise newLPStreamEOFError()
method write*(
    self: MixEntryConnection, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
  ## Sends `msg` anonymously to the configured destination via the mix dialer.
  self.mixDialer(msg, self.codec, self.destination)

proc write*(
    self: MixEntryConnection, msg: string
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
  ## String convenience overload; forwards the UTF-8 bytes of `msg`.
  self.write(msg.toBytes())
method writeLp*(
    self: MixEntryConnection, msg: openArray[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
  ## Writes `msg` with a varint-encoded length prefix.
  ## Returns a failed future (instead of raising, since this is a raw async
  ## proc) when the payload exceeds the mix data capacity.
  if msg.len() > DataSize:
    let fut = newFuture[void]()
    fut.fail(
      newException(LPStreamError, "exceeds max msg size of " & $DataSize & " bytes")
    )
    return fut
  let vbytes = PB.toBytes(msg.len().uint64)
  var buf = newSeqUninit[byte](msg.len() + vbytes.len)
  buf[0 ..< vbytes.len] = vbytes.toOpenArray()
  buf[vbytes.len ..< buf.len] = msg
  self.write(buf)
method writeLp*(
    self: MixEntryConnection, msg: string
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
  ## String convenience overload for length-prefixed writes.
  self.writeLp(msg.toOpenArrayByte(0, msg.high))

proc shortLog*(self: MixEntryConnection): string {.raises: [].} =
  ## NOTE(review): a `func shortLog*` with the same signature is defined above
  ## in this module; this looks like a duplicate definition — confirm which
  ## one is intended to remain.
  "[MixEntryConnection] Destination: " & $self.destination
method closeImpl*(
    self: MixEntryConnection
): Future[void] {.async: (raises: [], raw: true).} =
  ## No underlying resources to release: returns an already-completed future.
  ## NOTE(review): does not chain to the base Connection closeImpl — confirm
  ## no base-class bookkeeping is skipped.
  let fut = newFuture[void]()
  fut.complete()
  return fut

func hash*(self: MixEntryConnection): Hash =
  ## Hashes by the textual destination so connections to the same target
  ## collide in tables.
  hash($self.destination)

when defined(libp2p_agents_metrics):
  proc setShortAgent*(self: MixEntryConnection, shortAgent: string) =
    ## Metrics hook; intentionally a no-op for mix entry connections.
    discard
proc new*(
    T: typedesc[MixEntryConnection],
    srcMix: MixProtocol,
    destination: MixDestination,
    codec: string,
): T {.raises: [].} =
  ## Creates an entry connection whose writes are anonymized through `srcMix`.
  var instance = T()
  instance.destination = destination
  instance.codec = codec
  # Every write is forwarded through the mix protocol; no reply queue yet.
  instance.mixDialer = proc(
      msg: seq[byte], codec: string, dest: MixDestination
  ): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
    await srcMix.anonymizeLocalProtocolSend(
      nil, msg, codec, dest, 0 # TODO: set incoming queue for replies and surbs
    )
  when defined(libp2p_agents_metrics):
    # NOTE(review): `connection` is not defined in this scope; this branch
    # looks like it cannot compile with libp2p_agents_metrics — confirm.
    instance.shortAgent = connection.shortAgent
  instance
proc toConnection*(
    srcMix: MixProtocol, destination: MixDestination, codec: string
): Result[Connection, string] {.gcsafe, raises: [].} =
  ## Create a stream to send and optionally receive responses.
  ## Under the hood it will wrap the message in a sphinx packet
  ## and send it via a random mix path.
  ok(MixEntryConnection.new(srcMix, destination, codec))

View File

@@ -0,0 +1,36 @@
import chronicles, chronos, metrics
import ../../builders
import ../../stream/connection
import ./mix_metrics
# Terminal hop of the mix network: delivers the decrypted message to its
# final destination over a regular libp2p stream.
type ExitLayer* = object
  switch: Switch # used to dial the destination peer

proc init*(T: typedesc[ExitLayer], switch: Switch): T =
  ## Builds an ExitLayer that dials through `switch`.
  ExitLayer(switch: switch)
proc onMessage*(
    self: ExitLayer,
    codec: string,
    message: seq[byte],
    destAddr: MultiAddress,
    destPeerId: PeerId,
) {.async: (raises: [CancelledError]).} =
  ## Delivers `message` to its final destination over a fresh stream
  ## negotiated with `codec`.
  # If dialing destination fails, no response is returned to
  # the sender, so, flow can just end here. Only log errors
  # for now
  # https://github.com/vacp2p/mix/issues/86
  try:
    let destConn = await self.switch.dial(destPeerId, @[destAddr], codec)
    defer:
      await destConn.close()
    await destConn.write(message)
  except LPStreamError as exc:
    error "Stream error while writing to next hop: ", err = exc.msg
    mix_messages_error.inc(labelValues = ["ExitLayer", "LPSTREAM_ERR"])
  except DialFailedError as exc:
    error "Failed to dial next hop: ", err = exc.msg
    mix_messages_error.inc(labelValues = ["ExitLayer", "DIAL_FAILED"])
  except CancelledError as exc:
    raise exc

View File

@@ -0,0 +1,95 @@
import ./[serialization, seqno_generator]
import results, stew/endians2
import ../../peerid
const PaddingLengthSize* = 2 # bytes reserved for the padding-length prefix
const SeqNoSize* = 4 # bytes reserved for the trailing sequence number
# Payload bytes available per chunk once framing overhead is removed.
const DataSize* = MessageSize - PaddingLengthSize - SeqNoSize
# Unpadding and reassembling messages will be handled by the top-level applications.
# Although padding and splitting messages could also be managed at that level, we
# implement it here to clarify the sender's logic.
# This is crucial as the sender is responsible for wrapping messages in Sphinx packets.
# One fixed-size unit of a (possibly fragmented) message.
type MessageChunk* = object
  paddingLength: uint16 # number of leading padding bytes inside `data`
  data: seq[byte] # exactly DataSize bytes: padding followed by payload
  seqNo: uint32 # ordering/reassembly sequence number
proc init*(
    T: typedesc[MessageChunk], paddingLength: uint16, data: seq[byte], seqNo: uint32
): T =
  ## Constructs a MessageChunk from its three fields.
  result = T(paddingLength: paddingLength, data: data, seqNo: seqNo)

proc get*(msgChunk: MessageChunk): (uint16, seq[byte], uint32) =
  ## Returns (paddingLength, data, seqNo) as a tuple.
  return (msgChunk.paddingLength, msgChunk.data, msgChunk.seqNo)
proc serialize*(msgChunk: MessageChunk): seq[byte] =
  ## Encodes the chunk as: 2-byte BE padding length | data | 4-byte BE seqNo.
  doAssert msgChunk.data.len == DataSize,
    "Padded data must be exactly " & $DataSize & " bytes"
  let
    prefix = msgChunk.paddingLength.toBytesBE()
    suffix = msgChunk.seqNo.toBytesBE()
  @prefix & msgChunk.data & @suffix
proc deserialize*(T: typedesc[MessageChunk], data: openArray[byte]): Result[T, string] =
  ## Parses a serialized chunk; `data` must be exactly MessageSize bytes
  ## (the inverse of `serialize`).
  if data.len != MessageSize:
    return err("Data must be exactly " & $MessageSize & " bytes")
  let
    paddingLength = uint16.fromBytesBE(data[0 .. PaddingLengthSize - 1])
    chunk = data[PaddingLengthSize .. (PaddingLengthSize + DataSize - 1)]
    seqNo = uint32.fromBytesBE(data[PaddingLengthSize + DataSize ..^ 1])
  ok(T(paddingLength: paddingLength, data: chunk, seqNo: seqNo))
proc ceilDiv*(a, b: int): int =
  ## Integer ceiling division: smallest k with k*b >= a (for positive b).
  result = (a + b - 1) div b
proc addPadding*(messageBytes: seq[byte], seqNo: SeqNo): MessageChunk =
  ## Pads messages smaller than DataSize by prepending zero bytes so the
  ## chunk body is exactly DataSize long.
  ## NOTE(review): assumes messageBytes.len <= DataSize; a longer input would
  ## make the uint16 conversion fail — callers appear to guard this, confirm.
  let paddingLength = uint16(DataSize - messageBytes.len)
  let paddedData =
    if paddingLength > 0:
      let paddingBytes = newSeq[byte](paddingLength)
      paddingBytes & messageBytes
    else:
      messageBytes
  MessageChunk(paddingLength: paddingLength, data: paddedData, seqNo: seqNo)
proc addPadding*(messageBytes: seq[byte], peerId: PeerId): MessageChunk =
  ## Pads messages smaller than DataSize, deriving the sequence number from
  ## `peerId` and the message content.
  var seqNoGen = SeqNo.init(peerId)
  seqNoGen.generate(messageBytes)
  messageBytes.addPadding(seqNoGen)

proc removePadding*(msgChunk: MessageChunk): Result[seq[byte], string] =
  ## Strips the leading padding, returning the original payload bytes.
  ## Fails when paddingLength claims more bytes than the chunk holds.
  let msgLength = len(msgChunk.data) - int(msgChunk.paddingLength)
  if msgLength < 0:
    return err("Invalid padding length")
  ok(msgChunk.data[msgChunk.paddingLength ..^ 1])
proc padAndChunkMessage*(messageBytes: seq[byte], peerId: PeerId): seq[MessageChunk] =
  ## Splits `messageBytes` into DataSize-sized chunks (padding the last one),
  ## with consecutive sequence numbers seeded from `peerId`.
  ## An empty message still yields one fully-padded chunk.
  var seqNoGen = SeqNo.init(peerId)
  seqNoGen.generate(messageBytes)
  var chunks: seq[MessageChunk] = @[]
  # Split to chunks
  let totalChunks = max(1, ceilDiv(messageBytes.len, DataSize))
  # Ensure at least one chunk is generated
  for i in 0 ..< totalChunks:
    let
      startIdx = i * DataSize
      endIdx = min(startIdx + DataSize, messageBytes.len)
      chunkData = messageBytes[startIdx .. endIdx - 1]
      msgChunk = chunkData.addPadding(seqNoGen)
    chunks.add(msgChunk)
    seqNoGen.inc()
  return chunks

View File

@@ -0,0 +1,47 @@
import chronicles, results
import stew/[byteutils, leb128]
import ../../protobuf/minprotobuf
import ../../utils/sequninit
# A protocol message traveling through the mix network, carrying the codec
# of the libp2p protocol it belongs to.
type MixMessage* = object
  message*: seq[byte] # opaque payload
  codec*: string # codec id of the destination protocol

proc init*(T: typedesc[MixMessage], message: openArray[byte], codec: string): T =
  ## Creates a MixMessage, copying `message` into an owned seq.
  return T(message: @message, codec: codec)
proc serialize*(mixMsg: MixMessage): seq[byte] =
  ## Encodes the message as: LEB128 varint codec length | codec bytes | payload.
  ## Counterpart of `deserialize`.
  let vbytes = toBytes(mixMsg.codec.len.uint64, Leb128)
  doAssert vbytes.len <= 2, "serialization failed: codec length exceeds 2 bytes"
  var buf = newSeqUninit[byte](vbytes.len + mixMsg.codec.len + mixMsg.message.len)
  buf[0 ..< vbytes.len] = vbytes.toOpenArray()
  # BUG FIX: the codec slice must start at vbytes.len and span codec.len bytes.
  # The previous upper bound (`mixMsg.codec.len`) produced a slice that was
  # vbytes.len bytes too short, so the assignment length never matched.
  buf[vbytes.len ..< vbytes.len + mixMsg.codec.len] = mixMsg.codec.toBytes()
  buf[vbytes.len + mixMsg.codec.len ..< buf.len] = mixMsg.message
  buf
proc deserialize*(
    T: typedesc[MixMessage], data: openArray[byte]
): Result[MixMessage, string] =
  ## Parses a serialized MixMessage (varint codec length | codec | payload).
  if data.len == 0:
    return err("deserialization failed: data is empty")
  var codecLen: int
  var varintLen: int
  # NOTE(review): this loop only ever parses slices data[0..<0] and
  # data[0..<1], i.e. at most a 1-byte varint, while `serialize` permits
  # 2-byte varints — confirm the intended maximum codec length.
  for i in 0 ..< min(data.len, 2):
    let parsed = uint16.fromBytes(data[0 ..< i], Leb128)
    if parsed.len < 0 or (i == 1 and parsed.len == 0):
      return err("deserialization failed: invalid codec length")
    varintLen = parsed.len
    codecLen = parsed.val.int
  if data.len < varintLen + codecLen:
    return err("deserialization failed: not enough data")
  ok(
    T(
      codec: string.fromBytes(data[varintLen ..< varintLen + codecLen]),
      message: data[varintLen + codecLen ..< data.len],
    )
  )

View File

@@ -0,0 +1,13 @@
{.push raises: [].}
import metrics
declarePublicCounter mix_messages_recvd, "number of mix messages received", ["type"]
declarePublicCounter mix_messages_forwarded,
"number of mix messages forwarded", ["type"]
declarePublicCounter mix_messages_error,
"number of mix messages failed processing", ["type", "error"]
declarePublicGauge mix_pool_size, "number of nodes in the pool"

View File

@@ -0,0 +1,318 @@
import os, results, strformat, sugar, sequtils
import std/streams
import ../../crypto/[crypto, curve25519, secp]
import ../../[multiaddress, multicodec, peerid, peerinfo]
import ./[serialization, curve25519, multiaddr]
# Serialized sizes: address + both mix keys (+ both libp2p keys for the
# private variant).
const MixNodeInfoSize* =
  AddrSize + (2 * FieldElementSize) + (SkRawPublicKeySize + SkRawPrivateKeySize)
const MixPubInfoSize* = AddrSize + FieldElementSize + SkRawPublicKeySize
# Full (private) description of a mix node: identity, address and key pairs.
type MixNodeInfo* = object
  peerId*: PeerId
  multiAddr*: MultiAddress
  mixPubKey*: FieldElement # Curve25519 public key used by Sphinx
  mixPrivKey*: FieldElement # Curve25519 private key used by Sphinx
  libp2pPubKey*: SkPublicKey # secp256k1 libp2p identity public key
  libp2pPrivKey*: SkPrivateKey # secp256k1 libp2p identity private key
proc initMixNodeInfo*(
    peerId: PeerId,
    multiAddr: MultiAddress,
    mixPubKey, mixPrivKey: FieldElement,
    libp2pPubKey: SkPublicKey,
    libp2pPrivKey: SkPrivateKey,
): MixNodeInfo =
  ## Constructs a MixNodeInfo from its fields.
  MixNodeInfo(
    peerId: peerId,
    multiAddr: multiAddr,
    mixPubKey: mixPubKey,
    mixPrivKey: mixPrivKey,
    libp2pPubKey: libp2pPubKey,
    libp2pPrivKey: libp2pPrivKey,
  )

proc get*(
    info: MixNodeInfo
): (PeerId, MultiAddress, FieldElement, FieldElement, SkPublicKey, SkPrivateKey) =
  ## Returns all fields as a tuple, in declaration order.
  (
    info.peerId, info.multiAddr, info.mixPubKey, info.mixPrivKey, info.libp2pPubKey,
    info.libp2pPrivKey,
  )
proc serialize*(nodeInfo: MixNodeInfo): Result[seq[byte], string] =
  ## Serializes as: address | mix pub key | mix priv key | libp2p pub | libp2p priv.
  ## Total length is MixNodeInfoSize.
  let addrBytes = multiAddrToBytes(nodeInfo.peerId, nodeInfo.multiAddr).valueOr:
    return err("Error in multiaddress conversion to bytes: " & error)
  let
    mixPubKeyBytes = fieldElementToBytes(nodeInfo.mixPubKey)
    mixPrivKeyBytes = fieldElementToBytes(nodeInfo.mixPrivKey)
    libp2pPubKeyBytes = nodeInfo.libp2pPubKey.getBytes()
    libp2pPrivKeyBytes = nodeInfo.libp2pPrivKey.getBytes()
  return ok(
    addrBytes & mixPubKeyBytes & mixPrivKeyBytes & libp2pPubKeyBytes & libp2pPrivKeyBytes
  )

proc deserialize*(T: typedesc[MixNodeInfo], data: openArray[byte]): Result[T, string] =
  ## Inverse of `serialize`; `data` must be exactly MixNodeInfoSize bytes.
  ## Field offsets: [0, AddrSize) address, then two FieldElements, then the
  ## libp2p public and private keys.
  if len(data) != MixNodeInfoSize:
    return
      err("Serialized Mix node info must be exactly " & $MixNodeInfoSize & " bytes")
  let (peerId, multiAddr) = bytesToMultiAddr(data[0 .. AddrSize - 1]).valueOr:
    return err("Error in multiaddress conversion to bytes: " & error)
  let mixPubKey = bytesToFieldElement(
    data[AddrSize .. (AddrSize + FieldElementSize - 1)]
  ).valueOr:
    return err("Mix public key deserialize error: " & error)
  let mixPrivKey = bytesToFieldElement(
    data[(AddrSize + FieldElementSize) .. (AddrSize + (2 * FieldElementSize) - 1)]
  ).valueOr:
    return err("Mix private key deserialize error: " & error)
  let libp2pPubKey = SkPublicKey.init(
    data[
      AddrSize + (2 * FieldElementSize) ..
      AddrSize + (2 * FieldElementSize) + SkRawPublicKeySize - 1
    ]
  ).valueOr:
    return err("Failed to initialize libp2p public key")
  let libp2pPrivKey = SkPrivateKey.init(
    data[AddrSize + (2 * FieldElementSize) + SkRawPublicKeySize ..^ 1]
  ).valueOr:
    return err("Failed to initialize libp2p private key")
  ok(
    T(
      peerId: peerId,
      multiAddr: multiAddr,
      mixPubKey: mixPubKey,
      mixPrivKey: mixPrivKey,
      libp2pPubKey: libp2pPubKey,
      libp2pPrivKey: libp2pPrivKey,
    )
  )
proc writeToFile*(
    node: MixNodeInfo, index: int, nodeInfoFolderPath: string = "./nodeInfo"
): Result[void, string] =
  ## Writes the serialized node info to `<folder>/mixNode_<index>`, creating
  ## the folder when needed.
  ## NOTE(review): the private mix/libp2p keys are written in plaintext —
  ## confirm this is intended for the deployment model.
  if not dirExists(nodeInfoFolderPath):
    createDir(nodeInfoFolderPath)
  let filename = nodeInfoFolderPath / fmt"mixNode_{index}"
  var file = newFileStream(filename, fmWrite)
  if file == nil:
    return err("Failed to create file stream for " & filename)
  defer:
    file.close()
  let serializedData = node.serialize().valueOr:
    return err("Failed to serialize mix node info: " & error)
  file.writeData(addr serializedData[0], serializedData.len)
  return ok()

proc readFromFile*(
    T: typedesc[MixNodeInfo], index: int, nodeInfoFolderPath: string = "./nodeInfo"
): Result[T, string] =
  ## Reads and deserializes `<folder>/mixNode_<index>`; the file content must
  ## be exactly MixNodeInfoSize bytes.
  let filename = nodeInfoFolderPath / fmt"mixNode_{index}"
  if not fileExists(filename):
    return err("File does not exist")
  var file = newFileStream(filename, fmRead)
  if file == nil:
    return err(
      "Failed to open file: " & filename &
        ". Check permissions or if the path is correct."
    )
  defer:
    file.close()
  let data = ?file.readAll().catch().mapErr(x => "File read error: " & x.msg)
  if data.len != MixNodeInfoSize:
    return err(
      "Invalid data size for MixNodeInfo: expected " & $MixNodeInfoSize &
        " bytes, but got " & $(data.len) & " bytes."
    )
  # Reinterpret the string's bytes as a byte seq for deserialization.
  let dMixNodeInfo = MixNodeInfo.deserialize(cast[seq[byte]](data)).valueOr:
    return err("Mix node info deserialize error: " & error)
  return ok(dMixNodeInfo)

proc deleteNodeInfoFolder*(nodeInfoFolderPath: string = "./nodeInfo") =
  ## Deletes the folder that stores serialized mix node info files
  ## along with all its contents, if the folder exists.
  if dirExists(nodeInfoFolderPath):
    removeDir(nodeInfoFolderPath)
# Public-only view of a mix node: what other nodes need to build paths.
type MixPubInfo* = object
  peerId*: PeerId
  multiAddr*: MultiAddress
  mixPubKey*: FieldElement # Curve25519 public key used by Sphinx
  libp2pPubKey*: SkPublicKey # secp256k1 libp2p identity public key
proc init*(
    T: typedesc[MixPubInfo],
    peerId: PeerId,
    multiAddr: MultiAddress,
    mixPubKey: FieldElement,
    libp2pPubKey: SkPublicKey,
): T =
  ## Constructs a MixPubInfo from its fields.
  T(
    # BUG FIX: previously `peerId: PeerId` assigned the *type* rather than
    # the parameter, which fails to compile once this generic is instantiated.
    peerId: peerId,
    multiAddr: multiAddr,
    mixPubKey: mixPubKey,
    libp2pPubKey: libp2pPubKey,
  )
proc get*(info: MixPubInfo): (PeerId, MultiAddress, FieldElement, SkPublicKey) =
  ## Returns all fields as a tuple, in declaration order.
  (info.peerId, info.multiAddr, info.mixPubKey, info.libp2pPubKey)

proc serialize*(nodeInfo: MixPubInfo): Result[seq[byte], string] =
  ## Serializes as: address | mix pub key | libp2p pub key (MixPubInfoSize bytes).
  let addrBytes = multiAddrToBytes(nodeInfo.peerId, nodeInfo.multiAddr).valueOr:
    return err("Error in multiaddress conversion to bytes: " & error)
  let
    mixPubKeyBytes = fieldElementToBytes(nodeInfo.mixPubKey)
    libp2pPubKeyBytes = nodeInfo.libp2pPubKey.getBytes()
  return ok(addrBytes & mixPubKeyBytes & libp2pPubKeyBytes)
proc deserialize*(T: typedesc[MixPubInfo], data: openArray[byte]): Result[T, string] =
  ## Inverse of `serialize`; `data` must be exactly MixPubInfoSize bytes.
  if len(data) != MixPubInfoSize:
    return
      err("Serialized mix public info must be exactly " & $MixPubInfoSize & " bytes")
  let (peerId, multiAddr) = bytesToMultiAddr(data[0 .. AddrSize - 1]).valueOr:
    return err("Error in bytes to multiaddress conversion: " & error)
  let mixPubKey = bytesToFieldElement(
    data[AddrSize .. (AddrSize + FieldElementSize - 1)]
  ).valueOr:
    return err("Mix public key deserialize error: " & error)
  let libp2pPubKey = SkPublicKey.init(data[(AddrSize + FieldElementSize) ..^ 1]).valueOr:
    return err("Failed to initialize libp2p public key: ")
  ok(
    MixPubInfo(
      peerId: peerId,
      multiAddr: multiAddr,
      mixPubKey: mixPubKey,
      libp2pPubKey: libp2pPubKey,
    )
  )
proc writeToFile*(
    node: MixPubInfo, index: int, pubInfoFolderPath: string = "./pubInfo"
): Result[void, string] =
  ## Writes the serialized public info to `<folder>/mixNode_<index>`,
  ## creating the folder when needed.
  if not dirExists(pubInfoFolderPath):
    createDir(pubInfoFolderPath)
  let filename = pubInfoFolderPath / fmt"mixNode_{index}"
  var file = newFileStream(filename, fmWrite)
  if file == nil:
    return err("Failed to create file stream for " & filename)
  defer:
    file.close()
  let serializedData = node.serialize().valueOr:
    return err("Failed to serialize mix pub info: " & error)
  file.writeData(unsafeAddr serializedData[0], serializedData.len)
  return ok()
proc readFromFile*(
    T: typedesc[MixPubInfo], index: int, pubInfoFolderPath: string = "./pubInfo"
): Result[T, string] =
  ## Reads and deserializes `<folder>/mixNode_<index>`; the file content must
  ## be exactly MixPubInfoSize bytes.
  let filename = pubInfoFolderPath / fmt"mixNode_{index}"
  if not fileExists(filename):
    return err("File does not exist")
  var file = newFileStream(filename, fmRead)
  if file == nil:
    return err(
      "Failed to open file: " & filename &
        ". Check permissions or if the path is correct."
    )
  defer:
    file.close()
  let data = ?file.readAll().catch().mapErr(x => "File read error: " & x.msg)
  if data.len != MixPubInfoSize:
    # BUG FIX: the error message previously named MixNodeInfo/MixNodeInfoSize
    # although the check compares against MixPubInfoSize.
    return err(
      "Invalid data size for MixPubInfo: expected " & $MixPubInfoSize &
        " bytes, but got " & $(data.len) & " bytes."
    )
  let dMixPubInfo = MixPubInfo.deserialize(cast[seq[byte]](data)).valueOr:
    return err("Mix pub info deserialize error: " & error)
  return ok(dMixPubInfo)
proc deletePubInfoFolder*(pubInfoFolderPath: string = "./pubInfo") =
  ## Deletes the folder containing serialized public mix node info
  ## and all files inside it, if the folder exists.
  if dirExists(pubInfoFolderPath):
    removeDir(pubInfoFolderPath)
# An ordered collection of fully-described mix nodes.
type MixNodes* = seq[MixNodeInfo]

proc getMixPubInfoByIndex*(self: MixNodes, index: int): Result[MixPubInfo, string] =
  ## Returns the public-only view of the node at `index`.
  if index < 0 or index >= self.len:
    # FIX: the valid range is 0 .. len-1; the message previously said "len".
    return err("Index must be between 0 and " & $(self.len - 1))
  ok(
    MixPubInfo(
      peerId: self[index].peerId,
      multiAddr: self[index].multiAddr,
      mixPubKey: self[index].mixPubKey,
      libp2pPubKey: self[index].libp2pPubKey,
    )
  )
proc generateMixNodes(
    count: int, basePort: int = 4242, rng: ref HmacDrbgContext = newRng()
): Result[MixNodes, string] =
  ## Generates `count` mix nodes listening on consecutive TCP ports starting
  ## at `basePort`. Each node gets a fresh Curve25519 mix key pair and a
  ## secp256k1 libp2p identity drawn from `rng`.
  var nodes = newSeq[MixNodeInfo](count)
  for i in 0 ..< count:
    let keyPairResult = generateKeyPair()
    if keyPairResult.isErr:
      return err("Generate key pair error: " & $keyPairResult.error)
    let (mixPrivKey, mixPubKey) = keyPairResult.get()
    # BUG FIX: use the injected `rng` parameter; previously a new RNG was
    # instantiated on every iteration and the parameter was ignored.
    let
      keyPair = SkKeyPair.random(rng[])
      pubKeyProto = PublicKey(scheme: Secp256k1, skkey: keyPair.pubkey)
      peerId = PeerId.init(pubKeyProto).get()
      multiAddr =
        ?MultiAddress.init(fmt"/ip4/0.0.0.0/tcp/{basePort + i}").tryGet().catch().mapErr(
          x => x.msg
        )
    nodes[i] = MixNodeInfo(
      peerId: peerId,
      multiAddr: multiAddr,
      mixPubKey: mixPubKey,
      mixPrivKey: mixPrivKey,
      libp2pPubKey: keyPair.pubkey,
      libp2pPrivKey: keyPair.seckey,
    )
  ok(nodes)
proc initializeMixNodes*(count: int, basePort: int = 4242): Result[MixNodes, string] =
  ## Creates and initializes a set of `count` mix nodes starting at `basePort`.
  let generated = generateMixNodes(count, basePort)
  if generated.isErr:
    return err("Mix node initialization error: " & generated.error)
  ok(generated.get())
proc findByPeerId*(self: MixNodes, peerId: PeerId): Result[MixNodeInfo, string] =
  ## Looks up the first node with the given peer id.
  for node in self:
    if node.peerId == peerId:
      return ok(node)
  err("No node with peer id: " & $peerId)
proc initMixMultiAddrByIndex*(
    self: var MixNodes, index: int, peerId: PeerId, multiAddr: MultiAddress
): Result[void, string] =
  ## Overwrites the identity (peer id and multiaddress) of the node at `index`.
  if index < 0 or index >= self.len:
    # FIX: the valid range is 0 .. len-1; the message previously said "len".
    return err("Index must be between 0 and " & $(self.len - 1))
  self[index].multiAddr = multiAddr
  self[index].peerId = peerId
  ok()

View File

@@ -0,0 +1,392 @@
import chronicles, chronos, sequtils, strutils, os, results
import std/[strformat, tables], metrics
import
./[
curve25519, fragmentation, mix_message, mix_node, sphinx, serialization,
tag_manager, mix_metrics, exit_layer, multiaddr,
]
import stew/endians2
import ../protocol
import ../../stream/[connection, lpstream]
import ../../[switch, multicodec, peerinfo]
const MixProtocolID* = "/mix/1.0.0"
## Mix Protocol defines a decentralized anonymous message routing layer for libp2p networks.
## It enables sender anonymity by routing each message through a decentralized mix overlay
## network composed of participating libp2p nodes, known as mix nodes. Each message is
## routed independently in a stateless manner, allowing other libp2p protocols to selectively
## anonymize messages without modifying their core protocol behavior.
# Protocol state: this node can act as entry, intermediate and exit node.
type MixProtocol* = ref object of LPProtocol
  mixNodeInfo: MixNodeInfo # this node's own keys and address
  pubNodeInfo: Table[PeerId, MixPubInfo] # known remote mix nodes, by peer id
  switch: Switch # used to dial next hops / destinations
  tagManager: TagManager # replay (duplicate tag) detection
  exitLayer: ExitLayer # final delivery of exit-node messages
  rng: ref HmacDrbgContext # randomness for path selection
proc loadAllButIndexMixPubInfo*(
    index, numNodes: int, pubInfoFolderPath: string = "./pubInfo"
): Result[Table[PeerId, MixPubInfo], string] =
  ## Loads the public info of every mix node except the one at `index`,
  ## keyed by peer id.
  var loaded: Table[PeerId, MixPubInfo]
  for nodeIdx in 0 ..< numNodes:
    if nodeIdx == index:
      continue
    let pubInfo = MixPubInfo.readFromFile(nodeIdx, pubInfoFolderPath).valueOr:
      return err("Failed to load pub info from file: " & error)
    loaded[pubInfo.peerId] = pubInfo
  ok(loaded)
proc cryptoRandomInt(rng: ref HmacDrbgContext, max: int): Result[int, string] =
  ## Random int in [0, max) drawn from `rng`.
  ## NOTE: plain modulo reduction carries a small bias when max does not
  ## divide 2^64; acceptable for path selection, not for key material.
  if max <= 0:
    # FIX: also reject negative max — it would previously be converted to a
    # huge uint64 and silently yield out-of-range results.
    return err("Max must be positive.")
  let res = rng[].generate(uint64) mod uint64(max)
  ok(res.int)
proc handleMixNodeConnection(
    mixProto: MixProtocol, conn: Connection
) {.async: (raises: [LPStreamError, CancelledError]).} =
  ## Handles one inbound mix stream: reads a single Sphinx packet, processes
  ## it with this node's private key, then delivers (exit), forwards after a
  ## delay (intermediate), or drops it (duplicate / bad MAC).
  let receivedBytes =
    try:
      await conn.readLp(PacketSize)
    except CancelledError as exc:
      raise exc
    finally:
      # The stream is single-shot: always close it, whatever the outcome.
      await conn.close()
  if receivedBytes.len == 0:
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "NO_DATA"])
    return # No data, end of stream
  # Process the packet
  let (peerId, multiAddr, _, mixPrivKey, _, _) = mixProto.mixNodeInfo.get()
  let sphinxPacket = SphinxPacket.deserialize(receivedBytes).valueOr:
    error "Sphinx packet deserialization error", err = error
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "INVALID_SPHINX"])
    return
  let processedSP = processSphinxPacket(sphinxPacket, mixPrivKey, mixProto.tagManager).valueOr:
    error "Failed to process Sphinx packet", err = error
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "INVALID_SPHINX"])
    return
  case processedSP.status
  of Exit:
    mix_messages_recvd.inc(labelValues = ["Exit"])
    # This is the exit node, forward to destination
    let msgChunk = MessageChunk.deserialize(processedSP.messageChunk).valueOr:
      error "Deserialization failed", err = error
      mix_messages_error.inc(labelValues = ["Exit", "INVALID_SPHINX"])
      return
    let unpaddedMsg = msgChunk.removePadding().valueOr:
      error "Unpadding message failed", err = error
      mix_messages_error.inc(labelValues = ["Exit", "INVALID_SPHINX"])
      return
    let deserialized = MixMessage.deserialize(unpaddedMsg).valueOr:
      error "Deserialization failed", err = error
      mix_messages_error.inc(labelValues = ["Exit", "INVALID_SPHINX"])
      return
    # A default-initialized Hop means the packet carried no destination.
    if processedSP.destination == Hop():
      error "no destination available"
      mix_messages_error.inc(labelValues = ["Exit", "NO_DESTINATION"])
      return
    let destBytes = processedSP.destination.get()
    let (destPeerId, destAddr) = bytesToMultiAddr(destBytes).valueOr:
      error "Failed to convert bytes to multiaddress", err = error
      mix_messages_error.inc(labelValues = ["Exit", "INVALID_DEST"])
      return
    trace "Exit node - Received mix message",
      peerId,
      message = deserialized.message,
      codec = deserialized.codec,
      to = destPeerId
    await mixProto.exitLayer.onMessage(
      deserialized.codec, deserialized.message, destAddr, destPeerId
    )
    mix_messages_forwarded.inc(labelValues = ["Exit"])
  of Reply:
    # TODO: implement
    discard
  of Intermediate:
    trace "# Intermediate: ", peerId, multiAddr
    # Add delay
    mix_messages_recvd.inc(labelValues = ["Intermediate"])
    await sleepAsync(milliseconds(processedSP.delayMs))
    # Forward to next hop
    let nextHopBytes = processedSP.nextHop.get()
    let (nextPeerId, nextAddr) = bytesToMultiAddr(nextHopBytes).valueOr:
      error "Failed to convert bytes to multiaddress", err = error
      mix_messages_error.inc(labelValues = ["Intermediate", "INVALID_DEST"])
      return
    try:
      let nextHopConn =
        await mixProto.switch.dial(nextPeerId, @[nextAddr], MixProtocolID)
      defer:
        await nextHopConn.close()
      await nextHopConn.writeLp(processedSP.serializedSphinxPacket)
      mix_messages_forwarded.inc(labelValues = ["Intermediate"])
    except CancelledError as exc:
      raise exc
    except DialFailedError as exc:
      error "Failed to dial next hop: ", err = exc.msg
      mix_messages_error.inc(labelValues = ["Intermediate", "DIAL_FAILED"])
    except LPStreamError as exc:
      error "Failed to write to next hop: ", err = exc.msg
      # NOTE(review): a write failure is labeled DIAL_FAILED — consider a
      # distinct error label.
      mix_messages_error.inc(labelValues = ["Intermediate", "DIAL_FAILED"])
  of Duplicate:
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "DUPLICATE"])
  of InvalidMAC:
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "INVALID_MAC"])
proc getMaxMessageSizeForCodec*(
    codec: string, numberOfSurbs: uint8 = 0
): Result[int, string] =
  ## Computes the maximum payload size (in bytes) available for a message when
  ## encoded with the given `codec`.
  ## Returns an error if the codec overhead alone exceeds the data capacity.
  ## NOTE(review): `numberOfSurbs` is currently unused — SURB overhead is not
  ## subtracted yet; confirm against the SURB implementation.
  let serializedMsg = MixMessage.init(@[], codec).serialize()
  if serializedMsg.len > DataSize:
    return err("cannot encode messages for this codec")
  return ok(DataSize - serializedMsg.len)
proc sendPacket(
    mixProto: MixProtocol,
    multiAddrs: MultiAddress,
    sphinxPacket: seq[byte],
    label: string,
) {.async: (raises: [CancelledError]).} =
  ## Sends the wrapped message to the first mix node in the selected path.
  ## `label` is the metrics label used for error counters.
  let (firstMixPeerId, firstMixAddr) = parseFullAddress(multiAddrs).valueOr:
    error "Invalid multiaddress", err = error
    mix_messages_error.inc(labelValues = [label, "NON_RECOVERABLE"])
    return
  try:
    let nextHopConn =
      await mixProto.switch.dial(firstMixPeerId, @[firstMixAddr], @[MixProtocolID])
    defer:
      await nextHopConn.close()
    await nextHopConn.writeLp(sphinxPacket)
    # BUG FIX: only count the packet as forwarded after a successful write;
    # previously this counter was incremented even when dial/write failed.
    mix_messages_forwarded.inc(labelValues = ["Entry"])
  except DialFailedError as exc:
    error "Failed to dial next hop: ",
      peerId = firstMixPeerId, address = firstMixAddr, err = exc.msg
    mix_messages_error.inc(labelValues = [label, "SEND_FAILED"])
  except LPStreamError as exc:
    error "Failed to write to next hop: ",
      peerId = firstMixPeerId, address = firstMixAddr, err = exc.msg
    mix_messages_error.inc(labelValues = [label, "SEND_FAILED"])
  except CancelledError as exc:
    raise exc
proc buildMessage(
    msg: seq[byte], codec: string, peerId: PeerId
): Result[Message, (string, string)] =
  ## Wraps `msg` in a MixMessage for `codec`, rejects it if it exceeds the
  ## payload capacity, then pads (keyed on `peerId`) and serializes the chunk.
  ## Errors carry a (human-readable message, metrics label) pair.
  let serialized = MixMessage.init(msg, codec).serialize()
  if serialized.len > DataSize:
    return err(("message size exceeds maximum payload size", "INVALID_SIZE"))
  ok(addPadding(serialized, peerId).serialize())
## Represents the final target of a mixnet message.
## Contains the peer id and multiaddress of the destination node.
type MixDestination* = object
  peerId: PeerId # destination node's peer id
  address: MultiAddress # destination node's dialable multiaddress

proc init*(T: typedesc[MixDestination], peerId: PeerId, address: MultiAddress): T =
  ## Initializes a destination object with the given peer id and multiaddress.
  T(peerId: peerId, address: address)

proc `$`*(d: MixDestination): string =
  ## Human-readable form: "<address>/p2p/<peerId>".
  $d.address & "/p2p/" & $d.peerId
proc anonymizeLocalProtocolSend*(
    mixProto: MixProtocol,
    incoming: AsyncQueue[seq[byte]],
    msg: seq[byte],
    codec: string,
    destination: MixDestination,
    numSurbs: uint8,
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Sends `msg` anonymously through the mix network: picks `PathLength`
  ## distinct mix nodes at random (excluding the destination itself), wraps the
  ## message in a Sphinx packet, and forwards it to the first hop.
  ## NOTE(review): `incoming` and `numSurbs` are not referenced in this body —
  ## confirm whether SURB generation / reply plumbing is still to be wired in.
  mix_messages_recvd.inc(labelValues = ["Entry"])
  var
    multiAddrs: seq[MultiAddress] = @[]
    publicKeys: seq[FieldElement] = @[]
    hop: seq[Hop] = @[]
    delay: seq[seq[byte]] = @[]
    exitPeerId: PeerId
  # Select L mix nodes at random
  let numMixNodes = mixProto.pubNodeInfo.len
  var numAvailableNodes = numMixNodes
  debug "Destination data", destination
  # The destination must not also serve as a relay hop.
  if mixProto.pubNodeInfo.hasKey(destination.peerId):
    numAvailableNodes = numMixNodes - 1
  if numAvailableNodes < PathLength:
    error "No. of public mix nodes less than path length.",
      numMixNodes = numAvailableNodes, pathLength = PathLength
    mix_messages_error.inc(labelValues = ["Entry", "LOW_MIX_POOL"])
    return
  # Skip the destination peer
  var pubNodeInfoKeys =
    mixProto.pubNodeInfo.keys.toSeq().filterIt(it != destination.peerId)
  var availableIndices = toSeq(0 ..< pubNodeInfoKeys.len)
  var i = 0
  while i < PathLength:
    # Draw without replacement from the remaining candidate indices.
    let randomIndexPosition = cryptoRandomInt(mixProto.rng, availableIndices.len).valueOr:
      error "Failed to generate random number", err = error
      mix_messages_error.inc(labelValues = ["Entry", "NON_RECOVERABLE"])
      return
    let selectedIndex = availableIndices[randomIndexPosition]
    let randPeerId = pubNodeInfoKeys[selectedIndex]
    availableIndices.del(randomIndexPosition)
    # Last hop will be the exit node that will forward the request
    if i == PathLength - 1:
      exitPeerId = randPeerId
    debug "Selected mix node: ", indexInPath = i, peerId = randPeerId
    # Extract multiaddress, mix public key, and hop
    let (peerId, multiAddr, mixPubKey, _) =
      mixProto.pubNodeInfo.getOrDefault(randPeerId).get()
    multiAddrs.add(multiAddr)
    publicKeys.add(mixPubKey)
    let multiAddrBytes = multiAddrToBytes(peerId, multiAddr).valueOr:
      error "Failed to convert multiaddress to bytes", err = error
      mix_messages_error.inc(labelValues = ["Entry", "INVALID_MIX_INFO"])
      #TODO: should we skip and pick a different node here??
      return
    hop.add(Hop.init(multiAddrBytes))
    # Compute delay
    let delayMillisec =
      if i != PathLength - 1:
        # cryptoRandomInt's bound is exclusive, so this yields 0..2 —
        # NOTE(review): confirm the intended per-hop delay range.
        cryptoRandomInt(mixProto.rng, 3).valueOr:
          error "Failed to generate random number", err = error
          mix_messages_error.inc(labelValues = ["Entry", "NON_RECOVERABLE"])
          return
      else:
        0 # Last hop does not require a delay
    delay.add(@(delayMillisec.uint16.toBytesBE()))
    i = i + 1
  #Encode destination
  let destAddrBytes = multiAddrToBytes(destination.peerId, destination.address).valueOr:
    error "Failed to convert multiaddress to bytes", err = error
    mix_messages_error.inc(labelValues = ["Entry", "INVALID_DEST"])
    return
  let destHop = Hop.init(destAddrBytes)
  let message = buildMessage(msg, codec, mixProto.mixNodeInfo.peerId).valueOr:
    error "Error building message", err = error[0]
    mix_messages_error.inc(labelValues = ["Entry", error[1]])
    return
  # Wrap in Sphinx packet
  let sphinxPacket = wrapInSphinxPacket(message, publicKeys, delay, hop, destHop).valueOr:
    error "Failed to wrap in sphinx packet", err = error
    mix_messages_error.inc(labelValues = ["Entry", "NON_RECOVERABLE"])
    return
  # Send the wrapped message to the first mix node in the selected path
  await mixProto.sendPacket(multiAddrs[0], sphinxPacket, "Entry")
proc init*(
    mixProto: MixProtocol,
    mixNodeInfo: MixNodeInfo,
    pubNodeInfo: Table[PeerId, MixPubInfo],
    switch: Switch,
    tagManager: TagManager = TagManager.new(),
    rng: ref HmacDrbgContext = newRng(),
) =
  ## Initializes an already-allocated MixProtocol with this node's own info,
  ## the public info of the other mix nodes, and its runtime dependencies
  ## (switch, replay-protection tag manager, RNG).
  mixProto.mixNodeInfo = mixNodeInfo
  mixProto.pubNodeInfo = pubNodeInfo
  mixProto.switch = switch
  mixProto.tagManager = tagManager
  mixProto.exitLayer = ExitLayer.init(switch)
  mixProto.codecs = @[MixProtocolID]
  mixProto.rng = rng
  # Incoming mix connections are processed by handleMixNodeConnection;
  # stream errors are logged and swallowed (handler may not raise them).
  mixProto.handler = proc(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    try:
      await mixProto.handleMixNodeConnection(conn)
    except LPStreamError as e:
      debug "Stream error", conn = conn, err = e.msg
proc new*(
    T: typedesc[MixProtocol],
    mixNodeInfo: MixNodeInfo,
    pubNodeInfo: Table[PeerId, MixPubInfo],
    switch: Switch,
    tagManager: TagManager = TagManager.new(),
    rng: ref HmacDrbgContext = newRng(),
): T =
  ## Allocates and initializes a new MixProtocol.
  ## Fix: forward `tagManager` and `rng` to `init` — previously both parameters
  ## were silently dropped and `init` re-created fresh defaults, so a caller's
  ## tag manager / RNG (e.g. from the file-based `new` overload) was ignored.
  let mixProto = new(T)
  mixProto.init(mixNodeInfo, pubNodeInfo, switch, tagManager, rng)
  mixProto
proc new*(
    T: typedesc[MixProtocol],
    index, numNodes: int,
    switch: Switch,
    nodeFolderInfoPath: string = ".",
    rng: ref HmacDrbgContext = newRng(),
): Result[T, string] =
  ## Constructs a new `MixProtocol` instance for the mix node at `index`,
  ## loading its private info from the `nodeInfo` folder and the public info of
  ## the other `numNodes - 1` nodes from the `pubInfo` folder under
  ## `nodeFolderInfoPath`.
  let mixNodeInfo = MixNodeInfo.readFromFile(index, nodeFolderInfoPath / fmt"nodeInfo").valueOr:
    return err("Failed to load mix node info for index " & $index & " - err: " & error)
  let pubNodeInfo = loadAllButIndexMixPubInfo(
    index, numNodes, nodeFolderInfoPath / fmt"pubInfo"
  ).valueOr:
    return err("Failed to load mix pub info for index " & $index & " - err: " & error)
  let mixProto =
    MixProtocol.new(mixNodeInfo, pubNodeInfo, switch, TagManager.new(), rng)
  return ok(mixProto)
proc setNodePool*(
    mixProtocol: MixProtocol, mixNodeTable: Table[PeerId, MixPubInfo]
) {.gcsafe, raises: [].} =
  ## Replaces the protocol's entire public mix node pool.
  mixProtocol.pubNodeInfo = mixNodeTable

proc getNodePoolSize*(mixProtocol: MixProtocol): int {.gcsafe, raises: [].} =
  ## Returns the number of known public mix nodes.
  mixProtocol.pubNodeInfo.len

View File

@@ -0,0 +1,95 @@
import results, sugar, sequtils, strutils
import ./serialization
import stew/endians2
import ../../[multicodec, multiaddress, peerid]
# Layout constants for the fixed-size binary multiaddress encoding below.
const
  PeerIdByteLen = 39 # ed25519 and secp256k1 multihash length
  MinMultiAddrComponentLen = 2 # e.g. /ip4/.../tcp/...
  MaxMultiAddrComponentLen = 5 # quic + circuit relay
  # TODO: Add support for ipv6, dns, dns4, ws/wss/sni support
proc multiAddrToBytes*(
    peerId: PeerId, multiAddr: MultiAddress
): Result[seq[byte], string] {.raises: [].} =
  ## Encodes (`peerId`, `multiAddr`) into a fixed `AddrSize` byte layout:
  ## 4 bytes IPv4 | 1 protocol byte (0 = TCP, 1 = QUIC) | 2 bytes big-endian
  ## port | [39 bytes relay peer id, circuit relay only] | 39 bytes `peerId`,
  ## zero-padded to `AddrSize`. Only IPv4 with TCP or QUIC-v1 is supported.
  var ma = multiAddr
  let sma = multiAddr.items().toSeq()
  var res: seq[byte] = @[]
  if not (sma.len >= MinMultiAddrComponentLen and sma.len <= MaxMultiAddrComponentLen):
    return err("Invalid multiaddress format")
  # Only IPV4 is supported
  let isCircuitRelay = ?ma.contains(multiCodec("p2p-circuit"))
  # Number of trailing components after the transport part (p2p/p2p-circuit).
  let baseP2PEndIdx = if isCircuitRelay: 3 else: 1
  let baseAddr =
    try:
      if sma.len - 1 - baseP2PEndIdx < 0:
        return err("Invalid multiaddress format")
      # NOTE(review): this slice keeps components 0 .. len-baseP2PEndIdx;
      # `sma.len - 1 - baseP2PEndIdx` would drop the trailing /p2p suffix
      # instead — confirm the intended upper bound for non-relay addresses
      # that carry a /p2p component.
      sma[0 .. sma.len - baseP2PEndIdx].mapIt(it.tryGet()).foldl(a & b)
    except LPError as exc:
      return err("Could not obtain base address: " & exc.msg)
  let isQuic = QUIC_V1_IP.match(baseAddr)
  let isTCP = TCP_IP.match(baseAddr)
  if not (isTCP or isQuic):
    return err("Unsupported protocol")
  # 4 bytes for the IP
  let ip = ?ma.getPart(multiCodec("ip4")).value().protoArgument()
  res.add(ip)
  var port: string
  if isQuic:
    res.add(1.byte) # Protocol byte
    let udpPortPart = ma.getPart(multiCodec("udp")).value()
    port = $udpPortPart
  elif isTCP:
    res.add(0.byte) # Protocol byte
    let tcpPortPart = ma.getPart(multiCodec("tcp")).value()
    port = $tcpPortPart
  # Port part renders as "/udp/<n>" or "/tcp/<n>"; element 2 is the number.
  let portNum = ?catch(port.split('/')[2].parseInt()).mapErr(x => x.msg)
  res.add(portNum.uint16.toBytesBE())
  if isCircuitRelay:
    let relayIdPart = ?ma.getPart(multiCodec("p2p"))
    let relayId = ?PeerId.init(?relayIdPart.protoArgument()).mapErr(x => $x)
    if relayId.data.len != PeerIdByteLen:
      return err("unsupported PeerId key type")
    res.add(relayId.data)
  # PeerID (39 bytes)
  # NOTE(review): `peerId.data` length is not validated here, unlike the relay
  # id above — confirm callers guarantee a 39-byte peer id.
  res.add(peerId.data)
  if res.len > AddrSize:
    return err("Address must be <= " & $AddrSize & " bytes")
  return ok(res & newSeq[byte](AddrSize - res.len))
proc bytesToMultiAddr*(bytes: openArray[byte]): MaResult[(PeerId, MultiAddress)] =
  ## Decodes the fixed-size layout produced by `multiAddrToBytes` back into a
  ## (PeerId, MultiAddress) pair.
  ## Layout: 4 bytes IPv4 | 1 protocol byte (0 = TCP, 1 = QUIC) | 2 bytes port |
  ## 39 bytes peer id #1 | 39 bytes peer id #2 (all-zero when not relayed).
  ## Improvement: the peer-id offsets were hard-coded (7, 46); they are now
  ## derived from `PeerIdByteLen` so the layout has a single source of truth.
  if bytes.len != AddrSize:
    return err("Address must be exactly " & $AddrSize & " bytes")
  const PeerId1Start = 7 # 4 (IP) + 1 (protocol byte) + 2 (port)
  let
    ip = bytes[0 .. 3].mapIt($it).join(".")
    protocol = if bytes[4] == 0: "tcp" else: "udp"
    quic = if bytes[4] == 1: "/quic-v1" else: ""
    port = uint16.fromBytesBE(bytes[5 .. 6])
    # peerId1 represents the circuit relay server addr if p2p-circuit addr,
    # otherwise it's the node's actual peerId
    peerId1Bytes = bytes[PeerId1Start ..< PeerId1Start + PeerIdByteLen]
    peerId2Bytes =
      bytes[PeerId1Start + PeerIdByteLen ..< PeerId1Start + (PeerIdByteLen * 2)]
  let ma = ?MultiAddress.init("/ip4/" & ip & "/" & protocol & "/" & $port & quic)
  return
    if peerId2Bytes != newSeq[byte](PeerIdByteLen):
      # Has circuit relay address
      let relayIdMa = ?MultiAddress.init(multiCodec("p2p"), peerId1Bytes)
      let p2pCircuitMa = ?MultiAddress.init(multiCodec("p2p-circuit"))
      let peerId = ?PeerId.init(peerId2Bytes).mapErr(x => $x)
      ok((peerId, ?(ma & relayIdMa & p2pCircuitMa).catch().mapErr(x => x.msg)))
    else:
      let peerId = ?PeerId.init(peerId1Bytes).mapErr(x => $x)
      ok((peerId, ma))

View File

@@ -0,0 +1,29 @@
import std/endians, times
import ../../peerid
import ./crypto
import ../../utils/sequninit
# Monotonically-advanced 32-bit sequence number used for message ordering.
type SeqNo* = uint32

proc init*(T: typedesc[SeqNo], data: seq[byte]): T =
  ## Derives an initial sequence number from `data`: the first 4 bytes of its
  ## SHA-256 hash, interpreted big-endian.
  var seqNo: SeqNo = 0
  let hash = sha256_hash(data)
  for i in 0 .. 3:
    seqNo = seqNo or (uint32(hash[i]) shl (8 * (3 - i)))
  return seqNo

proc init*(T: typedesc[SeqNo], peerId: PeerId): T =
  ## Derives an initial sequence number from a peer id's raw bytes.
  SeqNo.init(peerId.data)
proc generate*(seqNo: var SeqNo, messageBytes: seq[byte]) =
  ## Advances `seqNo` by a value derived from `messageBytes` plus the current
  ## wall-clock time in milliseconds.
  ## Fix: the timestamp buffer is written through a pointer by `bigEndian64`,
  ## so it must be a `var` and addressed with `addr`; the original declared it
  ## with `let` and mutated it via `unsafeAddr`, which is undefined behavior.
  let currentTime = getTime().toUnix() * 1000
  var currentTimeBytes = newSeqUninit[byte](8)
  bigEndian64(addr currentTimeBytes[0], unsafeAddr currentTime)
  let s = SeqNo.init(messageBytes & currentTimeBytes)
  seqNo = (seqNo + s) mod high(uint32)
proc inc*(seqNo: var SeqNo) =
  ## Increments the sequence number, wrapping via `mod high(uint32)`.
  ## NOTE(review): `mod high(uint32)` maps the value high(uint32) to 0, so
  ## high(uint32) itself is never produced — confirm this is intended rather
  ## than plain uint32 wraparound.
  seqNo = (seqNo + 1) mod high(uint32)

# TODO: Manage sequence no. overflow in a way that it does not affect re-assembly

View File

@@ -0,0 +1,236 @@
import results
import std/sequtils
import ../../utility
# Sphinx packet format constants (sizes in bytes).
const
  k* = 16 # Security parameter
  r* = 5 # Maximum path length
  t* = 6 # t.k - combined length of next hop address and delay
  AlphaSize* = 32 # Group element
  BetaSize* = ((r * (t + 1)) + 1) * k # bytes
  GammaSize* = 16 # Output of HMAC-SHA-256, truncated to 16 bytes
  HeaderSize* = AlphaSize + BetaSize + GammaSize # Total header size
  DelaySize* = 2 # Delay size
  AddrSize* = (t * k) - DelaySize # Address size
  PacketSize* = 4608 # Total packet size (from spec)
  MessageSize* = PacketSize - HeaderSize - k # Size of the message itself
  PayloadSize* = MessageSize + k # Total payload size (k-byte zero prefix + message)
  SurbSize* = HeaderSize + k + AddrSize
    # Size of a surb packet inside the message payload
  SurbLenSize* = 1 # Size of the field storing the number of surbs
  SurbIdLen* = k # Size of the identifier used when sending a message with surb
  DefaultSurbs* = uint8(4) # Default number of SURBs to send
# Sphinx packet header: Alpha (ephemeral group element), Beta (nested
# encrypted routing info), Gamma (MAC over Beta).
type Header* = object
  Alpha*: seq[byte] # AlphaSize bytes
  Beta*: seq[byte] # BetaSize bytes
  Gamma*: seq[byte] # GammaSize bytes

proc init*(
    T: typedesc[Header], alpha: seq[byte], beta: seq[byte], gamma: seq[byte]
): T =
  ## Constructs a Header; sizes are validated at serialization time, not here.
  return T(Alpha: alpha, Beta: beta, Gamma: gamma)

proc get*(header: Header): (seq[byte], seq[byte], seq[byte]) =
  ## Returns (Alpha, Beta, Gamma).
  (header.Alpha, header.Beta, header.Gamma)

proc serialize*(header: Header): seq[byte] =
  ## Serializes as Alpha | Beta | Gamma (HeaderSize bytes in total).
  doAssert header.Alpha.len == AlphaSize,
    "Alpha must be exactly " & $AlphaSize & " bytes"
  doAssert header.Beta.len == BetaSize, "Beta must be exactly " & $BetaSize & " bytes"
  doAssert header.Gamma.len == GammaSize,
    "Gamma must be exactly " & $GammaSize & " bytes"
  return header.Alpha & header.Beta & header.Gamma
proc deserialize*(
    T: typedesc[Header], serializedHeader: openArray[byte]
): Result[T, string] =
  ## Parses a Header from the first `HeaderSize` bytes of `serializedHeader`.
  ## Longer input is allowed on purpose — callers pass whole packets (see
  ## SphinxPacket.deserialize). Fix: the error message claimed the input must
  ## be "exactly" HeaderSize bytes while the check is a minimum length.
  if len(serializedHeader) < HeaderSize:
    return err("Serialized header must be at least " & $HeaderSize & " bytes")
  let header = Header(
    Alpha: serializedHeader[0 .. (AlphaSize - 1)],
    Beta: serializedHeader[AlphaSize .. (AlphaSize + BetaSize - 1)],
    Gamma: serializedHeader[(AlphaSize + BetaSize) .. (HeaderSize - 1)],
  )
  ok(header)
# Plaintext message body; always MessageSize bytes when serialized.
type Message* = seq[byte]

proc serialize*(message: Message): seq[byte] =
  ## Prepends k zero bytes (checked after decryption as an integrity marker),
  ## producing a PayloadSize buffer.
  doAssert message.len() == MessageSize,
    "Message must be exactly " & $(MessageSize) & " bytes"
  var res = newSeq[byte](k) # Prepend k bytes of zero padding
  res.add(message)
  return res

proc deserialize*(
    T: typedesc[Message], serializedMessage: openArray[byte]
): Result[T, string] =
  ## Strips the k-byte zero prefix from a PayloadSize buffer.
  if len(serializedMessage) != PayloadSize:
    return err("Serialized message must be exactly " & $PayloadSize & " bytes")
  return ok(serializedMessage[k ..^ 1])
# One hop's encoded (peer id + multiaddress) blob; empty means "no hop".
type Hop* = object
  MultiAddress: seq[byte] # AddrSize bytes, or empty for the zero hop

proc init*(T: typedesc[Hop], multiAddress: seq[byte]): T =
  ## Wraps an encoded address; an all-zero AddrSize buffer is normalized to
  ## the empty hop.
  T(
    MultiAddress:
      if multiAddress == newSeq[byte](AddrSize):
        @[]
      else:
        multiAddress
  )

proc get*(hop: Hop): seq[byte] =
  ## Returns the raw encoded address (empty for the zero hop).
  return hop.MultiAddress
proc serialize*(hop: Hop): seq[byte] =
  ## Serializes to exactly AddrSize bytes; the empty hop becomes all zeros.
  ## Fix: the non-empty branch previously ended after the doAssert without
  ## returning the address, so the proc fell through and yielded the default
  ## result (an empty seq) for every non-empty hop.
  if hop.MultiAddress.len == 0:
    return newSeq[byte](AddrSize)
  doAssert len(hop.MultiAddress) == AddrSize,
    "MultiAddress must be exactly " & $AddrSize & " bytes"
  return hop.MultiAddress
proc deserialize*(T: typedesc[Hop], data: openArray[byte]): Result[T, string] =
  ## Parses an AddrSize buffer; an all-zero buffer maps to the empty hop
  ## (mirrors Hop.init).
  if len(data) != AddrSize:
    return err("MultiAddress must be exactly " & $AddrSize & " bytes")
  ok(
    T(
      MultiAddress:
        if data == newSeq[byte](AddrSize):
          @[]
        else:
          @data
    )
  )
# Per-hop routing block: next-hop address, forwarding delay, next hop's MAC
# (Gamma), and the truncated Beta for subsequent hops.
type RoutingInfo* = object
  Addr: Hop # next hop address
  Delay: seq[byte] # DelaySize bytes
  Gamma: seq[byte] # GammaSize bytes
  Beta: seq[byte] # truncated, ((r*(t+1)) - t) * k bytes

proc init*(
    T: typedesc[RoutingInfo],
    address: Hop,
    delay: seq[byte],
    gamma: seq[byte],
    beta: seq[byte],
): T =
  ## Bundles the per-hop routing fields; sizes are validated on serialize.
  return T(Addr: address, Delay: delay, Gamma: gamma, Beta: beta)

proc getRoutingInfo*(info: RoutingInfo): (Hop, seq[byte], seq[byte], seq[byte]) =
  ## Returns (Addr, Delay, Gamma, Beta).
  (info.Addr, info.Delay, info.Gamma, info.Beta)

proc serialize*(info: RoutingInfo): seq[byte] =
  ## Serializes as Addr | Delay | Gamma | Beta.
  doAssert info.Delay.len() == DelaySize,
    "Delay must be exactly " & $DelaySize & " bytes"
  doAssert info.Gamma.len() == GammaSize,
    "Gamma must be exactly " & $GammaSize & " bytes"
  # Beta here is the truncated form carried inside the next header.
  let expectedBetaLen = ((r * (t + 1)) - t) * k
  doAssert info.Beta.len() == expectedBetaLen,
    "Beta must be exactly " & $expectedBetaLen & " bytes"
  let addrBytes = info.Addr.serialize()
  return addrBytes & info.Delay & info.Gamma & info.Beta
proc readBytes(
    data: openArray[byte], offset: var int, readSize: Opt[int] = Opt.none(int)
): Result[seq[byte], string] =
  ## Consumes bytes from `data` starting at `offset`, advancing `offset`.
  ## With a `readSize`: reads exactly that many bytes or errors; without one:
  ## reads everything up to the end of `data`.
  if data.len < offset:
    return err("not enough data")
  if readSize.isNone():
    # No size requested: take the remainder and move offset to the end.
    let remainder = data[offset .. ^1]
    offset = data.len
    return ok(remainder)
  let size = readSize.get()
  if data.len < offset + size:
    return err("not enough data")
  let chunk = data[offset ..< offset + size]
  offset += size
  ok(chunk)
proc deserialize*(T: typedesc[RoutingInfo], data: openArray[byte]): Result[T, string] =
  ## Parses a decrypted B buffer (BetaSize + (t+1)*k bytes) into the address,
  ## delay, gamma and beta fields, consuming them in order.
  if len(data) != BetaSize + ((t + 1) * k):
    return err("Data must be exactly " & $(BetaSize + ((t + 1) * k)) & " bytes")
  let hop = Hop.deserialize(data[0 .. AddrSize - 1]).valueOr:
    return err("Deserialize hop error: " & error)
  var offset: int = AddrSize
  return ok(
    RoutingInfo(
      Addr: hop,
      Delay: ?data.readBytes(offset, Opt.some(DelaySize)),
      Gamma: ?data.readBytes(offset, Opt.some(GammaSize)),
      Beta: ?data.readBytes(offset, Opt.some(BetaSize)),
    )
  )
# A complete Sphinx packet: header plus onion-encrypted payload.
type SphinxPacket* = object
  Hdr*: Header
  Payload*: seq[byte] # PayloadSize bytes

proc init*(T: typedesc[SphinxPacket], header: Header, payload: seq[byte]): T =
  ## Bundles a header and payload into a packet.
  T(Hdr: header, Payload: payload)

proc get*(packet: SphinxPacket): (Header, seq[byte]) =
  ## Returns (header, payload).
  (packet.Hdr, packet.Payload)

proc serialize*(packet: SphinxPacket): seq[byte] =
  ## Serializes as header bytes followed by the payload.
  let headerBytes = packet.Hdr.serialize()
  return headerBytes & packet.Payload

proc deserialize*(T: typedesc[SphinxPacket], data: openArray[byte]): Result[T, string] =
  ## Parses a PacketSize buffer; the header reads the leading HeaderSize bytes
  ## and the rest becomes the payload.
  if len(data) != PacketSize:
    return err("Sphinx packet size must be exactly " & $PacketSize & " bytes")
  let header = ?Header.deserialize(data)
  return ok(SphinxPacket(Hdr: header, Payload: data[HeaderSize ..^ 1]))
type
  Secret* = seq[seq[byte]] # per-hop shared secrets
  Key* = seq[byte] # symmetric key material (k bytes)
  SURBIdentifier* = array[SurbIdLen, byte] # identifies a SURB reply
  # Single-use reply block: everything needed to send an anonymous reply.
  SURB* = object
    hop*: Hop # first hop of the return path
    header*: Header # precomputed Sphinx header for the reply
    key*: Key # key used to encrypt/decrypt the reply payload
    secret*: Opt[Secret] # per-hop secrets (set by createSURB)
proc serializeMessageWithSURBs*(
    msg: seq[byte], surbs: seq[SURB]
): Result[seq[byte], string] =
  ## Prepends a one-byte SURB count and each SURB (hop | header | key) to `msg`.
  # Cap on SURB count: each SURB costs SurbSize bytes plus the count byte;
  # the extra -1 appears to reserve at least one byte for the message itself —
  # NOTE(review): confirm.
  if surbs.len > (MessageSize - SurbLenSize - 1) div SurbSize:
    return err("too many SURBs")
  let surbBytes =
    surbs.mapIt(it.hop.serialize() & it.header.serialize() & it.key).concat()
  ok(byte(surbs.len) & surbBytes & msg)
proc extractSURBs*(msg: seq[byte]): Result[(seq[SURB], seq[byte]), string] =
  ## Inverse of serializeMessageWithSURBs: reads the count byte, then each
  ## SURB's hop/header/key, returning (surbs, remaining message bytes).
  ## Note: the extracted SURBs carry no `secret` — that stays with the creator.
  var offset = 0
  let surbsLenBytes = ?readBytes(msg, offset, Opt.some(1))
  let surbsLen = int(surbsLenBytes[0])
  if surbsLen > (MessageSize - SurbLenSize - 1) div SurbSize:
    return err("too many SURBs")
  var surbs: seq[SURB] = newSeq[SURB](surbsLen)
  for i in 0 ..< surbsLen:
    let hopBytes = ?readBytes(msg, offset, Opt.some(AddrSize))
    let headerBytes = ?readBytes(msg, offset, Opt.some(HeaderSize))
    surbs[i].hop = ?Hop.deserialize(hopBytes)
    surbs[i].header = ?Header.deserialize(headerBytes)
    surbs[i].key = ?readBytes(msg, offset, Opt.some(k))
  # Shadows the parameter on purpose: the rest of the buffer is the message.
  let msg = ?readBytes(msg, offset)
  return ok((surbs, msg))

View File

@@ -0,0 +1,371 @@
import results, sequtils, stew/endians2
import ./[crypto, curve25519, serialization, tag_manager]
import ../../crypto/crypto
import ../../utils/sequninit
const PathLength* = 3 # Path length (L)
# Zero padding for the innermost routing layer so every hop's Beta keeps a
# uniform length regardless of the actual path length.
const PaddingLength = (((t + 1) * (r - PathLength)) + 1) * k

# Outcome of processing one Sphinx packet at a node.
type ProcessingStatus* = enum
  Exit # this node is the exit; packet carries a forward message
  Intermediate # packet must be relayed to the next hop
  Reply # packet is a SURB reply addressed to this node
  Duplicate # tag already seen; drop
  InvalidMAC # header MAC verification failed; drop
proc computeAlpha(
    publicKeys: openArray[FieldElement]
): Result[(seq[byte], seq[seq[byte]]), string] =
  ## Compute alpha, an ephemeral public value. Each mix node uses its private key and
  ## alpha to derive a shared session key for that hop.
  ## This session key is used to decrypt and process one layer of the packet.
  ## Returns (alpha_0, s): the first hop's group element and the per-hop
  ## shared secrets s[i].
  if publicKeys.len == 0:
    return err("No public keys provided")
  var
    s: seq[seq[byte]] = newSeq[seq[byte]](publicKeys.len)
    alpha_0: seq[byte]
    alpha: FieldElement
    secret: FieldElement
    blinders: seq[FieldElement] = @[]
  # x is the initial random scalar; one blinder is appended per hop below.
  let x = generateRandomFieldElement().valueOr:
    return err("Generate field element error: " & error)
  blinders.add(x)
  for i in 0 ..< publicKeys.len:
    if publicKeys[i].len != FieldElementSize:
      return err("Invalid public key size: " & $i)
    # Compute alpha, shared secret, and blinder
    if i == 0:
      alpha = multiplyBasePointWithScalars([blinders[i]]).valueOr:
        return err("Multiply base point with scalars error: " & error)
      alpha_0 = fieldElementToBytes(alpha)
    else:
      # Each subsequent alpha is the previous one blinded by this hop's scalar.
      alpha = multiplyPointWithScalars(alpha, [blinders[i]])
    # TODO: Optimize point multiplication by multiplying scalars first
    secret = multiplyPointWithScalars(publicKeys[i], blinders)
    # Next blinder = H(alpha || secret), chaining the hops together.
    let blinder = bytesToFieldElement(
      sha256_hash(fieldElementToBytes(alpha) & fieldElementToBytes(secret))
    ).valueOr:
      return err("Error in bytes to field element conversion: " & error)
    blinders.add(blinder)
    s[i] = fieldElementToBytes(secret)
  return ok((alpha_0, s))
proc deriveKeyMaterial(keyName: string, s: seq[byte]): seq[byte] =
  ## Builds KDF input by prefixing `s` with the raw bytes of `keyName`.
  result = newSeqOfCap[byte](keyName.len + s.len)
  for ch in keyName:
    result.add(byte(ch))
  result.add(s)
proc computeFillerStrings(s: seq[seq[byte]]): Result[seq[byte], string] =
  ## Computes the filler string that keeps Beta a constant length as layers
  ## are peeled off; it grows by one (t+1)*k chunk per hop after the first.
  var filler: seq[byte] = @[] # Start with an empty filler string
  for i in 1 ..< s.len:
    # Derive AES key and IV
    let
      aes_key = deriveKeyMaterial("aes_key", s[i - 1]).kdf()
      iv = deriveKeyMaterial("iv", s[i - 1]).kdf()
    # Compute filler string
    let
      fillerLength = (t + 1) * k
      zeroPadding = newSeq[byte](fillerLength)
    # Encrypt starting at a keystream offset so only the tail of the stream
    # a relaying node would apply is used here.
    filler = aes_ctr_start_index(
      aes_key, iv, filler & zeroPadding, (((t + 1) * (r - i)) + t + 2) * k
    )
  return ok(filler)
proc computeBetaGamma(
    s: seq[seq[byte]],
    hop: openArray[Hop],
    delay: openArray[seq[byte]],
    destHop: Hop,
    id: SURBIdentifier,
): Result[tuple[beta: seq[byte], gamma: seq[byte]], string] =
  ## Calculates the following elements:
  ## - Beta: The nested encrypted routing information. It encodes the next hop address, the forwarding delay, integrity check Gamma for the next hop, and the Beta for subsequent hops.
  ## - Gamma: A message authentication code computed over Beta using the session key derived from Alpha. It ensures header integrity at each hop.
  ## `id` is embedded in the innermost layer (all-zero for forward messages).
  let sLen = s.len
  var
    beta: seq[byte]
    gamma: seq[byte]
  # Compute filler strings
  let filler = computeFillerStrings(s).valueOr:
    return err("Error in filler generation: " & error)
  # Build the layers inside-out: innermost (exit hop) first.
  for i in countdown(sLen - 1, 0):
    # Derive AES key, MAC key, and IV
    let
      beta_aes_key = deriveKeyMaterial("aes_key", s[i]).kdf()
      mac_key = deriveKeyMaterial("mac_key", s[i]).kdf()
      beta_iv = deriveKeyMaterial("iv", s[i]).kdf()
    # Compute Beta and Gamma
    if i == sLen - 1:
      # Innermost layer: destination | final delay | SURB id | zero padding;
      # the filler is appended after encryption to restore full Beta length.
      let destBytes = destHop.serialize()
      let destPadding = destBytes & delay[i] & @id & newSeq[byte](PaddingLength)
      let aes = aes_ctr(beta_aes_key, beta_iv, destPadding)
      beta = aes & filler
    else:
      # Outer layers wrap the next hop's address/delay/MAC plus the truncated
      # inner beta.
      let routingInfo = RoutingInfo.init(
        hop[i + 1], delay[i], gamma, beta[0 .. (((r * (t + 1)) - t) * k) - 1]
      )
      let serializedRoutingInfo = routingInfo.serialize()
      beta = aes_ctr(beta_aes_key, beta_iv, serializedRoutingInfo)
    # MAC for this layer, verified by the corresponding hop.
    gamma = hmac(mac_key, beta).toSeq()
  return ok((beta: beta, gamma: gamma))
proc computeDelta(s: seq[seq[byte]], msg: Message): Result[seq[byte], string] =
  ## Onion-encrypts the payload: the innermost layer encrypts the serialized
  ## message, each outer layer re-encrypts the previous delta.
  let sLen = s.len
  var delta: seq[byte]
  for i in countdown(sLen - 1, 0):
    # Derive AES key and IV
    let
      delta_aes_key = deriveKeyMaterial("delta_aes_key", s[i]).kdf()
      delta_iv = deriveKeyMaterial("delta_iv", s[i]).kdf()
    # Compute Delta
    if i == sLen - 1:
      let serializedMsg = msg.serialize()
      delta = aes_ctr(delta_aes_key, delta_iv, serializedMsg)
    else:
      delta = aes_ctr(delta_aes_key, delta_iv, delta)
  return ok(delta)
proc createSURB*(
    publicKeys: openArray[FieldElement],
    delay: openArray[seq[byte]],
    hops: openArray[Hop],
    id: SURBIdentifier,
    rng: ref HmacDrbgContext = newRng(),
): Result[SURB, string] =
  ## Builds a single-use reply block for the return path described by
  ## `publicKeys`/`delay`/`hops`. `id` must not be all-zero: forward packets
  ## embed a zero identifier, so a zero id could not be recognized as a reply.
  if id == default(SURBIdentifier):
    return err("id should be initialized")
  # Compute alpha and shared secrets
  let (alpha_0, s) = computeAlpha(publicKeys).valueOr:
    return err("Error in alpha generation: " & error)
  # Compute beta and gamma
  # The destination hop is left empty (all zeros) for replies.
  let (beta_0, gamma_0) = computeBetaGamma(s, hops, delay, Hop(), id).valueOr:
    return err("Error in beta and gamma generation: " & error)
  # Generate key
  var key = newSeqUninit[byte](k)
  rng[].generate(key)
  return ok(
    SURB(
      hop: hops[0],
      header: Header.init(alpha_0, beta_0, gamma_0),
      secret: Opt.some(s),
      key: key,
    )
  )
proc useSURB*(surb: SURB, msg: Message): SphinxPacket =
  ## Encrypts `msg` with the SURB's key and attaches the precomputed header,
  ## producing the packet a responder sends back along the return path.
  # Derive AES key and IV
  let
    delta_aes_key = deriveKeyMaterial("delta_aes_key", surb.key).kdf()
    delta_iv = deriveKeyMaterial("delta_iv", surb.key).kdf()
  # Compute Delta
  let serializedMsg = msg.serialize()
  let delta = aes_ctr(delta_aes_key, delta_iv, serializedMsg)
  return SphinxPacket.init(surb.header, delta)
proc processReply*(
    key: seq[byte], s: seq[seq[byte]], delta_prime: seq[byte]
): Result[seq[byte], string] =
  ## Decrypts a SURB reply at its creator: first undoes the responder's
  ## encryption (with `key`), then each hop's layer (with the stored secrets).
  var delta = delta_prime[0 ..^ 1] # work on a copy
  var key_prime = key
  # Iteration 0 uses `key`; iterations 1..s.len use s[0]..s[s.len-1],
  # i.e. s.len + 1 decryption passes in total.
  for i in 0 .. s.len:
    if i != 0:
      key_prime = s[i - 1]
    let
      delta_aes_key = deriveKeyMaterial("delta_aes_key", key_prime).kdf()
      delta_iv = deriveKeyMaterial("delta_iv", key_prime).kdf()
    delta = aes_ctr(delta_aes_key, delta_iv, delta)
  let deserializeMsg = Message.deserialize(delta).valueOr:
    return err("Message deserialization error: " & error)
  return ok(deserializeMsg)
proc wrapInSphinxPacket*(
    msg: Message,
    publicKeys: openArray[FieldElement],
    delay: openArray[seq[byte]],
    hop: openArray[Hop],
    destHop: Hop,
): Result[seq[byte], string] =
  ## Builds a complete serialized forward Sphinx packet for `msg` over the
  ## path described by `publicKeys`/`delay`/`hop`, with `destHop` as the final
  ## destination. Forward messages use the all-zero SURB identifier.
  # Compute alpha and shared secrets
  let (alpha_0, s) = computeAlpha(publicKeys).valueOr:
    return err("Error in alpha generation: " & error)
  # Compute beta and gamma
  let (beta_0, gamma_0) = computeBetaGamma(
    s, hop, delay, destHop, default(SURBIdentifier)
  ).valueOr:
    return err("Error in beta and gamma generation: " & error)
  # Compute delta
  let delta_0 = computeDelta(s, msg).valueOr:
    return err("Error in delta generation: " & error)
  # Serialize sphinx packet
  let sphinxPacket = SphinxPacket.init(Header.init(alpha_0, beta_0, gamma_0), delta_0)
  let serialized = sphinxPacket.serialize()
  return ok(serialized)
# Variant result of processSphinxPacket, discriminated by ProcessingStatus.
type ProcessedSphinxPacket* = object
  case status*: ProcessingStatus
  of ProcessingStatus.Exit:
    destination*: Hop # final destination of the message
    messageChunk*: seq[byte] # decrypted message chunk (MessageSize bytes)
  of ProcessingStatus.Intermediate:
    nextHop*: Hop # where to forward the packet
    delayMs*: int # forwarding delay in milliseconds
    serializedSphinxPacket*: seq[byte] # re-wrapped packet for the next hop
  of ProcessingStatus.Reply:
    id*: SURBIdentifier # identifies which SURB this reply answers
    delta_prime*: seq[byte] # still-encrypted reply payload
  else:
    discard # Duplicate / InvalidMAC carry no extra data
proc isZeros(data: seq[byte], startIdx: int, endIdx: int): bool =
  ## True when every byte in data[startIdx .. endIdx] (inclusive) is zero.
  doAssert 0 <= startIdx and endIdx < data.len and startIdx <= endIdx
  data[startIdx .. endIdx].allIt(it == 0)
# Copies the SURB identifier out of a decrypted B buffer; the id sits
# immediately after the t*k routing prefix.
template extractSurbId(data: seq[byte]): SURBIdentifier =
  const startIndex = t * k
  const endIndex = startIndex + SurbIdLen - 1
  doAssert data.len > startIndex and endIndex < data.len
  var id: SURBIdentifier
  copyMem(addr id[0], addr data[startIndex], SurbIdLen)
  id
proc processSphinxPacket*(
    sphinxPacket: SphinxPacket, privateKey: FieldElement, tm: var TagManager
): Result[ProcessedSphinxPacket, string] =
  ## Processes one Sphinx packet at a mix node: derives the shared secret,
  ## checks for replays, verifies the header MAC, peels one encryption layer,
  ## and classifies the packet (Exit / Reply / Intermediate / Duplicate /
  ## InvalidMAC).
  let
    (header, payload) = sphinxPacket.get()
    (alpha, beta, gamma) = header.get()
  # Compute shared secret
  let alphaFE = bytesToFieldElement(alpha).valueOr:
    return err("Error in bytes to field element conversion: " & error)
  let
    s = multiplyPointWithScalars(alphaFE, [privateKey])
    sBytes = fieldElementToBytes(s)
  # Check if the tag has been seen (replay protection)
  if isTagSeen(tm, s):
    return ok(ProcessedSphinxPacket(status: Duplicate))
  # Compute MAC
  let mac_key = deriveKeyMaterial("mac_key", sBytes).kdf()
  if not (hmac(mac_key, beta).toSeq() == gamma):
    # If MAC not verified
    return ok(ProcessedSphinxPacket(status: InvalidMAC))
  # Store the tag as seen
  addTag(tm, s)
  # Derive AES key and IV
  let
    beta_aes_key = deriveKeyMaterial("aes_key", sBytes).kdf()
    beta_iv = deriveKeyMaterial("iv", sBytes).kdf()
    delta_aes_key = deriveKeyMaterial("delta_aes_key", sBytes).kdf()
    delta_iv = deriveKeyMaterial("delta_iv", sBytes).kdf()
  # Compute delta (peel one payload encryption layer)
  let delta_prime = aes_ctr(delta_aes_key, delta_iv, payload)
  # Compute B: decrypt beta, extended with zeros to recover full layer length.
  let zeroPadding = newSeq[byte]((t + 1) * k)
  let B = aes_ctr(beta_aes_key, beta_iv, beta & zeroPadding)
  # Check if B has the required prefix for the original message
  if B.isZeros((t + 1) * k, ((t + 1) * k) + PaddingLength - 1):
    # Innermost layer reached: either an exit message or a SURB reply.
    let hop = Hop.deserialize(B[0 .. AddrSize - 1]).valueOr:
      return err(error)
    if B.isZeros(AddrSize, ((t + 1) * k) - 1):
      # Delay and SURB id are zero => forward message for the exit node;
      # delta must now expose its k-byte zero prefix.
      if delta_prime.isZeros(0, k - 1):
        let msg = Message.deserialize(delta_prime).valueOr:
          return err("Message deserialization error: " & error)
        return ok(
          ProcessedSphinxPacket(
            status: Exit, destination: hop, messageChunk: msg[0 .. MessageSize - 1]
          )
        )
      else:
        return err("delta_prime should be all zeros")
    elif B.isZeros(0, (t * k) - 1):
      # Zero address but non-zero SURB id => reply to one of our SURBs.
      # NOTE(review): if neither this nor the branch above matches, control
      # falls off the end of the proc without an explicit return — confirm
      # that case is unreachable.
      return ok(
        ProcessedSphinxPacket(
          status: Reply, id: B.extractSurbId(), delta_prime: delta_prime
        )
      )
  else:
    # Extract routing information from B
    let routingInfo = RoutingInfo.deserialize(B).valueOr:
      return err("Routing info deserialization error: " & error)
    let (address, delay, gamma_prime, beta_prime) = routingInfo.getRoutingInfo()
    # Compute alpha for the next hop by applying this hop's blinder.
    let blinder = bytesToFieldElement(sha256_hash(alpha & sBytes)).valueOr:
      return err("Error in bytes to field element conversion: " & error)
    let alphaFE = bytesToFieldElement(alpha).valueOr:
      return err("Error in bytes to field element conversion: " & error)
    let alpha_prime = multiplyPointWithScalars(alphaFE, [blinder])
    # Serialize sphinx packet
    let sphinxPkt = SphinxPacket.init(
      Header.init(fieldElementToBytes(alpha_prime), beta_prime, gamma_prime),
      delta_prime,
    )
    return ok(
      ProcessedSphinxPacket(
        status: Intermediate,
        nextHop: address,
        delayMs: uint16.fromBytes(delay).int,
        serializedSphinxPacket: sphinxPkt.serialize(),
      )
    )

View File

@@ -0,0 +1,28 @@
import tables, locks
import ./curve25519
# Tracks Sphinx packet tags (per-packet shared secrets) already processed, to
# reject duplicates/replays. All access is serialized with an internal lock.
type TagManager* = ref object
  lock: Lock # guards seenTags
  seenTags: Table[FieldElement, bool] # set of seen tags (value is unused)

proc new*(T: typedesc[TagManager]): T =
  ## Creates a TagManager with an empty tag set and an initialized lock.
  let tm = T()
  tm.seenTags = initTable[FieldElement, bool]()
  initLock(tm.lock)
  return tm

proc addTag*(tm: TagManager, tag: FieldElement) {.gcsafe.} =
  ## Marks `tag` as seen.
  withLock tm.lock:
    tm.seenTags[tag] = true

proc isTagSeen*(tm: TagManager, tag: FieldElement): bool {.gcsafe.} =
  ## True if `tag` was previously added (and not removed).
  withLock tm.lock:
    return tm.seenTags.contains(tag)

proc removeTag*(tm: TagManager, tag: FieldElement) {.gcsafe.} =
  ## Forgets `tag`.
  withLock tm.lock:
    tm.seenTags.del(tag)

proc clearTags*(tm: TagManager) {.gcsafe.} =
  ## Forgets all tags.
  withLock tm.lock:
    tm.seenTags.clear()

View File

@@ -50,14 +50,6 @@ proc new(
method getWrapped*(self: QuicStream): P2PConnection =
self
template mapExceptions(body: untyped) =
try:
body
except QuicError:
raise newLPStreamEOFError()
except CatchableError:
raise newLPStreamEOFError()
method readOnce*(
stream: QuicStream, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
@@ -83,8 +75,16 @@ method readOnce*(
method write*(
stream: QuicStream, bytes: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
mapExceptions(await stream.stream.write(bytes))
libp2p_network_bytes.inc(bytes.len.int64, labelValues = ["out"])
try:
await stream.stream.write(bytes)
libp2p_network_bytes.inc(bytes.len.int64, labelValues = ["out"])
except QuicError:
raise newLPStreamEOFError()
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise
(ref LPStreamError)(msg: "error in quic stream write: " & exc.msg, parent: exc)
{.pop.}
@@ -158,6 +158,8 @@ method newStream*(
.} =
try:
return await m.quicSession.getStream(Direction.Out)
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise newException(MuxerError, "error in newStream: " & exc.msg, exc)

View File

@@ -150,57 +150,6 @@ proc createStreamServer*[T](
except CatchableError as exc:
raise newException(LPError, "failed simpler createStreamServer: " & exc.msg, exc)
proc createAsyncSocket*(ma: MultiAddress): AsyncFD {.raises: [ValueError, LPError].} =
## Create new asynchronous socket using MultiAddress' ``ma`` socket type and
## protocol information.
##
## Returns ``asyncInvalidSocket`` on error.
##
## Note: This procedure only used in `go-libp2p-daemon` wrapper.
##
var
socktype: SockType = SockType.SOCK_STREAM
protocol: Protocol = Protocol.IPPROTO_TCP
let address = initTAddress(ma).tryGet()
if address.family in {AddressFamily.IPv4, AddressFamily.IPv6}:
if ma[1].tryGet().protoCode().tryGet() == multiCodec("udp"):
socktype = SockType.SOCK_DGRAM
protocol = Protocol.IPPROTO_UDP
elif ma[1].tryGet().protoCode().tryGet() == multiCodec("tcp"):
socktype = SockType.SOCK_STREAM
protocol = Protocol.IPPROTO_TCP
elif address.family in {AddressFamily.Unix}:
socktype = SockType.SOCK_STREAM
protocol = cast[Protocol](0)
else:
return asyncInvalidSocket
try:
createAsyncSocket(address.getDomain(), socktype, protocol)
except CatchableError as exc:
raise newException(
LPError, "Convert exception to LPError in createAsyncSocket: " & exc.msg, exc
)
proc bindAsyncSocket*(sock: AsyncFD, ma: MultiAddress): bool {.raises: [LPError].} =
## Bind socket ``sock`` to MultiAddress ``ma``.
##
## Note: This procedure only used in `go-libp2p-daemon` wrapper.
##
var
saddr: Sockaddr_storage
slen: SockLen
let address = initTAddress(ma).tryGet()
toSAddr(address, saddr, slen)
if bindSocket(SocketHandle(sock), cast[ptr SockAddr](addr saddr), slen) == 0:
result = true
else:
result = false
proc getLocalAddress*(sock: AsyncFD): TransportAddress =
## Retrieve local socket ``sock`` address.
##

View File

@@ -1,123 +0,0 @@
#!/bin/bash

# Copyright (c) 2018-2020 Status Research & Development GmbH. Licensed under
# either of:
# - Apache License, version 2.0
# - MIT license
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

# Build helper for the go-libp2p-daemon (`p2pd`) binary used by interop
# tests. Clones the daemon at a pinned commit, rebuilds only when the
# cached build is older than that commit, and optionally syncs a CI cache
# directory. Usage: $0 [-f|--force] [-v|--verbose] [CACHE_DIR] [COMMIT]

set -e

force=false
verbose=false
CACHE_DIR=""              # optional CI cache directory (first positional arg)
LIBP2P_COMMIT="124530a3"  # pinned daemon commit (second positional arg)

# Parse flags plus up to two positional arguments.
while [[ "$#" -gt 0 ]]; do
  case "$1" in
    -f|--force) force=true ;;
    -v|--verbose) verbose=true ;;
    -h|--help)
      echo "Usage: $0 [-f|--force] [-v|--verbose] [CACHE_DIR] [COMMIT]"
      exit 0
      ;;
    *)
      # First non-option is CACHE_DIR, second is LIBP2P_COMMIT
      if [[ -z "$CACHE_DIR" ]]; then
        CACHE_DIR="$1"
      elif [[ "$LIBP2P_COMMIT" == "124530a3" ]]; then
        LIBP2P_COMMIT="$1"
      else
        echo "Unknown argument: $1"
        exit 1
      fi
      ;;
  esac
  shift
done

# Prefer an existing vendored checkout; otherwise clone fresh into the CWD
# and pin it to LIBP2P_COMMIT.
SUBREPO_DIR="vendor/go/src/github.com/libp2p/go-libp2p-daemon"
if [[ ! -e "$SUBREPO_DIR" ]]; then
  SUBREPO_DIR="go-libp2p-daemon"
  rm -rf "$SUBREPO_DIR"
  git clone -q https://github.com/libp2p/go-libp2p-daemon
  cd "$SUBREPO_DIR"
  git checkout -q "$LIBP2P_COMMIT"
  cd ..
fi

## env vars
[[ -z "$BUILD_MSG" ]] && BUILD_MSG="Building p2pd ${LIBP2P_COMMIT}"

# Windows detection
if uname | grep -qiE "mingw|msys"; then
  EXE_SUFFIX=".exe"
  # otherwise it fails in AppVeyor due to https://github.com/git-for-windows/git/issues/2495
  GIT_TIMESTAMP_ARG="--date=unix" # available since Git 2.9.4
else
  EXE_SUFFIX=""
  GIT_TIMESTAMP_ARG="--date=format-local:%s" # available since Git 2.7.0
fi

TARGET_DIR="$(go env GOPATH)/bin"
TARGET_BINARY="${TARGET_DIR}/p2pd${EXE_SUFFIX}"

# Returns 0 (shell "true") when a rebuild is required and 1 when the
# recorded build timestamp matches the last commit's timestamp. As a side
# effect, restores the CI cache into TARGET_DIR when one is available.
target_needs_rebuilding() {
  REBUILD=0
  NO_REBUILD=1

  if [[ -n "$CACHE_DIR" && -e "${CACHE_DIR}/p2pd${EXE_SUFFIX}" ]]; then
    mkdir -p "${TARGET_DIR}"
    cp -a "$CACHE_DIR"/* "${TARGET_DIR}/"
  fi

  # compare the built commit's timestamp to the date of the last commit (keep in mind that Git doesn't preserve file timestamps)
  if [[ -e "${TARGET_DIR}/timestamp" && $(cat "${TARGET_DIR}/timestamp") -eq $(cd "$SUBREPO_DIR"; git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG}) ]]; then
    return $NO_REBUILD
  else
    return $REBUILD
  fi
}

# Download Go modules (with retries), install the daemon binaries, record
# the built commit's timestamp, and refresh the CI cache when configured.
build_target() {
  echo -e "$BUILD_MSG"
  pushd "$SUBREPO_DIR"

  # Go module downloads can fail randomly in CI VMs, so retry them a few times
  MAX_RETRIES=5
  CURR=0
  while [[ $CURR -lt $MAX_RETRIES ]]; do
    FAILED=0
    go get ./... && break || FAILED=1
    CURR=$(( CURR + 1 ))
    if $verbose; then
      echo "retry #${CURR}"
    fi
  done
  if [[ $FAILED == 1 ]]; then
    echo "Error: still fails after retrying ${MAX_RETRIES} times."
    exit 1
  fi

  go install ./...

  # record the last commit's timestamp
  git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG} > "${TARGET_DIR}/timestamp"
  popd

  # update the CI cache
  if [[ -n "$CACHE_DIR" ]]; then
    rm -rf "$CACHE_DIR"
    mkdir "$CACHE_DIR"
    cp -a "$TARGET_DIR"/* "$CACHE_DIR"/
  fi
  echo "Binary built successfully: $TARGET_BINARY"
}

if $force || target_needs_rebuilding; then
  build_target
else
  echo "No rebuild needed."
fi

View File

@@ -1,665 +0,0 @@
import chronos, chronicles, stew/byteutils
import helpers
import ../libp2p
import ../libp2p/[daemon/daemonapi, varint, transports/wstransport, crypto/crypto]
import ../libp2p/protocols/connectivity/relay/[relay, client, utils]
type
SwitchCreator = proc(
ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
prov = proc(config: TransportConfig): Transport =
TcpTransport.new({}, config.upgr),
relay: Relay = Relay.new(circuitRelayV1 = true),
): Switch {.gcsafe, raises: [LPError].}
DaemonPeerInfo = daemonapi.PeerInfo
proc writeLp(s: StreamTransport, msg: string | seq[byte]): Future[int] {.gcsafe.} =
  ## Write a length-prefixed (unsigned varint) message to the stream
  ## transport and return the future of the underlying write.
  var buf = initVBuffer()
  buf.writeSeq(msg)
  buf.finish()
  result = s.write(buf.buffer)
proc readLp(s: StreamTransport): Future[seq[byte]] {.async.} =
  ## Read a length-prefixed (unsigned varint) message from the stream
  ## transport and return its payload.
  var
    size: uint
    length: int
    res: VarintResult[void]
  # A uvarint length prefix is at most 10 bytes; read one byte at a time
  # until the accumulated bytes parse as a complete varint.
  result = newSeq[byte](10)
  for i in 0 ..< len(result):
    await s.readExactly(addr result[i], 1)
    res = LP.getUVarint(result.toOpenArray(0, i), length, size)
    if res.isOk():
      break
  res.expect("Valid varint")
  # Reuse the buffer to hold the `size`-byte payload that follows.
  result.setLen(size)
  if size > 0.uint:
    await s.readExactly(addr result[0], int(size))
proc testPubSubDaemonPublish(
gossip: bool = false, count: int = 1, swCreator: SwitchCreator
) {.async.} =
var pubsubData = "TEST MESSAGE"
var testTopic = "test-topic"
var msgData = pubsubData.toBytes()
var flags = {PSFloodSub}
if gossip:
flags = {PSGossipSub}
let daemonNode = await newDaemonApi(flags)
let daemonPeer = await daemonNode.identity()
let nativeNode = swCreator()
let pubsub =
if gossip:
GossipSub.init(switch = nativeNode).PubSub
else:
FloodSub.init(switch = nativeNode).PubSub
nativeNode.mount(pubsub)
await nativeNode.start()
await pubsub.start()
let nativePeer = nativeNode.peerInfo
var finished = false
var times = 0
proc nativeHandler(topic: string, data: seq[byte]) {.async.} =
let smsg = string.fromBytes(data)
check smsg == pubsubData
times.inc()
if times >= count and not finished:
finished = true
await nativeNode.connect(daemonPeer.peer, daemonPeer.addresses)
await sleepAsync(500.millis)
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)
proc pubsubHandler(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async: (raises: [CatchableError]).} =
result = true # don't cancel subscription
asyncDiscard daemonNode.pubsubSubscribe(testTopic, pubsubHandler)
pubsub.subscribe(testTopic, nativeHandler)
await sleepAsync(3.seconds)
proc publisher() {.async.} =
while not finished:
await daemonNode.pubsubPublish(testTopic, msgData)
await sleepAsync(250.millis)
await wait(publisher(), 5.minutes) # should be plenty of time
await nativeNode.stop()
await pubsub.stop()
await daemonNode.close()
proc testPubSubNodePublish(
gossip: bool = false, count: int = 1, swCreator: SwitchCreator
) {.async.} =
var pubsubData = "TEST MESSAGE"
var testTopic = "test-topic"
var msgData = pubsubData.toBytes()
var flags = {PSFloodSub}
if gossip:
flags = {PSGossipSub}
let daemonNode = await newDaemonApi(flags)
let daemonPeer = await daemonNode.identity()
let nativeNode = swCreator()
let pubsub =
if gossip:
GossipSub.init(switch = nativeNode).PubSub
else:
FloodSub.init(switch = nativeNode).PubSub
nativeNode.mount(pubsub)
await nativeNode.start()
await pubsub.start()
let nativePeer = nativeNode.peerInfo
await nativeNode.connect(daemonPeer.peer, daemonPeer.addresses)
await sleepAsync(500.millis)
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)
var times = 0
var finished = false
proc pubsubHandler(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async: (raises: [CatchableError]).} =
let smsg = string.fromBytes(message.data)
check smsg == pubsubData
times.inc()
if times >= count and not finished:
finished = true
result = true # don't cancel subscription
discard await daemonNode.pubsubSubscribe(testTopic, pubsubHandler)
proc nativeHandler(topic: string, data: seq[byte]) {.async.} =
discard
pubsub.subscribe(testTopic, nativeHandler)
await sleepAsync(3.seconds)
proc publisher() {.async.} =
while not finished:
discard await pubsub.publish(testTopic, msgData)
await sleepAsync(250.millis)
await wait(publisher(), 5.minutes) # should be plenty of time
check finished
await nativeNode.stop()
await pubsub.stop()
await daemonNode.close()
proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
suite "Interop using " & name:
# TODO: chronos transports are leaking,
# but those are tracked for both the daemon
# and libp2p, so not sure which one it is,
# need to investigate more
# teardown:
# checkTrackers()
# TODO: this test is failing sometimes on windows
# For some reason we receive EOF before test 4 sometimes
asyncTest "native -> daemon multiple reads and writes":
var protos = @["/test-stream"]
let nativeNode = swCreator()
await nativeNode.start()
let daemonNode = await newDaemonApi()
let daemonPeer = await daemonNode.identity()
var testFuture = newFuture[void]("test.future")
proc daemonHandler(
api: DaemonAPI, stream: P2PStream
) {.async: (raises: [CatchableError]).} =
check string.fromBytes(await stream.transp.readLp()) == "test 1"
discard await stream.transp.writeLp("test 2")
check string.fromBytes(await stream.transp.readLp()) == "test 3"
discard await stream.transp.writeLp("test 4")
testFuture.complete()
await daemonNode.addHandler(protos, daemonHandler)
let conn = await nativeNode.dial(daemonPeer.peer, daemonPeer.addresses, protos[0])
await conn.writeLp("test 1")
check "test 2" == string.fromBytes((await conn.readLp(1024)))
await conn.writeLp("test 3")
check "test 4" == string.fromBytes((await conn.readLp(1024)))
await wait(testFuture, 10.secs)
await nativeNode.stop()
await daemonNode.close()
await sleepAsync(500.millis)
asyncTest "native -> daemon connection":
var protos = @["/test-stream"]
var test = "TEST STRING"
# We are preparing expect string, which should be prefixed with varint
# length and do not have `\r\n` suffix, because we going to use
# readLine().
var buffer = initVBuffer()
buffer.writeSeq(test & "\r\n")
buffer.finish()
var expect = newString(len(buffer) - 2)
copyMem(addr expect[0], addr buffer.buffer[0], len(expect))
let nativeNode = swCreator()
await nativeNode.start()
let daemonNode = await newDaemonApi()
let daemonPeer = await daemonNode.identity()
var testFuture = newFuture[string]("test.future")
proc daemonHandler(
api: DaemonAPI, stream: P2PStream
) {.async: (raises: [CatchableError]).} =
# We should perform `readLp()` instead of `readLine()`. `readLine()`
# here reads actually length prefixed string.
var line = await stream.transp.readLine()
check line == expect
testFuture.complete(line)
await stream.close()
await daemonNode.addHandler(protos, daemonHandler)
let conn = await nativeNode.dial(daemonPeer.peer, daemonPeer.addresses, protos[0])
await conn.writeLp(test & "\r\n")
check expect == (await wait(testFuture, 10.secs))
await conn.close()
await nativeNode.stop()
await daemonNode.close()
asyncTest "daemon -> native connection":
var protos = @["/test-stream"]
var test = "TEST STRING"
var testFuture = newFuture[string]("test.future")
proc nativeHandler(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
var line = string.fromBytes(await conn.readLp(1024))
check line == test
testFuture.complete(line)
except CancelledError as e:
raise e
except CatchableError:
check false # should not be here
finally:
await conn.close()
# custom proto
var proto = new LPProtocol
proto.handler = nativeHandler
proto.codec = protos[0] # codec
let nativeNode = swCreator()
nativeNode.mount(proto)
await nativeNode.start()
let nativePeer = nativeNode.peerInfo
let daemonNode = await newDaemonApi()
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)
var stream = await daemonNode.openStream(nativePeer.peerId, protos)
discard await stream.transp.writeLp(test)
check test == (await wait(testFuture, 10.secs))
await stream.close()
await nativeNode.stop()
await daemonNode.close()
await sleepAsync(500.millis)
asyncTest "native -> daemon websocket connection":
var protos = @["/test-stream"]
var test = "TEST STRING"
var testFuture = newFuture[string]("test.future")
proc nativeHandler(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
var line = string.fromBytes(await conn.readLp(1024))
check line == test
testFuture.complete(line)
except CancelledError as e:
raise e
except CatchableError:
check false # should not be here
finally:
await conn.close()
# custom proto
var proto = new LPProtocol
proto.handler = nativeHandler
proto.codec = protos[0] # codec
let wsAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0/ws").tryGet()
let nativeNode = swCreator(
ma = wsAddress,
prov = proc(config: TransportConfig): Transport =
WsTransport.new(config.upgr),
)
nativeNode.mount(proto)
await nativeNode.start()
let nativePeer = nativeNode.peerInfo
let daemonNode = await newDaemonApi(hostAddresses = @[wsAddress])
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)
var stream = await daemonNode.openStream(nativePeer.peerId, protos)
discard await stream.transp.writeLp(test)
check test == (await wait(testFuture, 10.secs))
await stream.close()
await nativeNode.stop()
await daemonNode.close()
await sleepAsync(500.millis)
asyncTest "daemon -> native websocket connection":
var protos = @["/test-stream"]
var test = "TEST STRING"
# We are preparing expect string, which should be prefixed with varint
# length and do not have `\r\n` suffix, because we going to use
# readLine().
var buffer = initVBuffer()
buffer.writeSeq(test & "\r\n")
buffer.finish()
var expect = newString(len(buffer) - 2)
copyMem(addr expect[0], addr buffer.buffer[0], len(expect))
let wsAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0/ws").tryGet()
let nativeNode = SwitchBuilder
.new()
.withAddress(wsAddress)
.withRng(crypto.newRng())
.withMplex()
.withWsTransport()
.withNoise()
.build()
await nativeNode.start()
let daemonNode = await newDaemonApi(hostAddresses = @[wsAddress])
let daemonPeer = await daemonNode.identity()
var testFuture = newFuture[string]("test.future")
proc daemonHandler(
api: DaemonAPI, stream: P2PStream
) {.async: (raises: [CatchableError]).} =
# We should perform `readLp()` instead of `readLine()`. `readLine()`
# here reads actually length prefixed string.
var line = await stream.transp.readLine()
check line == expect
testFuture.complete(line)
await stream.close()
await daemonNode.addHandler(protos, daemonHandler)
let conn = await nativeNode.dial(daemonPeer.peer, daemonPeer.addresses, protos[0])
await conn.writeLp(test & "\r\n")
check expect == (await wait(testFuture, 10.secs))
await conn.close()
await nativeNode.stop()
await daemonNode.close()
asyncTest "daemon -> multiple reads and writes":
var protos = @["/test-stream"]
var testFuture = newFuture[void]("test.future")
proc nativeHandler(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
check "test 1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test 2".toBytes())
check "test 3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test 4".toBytes())
testFuture.complete()
except CancelledError as e:
raise e
except CatchableError:
check false # should not be here
finally:
await conn.close()
# custom proto
var proto = new LPProtocol
proto.handler = nativeHandler
proto.codec = protos[0] # codec
let nativeNode = swCreator()
nativeNode.mount(proto)
await nativeNode.start()
let nativePeer = nativeNode.peerInfo
let daemonNode = await newDaemonApi()
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)
var stream = await daemonNode.openStream(nativePeer.peerId, protos)
asyncDiscard stream.transp.writeLp("test 1")
check "test 2" == string.fromBytes(await stream.transp.readLp())
asyncDiscard stream.transp.writeLp("test 3")
check "test 4" == string.fromBytes(await stream.transp.readLp())
await wait(testFuture, 10.secs)
await stream.close()
await nativeNode.stop()
await daemonNode.close()
asyncTest "read write multiple":
var protos = @["/test-stream"]
var test = "TEST STRING"
var count = 0
var testFuture = newFuture[int]("test.future")
proc nativeHandler(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
while count < 10:
var line = string.fromBytes(await conn.readLp(1024))
check line == test
await conn.writeLp(test.toBytes())
count.inc()
testFuture.complete(count)
except CancelledError as e:
raise e
except CatchableError:
check false # should not be here
finally:
await conn.close()
# custom proto
var proto = new LPProtocol
proto.handler = nativeHandler
proto.codec = protos[0] # codec
let nativeNode = swCreator()
nativeNode.mount(proto)
await nativeNode.start()
let nativePeer = nativeNode.peerInfo
let daemonNode = await newDaemonApi()
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)
var stream = await daemonNode.openStream(nativePeer.peerId, protos)
var count2 = 0
while count2 < 10:
discard await stream.transp.writeLp(test)
let line = await stream.transp.readLp()
check test == string.fromBytes(line)
inc(count2)
check 10 == (await wait(testFuture, 1.minutes))
await stream.close()
await nativeNode.stop()
await daemonNode.close()
asyncTest "floodsub: daemon publish one":
await testPubSubDaemonPublish(swCreator = swCreator)
asyncTest "floodsub: daemon publish many":
await testPubSubDaemonPublish(count = 10, swCreator = swCreator)
asyncTest "gossipsub: daemon publish one":
await testPubSubDaemonPublish(gossip = true, swCreator = swCreator)
asyncTest "gossipsub: daemon publish many":
await testPubSubDaemonPublish(gossip = true, count = 10, swCreator = swCreator)
asyncTest "floodsub: node publish one":
await testPubSubNodePublish(swCreator = swCreator)
asyncTest "floodsub: node publish many":
await testPubSubNodePublish(count = 10, swCreator = swCreator)
asyncTest "gossipsub: node publish one":
await testPubSubNodePublish(gossip = true, swCreator = swCreator)
asyncTest "gossipsub: node publish many":
await testPubSubNodePublish(gossip = true, count = 10, swCreator = swCreator)
proc relayInteropTests*(name: string, relayCreator: SwitchCreator) =
suite "Interop relay using " & name:
asyncTest "NativeSrc -> NativeRelay -> DaemonDst":
let closeBlocker = newFuture[void]()
let daemonFinished = newFuture[void]()
# TODO: This Future blocks the daemonHandler after sending the last message.
# It exists because there's a strange behavior where stream.close sends
# a Rst instead of Fin. We should investigate this at some point.
proc daemonHandler(
api: DaemonAPI, stream: P2PStream
) {.async: (raises: [CatchableError]).} =
check "line1" == string.fromBytes(await stream.transp.readLp())
discard await stream.transp.writeLp("line2")
check "line3" == string.fromBytes(await stream.transp.readLp())
discard await stream.transp.writeLp("line4")
await closeBlocker
await stream.close()
daemonFinished.complete()
let
maSrc = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
maRel = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
src = relayCreator(maSrc, relay = RelayClient.new(circuitRelayV1 = true))
rel = relayCreator(maRel)
await src.start()
await rel.start()
let daemonNode = await newDaemonApi()
let daemonPeer = await daemonNode.identity()
let maStr =
$rel.peerInfo.addrs[0] & "/p2p/" & $rel.peerInfo.peerId & "/p2p-circuit"
let maddr = MultiAddress.init(maStr).tryGet()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await rel.connect(daemonPeer.peer, daemonPeer.addresses)
await daemonNode.addHandler(@["/testCustom"], daemonHandler)
let conn = await src.dial(daemonPeer.peer, @[maddr], @["/testCustom"])
await conn.writeLp("line1")
check string.fromBytes(await conn.readLp(1024)) == "line2"
await conn.writeLp("line3")
check string.fromBytes(await conn.readLp(1024)) == "line4"
closeBlocker.complete()
await daemonFinished
await conn.close()
await allFutures(src.stop(), rel.stop())
try:
await daemonNode.close()
except CatchableError as e:
when defined(windows):
# On Windows, daemon close may fail due to socket race condition
# This is expected behavior and can be safely ignored
discard
else:
raise e
asyncTest "DaemonSrc -> NativeRelay -> NativeDst":
proc customHandler(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
check "line1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("line2")
check "line3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("line4")
except CancelledError as e:
raise e
except CatchableError:
check false # should not be here
finally:
await conn.close()
let protos = @["/customProto", RelayV1Codec]
var customProto = new LPProtocol
customProto.handler = customHandler
customProto.codec = protos[0]
let
maRel = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
maDst = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
rel = relayCreator(maRel)
dst = relayCreator(maDst, relay = RelayClient.new())
dst.mount(customProto)
await rel.start()
await dst.start()
let daemonNode = await newDaemonApi()
let daemonPeer = await daemonNode.identity()
let maStr =
$rel.peerInfo.addrs[0] & "/p2p/" & $rel.peerInfo.peerId & "/p2p-circuit"
let maddr = MultiAddress.init(maStr).tryGet()
await daemonNode.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await rel.connect(dst.peerInfo.peerId, dst.peerInfo.addrs)
await daemonNode.connect(dst.peerInfo.peerId, @[maddr])
var stream = await daemonNode.openStream(dst.peerInfo.peerId, protos)
discard await stream.transp.writeLp("line1")
check string.fromBytes(await stream.transp.readLp()) == "line2"
discard await stream.transp.writeLp("line3")
check string.fromBytes(await stream.transp.readLp()) == "line4"
await allFutures(dst.stop(), rel.stop())
await daemonNode.close()
asyncTest "NativeSrc -> DaemonRelay -> NativeDst":
proc customHandler(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
check "line1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("line2")
check "line3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("line4")
except CancelledError as e:
raise e
except CatchableError:
check false # should not be here
finally:
await conn.close()
let protos = @["/customProto", RelayV1Codec]
var customProto = new LPProtocol
customProto.handler = customHandler
customProto.codec = protos[0]
let
maSrc = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
maDst = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
src = relayCreator(maSrc, relay = RelayClient.new())
dst = relayCreator(maDst, relay = RelayClient.new())
dst.mount(customProto)
await src.start()
await dst.start()
let daemonNode = await newDaemonApi({RelayHop})
let daemonPeer = await daemonNode.identity()
let maStr = $daemonPeer.addresses[0] & "/p2p/" & $daemonPeer.peer & "/p2p-circuit"
let maddr = MultiAddress.init(maStr).tryGet()
await src.connect(daemonPeer.peer, daemonPeer.addresses)
await daemonNode.connect(dst.peerInfo.peerId, dst.peerInfo.addrs)
let conn = await src.dial(dst.peerInfo.peerId, @[maddr], protos[0])
await conn.writeLp("line1")
check string.fromBytes(await conn.readLp(1024)) == "line2"
await conn.writeLp("line3")
check string.fromBytes(await conn.readLp(1024)) == "line4"
await allFutures(src.stop(), dst.stop())
await daemonNode.close()

View File

@@ -1,4 +1,5 @@
{.used.}
import
testdiscoverymngr, testrendezvous, testrendezvousprotobuf, testrendezvousinterface
testdiscoverymngr, testrendezvous, testrendezvouserrors, testrendezvousprotobuf,
testrendezvousinterface

View File

@@ -9,9 +9,17 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
import sequtils, strutils
import sequtils
import chronos
import ../../libp2p/[protocols/rendezvous, switch]
import
../../libp2p/[
protocols/rendezvous,
protocols/rendezvous/protobuf,
peerinfo,
switch,
routing_record,
crypto/crypto,
]
import ../../libp2p/discovery/discoverymngr
import ../../libp2p/utils/offsettedseq
import ../helpers
@@ -73,6 +81,20 @@ suite "RendezVous":
peerRecords.len == 1
peerRecords[0] == peerNodes[0].switch.peerInfo.signedPeerRecord.data
asyncTest "Peer is not registered when peer record validation fails":
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
peerNodes[0].switch.peerInfo.signedPeerRecord =
createCorruptedSignedPeerRecord(peerNodes[0].switch.peerInfo.peerId)
const namespace = "foo"
await peerNodes[0].advertise(namespace)
check rendezvousNode.registered.s.len == 0
asyncTest "Unsubscribe removes registered peer from remote":
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
@@ -87,6 +109,17 @@ suite "RendezVous":
await peerNodes[0].unsubscribe(namespace)
check (await peerNodes[0].request(Opt.some(namespace))).len == 0
asyncTest "Unsubscribe for not registered namespace is ignored":
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
await peerNodes[0].advertise("foo")
await peerNodes[0].unsubscribe("bar")
check rendezvousNode.registered.s.len == 1
asyncTest "Consecutive requests with namespace returns peers with pagination":
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(11)
(rendezvousNode & peerNodes).startAndDeferStop()
@@ -447,26 +480,3 @@ suite "RendezVous":
# 1001st registration ignored, limit reached
await peerRdv.advertise(namespace)
check rendezvousNode.registered.s.len == RegistrationLimitPerPeer
asyncTest "Various local error":
let rdv = RendezVous.new(minDuration = 1.minutes, maxDuration = 72.hours)
expect AdvertiseError:
discard await rdv.request(Opt.some("A".repeat(300)))
expect AdvertiseError:
discard await rdv.request(Opt.some("A"), -1)
expect AdvertiseError:
discard await rdv.request(Opt.some("A"), 3000)
expect AdvertiseError:
await rdv.advertise("A".repeat(300))
expect AdvertiseError:
await rdv.advertise("A", 73.hours)
expect AdvertiseError:
await rdv.advertise("A", 30.seconds)
test "Various config error":
expect RendezVousError:
discard RendezVous.new(minDuration = 30.seconds)
expect RendezVousError:
discard RendezVous.new(maxDuration = 73.hours)
expect RendezVousError:
discard RendezVous.new(minDuration = 15.minutes, maxDuration = 10.minutes)

View File

@@ -0,0 +1,153 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import sequtils
import strformat
import strutils
import chronos
import
../../libp2p/[
protocols/rendezvous,
protocols/rendezvous/protobuf,
peerinfo,
switch,
routing_record,
crypto/crypto,
]
import ../../libp2p/discovery/discoverymngr
import ../helpers
import ./utils
suite "RendezVous Errors":
teardown:
checkTrackers()
asyncTest "Various local error":
let rdv = RendezVous.new(minDuration = 1.minutes, maxDuration = 72.hours)
expect AdvertiseError:
discard await rdv.request(Opt.some("A".repeat(300)))
expect AdvertiseError:
discard await rdv.request(Opt.some("A"), -1)
expect AdvertiseError:
discard await rdv.request(Opt.some("A"), 3000)
expect AdvertiseError:
await rdv.advertise("A".repeat(300))
expect AdvertiseError:
await rdv.advertise("A", 73.hours)
expect AdvertiseError:
await rdv.advertise("A", 30.seconds)
test "Various config error":
expect RendezVousError:
discard RendezVous.new(minDuration = 30.seconds)
expect RendezVousError:
discard RendezVous.new(maxDuration = 73.hours)
expect RendezVousError:
discard RendezVous.new(minDuration = 15.minutes, maxDuration = 10.minutes)
let testCases =
@[
(
"Register - Invalid Namespace",
(
proc(node: RendezVous): Message =
prepareRegisterMessage(
"A".repeat(300),
node.switch.peerInfo.signedPeerRecord.encode().get,
2.hours,
)
),
ResponseStatus.InvalidNamespace,
),
(
"Register - Invalid Signed Peer Record",
(
proc(node: RendezVous): Message =
# Malformed SPR - empty bytes will fail validation
prepareRegisterMessage("namespace", newSeq[byte](), 2.hours)
),
ResponseStatus.InvalidSignedPeerRecord,
),
(
"Register - Invalid TTL",
(
proc(node: RendezVous): Message =
prepareRegisterMessage(
"namespace", node.switch.peerInfo.signedPeerRecord.encode().get, 73.hours
)
),
ResponseStatus.InvalidTTL,
),
(
"Discover - Invalid Namespace",
(
proc(node: RendezVous): Message =
prepareDiscoverMessage(ns = Opt.some("A".repeat(300)))
),
ResponseStatus.InvalidNamespace,
),
(
"Discover - Invalid Cookie",
(
proc(node: RendezVous): Message =
# Empty buffer will fail Cookie.decode().tryGet() and yield InvalidCookie
prepareDiscoverMessage(cookie = Opt.some(newSeq[byte]()))
),
ResponseStatus.InvalidCookie,
),
]
for test in testCases:
let (testName, getMessage, expectedStatus) = test
asyncTest &"Node returns ERROR_CODE for invalid message - {testName}":
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
let
peerNode = peerNodes[0]
messageBuf = encode(getMessage(peerNode)).buffer
let
responseBuf = await sendRdvMessage(peerNode, rendezvousNode, messageBuf)
responseMessage = Message.decode(responseBuf).tryGet()
actualStatus =
if responseMessage.registerResponse.isSome():
responseMessage.registerResponse.get.status
else:
responseMessage.discoverResponse.get.status
check actualStatus == expectedStatus
asyncTest "Node returns NotAuthorized when Register exceeding peer limit":
let (rendezvousNode, peerNodes) = setupRendezvousNodeWithPeerNodes(1)
(rendezvousNode & peerNodes).startAndDeferStop()
await connectNodes(peerNodes[0], rendezvousNode)
# Pre-populate registrations up to the limit for this peer under the same namespace
let namespace = "namespaceNA"
await populatePeerRegistrations(
peerNodes[0], rendezvousNode, namespace, RegistrationLimitPerPeer
)
# Attempt one more registration which should be rejected with NotAuthorized
let messageBuf = encode(
prepareRegisterMessage(
namespace, peerNodes[0].switch.peerInfo.signedPeerRecord.encode().get, 2.hours
)
).buffer
let responseBuf = await sendRdvMessage(peerNodes[0], rendezvousNode, messageBuf)
let responseMessage = Message.decode(responseBuf).tryGet()
check responseMessage.registerResponse.get.status == ResponseStatus.NotAuthorized

View File

@@ -1,6 +1,16 @@
import sequtils
import chronos
import ../../libp2p/[protobuf/minprotobuf, protocols/rendezvous, switch, builders]
import sequtils
import
../../libp2p/[
builders,
crypto/crypto,
peerid,
protobuf/minprotobuf,
protocols/rendezvous,
protocols/rendezvous/protobuf,
routing_record,
switch,
]
proc createSwitch*(rdv: RendezVous = RendezVous.new()): Switch =
SwitchBuilder
@@ -76,3 +86,41 @@ proc populatePeerRegistrations*(
let record = targetRdv.registered.s[0]
for i in 0 ..< count - 1:
targetRdv.registered.s.add(record)
proc createCorruptedSignedPeerRecord*(peerId: PeerId): SignedPeerRecord =
  ## Build a SignedPeerRecord for ``peerId`` signed with a freshly
  ## generated (i.e. wrong) private key, so that signature validation
  ## against the peer's real key fails.
  let rng = newRng()
  let wrongPrivKey = PrivateKey.random(rng[]).tryGet()
  let record = PeerRecord.init(peerId, @[])
  SignedPeerRecord.init(wrongPrivKey, record).tryGet()
proc sendRdvMessage*(
    node: RendezVous, target: RendezVous, buffer: seq[byte]
): Future[seq[byte]] {.async.} =
  ## Dial ``target`` over the rendezvous codec, send the raw message
  ## ``buffer`` (length-prefixed) and return the raw response bytes.
  let conn = await node.switch.dial(target.switch.peerInfo.peerId, RendezVousCodec)
  defer:
    # Always close the stream, even when writeLp/readLp raises.
    await conn.close()
  await conn.writeLp(buffer)
  let response = await conn.readLp(4096)
  response
proc prepareRegisterMessage*(
    namespace: string, spr: seq[byte], ttl: Duration
): Message =
  ## Build a rendezvous Register message for ``namespace`` carrying the
  ## encoded signed peer record ``spr`` with the given ``ttl``.
  let registration =
    Register(ns: namespace, signedPeerRecord: spr, ttl: Opt.some(ttl.seconds.uint64))
  Message(msgType: MessageType.Register, register: Opt.some(registration))
proc prepareDiscoverMessage*(
    ns: Opt[string] = Opt.none(string),
    limit: Opt[uint64] = Opt.none(uint64),
    cookie: Opt[seq[byte]] = Opt.none(seq[byte]),
): Message =
  ## Build a rendezvous Discover message with optional namespace, result
  ## limit and pagination cookie.
  let query = Discover(ns: ns, limit: limit, cookie: cookie)
  Message(msgType: MessageType.Discover, discover: Opt.some(query))

125
tests/mix/testcrypto.nim Normal file
View File

@@ -0,0 +1,125 @@
{.used.}
import nimcrypto, results, unittest
import ../../libp2p/protocols/mix/crypto
suite "cryptographic_functions_tests":
  # NOTE(review): the `cast[array[16, byte]](...)` conversions below reinterpret
  # a string VALUE's in-memory representation rather than its character bytes
  # (unlike cast[seq[byte]], whose layout matches the string's payload). The
  # round-trip tests pass regardless since both passes use the same key/IV,
  # but confirm the key material is what was intended.
  test "aes_ctr_encrypt_decrypt":
    # AES-CTR with the same key/IV is an involution: two passes restore data,
    # and a single pass must actually change the bytes.
    let
      key = cast[array[16, byte]]("thisis16byteskey")
      iv = cast[array[16, byte]]("thisis16bytesiv!")
      data: seq[byte] = cast[seq[byte]]("thisisdata")
    let encrypted = aes_ctr(key, iv, data)
    let decrypted = aes_ctr(key, iv, encrypted)
    check:
      data == decrypted
      data != encrypted
  test "sha256_hash_computation":
    # Pins sha256_hash against a fixed hex digest.
    let
      data: seq[byte] = cast[seq[byte]]("thisisdata")
      expectedHashHex =
        "b53a20ecf0814267a83be82f941778ffda4b85fbf93a07847539f645ff5f1b9b"
      expectedHash = fromHex(expectedHashHex)
      hash = sha256_hash(data)
    check hash == expectedHash
  test "kdf_computation":
    # kdf output is pinned to a fixed 16-byte (128-bit) value.
    let
      key: seq[byte] = cast[seq[byte]]("thisiskey")
      expectedKdfHex = "37c9842d37dc404854428a0a3554dcaa"
      expectedKdf = fromHex(expectedKdfHex)
      derivedKey = kdf(key)
    check derivedKey == expectedKdf
  test "hmac_computation":
    # hmac output is pinned to a fixed 16-byte value.
    let
      key: seq[byte] = cast[seq[byte]]("thisiskey")
      data: seq[byte] = cast[seq[byte]]("thisisdata")
      expectedHmacHex = "b075dd302655e085d35e8cef5dfdf101"
      expectedHmac = fromHex(expectedHmacHex)
      hmacResult = hmac(key, data)
    check hmacResult == expectedHmac
  test "aes_ctr_empty_data":
    # Encrypting an empty buffer must produce an empty buffer.
    let
      key = cast[array[16, byte]]("thisis16byteskey")
      iv = cast[array[16, byte]]("thisis16bytesiv!")
      emptyData: array[0, byte] = []
    let encrypted = aes_ctr(key, iv, emptyData)
    let decrypted = aes_ctr(key, iv, encrypted)
    check:
      emptyData == decrypted
      emptyData == encrypted
  test "sha256_hash_empty_data":
    # Expected value is the well-known SHA-256 digest of the empty input.
    let
      emptyData: array[0, byte] = []
      expectedHashHex =
        "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
      expectedHash = fromHex(expectedHashHex)
      hash = sha256_hash(emptyData)
    check hash == expectedHash
  test "kdf_empty_key":
    # kdf of an empty key equals the truncated empty-input SHA-256 digest.
    let
      emptyKey: array[0, byte] = []
      expectedKdfHex = "e3b0c44298fc1c149afbf4c8996fb924"
      expectedKdf = fromHex(expectedKdfHex)
      derivedKey = kdf(emptyKey)
    check derivedKey == expectedKdf
  test "hmac_empty_key_and_data":
    # hmac of empty key and data is pinned to a fixed 16-byte value.
    let
      emptyKey: array[0, byte] = []
      emptyData: array[0, byte] = []
      expectedHmacHex = "b613679a0814d9ec772f95d778c35fc5"
      expectedHmac = fromHex(expectedHmacHex)
      hmacResult = hmac(emptyKey, emptyData)
    check hmacResult == expectedHmac
  test "aes_ctr_start_index_zero_index":
    # With startIndex 0 the indexed variant must equal plain aes_ctr.
    let
      key = cast[array[16, byte]]("thisis16byteskey")
      iv = cast[array[16, byte]]("thisis16bytesiv!")
      data: seq[byte] = cast[seq[byte]]("thisisdata")
      startIndex = 0
    let encrypted = aes_ctr_start_index(key, iv, data, startIndex)
    let expected = aes_ctr(key, iv, data)
    check encrypted == expected
  test "aes_ctr_start_index_empty_data":
    # Indexed variant on empty data must also yield empty output.
    let
      key = cast[array[16, byte]]("thisis16byteskey")
      iv = cast[array[16, byte]]("thisis16bytesiv!")
      emptyData: array[0, byte] = []
      startIndex = 0
    let encrypted = aes_ctr_start_index(key, iv, emptyData, startIndex)
    check emptyData == encrypted
  test "aes_ctr_start_index_middle":
    # Encrypting the tail from byte 16 (one AES block in) must match the
    # tail of a full-message encryption: keystream position is absolute.
    let
      key = cast[array[16, byte]]("thisis16byteskey")
      iv = cast[array[16, byte]]("thisis16bytesiv!")
      data: seq[byte] = cast[seq[byte]]("thisisverylongdata")
      startIndex = 16
    let encrypted2 = aes_ctr_start_index(key, iv, data[startIndex ..^ 1], startIndex)
    let encrypted1 = aes_ctr(key, iv, data[0 .. startIndex - 1])
    let expected = aes_ctr(key, iv, data)
    check encrypted1 & encrypted2 == expected

View File

@@ -0,0 +1,45 @@
{.used.}
import results, unittest
import ../../libp2p/crypto/curve25519
import ../../libp2p/protocols/mix/curve25519
proc isNotZero(key: FieldElement): bool =
  ## True when at least one byte of `key` differs from zero.
  result = false
  for b in key:
    if b != 0:
      return true
suite "curve25519_tests":
  test "generate_key_pair":
    # Key pair sanity: both halves are FieldElementSize bytes, non-zero,
    # and the public key equals the base point raised to the private scalar.
    let (privateKey, publicKey) = generateKeyPair().expect("generate keypair error")
    check:
      fieldElementToBytes(privateKey).len == FieldElementSize
      fieldElementToBytes(publicKey).len == FieldElementSize
      privateKey.isNotZero()
      publicKey.isNotZero()
    let derivedPublicKey = multiplyBasePointWithScalars(@[privateKey]).expect(
      "multiply base point with scalar error"
    )
    check publicKey == derivedPublicKey
  test "commutativity":
    # Scalar multiplication commutes: applying two scalars in either order,
    # to the base point or to an existing public point, yields one result.
    let
      scalarA = generateRandomFieldElement().expect("generate random field element error")
      scalarB = generateRandomFieldElement().expect("generate random field element error")
      baseAB = multiplyBasePointWithScalars(@[scalarA, scalarB]).expect(
        "multiply base point with scalar errors"
      )
      baseBA = multiplyBasePointWithScalars(@[scalarB, scalarA]).expect(
        "multiply base point with scalar errors"
      )
      pointBA = multiplyPointWithScalars(public(scalarB), @[scalarA])
      pointAB = multiplyPointWithScalars(public(scalarA), @[scalarB])
    check:
      baseAB == baseBA
      baseAB == pointBA
      baseAB == pointAB

View File

@@ -0,0 +1,101 @@
{.used.}
import results, unittest
import ../../libp2p/peerid
import ../../libp2p/protocols/mix/[serialization, fragmentation]
suite "Fragmentation":
  # Fixed peer id used by every test to parameterize chunking.
  let peerId =
    PeerId.init("16Uiu2HAmFkwLVsVh6gGPmSm9R3X4scJ5thVdKfWYeJsKeVrbcgVC").get()
  test "serialize and deserialize message chunk":
    # A chunk must round-trip through serialize/deserialize unchanged.
    let
      message = newSeq[byte](DataSize)
      chunks = padAndChunkMessage(message, peerId)
      serialized = chunks[0].serialize()
      deserialized =
        MessageChunk.deserialize(serialized).expect("Deserialization error")
    check chunks[0] == deserialized
  test "pad and unpad small message":
    # Padding fills a short message up to DataSize and records the pad
    # length; removePadding must restore the original length.
    let
      message = cast[seq[byte]]("Hello, World!")
      messageBytesLen = len(message)
      paddedMsg = addPadding(message, peerId)
      unpaddedMessage = removePadding(paddedMsg).expect("Unpad error")
    let (paddingLength, data, _) = paddedMsg.get()
    check:
      paddingLength == uint16(DataSize - messageBytesLen)
      data.len == DataSize
      unpaddedMessage.len == messageBytesLen
  test "pad and chunk large message":
    # Only the final chunk of a multi-chunk message carries padding.
    let
      message = newSeq[byte](MessageSize * 2 + (MessageSize - 1))
      messageBytesLen = len(message)
      chunks = padAndChunkMessage(message, peerId)
      totalChunks = max(1, ceilDiv(messageBytesLen, DataSize))
    check chunks.len == totalChunks
    for i in 0 ..< totalChunks:
      let (paddingLength, data, _) = chunks[i].get()
      if i != totalChunks - 1:
        check paddingLength == 0
      else:
        # NOTE(review): assumes messageBytesLen is not an exact multiple of
        # DataSize (the mod would be 0 and the expected padding DataSize) —
        # confirm this holds for the chosen message size.
        let chunkSize = messageBytesLen mod DataSize
        check paddingLength == uint16(DataSize - chunkSize)
      check data.len == DataSize
  test "chunk sequence numbers are consecutive":
    # Chunks of one message carry strictly consecutive sequence numbers.
    let
      message = newSeq[byte](MessageSize * 3)
      messageBytesLen = len(message)
      chunks = padAndChunkMessage(message, peerId)
      totalChunks = max(1, ceilDiv(messageBytesLen, DataSize))
    check chunks.len == totalChunks
    let (_, _, firstSeqNo) = chunks[0].get()
    for i in 1 ..< totalChunks:
      let (_, _, seqNo) = chunks[i].get()
      check seqNo == firstSeqNo + uint32(i)
  test "chunk data reconstructs original message":
    # Stripping each chunk's padding prefix and concatenating the remainders
    # must yield the original message.
    let
      message = cast[seq[byte]]("This is a test message that will be split into multiple chunks.")
      chunks = padAndChunkMessage(message, peerId)
    var reconstructed: seq[byte]
    for chunk in chunks:
      let (paddingLength, data, _) = chunk.get()
      reconstructed.add(data[paddingLength.int ..^ 1])
    check reconstructed == message
  test "empty message handling":
    # An empty message still produces exactly one fully-padded chunk.
    let
      message = cast[seq[byte]]("")
      chunks = padAndChunkMessage(message, peerId)
    check chunks.len == 1
    let (paddingLength, _, _) = chunks[0].get()
    check paddingLength == uint16(DataSize)
  test "message size equal to chunk size":
    # An exactly-DataSize message needs one chunk and no padding.
    let
      message = newSeq[byte](DataSize)
      chunks = padAndChunkMessage(message, peerId)
    check chunks.len == 1
    let (paddingLength, _, _) = chunks[0].get()
    check paddingLength == 0

View File

@@ -0,0 +1,34 @@
{.used.}
import results, unittest
import ../../libp2p/protocols/mix/mix_message
import stew/byteutils
# Define test cases
suite "mix_message_tests":
  test "serialize_and_deserialize_mix_message":
    # Round-trip: a MixMessage must survive serialize/deserialize intact,
    # preserving both payload bytes and codec string.
    let
      message = "Hello World!"
      codec = "/test/codec/1.0.0"
      original = MixMessage.init(message.toBytes(), codec)
      serialized = original.serialize()
      decoded =
        MixMessage.deserialize(serialized).expect("deserialization failed")
    check:
      message == string.fromBytes(decoded.message)
      codec == decoded.codec
  test "serialize_empty_mix_message":
    # An empty payload must also round-trip without loss.
    let
      emptyMessage = ""
      codec = "/test/codec/1.0.0"
      original = MixMessage.init(emptyMessage.toBytes(), codec)
      serialized = original.serialize()
      decoded = MixMessage.deserialize(serialized).expect("deserialization failed")
    check:
      emptyMessage == string.fromBytes(decoded.message)
      codec == decoded.codec

84
tests/mix/testmixnode.nim Normal file
View File

@@ -0,0 +1,84 @@
{.used.}
import strformat, results, unittest2
import ../../libp2p/[crypto/crypto, crypto/secp, multiaddress, multicodec, peerid]
import ../../libp2p/protocols/mix/[curve25519, mix_node]
suite "Mix Node Tests":
  # NOTE(review): this threadvar is shadowed by the `let mixNodes` declared in
  # `setup` below (unittest2 injects the setup body into each test), so it
  # appears unused — confirm whether it can be removed.
  var mixNodes {.threadvar.}: MixNodes
  setup:
    const count = 5
    # Fresh set of `count` generated nodes for every test; shadows the
    # threadvar above.
    let mixNodes = initializeMixNodes(count).expect("could not generate mix nodes")
    check:
      mixNodes.len == count
  teardown:
    # Remove on-disk node/public info written by the file-I/O tests.
    deleteNodeInfoFolder()
    deletePubInfoFolder()
  test "get mix node by index":
    # Each generated node must expose consistent identity material and a
    # sequentially assigned TCP port starting at 4242.
    for i in 0 ..< count:
      let node = mixNodes[i]
      let
        (peerId, multiAddr, mixPubKey, mixPrivKey, libp2pPubKey, libp2pPrivKey) =
          node.get()
        pubKeyProto = PublicKey(scheme: Secp256k1, skkey: libp2pPubKey)
        expectedPeerId = PeerId.init(pubKeyProto).get()
        expectedMA = MultiAddress.init(fmt"/ip4/0.0.0.0/tcp/{4242 + i}").tryGet()
      check:
        peerId == expectedPeerId
        multiAddr == expectedMA
        fieldElementToBytes(mixPubKey).len == FieldElementSize
        fieldElementToBytes(mixPrivKey).len == FieldElementSize
        libp2pPubKey.getBytes().len == SkRawPublicKeySize
        libp2pPrivKey.getBytes().len == SkRawPrivateKeySize
  test "find mixnode by peerid":
    # Every generated node must be retrievable by its own peer id.
    for i in 0 ..< count:
      let
        node = mixNodes[i]
        foundNode = mixNodes.findByPeerId(node.peerId).expect("find mix node error")
      check:
        foundNode == node
  test "invalid_peer_id_lookup":
    # A peer id that was never generated must not be found.
    let peerId = PeerId.random().expect("could not generate peerId")
    check:
      mixNodes.findByPeerId(peerId).isErr()
  test "write and read mixnodeinfo":
    # Round-trip full node info through the per-index file storage.
    for i in 0 ..< count:
      let node = mixNodes[i]
      node.writeToFile(i).expect("File write error")
      let readNode = MixNodeInfo.readFromFile(i).expect("Read node error")
      check:
        readNode == node
  test "write and read mixpubinfo":
    # Round-trip the public subset of node info through file storage.
    for i in 0 ..< count:
      let mixPubInfo =
        mixNodes.getMixPubInfoByIndex(i).expect("couldnt obtain mixpubinfo")
      mixPubInfo.writeToFile(i).expect("File write error")
      let readNode = MixPubInfo.readFromFile(i).expect("File read error")
      check:
        mixPubInfo == readNode
  test "read nonexistent mixnodeinfo":
    # Reading an index that was never written must fail.
    check:
      MixNodeInfo.readFromFile(999).isErr()
  test "generate mix nodes with different ports":
    # Shadows setup's nodes: a fresh set starting at an explicit base port.
    const count = 3
    let basePort = 5000
    let mixNodes =
      initializeMixNodes(count, basePort).expect("could not generate mix nodes")
    for i in 0 ..< count:
      let tcpPort = mixNodes[i].multiAddr.getPart(multiCodec("tcp")).value()
      let expectedPort = MultiAddress.init(fmt"/tcp/{basePort + i}").tryGet()
      check:
        tcpPort == expectedPort

View File

@@ -0,0 +1,57 @@
{.used.}
import results, unittest
import ../../libp2p/protocols/mix/[serialization, multiaddr]
import ../../libp2p/[peerid, multiaddress]
# Parses `ma` into a MultiAddress, raising via tryGet on invalid input.
template maddr(ma: string): MultiAddress =
  MultiAddress.init(ma).tryGet()
proc maddrConversionShouldFail(ma: string, msg: string) =
  ## Registers a test named `msg` asserting that address `ma` cannot be
  ## converted to the fixed-size mix address byte representation.
  test msg:
    let
      randomPeer = PeerId.random().expect("could not generate peerId")
      parsed = MultiAddress.init(ma).expect("could not initialize multiaddr")
    check:
      multiAddrToBytes(randomPeer, parsed).isErr
suite "Utils tests":
  test "multiaddress conversion":
    # Every supported address shape must convert to the fixed AddrSize byte
    # form and back without loss, preserving the paired peer id.
    let multiAddrs = [
      "/ip4/0.0.0.0/tcp/4242", "/ip4/10.0.0.1/tcp/1234",
      "/ip4/192.168.1.1/udp/8080/quic-v1",
      "/ip4/10.0.0.1/tcp/1234/p2p/16Uiu2HAmDHw4mwBdEjxjJPhrt8Eq1kvDjXAuwkqCmhNiz363AFV2/p2p-circuit",
      "/ip4/10.0.0.1/udp/1234/quic-v1/p2p/16Uiu2HAmDHw4mwBdEjxjJPhrt8Eq1kvDjXAuwkqCmhNiz363AFV2/p2p-circuit",
    ]
    for multiAddr in multiAddrs:
      let
        ma = maddr(multiAddr)
        peerId = PeerId.random().expect("could not generate peerId")
        multiAddrBytes = multiAddrToBytes(peerId, ma).expect("conversion failed")
      check multiAddrBytes.len == AddrSize
      let (dPeerId, deserializedMa) =
        bytesToMultiAddr(multiAddrBytes).expect("conversion failed")
      check:
        deserializedMa == ma
        dPeerId == peerId
  # Negative cases: each call registers its own named test.
  maddrConversionShouldFail("/ip4/0.0.0.0/tcp/4242/quic-v1/", "invalid protocol")
  maddrConversionShouldFail("/ip4/0.0.0.0", "invalid multiaddress format")
  maddrConversionShouldFail(
    "/ip4/0.0.0.0/tcp/4242/p2p-circuit", "invalid multiaddress format circuit relay"
  )
  maddrConversionShouldFail(
    "/ip4/0.0.0.0/tcp/4242/p2p/QmcycySVeRSftFQGM392xCqDh6UUbhSU9ykNpxrFBPX3gJ/p2p-circuit",
    "invalid peerId in circuit relay addr",
  )
  test "invalid address length":
    # Byte buffers shorter than AddrSize must be rejected.
    let invalidBytes = newSeq[byte](AddrSize - 1)
    check:
      bytesToMultiAddr(invalidBytes).isErr

View File

@@ -0,0 +1,94 @@
{.used.}
import chronicles, sets, unittest
import std/[os, times]
import ../../libp2p/peerid
include ../../libp2p/protocols/mix/seqno_generator
# One second in milliseconds, for the sleep() call below.
const second = 1000
suite "Sequence Number Generator":
  test "init_seq_no_from_peer_id":
    # A sequence number seeded from a peer id must not start at zero.
    let
      peerId =
        PeerId.init("16Uiu2HAmFkwLVsVh6gGPmSm9R3X4scJ5thVdKfWYeJsKeVrbcgVC").get()
      seqNo = SeqNo.init(peerId)
    check seqNo != 0
  test "generate_seq_nos_for_different_messages":
    # Different message contents must yield different sequence numbers.
    let
      peerId =
        PeerId.init("16Uiu2HAmFkwLVsVh6gGPmSm9R3X4scJ5thVdKfWYeJsKeVrbcgVC").get()
      msg1 = @[byte 1, 2, 3]
      msg2 = @[byte 4, 5, 6]
    var seqNo = SeqNo.init(peerId)
    seqNo.generate(msg1)
    let seqNo1 = seqNo
    seqNo.generate(msg2)
    let seqNo2 = seqNo
    check seqNo1 != seqNo2
  test "generate_seq_nos_for_same_message":
    # NOTE(review): the 1s sleep suggests generate() mixes in a timestamp so
    # the same message at different times yields different numbers — confirm
    # against seqno_generator's implementation (pulled in via `include`).
    let
      peerId =
        PeerId.init("16Uiu2HAmFkwLVsVh6gGPmSm9R3X4scJ5thVdKfWYeJsKeVrbcgVC").get()
      msg = @[byte 1, 2, 3]
    var seqNo = SeqNo.init(peerId)
    seqNo.generate(msg)
    let seqNo1 = seqNo
    sleep(second)
    seqNo.generate(msg)
    let seqNo2 = seqNo
    check seqNo1 != seqNo2
  test "generate_seq_nos_for_different_peer_ids":
    # Distinct peer ids must seed distinct initial sequence numbers.
    let
      peerId1 =
        PeerId.init("16Uiu2HAmFkwLVsVh6gGPmSm9R3X4scJ5thVdKfWYeJsKeVrbcgVC").get()
      peerId2 =
        PeerId.init("16Uiu2HAm6WNzw8AssyPscYYi8x1bY5wXyQrGTShRH75bh5dPCjBQ").get()
    var
      seqNo1 = SeqNo.init(peerId1)
      seqNo2 = SeqNo.init(peerId2)
    check seqNo1 != seqNo2
  test "increment_seq_no":
    # inc() advances the counter by exactly one.
    let peerId =
      PeerId.init("16Uiu2HAmFkwLVsVh6gGPmSm9R3X4scJ5thVdKfWYeJsKeVrbcgVC").get()
    var seqNo: SeqNo = SeqNo.init(peerId)
    let initialCounter = seqNo
    seqNo.inc()
    check seqNo == initialCounter + 1
  test "seq_no_wraps_around_at_max_value":
    # NOTE(review): relies on the custom SeqNo `inc` overload from the
    # included seqno_generator wrapping to 0 at high(uint32) - 1; stdlib inc
    # would yield high(uint32) here — confirm the overload is what resolves.
    var seqNo = high(uint32) - 1
    seqNo.inc()
    check seqNo == 0
  test "generate_seq_no_uses_entire_uint32_range":
    # Statistical spread check: 10000 generated values should be mostly
    # unique across the uint32 range.
    let peerId =
      PeerId.init("16Uiu2HAmFkwLVsVh6gGPmSm9R3X4scJ5thVdKfWYeJsKeVrbcgVC").get()
    var
      seqNo = SeqNo.init(peerId)
      seenValues = initHashSet[uint32]()
    for i in 0 ..< 10000:
      seqNo.generate(@[i.uint8])
      seenValues.incl(seqNo)
    check seenValues.len > 9000

View File

@@ -0,0 +1,71 @@
{.used.}
import results, unittest
import ../../libp2p/protocols/mix/serialization
# Define test cases
suite "serialization_tests":
  test "serialize_and_deserialize_header":
    # An all-zero alpha/beta/gamma header serializes to exactly HeaderSize.
    let header = Header.init(
      newSeq[byte](AlphaSize), newSeq[byte](BetaSize), newSeq[byte](GammaSize)
    )
    let serialized = header.serialize()
    check serialized.len() == HeaderSize
  test "serialize_and_deserialize_message":
    # A Message must round-trip through serialize/deserialize unchanged.
    let message = Message(newSeq[byte](MessageSize))
    let serialized = message.serialize()
    let deserialized =
      Message.deserialize(serialized).expect("Failed to deserialize message")
    check message == deserialized
  test "serialize_and_deserialize_hop":
    # A Hop must round-trip; compared via its raw bytes (get()).
    let hop = Hop.init(newSeq[byte](AddrSize))
    let serialized = hop.serialize()
    let deserialized = Hop.deserialize(serialized).expect("Failed to deserialize hop")
    check hop.get() == deserialized.get()
  test "serialize_and_deserialize_routing_info":
    # RoutingInfo deserialization expects the padded on-wire length, hence
    # the (t + 1) * k zero suffix appended before deserializing.
    let routingInfo = RoutingInfo.init(
      Hop.init(newSeq[byte](AddrSize)),
      newSeq[byte](DelaySize),
      newSeq[byte](GammaSize),
      newSeq[byte](((r * (t + 1)) - t) * k),
    )
    let serialized = routingInfo.serialize()
    let suffixLength = (t + 1) * k
    let suffix = newSeq[byte](suffixLength)
    let deserialized = RoutingInfo.deserialize(serialized & suffix).expect(
      "Failed to deserialize routing info"
    )
    let
      (hop, delay, gamma, beta) = getRoutingInfo(routingInfo)
      (dHop, dDelay, dGamma, dBeta) = getRoutingInfo(deserialized)
    check:
      hop.get() == dHop.get()
      delay == dDelay
      gamma == dGamma
      # Compare only the original beta prefix; the rest is the zero suffix.
      beta == dBeta[0 .. (((r * (t + 1)) - t) * k) - 1]
  test "serialize_and_deserialize_sphinx_packet":
    # A full packet round-trips field by field (header parts and payload).
    let
      header = Header.init(
        newSeq[byte](AlphaSize), newSeq[byte](BetaSize), newSeq[byte](GammaSize)
      )
      payload = newSeq[byte](PayloadSize)
      packet = SphinxPacket.init(header, payload)
    let serialized = packet.serialize()
    let deserializedSP =
      SphinxPacket.deserialize(serialized).expect("Failed to deserialize sphinx packet")
    check:
      header.Alpha == deserializedSP.Hdr.Alpha
      header.Beta == deserializedSP.Hdr.Beta
      header.Gamma == deserializedSP.Hdr.Gamma
      payload == deserializedSP.Payload

335
tests/mix/testsphinx.nim Normal file
View File

@@ -0,0 +1,335 @@
{.used.}
import random, results, unittest, chronicles
import ../../libp2p/crypto/crypto
import ../../libp2p/protocols/mix/[curve25519, serialization, sphinx, tag_manager]
import bearssl/rand
proc addPadding(message: openArray[byte], size: int): seq[byte] =
  ## Fits `message` to exactly `size` bytes: longer inputs are truncated,
  ## shorter ones are extended with trailing zero bytes.
  result = newSeq[byte](size)
  for i in 0 ..< min(size, message.len):
    result[i] = message[i]
# Helper function to create dummy data
proc createDummyData(): (
    Message, seq[FieldElement], seq[FieldElement], seq[seq[byte]], seq[Hop], Hop
) =
  ## Builds a 3-hop fixture: per-hop key pairs, zeroed delays, zeroed hop
  ## addresses, an all-zero message of MessageSize, and a zeroed destination.
  let (privateKey1, publicKey1) = generateKeyPair().expect("generate keypair error")
  let (privateKey2, publicKey2) = generateKeyPair().expect("generate keypair error")
  let (privateKey3, publicKey3) = generateKeyPair().expect("generate keypair error")
  let
    privateKeys = @[privateKey1, privateKey2, privateKey3]
    publicKeys = @[publicKey1, publicKey2, publicKey3]
    delay = @[newSeq[byte](DelaySize), newSeq[byte](DelaySize), newSeq[byte](DelaySize)]
    hops =
      @[
        Hop.init(newSeq[byte](AddrSize)),
        Hop.init(newSeq[byte](AddrSize)),
        Hop.init(newSeq[byte](AddrSize)),
      ]
    message = newSeq[byte](MessageSize)
    dest = Hop.init(newSeq[byte](AddrSize))
  return (message, privateKeys, publicKeys, delay, hops, dest)
# Draws a fresh random SURB identifier on each invocation.
template randomI(): SURBIdentifier =
  newRng()[].generate(SURBIdentifier)
# Unit tests for sphinx.nim
suite "Sphinx Tests":
  # Shared tag manager; recreated per test and cleared afterwards so
  # replay (duplicate-tag) state never leaks between tests.
  var tm: TagManager
  setup:
    tm = TagManager.new()
  teardown:
    clearTags(tm)
  test "sphinx wrap and process":
    # Wrap a message for 3 hops, then peel one layer per hop: the first two
    # hops report Intermediate, the last reports Exit and yields the message.
    let (message, privateKeys, publicKeys, delay, hops, dest) = createDummyData()
    let packetBytes = wrapInSphinxPacket(message, publicKeys, delay, hops, dest).expect(
      "sphinx wrap error"
    )
    check packetBytes.len == PacketSize
    let packet = SphinxPacket.deserialize(packetBytes).expect("Sphinx wrap error")
    let processedSP1 =
      processSphinxPacket(packet, privateKeys[0], tm).expect("Sphinx processing error")
    check:
      processedSP1.status == Intermediate
      processedSP1.serializedSphinxPacket.len == PacketSize
    let processedPacket1 = SphinxPacket
      .deserialize(processedSP1.serializedSphinxPacket)
      .expect("Sphinx wrap error")
    let processedSP2 = processSphinxPacket(processedPacket1, privateKeys[1], tm).expect(
      "Sphinx processing error"
    )
    check:
      processedSP2.status == Intermediate
      processedSP2.serializedSphinxPacket.len == PacketSize
    let processedPacket2 = SphinxPacket
      .deserialize(processedSP2.serializedSphinxPacket)
      .expect("Sphinx wrap error")
    let processedSP3 = processSphinxPacket(processedPacket2, privateKeys[2], tm).expect(
      "Sphinx processing error"
    )
    check:
      processedSP3.status == Exit
      processedSP3.messageChunk == message
  test "sphinx wrap empty public keys":
    # Wrapping with no hop keys must fail.
    let (message, _, _, delay, _, dest) = createDummyData()
    check wrapInSphinxPacket(message, @[], delay, @[], dest).isErr
  test "sphinx_process_invalid_mac":
    # Flipping one bit in the packet must be detected as an invalid MAC.
    let (message, privateKeys, publicKeys, delay, hops, dest) = createDummyData()
    let packetBytes = wrapInSphinxPacket(message, publicKeys, delay, hops, dest).expect(
      "Sphinx wrap error"
    )
    check packetBytes.len == PacketSize
    # Corrupt the MAC for testing
    var tamperedPacketBytes = packetBytes
    tamperedPacketBytes[0] = packetBytes[0] xor 0x01
    let tamperedPacket =
      SphinxPacket.deserialize(tamperedPacketBytes).expect("Sphinx wrap error")
    let invalidMacPkt = processSphinxPacket(tamperedPacket, privateKeys[0], tm).expect(
      "Sphinx processing error"
    )
    check invalidMacPkt.status == InvalidMAC
  test "sphinx process duplicate tag":
    # Replaying the same packet at the same hop must be flagged Duplicate.
    let (message, privateKeys, publicKeys, delay, hops, dest) = createDummyData()
    let packetBytes = wrapInSphinxPacket(message, publicKeys, delay, hops, dest).expect(
      "Sphinx wrap error"
    )
    check packetBytes.len == PacketSize
    let packet = SphinxPacket.deserialize(packetBytes).expect("Sphinx wrap error")
    # Process the packet twice to test duplicate tag handling
    let processedSP1 =
      processSphinxPacket(packet, privateKeys[0], tm).expect("Sphinx processing error")
    check processedSP1.status == Intermediate
    let processedSP2 =
      processSphinxPacket(packet, privateKeys[0], tm).expect("Sphinx processing error")
    check processedSP2.status == Duplicate
  test "sphinx wrap and process message sizes":
    # Repeats the 3-hop round trip for several payload sizes, padding each
    # random message up to MessageSize first.
    let MessageSizes = @[32, 64, 128, 256, 512]
    for size in MessageSizes:
      let (_, privateKeys, publicKeys, delay, hops, dest) = createDummyData()
      var message = newSeq[byte](size)
      randomize()
      for i in 0 ..< size:
        # NOTE(review): rand(256) is inclusive of 256, which the uint8
        # conversion wraps to 0 — rand(255) was likely intended.
        message[i] = byte(rand(256))
      let paddedMessage = addPadding(message, MessageSize)
      let packetBytes = wrapInSphinxPacket(paddedMessage, publicKeys, delay, hops, dest)
        .expect("Sphinx wrap error")
      check packetBytes.len == PacketSize
      let packet = SphinxPacket.deserialize(packetBytes).expect("Sphinx wrap error")
      let processedSP1 = processSphinxPacket(packet, privateKeys[0], tm).expect(
        "Sphinx processing error"
      )
      check:
        processedSP1.status == Intermediate
        processedSP1.serializedSphinxPacket.len == PacketSize
      let processedPacket1 = SphinxPacket
        .deserialize(processedSP1.serializedSphinxPacket)
        .expect("Sphinx wrap error")
      let processedSP2 = processSphinxPacket(processedPacket1, privateKeys[1], tm)
        .expect("Sphinx processing error")
      check:
        processedSP2.status == Intermediate
        processedSP2.serializedSphinxPacket.len == PacketSize
      let processedPacket2 = SphinxPacket
        .deserialize(processedSP2.serializedSphinxPacket)
        .expect("Sphinx wrap error")
      let processedSP3 = processSphinxPacket(processedPacket2, privateKeys[2], tm)
        .expect("Error in Sphinx processing")
      check:
        processedSP3.status == Exit
        processedSP3.messageChunk == paddedMessage
  test "create and use surb":
    # A SURB built for 3 hops must route a reply through all hops (Reply at
    # the end) and processReply must recover the original message.
    let (message, privateKeys, publicKeys, delay, hops, _) = createDummyData()
    let surb =
      createSURB(publicKeys, delay, hops, randomI()).expect("Create SURB error")
    let packetBytes = useSURB(surb, message).serialize()
    check packetBytes.len == PacketSize
    let packet = SphinxPacket.deserialize(packetBytes).expect("Sphinx wrap error")
    let processedSP1 =
      processSphinxPacket(packet, privateKeys[0], tm).expect("Sphinx processing error")
    check:
      processedSP1.status == Intermediate
      processedSP1.serializedSphinxPacket.len == PacketSize
    let processedPacket1 = SphinxPacket
      .deserialize(processedSP1.serializedSphinxPacket)
      .expect("Sphinx wrap error")
    let processedSP2 = processSphinxPacket(processedPacket1, privateKeys[1], tm).expect(
      "Sphinx processing error"
    )
    check:
      processedSP2.status == Intermediate
      processedSP2.serializedSphinxPacket.len == PacketSize
    let processedPacket2 = SphinxPacket
      .deserialize(processedSP2.serializedSphinxPacket)
      .expect("Sphinx wrap error")
    let processedSP3 = processSphinxPacket(processedPacket2, privateKeys[2], tm).expect(
      "Sphinx processing error"
    )
    check processedSP3.status == Reply
    let msg = processReply(surb.key, surb.secret.get(), processedSP3.delta_prime).expect(
      "Reply processing failed"
    )
    check msg == message
  test "create surb empty public keys":
    # Creating a SURB with no hop keys must fail.
    let (message, _, _, delay, _, _) = createDummyData()
    check createSURB(@[], delay, @[], randomI()).isErr()
  test "surb sphinx process invalid mac":
    # A tampered SURB reply packet must be rejected with InvalidMAC.
    let (message, privateKeys, publicKeys, delay, hops, _) = createDummyData()
    let surb =
      createSURB(publicKeys, delay, hops, randomI()).expect("Create SURB error")
    let packetBytes = useSURB(surb, message).serialize()
    check packetBytes.len == PacketSize
    # Corrupt the MAC for testing
    var tamperedPacketBytes = packetBytes
    tamperedPacketBytes[0] = packetBytes[0] xor 0x01
    let tamperedPacket =
      SphinxPacket.deserialize(tamperedPacketBytes).expect("Sphinx wrap error")
    let processedSP1 = processSphinxPacket(tamperedPacket, privateKeys[0], tm).expect(
      "Sphinx processing error"
    )
    check processedSP1.status == InvalidMAC
  test "surb sphinx process duplicate tag":
    # Replaying a SURB reply packet at the same hop must be flagged Duplicate.
    let (message, privateKeys, publicKeys, delay, hops, _) = createDummyData()
    let surb =
      createSURB(publicKeys, delay, hops, randomI()).expect("Create SURB error")
    let packetBytes = useSURB(surb, message).serialize()
    check packetBytes.len == PacketSize
    let packet = SphinxPacket.deserialize(packetBytes).expect("Sphinx wrap error")
    # Process the packet twice to test duplicate tag handling
    let processedSP1 =
      processSphinxPacket(packet, privateKeys[0], tm).expect("Sphinx processing error")
    check processedSP1.status == Intermediate
    let processedSP2 =
      processSphinxPacket(packet, privateKeys[0], tm).expect("Sphinx processing error")
    check processedSP2.status == Duplicate
  test "create and use surb message sizes":
    # SURB round trip repeated for several payload sizes.
    let messageSizes = @[32, 64, 128, 256, 512]
    for size in messageSizes:
      let (_, privateKeys, publicKeys, delay, hops, _) = createDummyData()
      var message = newSeq[byte](size)
      randomize()
      for i in 0 ..< size:
        # NOTE(review): same rand(256) inclusive-bound wrap as above.
        message[i] = byte(rand(256))
      let paddedMessage = addPadding(message, MessageSize)
      let surb =
        createSURB(publicKeys, delay, hops, randomI()).expect("Create SURB error")
      let packetBytes = useSURB(surb, Message(paddedMessage)).serialize()
      check packetBytes.len == PacketSize
      let packet = SphinxPacket.deserialize(packetBytes).expect("Sphinx wrap error")
      let processedSP1 = processSphinxPacket(packet, privateKeys[0], tm).expect(
        "Sphinx processing error"
      )
      check:
        processedSP1.status == Intermediate
        processedSP1.serializedSphinxPacket.len == PacketSize
      let processedPacket1 = SphinxPacket
        .deserialize(processedSP1.serializedSphinxPacket)
        .expect("Sphinx wrap error")
      let processedSP2 = processSphinxPacket(processedPacket1, privateKeys[1], tm)
        .expect("Sphinx processing error")
      check:
        processedSP2.status == Intermediate
        processedSP2.serializedSphinxPacket.len == PacketSize
      let processedPacket2 = SphinxPacket
        .deserialize(processedSP2.serializedSphinxPacket)
        .expect("Sphinx wrap error")
      let processedSP3 = processSphinxPacket(processedPacket2, privateKeys[2], tm)
        .expect("Sphinx processing error")
      check processedSP3.status == Reply
      let msg = processReply(surb.key, surb.secret.get(), processedSP3.delta_prime)
        .expect("Reply processing failed")
      check paddedMessage == msg

View File

@@ -0,0 +1,61 @@
{.used.}
import chronicles, results, unittest
import ../../libp2p/protocols/mix/[curve25519, tag_manager]
suite "tag_manager_tests":
  # Fresh manager per test; teardown clears any tags a test left behind.
  var tm: TagManager
  setup:
    tm = TagManager.new()
  teardown:
    tm.clearTags()
  test "add_and_check_tag":
    # A stored tag is reported as seen; an unrelated one is not.
    let
      storedTag = generateRandomFieldElement().expect("should generate FE")
      unseenTag = generateRandomFieldElement().expect("should generate FE")
    tm.addTag(storedTag)
    check:
      tm.isTagSeen(storedTag)
      not tm.isTagSeen(unseenTag)
  test "remove_tag":
    # Removing a tag makes it unseen again.
    let tag = generateRandomFieldElement().expect("should generate FE")
    tm.addTag(tag)
    check tm.isTagSeen(tag)
    tm.removeTag(tag)
    check not tm.isTagSeen(tag)
  test "check_tag_presence":
    # Full lifecycle: unseen -> added -> seen -> removed -> unseen.
    let tag = generateRandomFieldElement().expect("should generate FE")
    check not tm.isTagSeen(tag)
    tm.addTag(tag)
    check tm.isTagSeen(tag)
    tm.removeTag(tag)
    check not tm.isTagSeen(tag)
  test "handle_multiple_tags":
    # Tags are tracked independently of each other.
    let
      firstTag = generateRandomFieldElement().expect("should generate FE")
      secondTag = generateRandomFieldElement().expect("should generate FE")
    tm.addTag(firstTag)
    tm.addTag(secondTag)
    check:
      tm.isTagSeen(firstTag)
      tm.isTagSeen(secondTag)
    tm.removeTag(firstTag)
    tm.removeTag(secondTag)
    check:
      not tm.isTagSeen(firstTag)
      not tm.isTagSeen(secondTag)

View File

@@ -1,3 +1,3 @@
{.used.}
import testnative, ./pubsub/testpubsub, testinterop, testdaemon
import testnative, ./pubsub/testpubsub

View File

@@ -21,7 +21,7 @@ import
protocols/connectivity/autonatv2/server,
protocols/connectivity/autonatv2/utils,
protocols/connectivity/autonatv2/client,
protocols/connectivity/autonatv2/mock,
protocols/connectivity/autonatv2/mockserver,
],
./helpers
@@ -32,8 +32,9 @@ proc setupAutonat(
let
src = newStandardSwitchBuilder(addrs = srcAddrs).build()
dst = newStandardSwitchBuilder().withAutonatV2Server(config = config).build()
client = AutonatV2Client.new(src, newRng())
client = AutonatV2Client.new(newRng())
client.setup(src)
src.mount(client)
await src.start()
await dst.start()
@@ -194,11 +195,7 @@ suite "AutonatV2":
defer:
await allFutures(src.stop(), dst.stop())
check (
await client.sendDialRequest(
dst.peerInfo.peerId, dst.peerInfo.addrs, src.peerInfo.addrs
)
) ==
check (await client.sendDialRequest(dst.peerInfo.peerId, src.peerInfo.addrs)) ==
AutonatV2Response(
reachability: Reachable,
dialResp: DialResponse(
@@ -222,9 +219,7 @@ suite "AutonatV2":
defer:
await allFutures(src.stop(), dst.stop())
check (
await client.sendDialRequest(dst.peerInfo.peerId, dst.peerInfo.addrs, reqAddrs)
) ==
check (await client.sendDialRequest(dst.peerInfo.peerId, reqAddrs)) ==
AutonatV2Response(
reachability: Reachable,
dialResp: DialResponse(
@@ -243,9 +238,7 @@ suite "AutonatV2":
check (
await client.sendDialRequest(
dst.peerInfo.peerId,
dst.peerInfo.addrs,
@[MultiAddress.init("/ip4/1.1.1.1/tcp/4040").get()],
dst.peerInfo.peerId, @[MultiAddress.init("/ip4/1.1.1.1/tcp/4040").get()]
)
) ==
AutonatV2Response(
@@ -273,9 +266,7 @@ suite "AutonatV2":
defer:
await allFutures(src.stop(), dst.stop())
check (
await client.sendDialRequest(dst.peerInfo.peerId, dst.peerInfo.addrs, reqAddrs)
) ==
check (await client.sendDialRequest(dst.peerInfo.peerId, reqAddrs)) ==
AutonatV2Response(
reachability: NotReachable,
dialResp: DialResponse(
@@ -290,10 +281,11 @@ suite "AutonatV2":
let
src = newStandardSwitchBuilder().build()
dst = newStandardSwitchBuilder().build()
client = AutonatV2Client.new(src, newRng())
client = AutonatV2Client.new(newRng())
autonatV2Mock = AutonatV2Mock.new()
reqAddrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/4040").get()]
client.setup(src)
dst.mount(autonatV2Mock)
src.mount(client)
await src.start()
@@ -306,16 +298,14 @@ suite "AutonatV2":
# 1. invalid autonatv2msg
autonatV2Mock.response = DialBackResponse(status: DialBackStatus.Ok).encode()
expect(AutonatV2Error):
discard
await client.sendDialRequest(dst.peerInfo.peerId, dst.peerInfo.addrs, reqAddrs)
discard await client.sendDialRequest(dst.peerInfo.peerId, reqAddrs)
# 2. msg that is not DialResponse or DialDataRequest
autonatV2Mock.response = AutonatV2Msg(
msgType: MsgType.DialRequest, dialReq: DialRequest(addrs: @[], nonce: 0)
).encode()
expect(AutonatV2Error):
discard
await client.sendDialRequest(dst.peerInfo.peerId, dst.peerInfo.addrs, reqAddrs)
discard await client.sendDialRequest(dst.peerInfo.peerId, reqAddrs)
# 3. invalid addrIdx (e.g. 1000 when only 1 is present)
autonatV2Mock.response = AutonatV2Msg(
@@ -327,5 +317,4 @@ suite "AutonatV2":
),
).encode()
expect(AutonatV2Error):
discard
await client.sendDialRequest(dst.peerInfo.peerId, dst.peerInfo.addrs, reqAddrs)
discard await client.sendDialRequest(dst.peerInfo.peerId, reqAddrs)

View File

@@ -0,0 +1,528 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import std/sequtils
import chronos, metrics
import unittest2
import
../libp2p/[
builders,
switch,
protocols/connectivity/autonatv2/types,
protocols/connectivity/autonatv2/service,
protocols/connectivity/autonatv2/mockclient,
]
import ../libp2p/nameresolving/[nameresolver, mockresolver]
import ./helpers
proc createSwitch(
    autonatSvc: Service = nil,
    withAutonat = true,
    maxConnsPerPeer = 1,
    maxConns = 100,
    nameResolver: NameResolver = nil,
): Switch =
  ## Builds a TCP/Yamux/Noise test switch listening on an ephemeral port;
  ## optionally mounts AutonatV2, an extra service, and a name resolver.
  var sb = SwitchBuilder
    .new()
    .withRng(newRng())
    .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()], false)
    .withTcpTransport()
    .withMaxConnsPerPeer(maxConnsPerPeer)
    .withMaxConnections(maxConns)
    .withYamux()
    .withNoise()
  if withAutonat:
    sb = sb.withAutonatV2()
  if autonatSvc != nil:
    sb = sb.withServices(@[autonatSvc])
  if nameResolver != nil:
    sb = sb.withNameResolver(nameResolver)
  sb.build()
proc createSwitches(n: int): seq[Switch] =
  ## Creates `n` independent test switches with default settings.
  for _ in 0 ..< n:
    result.add(createSwitch())
proc startAll(switches: seq[Switch]) {.async.} =
  ## Starts every switch concurrently; raises if any start fails.
  await allFuturesThrowing(switches.mapIt(it.start()))
proc stopAll(switches: seq[Switch]) {.async.} =
  ## Stops every switch concurrently; raises if any stop fails.
  await allFuturesThrowing(switches.mapIt(it.stop()))
proc startAndConnect(switch: Switch, switches: seq[Switch]) {.async.} =
  ## Starts `switch` and each peer in turn, then dials every peer from
  ## `switch`.
  await switch.start()
  for other in switches:
    await other.start()
    await switch.connect(other.peerInfo.peerId, other.peerInfo.addrs)
proc newService(
    reachability: NetworkReachability,
    expectedDials = 3,
    config: AutonatV2ServiceConfig = AutonatV2ServiceConfig.new(),
): (AutonatV2Service, AutonatV2ClientMock) =
  ## Creates an AutonatV2 service wired to a mock client whose canned
  ## response reports the requested `reachability`; only the dial status
  ## differs between the reachability variants (Ok vs EDialError).
  let dialStatus =
    if reachability == Reachable: DialStatus.Ok else: DialStatus.EDialError
  let client = AutonatV2ClientMock.new(
    AutonatV2Response(
      reachability: reachability,
      dialResp: DialResponse(
        status: ResponseStatus.Ok,
        dialStatus: Opt.some(dialStatus),
        addrIdx: Opt.some(0.AddrIdx),
      ),
    ),
    expectedDials = expectedDials,
  )
  (AutonatV2Service.new(newRng(), client = client, config = config), client)
suite "AutonatV2 Service":
  # End-to-end tests for AutonatV2Service driven by AutonatV2ClientMock:
  # the mock returns a canned AutonatV2Response, so each test controls the
  # reachability verdict the service observes.
  teardown:
    checkTrackers()

  asyncTest "Reachability unknown before starting switch":
    let
      (service, client) = newService(NetworkReachability.Reachable)
      switch = createSwitch(service)
    # The service has not run yet, so no reachability verdict is recorded.
    check service.networkReachability == NetworkReachability.Unknown

  asyncTest "Peer must be reachable":
    let
      (service, client) = newService(NetworkReachability.Reachable)
      switch = createSwitch(service)
    var switches = createSwitches(3)
    let awaiter = newFuture[void]()
    # Completes once the service reports Reachable with confidence >= 0.3
    # (3 positive answers over the default queue — TODO confirm queue size).
    proc statusAndConfidenceHandler(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.Reachable and confidence.isSome() and
          confidence.get() >= 0.3:
        if not awaiter.finished:
          awaiter.complete()
    service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
    await switch.startAndConnect(switches)
    await awaiter
    check service.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_v2_reachability_confidence.value(["Reachable"]) == 0.3
    await switch.stop()
    await switches.stopAll()

  asyncTest "Peer must be not reachable":
    let
      (service, client) = newService(NetworkReachability.NotReachable)
      switch = createSwitch(service)
    var switches = createSwitches(3)
    await switch.startAndConnect(switches)
    # Wait until the mock has served all expected dial requests.
    await client.finished
    check service.networkReachability == NetworkReachability.NotReachable
    check libp2p_autonat_v2_reachability_confidence.value(["NotReachable"]) == 0.3
    await switch.stop()
    await switches.stopAll()

  asyncTest "Peer must be not reachable and then reachable":
    # First three dials answer NotReachable; the handler then flips the
    # mock's response so the next scheduled round reports Reachable.
    let (service, client) = newService(
      NetworkReachability.NotReachable,
      expectedDials = 6,
      config = AutonatV2ServiceConfig.new(scheduleInterval = Opt.some(1.seconds)),
    )
    let switch = createSwitch(service)
    var switches = createSwitches(3)
    let awaiter = newFuture[void]()
    proc statusAndConfidenceHandler(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and
          confidence.get() >= 0.3:
        if not awaiter.finished:
          # Switch the canned answer to a successful dial before
          # releasing the test body.
          client.response = AutonatV2Response(
            reachability: Reachable,
            dialResp: DialResponse(
              status: ResponseStatus.Ok,
              dialStatus: Opt.some(DialStatus.Ok),
              addrIdx: Opt.some(0.AddrIdx),
            ),
            # addrs: Opt.none(MultiAddress), # this will be inferred from sendDialRequest
          )
          awaiter.complete()
    service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
    await switch.startAndConnect(switches)
    await awaiter
    check service.networkReachability == NetworkReachability.NotReachable
    check libp2p_autonat_v2_reachability_confidence.value(["NotReachable"]) == 0.3
    # After the remaining dials (6 expected in total) the verdict flips.
    await client.finished
    check service.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_v2_reachability_confidence.value(["Reachable"]) == 0.3
    await switch.stop()
    await switches.stopAll()

  asyncTest "Peer must be reachable when one connected peer has autonat disabled":
    let (service, client) = newService(
      NetworkReachability.Reachable,
      expectedDials = 3,
      config = AutonatV2ServiceConfig.new(
        scheduleInterval = Opt.some(1.seconds), maxQueueSize = 2
      ),
    )
    let switch = createSwitch(service)
    # Two autonat-enabled peers plus one with autonat disabled; the
    # disabled peer must not prevent reaching full confidence.
    var switches = createSwitches(2)
    switches.add(createSwitch(withAutonat = false))
    let awaiter = newFuture[void]()
    proc statusAndConfidenceHandler(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.Reachable and confidence.isSome() and
          confidence.get() == 1:
        if not awaiter.finished:
          awaiter.complete()
    service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
    await switch.startAndConnect(switches)
    await awaiter
    check service.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_v2_reachability_confidence.value(["Reachable"]) == 1
    await switch.stop()
    await switches.stopAll()

  asyncTest "Unknown answers must be ignored":
    # After NotReachable is established, the mock switches to Unknown
    # answers; the service must keep its previous verdict and confidence.
    let (service, client) = newService(
      NetworkReachability.NotReachable,
      expectedDials = 6,
      config = AutonatV2ServiceConfig.new(scheduleInterval = Opt.some(1.seconds)),
    )
    let switch = createSwitch(service)
    var switches = createSwitches(3)
    let awaiter = newFuture[void]()
    proc statusAndConfidenceHandler(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and
          confidence.get() >= 0.3:
        if not awaiter.finished:
          client.response = AutonatV2Response(
            reachability: Unknown,
            dialResp: DialResponse(
              status: ResponseStatus.Ok,
              dialStatus: Opt.some(DialStatus.EDialError),
              addrIdx: Opt.some(0.AddrIdx),
            ),
          )
          awaiter.complete()
    service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
    await switch.startAndConnect(switches)
    await awaiter
    check service.networkReachability == NetworkReachability.NotReachable
    check libp2p_autonat_v2_reachability_confidence.value(["NotReachable"]) == 0.3
    await client.finished
    # Unknown answers were ignored: verdict and confidence are unchanged.
    check service.networkReachability == NetworkReachability.NotReachable
    check libp2p_autonat_v2_reachability_confidence.value(["NotReachable"]) == 0.3
    await switch.stop()
    await switches.stopAll()

  asyncTest "Calling setup and stop twice must work":
    let (service, client) = newService(
      NetworkReachability.NotReachable,
      config = AutonatV2ServiceConfig.new(scheduleInterval = Opt.some(1.seconds)),
    )
    let switch = createSwitch()
    # setup/stop are idempotent: the second call returns false.
    check (await service.setup(switch)) == true
    check (await service.setup(switch)) == false
    check (await service.stop(switch)) == true
    check (await service.stop(switch)) == false
    await switch.stop()

  asyncTest "Must bypass maxConnectionsPerPeer limit":
    # With maxConnsPerPeer = 0 a regular extra connection would be
    # rejected; the autonat dial-back must still go through.
    let (service, client) = newService(
      NetworkReachability.Reachable,
      config = AutonatV2ServiceConfig.new(
        scheduleInterval = Opt.some(1.seconds), maxQueueSize = 1
      ),
    )
    let switch1 = createSwitch(service, maxConnsPerPeer = 0)
    let switch2 =
      createSwitch(maxConnsPerPeer = 0, nameResolver = MockResolver.default())
    let awaiter = newFuture[void]()
    proc statusAndConfidenceHandler(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.Reachable and confidence.isSome() and
          confidence.get() == 1:
        if not awaiter.finished:
          awaiter.complete()
    service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
    await switch1.start()
    # Advertise a dns4 variant of the first listen address so switch2's
    # mock resolver is exercised.
    switch1.peerInfo.addrs.add(
      [
        MultiAddress.init("/dns4/localhost/").tryGet() &
          switch1.peerInfo.addrs[0][1].tryGet()
      ]
    )
    await switch2.start()
    await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)
    await awaiter
    check service.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_v2_reachability_confidence.value(["Reachable"]) == 1
    await allFuturesThrowing(switch1.stop(), switch2.stop())

  asyncTest "Must work when peers ask each other at the same time with max 1 conn per peer":
    let
      (service1, client1) = newService(
        NetworkReachability.Reachable,
        config = AutonatV2ServiceConfig.new(
          scheduleInterval = Opt.some(500.millis), maxQueueSize = 3
        ),
      )
      (service2, client2) = newService(
        NetworkReachability.Reachable,
        config = AutonatV2ServiceConfig.new(
          scheduleInterval = Opt.some(500.millis), maxQueueSize = 3
        ),
      )
      (service3, client3) = newService(
        NetworkReachability.Reachable,
        config = AutonatV2ServiceConfig.new(
          scheduleInterval = Opt.some(500.millis), maxQueueSize = 3
        ),
      )
      switch1 = createSwitch(service1, maxConnsPerPeer = 0)
      switch2 = createSwitch(service2, maxConnsPerPeer = 0)
      # NOTE(review): switch3 reuses service2 while service3/client3 (and
      # awaiter3 below) are never used — looks like a copy-paste slip;
      # confirm whether service3 was intended here.
      switch3 = createSwitch(service2, maxConnsPerPeer = 0)
      awaiter1 = newFuture[void]()
      awaiter2 = newFuture[void]()
      awaiter3 = newFuture[void]()
    proc statusAndConfidenceHandler1(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.Reachable and confidence.isSome() and
          confidence.get() == 1:
        if not awaiter1.finished:
          awaiter1.complete()
    proc statusAndConfidenceHandler2(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.Reachable and confidence.isSome() and
          confidence.get() == 1:
        if not awaiter2.finished:
          awaiter2.complete()
    service1.setStatusAndConfidenceHandler(statusAndConfidenceHandler1)
    service2.setStatusAndConfidenceHandler(statusAndConfidenceHandler2)
    await switch1.start()
    await switch2.start()
    await switch3.start()
    # Cross-connect so both sides probe each other concurrently.
    await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)
    await switch2.connect(switch1.peerInfo.peerId, switch1.peerInfo.addrs)
    await switch2.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs)
    await awaiter1
    await awaiter2
    check service1.networkReachability == NetworkReachability.Reachable
    check service2.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_v2_reachability_confidence.value(["Reachable"]) == 1
    await allFuturesThrowing(switch1.stop(), switch2.stop(), switch3.stop())

  asyncTest "Must work for one peer when two peers ask each other at the same time with max 1 conn per peer":
    let
      (service1, client1) = newService(
        NetworkReachability.Reachable,
        config = AutonatV2ServiceConfig.new(
          scheduleInterval = Opt.some(500.millis), maxQueueSize = 3
        ),
      )
      (service2, client2) = newService(
        NetworkReachability.Reachable,
        config = AutonatV2ServiceConfig.new(
          scheduleInterval = Opt.some(500.millis), maxQueueSize = 3
        ),
      )
    let switch1 = createSwitch(service1, maxConnsPerPeer = 0)
    let switch2 = createSwitch(service2, maxConnsPerPeer = 0)
    let awaiter1 = newFuture[void]()
    proc statusAndConfidenceHandler1(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.Reachable and confidence.isSome() and
          confidence.get() == 1:
        if not awaiter1.finished:
          awaiter1.complete()
    service1.setStatusAndConfidenceHandler(statusAndConfidenceHandler1)
    await switch1.start()
    await switch2.start()
    await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)
    try:
      # We allow a temp conn for the peer to dial us. It could use this conn to just connect to us and not dial.
      # We don't care if it fails at this point or not. But this conn must be closed eventually.
      # Below we check that there's only one connection between the peers
      await switch2.connect(
        switch1.peerInfo.peerId, switch1.peerInfo.addrs, reuseConnection = false
      )
    except CatchableError:
      discard
    await awaiter1
    check service1.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_v2_reachability_confidence.value(["Reachable"]) == 1
    # Make sure remote peer can't create a connection to us
    check switch1.connManager.connCount(switch2.peerInfo.peerId) == 1
    await allFuturesThrowing(switch1.stop(), switch2.stop())

  asyncTest "Must work with low maxConnections":
    let (service, client) = newService(
      NetworkReachability.Reachable,
      config = AutonatV2ServiceConfig.new(
        scheduleInterval = Opt.some(1.seconds), maxQueueSize = 1
      ),
    )
    let switch = createSwitch(service, maxConns = 4)
    var switches = createSwitches(4)
    var awaiter = newFuture[void]()
    proc statusAndConfidenceHandler(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      if networkReachability == NetworkReachability.Reachable and confidence.isSome() and
          confidence.get() == 1:
        if not awaiter.finished:
          awaiter.complete()
    service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
    await switch.start()
    await switches.startAll()
    await switch.connect(switches[0].peerInfo.peerId, switches[0].peerInfo.addrs)
    await awaiter
    # Fill up the connection budget (maxConns = 4).
    await switch.connect(switches[2].peerInfo.peerId, switches[2].peerInfo.addrs)
    await switch.connect(switches[3].peerInfo.peerId, switches[3].peerInfo.addrs)
    await switches[3].connect(switch.peerInfo.peerId, switch.peerInfo.addrs)
    # switch1 is now full, should stick to last observation
    awaiter = newFuture[void]()
    await service.run(switch)
    await sleepAsync(100.millis)
    check service.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_v2_reachability_confidence.value(["Reachable"]) == 1
    await switch.stop()
    await switches.stopAll()

  asyncTest "Peer must not ask an incoming peer":
    let (service, client) = newService(
      NetworkReachability.Reachable,
      config = AutonatV2ServiceConfig.new(scheduleInterval = Opt.some(1.seconds)),
    )
    let switch1 = createSwitch(service)
    let switch2 = createSwitch()
    # The handler must never fire: switch1 only has an incoming
    # connection (from switch2) and must not probe over it.
    proc statusAndConfidenceHandler(
        networkReachability: NetworkReachability, confidence: Opt[float]
    ) {.async: (raises: [CancelledError]).} =
      fail()
    service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
    await switch1.start()
    await switch2.start()
    await switch2.connect(switch1.peerInfo.peerId, switch1.peerInfo.addrs)
    await sleepAsync(250.milliseconds)
    await allFuturesThrowing(switch1.stop(), switch2.stop())

View File

@@ -31,6 +31,8 @@ import ./helpers
when defined(linux) and defined(amd64):
{.used.}
import ../libp2p/utils/ipaddr
suite "AutoTLS Integration":
asyncTeardown:
checkTrackers()

View File

@@ -1,123 +0,0 @@
import chronos, unittest2, helpers
import
../libp2p/daemon/daemonapi,
../libp2p/multiaddress,
../libp2p/multicodec,
../libp2p/cid,
../libp2p/multihash,
../libp2p/peerid
when defined(nimHasUsed):
{.used.}
proc identitySpawnTest(): Future[bool] {.async.} =
  ## Spawn a daemon, query its identity, and shut it down.
  let api = await newDaemonApi()
  discard await api.identity()
  await api.close()
  return true
proc connectStreamTest(): Future[bool] {.async.} =
  ## Open a stream between two daemons and echo one line through it.
  let
    api1 = await newDaemonApi()
    api2 = await newDaemonApi()
    id1 = await api1.identity()
    id2 = await api2.identity()
    protos = @["/test-stream"]
    payload = "TEST STRING"
  var received = newFuture[string]("test.future")
  proc streamHandler(
      api: DaemonAPI, stream: P2PStream
  ) {.async: (raises: [CatchableError]).} =
    # Complete with the first line the remote side writes.
    received.complete(await stream.transp.readLine())
  await api2.addHandler(protos, streamHandler)
  await api1.connect(id2.peer, id2.addresses)
  let stream = await api1.openStream(id2.peer, protos)
  let sent = await stream.transp.write(payload & "\r\n")
  # Payload plus the trailing CRLF must have been written in full.
  doAssert(sent == len(payload) + 2)
  doAssert((await wait(received, 10.seconds)) == payload)
  await stream.close()
  await api1.close()
  await api2.close()
  return true
proc pubsubTest(f: set[P2PDaemonFlags]): Future[bool] {.async.} =
  ## Spawn two daemons with flags `f`, subscribe both to "test-topic",
  ## publish one message from api1, and return true only when both
  ## subscription handlers observed the payload.
  var pubsubData = "TEST MESSAGE"
  var msgData = cast[seq[byte]](pubsubData)
  var api1, api2: DaemonAPI
  api1 = await newDaemonApi(f)
  api2 = await newDaemonApi(f)
  var id1 = await api1.identity()
  var id2 = await api2.identity()
  # Counts how many handlers saw the expected payload (target: 2).
  var resultsCount = 0
  var handlerFuture1 = newFuture[void]()
  var handlerFuture2 = newFuture[void]()
  proc pubsubHandler1(
      api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
  ): Future[bool] {.async: (raises: [CatchableError]).} =
    let smsg = cast[string](message.data)
    if smsg == pubsubData:
      inc(resultsCount)
    handlerFuture1.complete()
    # Callback must return `false` to close subscription channel.
    result = false
  proc pubsubHandler2(
      api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
  ): Future[bool] {.async: (raises: [CatchableError]).} =
    let smsg = cast[string](message.data)
    if smsg == pubsubData:
      inc(resultsCount)
    handlerFuture2.complete()
    # Callback must return `false` to close subscription channel.
    result = false
  await api1.connect(id2.peer, id2.addresses)
  await api2.connect(id1.peer, id1.addresses)
  var ticket1 = await api1.pubsubSubscribe("test-topic", pubsubHandler1)
  var ticket2 = await api2.pubsubSubscribe("test-topic", pubsubHandler2)
  # Give the mesh a moment to propagate subscriptions.
  await sleepAsync(1.seconds)
  var topics1 = await api1.pubsubGetTopics()
  var topics2 = await api2.pubsubGetTopics()
  if len(topics1) == 1 and len(topics2) == 1:
    var peers1 = await api1.pubsubListPeers("test-topic")
    var peers2 = await api2.pubsubListPeers("test-topic")
    if len(peers1) == 1 and len(peers2) == 1:
      # Publish test data via api1.
      await sleepAsync(250.milliseconds)
      await api1.pubsubPublish("test-topic", msgData)
      # Wait for both handlers or a 5-second timeout, whichever is first.
      var res =
        await one(allFutures(handlerFuture1, handlerFuture2), sleepAsync(5.seconds))
  await api1.close()
  await api2.close()
  if resultsCount == 2:
    result = true
suite "libp2p-daemon test suite":
  # Smoke tests against a spawned go-daemon process via the daemon API.
  test "Simple spawn and get identity test":
    check:
      waitFor(identitySpawnTest()) == true
  test "Connect/Accept peer/stream test":
    check:
      waitFor(connectStreamTest()) == true
  # Pubsub tests retry until two daemons exchange one message or the
  # 10-second budget is exhausted.
  asyncTest "GossipSub test":
    checkUntilTimeoutCustom(10.seconds, 100.milliseconds):
      (await pubsubTest({PSGossipSub}))
  asyncTest "FloodSub test":
    checkUntilTimeoutCustom(10.seconds, 100.milliseconds):
      (await pubsubTest({PSFloodSub}))

View File

@@ -1,58 +0,0 @@
{.used.}
import helpers, commoninterop
import ../libp2p
import ../libp2p/crypto/crypto, ../libp2p/protocols/connectivity/relay/relay
proc switchMplexCreator(
    ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
    prov = proc(config: TransportConfig): Transport =
      TcpTransport.new({}, config.upgr),
    relay: Relay = Relay.new(circuitRelayV1 = true),
): Switch {.raises: [LPError].} =
  ## Assemble an mplex-muxed TCP switch for the interop suites.
  var builder = SwitchBuilder.new()
  builder = builder.withSignedPeerRecord(false)
  builder = builder.withMaxConnections(MaxConnections)
  builder = builder.withRng(crypto.newRng())
  builder = builder.withAddresses(@[ma])
  builder = builder.withMaxIn(-1)
  builder = builder.withMaxOut(-1)
  builder = builder.withTransport(prov)
  builder = builder.withMplex()
  builder = builder.withMaxConnsPerPeer(MaxConnectionsPerPeer)
  builder = builder.withPeerStore(capacity = 1000)
  builder = builder.withNoise()
  builder = builder.withCircuitRelay(relay)
  builder = builder.withNameResolver(nil)
  builder.build()
proc switchYamuxCreator(
    ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
    prov = proc(config: TransportConfig): Transport =
      TcpTransport.new({}, config.upgr),
    relay: Relay = Relay.new(circuitRelayV1 = true),
): Switch {.raises: [LPError].} =
  ## Assemble a yamux-muxed TCP switch for the interop suites.
  var builder = SwitchBuilder.new()
  builder = builder.withSignedPeerRecord(false)
  builder = builder.withMaxConnections(MaxConnections)
  builder = builder.withRng(crypto.newRng())
  builder = builder.withAddresses(@[ma])
  builder = builder.withMaxIn(-1)
  builder = builder.withMaxOut(-1)
  builder = builder.withTransport(prov)
  builder = builder.withYamux()
  builder = builder.withMaxConnsPerPeer(MaxConnectionsPerPeer)
  builder = builder.withPeerStore(capacity = 1000)
  builder = builder.withNoise()
  builder = builder.withCircuitRelay(relay)
  builder = builder.withNameResolver(nil)
  builder.build()
suite "Tests interop":
  # Run the shared interop suites once per muxer implementation.
  commonInteropTests("mplex", switchMplexCreator)
  relayInteropTests("mplex", switchMplexCreator)
  commonInteropTests("yamux", switchYamuxCreator)
  relayInteropTests("yamux", switchYamuxCreator)

View File

@@ -123,7 +123,7 @@ const
"/ip4/127.0.0.1/ipfs", "/ip4/127.0.0.1/ipfs/tcp", "/p2p-circuit/50",
]
PathVectors = ["/unix/tmp/p2pd.sock", "/unix/a/b/c/d/e/f/g/h/i.sock"]
PathVectors = ["/unix/a/b/c/d/e/f/g/h/i.sock"]
PathExpects = [
"90030E2F746D702F703270642E736F636B",

View File

@@ -31,9 +31,9 @@ import
testnameresolve, testmultistream, testbufferstream, testidentify,
testobservedaddrmanager, testconnmngr, testswitch, testnoise, testpeerinfo,
testpeerstore, testping, testmplex, testrelayv1, testrelayv2, testyamux,
testyamuxheader, testautonat, testautonatservice, testautonatv2, testautorelay,
testdcutr, testhpservice, testutility, testhelpers, testwildcardresolverservice,
testperf
testyamuxheader, testautonat, testautonatservice, testautonatv2, testautonatv2service,
testautorelay, testdcutr, testhpservice, testutility, testhelpers,
testwildcardresolverservice, testperf
import discovery/testdiscovery
@@ -41,3 +41,9 @@ import kademlia/[testencoding, testroutingtable, testfindnode, testputval]
when defined(libp2p_autotls_support):
import testautotls
import
mix/[
testcrypto, testcurve25519, testtagmanager, testseqnogenerator, testserialization,
testmixmessage, testsphinx, testmultiaddr, testfragmentation, testmixnode,
]

View File

@@ -28,7 +28,6 @@ import
builders,
upgrademngrs/upgrade,
varint,
daemon/daemonapi,
]
import ./helpers

View File

@@ -25,6 +25,7 @@ import
builders,
protocols/ping,
wire,
utils/ipaddr,
]
from ./helpers import suite, asyncTest, asyncTeardown, checkTrackers, skip, check