Mirror of https://github.com/vacp2p/nim-libp2p.git (synced 2026-01-10 13:58:17 -05:00)

Compare commits: autotls-re ... pin-websoc (71 commits)
Commits (SHA1):
7f6d3e2dd8, 92eaf16218, 2951356c9d, 7ae21d0cbd, eee8341ad2, e83bd2d582, 998bb58aef, c1f6dec7d3,
13c613c26c, 45f0f9f47a, b1dd0a2ec6, beecfdfadb, e4faec5570, 41c9bf8e8c, 7ae366d979, 9b33cea225,
f8077f7432, 773fc67865, 7e07ffc5a8, aa1c33ffe9, f1e220fba4, 5ad656bf26, cfd631457a, 4f8597609b,
4ed72a753c, 2a9abbe925, ee61e234ac, 8f54367e3a, 61826a20e4, 32951e1a68, 1d13e405e4, 729e879c1c,
64c9cf1b9e, 4d94892eb0, 3ecb1744ce, 2f9c3fb3e2, 2609c270b8, 48b3e34cd3, abb2c43667, d1cfbb35d3,
38a630eee0, be1a2023ce, 021d0c1700, f49cd377ce, fc80840784, 7742d06a58, e0ea1d48a4, f028ad8c12,
9c153c822b, d803352bd6, 2eafac47e8, 848fdde0a8, 31e7dc68e2, 08299a2059, 2f3156eafb, 72e85101b0,
d205260a3e, 97e576d146, 888cb78331, 1d4c261d2a, 83de0c0abd, c501adc9ab, f9fc24cc08, cd26244ccc,
cabab6aafe, fb42a9b4aa, 141f4d9116, cb31152b53, 3a7745f920, a89916fb1a, c6cf46c904
.github/workflows/daily_common.yml (vendored, 14 changed lines)

@@ -27,6 +27,7 @@ jobs:
  delete_cache:
    name: Delete github action's branch cache
    runs-on: ubuntu-latest
    continue-on-error: true
    steps:
      - uses: snnaplab/delete-branch-cache-action@v1

@@ -77,14 +78,14 @@ jobs:
      - name: Install p2pd
        run: |
          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3

      - name: Install dependencies (pinned)
        if: ${{ inputs.pinned_deps }}
        run: |
          nimble install_pinned

      - name: Install dependencies (latest)
        if: ${{ inputs.pinned_deps != 'true' }}
        if: ${{ inputs.pinned_deps == false }}
        run: |
          nimble install -y --depsOnly

@@ -95,3 +96,12 @@ jobs:
          export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
          nimble test

      - name: Run integration tests
        if: ${{ matrix.platform.os == 'linux' && matrix.cpu == 'amd64' }}
        run: |
          nim --version
          nimble --version

          export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
          nimble testintegration
.github/workflows/interop.yml (vendored, 36 changed lines)

@@ -41,20 +41,22 @@ jobs:
          s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}

  run-hole-punching-interop:
    name: Run hole-punching interoperability tests
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v4
      - uses: docker/setup-buildx-action@v3
      - name: Build image
        run: docker buildx build --load -t nim-libp2p-head -f interop/hole-punching/Dockerfile .
      - name: Run tests
        uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
        with:
          test-filter: nim-libp2p-head
          extra-versions: ${{ github.workspace }}/interop/hole-punching/version.json
          s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
          s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
          s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
  # nim-libp2p#1367: hole punching tests are temporarily disabled as they keep failing
  # and the issue does not seem to be on the nim-libp2p side
  # run-hole-punching-interop:
  #   name: Run hole-punching interoperability tests
  #   runs-on: ubuntu-22.04
  #   steps:
  #     - uses: actions/checkout@v4
  #     - uses: docker/setup-buildx-action@v3
  #     - name: Build image
  #       run: docker buildx build --load -t nim-libp2p-head -f interop/hole-punching/Dockerfile .
  #     - name: Run tests
  #       uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
  #       with:
  #         test-filter: nim-libp2p-head
  #         extra-versions: ${{ github.workspace }}/interop/hole-punching/version.json
  #         s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
  #         s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
  #         s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
  #         aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
.pinned (4 changed lines)

@@ -15,5 +15,7 @@ serialization;https://github.com/status-im/nim-serialization@#548d0adc9797a10b2d
stew;https://github.com/status-im/nim-stew@#0db179256cf98eb9ce9ee7b9bc939f219e621f77
testutils;https://github.com/status-im/nim-testutils@#9e842bd58420d23044bc55e16088e8abbe93ce51
unittest2;https://github.com/status-im/nim-unittest2@#8b51e99b4a57fcfb31689230e75595f024543024
websock;https://github.com/status-im/nim-websock@#d5cd89062cd2d168ef35193c7d29d2102921d97e
websock;https://github.com/status-im/nim-websock@#b197f4a77bcd0fc083bccb564dbec89e9903b37f
zlib;https://github.com/status-im/nim-zlib@#daa8723fd32299d4ca621c837430c29a5a11e19a
jwt;https://github.com/vacp2p/nim-jwt@#18f8378de52b241f321c1f9ea905456e89b95c6f
bearssl_pkey_decoder;https://github.com/vacp2p/bearssl_pkey_decoder@#21dd3710df9345ed2ad8bf8f882761e07863b8e0
@@ -119,6 +119,11 @@ Enable quic transport support
nim c -d:libp2p_quic_support some_file.nim
```

Enable autotls support
```bash
nim c -d:libp2p_autotls_support some_file.nim
```

Enable expensive metrics (ie, metrics with per-peer cardinality):
```bash
nim c -d:libp2p_expensive_metrics some_file.nim
```
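Both defines can be combined in a single build; the nimble test task later in this compare does exactly that. A minimal sketch, mirroring the examples above (the file name is a placeholder):

```bash
# Sketch: enable both the QUIC and AutoTLS optional features in one compilation
nim c -d:libp2p_quic_support -d:libp2p_autotls_support some_file.nim
```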
@@ -1,5 +1,5 @@
# nim-libp2p examples

In this folder, you'll find the sources of the [nim-libp2p website](https://status-im.github.io/nim-libp2p/docs/)
In this folder, you'll find the sources of the [nim-libp2p website](https://vacp2p.github.io/nim-libp2p/docs/)

We recommend following the tutorials on the website, but feel free to grok the sources here!
@@ -3,4 +3,4 @@
Welcome to the nim-libp2p documentation!

Here, you'll find [tutorials](tutorial_1_connect.md) to help you get started, as well as
the [full reference](https://status-im.github.io/nim-libp2p/master/libp2p.html).
the [full reference](https://vacp2p.github.io/nim-libp2p/master/libp2p.html).
@@ -84,8 +84,8 @@ proc main() {.async.} =
    debug "Dialing relay...", relayMA
    let relayId = await switch.connect(relayMA).wait(30.seconds)
    debug "Connected to relay", relayId
  except AsyncTimeoutError:
    raise newException(CatchableError, "Connection to relay timed out")
  except AsyncTimeoutError as e:
    raise newException(CatchableError, "Connection to relay timed out: " & e.msg, e)

  # Wait for our relay address to be published
  while not switch.peerInfo.addrs.anyIt(it.contains(multiCodec("p2p-circuit")).tryGet()):
@@ -103,7 +103,7 @@ proc main() {.async.} =
    try:
      PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
    except Exception as e:
      raise newException(CatchableError, e.msg)
      raise newException(CatchableError, "Exception init peer: " & e.msg, e)

  debug "Got listener peer id", listenerId
  let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
@@ -130,8 +130,8 @@ try:
    return "done"

  discard waitFor(mainAsync().wait(4.minutes))
except AsyncTimeoutError:
  error "Program execution timed out."
except AsyncTimeoutError as e:
  error "Program execution timed out", description = e.msg
  quit(-1)
except CatchableError as e:
  error "Unexpected error", description = e.msg
@@ -47,12 +47,9 @@ proc main() {.async.} =
        MultiAddress.init("/ip4/" & ip & "/udp/0/quic-v1").tryGet()
      )
    of "ws":
      discard switchBuilder
        .withTransport(
          proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
            WsTransport.new(upgr)
      discard switchBuilder.withWsTransport().withAddress(
        MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet()
      )
        .withAddress(MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet())
    else:
      doAssert false

@@ -83,7 +80,7 @@ proc main() {.async.} =
    try:
      redisClient.bLPop(@["listenerAddr"], testTimeout.seconds.int)[1]
    except Exception as e:
      raise newException(CatchableError, e.msg)
      raise newException(CatchableError, "Exception calling bLPop: " & e.msg, e)
  let
    remoteAddr = MultiAddress.init(listenerAddr).tryGet()
    dialingStart = Moment.now()
@@ -108,8 +105,8 @@ try:
    return "done"

  discard waitFor(mainAsync().wait(testTimeout))
except AsyncTimeoutError:
  error "Program execution timed out."
except AsyncTimeoutError as e:
  error "Program execution timed out", description = e.msg
  quit(-1)
except CatchableError as e:
  error "Unexpected error", description = e.msg
@@ -1,7 +1,7 @@
mode = ScriptMode.Verbose

packageName = "libp2p"
version = "1.10.1"
version = "1.11.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
@@ -10,7 +10,9 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
requires "nim >= 1.6.0",
  "nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
  "chronicles >= 0.10.3 & < 0.11.0", "chronos >= 4.0.4", "metrics", "secp256k1",
  "stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.7"
  "stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.7",
  "https://github.com/status-im/nim-websock.git#b197f4a77bcd0fc083bccb564dbec89e9903b37f",
  "https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"

let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -29,7 +31,7 @@ proc runTest(filename: string, moreoptions: string = "") =
  excstr.add(" " & moreoptions & " ")
  if getEnv("CICOV").len > 0:
    excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
  exec excstr & " -r -d:libp2p_quic_support tests/" & filename
  exec excstr & " -r -d:libp2p_quic_support -d:libp2p_autotls_support tests/" & filename
  rmFile "tests/" & filename.toExe

proc buildSample(filename: string, run = false, extraFlags = "") =
@@ -55,12 +57,15 @@ task testinterop, "Runs interop tests":
  runTest("testinterop")

task testpubsub, "Runs pubsub tests":
  runTest("pubsub/testpubsub")
  runTest("pubsub/testpubsub", "-d:libp2p_gossipsub_1_4")

task testfilter, "Run PKI filter test":
  runTest("testpkifilter")
  runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=")

task testintegration, "Runs integration tests":
  runTest("testintegration")

task test, "Runs the test suite":
  runTest("testall")
  exec "nimble testfilter"
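A plausible local sequence for exercising these tasks, assuming only the commands already shown in this compare (the CI workflow and libp2p.nimble); exact usage may differ:

```bash
# Fetch dependencies, then run the test tasks defined in libp2p.nimble
nimble install -y --depsOnly
nimble test              # runs testall; runTest adds -d:libp2p_quic_support -d:libp2p_autotls_support
nimble testintegration   # integration tests, as wired into the daily CI job above
```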
libp2p/autotls/acme/api.nim (new file, 521 lines)
@@ -0,0 +1,521 @@
|
||||
import json, uri
|
||||
from times import DateTime, parse
|
||||
import chronos/apps/http/httpclient, results, chronicles
|
||||
|
||||
import ./utils
|
||||
import ../../crypto/crypto
|
||||
import ../../crypto/rsa
|
||||
|
||||
export ACMEError
|
||||
|
||||
logScope:
|
||||
topics = "libp2p acme api"
|
||||
|
||||
const
|
||||
LetsEncryptURL* = "https://acme-v02.api.letsencrypt.org"
|
||||
LetsEncryptURLStaging* = "https://acme-staging-v02.api.letsencrypt.org"
|
||||
Alg = "RS256"
|
||||
DefaultChalCompletedRetries = 10
|
||||
DefaultChalCompletedRetryTime = 1.seconds
|
||||
DefaultFinalizeRetries = 10
|
||||
DefaultFinalizeRetryTime = 1.seconds
|
||||
DefaultRandStringSize = 256
|
||||
ACMEHttpHeaders = [("Content-Type", "application/jose+json")]
|
||||
|
||||
type Authorization* = string
|
||||
type Domain* = string
|
||||
type Kid* = string
|
||||
type Nonce* = string
|
||||
|
||||
type ACMEDirectory* = object
|
||||
newNonce*: string
|
||||
newOrder*: string
|
||||
newAccount*: string
|
||||
|
||||
type ACMEApi* = ref object of RootObj
|
||||
directory: Opt[ACMEDirectory]
|
||||
session: HttpSessionRef
|
||||
acmeServerURL*: Uri
|
||||
|
||||
type HTTPResponse* = object
|
||||
body*: JsonNode
|
||||
headers*: HttpTable
|
||||
|
||||
type JWK = object
|
||||
kty: string
|
||||
n: string
|
||||
e: string
|
||||
|
||||
# whether the request uses Kid or not
|
||||
type ACMERequestType = enum
|
||||
ACMEJwkRequest
|
||||
ACMEKidRequest
|
||||
|
||||
type ACMERequestHeader = object
|
||||
alg: string
|
||||
typ: string
|
||||
nonce: Nonce
|
||||
url: string
|
||||
case kind: ACMERequestType
|
||||
of ACMEJwkRequest:
|
||||
jwk: JWK
|
||||
of ACMEKidRequest:
|
||||
kid: Kid
|
||||
|
||||
type Email = string
|
||||
|
||||
type ACMERegisterRequest* = object
|
||||
termsOfServiceAgreed: bool
|
||||
contact: seq[Email]
|
||||
|
||||
type ACMEAccountStatus = enum
|
||||
valid = "valid"
|
||||
deactivated = "deactivated"
|
||||
revoked = "revoked"
|
||||
|
||||
type ACMERegisterResponseBody = object
|
||||
status*: ACMEAccountStatus
|
||||
|
||||
type ACMERegisterResponse* = object
|
||||
kid*: Kid
|
||||
status*: ACMEAccountStatus
|
||||
|
||||
type ACMEChallengeStatus* {.pure.} = enum
|
||||
PENDING = "pending"
|
||||
PROCESSING = "processing"
|
||||
VALID = "valid"
|
||||
INVALID = "invalid"
|
||||
|
||||
type ACMEOrderStatus* {.pure.} = enum
|
||||
PENDING = "pending"
|
||||
READY = "ready"
|
||||
PROCESSING = "processing"
|
||||
VALID = "valid"
|
||||
INVALID = "invalid"
|
||||
|
||||
type ACMEChallengeType* {.pure.} = enum
|
||||
DNS01 = "dns-01"
|
||||
HTTP01 = "http-01"
|
||||
TLSALPN01 = "tls-alpn-01"
|
||||
|
||||
type ACMEChallengeToken* = string
|
||||
|
||||
type ACMEChallenge* = object
|
||||
url*: string
|
||||
`type`*: ACMEChallengeType
|
||||
status*: ACMEChallengeStatus
|
||||
token*: ACMEChallengeToken
|
||||
|
||||
type ACMEChallengeIdentifier = object
|
||||
`type`: string
|
||||
value: string
|
||||
|
||||
type ACMEChallengeRequest = object
|
||||
identifiers: seq[ACMEChallengeIdentifier]
|
||||
|
||||
type ACMEChallengeResponseBody = object
|
||||
status: ACMEOrderStatus
|
||||
authorizations: seq[Authorization]
|
||||
finalize: string
|
||||
|
||||
type ACMEChallengeResponse* = object
|
||||
status*: ACMEOrderStatus
|
||||
authorizations*: seq[Authorization]
|
||||
finalize*: string
|
||||
order*: string
|
||||
|
||||
type ACMEChallengeResponseWrapper* = object
|
||||
finalize*: string
|
||||
order*: string
|
||||
dns01*: ACMEChallenge
|
||||
|
||||
type ACMEAuthorizationsResponse* = object
|
||||
challenges*: seq[ACMEChallenge]
|
||||
|
||||
type ACMECompletedResponse* = object
|
||||
url: string
|
||||
|
||||
type ACMECheckKind* = enum
|
||||
ACMEOrderCheck
|
||||
ACMEChallengeCheck
|
||||
|
||||
type ACMECheckResponse* = object
|
||||
case kind: ACMECheckKind
|
||||
of ACMEOrderCheck:
|
||||
orderStatus: ACMEOrderStatus
|
||||
of ACMEChallengeCheck:
|
||||
chalStatus: ACMEChallengeStatus
|
||||
retryAfter: Duration
|
||||
|
||||
type ACMEFinalizeResponse* = object
|
||||
status: ACMEOrderStatus
|
||||
|
||||
type ACMEOrderResponse* = object
|
||||
certificate: string
|
||||
expires: string
|
||||
|
||||
type ACMECertificateResponse* = object
|
||||
rawCertificate*: string
|
||||
certificateExpiry*: DateTime
|
||||
|
||||
when defined(libp2p_autotls_support):
|
||||
import options, sequtils, strutils, jwt, bearssl/pem
|
||||
|
||||
template handleError*(msg: string, body: untyped): untyped =
|
||||
try:
|
||||
body
|
||||
except ACMEError as exc:
|
||||
raise exc
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except JsonKindError as exc:
|
||||
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
|
||||
except ValueError as exc:
|
||||
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
|
||||
except HttpError as exc:
|
||||
raise newException(ACMEError, msg & ": Failed to connect to ACME server", exc)
|
||||
except CatchableError as exc:
|
||||
raise newException(ACMEError, msg & ": Unexpected error", exc)
|
||||
|
||||
method post*(
|
||||
self: ACMEApi, uri: Uri, payload: string
|
||||
): Future[HTTPResponse] {.
|
||||
async: (raises: [ACMEError, HttpError, CancelledError]), base
|
||||
.}
|
||||
|
||||
method get*(
|
||||
self: ACMEApi, uri: Uri
|
||||
): Future[HTTPResponse] {.
|
||||
async: (raises: [ACMEError, HttpError, CancelledError]), base
|
||||
.}
|
||||
|
||||
proc new*(
|
||||
T: typedesc[ACMEApi], acmeServerURL: Uri = parseUri(LetsEncryptURL)
|
||||
): ACMEApi =
|
||||
let session = HttpSessionRef.new()
|
||||
|
||||
ACMEApi(
|
||||
session: session, directory: Opt.none(ACMEDirectory), acmeServerURL: acmeServerURL
|
||||
)
|
||||
|
||||
proc getDirectory(
|
||||
self: ACMEApi
|
||||
): Future[ACMEDirectory] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
handleError("getDirectory"):
|
||||
self.directory.valueOr:
|
||||
let acmeResponse = await self.get(self.acmeServerURL / "directory")
|
||||
let directory = acmeResponse.body.to(ACMEDirectory)
|
||||
self.directory = Opt.some(directory)
|
||||
directory
|
||||
|
||||
method requestNonce*(
|
||||
self: ACMEApi
|
||||
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]), base.} =
|
||||
handleError("requestNonce"):
|
||||
let acmeResponse = await self.get(parseUri((await self.getDirectory()).newNonce))
|
||||
Nonce(acmeResponse.headers.keyOrError("Replay-Nonce"))
|
||||
|
||||
# TODO: save n and e in account so we don't have to recalculate every time
|
||||
proc acmeHeader(
|
||||
self: ACMEApi, uri: Uri, key: KeyPair, needsJwk: bool, kid: Opt[Kid]
|
||||
): Future[ACMERequestHeader] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
if not needsJwk and kid.isNone():
|
||||
raise newException(ACMEError, "kid not set")
|
||||
|
||||
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
|
||||
raise newException(ACMEError, "Unsupported signing key type")
|
||||
|
||||
let newNonce = await self.requestNonce()
|
||||
if needsJwk:
|
||||
let pubkey = key.pubkey.rsakey
|
||||
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
|
||||
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
|
||||
ACMERequestHeader(
|
||||
kind: ACMEJwkRequest,
|
||||
alg: Alg,
|
||||
typ: "JWT",
|
||||
nonce: newNonce,
|
||||
url: $uri,
|
||||
jwk: JWK(kty: "RSA", n: base64UrlEncode(nArray), e: base64UrlEncode(eArray)),
|
||||
)
|
||||
else:
|
||||
ACMERequestHeader(
|
||||
kind: ACMEKidRequest,
|
||||
alg: Alg,
|
||||
typ: "JWT",
|
||||
nonce: newNonce,
|
||||
url: $uri,
|
||||
kid: kid.get(),
|
||||
)
|
||||
|
||||
method post*(
|
||||
self: ACMEApi, uri: Uri, payload: string
|
||||
): Future[HTTPResponse] {.
|
||||
async: (raises: [ACMEError, HttpError, CancelledError]), base
|
||||
.} =
|
||||
let rawResponse = await HttpClientRequestRef
|
||||
.post(self.session, $uri, body = payload, headers = ACMEHttpHeaders)
|
||||
.get()
|
||||
.send()
|
||||
let body = await rawResponse.getResponseBody()
|
||||
HTTPResponse(body: body, headers: rawResponse.headers)
|
||||
|
||||
method get*(
|
||||
self: ACMEApi, uri: Uri
|
||||
): Future[HTTPResponse] {.
|
||||
async: (raises: [ACMEError, HttpError, CancelledError]), base
|
||||
.} =
|
||||
let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
|
||||
let body = await rawResponse.getResponseBody()
|
||||
HTTPResponse(body: body, headers: rawResponse.headers)
|
||||
|
||||
proc createSignedAcmeRequest(
|
||||
self: ACMEApi,
|
||||
uri: Uri,
|
||||
payload: auto,
|
||||
key: KeyPair,
|
||||
needsJwk: bool = false,
|
||||
kid: Opt[Kid] = Opt.none(Kid),
|
||||
): Future[string] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
|
||||
raise newException(ACMEError, "Unsupported signing key type")
|
||||
|
||||
let acmeHeader = await self.acmeHeader(uri, key, needsJwk, kid)
|
||||
handleError("createSignedAcmeRequest"):
|
||||
var token = toJWT(%*{"header": acmeHeader, "claims": payload})
|
||||
let derPrivKey = key.seckey.rsakey.getBytes.get
|
||||
let pemPrivKey: string = pemEncode(derPrivKey, "PRIVATE KEY")
|
||||
token.sign(pemPrivKey)
|
||||
$token.toFlattenedJson()
|
||||
|
||||
proc requestRegister*(
|
||||
self: ACMEApi, key: KeyPair
|
||||
): Future[ACMERegisterResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
let registerRequest = ACMERegisterRequest(termsOfServiceAgreed: true)
|
||||
handleError("acmeRegister"):
|
||||
let payload = await self.createSignedAcmeRequest(
|
||||
parseUri((await self.getDirectory()).newAccount),
|
||||
registerRequest,
|
||||
key,
|
||||
needsJwk = true,
|
||||
)
|
||||
let acmeResponse =
|
||||
await self.post(parseUri((await self.getDirectory()).newAccount), payload)
|
||||
let acmeResponseBody = acmeResponse.body.to(ACMERegisterResponseBody)
|
||||
|
||||
ACMERegisterResponse(
|
||||
status: acmeResponseBody.status,
|
||||
kid: acmeResponse.headers.keyOrError("location"),
|
||||
)
|
||||
|
||||
proc requestNewOrder*(
|
||||
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
|
||||
): Future[ACMEChallengeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
# request challenge from ACME server
|
||||
let orderRequest = ACMEChallengeRequest(
|
||||
identifiers: domains.mapIt(ACMEChallengeIdentifier(`type`: "dns", value: it))
|
||||
)
|
||||
handleError("requestNewOrder"):
|
||||
let payload = await self.createSignedAcmeRequest(
|
||||
parseUri((await self.getDirectory()).newOrder),
|
||||
orderRequest,
|
||||
key,
|
||||
kid = Opt.some(kid),
|
||||
)
|
||||
let acmeResponse =
|
||||
await self.post(parseUri((await self.getDirectory()).newOrder), payload)
|
||||
let challengeResponseBody = acmeResponse.body.to(ACMEChallengeResponseBody)
|
||||
if challengeResponseBody.authorizations.len == 0:
|
||||
raise newException(ACMEError, "Authorizations field is empty")
|
||||
ACMEChallengeResponse(
|
||||
status: challengeResponseBody.status,
|
||||
authorizations: challengeResponseBody.authorizations,
|
||||
finalize: challengeResponseBody.finalize,
|
||||
order: acmeResponse.headers.keyOrError("location"),
|
||||
)
|
||||
|
||||
proc requestAuthorizations*(
|
||||
self: ACMEApi, authorizations: seq[Authorization], key: KeyPair, kid: Kid
|
||||
): Future[ACMEAuthorizationsResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
handleError("requestAuthorizations"):
|
||||
doAssert authorizations.len > 0
|
||||
let acmeResponse = await self.get(parseUri(authorizations[0]))
|
||||
acmeResponse.body.to(ACMEAuthorizationsResponse)
|
||||
|
||||
proc requestChallenge*(
|
||||
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
|
||||
): Future[ACMEChallengeResponseWrapper] {.
|
||||
async: (raises: [ACMEError, CancelledError])
|
||||
.} =
|
||||
let orderResponse = await self.requestNewOrder(domains, key, kid)
|
||||
if orderResponse.status != ACMEOrderStatus.PENDING and
|
||||
orderResponse.status != ACMEOrderStatus.READY:
|
||||
# ready is a valid status when renewing certs before expiry
|
||||
raise
|
||||
newException(ACMEError, "Invalid new order status: " & $orderResponse.status)
|
||||
|
||||
let authorizationsResponse =
|
||||
await self.requestAuthorizations(orderResponse.authorizations, key, kid)
|
||||
if authorizationsResponse.challenges.len == 0:
|
||||
raise newException(ACMEError, "No challenges received")
|
||||
|
||||
return ACMEChallengeResponseWrapper(
|
||||
finalize: orderResponse.finalize,
|
||||
order: orderResponse.order,
|
||||
dns01: authorizationsResponse.challenges.filterIt(
|
||||
it.`type` == ACMEChallengeType.DNS01
|
||||
)[0],
|
||||
# getting the first element is safe since we checked that authorizationsResponse.challenges.len != 0
|
||||
)
|
||||
|
||||
proc requestCheck*(
|
||||
self: ACMEApi, checkURL: Uri, checkKind: ACMECheckKind, key: KeyPair, kid: Kid
|
||||
): Future[ACMECheckResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
handleError("requestCheck"):
|
||||
let acmeResponse = await self.get(checkURL)
|
||||
let retryAfter =
|
||||
try:
|
||||
parseInt(acmeResponse.headers.keyOrError("Retry-After")).seconds
|
||||
except ValueError:
|
||||
DefaultChalCompletedRetryTime
|
||||
|
||||
case checkKind
|
||||
of ACMEOrderCheck:
|
||||
try:
|
||||
ACMECheckResponse(
|
||||
kind: checkKind,
|
||||
orderStatus: parseEnum[ACMEOrderStatus](acmeResponse.body["status"].getStr),
|
||||
retryAfter: retryAfter,
|
||||
)
|
||||
except ValueError:
|
||||
raise newException(
|
||||
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
|
||||
)
|
||||
of ACMEChallengeCheck:
|
||||
try:
|
||||
ACMECheckResponse(
|
||||
kind: checkKind,
|
||||
chalStatus:
|
||||
parseEnum[ACMEChallengeStatus](acmeResponse.body["status"].getStr),
|
||||
retryAfter: retryAfter,
|
||||
)
|
||||
except ValueError:
|
||||
raise newException(
|
||||
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
|
||||
)
|
||||
|
||||
proc sendChallengeCompleted*(
|
||||
self: ACMEApi, chalURL: Uri, key: KeyPair, kid: Kid
|
||||
): Future[ACMECompletedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
handleError("sendChallengeCompleted"):
|
||||
let payload =
|
||||
await self.createSignedAcmeRequest(chalURL, %*{}, key, kid = Opt.some(kid))
|
||||
let acmeResponse = await self.post(chalURL, payload)
|
||||
acmeResponse.body.to(ACMECompletedResponse)
|
||||
|
||||
proc checkChallengeCompleted*(
|
||||
self: ACMEApi,
|
||||
checkURL: Uri,
|
||||
key: KeyPair,
|
||||
kid: Kid,
|
||||
retries: int = DefaultChalCompletedRetries,
|
||||
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
for i in 0 .. retries:
|
||||
let checkResponse =
|
||||
await self.requestCheck(checkURL, ACMEChallengeCheck, key, kid)
|
||||
case checkResponse.chalStatus
|
||||
of ACMEChallengeStatus.PENDING:
|
||||
await sleepAsync(checkResponse.retryAfter) # try again after some delay
|
||||
of ACMEChallengeStatus.VALID:
|
||||
return true
|
||||
else:
|
||||
raise newException(
|
||||
ACMEError,
|
||||
"Failed challenge completion: expected 'valid', got '" &
|
||||
$checkResponse.chalStatus & "'",
|
||||
)
|
||||
return false
|
||||
|
||||
proc completeChallenge*(
|
||||
self: ACMEApi,
|
||||
chalURL: Uri,
|
||||
key: KeyPair,
|
||||
kid: Kid,
|
||||
retries: int = DefaultChalCompletedRetries,
|
||||
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
let completedResponse = await self.sendChallengeCompleted(chalURL, key, kid)
|
||||
# check until acme server is done (poll validation)
|
||||
return await self.checkChallengeCompleted(chalURL, key, kid, retries = retries)
|
||||
|
||||
proc requestFinalize*(
|
||||
self: ACMEApi, domain: Domain, finalize: Uri, key: KeyPair, kid: Kid
|
||||
): Future[ACMEFinalizeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
handleError("requestFinalize"):
|
||||
let payload = await self.createSignedAcmeRequest(
|
||||
finalize, %*{"csr": createCSR(domain)}, key, kid = Opt.some(kid)
|
||||
)
|
||||
let acmeResponse = await self.post(finalize, payload)
|
||||
# server responds with updated order response
|
||||
acmeResponse.body.to(ACMEFinalizeResponse)
|
||||
|
||||
proc checkCertFinalized*(
|
||||
self: ACMEApi,
|
||||
order: Uri,
|
||||
key: KeyPair,
|
||||
kid: Kid,
|
||||
retries: int = DefaultChalCompletedRetries,
|
||||
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
for i in 0 .. retries:
|
||||
let checkResponse = await self.requestCheck(order, ACMEOrderCheck, key, kid)
|
||||
case checkResponse.orderStatus
|
||||
of ACMEOrderStatus.VALID:
|
||||
return true
|
||||
of ACMEOrderStatus.PROCESSING:
|
||||
await sleepAsync(checkResponse.retryAfter) # try again after some delay
|
||||
else:
|
||||
error "Failed certificate finalization",
|
||||
description = "expected 'valid', got '" & $checkResponse.orderStatus & "'"
|
||||
return false # do not try again
|
||||
|
||||
return false
|
||||
|
||||
proc certificateFinalized*(
|
||||
self: ACMEApi,
|
||||
domain: Domain,
|
||||
finalize: Uri,
|
||||
order: Uri,
|
||||
key: KeyPair,
|
||||
kid: Kid,
|
||||
retries: int = DefaultFinalizeRetries,
|
||||
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
let finalizeResponse = await self.requestFinalize(domain, finalize, key, kid)
|
||||
# keep checking order until cert is valid (done)
|
||||
return await self.checkCertFinalized(order, key, kid, retries = retries)
|
||||
|
||||
proc requestGetOrder*(
|
||||
self: ACMEApi, order: Uri
|
||||
): Future[ACMEOrderResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
handleError("requestGetOrder"):
|
||||
let acmeResponse = await self.get(order)
|
||||
acmeResponse.body.to(ACMEOrderResponse)
|
||||
|
||||
proc downloadCertificate*(
|
||||
self: ACMEApi, order: Uri
|
||||
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
let orderResponse = await self.requestGetOrder(order)
|
||||
|
||||
handleError("downloadCertificate"):
|
||||
let rawResponse = await HttpClientRequestRef
|
||||
.get(self.session, orderResponse.certificate)
|
||||
.get()
|
||||
.send()
|
||||
ACMECertificateResponse(
|
||||
rawCertificate: bytesToString(await rawResponse.getBodyBytes()),
|
||||
certificateExpiry: parse(orderResponse.expires, "yyyy-MM-dd'T'HH:mm:ss'Z'"),
|
||||
)
|
||||
|
||||
proc close*(self: ACMEApi) {.async: (raises: [CancelledError]).} =
|
||||
await self.session.closeWait()
|
||||
|
||||
else:
|
||||
{.hint: "autotls disabled. Use -d:libp2p_autotls_support".}
|
||||
libp2p/autotls/acme/client.nim (new file, 93 lines)
@@ -0,0 +1,93 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import uri
import chronos, results, chronicles, stew/byteutils

import ./api, ./utils
import ../../crypto/crypto
import ../../crypto/rsa

export api

type KeyAuthorization* = string

type ACMEClient* = ref object
  api: ACMEApi
  key*: KeyPair
  kid*: Kid

logScope:
  topics = "libp2p acme client"

when defined(libp2p_autotls_support):
  proc new*(
      T: typedesc[ACMEClient],
      rng: ref HmacDrbgContext = newRng(),
      api: ACMEApi = ACMEApi.new(acmeServerURL = parseUri(LetsEncryptURL)),
      key: Opt[KeyPair] = Opt.none(KeyPair),
      kid: Kid = Kid(""),
  ): T {.raises: [].} =
    let key = key.valueOr:
      KeyPair.random(PKScheme.RSA, rng[]).get()
    T(api: api, key: key, kid: kid)

  proc getOrInitKid*(
      self: ACMEClient
  ): Future[Kid] {.async: (raises: [ACMEError, CancelledError]).} =
    if self.kid.len == 0:
      let registerResponse = await self.api.requestRegister(self.key)
      self.kid = registerResponse.kid
    return self.kid

  proc genKeyAuthorization*(self: ACMEClient, token: string): KeyAuthorization =
    base64UrlEncode(@(sha256.digest((token & "." & thumbprint(self.key)).toBytes).data))

  proc getChallenge*(
      self: ACMEClient, domains: seq[api.Domain]
  ): Future[ACMEChallengeResponseWrapper] {.
      async: (raises: [ACMEError, CancelledError])
  .} =
    await self.api.requestChallenge(domains, self.key, await self.getOrInitKid())

  proc getCertificate*(
      self: ACMEClient, domain: api.Domain, challenge: ACMEChallengeResponseWrapper
  ): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
    let chalURL = parseUri(challenge.dns01.url)
    let orderURL = parseUri(challenge.order)
    let finalizeURL = parseUri(challenge.finalize)
    trace "sending challenge completed notification"
    discard await self.api.sendChallengeCompleted(
      chalURL, self.key, await self.getOrInitKid()
    )

    trace "checking for completed challenge"
    let completed = await self.api.checkChallengeCompleted(
      chalURL, self.key, await self.getOrInitKid()
    )
    if not completed:
      raise newException(
        ACMEError, "Failed to signal ACME server about challenge completion"
      )

    trace "waiting for certificate to be finalized"
    let finalized = await self.api.certificateFinalized(
      domain, finalizeURL, orderURL, self.key, await self.getOrInitKid()
    )
    if not finalized:
      raise
        newException(ACMEError, "Failed to finalize certificate for domain " & domain)

    trace "downloading certificate"
    await self.api.downloadCertificate(orderURL)

  proc close*(self: ACMEClient) {.async: (raises: [CancelledError]).} =
    await self.api.close()
libp2p/autotls/acme/mockapi.nim (new file, 39 lines)
@@ -0,0 +1,39 @@
import uri
import chronos, chronos/apps/http/httpclient, json
import ./api, ./utils

export api

type MockACMEApi* = ref object of ACMEApi
  mockedResponses*: seq[HTTPResponse]

proc new*(
    T: typedesc[MockACMEApi]
): Future[T] {.async: (raises: [ACMEError, CancelledError]).} =
  let directory = ACMEDirectory(
    newNonce: LetsEncryptURL & "/new-nonce",
    newOrder: LetsEncryptURL & "/new-order",
    newAccount: LetsEncryptURL & "/new-account",
  )
  MockACMEApi(
    session: HttpSessionRef.new(),
    directory: Opt.some(directory),
    acmeServerURL: parseUri(LetsEncryptURL),
  )

method requestNonce*(
    self: MockACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
  return $self.acmeServerURL & "/acme/1234"

method post*(
    self: MockACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
  result = self.mockedResponses[0]
  self.mockedResponses.delete(0)

method get*(
    self: MockACMEApi, uri: Uri
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
  result = self.mockedResponses[0]
  self.mockedResponses.delete(0)
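As an illustration of how this mock is meant to be driven, the sketch below queues one canned response; the body and header values are hypothetical, and the overridden post/get simply hand back queued HTTPResponse objects in FIFO order:

```nim
# Hypothetical test setup: queue a canned ACME reply for the next request.
let mockApi = waitFor MockACMEApi.new()
var headers = HttpTable.init()
headers.add("location", "https://acme.example/acct/1")  # placeholder account URL
mockApi.mockedResponses.add(
  HTTPResponse(body: %*{"status": "valid"}, headers: headers)
)
```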
libp2p/autotls/acme/utils.nim (new file, 70 lines)
@@ -0,0 +1,70 @@
import base64, strutils, chronos/apps/http/httpclient, json
import ../../errors
import ../../transports/tls/certificate_ffi
import ../../transports/tls/certificate
import ../../crypto/crypto
import ../../crypto/rsa

type ACMEError* = object of LPError

when defined(libp2p_autotls_support):
  proc keyOrError*(table: HttpTable, key: string): string {.raises: [ValueError].} =
    if not table.contains(key):
      raise newException(ValueError, "key " & key & " not present in headers")
    table.getString(key)

  proc base64UrlEncode*(data: seq[byte]): string =
    ## Encodes data using base64url (RFC 4648 §5): no padding, URL-safe
    var encoded = base64.encode(data, safe = true)
    encoded.removeSuffix("=")
    encoded.removeSuffix("=")
    return encoded

  proc thumbprint*(key: KeyPair): string =
    doAssert key.seckey.scheme == PKScheme.RSA, "unsupported keytype"
    let pubkey = key.pubkey.rsakey
    let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
    let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))

    let n = base64UrlEncode(nArray)
    let e = base64UrlEncode(eArray)
    let keyJson = %*{"e": e, "kty": "RSA", "n": n}
    let digest = sha256.digest($keyJson)
    return base64UrlEncode(@(digest.data))

  proc getResponseBody*(
      response: HttpClientResponseRef
  ): Future[JsonNode] {.async: (raises: [ACMEError, CancelledError]).} =
    try:
      let bodyBytes = await response.getBodyBytes()
      if bodyBytes.len > 0:
        return bytesToString(bodyBytes).parseJson()
      return %*{} # empty body
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      raise newException(
        ACMEError, "Unexpected error occurred while getting body bytes", exc
      )
    except Exception as exc: # this is required for nim 1.6
      raise newException(
        ACMEError, "Unexpected error occurred while getting body bytes", exc
      )

  proc createCSR*(domain: string): string {.raises: [ACMEError].} =
    var certKey: cert_key_t
    var certCtx: cert_context_t
    var derCSR: ptr cert_buffer = nil

    let personalizationStr = "libp2p_autotls"
    if cert_init_drbg(
      personalizationStr.cstring, personalizationStr.len.csize_t, certCtx.addr
    ) != CERT_SUCCESS:
      raise newException(ACMEError, "Failed to initialize certCtx")
    if cert_generate_key(certCtx, certKey.addr) != CERT_SUCCESS:
      raise newException(ACMEError, "Failed to generate cert key")

    if cert_signing_req(domain.cstring, certKey, derCSR.addr) != CERT_SUCCESS:
      raise newException(ACMEError, "Failed to create CSR")

    base64.encode(derCSR.toSeq, safe = true)
libp2p/autotls/service.nim (new file, 230 lines)
@@ -0,0 +1,230 @@
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2025 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
{.push public.}
|
||||
|
||||
import net, results, json, sequtils
|
||||
|
||||
import chronos/apps/http/httpclient, chronos, chronicles, bearssl/rand
|
||||
|
||||
import
|
||||
./acme/client,
|
||||
./utils,
|
||||
../crypto/crypto,
|
||||
../nameresolving/dnsresolver,
|
||||
../peeridauth/client,
|
||||
../peerinfo,
|
||||
../switch,
|
||||
../utils/heartbeat,
|
||||
../wire
|
||||
|
||||
logScope:
|
||||
topics = "libp2p autotls"
|
||||
|
||||
export LetsEncryptURL, AutoTLSError
|
||||
|
||||
const
|
||||
DefaultDnsServers* =
|
||||
@[
|
||||
initTAddress("1.1.1.1:53"),
|
||||
initTAddress("1.0.0.1:53"),
|
||||
initTAddress("[2606:4700:4700::1111]:53"),
|
||||
]
|
||||
DefaultRenewCheckTime* = 1.hours
|
||||
DefaultRenewBufferTime = 1.hours
|
||||
|
||||
AutoTLSBroker* = "registration.libp2p.direct"
|
||||
AutoTLSDNSServer* = "libp2p.direct"
|
||||
HttpOk* = 200
|
||||
HttpCreated* = 201
|
||||
# NoneIp is needed because nim 1.6.16 can't do proper generic inference
|
||||
NoneIp = Opt.none(IpAddress)
|
||||
|
||||
type SigParam = object
|
||||
k: string
|
||||
v: seq[byte]
|
||||
|
||||
type AutotlsCert* = ref object
|
||||
cert*: TLSCertificate
|
||||
expiry*: Moment
|
||||
|
||||
type AutotlsConfig* = ref object
|
||||
acmeServerURL*: Uri
|
||||
dnsResolver*: DnsResolver
|
||||
ipAddress: Opt[IpAddress]
|
||||
renewCheckTime*: Duration
|
||||
renewBufferTime*: Duration
|
||||
|
||||
type AutotlsService* = ref object of Service
|
||||
acmeClient: ACMEClient
|
||||
bearer*: Opt[BearerToken]
|
||||
brokerClient: PeerIDAuthClient
|
||||
cert*: Opt[AutotlsCert]
|
||||
certReady*: AsyncEvent
|
||||
config: AutotlsConfig
|
||||
managerFut: Future[void]
|
||||
peerInfo: PeerInfo
|
||||
rng: ref HmacDrbgContext
|
||||
|
||||
when defined(libp2p_autotls_support):
|
||||
proc new*(T: typedesc[AutotlsCert], cert: TLSCertificate, expiry: Moment): T =
|
||||
T(cert: cert, expiry: expiry)
|
||||
|
||||
proc getCertWhenReady*(
|
||||
self: AutotlsService
|
||||
): Future[TLSCertificate] {.async: (raises: [AutoTLSError, CancelledError]).} =
|
||||
await self.certReady.wait()
|
||||
return self.cert.get.cert
|
||||
|
||||
proc new*(
|
||||
T: typedesc[AutotlsConfig],
|
||||
ipAddress: Opt[IpAddress] = NoneIp,
|
||||
nameServers: seq[TransportAddress] = DefaultDnsServers,
|
||||
acmeServerURL: Uri = parseUri(LetsEncryptURL),
|
||||
renewCheckTime: Duration = DefaultRenewCheckTime,
|
||||
renewBufferTime: Duration = DefaultRenewBufferTime,
|
||||
): T =
|
||||
T(
|
||||
dnsResolver: DnsResolver.new(nameServers),
|
||||
acmeServerURL: acmeServerURL,
|
||||
ipAddress: ipAddress,
|
||||
renewCheckTime: renewCheckTime,
|
||||
renewBufferTime: renewBufferTime,
|
||||
)
|
||||
|
||||
proc new*(
|
||||
T: typedesc[AutotlsService],
|
||||
rng: ref HmacDrbgContext = newRng(),
|
||||
config: AutotlsConfig = AutotlsConfig.new(),
|
||||
): T =
|
||||
T(
|
||||
acmeClient:
|
||||
ACMEClient.new(api = ACMEApi.new(acmeServerURL = config.acmeServerURL)),
|
||||
brokerClient: PeerIDAuthClient.new(),
|
||||
bearer: Opt.none(BearerToken),
|
||||
cert: Opt.none(AutotlsCert),
|
||||
certReady: newAsyncEvent(),
|
||||
config: config,
|
||||
managerFut: nil,
|
||||
peerInfo: nil,
|
||||
rng: rng,
|
||||
)
|
||||
|
||||
method setup*(
|
||||
self: AutotlsService, switch: Switch
|
||||
): Future[bool] {.async: (raises: [CancelledError]).} =
|
||||
trace "Setting up AutotlsService"
|
||||
let hasBeenSetup = await procCall Service(self).setup(switch)
|
||||
if hasBeenSetup:
|
||||
self.peerInfo = switch.peerInfo
|
||||
if self.config.ipAddress.isNone():
|
||||
try:
|
||||
self.config.ipAddress = Opt.some(getPublicIPAddress())
|
||||
except AutoTLSError as exc:
|
||||
error "Failed to get public IP address", err = exc.msg
|
||||
return false
|
||||
self.managerFut = self.run(switch)
|
||||
return hasBeenSetup
|
||||
|
||||
method issueCertificate(
|
||||
self: AutotlsService
|
||||
) {.
|
||||
base, async: (raises: [AutoTLSError, ACMEError, PeerIDAuthError, CancelledError])
|
||||
.} =
|
||||
trace "Issuing certificate"
|
||||
|
||||
assert not self.peerInfo.isNil(), "Cannot issue new certificate: peerInfo not set"
|
||||
|
||||
# generate autotls domain string: "*.{peerID}.libp2p.direct"
|
||||
let baseDomain =
|
||||
api.Domain(encodePeerId(self.peerInfo.peerId) & "." & AutoTLSDNSServer)
|
||||
let domain = api.Domain("*." & baseDomain)
|
||||
|
||||
let acmeClient = self.acmeClient
|
||||
|
||||
trace "Requesting ACME challenge"
|
||||
let dns01Challenge = await acmeClient.getChallenge(@[domain])
|
||||
let keyAuth = acmeClient.genKeyAuthorization(dns01Challenge.dns01.token)
|
||||
let strMultiaddresses: seq[string] = self.peerInfo.addrs.mapIt($it)
|
||||
let payload = %*{"value": keyAuth, "addresses": strMultiaddresses}
|
||||
let registrationURL = parseUri("https://" & AutoTLSBroker & "/v1/_acme-challenge")
|
||||
|
||||
trace "Sending challenge to AutoTLS broker"
|
||||
let (bearer, response) =
|
||||
await self.brokerClient.send(registrationURL, self.peerInfo, payload, self.bearer)
|
||||
if self.bearer.isNone():
|
||||
# save bearer token for future
|
||||
self.bearer = Opt.some(bearer)
|
||||
if response.status != HttpOk:
|
||||
raise newException(
|
||||
AutoTLSError, "Failed to authenticate with AutoTLS Broker at " & AutoTLSBroker
|
||||
)
|
||||
|
||||
debug "Waiting for DNS record to be set"
|
||||
let dnsSet = await checkDNSRecords(
|
||||
self.config.dnsResolver, self.config.ipAddress.get(), baseDomain, keyAuth
|
||||
)
|
||||
if not dnsSet:
|
||||
raise newException(AutoTLSError, "DNS records not set")
|
||||
|
||||
debug "Notifying challenge completion to ACME and downloading cert"
|
||||
let certResponse = await acmeClient.getCertificate(domain, dns01Challenge)
|
||||
|
||||
debug "Installing certificate"
|
||||
let newCert =
|
||||
try:
|
||||
AutotlsCert.new(
|
||||
TLSCertificate.init(certResponse.rawCertificate),
|
||||
asMoment(certResponse.certificateExpiry),
|
||||
)
|
||||
except TLSStreamProtocolError:
|
||||
raise newException(AutoTLSError, "Could not parse downloaded certificates")
|
||||
self.cert = Opt.some(newCert)
|
||||
self.certReady.fire()
|
||||
debug "Certificate installed"
|
||||
|
||||
method run*(
|
||||
self: AutotlsService, switch: Switch
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
heartbeat "Certificate Management", self.config.renewCheckTime:
|
||||
if self.cert.isNone():
|
||||
try:
|
||||
await self.issueCertificate()
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
error "Failed to issue certificate", err = exc.msg
|
||||
break
|
||||
|
||||
# AutotlsService will renew the cert 1h before it expires
|
||||
let cert = self.cert.get
|
||||
let waitTime = cert.expiry - Moment.now - self.config.renewBufferTime
|
||||
if waitTime <= self.config.renewBufferTime:
|
||||
try:
|
||||
await self.issueCertificate()
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
error "Failed to renew certificate", err = exc.msg
|
||||
break
|
||||
|
||||
method stop*(
|
||||
self: AutotlsService, switch: Switch
|
||||
): Future[bool] {.async: (raises: [CancelledError]).} =
|
||||
let hasBeenStopped = await procCall Service(self).stop(switch)
|
||||
if hasBeenStopped:
|
||||
if not self.acmeClient.isNil():
|
||||
await self.acmeClient.close()
|
||||
if not self.brokerClient.isNil():
|
||||
await self.brokerClient.close()
|
||||
if not self.managerFut.isNil():
|
||||
await self.managerFut.cancelAndWait()
|
||||
self.managerFut = nil
|
||||
return hasBeenStopped
|
||||
libp2p/autotls/utils.nim (new file, 109 lines)
@@ -0,0 +1,109 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
{.push public.}

import chronos
import ../errors

const
  DefaultDnsRetries = 10
  DefaultDnsRetryTime = 1.seconds

type AutoTLSError* = object of LPError

when defined(libp2p_autotls_support):
  import net, strutils
  from times import DateTime, toTime, toUnix
  import stew/base36, chronicles
  import
    ../peerid,
    ../multihash,
    ../cid,
    ../multicodec,
    ../nameresolving/dnsresolver,
    ./acme/client

  proc checkedGetPrimaryIPAddr*(): IpAddress {.raises: [AutoTLSError].} =
    # This is so that we don't need to catch Exceptions directly
    # since we support 1.6.16 and getPrimaryIPAddr before nim 2 didn't have explicit .raises. pragmas
    try:
      return getPrimaryIPAddr()
    except Exception as exc:
      raise newException(AutoTLSError, "Error while getting primary IP address", exc)

  proc isIPv4*(ip: IpAddress): bool =
    ip.family == IpAddressFamily.IPv4

  proc isPublic*(ip: IpAddress): bool {.raises: [AutoTLSError].} =
    let ip = $ip
    try:
      not (
        ip.startsWith("10.") or
        (ip.startsWith("172.") and parseInt(ip.split(".")[1]) in 16 .. 31) or
        ip.startsWith("192.168.") or ip.startsWith("127.") or ip.startsWith("169.254.")
      )
    except ValueError as exc:
      raise newException(AutoTLSError, "Failed to parse IP address", exc)

  proc getPublicIPAddress*(): IpAddress {.raises: [AutoTLSError].} =
    let ip = checkedGetPrimaryIPAddr()
    if not ip.isIPv4():
      raise newException(AutoTLSError, "Host does not have an IPv4 address")
    if not ip.isPublic():
      raise newException(AutoTLSError, "Host does not have a public IPv4 address")
    return ip

  proc asMoment*(dt: DateTime): Moment =
    let unixTime: int64 = dt.toTime.toUnix
    return Moment.init(unixTime, Second)

  proc encodePeerId*(peerId: PeerId): string {.raises: [AutoTLSError].} =
    var mh: MultiHash
    let decodeResult = MultiHash.decode(peerId.data, mh)
    if decodeResult.isErr() or decodeResult.get() == -1:
      raise
        newException(AutoTLSError, "Failed to decode PeerId: invalid multihash format")

    let cidResult = Cid.init(CIDv1, multiCodec("libp2p-key"), mh)
    if cidResult.isErr():
      raise newException(AutoTLSError, "Failed to initialize CID from multihash")

    return Base36.encode(cidResult.get().data.buffer)

  proc checkDNSRecords*(
      dnsResolver: DnsResolver,
      ipAddress: IpAddress,
      baseDomain: api.Domain,
      keyAuth: KeyAuthorization,
      retries: int = DefaultDnsRetries,
  ): Future[bool] {.async: (raises: [AutoTLSError, CancelledError]).} =
    # if my ip address is 100.10.10.3 then the ip4Domain will be:
    # 100-10-10-3.{peerIdBase36}.libp2p.direct
    # and acme challenge TXT domain will be:
    # _acme-challenge.{peerIdBase36}.libp2p.direct
    let dashedIpAddr = ($ipAddress).replace(".", "-")
    let acmeChalDomain = api.Domain("_acme-challenge." & baseDomain)
    let ip4Domain = api.Domain(dashedIpAddr & "." & baseDomain)

    var txt: seq[string]
    var ip4: seq[TransportAddress]
    for _ in 0 .. retries:
      txt = await dnsResolver.resolveTxt(acmeChalDomain)
      try:
        ip4 = await dnsResolver.resolveIp(ip4Domain, 0.Port)
      except CancelledError as exc:
        raise exc
      except CatchableError as exc:
        error "Failed to resolve IP", description = exc.msg # retry
      if txt.len > 0 and txt[0] == keyAuth and ip4.len > 0:
        return true
      await sleepAsync(DefaultDnsRetryTime)

    return false
@@ -15,7 +15,7 @@ runnableExamples:

{.push raises: [].}

import options, tables, chronos, chronicles, sequtils
import options, tables, chronos, chronicles, sequtils, uri
import
  switch,
  peerid,
@@ -23,25 +23,29 @@ import
  stream/connection,
  multiaddress,
  crypto/crypto,
  transports/[transport, tcptransport, memorytransport],
  transports/[transport, tcptransport, wstransport, memorytransport],
  muxers/[muxer, mplex/mplex, yamux/yamux],
  protocols/[identify, secure/secure, secure/noise, rendezvous],
  protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
  connmanager,
  upgrademngrs/muxedupgrade,
  observedaddrmanager,
  autotls/service,
  nameresolving/nameresolver,
  errors,
  utility
import services/wildcardresolverservice

export switch, peerid, peerinfo, connection, multiaddress, crypto, errors
export
  switch, peerid, peerinfo, connection, multiaddress, crypto, errors, TLSPrivateKey,
  TLSCertificate, TLSFlags, ServerFlags

const MemoryAutoAddress* = memorytransport.MemoryAutoAddress

type
  TransportProvider* {.public.} =
    proc(upgr: Upgrade, privateKey: PrivateKey): Transport {.gcsafe, raises: [].}
  TransportProvider* {.public.} = proc(
    upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
  ): Transport {.gcsafe, raises: [].}

  SecureProtocol* {.pure.} = enum
    Noise
@@ -63,6 +67,7 @@ type
    nameResolver: NameResolver
    peerStoreCapacity: Opt[int]
    autonat: bool
    autotls: AutotlsService
    circuitRelay: Relay
    rdv: RendezVous
    services: seq[Service]
@@ -154,7 +159,9 @@ proc withTransport*(
  let switch = SwitchBuilder
    .new()
    .withTransport(
      proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
      proc(
        upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
      ): Transport =
        TcpTransport.new(flags, upgr)
    )
    .build()
@@ -165,22 +172,34 @@ proc withTcpTransport*(
    b: SwitchBuilder, flags: set[ServerFlags] = {}
): SwitchBuilder {.public.} =
  b.withTransport(
    proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
    proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
      TcpTransport.new(flags, upgr)
  )

proc withWsTransport*(
    b: SwitchBuilder,
    tlsPrivateKey: TLSPrivateKey = nil,
    tlsCertificate: TLSCertificate = nil,
    tlsFlags: set[TLSFlags] = {},
    flags: set[ServerFlags] = {},
): SwitchBuilder =
  b.withTransport(
    proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
      WsTransport.new(upgr, tlsPrivateKey, tlsCertificate, tlsFlags, flags)
  )

when defined(libp2p_quic_support):
  import transports/quictransport

  proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
    b.withTransport(
      proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
      proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
        QuicTransport.new(upgr, privateKey)
    )

proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
  b.withTransport(
    proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
    proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
      MemoryTransport.new(upgr)
  )

@@ -238,6 +257,13 @@ proc withAutonat*(b: SwitchBuilder): SwitchBuilder =
  b.autonat = true
  b

when defined(libp2p_autotls_support):
  proc withAutotls*(
      b: SwitchBuilder, config: AutotlsConfig = AutotlsConfig.new()
  ): SwitchBuilder {.public.} =
    b.autotls = AutotlsService.new(config = config)
    b

proc withCircuitRelay*(b: SwitchBuilder, r: Relay = Relay.new()): SwitchBuilder =
  b.circuitRelay = r
  b
@@ -289,10 +315,13 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
    ms = MultistreamSelect.new()
    muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)

  if not b.autotls.isNil():
    b.services.insert(b.autotls, 0)

  let transports = block:
    var transports: seq[Transport]
    for tProvider in b.transports:
      transports.add(tProvider(muxedUpgrade, seckey))
      transports.add(tProvider(muxedUpgrade, seckey, b.autotls))
    transports

  if b.secureManagers.len == 0:
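Putting the builder changes above together, a WebSocket switch with AutoTLS could be assembled roughly as follows. This is a sketch under the assumption that the code is compiled with -d:libp2p_autotls_support (so withAutotls is defined); withRng, withAddress, withMplex and withNoise are pre-existing builder calls, and the listen address is a placeholder.

```nim
import libp2p

# Assemble a switch whose WebSocket transport can later be served with
# the ACME-issued certificate managed by AutotlsService.
let switch = SwitchBuilder
  .new()
  .withRng(newRng())
  .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0/ws").tryGet())
  .withWsTransport()   # new helper added in this change set
  .withAutotls()       # registers AutotlsService as the first service
  .withMplex()
  .withNoise()
  .build()
```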
@@ -15,6 +15,7 @@
|
||||
import tables, hashes
|
||||
import multibase, multicodec, multihash, vbuffer, varint, results
|
||||
import stew/base58
|
||||
import ./utils/sequninit
|
||||
|
||||
export results
|
||||
|
||||
@@ -123,7 +124,7 @@ proc decode(data: openArray[char]): Result[Cid, CidError] =
|
||||
return err(CidError.Incorrect)
|
||||
if len(data) == 46:
|
||||
if data[0] == 'Q' and data[1] == 'm':
|
||||
buffer = newSeq[byte](BTCBase58.decodedLength(len(data)))
|
||||
buffer = newSeqUninit[byte](BTCBase58.decodedLength(len(data)))
|
||||
if BTCBase58.decode(data, buffer, plen) != Base58Status.Success:
|
||||
return err(CidError.Incorrect)
|
||||
buffer.setLen(plen)
|
||||
@@ -131,7 +132,7 @@ proc decode(data: openArray[char]): Result[Cid, CidError] =
|
||||
let length = MultiBase.decodedLength(data[0], len(data))
|
||||
if length == -1:
|
||||
return err(CidError.Incorrect)
|
||||
buffer = newSeq[byte](length)
|
||||
buffer = newSeqUninit[byte](length)
|
||||
if MultiBase.decode(data, buffer, plen) != MultiBaseStatus.Success:
|
||||
return err(CidError.Incorrect)
|
||||
buffer.setLen(plen)
|
||||
|
||||
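The cid change above, and many of the buffer changes that follow, swap a zero-initialized newSeq scratch buffer for newSeqUninit from the new utils/sequninit module, since the bytes are fully overwritten before use and then trimmed with setLen. An illustrative sketch of that pattern; the import path and the decoder callback are assumptions, not part of this changeset:

import libp2p/utils/sequninit

proc decodeDemo(decode: proc(dst: var seq[byte]): int): seq[byte] =
  var buf = newSeqUninit[byte](1024) # scratch space; contents are undefined until written
  let used = decode(buf)             # decoder reports how many bytes it actually wrote
  buf.setLen(used)                   # shrink to the valid region before returning
  buf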
@@ -140,7 +140,7 @@ proc triggerConnEvent*(
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
warn "Exception in triggerConnEvents",
|
||||
warn "Exception in triggerConnEvent",
|
||||
description = exc.msg, peer = peerId, event = $event
|
||||
|
||||
proc addPeerEventHandler*(
|
||||
@@ -186,7 +186,7 @@ proc expectConnection*(
|
||||
if key in c.expectedConnectionsOverLimit:
|
||||
raise newException(
|
||||
AlreadyExpectingConnectionError,
|
||||
"Already expecting an incoming connection from that peer",
|
||||
"Already expecting an incoming connection from that peer: " & shortLog(p),
|
||||
)
|
||||
|
||||
let future = Future[Muxer].Raising([CancelledError]).init()
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
{.push raises: [].}
|
||||
|
||||
from strutils import split, strip, cmpIgnoreCase
|
||||
import ../utils/sequninit
|
||||
|
||||
const libp2p_pki_schemes* {.strdefine.} = "rsa,ed25519,secp256k1,ecnist"
|
||||
|
||||
@@ -176,7 +177,7 @@ proc shuffle*[T](rng: ref HmacDrbgContext, x: var openArray[T]) =
|
||||
if x.len == 0:
|
||||
return
|
||||
|
||||
var randValues = newSeqUninitialized[byte](len(x) * 2)
|
||||
var randValues = newSeqUninit[byte](len(x) * 2)
|
||||
hmacDrbgGenerate(rng[], randValues)
|
||||
|
||||
for i in countdown(x.high, 1):
|
||||
@@ -873,7 +874,7 @@ proc stretchKeys*(
|
||||
var seed = "key expansion"
|
||||
result.macsize = 20
|
||||
let length = result.ivsize + result.keysize + result.macsize
|
||||
result.data = newSeq[byte](2 * length)
|
||||
result.data = newSeqUninit[byte](2 * length)
|
||||
|
||||
if hashType == "SHA256":
|
||||
makeSecret(result.data, HMAC[sha256], sharedSecret, seed)
|
||||
@@ -904,7 +905,7 @@ template macOpenArray*(secret: Secret, id: int): untyped =
|
||||
|
||||
proc iv*(secret: Secret, id: int): seq[byte] {.inline.} =
|
||||
## Get array of bytes with with initial vector.
|
||||
result = newSeq[byte](secret.ivsize)
|
||||
result = newSeqUninit[byte](secret.ivsize)
|
||||
var offset =
|
||||
if id == 0:
|
||||
0
|
||||
@@ -913,7 +914,7 @@ proc iv*(secret: Secret, id: int): seq[byte] {.inline.} =
|
||||
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.ivsize)
|
||||
|
||||
proc key*(secret: Secret, id: int): seq[byte] {.inline.} =
|
||||
result = newSeq[byte](secret.keysize)
|
||||
result = newSeqUninit[byte](secret.keysize)
|
||||
var offset =
|
||||
if id == 0:
|
||||
0
|
||||
@@ -923,7 +924,7 @@ proc key*(secret: Secret, id: int): seq[byte] {.inline.} =
|
||||
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.keysize)
|
||||
|
||||
proc mac*(secret: Secret, id: int): seq[byte] {.inline.} =
|
||||
result = newSeq[byte](secret.macsize)
|
||||
result = newSeqUninit[byte](secret.macsize)
|
||||
var offset =
|
||||
if id == 0:
|
||||
0
|
||||
|
||||
@@ -23,6 +23,7 @@ import minasn1
|
||||
export minasn1.Asn1Error
|
||||
import stew/ctops
|
||||
import results
|
||||
import ../utils/sequninit
|
||||
|
||||
import ../utility
|
||||
|
||||
@@ -458,7 +459,7 @@ proc getBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
|
||||
if isNil(seckey):
|
||||
return err(EcKeyIncorrectError)
|
||||
if seckey.key.curve in EcSupportedCurvesCint:
|
||||
var res = newSeq[byte]()
|
||||
var res = newSeqUninit[byte](0)
|
||||
let length = ?seckey.toBytes(res)
|
||||
res.setLen(length)
|
||||
discard ?seckey.toBytes(res)
|
||||
@@ -471,7 +472,7 @@ proc getBytes*(pubkey: EcPublicKey): EcResult[seq[byte]] =
|
||||
if isNil(pubkey):
|
||||
return err(EcKeyIncorrectError)
|
||||
if pubkey.key.curve in EcSupportedCurvesCint:
|
||||
var res = newSeq[byte]()
|
||||
var res = newSeqUninit[byte](0)
|
||||
let length = ?pubkey.toBytes(res)
|
||||
res.setLen(length)
|
||||
discard ?pubkey.toBytes(res)
|
||||
@@ -483,7 +484,7 @@ proc getBytes*(sig: EcSignature): EcResult[seq[byte]] =
|
||||
## Serialize EC signature ``sig`` to ASN.1 DER binary form and return it.
|
||||
if isNil(sig):
|
||||
return err(EcSignatureError)
|
||||
var res = newSeq[byte]()
|
||||
var res = newSeqUninit[byte](0)
|
||||
let length = ?sig.toBytes(res)
|
||||
res.setLen(length)
|
||||
discard ?sig.toBytes(res)
|
||||
@@ -494,7 +495,7 @@ proc getRawBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
|
||||
if isNil(seckey):
|
||||
return err(EcKeyIncorrectError)
|
||||
if seckey.key.curve in EcSupportedCurvesCint:
|
||||
var res = newSeq[byte]()
|
||||
var res = newSeqUninit[byte](0)
|
||||
let length = ?seckey.toRawBytes(res)
|
||||
res.setLen(length)
|
||||
discard ?seckey.toRawBytes(res)
|
||||
@@ -507,7 +508,7 @@ proc getRawBytes*(pubkey: EcPublicKey): EcResult[seq[byte]] =
|
||||
if isNil(pubkey):
|
||||
return err(EcKeyIncorrectError)
|
||||
if pubkey.key.curve in EcSupportedCurvesCint:
|
||||
var res = newSeq[byte]()
|
||||
var res = newSeqUninit[byte](0)
|
||||
let length = ?pubkey.toRawBytes(res)
|
||||
res.setLen(length)
|
||||
discard ?pubkey.toRawBytes(res)
|
||||
@@ -519,7 +520,7 @@ proc getRawBytes*(sig: EcSignature): EcResult[seq[byte]] =
|
||||
## Serialize EC signature ``sig`` to raw binary form and return it.
|
||||
if isNil(sig):
|
||||
return err(EcSignatureError)
|
||||
var res = newSeq[byte]()
|
||||
var res = newSeqUninit[byte](0)
|
||||
let length = ?sig.toBytes(res)
|
||||
res.setLen(length)
|
||||
discard ?sig.toBytes(res)
|
||||
@@ -929,7 +930,7 @@ proc getSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey): seq[byte] =
|
||||
var data: array[Secret521Length, byte]
|
||||
let res = toSecret(pubkey, seckey, data)
|
||||
if res > 0:
|
||||
result = newSeq[byte](res)
|
||||
result = newSeqUninit[byte](res)
|
||||
copyMem(addr result[0], addr data[0], res)
|
||||
|
||||
proc sign*[T: byte | char](
|
||||
@@ -943,7 +944,7 @@ proc sign*[T: byte | char](
|
||||
var impl = ecGetDefault()
|
||||
if seckey.key.curve in EcSupportedCurvesCint:
|
||||
var sig = new EcSignature
|
||||
sig.buffer = newSeq[byte](256)
|
||||
sig.buffer = newSeqUninit[byte](256)
|
||||
var kv = addr sha256Vtable
|
||||
kv.init(addr hc.vtable)
|
||||
if len(message) > 0:
|
||||
|
||||
@@ -17,6 +17,7 @@ export results
|
||||
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
|
||||
import nimcrypto/utils as ncrutils
|
||||
import ../utility
|
||||
import ../utils/sequninit
|
||||
|
||||
type
|
||||
Asn1Error* {.pure.} = enum
|
||||
@@ -679,15 +680,15 @@ proc init*(t: typedesc[Asn1Buffer], data: string): Asn1Buffer =
|
||||
|
||||
proc init*(t: typedesc[Asn1Buffer]): Asn1Buffer =
|
||||
## Initialize empty ``Asn1Buffer``.
|
||||
Asn1Buffer(buffer: newSeq[byte]())
|
||||
Asn1Buffer(buffer: newSeqUninit[byte](0))
|
||||
|
||||
proc init*(t: typedesc[Asn1Composite], tag: Asn1Tag): Asn1Composite =
|
||||
## Initialize ``Asn1Composite`` with tag ``tag``.
|
||||
Asn1Composite(tag: tag, buffer: newSeq[byte]())
|
||||
Asn1Composite(tag: tag, buffer: newSeqUninit[byte](0))
|
||||
|
||||
proc init*(t: typedesc[Asn1Composite], idx: int): Asn1Composite =
|
||||
## Initialize ``Asn1Composite`` with tag context-specific id ``id``.
|
||||
Asn1Composite(tag: Asn1Tag.Context, idx: idx, buffer: newSeq[byte]())
|
||||
Asn1Composite(tag: Asn1Tag.Context, idx: idx, buffer: newSeqUninit[byte](0))
|
||||
|
||||
proc `$`*(buffer: Asn1Buffer): string =
|
||||
## Return string representation of ``buffer``.
|
||||
|
||||
@@ -21,6 +21,7 @@ import results
|
||||
import stew/ctops
|
||||
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
|
||||
import nimcrypto/utils as ncrutils
|
||||
import ../utils/sequninit
|
||||
|
||||
export Asn1Error, results
|
||||
|
||||
@@ -124,7 +125,7 @@ proc random*[T: RsaKP](
|
||||
length = eko + ((bits + 7) shr 3)
|
||||
|
||||
let res = new T
|
||||
res.buffer = newSeq[byte](length)
|
||||
res.buffer = newSeqUninit[byte](length)
|
||||
|
||||
var keygen = rsaKeygenGetDefault()
|
||||
|
||||
@@ -169,7 +170,7 @@ proc copy*[T: RsaPKI](key: T): T =
|
||||
key.seck.dqlen.uint + key.seck.iqlen.uint + key.pubk.nlen.uint +
|
||||
key.pubk.elen.uint + key.pexplen.uint
|
||||
result = new RsaPrivateKey
|
||||
result.buffer = newSeq[byte](length)
|
||||
result.buffer = newSeqUninit[byte](length)
|
||||
let po: uint = 0
|
||||
let qo = po + key.seck.plen
|
||||
let dpo = qo + key.seck.qlen
|
||||
@@ -207,7 +208,7 @@ proc copy*[T: RsaPKI](key: T): T =
|
||||
if len(key.buffer) > 0:
|
||||
let length = key.key.nlen + key.key.elen
|
||||
result = new RsaPublicKey
|
||||
result.buffer = newSeq[byte](length)
|
||||
result.buffer = newSeqUninit[byte](length)
|
||||
let no = 0
|
||||
let eo = no + key.key.nlen
|
||||
copyMem(addr result.buffer[no], key.key.n, key.key.nlen)
|
||||
@@ -226,7 +227,7 @@ proc getPublicKey*(key: RsaPrivateKey): RsaPublicKey =
|
||||
doAssert(not isNil(key))
|
||||
let length = key.pubk.nlen + key.pubk.elen
|
||||
result = new RsaPublicKey
|
||||
result.buffer = newSeq[byte](length)
|
||||
result.buffer = newSeqUninit[byte](length)
|
||||
result.key.n = addr result.buffer[0]
|
||||
result.key.e = addr result.buffer[key.pubk.nlen]
|
||||
copyMem(addr result.buffer[0], cast[pointer](key.pubk.n), key.pubk.nlen)
|
||||
@@ -357,7 +358,7 @@ proc getBytes*(key: RsaPrivateKey): RsaResult[seq[byte]] =
|
||||
## return it.
|
||||
if isNil(key):
|
||||
return err(RsaKeyIncorrectError)
|
||||
var res = newSeq[byte](4096)
|
||||
var res = newSeqUninit[byte](4096)
|
||||
let length = ?key.toBytes(res)
|
||||
if length > 0:
|
||||
res.setLen(length)
|
||||
@@ -370,7 +371,7 @@ proc getBytes*(key: RsaPublicKey): RsaResult[seq[byte]] =
|
||||
## return it.
|
||||
if isNil(key):
|
||||
return err(RsaKeyIncorrectError)
|
||||
var res = newSeq[byte](4096)
|
||||
var res = newSeqUninit[byte](4096)
|
||||
let length = ?key.toBytes(res)
|
||||
if length > 0:
|
||||
res.setLen(length)
|
||||
@@ -382,7 +383,7 @@ proc getBytes*(sig: RsaSignature): RsaResult[seq[byte]] =
|
||||
## Serialize RSA signature ``sig`` to raw binary form and return it.
|
||||
if isNil(sig):
|
||||
return err(RsaSignatureError)
|
||||
var res = newSeq[byte](4096)
|
||||
var res = newSeqUninit[byte](4096)
|
||||
let length = ?sig.toBytes(res)
|
||||
if length > 0:
|
||||
res.setLen(length)
|
||||
@@ -753,7 +754,7 @@ proc sign*[T: byte | char](
|
||||
var hash: array[32, byte]
|
||||
let impl = rsaPkcs1SignGetDefault()
|
||||
var res = new RsaSignature
|
||||
res.buffer = newSeq[byte]((key.seck.nBitlen + 7) shr 3)
|
||||
res.buffer = newSeqUninit[byte]((key.seck.nBitlen + 7) shr 3)
|
||||
var kv = addr sha256Vtable
|
||||
kv.init(addr hc.vtable)
|
||||
if len(message) > 0:
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
|
||||
import bearssl/rand
|
||||
import secp256k1, results, stew/byteutils, nimcrypto/[hash, sha2]
|
||||
import ../utils/sequninit
|
||||
|
||||
export sha2, results, rand
|
||||
|
||||
@@ -85,8 +86,9 @@ proc init*(sig: var SkSignature, data: string): SkResult[void] =
|
||||
var buffer: seq[byte]
|
||||
try:
|
||||
buffer = hexToSeqByte(data)
|
||||
except ValueError:
|
||||
return err("secp: Hex to bytes failed")
|
||||
except ValueError as e:
|
||||
let errMsg = "secp: Hex to bytes failed: " & e.msg
|
||||
return err(errMsg.cstring)
|
||||
init(sig, buffer)
|
||||
|
||||
proc init*(t: typedesc[SkPrivateKey], data: openArray[byte]): SkResult[SkPrivateKey] =
|
||||
@@ -181,7 +183,7 @@ proc getBytes*(key: SkPublicKey): seq[byte] {.inline.} =
|
||||
|
||||
proc getBytes*(sig: SkSignature): seq[byte] {.inline.} =
|
||||
## Serialize Secp256k1 `signature` and return it.
|
||||
result = newSeq[byte](72)
|
||||
result = newSeqUninit[byte](72)
|
||||
let length = toBytes(sig, result)
|
||||
result.setLen(length)
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@ import pkg/[chronos, chronicles]
|
||||
import ../varint, ../multiaddress, ../multicodec, ../cid, ../peerid
|
||||
import ../wire, ../multihash, ../protobuf/minprotobuf, ../errors
|
||||
import ../crypto/crypto, ../utility
|
||||
import ../utils/sequninit
|
||||
|
||||
export peerid, multiaddress, multicodec, multihash, cid, crypto, wire, errors
|
||||
|
||||
@@ -496,7 +497,7 @@ proc recvMessage(
|
||||
size: uint
|
||||
length: int
|
||||
res: VarintResult[void]
|
||||
var buffer = newSeq[byte](10)
|
||||
var buffer = newSeqUninit[byte](10)
|
||||
try:
|
||||
for i in 0 ..< len(buffer):
|
||||
await conn.readExactly(addr buffer[i], 1)
|
||||
@@ -595,13 +596,13 @@ template exceptionToAssert(body: untyped): untyped =
|
||||
try:
|
||||
res = body
|
||||
except OSError as exc:
|
||||
raise exc
|
||||
raise newException(OSError, "failure in exceptionToAssert: " & exc.msg, exc)
|
||||
except IOError as exc:
|
||||
raise exc
|
||||
raise newException(IOError, "failure in exceptionToAssert: " & exc.msg, exc)
|
||||
except Defect as exc:
|
||||
raise exc
|
||||
raise newException(Defect, "failure in exceptionToAssert: " & exc.msg, exc)
|
||||
except Exception as exc:
|
||||
raiseAssert exc.msg
|
||||
raiseAssert "Exception captured in exceptionToAssert: " & exc.msg
|
||||
when defined(nimHasWarnBareExcept):
|
||||
{.pop.}
|
||||
res
|
||||
@@ -957,8 +958,7 @@ proc openStream*(
|
||||
var res: seq[byte]
|
||||
if pb.getRequiredField(ResponseType.STREAMINFO.int, res).isOk():
|
||||
let resPb = initProtoBuffer(res)
|
||||
# stream.peer = newSeq[byte]()
|
||||
var raddress = newSeq[byte]()
|
||||
var raddress = newSeqUninit[byte](0)
|
||||
stream.protocol = ""
|
||||
resPb.getRequiredField(1, stream.peer).tryGet()
|
||||
resPb.getRequiredField(2, raddress).tryGet()
|
||||
@@ -967,9 +967,9 @@ proc openStream*(
|
||||
stream.flags.incl(Outbound)
|
||||
stream.transp = transp
|
||||
result = stream
|
||||
except ResultError[ProtoError]:
|
||||
except ResultError[ProtoError] as e:
|
||||
await api.closeConnection(transp)
|
||||
raise newException(DaemonLocalError, "Wrong message type!")
|
||||
raise newException(DaemonLocalError, "Wrong message type: " & e.msg, e)
|
||||
|
||||
proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
|
||||
# must not specify raised exceptions as this is StreamCallback from chronos
|
||||
@@ -977,7 +977,7 @@ proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
|
||||
var message = await transp.recvMessage()
|
||||
var pb = initProtoBuffer(message)
|
||||
var stream = new P2PStream
|
||||
var raddress = newSeq[byte]()
|
||||
var raddress = newSeqUninit[byte](0)
|
||||
stream.protocol = ""
|
||||
pb.getRequiredField(1, stream.peer).tryGet()
|
||||
pb.getRequiredField(2, raddress).tryGet()
|
||||
@@ -1023,10 +1023,10 @@ proc addHandler*(
|
||||
api.servers.add(P2PServer(server: server, address: maddress))
|
||||
except DaemonLocalError as e:
|
||||
await removeHandler()
|
||||
raise e
|
||||
raise newException(DaemonLocalError, "Could not add stream handler: " & e.msg, e)
|
||||
except TransportError as e:
|
||||
await removeHandler()
|
||||
raise e
|
||||
raise newException(TransportError, "Could not add stream handler: " & e.msg, e)
|
||||
except CancelledError as e:
|
||||
await removeHandler()
|
||||
raise e
|
||||
@@ -1116,7 +1116,7 @@ proc dhtGetSinglePeerInfo(pb: ProtoBuffer): PeerInfo {.raises: [DaemonLocalError
|
||||
raise newException(DaemonLocalError, "Missing required field `peer`!")
|
||||
|
||||
proc dhtGetSingleValue(pb: ProtoBuffer): seq[byte] {.raises: [DaemonLocalError].} =
|
||||
result = newSeq[byte]()
|
||||
result = newSeqUninit[byte](0)
|
||||
if pb.getRequiredField(3, result).isErr():
|
||||
raise newException(DaemonLocalError, "Missing field `value`!")
|
||||
|
||||
@@ -1453,8 +1453,8 @@ proc pubsubPublish*(
|
||||
await api.closeConnection(transp)
|
||||
|
||||
proc getPubsubMessage*(pb: ProtoBuffer): PubSubMessage =
|
||||
result.data = newSeq[byte]()
|
||||
result.seqno = newSeq[byte]()
|
||||
result.data = newSeqUninit[byte](0)
|
||||
result.seqno = newSeqUninit[byte](0)
|
||||
discard pb.getField(1, result.peer)
|
||||
discard pb.getField(2, result.data)
|
||||
discard pb.getField(3, result.seqno)
|
||||
@@ -1503,10 +1503,14 @@ proc pubsubSubscribe*(
|
||||
result = ticket
|
||||
except DaemonLocalError as exc:
|
||||
await api.closeConnection(transp)
|
||||
raise exc
|
||||
raise newException(
|
||||
DaemonLocalError, "Could not subscribe to topic '" & topic & "': " & exc.msg, exc
|
||||
)
|
||||
except TransportError as exc:
|
||||
await api.closeConnection(transp)
|
||||
raise exc
|
||||
raise newException(
|
||||
TransportError, "Could not subscribe to topic '" & topic & "': " & exc.msg, exc
|
||||
)
|
||||
except CancelledError as exc:
|
||||
await api.closeConnection(transp)
|
||||
raise exc
|
||||
|
||||
@@ -127,8 +127,8 @@ proc expandDnsAddr(
|
||||
var peerIdBytes: seq[byte]
|
||||
try:
|
||||
peerIdBytes = lastPart.protoArgument().tryGet()
|
||||
except ResultError[string]:
|
||||
raiseAssert "expandDnsAddr failed in protoArgument: " & getCurrentExceptionMsg()
|
||||
except ResultError[string] as e:
|
||||
raiseAssert "expandDnsAddr failed in expandDnsAddr protoArgument: " & e.msg
|
||||
|
||||
let addrPeerId = PeerId.init(peerIdBytes).tryGet()
|
||||
result.add((resolvedAddress[0 ..^ 2].tryGet(), Opt.some(addrPeerId)))
|
||||
@@ -178,7 +178,7 @@ proc internalConnect(
|
||||
dir = Direction.Out,
|
||||
): Future[Muxer] {.async: (raises: [DialFailedError, CancelledError]).} =
|
||||
if Opt.some(self.localPeerId) == peerId:
|
||||
raise newException(DialFailedError, "can't dial self!")
|
||||
raise newException(DialFailedError, "internalConnect can't dial self!")
|
||||
|
||||
# Ensure there's only one in-flight attempt per peer
|
||||
let lock = self.dialLock.mgetOrPut(peerId.get(default(PeerId)), newAsyncLock())
|
||||
@@ -186,8 +186,8 @@ proc internalConnect(
|
||||
defer:
|
||||
try:
|
||||
lock.release()
|
||||
except AsyncLockError:
|
||||
raiseAssert "lock must have been acquired in line above"
|
||||
except AsyncLockError as e:
|
||||
raiseAssert "lock must have been acquired in line above: " & e.msg
|
||||
|
||||
if reuseConnection:
|
||||
peerId.withValue(peerId):
|
||||
@@ -198,7 +198,9 @@ proc internalConnect(
|
||||
try:
|
||||
self.connManager.getOutgoingSlot(forceDial)
|
||||
except TooManyConnectionsError as exc:
|
||||
raise newException(DialFailedError, exc.msg)
|
||||
raise newException(
|
||||
DialFailedError, "failed getOutgoingSlot in internalConnect: " & exc.msg, exc
|
||||
)
|
||||
|
||||
let muxed =
|
||||
try:
|
||||
@@ -208,11 +210,15 @@ proc internalConnect(
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
slot.release()
|
||||
raise newException(DialFailedError, exc.msg)
|
||||
raise newException(
|
||||
DialFailedError, "failed dialAndUpgrade in internalConnect: " & exc.msg, exc
|
||||
)
|
||||
|
||||
slot.trackMuxer(muxed)
|
||||
if isNil(muxed): # None of the addresses connected
|
||||
raise newException(DialFailedError, "Unable to establish outgoing link")
|
||||
raise newException(
|
||||
DialFailedError, "Unable to establish outgoing link in internalConnect"
|
||||
)
|
||||
|
||||
try:
|
||||
self.connManager.storeMuxer(muxed)
|
||||
@@ -228,7 +234,11 @@ proc internalConnect(
|
||||
except CatchableError as exc:
|
||||
trace "Failed to finish outgoing upgrade", description = exc.msg
|
||||
await muxed.close()
|
||||
raise newException(DialFailedError, "Failed to finish outgoing upgrade")
|
||||
raise newException(
|
||||
DialFailedError,
|
||||
"Failed to finish outgoing upgrade in internalConnect: " & exc.msg,
|
||||
exc,
|
||||
)
|
||||
|
||||
method connect*(
|
||||
self: Dialer,
|
||||
@@ -260,7 +270,7 @@ method connect*(
|
||||
|
||||
if allowUnknownPeerId == false:
|
||||
raise newException(
|
||||
DialFailedError, "Address without PeerID and unknown peer id disabled!"
|
||||
DialFailedError, "Address without PeerID and unknown peer id disabled in connect"
|
||||
)
|
||||
|
||||
return
|
||||
@@ -273,7 +283,7 @@ proc negotiateStream(
|
||||
let selected = await MultistreamSelect.select(conn, protos)
|
||||
if not protos.contains(selected):
|
||||
await conn.closeWithEOF()
|
||||
raise newException(DialFailedError, "Unable to select sub-protocol " & $protos)
|
||||
raise newException(DialFailedError, "Unable to select sub-protocol: " & $protos)
|
||||
|
||||
return conn
|
||||
|
||||
@@ -289,13 +299,13 @@ method tryDial*(
|
||||
try:
|
||||
let mux = await self.dialAndUpgrade(Opt.some(peerId), addrs)
|
||||
if mux.isNil():
|
||||
raise newException(DialFailedError, "No valid multiaddress")
|
||||
raise newException(DialFailedError, "No valid multiaddress in tryDial")
|
||||
await mux.close()
|
||||
return mux.connection.observedAddr
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
raise newException(DialFailedError, exc.msg)
|
||||
raise newException(DialFailedError, "tryDial failed: " & exc.msg, exc)
|
||||
|
||||
method dial*(
|
||||
self: Dialer, peerId: PeerId, protos: seq[string]
|
||||
@@ -309,14 +319,17 @@ method dial*(
|
||||
try:
|
||||
let stream = await self.connManager.getStream(peerId)
|
||||
if stream.isNil:
|
||||
raise newException(DialFailedError, "Couldn't get muxed stream")
|
||||
raise newException(
|
||||
DialFailedError,
|
||||
"Couldn't get muxed stream in dial for peer_id: " & shortLog(peerId),
|
||||
)
|
||||
return await self.negotiateStream(stream, protos)
|
||||
except CancelledError as exc:
|
||||
trace "Dial canceled"
|
||||
trace "Dial canceled", description = exc.msg
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
trace "Error dialing", description = exc.msg
|
||||
raise newException(DialFailedError, exc.msg)
|
||||
raise newException(DialFailedError, "failed dial existing: " & exc.msg)
|
||||
|
||||
method dial*(
|
||||
self: Dialer,
|
||||
@@ -347,17 +360,20 @@ method dial*(
|
||||
stream = await self.connManager.getStream(conn)
|
||||
|
||||
if isNil(stream):
|
||||
raise newException(DialFailedError, "Couldn't get muxed stream")
|
||||
raise newException(
|
||||
DialFailedError,
|
||||
"Couldn't get muxed stream in new dial for remote_peer_id: " & shortLog(peerId),
|
||||
)
|
||||
|
||||
return await self.negotiateStream(stream, protos)
|
||||
except CancelledError as exc:
|
||||
trace "Dial canceled", conn
|
||||
trace "Dial canceled", conn, description = exc.msg
|
||||
await cleanup()
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
debug "Error dialing", conn, description = exc.msg
|
||||
await cleanup()
|
||||
raise newException(DialFailedError, exc.msg)
|
||||
raise newException(DialFailedError, "failed new dial: " & exc.msg, exc)
|
||||
|
||||
method addTransport*(self: Dialer, t: Transport) =
|
||||
self.transports &= t
|
||||
|
||||
@@ -113,7 +113,7 @@ proc add*(dm: DiscoveryManager, di: DiscoveryInterface) =
|
||||
try:
|
||||
query.peers.putNoWait(pa)
|
||||
except AsyncQueueFullError as exc:
|
||||
debug "Cannot push discovered peer to queue"
|
||||
debug "Cannot push discovered peer to queue", description = exc.msg
|
||||
|
||||
proc request*(dm: DiscoveryManager, pa: PeerAttributes): DiscoveryQuery =
|
||||
var query = DiscoveryQuery(attr: pa, peers: newAsyncQueue[PeerAttributes]())
|
||||
|
||||
@@ -27,6 +27,7 @@ import
|
||||
utility
|
||||
import stew/[base58, base32, endians2]
|
||||
export results, vbuffer, errors, utility
|
||||
import ./utils/sequninit
|
||||
|
||||
logScope:
|
||||
topics = "libp2p multiaddress"
|
||||
@@ -223,7 +224,7 @@ proc p2pStB(s: string, vb: var VBuffer): bool =
|
||||
|
||||
proc p2pBtS(vb: var VBuffer, s: var string): bool =
|
||||
## P2P address bufferToString() implementation.
|
||||
var address = newSeq[byte]()
|
||||
var address = newSeqUninit[byte](0)
|
||||
if vb.readSeq(address) > 0:
|
||||
var mh: MultiHash
|
||||
if MultiHash.decode(address, mh).isOk:
|
||||
@@ -232,7 +233,7 @@ proc p2pBtS(vb: var VBuffer, s: var string): bool =
|
||||
|
||||
proc p2pVB(vb: var VBuffer): bool =
|
||||
## P2P address validateBuffer() implementation.
|
||||
var address = newSeq[byte]()
|
||||
var address = newSeqUninit[byte](0)
|
||||
if vb.readSeq(address) > 0:
|
||||
var mh: MultiHash
|
||||
if MultiHash.decode(address, mh).isOk:
|
||||
@@ -555,7 +556,7 @@ proc protoAddress*(ma: MultiAddress): MaResult[seq[byte]] =
|
||||
##
|
||||
## If current MultiAddress do not have argument value, then result array will
|
||||
## be empty.
|
||||
var buffer = newSeq[byte](len(ma.data.buffer))
|
||||
var buffer = newSeqUninit[byte](len(ma.data.buffer))
|
||||
let res = ?protoArgument(ma, buffer)
|
||||
buffer.setLen(res)
|
||||
ok(buffer)
|
||||
@@ -569,7 +570,7 @@ proc protoArgument*(ma: MultiAddress): MaResult[seq[byte]] =
|
||||
|
||||
proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
|
||||
var header: uint64
|
||||
var data = newSeq[byte]()
|
||||
var data = newSeqUninit[byte](0)
|
||||
var offset = 0
|
||||
var vb = ma
|
||||
var res: MultiAddress
|
||||
@@ -643,7 +644,7 @@ proc `[]`*(ma: MultiAddress, slice: HSlice): MaResult[MultiAddress] {.inline.} =
|
||||
iterator items*(ma: MultiAddress): MaResult[MultiAddress] =
|
||||
## Iterates over all addresses inside of MultiAddress ``ma``.
|
||||
var header: uint64
|
||||
var data = newSeq[byte]()
|
||||
var data = newSeqUninit[byte](0)
|
||||
var vb = ma
|
||||
while true:
|
||||
if vb.data.isEmpty():
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
import tables
|
||||
import results
|
||||
import stew/[base32, base58, base64]
|
||||
import ./utils/sequninit
|
||||
|
||||
type
|
||||
MultiBaseStatus* {.pure.} = enum
|
||||
@@ -533,7 +534,7 @@ proc decode*(
|
||||
let empty: seq[byte] = @[]
|
||||
ok(empty) # empty
|
||||
else:
|
||||
var buffer = newSeq[byte](mb.decl(length - 1))
|
||||
var buffer = newSeqUninit[byte](mb.decl(length - 1))
|
||||
var outlen = 0
|
||||
let res = mb.decr(inbytes.toOpenArray(1, length - 1), buffer, outlen)
|
||||
if res != MultiBaseStatus.Success:
|
||||
|
||||
@@ -567,7 +567,7 @@ proc init*(mhtype: typedesc[MultiHash], data: string): MhResult[MultiHash] {.inl
|
||||
proc init58*(mhtype: typedesc[MultiHash], data: string): MultiHash {.inline.} =
|
||||
## Create MultiHash from BASE58 encoded string representation ``data``.
|
||||
if MultiHash.decode(Base58.decode(data), result) == -1:
|
||||
raise newException(MultihashError, "Incorrect MultiHash binary format")
|
||||
raise newException(MultihashError, "Incorrect MultiHash binary format in init58")
|
||||
|
||||
proc cmp(a: openArray[byte], b: openArray[byte]): bool {.inline.} =
|
||||
if len(a) != len(b):
|
||||
|
||||
@@ -11,8 +11,7 @@
|
||||
|
||||
import std/[oids, strformat]
|
||||
import pkg/[chronos, chronicles, metrics]
|
||||
import
|
||||
./coder, ../muxer, ../../stream/[bufferstream, connection, streamseq], ../../peerinfo
|
||||
import ./coder, ../muxer, ../../stream/[bufferstream, connection], ../../peerinfo
|
||||
|
||||
export connection
|
||||
|
||||
@@ -87,7 +86,7 @@ proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
raise exc
|
||||
except LPStreamError as exc:
|
||||
await s.conn.close()
|
||||
raise exc
|
||||
raise newException(LPStreamError, "Opening LPChannel failed: " & exc.msg, exc)
|
||||
|
||||
method closed*(s: LPChannel): bool =
|
||||
s.closedLocal
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
import sequtils, std/[tables]
|
||||
import chronos, chronicles, metrics, stew/[endians2, byteutils, objects]
|
||||
import ../muxer, ../../stream/connection
|
||||
import ../../utils/[zeroqueue, sequninit]
|
||||
|
||||
export muxer
|
||||
|
||||
@@ -151,7 +152,7 @@ type
|
||||
opened: bool
|
||||
isSending: bool
|
||||
sendQueue: seq[ToSend]
|
||||
recvQueue: seq[byte]
|
||||
recvQueue: ZeroQueue
|
||||
isReset: bool
|
||||
remoteReset: bool
|
||||
closedRemotely: AsyncEvent
|
||||
@@ -229,7 +230,6 @@ proc reset(channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).}
|
||||
for (d, s, fut) in channel.sendQueue:
|
||||
fut.fail(newLPStreamEOFError())
|
||||
channel.sendQueue = @[]
|
||||
channel.recvQueue = @[]
|
||||
channel.sendWindow = 0
|
||||
if not channel.closedLocally:
|
||||
if isLocal and not channel.isSending:
|
||||
@@ -257,7 +257,7 @@ proc updateRecvWindow(
|
||||
return
|
||||
|
||||
let delta = channel.maxRecvWindow - inWindow
|
||||
channel.recvWindow.inc(delta)
|
||||
channel.recvWindow.inc(delta.int)
|
||||
await channel.conn.write(YamuxHeader.windowUpdate(channel.id, delta.uint32))
|
||||
trace "increasing the recvWindow", delta
|
||||
|
||||
@@ -279,7 +279,7 @@ method readOnce*(
|
||||
newLPStreamConnDownError()
|
||||
if channel.isEof:
|
||||
raise newLPStreamRemoteClosedError()
|
||||
if channel.recvQueue.len == 0:
|
||||
if channel.recvQueue.isEmpty():
|
||||
channel.receivedData.clear()
|
||||
let
|
||||
closedRemotelyFut = channel.closedRemotely.wait()
|
||||
@@ -290,28 +290,23 @@ method readOnce*(
|
||||
if not receivedDataFut.finished():
|
||||
await receivedDataFut.cancelAndWait()
|
||||
await closedRemotelyFut or receivedDataFut
|
||||
if channel.closedRemotely.isSet() and channel.recvQueue.len == 0:
|
||||
if channel.closedRemotely.isSet() and channel.recvQueue.isEmpty():
|
||||
channel.isEof = true
|
||||
return
|
||||
0 # we return 0 to indicate that the channel is closed for reading from now on
|
||||
|
||||
let toRead = min(channel.recvQueue.len, nbytes)
|
||||
|
||||
var p = cast[ptr UncheckedArray[byte]](pbytes)
|
||||
toOpenArray(p, 0, nbytes - 1)[0 ..< toRead] =
|
||||
channel.recvQueue.toOpenArray(0, toRead - 1)
|
||||
channel.recvQueue = channel.recvQueue[toRead ..^ 1]
|
||||
let consumed = channel.recvQueue.consumeTo(pbytes, nbytes)
|
||||
|
||||
# We made some room in the recv buffer let the peer know
|
||||
await channel.updateRecvWindow()
|
||||
channel.activity = true
|
||||
return toRead
|
||||
return consumed
|
||||
|
||||
proc gotDataFromRemote(
|
||||
channel: YamuxChannel, b: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
channel.recvWindow -= b.len
|
||||
channel.recvQueue = channel.recvQueue.concat(b)
|
||||
channel.recvQueue.push(b)
|
||||
channel.receivedData.fire()
|
||||
when defined(libp2p_yamux_metrics):
|
||||
libp2p_yamux_recv_queue.observe(channel.recvQueue.len.int64)
|
||||
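The Yamux receive path above now buffers incoming data in a ZeroQueue instead of concatenating and re-slicing a seq[byte] on every read. Only push, consumeTo, isEmpty and len appear in this diff; the snippet below is an illustrative sketch of that API, with the import path assumed:

import libp2p/utils/zeroqueue

var q: ZeroQueue
q.push(@[1'u8, 2, 3, 4, 5])               # gotDataFromRemote appends whole chunks

var dst: array[3, byte]
let n = q.consumeTo(addr dst[0], dst.len) # readOnce copies out up to nbytes
doAssert n == 3
doAssert not q.isEmpty()                  # two bytes remain queued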
@@ -344,7 +339,7 @@ proc trySend(
|
||||
bytesAvailable = channel.lengthSendQueue()
|
||||
toSend = min(channel.sendWindow, bytesAvailable)
|
||||
var
|
||||
sendBuffer = newSeqUninitialized[byte](toSend + 12)
|
||||
sendBuffer = newSeqUninit[byte](toSend + 12)
|
||||
header = YamuxHeader.data(channel.id, toSend.uint32)
|
||||
inBuffer = 0
|
||||
|
||||
@@ -512,7 +507,15 @@ method close*(m: Yamux) {.async: (raises: []).} =
|
||||
trace "Closing yamux"
|
||||
let channels = toSeq(m.channels.values())
|
||||
for channel in channels:
|
||||
await channel.reset(isLocal = true)
|
||||
for (d, s, fut) in channel.sendQueue:
|
||||
fut.fail(newLPStreamEOFError())
|
||||
channel.sendQueue = @[]
|
||||
channel.sendWindow = 0
|
||||
channel.closedLocally = true
|
||||
channel.isReset = true
|
||||
channel.opened = false
|
||||
await channel.remoteClosed()
|
||||
channel.receivedData.fire()
|
||||
try:
|
||||
await m.connection.write(YamuxHeader.goAway(NormalTermination))
|
||||
except CancelledError as exc:
|
||||
@@ -578,7 +581,7 @@ method handle*(m: Yamux) {.async: (raises: []).} =
|
||||
raise
|
||||
newException(YamuxError, "Peer exhausted the recvWindow after reset")
|
||||
if header.length > 0:
|
||||
var buffer = newSeqUninitialized[byte](header.length)
|
||||
var buffer = newSeqUninit[byte](header.length)
|
||||
await m.connection.readExactly(addr buffer[0], int(header.length))
|
||||
do:
|
||||
raise newException(YamuxError, "Unknown stream ID: " & $header.streamId)
|
||||
@@ -587,10 +590,12 @@ method handle*(m: Yamux) {.async: (raises: []).} =
|
||||
let channel =
|
||||
try:
|
||||
m.channels[header.streamId]
|
||||
except KeyError:
|
||||
except KeyError as e:
|
||||
raise newException(
|
||||
YamuxError,
|
||||
"Stream was cleaned up before handling data: " & $header.streamId,
|
||||
"Stream was cleaned up before handling data: " & $header.streamId & " : " &
|
||||
e.msg,
|
||||
e,
|
||||
)
|
||||
|
||||
if header.msgType == WindowUpdate:
|
||||
@@ -602,7 +607,7 @@ method handle*(m: Yamux) {.async: (raises: []).} =
|
||||
raise newException(YamuxError, "Peer exhausted the recvWindow")
|
||||
|
||||
if header.length > 0:
|
||||
var buffer = newSeqUninitialized[byte](header.length)
|
||||
var buffer = newSeqUninit[byte](header.length)
|
||||
await m.connection.readExactly(addr buffer[0], int(header.length))
|
||||
trace "Msg Rcv", description = shortLog(buffer)
|
||||
await channel.gotDataFromRemote(buffer)
|
||||
|
||||
@@ -15,7 +15,8 @@ import
|
||||
chronicles,
|
||||
stew/byteutils,
|
||||
dnsclientpkg/[protocol, types],
|
||||
../utility
|
||||
../utility,
|
||||
../utils/sequninit
|
||||
|
||||
import nameresolver
|
||||
|
||||
@@ -37,18 +38,18 @@ proc questionToBuf(address: string, kind: QKind): seq[byte] =
|
||||
let dataLen = requestStream.getPosition()
|
||||
requestStream.setPosition(0)
|
||||
|
||||
var buf = newSeq[byte](dataLen)
|
||||
var buf = newSeqUninit[byte](dataLen)
|
||||
discard requestStream.readData(addr buf[0], dataLen)
|
||||
buf
|
||||
except IOError as exc:
|
||||
info "Failed to created DNS buffer", description = exc.msg
|
||||
newSeq[byte](0)
|
||||
newSeqUninit[byte](0)
|
||||
except OSError as exc:
|
||||
info "Failed to created DNS buffer", description = exc.msg
|
||||
newSeq[byte](0)
|
||||
newSeqUninit[byte](0)
|
||||
except ValueError as exc:
|
||||
info "Failed to created DNS buffer", description = exc.msg
|
||||
newSeq[byte](0)
|
||||
newSeqUninit[byte](0)
|
||||
|
||||
proc getDnsResponse(
|
||||
dnsServer: TransportAddress, address: string, kind: QKind
|
||||
@@ -78,23 +79,23 @@ proc getDnsResponse(
|
||||
|
||||
try:
|
||||
await receivedDataFuture.wait(5.seconds) #unix default
|
||||
except AsyncTimeoutError:
|
||||
raise newException(IOError, "DNS server timeout")
|
||||
except AsyncTimeoutError as e:
|
||||
raise newException(IOError, "DNS server timeout: " & e.msg, e)
|
||||
|
||||
let rawResponse = sock.getMessage()
|
||||
try:
|
||||
parseResponse(string.fromBytes(rawResponse))
|
||||
except IOError as exc:
|
||||
raise exc
|
||||
raise newException(IOError, "Failed to parse DNS response: " & exc.msg, exc)
|
||||
except OSError as exc:
|
||||
raise exc
|
||||
raise newException(OSError, "Failed to parse DNS response: " & exc.msg, exc)
|
||||
except ValueError as exc:
|
||||
raise exc
|
||||
raise newException(ValueError, "Failed to parse DNS response: " & exc.msg, exc)
|
||||
except Exception as exc:
|
||||
# Nim 1.6: parseResponse can has a raises: [Exception, ..] because of
|
||||
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||
# it can't actually raise though
|
||||
raiseAssert exc.msg
|
||||
raiseAssert "Exception parsing DN response: " & exc.msg
|
||||
finally:
|
||||
await sock.closeWait()
|
||||
|
||||
|
||||
@@ -24,7 +24,8 @@ import
|
||||
./multicodec,
|
||||
./multihash,
|
||||
./vbuffer,
|
||||
./protobuf/minprotobuf
|
||||
./protobuf/minprotobuf,
|
||||
./utils/sequninit
|
||||
|
||||
export results, utility
|
||||
|
||||
@@ -142,7 +143,7 @@ func init*(pid: var PeerId, data: string): bool =
|
||||
## Initialize peer id from base58 encoded string representation.
|
||||
##
|
||||
## Returns ``true`` if peer was successfully initialiazed.
|
||||
var p = newSeq[byte](len(data) + 4)
|
||||
var p = newSeqUninit[byte](len(data) + 4)
|
||||
var length = 0
|
||||
if Base58.decode(data, p, length) == Base58Status.Success:
|
||||
p.setLen(length)
|
||||
|
||||
libp2p/peeridauth/client.nim (new file, 345 lines)
@@ -0,0 +1,345 @@
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2025 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import base64, json, strutils, uri, times, stew/byteutils
|
||||
import chronos, chronos/apps/http/httpclient, results, chronicles
|
||||
import ../peerinfo, ../crypto/crypto, ../varint.nim
|
||||
|
||||
logScope:
|
||||
topics = "libp2p peeridauth"
|
||||
|
||||
const
|
||||
NimLibp2pUserAgent = "nim-libp2p"
|
||||
PeerIDAuthPrefix* = "libp2p-PeerID"
|
||||
ChallengeCharset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
|
||||
ChallengeDefaultLen = 48
|
||||
|
||||
export Domain
|
||||
|
||||
type PeerIDAuthClient* = ref object of RootObj
|
||||
session: HttpSessionRef
|
||||
rng: ref HmacDrbgContext
|
||||
|
||||
type PeerIDAuthError* = object of LPError
|
||||
|
||||
type PeerIDAuthResponse* = object
|
||||
status*: int
|
||||
headers*: HttpTable
|
||||
body*: seq[byte]
|
||||
|
||||
type BearerToken* = object
|
||||
token*: string
|
||||
expires*: Opt[DateTime]
|
||||
|
||||
type PeerIDAuthOpaque* = string
|
||||
type PeerIDAuthSignature* = string
|
||||
type PeerIDAuthChallenge* = string
|
||||
|
||||
type PeerIDAuthAuthenticationResponse* = object
|
||||
challengeClient*: PeerIDAuthChallenge
|
||||
opaque*: PeerIDAuthOpaque
|
||||
serverPubkey*: PublicKey
|
||||
|
||||
type PeerIDAuthAuthorizationResponse* = object
|
||||
sig*: PeerIDAuthSignature
|
||||
bearer*: BearerToken
|
||||
response*: PeerIDAuthResponse
|
||||
|
||||
type SigParam = object
|
||||
k: string
|
||||
v: seq[byte]
|
||||
|
||||
proc new*(
|
||||
T: typedesc[PeerIDAuthClient], rng: ref HmacDrbgContext = newRng()
|
||||
): PeerIDAuthClient =
|
||||
PeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)
|
||||
|
||||
proc sampleChar(
|
||||
ctx: var HmacDrbgContext, choices: string
|
||||
): char {.raises: [ValueError].} =
|
||||
## Samples a random character from the input string using the DRBG context
|
||||
if choices.len == 0:
|
||||
raise newException(ValueError, "Cannot sample from an empty string")
|
||||
var idx: uint32
|
||||
ctx.generate(idx)
|
||||
return choices[uint32(idx mod uint32(choices.len))]
|
||||
|
||||
proc randomChallenge(
|
||||
rng: ref HmacDrbgContext, challengeLen: int = ChallengeDefaultLen
|
||||
): PeerIDAuthChallenge {.raises: [PeerIDAuthError].} =
|
||||
var rng = rng[]
|
||||
var challenge = ""
|
||||
try:
|
||||
for _ in 0 ..< challengeLen:
|
||||
challenge.add(rng.sampleChar(ChallengeCharset))
|
||||
except ValueError as exc:
|
||||
raise newException(PeerIDAuthError, "Failed to generate challenge", exc)
|
||||
PeerIDAuthChallenge(challenge)
|
||||
|
||||
proc extractField(data, key: string): string {.raises: [PeerIDAuthError].} =
|
||||
# Helper to extract quoted value from key
|
||||
for segment in data.split(","):
|
||||
if key in segment:
|
||||
return segment.split("=", 1)[1].strip(chars = {' ', '"'})
|
||||
raise newException(PeerIDAuthError, "Failed to find " & key & " in " & data)
|
||||
|
||||
proc genDataToSign(
|
||||
parts: seq[SigParam], prefix: string = PeerIDAuthPrefix
|
||||
): seq[byte] {.raises: [PeerIDAuthError].} =
|
||||
var buf: seq[byte] = prefix.toBytes()
|
||||
for p in parts:
|
||||
let varintLen = PB.encodeVarint(hint(p.k.len + p.v.len + 1)).valueOr:
|
||||
raise newException(PeerIDAuthError, "could not encode fields length to varint")
|
||||
buf.add varintLen
|
||||
buf.add (p.k & "=").toBytes()
|
||||
buf.add p.v
|
||||
return buf
|
||||
|
||||
proc getSigParams(
|
||||
clientSender: bool, hostname: string, challenge: string, publicKey: PublicKey
|
||||
): seq[SigParam] =
|
||||
if clientSender:
|
||||
@[
|
||||
SigParam(k: "challenge-client", v: challenge.toBytes()),
|
||||
SigParam(k: "hostname", v: hostname.toBytes()),
|
||||
SigParam(k: "server-public-key", v: publicKey.getBytes().get()),
|
||||
]
|
||||
else:
|
||||
@[
|
||||
SigParam(k: "challenge-server", v: challenge.toBytes()),
|
||||
SigParam(k: "client-public-key", v: publicKey.getBytes().get()),
|
||||
SigParam(k: "hostname", v: hostname.toBytes()),
|
||||
]
|
||||
|
||||
proc sign(
|
||||
privateKey: PrivateKey,
|
||||
challenge: PeerIDAuthChallenge,
|
||||
publicKey: PublicKey,
|
||||
hostname: string,
|
||||
clientSender: bool = true,
|
||||
): PeerIDAuthSignature {.raises: [PeerIDAuthError].} =
|
||||
let bytesToSign =
|
||||
getSigParams(clientSender, hostname, challenge, publicKey).genDataToSign()
|
||||
PeerIDAuthSignature(
|
||||
base64.encode(privateKey.sign(bytesToSign).get().getBytes(), safe = true)
|
||||
)
|
||||
|
||||
proc checkSignature*(
|
||||
serverSig: PeerIDAuthSignature,
|
||||
serverPublicKey: PublicKey,
|
||||
challengeServer: PeerIDAuthChallenge,
|
||||
clientPublicKey: PublicKey,
|
||||
hostname: string,
|
||||
): bool {.raises: [PeerIDAuthError].} =
|
||||
let bytesToSign =
|
||||
getSigParams(false, hostname, challengeServer, clientPublicKey).genDataToSign()
|
||||
var serverSignature: Signature
|
||||
try:
|
||||
if not serverSignature.init(base64.decode(serverSig).toBytes()):
|
||||
raise newException(
|
||||
PeerIDAuthError, "Failed to initialize Signature from base64 encoded sig"
|
||||
)
|
||||
except ValueError as exc:
|
||||
raise newException(PeerIDAuthError, "Failed to decode server's signature", exc)
|
||||
|
||||
serverSignature.verify(
|
||||
bytesToSign.toOpenArray(0, bytesToSign.len - 1), serverPublicKey
|
||||
)
|
||||
|
||||
method post*(
|
||||
self: PeerIDAuthClient, uri: Uri, payload: string, authHeader: string
|
||||
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
|
||||
let rawResponse = await HttpClientRequestRef
|
||||
.post(
|
||||
self.session,
|
||||
$uri,
|
||||
body = payload,
|
||||
headers = [
|
||||
("Content-Type", "application/json"),
|
||||
("User-Agent", NimLibp2pUserAgent),
|
||||
("Authorization", authHeader),
|
||||
],
|
||||
)
|
||||
.get()
|
||||
.send()
|
||||
|
||||
PeerIDAuthResponse(
|
||||
status: rawResponse.status,
|
||||
headers: rawResponse.headers,
|
||||
body: await rawResponse.getBodyBytes(),
|
||||
)
|
||||
|
||||
method get*(
|
||||
self: PeerIDAuthClient, uri: Uri
|
||||
): Future[PeerIDAuthResponse] {.
|
||||
async: (raises: [PeerIDAuthError, HttpError, CancelledError]), base
|
||||
.} =
|
||||
if self.session.isNil():
|
||||
raise newException(PeerIDAuthError, "Session is nil")
|
||||
let req = HttpClientRequestRef.get(self.session, $uri).valueOr:
|
||||
raise newException(PeerIDAuthError, "Could not get request obj")
|
||||
let rawResponse = await req.send()
|
||||
PeerIDAuthResponse(
|
||||
status: rawResponse.status,
|
||||
headers: rawResponse.headers,
|
||||
body: await rawResponse.getBodyBytes(),
|
||||
)
|
||||
|
||||
proc requestAuthentication*(
|
||||
self: PeerIDAuthClient, uri: Uri
|
||||
): Future[PeerIDAuthAuthenticationResponse] {.
|
||||
async: (raises: [PeerIDAuthError, CancelledError])
|
||||
.} =
|
||||
let response =
|
||||
try:
|
||||
await self.get(uri)
|
||||
except HttpError as exc:
|
||||
raise newException(PeerIDAuthError, "Failed to start PeerID Auth", exc)
|
||||
|
||||
let wwwAuthenticate = response.headers.getString("WWW-Authenticate")
|
||||
if wwwAuthenticate == "":
|
||||
raise newException(PeerIDAuthError, "WWW-authenticate not present in response")
|
||||
|
||||
let serverPubkey: PublicKey =
|
||||
try:
|
||||
PublicKey.init(decode(extractField(wwwAuthenticate, "public-key")).toBytes()).valueOr:
|
||||
raise newException(PeerIDAuthError, "Failed to initialize server public-key")
|
||||
except ValueError as exc:
|
||||
raise newException(PeerIDAuthError, "Failed to decode server public-key", exc)
|
||||
|
||||
PeerIDAuthAuthenticationResponse(
|
||||
challengeClient: extractField(wwwAuthenticate, "challenge-client"),
|
||||
opaque: extractField(wwwAuthenticate, "opaque"),
|
||||
serverPubkey: serverPubkey,
|
||||
)
|
||||
|
||||
proc pubkeyBytes*(pubkey: PublicKey): seq[byte] {.raises: [PeerIDAuthError].} =
|
||||
try:
|
||||
pubkey.getBytes().valueOr:
|
||||
raise
|
||||
newException(PeerIDAuthError, "Failed to get bytes from PeerInfo's publicKey")
|
||||
except ValueError as exc:
|
||||
raise newException(
|
||||
PeerIDAuthError, "Failed to get bytes from PeerInfo's publicKey", exc
|
||||
)
|
||||
|
||||
proc parse3339DateTime(
|
||||
timeStr: string
|
||||
): DateTime {.raises: [ValueError, TimeParseError].} =
|
||||
let parts = timeStr.split('.')
|
||||
let base = parse(parts[0], "yyyy-MM-dd'T'HH:mm:ss")
|
||||
let millis = parseInt(parts[1].strip(chars = {'Z'}))
|
||||
result = base + initDuration(milliseconds = millis)
|
||||
|
||||
proc requestAuthorization*(
|
||||
self: PeerIDAuthClient,
|
||||
peerInfo: PeerInfo,
|
||||
uri: Uri,
|
||||
challengeClient: PeerIDAuthChallenge,
|
||||
challengeServer: PeerIDAuthChallenge,
|
||||
serverPubkey: PublicKey,
|
||||
opaque: PeerIDAuthOpaque,
|
||||
payload: auto,
|
||||
): Future[PeerIDAuthAuthorizationResponse] {.
|
||||
async: (raises: [PeerIDAuthError, CancelledError])
|
||||
.} =
|
||||
let clientPubkeyB64 = peerInfo.publicKey.pubkeyBytes().encode(safe = true)
|
||||
let sig = peerInfo.privateKey.sign(challengeClient, serverPubkey, uri.hostname)
|
||||
let authHeader =
|
||||
PeerIDAuthPrefix & " public-key=\"" & clientPubkeyB64 & "\"" & ", opaque=\"" & opaque &
|
||||
"\"" & ", challenge-server=\"" & challengeServer & "\"" & ", sig=\"" & sig & "\""
|
||||
let response =
|
||||
try:
|
||||
await self.post(uri, $payload, authHeader)
|
||||
except HttpError as exc:
|
||||
raise newException(
|
||||
PeerIDAuthError, "Failed to send Authorization for PeerID Auth", exc
|
||||
)
|
||||
|
||||
let authenticationInfo = response.headers.getString("authentication-info")
|
||||
|
||||
let bearerExpires =
|
||||
try:
|
||||
Opt.some(parse3339DateTime(extractField(authenticationInfo, "expires")))
|
||||
except ValueError, PeerIDAuthError, TimeParseError:
|
||||
Opt.none(DateTime)
|
||||
|
||||
PeerIDAuthAuthorizationResponse(
|
||||
sig: PeerIDAuthSignature(extractField(authenticationInfo, "sig")),
|
||||
bearer: BearerToken(
|
||||
token: extractField(authenticationInfo, "bearer"), expires: bearerExpires
|
||||
),
|
||||
response: response,
|
||||
)
|
||||
|
||||
proc sendWithoutBearer(
|
||||
self: PeerIDAuthClient, uri: Uri, peerInfo: PeerInfo, payload: auto
|
||||
): Future[(BearerToken, PeerIDAuthResponse)] {.
|
||||
async: (raises: [PeerIDAuthError, CancelledError])
|
||||
.} =
|
||||
# Authenticate in three ways as per the PeerID Auth spec
|
||||
# https://github.com/libp2p/specs/blob/master/http/peer-id-auth.md
|
||||
|
||||
let authenticationResponse = await self.requestAuthentication(uri)
|
||||
|
||||
let challengeServer = self.rng.randomChallenge()
|
||||
let authorizationResponse = await self.requestAuthorization(
|
||||
peerInfo, uri, authenticationResponse.challengeClient, challengeServer,
|
||||
authenticationResponse.serverPubkey, authenticationResponse.opaque, payload,
|
||||
)
|
||||
|
||||
if not checkSignature(
|
||||
authorizationResponse.sig, authenticationResponse.serverPubkey, challengeServer,
|
||||
peerInfo.publicKey, uri.hostname,
|
||||
):
|
||||
raise newException(PeerIDAuthError, "Failed to validate server's signature")
|
||||
|
||||
return (authorizationResponse.bearer, authorizationResponse.response)
|
||||
|
||||
proc sendWithBearer(
|
||||
self: PeerIDAuthClient,
|
||||
uri: Uri,
|
||||
peerInfo: PeerInfo,
|
||||
payload: auto,
|
||||
bearer: BearerToken,
|
||||
): Future[(BearerToken, PeerIDAuthResponse)] {.
|
||||
async: (raises: [PeerIDAuthError, CancelledError])
|
||||
.} =
|
||||
if bearer.expires.isSome() and DateTime(bearer.expires.get) <= now():
|
||||
raise newException(PeerIDAuthError, "Bearer expired")
|
||||
let authHeader = PeerIDAuthPrefix & " bearer=\"" & bearer.token & "\""
|
||||
let response =
|
||||
try:
|
||||
await self.post(uri, $payload, authHeader)
|
||||
except HttpError as exc:
|
||||
raise newException(
|
||||
PeerIDAuthError, "Failed to send request with bearer token for PeerID Auth", exc
|
||||
)
|
||||
return (bearer, response)
|
||||
|
||||
proc send*(
|
||||
self: PeerIDAuthClient,
|
||||
uri: Uri,
|
||||
peerInfo: PeerInfo,
|
||||
payload: auto,
|
||||
bearer: Opt[BearerToken] = Opt.none(BearerToken),
|
||||
): Future[(BearerToken, PeerIDAuthResponse)] {.
|
||||
async: (raises: [PeerIDAuthError, CancelledError])
|
||||
.} =
|
||||
if bearer.isSome():
|
||||
await self.sendWithBearer(uri, peerInfo, payload, bearer.get)
|
||||
else:
|
||||
await self.sendWithoutBearer(uri, peerInfo, payload)
|
||||
|
||||
proc close*(
|
||||
self: PeerIDAuthClient
|
||||
): Future[void] {.async: (raises: [CancelledError]).} =
|
||||
await self.session.closeWait()
|
||||
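Taken together, the client above implements the libp2p PeerID Auth HTTP handshake: fetch the WWW-Authenticate challenge, answer it with a signed Authorization header, verify the server's signature, then reuse the returned bearer token. A hedged usage sketch; the URL and payload are hypothetical and the import paths are abbreviated:

import std/[json, uri]
import chronos, results
import libp2p/crypto/crypto, libp2p/peerinfo
import libp2p/peeridauth/client

proc register() {.async.} =
  let
    rng = newRng()
    key = PrivateKey.random(Ed25519, rng[]).tryGet()
    peerInfo = PeerInfo.new(key)
    client = PeerIDAuthClient.new(rng)

  # No bearer token yet: send() runs the full challenge/response handshake
  # and checks the server's signature before returning.
  let (bearer, response) =
    await client.send(parseUri("https://auth.example.org/register"), peerInfo, %*{"hello": "world"})
  echo response.status

  # Later calls can reuse the bearer token until it expires.
  discard await client.send(
    parseUri("https://auth.example.org/register"), peerInfo, %*{"again": true}, Opt.some(bearer)
  )
  await client.close()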
libp2p/peeridauth/mockclient.nim (new file, 40 lines)
@@ -0,0 +1,40 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import uri
import chronos, chronos/apps/http/httpclient
import ../crypto/crypto, ./client

export client

type MockPeerIDAuthClient* = ref object of PeerIDAuthClient
  mockedStatus*: int
  mockedHeaders*: HttpTable
  mockedBody*: seq[byte]

proc new*(
    T: typedesc[MockPeerIDAuthClient], rng: ref HmacDrbgContext
): MockPeerIDAuthClient {.raises: [PeerIDAuthError].} =
  MockPeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)

method post*(
    self: MockPeerIDAuthClient, uri: Uri, payload: string, authHeader: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
  PeerIDAuthResponse(
    status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
  )

method get*(
    self: MockPeerIDAuthClient, uri: Uri
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
  PeerIDAuthResponse(
    status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
  )
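Because post and get are declared as methods on the base client, tests can substitute this mock anywhere a PeerIDAuthClient is expected and control responses through the mocked* fields. A hypothetical test snippet; the import paths and HttpTable construction are assumptions:

import stew/byteutils
import chronos/apps/http/httptable
import libp2p/crypto/crypto
import libp2p/peeridauth/mockclient

let mock = MockPeerIDAuthClient.new(newRng())
mock.mockedStatus = 200
mock.mockedHeaders = HttpTable.init()
mock.mockedBody = "ok".toBytes()

# Dynamic dispatch picks the mocked post/get, so no HTTP traffic happens.
let authClient: PeerIDAuthClient = mock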
@@ -53,11 +53,7 @@ chronicles.formatIt(PeerInfo):
|
||||
shortLog(it)
|
||||
|
||||
proc update*(p: PeerInfo) {.async: (raises: [CancelledError]).} =
|
||||
# p.addrs.len == 0 overrides addrs only if it is the first time update is being executed or if the field is empty.
|
||||
# p.addressMappers.len == 0 is for when all addressMappers have been removed,
|
||||
# and we wish to have addrs in its initial state, i.e., a copy of listenAddrs.
|
||||
if p.addrs.len == 0 or p.addressMappers.len == 0:
|
||||
p.addrs = p.listenAddrs
|
||||
p.addrs = p.listenAddrs
|
||||
for mapper in p.addressMappers:
|
||||
p.addrs = await mapper(p.addrs)
|
||||
|
||||
@@ -101,8 +97,10 @@ proc new*(
|
||||
let pubkey =
|
||||
try:
|
||||
key.getPublicKey().tryGet()
|
||||
except CatchableError:
|
||||
raise newException(PeerInfoError, "invalid private key")
|
||||
except CatchableError as e:
|
||||
raise newException(
|
||||
PeerInfoError, "invalid private key creating PeerInfo: " & e.msg, e
|
||||
)
|
||||
|
||||
let peerId = PeerId.init(key).tryGet()
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import ../varint, ../utility, stew/endians2, results
|
||||
import ../utils/sequninit
|
||||
export results, utility
|
||||
|
||||
{.push public.}
|
||||
@@ -142,18 +143,17 @@ proc initProtoBuffer*(
|
||||
result.options = options
|
||||
|
||||
proc initProtoBuffer*(options: set[ProtoFlags] = {}): ProtoBuffer =
|
||||
## Initialize ProtoBuffer with new sequence of capacity ``cap``.
|
||||
result.buffer = newSeq[byte]()
|
||||
## Initialize ProtoBuffer with new sequence of capacity ``cap``
|
||||
result.options = options
|
||||
if WithVarintLength in options:
|
||||
# Our buffer will start from position 10, so we can store length of buffer
|
||||
# in [0, 9].
|
||||
result.buffer.setLen(10)
|
||||
result.buffer = newSeqUninit[byte](10)
|
||||
result.offset = 10
|
||||
elif {WithUint32LeLength, WithUint32BeLength} * options != {}:
|
||||
# Our buffer will start from position 4, so we can store length of buffer
|
||||
# in [0, 3].
|
||||
result.buffer.setLen(4)
|
||||
result.buffer = newSeqUninit[byte](4)
|
||||
result.offset = 4
|
||||
|
||||
proc write*[T: ProtoScalar](pb: var ProtoBuffer, field: int, value: T) =
|
||||
|
||||
@@ -87,7 +87,7 @@ method dialMe*(
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
raise newException(AutonatError, "read Dial response failed", e)
|
||||
raise newException(AutonatError, "read Dial response failed: " & e.msg, e)
|
||||
|
||||
let response = getResponseOrRaise(AutonatMsg.decode(respBytes))
|
||||
|
||||
|
||||
@@ -107,7 +107,9 @@ proc startSync*(
|
||||
description = err.msg
|
||||
raise newException(
|
||||
DcutrError,
|
||||
"Unexpected error when Dcutr initiator tried to connect to the remote peer", err,
|
||||
"Unexpected error when Dcutr initiator tried to connect to the remote peer: " &
|
||||
err.msg,
|
||||
err,
|
||||
)
|
||||
finally:
|
||||
if stream != nil:
|
||||
|
||||
@@ -148,7 +148,7 @@ proc dialPeerV1*(
|
||||
raise exc
|
||||
except LPStreamError as exc:
|
||||
trace "error writing hop request", description = exc.msg
|
||||
raise newException(RelayV1DialError, "error writing hop request", exc)
|
||||
raise newException(RelayV1DialError, "error writing hop request: " & exc.msg, exc)
|
||||
|
||||
let msgRcvFromRelayOpt =
|
||||
try:
|
||||
@@ -158,7 +158,8 @@ proc dialPeerV1*(
|
||||
except LPStreamError as exc:
|
||||
trace "error reading stop response", description = exc.msg
|
||||
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
|
||||
raise newException(RelayV1DialError, "error reading stop response", exc)
|
||||
raise
|
||||
newException(RelayV1DialError, "error reading stop response: " & exc.msg, exc)
|
||||
|
||||
try:
|
||||
let msgRcvFromRelay = msgRcvFromRelayOpt.valueOr:
|
||||
@@ -173,10 +174,16 @@ proc dialPeerV1*(
|
||||
)
|
||||
except RelayV1DialError as exc:
|
||||
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
|
||||
raise exc
|
||||
raise newException(
|
||||
RelayV1DialError,
|
||||
"Hop can't open destination stream after sendStatus: " & exc.msg,
|
||||
exc,
|
||||
)
|
||||
except ValueError as exc:
|
||||
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
|
||||
raise newException(RelayV1DialError, exc.msg)
|
||||
raise newException(
|
||||
RelayV1DialError, "Exception reading msg in dialPeerV1: " & exc.msg, exc
|
||||
)
|
||||
result = conn
|
||||
|
||||
proc dialPeerV2*(
|
||||
@@ -199,7 +206,8 @@ proc dialPeerV2*(
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
trace "error reading stop response", description = exc.msg
|
||||
raise newException(RelayV2DialError, exc.msg)
|
||||
raise
|
||||
newException(RelayV2DialError, "Exception decoding HopMessage: " & exc.msg, exc)
|
||||
|
||||
if msgRcvFromRelay.msgType != HopMessageType.Status:
|
||||
raise newException(RelayV2DialError, "Unexpected stop response")
|
||||
|
||||
@@ -76,7 +76,7 @@ proc dial*(
|
||||
if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
|
||||
raise newException(RelayDialError, "Destination doesn't exist")
|
||||
except RelayDialError as e:
|
||||
raise e
|
||||
raise newException(RelayDialError, "dial address not valid: " & e.msg, e)
|
||||
except CatchableError:
|
||||
raise newException(RelayDialError, "dial address not valid")
|
||||
|
||||
@@ -100,13 +100,13 @@ proc dial*(
|
||||
raise e
|
||||
except DialFailedError as e:
|
||||
safeClose(rc)
|
||||
raise newException(RelayDialError, "dial relay peer failed", e)
|
||||
raise newException(RelayDialError, "dial relay peer failed: " & e.msg, e)
|
||||
except RelayV1DialError as e:
|
||||
safeClose(rc)
|
||||
raise e
|
||||
raise newException(RelayV1DialError, "dial relay v1 failed: " & e.msg, e)
|
||||
except RelayV2DialError as e:
|
||||
safeClose(rc)
|
||||
raise e
|
||||
raise newException(RelayV2DialError, "dial relay v2 failed: " & e.msg, e)
|
||||
|
||||
method dial*(
|
||||
self: RelayTransport,
|
||||
@@ -121,7 +121,8 @@ method dial*(
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
raise newException(transport.TransportDialError, e.msg, e)
|
||||
raise
|
||||
newException(transport.TransportDialError, "Caught error in dial: " & e.msg, e)
|
||||
|
||||
method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe.} =
|
||||
try:
|
||||
|
||||
@@ -69,8 +69,8 @@ proc bridge*(
|
||||
while not connSrc.closed() and not connDst.closed():
|
||||
try: # https://github.com/status-im/nim-chronos/issues/516
|
||||
discard await race(futSrc, futDst)
|
||||
except ValueError:
|
||||
raiseAssert("Futures list is not empty")
|
||||
except ValueError as e:
|
||||
raiseAssert("Futures list is not empty: " & e.msg)
|
||||
if futSrc.finished():
|
||||
bufRead = await futSrc
|
||||
if bufRead > 0:
|
||||
|
||||
libp2p/protocols/kademlia.nim (new file, 3 lines)
@@ -0,0 +1,3 @@
import ./kademlia/kademlia

export kademlia
||||
libp2p/protocols/kademlia/consts.nim (new file, 6 lines)
@@ -0,0 +1,6 @@
const
  IdLength* = 32 # 256-bit IDs
  k* = 20 # replication parameter
  maxBuckets* = 256

const KadCodec* = "/ipfs/kad/1.0.0"
||||
libp2p/protocols/kademlia/kademlia.nim (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
import chronos
|
||||
import chronicles
|
||||
import ../../peerid
|
||||
import ./consts
|
||||
import ./routingtable
|
||||
import ../protocol
|
||||
import ../../switch
|
||||
import ./protobuf
|
||||
import ../../utils/heartbeat
|
||||
|
||||
logScope:
|
||||
topics = "kad-dht"
|
||||
|
||||
type KadDHT* = ref object of LPProtocol
|
||||
switch: Switch
|
||||
rng: ref HmacDrbgContext
|
||||
rtable*: RoutingTable
|
||||
maintenanceLoop: Future[void]
|
||||
|
||||
proc maintainBuckets(kad: KadDHT) {.async: (raises: [CancelledError]).} =
|
||||
heartbeat "refresh buckets", 10.minutes:
|
||||
debug "TODO: implement bucket maintenance"
|
||||
|
||||
proc new*(
|
||||
T: typedesc[KadDHT], switch: Switch, rng: ref HmacDrbgContext = newRng()
|
||||
): T {.raises: [].} =
|
||||
var rtable = RoutingTable.init(switch.peerInfo.peerId)
|
||||
let kad = T(rng: rng, switch: switch, rtable: rtable)
|
||||
|
||||
kad.codec = KadCodec
|
||||
kad.handler = proc(
|
||||
conn: Connection, proto: string
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
try:
|
||||
while not conn.atEof:
|
||||
let
|
||||
buf = await conn.readLp(4096)
|
||||
msg = Message.decode(buf).tryGet()
|
||||
|
||||
# TODO: handle msg.msgType
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError:
|
||||
error "could not handle request",
|
||||
peerId = conn.PeerId, err = getCurrentExceptionMsg()
|
||||
finally:
|
||||
await conn.close()
|
||||
|
||||
return kad
|
||||
|
||||
method start*(
|
||||
kad: KadDHT
|
||||
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
if kad.started:
|
||||
warn "Starting kad-dht twice"
|
||||
return fut
|
||||
|
||||
kad.maintenanceLoop = kad.maintainBuckets()
|
||||
kad.started = true
|
||||
|
||||
info "kad-dht started"
|
||||
|
||||
fut
|
||||
|
||||
method stop*(kad: KadDHT): Future[void] {.async: (raises: [], raw: true).} =
|
||||
if not kad.started:
|
||||
return
|
||||
|
||||
kad.started = false
|
||||
kad.maintenanceLoop.cancelSoon()
|
||||
kad.maintenanceLoop = nil
|
||||
return
|
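The new kademlia module is mounted like any other LPProtocol. The following sketch is illustrative only and not part of this changeset: the SwitchBuilder calls are the library's existing public API, while the exact wiring shown here is an assumption.

```nim
# Hypothetical wiring example, not part of this diff.
import chronos
import libp2p
import libp2p/protocols/kademlia

proc main() {.async.} =
  let switch = SwitchBuilder
    .new()
    .withRng(newRng())
    .withAddress(MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet())
    .withTcpTransport()
    .withNoise()
    .withMplex()
    .build()

  let kad = KadDHT.new(switch)
  switch.mount(kad)          # register the /ipfs/kad/1.0.0 handler

  await switch.start()
  await kad.start()          # starts the bucket maintenance loop

waitFor main()
```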
||||
libp2p/protocols/kademlia/keys.nim (new file, 48 lines)
@@ -0,0 +1,48 @@
|
||||
import ../../peerid
|
||||
import ./consts
|
||||
import chronicles
|
||||
import stew/byteutils
|
||||
|
||||
type
|
||||
KeyType* {.pure.} = enum
|
||||
Unhashed
|
||||
Raw
|
||||
PeerId
|
||||
|
||||
Key* = object
|
||||
case kind*: KeyType
|
||||
of KeyType.PeerId:
|
||||
peerId*: PeerId
|
||||
of KeyType.Raw, KeyType.Unhashed:
|
||||
data*: array[IdLength, byte]
|
||||
|
||||
proc toKey*(s: seq[byte]): Key =
|
||||
doAssert s.len == IdLength
|
||||
var data: array[IdLength, byte]
|
||||
for i in 0 ..< IdLength:
|
||||
data[i] = s[i]
|
||||
return Key(kind: KeyType.Raw, data: data)
|
||||
|
||||
proc toKey*(p: PeerId): Key =
|
||||
return Key(kind: KeyType.PeerId, peerId: p)
|
||||
|
||||
proc getBytes*(k: Key): seq[byte] =
|
||||
return
|
||||
case k.kind
|
||||
of KeyType.PeerId:
|
||||
k.peerId.getBytes()
|
||||
of KeyType.Raw, KeyType.Unhashed:
|
||||
@(k.data)
|
||||
|
||||
template `==`*(a, b: Key): bool =
|
||||
a.getBytes() == b.getBytes() and a.kind == b.kind
|
||||
|
||||
proc shortLog*(k: Key): string =
|
||||
case k.kind
|
||||
of KeyType.PeerId:
|
||||
"PeerId:" & $k.peerId
|
||||
of KeyType.Raw, KeyType.Unhashed:
|
||||
$k.kind & ":" & toHex(k.data)
|
||||
|
||||
chronicles.formatIt(Key):
|
||||
shortLog(it)
|
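A brief sketch of the Key variants above (import paths and use of a random PeerId are assumptions for illustration): a PeerId-backed key and a raw 32-byte key carry different kinds, and `==` compares both the bytes and the kind.

```nim
# Hypothetical illustration of the Key variants.
import results
import libp2p/crypto/crypto
import libp2p/peerid
import libp2p/protocols/kademlia/keys

let pid = PeerId.random(newRng()).tryGet()
let peerKey = pid.toKey()                    # KeyType.PeerId
let rawKey = newSeq[byte](32).toKey()        # KeyType.Raw, 32 zero bytes

echo peerKey.shortLog()                      # "PeerId:..."
echo rawKey.shortLog()                       # "Raw:0000..."
echo peerKey == rawKey                       # false: bytes and kind are both compared
```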
||||
libp2p/protocols/kademlia/protobuf.nim (new file, 159 lines)
@@ -0,0 +1,159 @@
|
||||
import ../../protobuf/minprotobuf
|
||||
import ../../varint
|
||||
import ../../utility
|
||||
import results
|
||||
import ../../multiaddress
|
||||
import stew/objects
|
||||
import stew/assign2
|
||||
import options
|
||||
|
||||
type
|
||||
Record* {.public.} = object
|
||||
key*: Option[seq[byte]]
|
||||
value*: Option[seq[byte]]
|
||||
timeReceived*: Option[string]
|
||||
|
||||
MessageType* = enum
|
||||
putValue = 0
|
||||
getValue = 1
|
||||
addProvider = 2
|
||||
getProviders = 3
|
||||
findNode = 4
|
||||
ping = 5 # Deprecated
|
||||
|
||||
ConnectionType* = enum
|
||||
notConnected = 0
|
||||
connected = 1
|
||||
canConnect = 2 # Unused
|
||||
cannotConnect = 3 # Unused
|
||||
|
||||
Peer* {.public.} = object
|
||||
id*: seq[byte]
|
||||
addrs*: seq[MultiAddress]
|
||||
connection*: ConnectionType
|
||||
|
||||
Message* {.public.} = object
|
||||
msgType*: MessageType
|
||||
key*: Option[seq[byte]]
|
||||
record*: Option[Record]
|
||||
closerPeers*: seq[Peer]
|
||||
providerPeers*: seq[Peer]
|
||||
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].}
|
||||
|
||||
proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].}
|
||||
|
||||
proc encode*(record: Record): ProtoBuffer {.raises: [].} =
|
||||
var pb = initProtoBuffer()
|
||||
pb.writeOpt(1, record.key)
|
||||
pb.writeOpt(2, record.value)
|
||||
pb.writeOpt(5, record.timeReceived)
|
||||
pb.finish()
|
||||
return pb
|
||||
|
||||
proc encode*(peer: Peer): ProtoBuffer {.raises: [].} =
|
||||
var pb = initProtoBuffer()
|
||||
pb.write(1, peer.id)
|
||||
for address in peer.addrs:
|
||||
pb.write(2, address.data.buffer)
|
||||
pb.write(3, uint32(ord(peer.connection)))
|
||||
pb.finish()
|
||||
return pb
|
||||
|
||||
proc encode*(msg: Message): ProtoBuffer {.raises: [].} =
|
||||
var pb = initProtoBuffer()
|
||||
|
||||
pb.write(1, uint32(ord(msg.msgType)))
|
||||
|
||||
pb.writeOpt(2, msg.key)
|
||||
|
||||
msg.record.withValue(record):
|
||||
pb.writeOpt(3, msg.record)
|
||||
|
||||
for peer in msg.closerPeers:
|
||||
pb.write(8, peer.encode())
|
||||
|
||||
for peer in msg.providerPeers:
|
||||
pb.write(9, peer.encode())
|
||||
|
||||
pb.finish()
|
||||
|
||||
return pb
|
||||
|
||||
proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].} =
|
||||
opt.withValue(v):
|
||||
pb.write(field, v)
|
||||
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].} =
|
||||
pb.write(field, value.encode())
|
||||
|
||||
proc getOptionField[T: ProtoScalar | string | seq[byte]](
|
||||
pb: ProtoBuffer, field: int, output: var Option[T]
|
||||
): ProtoResult[void] =
|
||||
var f: T
|
||||
if ?pb.getField(field, f):
|
||||
assign(output, some(f))
|
||||
ok()
|
||||
|
||||
proc decode*(T: type Record, pb: ProtoBuffer): ProtoResult[Option[T]] =
|
||||
var r: Record
|
||||
?pb.getOptionField(1, r.key)
|
||||
?pb.getOptionField(2, r.value)
|
||||
?pb.getOptionField(5, r.timeReceived)
|
||||
return ok(some(r))
|
||||
|
||||
proc decode*(T: type Peer, pb: ProtoBuffer): ProtoResult[Option[T]] =
|
||||
var
|
||||
p: Peer
|
||||
id: seq[byte]
|
||||
|
||||
?pb.getRequiredField(1, p.id)
|
||||
|
||||
discard ?pb.getRepeatedField(2, p.addrs)
|
||||
|
||||
var connVal: uint32
|
||||
if ?pb.getField(3, connVal):
|
||||
var connType: ConnectionType
|
||||
if not checkedEnumAssign(connType, connVal):
|
||||
return err(ProtoError.BadWireType)
|
||||
p.connection = connType
|
||||
|
||||
return ok(some(p))
|
||||
|
||||
proc decode*(T: type Message, buf: seq[byte]): ProtoResult[Option[T]] =
|
||||
var
|
||||
m: Message
|
||||
key: seq[byte]
|
||||
recPb: seq[byte]
|
||||
closerPbs: seq[seq[byte]]
|
||||
providerPbs: seq[seq[byte]]
|
||||
|
||||
var pb = initProtoBuffer(buf)
|
||||
|
||||
var msgTypeVal: uint32
|
||||
?pb.getRequiredField(1, msgTypeVal)
|
||||
|
||||
var msgType: MessageType
|
||||
if not checkedEnumAssign(msgType, msgTypeVal):
|
||||
return err(ProtoError.BadWireType)
|
||||
|
||||
m.msgType = msgType
|
||||
|
||||
?pb.getOptionField(2, m.key)
|
||||
|
||||
if ?pb.getField(3, recPb):
|
||||
assign(m.record, ?Record.decode(initProtoBuffer(recPb)))
|
||||
|
||||
discard ?pb.getRepeatedField(8, closerPbs)
|
||||
for ppb in closerPbs:
|
||||
let peerOpt = ?Peer.decode(initProtoBuffer(ppb))
|
||||
peerOpt.withValue(peer):
|
||||
m.closerPeers.add(peer)
|
||||
|
||||
discard ?pb.getRepeatedField(9, providerPbs)
|
||||
for ppb in providerPbs:
|
||||
let peer = ?Peer.decode(initProtoBuffer(ppb))
|
||||
peer.withValue(peer):
|
||||
m.providerPeers.add(peer)
|
||||
|
||||
return ok(some(m))
|
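For reference, a minimal round-trip through the new encode/decode helpers might look like the sketch below; the import paths and field values are assumptions, not part of the diff.

```nim
# Hypothetical round-trip of a FIND_NODE message through the new codec.
import options, results
import libp2p/protobuf/minprotobuf
import libp2p/protocols/kademlia/protobuf

let msg = Message(msgType: MessageType.findNode, key: some(@[1'u8, 2, 3]))

let encoded = msg.encode()                       # ProtoBuffer
let decoded = Message.decode(encoded.buffer).tryGet()

assert decoded.isSome
assert decoded.get().msgType == MessageType.findNode
```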
||||
libp2p/protocols/kademlia/routingtable.nim (new file, 130 lines)
@@ -0,0 +1,130 @@
|
||||
import algorithm
|
||||
import bearssl/rand
|
||||
import chronos
|
||||
import chronicles
|
||||
import ./consts
|
||||
import ./keys
|
||||
import ./xordistance
|
||||
import ../../peerid
|
||||
import sequtils
|
||||
import ../../utils/sequninit
|
||||
|
||||
logScope:
|
||||
topics = "kad-dht rtable"
|
||||
|
||||
type
|
||||
NodeEntry* = object
|
||||
nodeId*: Key
|
||||
lastSeen*: Moment
|
||||
|
||||
Bucket* = object
|
||||
peers*: seq[NodeEntry]
|
||||
|
||||
RoutingTable* = ref object
|
||||
selfId*: Key
|
||||
buckets*: seq[Bucket]
|
||||
|
||||
proc init*(T: typedesc[RoutingTable], selfId: Key): T =
|
||||
return RoutingTable(selfId: selfId, buckets: @[])
|
||||
|
||||
proc bucketIndex*(selfId, key: Key): int =
|
||||
return xorDistance(selfId, key).leadingZeros
|
||||
|
||||
proc peerIndexInBucket(bucket: var Bucket, nodeId: Key): Opt[int] =
|
||||
for i, p in bucket.peers:
|
||||
if p.nodeId == nodeId:
|
||||
return Opt.some(i)
|
||||
return Opt.none(int)
|
||||
|
||||
proc insert*(rtable: var RoutingTable, nodeId: Key): bool =
|
||||
if nodeId == rtable.selfId:
|
||||
return false # No self insertion
|
||||
|
||||
let idx = bucketIndex(rtable.selfId, nodeId)
|
||||
if idx >= maxBuckets:
|
||||
trace "cannot insert node. max buckets have been reached",
|
||||
nodeId, bucketIdx = idx, maxBuckets
|
||||
return false
|
||||
|
||||
if idx >= rtable.buckets.len:
|
||||
# expand buckets lazily if needed
|
||||
rtable.buckets.setLen(idx + 1)
|
||||
|
||||
var bucket = rtable.buckets[idx]
|
||||
let keyx = peerIndexInBucket(bucket, nodeId)
|
||||
if keyx.isSome:
|
||||
bucket.peers[keyx.unsafeValue].lastSeen = Moment.now()
|
||||
elif bucket.peers.len < k:
|
||||
bucket.peers.add(NodeEntry(nodeId: nodeId, lastSeen: Moment.now()))
|
||||
else:
|
||||
# TODO: eviction policy goes here, rn we drop the node
|
||||
trace "cannot insert node in bucket, dropping node",
|
||||
nodeId, bucket = k, bucketIdx = idx
|
||||
return false
|
||||
|
||||
rtable.buckets[idx] = bucket
|
||||
return true
|
||||
|
||||
proc insert*(rtable: var RoutingTable, peerId: PeerId): bool =
|
||||
insert(rtable, peerId.toKey())
|
||||
|
||||
proc findClosest*(rtable: RoutingTable, targetId: Key, count: int): seq[Key] =
|
||||
var allNodes: seq[Key] = @[]
|
||||
|
||||
for bucket in rtable.buckets:
|
||||
for p in bucket.peers:
|
||||
allNodes.add(p.nodeId)
|
||||
|
||||
allNodes.sort(
|
||||
proc(a, b: Key): int =
|
||||
cmp(xorDistance(a, targetId), xorDistance(b, targetId))
|
||||
)
|
||||
|
||||
return allNodes[0 ..< min(count, allNodes.len)]
|
||||
|
||||
proc findClosestPeers*(rtable: RoutingTable, targetId: Key, count: int): seq[PeerId] =
|
||||
findClosest(rtable, targetId, count).mapIt(it.peerId)
|
||||
|
||||
proc isStale*(bucket: Bucket): bool =
|
||||
if bucket.peers.len == 0:
|
||||
return true
|
||||
for p in bucket.peers:
|
||||
if Moment.now() - p.lastSeen > 30.minutes:
|
||||
return true
|
||||
return false
|
||||
|
||||
proc randomKeyInBucketRange*(
|
||||
selfId: Key, bucketIndex: int, rng: ref HmacDrbgContext
|
||||
): Key =
|
||||
var raw = selfId.getBytes()
|
||||
|
||||
# zero out higher bits
|
||||
for i in 0 ..< bucketIndex:
|
||||
let byteIdx = i div 8
|
||||
let bitInByte = 7 - (i mod 8)
|
||||
raw[byteIdx] = raw[byteIdx] and not (1'u8 shl bitInByte)
|
||||
|
||||
# flip the target bit
|
||||
let tgtByte = bucketIndex div 8
|
||||
let tgtBitInByte = 7 - (bucketIndex mod 8)
|
||||
raw[tgtByte] = raw[tgtByte] xor (1'u8 shl tgtBitInByte)
|
||||
|
||||
# randomize all less significant bits
|
||||
let totalBits = raw.len * 8
|
||||
let lsbStart = bucketIndex + 1
|
||||
let lsbBytes = (totalBits - lsbStart + 7) div 8
|
||||
var randomBuf = newSeqUninit[byte](lsbBytes)
|
||||
hmacDrbgGenerate(rng[], randomBuf)
|
||||
|
||||
for i in lsbStart ..< totalBits:
|
||||
let byteIdx = i div 8
|
||||
let bitInByte = 7 - (i mod 8)
|
||||
let lsbByte = (i - lsbStart) div 8
|
||||
let lsbBit = 7 - ((i - lsbStart) mod 8)
|
||||
let randBit = (randomBuf[lsbByte] shr lsbBit) and 1
|
||||
if randBit == 1:
|
||||
raw[byteIdx] = raw[byteIdx] or (1'u8 shl bitInByte)
|
||||
else:
|
||||
raw[byteIdx] = raw[byteIdx] and not (1'u8 shl bitInByte)
|
||||
|
||||
return raw.toKey()
|
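As a quick illustration of the routing table API added above (import paths and the use of random peer IDs are assumptions for the sketch, not part of this change):

```nim
# Hypothetical RoutingTable usage: insert a few peers, query the closest keys.
import results
import libp2p/crypto/crypto
import libp2p/peerid
import libp2p/protocols/kademlia/[keys, routingtable]

let rng = newRng()
var rtable = RoutingTable.init(PeerId.random(rng).tryGet().toKey())

for _ in 0 ..< 5:
  discard rtable.insert(PeerId.random(rng).tryGet())   # false on self-insertion or full bucket

let target = PeerId.random(rng).tryGet().toKey()
for key in rtable.findClosest(target, 3):              # at most 3, ordered by XOR distance
  echo key.shortLog()
```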
||||
libp2p/protocols/kademlia/xordistance.nim (new file, 55 lines)
@@ -0,0 +1,55 @@
|
||||
import ./consts
|
||||
import ./keys
|
||||
import nimcrypto/sha2
|
||||
import ../../peerid
|
||||
|
||||
type XorDistance* = array[IdLength, byte]
|
||||
|
||||
proc countLeadingZeroBits*(b: byte): int =
|
||||
for i in 0 .. 7:
|
||||
if (b and (0x80'u8 shr i)) != 0:
|
||||
return i
|
||||
return 8
|
||||
|
||||
proc leadingZeros*(dist: XorDistance): int =
|
||||
for i in 0 ..< dist.len:
|
||||
if dist[i] != 0:
|
||||
return i * 8 + countLeadingZeroBits(dist[i])
|
||||
return dist.len * 8
|
||||
|
||||
proc cmp*(a, b: XorDistance): int =
|
||||
for i in 0 ..< IdLength:
|
||||
if a[i] < b[i]:
|
||||
return -1
|
||||
elif a[i] > b[i]:
|
||||
return 1
|
||||
return 0
|
||||
|
||||
proc `<`*(a, b: XorDistance): bool =
|
||||
cmp(a, b) < 0
|
||||
|
||||
proc `<=`*(a, b: XorDistance): bool =
|
||||
cmp(a, b) <= 0
|
||||
|
||||
proc hashFor(k: Key): seq[byte] =
|
||||
return
|
||||
@(
|
||||
case k.kind
|
||||
of KeyType.PeerId:
|
||||
sha256.digest(k.peerId.getBytes()).data
|
||||
of KeyType.Raw:
|
||||
sha256.digest(k.data).data
|
||||
of KeyType.Unhashed:
|
||||
k.data
|
||||
)
|
||||
|
||||
proc xorDistance*(a, b: Key): XorDistance =
|
||||
let hashA = a.hashFor()
|
||||
let hashB = b.hashFor()
|
||||
var response: XorDistance
|
||||
for i in 0 ..< hashA.len:
|
||||
response[i] = hashA[i] xor hashB[i]
|
||||
return response
|
||||
|
||||
proc xorDistance*(a: PeerId, b: Key): XorDistance =
|
||||
xorDistance(a.toKey(), b)
|
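A short sketch of how the pieces above fit together (raw values invented for illustration): keys are hashed, XORed, and the number of leading zero bits of the distance is exactly what routingtable's bucketIndex uses to pick a bucket.

```nim
# Hypothetical demonstration of xorDistance / leadingZeros.
import libp2p/protocols/kademlia/[keys, xordistance, routingtable]

var rawA = newSeq[byte](32)        # IdLength bytes, all zero
var rawB = newSeq[byte](32)
rawB[31] = 0x01                    # differ in the last byte only

let
  a = rawA.toKey()                 # KeyType.Raw: hashed with SHA-256 before XOR
  b = rawB.toKey()
  dist = xorDistance(a, b)

echo dist.leadingZeros()           # shared prefix length of the hashed keys
echo bucketIndex(a, b)             # same number, used when inserting b relative to a
```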
||||
@@ -12,39 +12,85 @@
|
||||
import chronos, chronicles, sequtils
|
||||
import stew/endians2
|
||||
import ./core, ../../stream/connection
|
||||
when defined(libp2p_quic_support):
|
||||
import ../../transports/quictransport
|
||||
|
||||
logScope:
|
||||
topics = "libp2p perf"
|
||||
|
||||
type PerfClient* = ref object of RootObj
|
||||
type Stats* = object
|
||||
isFinal*: bool
|
||||
uploadBytes*: uint
|
||||
downloadBytes*: uint
|
||||
duration*: Duration
|
||||
|
||||
type PerfClient* = ref object
|
||||
stats: Stats
|
||||
|
||||
proc new*(T: typedesc[PerfClient]): T =
|
||||
return T()
|
||||
|
||||
proc currentStats*(p: PerfClient): Stats =
|
||||
return p.stats
|
||||
|
||||
proc perf*(
|
||||
_: typedesc[PerfClient],
|
||||
conn: Connection,
|
||||
sizeToWrite: uint64 = 0,
|
||||
sizeToRead: uint64 = 0,
|
||||
p: PerfClient, conn: Connection, sizeToWrite: uint64 = 0, sizeToRead: uint64 = 0
|
||||
): Future[Duration] {.public, async: (raises: [CancelledError, LPStreamError]).} =
|
||||
var
|
||||
size = sizeToWrite
|
||||
buf: array[PerfSize, byte]
|
||||
let start = Moment.now()
|
||||
trace "starting performance benchmark", conn, sizeToWrite, sizeToRead
|
||||
|
||||
await conn.write(toSeq(toBytesBE(sizeToRead)))
|
||||
while size > 0:
|
||||
let toWrite = min(size, PerfSize)
|
||||
await conn.write(buf[0 ..< toWrite])
|
||||
size -= toWrite
|
||||
p.stats = Stats()
|
||||
|
||||
await conn.close()
|
||||
try:
|
||||
var
|
||||
size = sizeToWrite
|
||||
buf: array[PerfSize, byte]
|
||||
|
||||
size = sizeToRead
|
||||
let start = Moment.now()
|
||||
|
||||
while size > 0:
|
||||
let toRead = min(size, PerfSize)
|
||||
await conn.readExactly(addr buf[0], toRead.int)
|
||||
size = size - toRead
|
||||
await conn.write(toSeq(toBytesBE(sizeToRead)))
|
||||
while size > 0:
|
||||
let toWrite = min(size, PerfSize)
|
||||
await conn.write(buf[0 ..< toWrite])
|
||||
size -= toWrite.uint
|
||||
|
||||
let duration = Moment.now() - start
|
||||
trace "finishing performance benchmark", duration
|
||||
return duration
|
||||
# set stats using copy value to avoid race condition
|
||||
var statsCopy = p.stats
|
||||
statsCopy.duration = Moment.now() - start
|
||||
statsCopy.uploadBytes += toWrite.uint
|
||||
p.stats = statsCopy
|
||||
|
||||
# Close connection after writing for TCP, but not for QUIC
|
||||
when defined(libp2p_quic_support):
|
||||
if not (conn of QuicStream):
|
||||
await conn.close()
|
||||
# For QUIC streams, don't close yet - let server manage lifecycle
|
||||
else:
|
||||
await conn.close()
|
||||
|
||||
size = sizeToRead
|
||||
|
||||
while size > 0:
|
||||
let toRead = min(size, PerfSize)
|
||||
await conn.readExactly(addr buf[0], toRead.int)
|
||||
size = size - toRead.uint
|
||||
|
||||
# set stats using copy value to avoid race condition
|
||||
var statsCopy = p.stats
|
||||
statsCopy.duration = Moment.now() - start
|
||||
statsCopy.downloadBytes += toRead.uint
|
||||
p.stats = statsCopy
|
||||
|
||||
# Close QUIC connections after read phase
|
||||
when defined(libp2p_quic_support):
|
||||
if conn of QuicStream:
|
||||
await conn.close()
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except LPStreamError as e:
|
||||
raise e
|
||||
finally:
|
||||
p.stats.isFinal = true
|
||||
|
||||
trace "finishing performance benchmark", duration = p.stats.duration
|
||||
|
||||
return p.stats.duration
|
||||
|
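With the reworked client above, progress can be observed from another task while a run is in flight. A rough sketch follows; the stream setup and the import path are assumptions.

```nim
# Hypothetical monitoring loop around PerfClient; conn is an already-open
# perf stream obtained elsewhere.
import chronos
import libp2p/stream/connection
import libp2p/protocols/perf/client

proc runPerf(conn: Connection) {.async.} =
  let client = PerfClient.new()
  let fut = client.perf(conn, sizeToWrite = 10_000_000'u64, sizeToRead = 10_000_000'u64)

  while not fut.finished:
    let s = client.currentStats()
    echo "up: ", s.uploadBytes, " down: ", s.downloadBytes
    await sleepAsync(250.milliseconds)

  echo "took: ", await fut         # Duration; stats.isFinal is now true
```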
||||
@@ -14,6 +14,8 @@
|
||||
import chronos, chronicles
|
||||
import stew/endians2
|
||||
import ./core, ../protocol, ../../stream/connection, ../../utility
|
||||
when defined(libp2p_quic_support):
|
||||
import ../../transports/quictransport
|
||||
|
||||
export chronicles, connection
|
||||
|
||||
@@ -36,9 +38,33 @@ proc new*(T: typedesc[Perf]): T {.public.} =
|
||||
|
||||
var toReadBuffer: array[PerfSize, byte]
|
||||
try:
|
||||
while true:
|
||||
bytesRead += await conn.readOnce(addr toReadBuffer[0], PerfSize)
|
||||
except CatchableError as exc:
|
||||
# Different handling for QUIC vs TCP streams
|
||||
when defined(libp2p_quic_support):
|
||||
if conn of QuicStream:
|
||||
# QUIC needs timeout-based approach to detect end of upload
|
||||
while not conn.atEof:
|
||||
let readFut = conn.readOnce(addr toReadBuffer[0], PerfSize)
|
||||
if not await readFut.withTimeout(100.milliseconds):
|
||||
break
|
||||
let read = readFut.read()
|
||||
if read == 0:
|
||||
break
|
||||
bytesRead += read
|
||||
else:
|
||||
# TCP streams handle EOF properly
|
||||
while true:
|
||||
let read = await conn.readOnce(addr toReadBuffer[0], PerfSize)
|
||||
if read == 0:
|
||||
break
|
||||
bytesRead += read
|
||||
else:
|
||||
# TCP streams handle EOF properly
|
||||
while true:
|
||||
let read = await conn.readOnce(addr toReadBuffer[0], PerfSize)
|
||||
if read == 0:
|
||||
break
|
||||
bytesRead += read
|
||||
except CatchableError:
|
||||
discard
|
||||
|
||||
var buf: array[PerfSize, byte]
|
||||
|
||||
libp2p/protocols/pubsub/bandwidth.nim (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
import chronos
|
||||
import std/atomics
|
||||
|
||||
const DefaultAlpha = 0.3
|
||||
const InitialRate = 2_500_000 #bytes per second
|
||||
|
||||
type
|
||||
ExponentialMovingAverage* = ref object
|
||||
alpha: float
|
||||
value: Atomic[float64]
|
||||
|
||||
BandwidthTracking* = ref object
|
||||
download*: ExponentialMovingAverage
|
||||
|
||||
proc init*(T: type[ExponentialMovingAverage], alpha: float = DefaultAlpha): T =
|
||||
let e = ExponentialMovingAverage(alpha: alpha)
|
||||
e.value.store(InitialRate)
|
||||
return e
|
||||
|
||||
proc init*(T: type[BandwidthTracking], alpha: float = DefaultAlpha): T =
|
||||
BandwidthTracking(download: ExponentialMovingAverage())
|
||||
|
||||
proc update*(e: var ExponentialMovingAverage, startAt: Moment, bytes: int) =
|
||||
let elapsedTime = Moment.now() - startAt
|
||||
let curSample = float(bytes * 1000) / elapsedTime.milliseconds.float
|
||||
let oldSample = e.value.load()
|
||||
let ema = e.alpha * curSample + (1.0 - e.alpha) * oldSample
|
||||
e.value.store(ema)
|
||||
|
||||
proc value*(e: var ExponentialMovingAverage): float =
|
||||
e.value.load()
|
||||
|
||||
proc calculateReceiveTimeMs*(msgLen: int64, dataRate: int64 = InitialRate): int64 =
|
||||
let txTime = ((msgLen * 1000) div dataRate)
|
||||
#ideally (RTT * 2) + 5% TxTime ? Need many testruns to precisely adjust safety margin
|
||||
let margin = 250 + (txTime.float64 * 0.05)
|
||||
result = txTime + margin.int64
|
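The tracker above smooths per-transfer throughput samples; a small sketch of the intended call pattern (timings and sizes invented):

```nim
# Hypothetical use of the download-rate EMA added above.
import chronos
import libp2p/protocols/pubsub/bandwidth

var ema = ExponentialMovingAverage.init()       # seeded with the 2.5 MB/s default
let startAt = Moment.now()

waitFor sleepAsync(100.milliseconds)            # stand-in for an actual transfer
ema.update(startAt, 512 * 1024)                 # fold a 512 KiB sample into the average

echo ema.value(), " bytes/s"
echo calculateReceiveTimeMs(1_048_576), " ms budget for a 1 MiB message"
```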
||||
@@ -185,14 +185,17 @@ method init*(f: FloodSub) =
|
||||
try:
|
||||
await f.handleConn(conn, proto)
|
||||
except CancelledError as exc:
|
||||
trace "Unexpected cancellation in floodsub handler", conn
|
||||
trace "Unexpected cancellation in floodsub handler", conn, description = exc.msg
|
||||
raise exc
|
||||
|
||||
f.handler = handler
|
||||
f.codec = FloodSubCodec
|
||||
|
||||
method publish*(
|
||||
f: FloodSub, topic: string, data: seq[byte]
|
||||
f: FloodSub,
|
||||
topic: string,
|
||||
data: seq[byte],
|
||||
publishParams: Option[PublishParams] = none(PublishParams),
|
||||
): Future[int] {.async: (raises: []).} =
|
||||
# base returns always 0
|
||||
discard await procCall PubSub(f).publish(topic, data)
|
||||
|
||||
@@ -218,7 +218,7 @@ method init*(g: GossipSub) =
|
||||
try:
|
||||
await g.handleConn(conn, proto)
|
||||
except CancelledError as exc:
|
||||
trace "Unexpected cancellation in gossipsub handler", conn
|
||||
trace "Unexpected cancellation in gossipsub handler", conn, description = exc.msg
|
||||
raise exc
|
||||
|
||||
g.handler = handler
|
||||
@@ -702,24 +702,27 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
|
||||
# Send unsubscribe (in reverse order to sub/graft)
|
||||
procCall PubSub(g).onTopicSubscription(topic, subscribed)
|
||||
|
||||
method publish*(
|
||||
proc makePeersForPublishUsingCustomConn(
|
||||
g: GossipSub, topic: string
|
||||
): HashSet[PubSubPeer] =
|
||||
assert g.customConnCallbacks.isSome,
|
||||
"GossipSub misconfiguration: useCustomConn was true, but no customConnCallbacks provided"
|
||||
|
||||
trace "Selecting peers via custom connection callback"
|
||||
|
||||
return g.customConnCallbacks.get().customPeerSelectionCB(
|
||||
g.gossipsub.getOrDefault(topic),
|
||||
g.subscribedDirectPeers.getOrDefault(topic),
|
||||
g.mesh.getOrDefault(topic),
|
||||
g.fanout.getOrDefault(topic),
|
||||
)
|
||||
|
||||
proc makePeersForPublishDefault(
|
||||
g: GossipSub, topic: string, data: seq[byte]
|
||||
): Future[int] {.async: (raises: []).} =
|
||||
logScope:
|
||||
topic
|
||||
|
||||
if topic.len <= 0: # data could be 0/empty
|
||||
debug "Empty topic, skipping publish"
|
||||
return 0
|
||||
|
||||
# base returns always 0
|
||||
discard await procCall PubSub(g).publish(topic, data)
|
||||
|
||||
trace "Publishing message on topic", data = data.shortLog
|
||||
|
||||
): HashSet[PubSubPeer] =
|
||||
var peers: HashSet[PubSubPeer]
|
||||
|
||||
# add always direct peers
|
||||
# Always include direct peers
|
||||
peers.incl(g.subscribedDirectPeers.getOrDefault(topic))
|
||||
|
||||
if topic in g.topics: # if we're subscribed use the mesh
|
||||
@@ -769,6 +772,34 @@ method publish*(
|
||||
# ultimately is not sent)
|
||||
g.lastFanoutPubSub[topic] = Moment.fromNow(g.parameters.fanoutTTL)
|
||||
|
||||
return peers
|
||||
|
||||
method publish*(
|
||||
g: GossipSub,
|
||||
topic: string,
|
||||
data: seq[byte],
|
||||
publishParams: Option[PublishParams] = none(PublishParams),
|
||||
): Future[int] {.async: (raises: []).} =
|
||||
logScope:
|
||||
topic
|
||||
|
||||
if topic.len <= 0: # data could be 0/empty
|
||||
debug "Empty topic, skipping publish"
|
||||
return 0
|
||||
|
||||
# base returns always 0
|
||||
discard await procCall PubSub(g).publish(topic, data)
|
||||
|
||||
trace "Publishing message on topic", data = data.shortLog
|
||||
|
||||
let pubParams = publishParams.get(PublishParams())
|
||||
|
||||
let peers =
|
||||
if pubParams.useCustomConn:
|
||||
g.makePeersForPublishUsingCustomConn(topic)
|
||||
else:
|
||||
g.makePeersForPublishDefault(topic, data)
|
||||
|
||||
if peers.len == 0:
|
||||
let topicPeers = g.gossipsub.getOrDefault(topic).toSeq()
|
||||
debug "No peers for topic, skipping publish",
|
||||
@@ -802,12 +833,18 @@ method publish*(
|
||||
trace "Dropping already-seen message"
|
||||
return 0
|
||||
|
||||
g.mcache.put(msgId, msg)
|
||||
if not pubParams.skipMCache:
|
||||
g.mcache.put(msgId, msg)
|
||||
|
||||
if g.parameters.sendIDontWantOnPublish and isLargeMessage(msg, msgId):
|
||||
g.sendIDontWant(msg, msgId, peers)
|
||||
|
||||
g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
|
||||
g.broadcast(
|
||||
peers,
|
||||
RPCMsg(messages: @[msg]),
|
||||
isHighPriority = true,
|
||||
useCustomConn = pubParams.useCustomConn,
|
||||
)
|
||||
|
||||
if g.knownTopics.contains(topic):
|
||||
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = [topic])
|
||||
|
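With the split above, callers opt into the custom-connection path per publish. A hedged sketch (it assumes the gossipsub module re-exports the pubsub types; otherwise import pubsub directly):

```nim
# Hypothetical publish call using the new PublishParams knobs.
import chronos, options
import libp2p/protocols/pubsub/gossipsub

proc publishViaCustomConn(g: GossipSub, topic: string, payload: seq[byte]) {.async.} =
  let params = some(PublishParams(useCustomConn: true, skipMCache: true))
  let receivers = await g.publish(topic, payload, params)
  echo "delivered to ", receivers, " peers"
```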
||||
@@ -305,7 +305,7 @@ proc handleIHave*(
|
||||
proc handleIDontWant*(g: GossipSub, peer: PubSubPeer, iDontWants: seq[ControlIWant]) =
|
||||
for dontWant in iDontWants:
|
||||
for messageId in dontWant.messageIDs:
|
||||
if peer.iDontWants[0].len > 1000:
|
||||
if peer.iDontWants[0].len >= IDontWantMaxCount:
|
||||
break
|
||||
peer.iDontWants[0].incl(g.salt(messageId))
|
||||
|
||||
@@ -457,8 +457,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
|
||||
prunes = toSeq(
|
||||
try:
|
||||
g.mesh[topic]
|
||||
except KeyError:
|
||||
raiseAssert "have peers"
|
||||
except KeyError as e:
|
||||
raiseAssert "have peers: " & e.msg
|
||||
)
|
||||
# avoid pruning peers we are currently grafting in this heartbeat
|
||||
prunes.keepIf do(x: PubSubPeer) -> bool:
|
||||
@@ -513,8 +513,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
|
||||
var peers = toSeq(
|
||||
try:
|
||||
g.mesh[topic]
|
||||
except KeyError:
|
||||
raiseAssert "have peers"
|
||||
except KeyError as e:
|
||||
raiseAssert "have peers: " & e.msg
|
||||
)
|
||||
# grafting so high score has priority
|
||||
peers.sort(byScore, SortOrder.Descending)
|
||||
@@ -538,8 +538,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
|
||||
it.peerId notin backingOff:
|
||||
avail.add(it)
|
||||
|
||||
# by spec, grab only 2
|
||||
if avail.len > 1:
|
||||
# by spec, grab only up to MaxOpportunisticGraftPeers
|
||||
if avail.len >= MaxOpportunisticGraftPeers:
|
||||
break
|
||||
|
||||
for peer in avail:
|
||||
@@ -690,7 +690,7 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] =
|
||||
for peer in allPeers:
|
||||
control.mgetOrPut(peer, ControlMessage()).ihave.add(ihave)
|
||||
for msgId in ihave.messageIDs:
|
||||
peer.sentIHaves[^1].incl(msgId)
|
||||
peer.sentIHaves[0].incl(msgId)
|
||||
|
||||
libp2p_gossipsub_cache_window_size.set(cacheWindowSize.int64)
|
||||
|
||||
|
||||
libp2p/protocols/pubsub/gossipsub/preamblestore.nim (new file, 104 lines)
@@ -0,0 +1,104 @@
|
||||
import std/[tables, heapqueue, sets, options]
|
||||
import ./types
|
||||
import chronos
|
||||
import ../rpc/messages
|
||||
import ../../../peerid
|
||||
import ../pubsubpeer
|
||||
|
||||
proc `<`(a, b: PreambleInfo): bool =
|
||||
a.expiresAt < b.expiresAt
|
||||
|
||||
proc init*(_: typedesc[PeerSet]): PeerSet =
|
||||
PeerSet(order: @[], peers: initHashSet[PeerId]())
|
||||
|
||||
proc init*(
|
||||
_: typedesc[PreambleInfo],
|
||||
preamble: ControlPreamble,
|
||||
sender: PubSubPeer,
|
||||
startsAt: Moment,
|
||||
expiresAt: Moment,
|
||||
): PreambleInfo =
|
||||
PreambleInfo(
|
||||
messageId: preamble.messageID,
|
||||
messageLength: preamble.messageLength,
|
||||
topicId: preamble.topicID,
|
||||
sender: sender,
|
||||
startsAt: startsAt,
|
||||
expiresAt: expiresAt,
|
||||
peerSet: PeerSet.init(),
|
||||
)
|
||||
|
||||
proc init*(T: typedesc[PreambleStore]): T =
|
||||
result.byId = initTable[MessageId, PreambleInfo]()
|
||||
result.heap = initHeapQueue[PreambleInfo]()
|
||||
|
||||
proc insert*(ps: var PreambleStore, msgId: MessageId, info: PreambleInfo) =
|
||||
try:
|
||||
if ps.byId.hasKey(msgId):
|
||||
ps.byId[msgId].deleted = true
|
||||
ps.byId[msgId] = info
|
||||
ps.heap.push(info)
|
||||
except KeyError:
|
||||
assert false, "checked with hasKey"
|
||||
|
||||
proc hasKey*(ps: var PreambleStore, msgId: MessageId): bool =
|
||||
return ps.byId.hasKey(msgId)
|
||||
|
||||
proc `[]`*(ps: var PreambleStore, msgId: MessageId): PreambleInfo =
|
||||
ps.byId[msgId]
|
||||
|
||||
proc `[]=`*(ps: var PreambleStore, msgId: MessageId, entry: PreambleInfo) =
|
||||
insert(ps, msgId, entry)
|
||||
|
||||
proc del*(ps: var PreambleStore, msgId: MessageId) =
|
||||
try:
|
||||
if ps.byId.hasKey(msgId):
|
||||
ps.byId[msgId].deleted = true
|
||||
ps.byId.del(msgId)
|
||||
except KeyError:
|
||||
assert false, "checked with hasKey"
|
||||
|
||||
proc len*(ps: var PreambleStore): int =
|
||||
return ps.byId.len
|
||||
|
||||
proc popExpired*(ps: var PreambleStore, now: Moment): Option[PreambleInfo] =
|
||||
while ps.heap.len > 0:
|
||||
if ps.heap[0].deleted:
|
||||
discard ps.heap.pop()
|
||||
elif ps.heap[0].expiresAt <= now:
|
||||
let top = ps.heap.pop()
|
||||
ps.byId.del(top.messageId)
|
||||
return some(top)
|
||||
else:
|
||||
return none(PreambleInfo)
|
||||
|
||||
template withValue*(ps: var PreambleStore, key: MessageId, value, body: untyped) =
|
||||
try:
|
||||
if ps.hasKey(key):
|
||||
let value {.inject.} = ps.byId[key]
|
||||
body
|
||||
except system.KeyError:
|
||||
assert false, "checked with in"
|
||||
|
||||
const maxPossiblePeersOnPeerSet = 6
|
||||
|
||||
proc addPossiblePeerToQuery*(
|
||||
ps: var PreambleStore, msgId: MessageId, peer: PubSubPeer
|
||||
) =
|
||||
if not ps.hasKey(msgId):
|
||||
return
|
||||
|
||||
try:
|
||||
var preamble = ps[msgId]
|
||||
if not preamble.peerSet.peers.contains(peer.peerId):
|
||||
if preamble.peerSet.order.len == maxPossiblePeersOnPeerSet:
|
||||
let evicted: PeerId = preamble.peerSet.order[0]
|
||||
preamble.peerSet.order.delete(0)
|
||||
preamble.peerSet.peers.excl(evicted)
|
||||
preamble.peerSet.order.add(peer.peerId)
|
||||
preamble.peerSet.peers.incl(peer.peerId)
|
||||
except KeyError:
|
||||
assert false, "checked with hasKey"
|
||||
|
||||
proc possiblePeersToQuery*(preamble: PreambleInfo): seq[PeerId] =
|
||||
preamble.peerSet.order
|
||||
@@ -10,7 +10,7 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos
|
||||
import std/[options, tables, sets]
|
||||
import std/[options, tables, sets, heapqueue]
|
||||
import ".."/[floodsub, peertable, mcache, pubsubpeer]
|
||||
import "../rpc"/[messages]
|
||||
import "../../.."/[peerid, multiaddress, utility]
|
||||
@@ -18,6 +18,7 @@ import "../../.."/[peerid, multiaddress, utility]
|
||||
export options, tables, sets
|
||||
|
||||
const
|
||||
GossipSubCodec_14* = "/meshsub/1.4.0"
|
||||
GossipSubCodec_12* = "/meshsub/1.2.0"
|
||||
GossipSubCodec_11* = "/meshsub/1.1.0"
|
||||
GossipSubCodec_10* = "/meshsub/1.0.0"
|
||||
@@ -46,10 +47,15 @@ const
|
||||
BackoffSlackTime* = 2 # seconds
|
||||
PingsPeerBudget* = 100 # maximum of 6.4kb/heartbeat (6.4kb/s with default 1 second/hb)
|
||||
IHavePeerBudget* = 10
|
||||
PreamblePeerBudget* = 10
|
||||
PullOperation* = true
|
||||
# the max amount of IHave to expose, not by spec, but go as example
|
||||
# rust sigp: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/config.rs#L572
|
||||
# go: https://github.com/libp2p/go-libp2p-pubsub/blob/08c17398fb11b2ab06ca141dddc8ec97272eb772/gossipsub.go#L155
|
||||
IHaveMaxLength* = 5000
|
||||
IDontWantMaxCount* = 1000
|
||||
# maximum number of IDontWant messages in one slot of the history
|
||||
MaxOpportunisticGraftPeers* = 2
|
||||
|
||||
type
|
||||
TopicInfo* = object # gossip 1.1 related
|
||||
@@ -62,6 +68,24 @@ type
|
||||
meshFailurePenalty*: float64
|
||||
invalidMessageDeliveries*: float64
|
||||
|
||||
PeerSet* = object
|
||||
order*: seq[PeerId]
|
||||
peers*: HashSet[PeerId]
|
||||
|
||||
PreambleInfo* = ref object
|
||||
messageId*: MessageId
|
||||
messageLength*: uint32
|
||||
topicId*: string
|
||||
sender*: PubSubPeer
|
||||
startsAt*: Moment
|
||||
expiresAt*: Moment
|
||||
deleted*: bool # tombstone marker
|
||||
peerSet*: PeerSet
|
||||
|
||||
PreambleStore* = object
|
||||
byId*: Table[MessageId, PreambleInfo]
|
||||
heap*: HeapQueue[PreambleInfo]
|
||||
|
||||
TopicParams* {.public.} = object
|
||||
topicWeight*: float64
|
||||
|
||||
@@ -159,6 +183,7 @@ type
|
||||
|
||||
BackoffTable* = Table[string, Table[PeerId, Moment]]
|
||||
ValidationSeenTable* = Table[SaltedId, HashSet[PubSubPeer]]
|
||||
OngoingReceivesStore* = PreambleStore
|
||||
|
||||
RoutingRecordsPair* = tuple[id: PeerId, record: Option[PeerRecord]]
|
||||
RoutingRecordsHandler* = proc(
|
||||
@@ -178,6 +203,9 @@ type
|
||||
mcache*: MCache # messages cache
|
||||
validationSeen*: ValidationSeenTable # peers who sent us message in validation
|
||||
heartbeatFut*: Future[void] # cancellation future for heartbeat interval
|
||||
when defined(libp2p_gossipsub_1_4):
|
||||
preambleExpirationFut*: Future[void]
|
||||
# cancellation future for preamble expiration heartbeat interval
|
||||
scoringHeartbeatFut*: Future[void]
|
||||
# cancellation future for scoring heartbeat interval
|
||||
heartbeatRunning*: bool
|
||||
@@ -191,6 +219,11 @@ type
|
||||
|
||||
heartbeatEvents*: seq[AsyncEvent]
|
||||
|
||||
when defined(libp2p_gossipsub_1_4):
|
||||
ongoingReceives*: OngoingReceivesStore # list of messages we are receiving
|
||||
ongoingIWantReceives*: OngoingReceivesStore
|
||||
# list of iwant replies we are receiving
|
||||
|
||||
MeshMetrics* = object # scratch buffers for metrics
|
||||
otherPeersPerTopicMesh*: int64
|
||||
otherPeersPerTopicFanout*: int64
|
||||
|
||||
@@ -145,6 +145,10 @@ type
|
||||
## we have to store it, which may be an attack vector.
|
||||
## This callback can be used to reject topic we're not interested in
|
||||
|
||||
PublishParams* = object
|
||||
useCustomConn*: bool
|
||||
skipMCache*: bool
|
||||
|
||||
PubSub* {.public.} = ref object of LPProtocol
|
||||
switch*: Switch # the switch used to dial/connect to peers
|
||||
peerInfo*: PeerInfo # this peer's info
|
||||
@@ -176,6 +180,7 @@ type
|
||||
rng*: ref HmacDrbgContext
|
||||
|
||||
knownTopics*: HashSet[string]
|
||||
customConnCallbacks*: Option[CustomConnectionCallbacks]
|
||||
|
||||
method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
|
||||
## handle peer disconnects
|
||||
@@ -187,7 +192,11 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
|
||||
libp2p_pubsub_peers.set(p.peers.len.int64)
|
||||
|
||||
proc send*(
|
||||
p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool
|
||||
p: PubSub,
|
||||
peer: PubSubPeer,
|
||||
msg: RPCMsg,
|
||||
isHighPriority: bool,
|
||||
useCustomConn: bool = false,
|
||||
) {.raises: [].} =
|
||||
## This procedure attempts to send a `msg` (of type `RPCMsg`) to the specified remote peer in the PubSub network.
|
||||
##
|
||||
@@ -200,13 +209,14 @@ proc send*(
|
||||
## priority messages have been sent.
|
||||
|
||||
trace "sending pubsub message to peer", peer, payload = shortLog(msg)
|
||||
peer.send(msg, p.anonymize, isHighPriority)
|
||||
peer.send(msg, p.anonymize, isHighPriority, useCustomConn)
|
||||
|
||||
proc broadcast*(
|
||||
p: PubSub,
|
||||
sendPeers: auto, # Iterable[PubSubPeer]
|
||||
msg: RPCMsg,
|
||||
isHighPriority: bool,
|
||||
useCustomConn: bool = false,
|
||||
) {.raises: [].} =
|
||||
## This procedure attempts to send a `msg` (of type `RPCMsg`) to a specified group of peers in the PubSub network.
|
||||
##
|
||||
@@ -261,12 +271,12 @@ proc broadcast*(
|
||||
|
||||
if anyIt(sendPeers, it.hasObservers):
|
||||
for peer in sendPeers:
|
||||
p.send(peer, msg, isHighPriority)
|
||||
p.send(peer, msg, isHighPriority, useCustomConn)
|
||||
else:
|
||||
# Fast path that only encodes message once
|
||||
let encoded = encodeRpcMsg(msg, p.anonymize)
|
||||
for peer in sendPeers:
|
||||
asyncSpawn peer.sendEncoded(encoded, isHighPriority)
|
||||
asyncSpawn peer.sendEncoded(encoded, isHighPriority, useCustomConn)
|
||||
|
||||
proc sendSubs*(
|
||||
p: PubSub, peer: PubSubPeer, topics: openArray[string], subscribe: bool
|
||||
@@ -373,8 +383,14 @@ method getOrCreatePeer*(
|
||||
p.onPubSubPeerEvent(peer, event)
|
||||
|
||||
# create new pubsub peer
|
||||
let pubSubPeer =
|
||||
PubSubPeer.new(peerId, getConn, onEvent, protoNegotiated, p.maxMessageSize)
|
||||
let pubSubPeer = PubSubPeer.new(
|
||||
peerId,
|
||||
getConn,
|
||||
onEvent,
|
||||
protoNegotiated,
|
||||
p.maxMessageSize,
|
||||
customConnCallbacks = p.customConnCallbacks,
|
||||
)
|
||||
debug "created new pubsub peer", peerId
|
||||
|
||||
p.peers[peerId] = pubSubPeer
|
||||
@@ -558,7 +574,10 @@ proc subscribe*(p: PubSub, topic: string, handler: TopicHandler) {.public.} =
|
||||
p.updateTopicMetrics(topic)
|
||||
|
||||
method publish*(
|
||||
p: PubSub, topic: string, data: seq[byte]
|
||||
p: PubSub,
|
||||
topic: string,
|
||||
data: seq[byte],
|
||||
publishParams: Option[PublishParams] = none(PublishParams),
|
||||
): Future[int] {.base, async: (raises: []), public.} =
|
||||
## publish to a ``topic``
|
||||
##
|
||||
@@ -648,6 +667,8 @@ proc init*[PubParams: object | bool](
|
||||
maxMessageSize: int = 1024 * 1024,
|
||||
rng: ref HmacDrbgContext = newRng(),
|
||||
parameters: PubParams = false,
|
||||
customConnCallbacks: Option[CustomConnectionCallbacks] =
|
||||
none(CustomConnectionCallbacks),
|
||||
): P {.raises: [InitializationError], public.} =
|
||||
let pubsub =
|
||||
when PubParams is bool:
|
||||
@@ -663,6 +684,7 @@ proc init*[PubParams: object | bool](
|
||||
maxMessageSize: maxMessageSize,
|
||||
rng: rng,
|
||||
topicsHigh: int.high,
|
||||
customConnCallbacks: customConnCallbacks,
|
||||
)
|
||||
else:
|
||||
P(
|
||||
@@ -678,6 +700,7 @@ proc init*[PubParams: object | bool](
|
||||
maxMessageSize: maxMessageSize,
|
||||
rng: rng,
|
||||
topicsHigh: int.high,
|
||||
customConnCallbacks: customConnCallbacks,
|
||||
)
|
||||
|
||||
proc peerEventHandler(
|
||||
|
||||
@@ -20,7 +20,8 @@ import
|
||||
../../stream/connection,
|
||||
../../crypto/crypto,
|
||||
../../protobuf/minprotobuf,
|
||||
../../utility
|
||||
../../utility,
|
||||
../../utils/sequninit
|
||||
|
||||
export peerid, connection, deques
|
||||
|
||||
@@ -95,6 +96,21 @@ type
|
||||
# Task for processing non-priority message queue.
|
||||
sendNonPriorityTask: Future[void]
|
||||
|
||||
CustomConnCreationProc* = proc(
|
||||
destAddr: Option[MultiAddress], destPeerId: PeerId, codec: string
|
||||
): Connection {.gcsafe, raises: [].}
|
||||
|
||||
CustomPeerSelectionProc* = proc(
|
||||
allPeers: HashSet[PubSubPeer],
|
||||
directPeers: HashSet[PubSubPeer],
|
||||
meshPeers: HashSet[PubSubPeer],
|
||||
fanoutPeers: HashSet[PubSubPeer],
|
||||
): HashSet[PubSubPeer] {.gcsafe, raises: [].}
|
||||
|
||||
CustomConnectionCallbacks* = object
|
||||
customConnCreationCB*: CustomConnCreationProc
|
||||
customPeerSelectionCB*: CustomPeerSelectionProc
|
||||
|
||||
PubSubPeer* = ref object of RootObj
|
||||
getConn*: GetConn # callback to establish a new send connection
|
||||
onEvent*: OnEvent # Connectivity updates for peer
|
||||
@@ -106,6 +122,9 @@ type
|
||||
handler*: RPCHandler
|
||||
observers*: ref seq[PubSubObserver] # ref as in smart_ptr
|
||||
|
||||
when defined(libp2p_gossipsub_1_4):
|
||||
bandwidthTracking*: BandwidthTracking
|
||||
|
||||
score*: float64
|
||||
sentIHaves*: Deque[HashSet[MessageId]]
|
||||
iDontWants*: Deque[HashSet[SaltedId]]
|
||||
@@ -119,10 +138,16 @@ type
|
||||
behaviourPenalty*: float64 # the eventual penalty score
|
||||
overheadRateLimitOpt*: Opt[TokenBucket]
|
||||
|
||||
when defined(libp2p_gossipsub_1_4):
|
||||
preambleBudget*: int
|
||||
heIsReceivings*: Table[MessageId, uint32]
|
||||
heIsSendings*: Table[MessageId, Moment]
|
||||
|
||||
rpcmessagequeue: RpcMessageQueue
|
||||
maxNumElementsInNonPriorityQueue*: int
|
||||
# The max number of elements allowed in the non-priority queue.
|
||||
disconnected: bool
|
||||
customConnCallbacks*: Option[CustomConnectionCallbacks]
|
||||
|
||||
RPCHandler* =
|
||||
proc(peer: PubSubPeer, data: seq[byte]): Future[void] {.async: (raises: []).}
|
||||
@@ -205,7 +230,7 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async: (raises: []).} =
|
||||
conn, peer = p, closed = conn.closed, data = data.shortLog
|
||||
|
||||
await p.handler(p, data)
|
||||
data = newSeq[byte]() # Release memory
|
||||
data = newSeqUninit[byte](0) # Release memory
|
||||
except PeerRateLimitError as exc:
|
||||
debug "Peer rate limit exceeded, exiting read while",
|
||||
conn, peer = p, description = exc.msg
|
||||
@@ -214,10 +239,10 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async: (raises: []).} =
|
||||
conn, peer = p, closed = conn.closed, description = exc.msg
|
||||
finally:
|
||||
await conn.close()
|
||||
except CancelledError:
|
||||
except CancelledError as e:
|
||||
# This is a top-level procedure which will run as a separate task, so it
|
||||
# does not need to propagate CancelledError.
|
||||
trace "Unexpected cancellation in PubSubPeer.handle"
|
||||
trace "Unexpected cancellation in PubSubPeer.handle", description = e.msg
|
||||
finally:
|
||||
debug "exiting pubsub read loop", conn, peer = p, closed = conn.closed
|
||||
|
||||
@@ -250,7 +275,7 @@ proc connectOnce(
|
||||
await p.getConn().wait(5.seconds)
|
||||
except AsyncTimeoutError as error:
|
||||
trace "getConn timed out", description = error.msg
|
||||
raise (ref LPError)(msg: "Cannot establish send connection")
|
||||
raise (ref LPError)(msg: "Cannot establish send connection: " & error.msg)
|
||||
|
||||
# When the send channel goes up, subscriptions need to be sent to the
|
||||
# remote peer - if we had multiple channels up and one goes down, all
|
||||
@@ -356,21 +381,43 @@ proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async: (raises: [CancelledErro
|
||||
trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
|
||||
await sendMsgContinue(conn, conn.writeLp(msg))
|
||||
|
||||
proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] {.async: (raises: []).} =
|
||||
if p.sendConn != nil and not p.sendConn.closed():
|
||||
# Fast path that avoids copying msg (which happens for {.async.})
|
||||
let conn = p.sendConn
|
||||
proc sendMsg(
|
||||
p: PubSubPeer, msg: seq[byte], useCustomConn: bool = false
|
||||
): Future[void] {.async: (raises: []).} =
|
||||
type ConnectionType = enum
|
||||
ctCustom
|
||||
ctSend
|
||||
ctSlow
|
||||
|
||||
trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
|
||||
var slowPath = false
|
||||
let (conn, connType) =
|
||||
if useCustomConn and p.customConnCallbacks.isSome:
|
||||
let address = p.address
|
||||
(
|
||||
p.customConnCallbacks.get().customConnCreationCB(address, p.peerId, p.codec),
|
||||
ctCustom,
|
||||
)
|
||||
elif p.sendConn != nil and not p.sendConn.closed():
|
||||
(p.sendConn, ctSend)
|
||||
else:
|
||||
slowPath = true
|
||||
(nil, ctSlow)
|
||||
|
||||
if not slowPath:
|
||||
trace "sending encoded msg to peer",
|
||||
conntype = $connType, conn = conn, encoded = shortLog(msg)
|
||||
let f = conn.writeLp(msg)
|
||||
if not f.completed():
|
||||
sendMsgContinue(conn, f)
|
||||
else:
|
||||
f
|
||||
else:
|
||||
trace "sending encoded msg to peer via slow path"
|
||||
sendMsgSlow(p, msg)
|
||||
|
||||
proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[void] =
|
||||
proc sendEncoded*(
|
||||
p: PubSubPeer, msg: seq[byte], isHighPriority: bool, useCustomConn: bool = false
|
||||
): Future[void] =
|
||||
## Asynchronously sends an encoded message to a specified `PubSubPeer`.
|
||||
##
|
||||
## Parameters:
|
||||
@@ -399,7 +446,7 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[v
|
||||
maxSize = p.maxMessageSize, msgSize = msg.len
|
||||
Future[void].completed()
|
||||
elif isHighPriority or emptyQueues:
|
||||
let f = p.sendMsg(msg)
|
||||
let f = p.sendMsg(msg, useCustomConn)
|
||||
if not f.finished:
|
||||
p.rpcmessagequeue.sendPriorityQueue.addLast(f)
|
||||
when defined(pubsubpeer_queue_metrics):
|
||||
@@ -458,7 +505,11 @@ iterator splitRPCMsg(
|
||||
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
|
||||
|
||||
proc send*(
|
||||
p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool
|
||||
p: PubSubPeer,
|
||||
msg: RPCMsg,
|
||||
anonymize: bool,
|
||||
isHighPriority: bool,
|
||||
useCustomConn: bool = false,
|
||||
) {.raises: [].} =
|
||||
## Asynchronously sends an `RPCMsg` to a specified `PubSubPeer` with an option for anonymization.
|
||||
##
|
||||
@@ -489,11 +540,11 @@ proc send*(
|
||||
|
||||
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
|
||||
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
|
||||
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority)
|
||||
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority, useCustomConn)
|
||||
else:
|
||||
# If the message size is within limits, send it as is
|
||||
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
|
||||
asyncSpawn p.sendEncoded(encoded, isHighPriority)
|
||||
asyncSpawn p.sendEncoded(encoded, isHighPriority, useCustomConn)
|
||||
|
||||
proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
|
||||
for sentIHave in p.sentIHaves.mitems():
|
||||
@@ -552,6 +603,8 @@ proc new*(
|
||||
maxMessageSize: int,
|
||||
maxNumElementsInNonPriorityQueue: int = DefaultMaxNumElementsInNonPriorityQueue,
|
||||
overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket),
|
||||
customConnCallbacks: Option[CustomConnectionCallbacks] =
|
||||
none(CustomConnectionCallbacks),
|
||||
): T =
|
||||
result = T(
|
||||
getConn: getConn,
|
||||
@@ -563,7 +616,13 @@ proc new*(
|
||||
overheadRateLimitOpt: overheadRateLimitOpt,
|
||||
rpcmessagequeue: RpcMessageQueue.new(),
|
||||
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue,
|
||||
customConnCallbacks: customConnCallbacks,
|
||||
)
|
||||
|
||||
when defined(libp2p_gossipsub_1_4):
|
||||
result.bandwidthTracking =
|
||||
BandwidthTracking(download: ExponentialMovingAverage.init())
|
||||
|
||||
result.sentIHaves.addFirst(default(HashSet[MessageId]))
|
||||
result.iDontWants.addFirst(default(HashSet[SaltedId]))
|
||||
result.startSendNonPriorityTask()
|
||||
|
||||
@@ -63,6 +63,9 @@ type
|
||||
graft*: seq[ControlGraft]
|
||||
prune*: seq[ControlPrune]
|
||||
idontwant*: seq[ControlIWant]
|
||||
when defined(libp2p_gossipsub_1_4):
|
||||
preamble*: seq[ControlPreamble]
|
||||
imreceiving*: seq[ControlIMReceiving]
|
||||
|
||||
ControlIHave* = object
|
||||
topicID*: string
|
||||
@@ -79,6 +82,15 @@ type
|
||||
peers*: seq[PeerInfoMsg]
|
||||
backoff*: uint64
|
||||
|
||||
ControlPreamble* = object
|
||||
topicID*: string
|
||||
messageID*: MessageId
|
||||
messageLength*: uint32
|
||||
|
||||
ControlIMReceiving* = object
|
||||
messageID*: MessageId
|
||||
messageLength*: uint32
|
||||
|
||||
RPCMsg* = object
|
||||
subscriptions*: seq[SubOpts]
|
||||
messages*: seq[Message]
|
||||
@@ -101,13 +113,29 @@ func shortLog*(s: ControlGraft): auto =
|
||||
func shortLog*(s: ControlPrune): auto =
|
||||
(topic: s.topicID.shortLog)
|
||||
|
||||
func shortLog*(s: ControlPreamble): auto =
|
||||
(topic: s.topicID.shortLog, messageID: s.messageID.shortLog)
|
||||
|
||||
func shortLog*(s: ControlIMReceiving): auto =
|
||||
(messageID: s.messageID.shortLog)
|
||||
|
||||
func shortLog*(c: ControlMessage): auto =
|
||||
(
|
||||
ihave: mapIt(c.ihave, it.shortLog),
|
||||
iwant: mapIt(c.iwant, it.shortLog),
|
||||
graft: mapIt(c.graft, it.shortLog),
|
||||
prune: mapIt(c.prune, it.shortLog),
|
||||
)
|
||||
when defined(libp2p_gossipsub_1_4):
|
||||
(
|
||||
ihave: mapIt(c.ihave, it.shortLog),
|
||||
iwant: mapIt(c.iwant, it.shortLog),
|
||||
graft: mapIt(c.graft, it.shortLog),
|
||||
prune: mapIt(c.prune, it.shortLog),
|
||||
preamble: mapIt(c.preamble, it.shortLog),
|
||||
imreceiving: mapIt(c.imreceiving, it.shortLog),
|
||||
)
|
||||
else:
|
||||
(
|
||||
ihave: mapIt(c.ihave, it.shortLog),
|
||||
iwant: mapIt(c.iwant, it.shortLog),
|
||||
graft: mapIt(c.graft, it.shortLog),
|
||||
prune: mapIt(c.prune, it.shortLog),
|
||||
)
|
||||
|
||||
func shortLog*(msg: Message): auto =
|
||||
(
|
||||
@@ -173,11 +201,41 @@ proc byteSize(controlPrune: ControlPrune): int =
|
||||
# 8 bytes for uint64
|
||||
|
||||
static:
|
||||
expectedFields(ControlMessage, @["ihave", "iwant", "graft", "prune", "idontwant"])
|
||||
proc byteSize(control: ControlMessage): int =
|
||||
control.ihave.foldl(a + b.byteSize, 0) + control.iwant.foldl(a + b.byteSize, 0) +
|
||||
control.graft.foldl(a + b.byteSize, 0) + control.prune.foldl(a + b.byteSize, 0) +
|
||||
control.idontwant.foldl(a + b.byteSize, 0)
|
||||
expectedFields(ControlPreamble, @["topicID", "messageID", "messageLength"])
|
||||
proc byteSize(controlPreamble: ControlPreamble): int =
|
||||
controlPreamble.topicID.len + controlPreamble.messageID.len + 4 # 4 bytes for uint32
|
||||
|
||||
proc byteSize*(preambles: seq[ControlPreamble]): int =
|
||||
preambles.foldl(a + b.byteSize, 0)
|
||||
|
||||
static:
|
||||
expectedFields(ControlIMReceiving, @["messageID", "messageLength"])
|
||||
proc byteSize(controlIMreceiving: ControlIMReceiving): int =
|
||||
controlIMreceiving.messageID.len + 4 # 4 bytes for uint32
|
||||
|
||||
proc byteSize*(imreceivings: seq[ControlIMReceiving]): int =
|
||||
imreceivings.foldl(a + b.byteSize, 0)
|
||||
|
||||
when defined(libp2p_gossipsub_1_4):
|
||||
static:
|
||||
expectedFields(
|
||||
ControlMessage,
|
||||
@["ihave", "iwant", "graft", "prune", "idontwant", "preamble", "imreceiving"],
|
||||
)
|
||||
proc byteSize(control: ControlMessage): int =
|
||||
control.ihave.foldl(a + b.byteSize, 0) + control.iwant.foldl(a + b.byteSize, 0) +
|
||||
control.graft.foldl(a + b.byteSize, 0) + control.prune.foldl(a + b.byteSize, 0) +
|
||||
control.idontwant.foldl(a + b.byteSize, 0) +
|
||||
control.preamble.foldl(a + b.byteSize, 0) +
|
||||
control.imreceiving.foldl(a + b.byteSize, 0)
|
||||
|
||||
else:
|
||||
static:
|
||||
expectedFields(ControlMessage, @["ihave", "iwant", "graft", "prune", "idontwant"])
|
||||
proc byteSize(control: ControlMessage): int =
|
||||
control.ihave.foldl(a + b.byteSize, 0) + control.iwant.foldl(a + b.byteSize, 0) +
|
||||
control.graft.foldl(a + b.byteSize, 0) + control.prune.foldl(a + b.byteSize, 0) +
|
||||
control.idontwant.foldl(a + b.byteSize, 0)
|
||||
|
||||
static:
|
||||
expectedFields(RPCMsg, @["subscriptions", "messages", "control", "ping", "pong"])
|
||||
|
||||
@@ -77,6 +77,31 @@ proc write*(pb: var ProtoBuffer, field: int, iwant: ControlIWant) =
|
||||
when defined(libp2p_protobuf_metrics):
|
||||
libp2p_pubsub_rpc_bytes_write.inc(ipb.getLen().int64, labelValues = ["iwant"])
|
||||
|
||||
proc write*(pb: var ProtoBuffer, field: int, preamble: ControlPreamble) =
|
||||
var ipb = initProtoBuffer()
|
||||
ipb.write(1, preamble.topicID)
|
||||
ipb.write(2, preamble.messageID)
|
||||
ipb.write(3, preamble.messageLength)
|
||||
|
||||
if len(ipb.buffer) > 0:
|
||||
ipb.finish()
|
||||
pb.write(field, ipb)
|
||||
|
||||
when defined(libp2p_protobuf_metrics):
|
||||
libp2p_pubsub_rpc_bytes_write.inc(ipb.getLen().int64, labelValues = ["preamble"])
|
||||
|
||||
proc write*(pb: var ProtoBuffer, field: int, imreceiving: ControlIMReceiving) =
|
||||
var ipb = initProtoBuffer()
|
||||
ipb.write(1, imreceiving.messageID)
|
||||
  ipb.write(2, imreceiving.messageLength)
  if ipb.buffer.len > 0:
    ipb.finish()
    pb.write(field, ipb)

  when defined(libp2p_protobuf_metrics):
    libp2p_pubsub_rpc_bytes_write.inc(ipb.getLen().int64, labelValues = ["imreceiving"])

proc write*(pb: var ProtoBuffer, field: int, control: ControlMessage) =
  var ipb = initProtoBuffer()
  for ihave in control.ihave:

@@ -89,6 +114,11 @@ proc write*(pb: var ProtoBuffer, field: int, control: ControlMessage) =
    ipb.write(4, prune)
  for idontwant in control.idontwant:
    ipb.write(5, idontwant)
  when defined(libp2p_gossipsub_1_4):
    for preamble in control.preamble:
      ipb.write(6, preamble)
    for imreceiving in control.imreceiving:
      ipb.write(7, imreceiving)
  if len(ipb.buffer) > 0:
    ipb.finish()
    pb.write(field, ipb)

@@ -197,6 +227,43 @@ proc decodeIWant*(pb: ProtoBuffer): ProtoResult[ControlIWant] {.inline.} =
    trace "decodeIWant: no messageIDs"
  ok(control)

proc decodePreamble*(pb: ProtoBuffer): ProtoResult[ControlPreamble] {.inline.} =
  when defined(libp2p_protobuf_metrics):
    libp2p_pubsub_rpc_bytes_read.inc(pb.getLen().int64, labelValues = ["preamble"])

  trace "decodePreamble: decoding message"
  var control = ControlPreamble()
  if ?pb.getField(1, control.topicID):
    trace "decodePreamble: read topicID", topic = control.topicID
  else:
    trace "decodePreamble: topicID is missing"
  if ?pb.getField(2, control.messageID):
    trace "decodePreamble: read messageID", message_id = control.messageID
  else:
    trace "decodePreamble: messageID is missing"
  if ?pb.getField(3, control.messageLength):
    trace "decodePreamble: read message Length", message_length = control.messageLength
  else:
    trace "decodePreamble: message Length is missing"
  ok(control)

proc decodeIMReceiving*(pb: ProtoBuffer): ProtoResult[ControlIMReceiving] {.inline.} =
  when defined(libp2p_protobuf_metrics):
    libp2p_pubsub_rpc_bytes_read.inc(pb.getLen().int64, labelValues = ["imreceiving"])

  trace "decodeIMReceiving: decoding message"
  var control = ControlIMReceiving()
  if ?pb.getField(1, control.messageID):
    trace "decodeIMReceiving: read messageID", message_id = control.messageID
  else:
    trace "decodeIMReceiving: messageID is missing"
  if ?pb.getField(2, control.messageLength):
    trace "decodeIMReceiving: read message Length",
      message_length = control.messageLength
  else:
    trace "decodeIMReceiving: message Length is missing"
  ok(control)

proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.inline.} =
  trace "decodeControl: decoding message"
  var buffer: seq[byte]

@@ -208,6 +275,10 @@ proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.inli
    var graftpbs: seq[seq[byte]]
    var prunepbs: seq[seq[byte]]
    var idontwant: seq[seq[byte]]
    when defined(libp2p_gossipsub_1_4):
      var preamble: seq[seq[byte]]
      var imreceiving: seq[seq[byte]]

    if ?cpb.getRepeatedField(1, ihavepbs):
      for item in ihavepbs:
        control.ihave.add(?decodeIHave(initProtoBuffer(item)))

@@ -223,6 +294,15 @@ proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.inli
    if ?cpb.getRepeatedField(5, idontwant):
      for item in idontwant:
        control.idontwant.add(?decodeIWant(initProtoBuffer(item)))

    when defined(libp2p_gossipsub_1_4):
      if ?cpb.getRepeatedField(6, preamble):
        for item in preamble:
          control.preamble.add(?decodePreamble(initProtoBuffer(item)))
      if ?cpb.getRepeatedField(7, imreceiving):
        for item in imreceiving:
          control.imreceiving.add(?decodeIMReceiving(initProtoBuffer(item)))

    trace "decodeControl: message statistics",
      graft_count = len(control.graft),
      prune_count = len(control.prune),
@@ -419,8 +419,8 @@ proc save(
|
||||
)
|
||||
rdv.namespaces[nsSalted].add(rdv.registered.high)
|
||||
# rdv.registerEvent.fire()
|
||||
except KeyError:
|
||||
doAssert false, "Should have key"
|
||||
except KeyError as e:
|
||||
doAssert false, "Should have key: " & e.msg
|
||||
|
||||
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
|
||||
trace "Received Register", peerId = conn.peerId, ns = r.ns
|
||||
|
||||
@@ -15,11 +15,12 @@ import chronicles
|
||||
import bearssl/[rand, hash]
|
||||
import stew/[endians2, byteutils]
|
||||
import nimcrypto/[utils, sha2, hmac]
|
||||
import ../../stream/[connection, streamseq]
|
||||
import ../../stream/[connection]
|
||||
import ../../peerid
|
||||
import ../../peerinfo
|
||||
import ../../protobuf/minprotobuf
|
||||
import ../../utility
|
||||
import ../../utils/[bytesview, sequninit]
|
||||
|
||||
import secure, ../../crypto/[crypto, chacha20poly1305, curve25519, hkdf]
|
||||
|
||||
@@ -237,13 +238,14 @@ template write_e(): untyped =
|
||||
# Sets e (which must be empty) to GENERATE_KEYPAIR().
|
||||
# Appends e.public_key to the buffer. Calls MixHash(e.public_key).
|
||||
hs.e = genKeyPair(p.rng[])
|
||||
msg.add hs.e.publicKey
|
||||
hs.ss.mixHash(hs.e.publicKey)
|
||||
|
||||
hs.e.publicKey.getBytes
|
||||
|
||||
template write_s(): untyped =
|
||||
trace "noise write s"
|
||||
# Appends EncryptAndHash(s.public_key) to the buffer.
|
||||
msg.add hs.ss.encryptAndHash(hs.s.publicKey)
|
||||
hs.ss.encryptAndHash(hs.s.publicKey)
|
||||
|
||||
template dh_ee(): untyped =
|
||||
trace "noise dh ee"
|
||||
@@ -281,9 +283,10 @@ template read_e(): untyped =
|
||||
# Sets re (which must be empty) to the next DHLEN bytes from the message.
|
||||
# Calls MixHash(re.public_key).
|
||||
hs.re[0 .. Curve25519Key.high] = msg.toOpenArray(0, Curve25519Key.high)
|
||||
msg.consume(Curve25519Key.len)
|
||||
hs.ss.mixHash(hs.re)
|
||||
|
||||
Curve25519Key.len
|
||||
|
||||
template read_s(): untyped =
|
||||
trace "noise read s", size = msg.len
|
||||
# Sets temp to the next DHLEN + 16 bytes of the message if HasKey() == True,
|
||||
@@ -300,7 +303,7 @@ template read_s(): untyped =
|
||||
Curve25519Key.len
|
||||
hs.rs[0 .. Curve25519Key.high] = hs.ss.decryptAndHash(msg.toOpenArray(0, rsLen - 1))
|
||||
|
||||
msg.consume(rsLen)
|
||||
rsLen
|
||||
|
||||
proc readFrame(
|
||||
sconn: Connection
|
||||
@@ -312,32 +315,29 @@ proc readFrame(
|
||||
if size == 0:
|
||||
return
|
||||
|
||||
var buffer = newSeqUninitialized[byte](size)
|
||||
var buffer = newSeqUninit[byte](size)
|
||||
await sconn.readExactly(addr buffer[0], buffer.len)
|
||||
return buffer
|
||||
|
||||
proc writeFrame(
|
||||
sconn: Connection, buf: openArray[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
|
||||
doAssert buf.len <= uint16.high.int
|
||||
var
|
||||
lesize = buf.len.uint16
|
||||
besize = lesize.toBytesBE
|
||||
outbuf = newSeqOfCap[byte](besize.len + buf.len)
|
||||
trace "writeFrame", sconn, size = lesize, data = shortLog(buf)
|
||||
outbuf &= besize
|
||||
outbuf &= buf
|
||||
sconn.write(outbuf)
|
||||
|
||||
proc receiveHSMessage(
|
||||
sconn: Connection
|
||||
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
|
||||
readFrame(sconn)
|
||||
|
||||
proc sendHSMessage(
|
||||
sconn: Connection, buf: openArray[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
|
||||
writeFrame(sconn, buf)
|
||||
template sendHSMessage(sconn: Connection, parts: varargs[seq[byte]]): untyped =
|
||||
# sends message (message frame) using multiple seq[byte] that
|
||||
# concatenated represent entire mesage.
|
||||
|
||||
var msgSize: int
|
||||
for p in parts:
|
||||
msgSize += p.len
|
||||
|
||||
trace "sendHSMessage", sconn, size = msgSize
|
||||
doAssert msgSize <= uint16.high.int
|
||||
|
||||
await sconn.write(@(msgSize.uint16.toBytesBE))
|
||||
for p in parts:
|
||||
await sconn.write(p)
|
||||
|
||||
proc handshakeXXOutbound(
|
||||
p: Noise, conn: Connection, p2pSecret: seq[byte]
|
||||
@@ -348,38 +348,30 @@ proc handshakeXXOutbound(
|
||||
try:
|
||||
hs.ss.mixHash(p.commonPrologue)
|
||||
hs.s = p.noiseKeys
|
||||
var remoteP2psecret: seq[byte]
|
||||
|
||||
# -> e
|
||||
var msg: StreamSeq
|
||||
block: # -> e
|
||||
let ebytes = write_e()
|
||||
# IK might use this btw!
|
||||
let hbytes = hs.ss.encryptAndHash([])
|
||||
|
||||
write_e()
|
||||
conn.sendHSMessage(ebytes, hbytes)
|
||||
|
||||
# IK might use this btw!
|
||||
msg.add hs.ss.encryptAndHash([])
|
||||
block: # <- e, ee, s, es
|
||||
var msg = BytesView.init(await conn.receiveHSMessage())
|
||||
msg.consume(read_e())
|
||||
dh_ee()
|
||||
msg.consume(read_s())
|
||||
dh_es()
|
||||
remoteP2psecret = hs.ss.decryptAndHash(msg.data())
|
||||
|
||||
await conn.sendHSMessage(msg.data)
|
||||
block: # -> s, se
|
||||
let sbytes = write_s()
|
||||
dh_se()
|
||||
# last payload must follow the encrypted way of sending
|
||||
let hbytes = hs.ss.encryptAndHash(p2pSecret)
|
||||
|
||||
# <- e, ee, s, es
|
||||
|
||||
msg.assign(await conn.receiveHSMessage())
|
||||
|
||||
read_e()
|
||||
dh_ee()
|
||||
read_s()
|
||||
dh_es()
|
||||
|
||||
let remoteP2psecret = hs.ss.decryptAndHash(msg.data)
|
||||
msg.clear()
|
||||
|
||||
# -> s, se
|
||||
|
||||
write_s()
|
||||
dh_se()
|
||||
|
||||
# last payload must follow the encrypted way of sending
|
||||
msg.add hs.ss.encryptAndHash(p2pSecret)
|
||||
|
||||
await conn.sendHSMessage(msg.data)
|
||||
conn.sendHSMessage(sbytes, hbytes)
|
||||
|
||||
let (cs1, cs2) = hs.ss.split()
|
||||
return
|
||||
@@ -397,41 +389,30 @@ proc handshakeXXInbound(
|
||||
try:
|
||||
hs.ss.mixHash(p.commonPrologue)
|
||||
hs.s = p.noiseKeys
|
||||
var remoteP2psecret: seq[byte]
|
||||
|
||||
# -> e
|
||||
block: # <- e
|
||||
var msg = BytesView.init(await conn.receiveHSMessage())
|
||||
msg.consume(read_e())
|
||||
# we might use this early data one day, keeping it here for clarity
|
||||
let earlyData {.used.} = hs.ss.decryptAndHash(msg.data())
|
||||
|
||||
var msg: StreamSeq
|
||||
msg.add(await conn.receiveHSMessage())
|
||||
block: # -> e, ee, s, es
|
||||
let ebytes = write_e()
|
||||
dh_ee()
|
||||
let sbytes = write_s()
|
||||
dh_es()
|
||||
let hbytes = hs.ss.encryptAndHash(p2pSecret)
|
||||
|
||||
read_e()
|
||||
conn.sendHSMessage(ebytes, sbytes, hbytes)
|
||||
|
||||
# we might use this early data one day, keeping it here for clarity
|
||||
let earlyData {.used.} = hs.ss.decryptAndHash(msg.data)
|
||||
block: # <- s, se
|
||||
var msg = BytesView.init(await conn.receiveHSMessage())
|
||||
msg.consume(read_s())
|
||||
dh_se()
|
||||
remoteP2psecret = hs.ss.decryptAndHash(msg.data())
|
||||
|
||||
# <- e, ee, s, es
|
||||
|
||||
msg.consume(msg.len)
|
||||
|
||||
write_e()
|
||||
dh_ee()
|
||||
write_s()
|
||||
dh_es()
|
||||
|
||||
msg.add hs.ss.encryptAndHash(p2pSecret)
|
||||
|
||||
await conn.sendHSMessage(msg.data)
|
||||
msg.clear()
|
||||
|
||||
# -> s, se
|
||||
|
||||
msg.add(await conn.receiveHSMessage())
|
||||
|
||||
read_s()
|
||||
dh_se()
|
||||
|
||||
let
|
||||
remoteP2psecret = hs.ss.decryptAndHash(msg.data)
|
||||
(cs1, cs2) = hs.ss.split()
|
||||
let (cs1, cs2) = hs.ss.split()
|
||||
return
|
||||
HandshakeResult(cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
|
||||
finally:
|
||||
@@ -477,7 +458,7 @@ method write*(
|
||||
let frames = (message.len + MaxPlainSize - 1) div MaxPlainSize
|
||||
|
||||
var
|
||||
cipherFrames = newSeqUninitialized[byte](message.len + frames * FramingSize)
|
||||
cipherFrames = newSeqUninit[byte](message.len + frames * FramingSize)
|
||||
left = message.len
|
||||
offset = 0
|
||||
woffset = 0
|
||||
|
||||
@@ -15,7 +15,7 @@ import results
|
||||
import chronos, chronicles
|
||||
import
|
||||
../protocol,
|
||||
../../stream/streamseq,
|
||||
../../utils/zeroqueue,
|
||||
../../stream/connection,
|
||||
../../multiaddress,
|
||||
../../peerinfo
|
||||
@@ -32,7 +32,7 @@ type
|
||||
|
||||
SecureConn* = ref object of Connection
|
||||
stream*: Connection
|
||||
buf: StreamSeq
|
||||
buf: ZeroQueue
|
||||
|
||||
func shortLog*(conn: SecureConn): auto =
|
||||
try:
|
||||
@@ -110,8 +110,8 @@ proc handleConn(
|
||||
fut2 = sconn.join()
|
||||
try: # https://github.com/status-im/nim-chronos/issues/516
|
||||
discard await race(fut1, fut2)
|
||||
except ValueError:
|
||||
raiseAssert("Futures list is not empty")
|
||||
except ValueError as e:
|
||||
raiseAssert("Futures list is not empty: " & e.msg)
|
||||
# at least one join() completed, cancel pending one, if any
|
||||
if not fut1.finished:
|
||||
await fut1.cancelAndWait()
|
||||
@@ -174,22 +174,21 @@ method readOnce*(
|
||||
if s.isEof:
|
||||
raise newLPStreamEOFError()
|
||||
|
||||
if s.buf.data().len() == 0:
|
||||
if s.buf.isEmpty:
|
||||
try:
|
||||
let buf = await s.readMessage() # Always returns >0 bytes or raises
|
||||
s.activity = true
|
||||
s.buf.add(buf)
|
||||
s.buf.push(buf)
|
||||
except LPStreamEOFError as err:
|
||||
s.isEof = true
|
||||
await s.close()
|
||||
raise err
|
||||
raise newException(LPStreamEOFError, "Secure connection EOF: " & err.msg, err)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except LPStreamError as err:
|
||||
debug "Error while reading message from secure connection, closing.",
|
||||
error = err.name, message = err.msg, connection = s
|
||||
await s.close()
|
||||
raise err
|
||||
raise newException(LPStreamError, "Secure connection read error: " & err.msg, err)
|
||||
|
||||
var p = cast[ptr UncheckedArray[byte]](pbytes)
|
||||
return s.buf.consumeTo(toOpenArray(p, 0, nbytes - 1))
|
||||
return s.buf.consumeTo(pbytes, nbytes)
|
||||
|
||||
@@ -55,7 +55,7 @@ proc tryStartingDirectConn(
|
||||
if not isRelayed.get(false) and address.isPublicMA():
|
||||
return await tryConnect(address)
|
||||
except CatchableError as err:
|
||||
debug "Failed to create direct connection.", err = err.msg
|
||||
debug "Failed to create direct connection.", description = err.msg
|
||||
continue
|
||||
return false
|
||||
|
||||
@@ -91,7 +91,7 @@ proc newConnectedPeerHandler(
|
||||
except CancelledError as err:
|
||||
raise err
|
||||
except CatchableError as err:
|
||||
debug "Hole punching failed during dcutr", err = err.msg
|
||||
debug "Hole punching failed during dcutr", description = err.msg
|
||||
|
||||
method setup*(
|
||||
self: HPService, switch: Switch
|
||||
@@ -104,7 +104,7 @@ method setup*(
|
||||
let dcutrProto = Dcutr.new(switch)
|
||||
switch.mount(dcutrProto)
|
||||
except LPError as err:
|
||||
error "Failed to mount Dcutr", err = err.msg
|
||||
error "Failed to mount Dcutr", description = err.msg
|
||||
|
||||
self.newConnectedPeerHandler = proc(
|
||||
peerId: PeerId, event: PeerEvent
|
||||
|
||||
@@ -10,10 +10,9 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import std/strformat
|
||||
import stew/byteutils
|
||||
import chronos, chronicles, metrics
|
||||
import ../stream/connection
|
||||
import ./streamseq
|
||||
import ../utils/zeroqueue
|
||||
|
||||
export connection
|
||||
|
||||
@@ -24,7 +23,7 @@ const BufferStreamTrackerName* = "BufferStream"
|
||||
|
||||
type BufferStream* = ref object of Connection
|
||||
readQueue*: AsyncQueue[seq[byte]] # read queue for managing backpressure
|
||||
readBuf*: StreamSeq # overflow buffer for readOnce
|
||||
readBuf: ZeroQueue # zero queue buffer for readOnce
|
||||
pushing*: bool # number of ongoing push operations
|
||||
reading*: bool # is there an ongoing read? (only allow one)
|
||||
pushedEof*: bool # eof marker has been put on readQueue
|
||||
@@ -43,7 +42,7 @@ chronicles.formatIt(BufferStream):
|
||||
shortLog(it)
|
||||
|
||||
proc len*(s: BufferStream): int =
|
||||
s.readBuf.len + (if s.readQueue.len > 0: s.readQueue[0].len()
|
||||
s.readBuf.len.int + (if s.readQueue.len > 0: s.readQueue[0].len()
|
||||
else: 0)
|
||||
|
||||
method initStream*(s: BufferStream) =
|
||||
@@ -62,7 +61,7 @@ proc new*(T: typedesc[BufferStream], timeout: Duration = DefaultConnectionTimeou
|
||||
bufferStream
|
||||
|
||||
method pushData*(
|
||||
s: BufferStream, data: seq[byte]
|
||||
s: BufferStream, data: sink seq[byte]
|
||||
) {.base, async: (raises: [CancelledError, LPStreamError]).} =
|
||||
## Write bytes to internal read buffer, use this to fill up the
|
||||
## buffer with data.
|
||||
@@ -107,7 +106,7 @@ method pushEof*(
|
||||
s.pushing = false
|
||||
|
||||
method atEof*(s: BufferStream): bool =
|
||||
s.isEof and s.readBuf.len == 0
|
||||
s.isEof and s.readBuf.isEmpty
|
||||
|
||||
method readOnce*(
|
||||
s: BufferStream, pbytes: pointer, nbytes: int
|
||||
@@ -118,20 +117,12 @@ method readOnce*(
|
||||
if s.returnedEof:
|
||||
raise newLPStreamEOFError()
|
||||
|
||||
var p = cast[ptr UncheckedArray[byte]](pbytes)
|
||||
|
||||
# First consume leftovers from previous read
|
||||
var rbytes = s.readBuf.consumeTo(toOpenArray(p, 0, nbytes - 1))
|
||||
|
||||
if rbytes < nbytes and not s.isEof:
|
||||
# There's space in the buffer - consume some data from the read queue
|
||||
s.reading = true
|
||||
if not s.isEof and s.readBuf.len < nbytes:
|
||||
let buf =
|
||||
try:
|
||||
s.reading = true
|
||||
await s.readQueue.popFirst()
|
||||
except CancelledError as exc:
|
||||
# Not very efficient, but shouldn't happen often
|
||||
s.readBuf.assign(@(p.toOpenArray(0, rbytes - 1)) & @(s.readBuf.data))
|
||||
raise exc
|
||||
finally:
|
||||
s.reading = false
|
||||
@@ -141,28 +132,18 @@ method readOnce*(
|
||||
trace "EOF", s
|
||||
s.isEof = true
|
||||
else:
|
||||
let remaining = min(buf.len, nbytes - rbytes)
|
||||
toOpenArray(p, rbytes, nbytes - 1)[0 ..< remaining] =
|
||||
buf.toOpenArray(0, remaining - 1)
|
||||
rbytes += remaining
|
||||
|
||||
if remaining < buf.len:
|
||||
trace "add leftovers", s, len = buf.len - remaining
|
||||
s.readBuf.add(buf.toOpenArray(remaining, buf.high))
|
||||
|
||||
if s.isEof and s.readBuf.len() == 0:
|
||||
# We can clear the readBuf memory since it won't be used any more
|
||||
s.readBuf = StreamSeq()
|
||||
s.readBuf.push(buf)
|
||||
|
||||
let consumed = s.readBuf.consumeTo(pbytes, nbytes)
|
||||
s.activity = true
|
||||
|
||||
# We want to return 0 exactly once - after that, we'll start raising instead -
|
||||
# this is a bit nuts in a mixed exception / return value world, but allows the
|
||||
# consumer of the stream to rely on the 0-byte read as a "regular" EOF marker
|
||||
# (instead of _sometimes_ getting an exception).
|
||||
s.returnedEof = rbytes == 0
|
||||
s.returnedEof = consumed == 0
|
||||
|
||||
return rbytes
|
||||
return consumed
|
||||
|
||||
method closeImpl*(s: BufferStream): Future[void] {.async: (raises: [], raw: true).} =
|
||||
## close the stream and clear the buffer
|
||||
@@ -171,7 +152,6 @@ method closeImpl*(s: BufferStream): Future[void] {.async: (raises: [], raw: true
|
||||
# First, make sure any new calls to `readOnce` and `pushData` etc will fail -
|
||||
# there may already be such calls in the event queue however
|
||||
s.isEof = true
|
||||
s.readBuf = StreamSeq()
|
||||
s.pushedEof = true
|
||||
|
||||
# Essentially we need to handle the following cases
|
||||
@@ -199,8 +179,10 @@ method closeImpl*(s: BufferStream): Future[void] {.async: (raises: [], raw: true
|
||||
elif s.pushing:
|
||||
if not s.readQueue.empty():
|
||||
discard s.readQueue.popFirstNoWait()
|
||||
except AsyncQueueFullError, AsyncQueueEmptyError:
|
||||
raiseAssert(getCurrentExceptionMsg())
|
||||
except AsyncQueueFullError as e:
|
||||
raiseAssert("closeImpl failed queue full: " & e.msg)
|
||||
except AsyncQueueEmptyError as e:
|
||||
raiseAssert("closeImpl failed queue empty: " & e.msg)
|
||||
|
||||
trace "Closed BufferStream", s
|
||||
|
||||
|
||||
@@ -34,8 +34,6 @@ when defined(libp2p_agents_metrics):
|
||||
declareCounter libp2p_peers_traffic_read, "incoming traffic", labels = ["agent"]
|
||||
declareCounter libp2p_peers_traffic_write, "outgoing traffic", labels = ["agent"]
|
||||
|
||||
declareCounter libp2p_network_bytes, "total traffic", labels = ["direction"]
|
||||
|
||||
func shortLog*(conn: ChronosStream): auto =
|
||||
try:
|
||||
if conn == nil:
|
||||
|
||||
@@ -52,6 +52,8 @@ func shortLog*(conn: Connection): string =
|
||||
chronicles.formatIt(Connection):
|
||||
shortLog(it)
|
||||
|
||||
declarePublicCounter libp2p_network_bytes, "total traffic", labels = ["direction"]
|
||||
|
||||
method initStream*(s: Connection) =
|
||||
if s.objName.len == 0:
|
||||
s.objName = ConnectionTrackerName
|
||||
|
||||
@@ -16,6 +16,7 @@ import std/oids
|
||||
import stew/byteutils
|
||||
import chronicles, chronos, metrics
|
||||
import ../varint, ../peerinfo, ../multiaddress, ../utility, ../errors
|
||||
import ../utils/sequninit
|
||||
|
||||
export errors
|
||||
|
||||
@@ -113,9 +114,9 @@ method initStream*(s: LPStream) {.base.} =
|
||||
trackCounter(s.objName)
|
||||
trace "Stream created", s, objName = s.objName, dir = $s.dir
|
||||
|
||||
proc join*(
|
||||
method join*(
|
||||
s: LPStream
|
||||
): Future[void] {.async: (raises: [CancelledError], raw: true), public.} =
|
||||
): Future[void] {.base, async: (raises: [CancelledError], raw: true), public.} =
|
||||
## Wait for the stream to be closed
|
||||
s.closeEvent.wait()
|
||||
|
||||
@@ -135,9 +136,9 @@ method readOnce*(
|
||||
## available
|
||||
raiseAssert("[LPStream.readOnce] abstract method not implemented!")
|
||||
|
||||
proc readExactly*(
|
||||
method readExactly*(
|
||||
s: LPStream, pbytes: pointer, nbytes: int
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
): Future[void] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
## Waits for `nbytes` to be available, then read
|
||||
## them and return them
|
||||
if s.atEof:
|
||||
@@ -171,9 +172,9 @@ proc readExactly*(
|
||||
trace "couldn't read all bytes, incomplete data", s, nbytes, read
|
||||
raise newLPStreamIncompleteError()
|
||||
|
||||
proc readLine*(
|
||||
method readLine*(
|
||||
s: LPStream, limit = 0, sep = "\r\n"
|
||||
): Future[string] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
): Future[string] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
## Reads up to `limit` bytes are read, or a `sep` is found
|
||||
# TODO replace with something that exploits buffering better
|
||||
var lim = if limit <= 0: -1 else: limit
|
||||
@@ -199,9 +200,9 @@ proc readLine*(
|
||||
if len(result) == lim:
|
||||
break
|
||||
|
||||
proc readVarint*(
|
||||
method readVarint*(
|
||||
conn: LPStream
|
||||
): Future[uint64] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
): Future[uint64] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
var buffer: array[10, byte]
|
||||
|
||||
for i in 0 ..< len(buffer):
|
||||
@@ -218,9 +219,9 @@ proc readVarint*(
|
||||
if true: # can't end with a raise apparently
|
||||
raise (ref InvalidVarintError)(msg: "Cannot parse varint")
|
||||
|
||||
proc readLp*(
|
||||
method readLp*(
|
||||
s: LPStream, maxSize: int
|
||||
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
): Future[seq[byte]] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
## read length prefixed msg, with the length encoded as a varint
|
||||
let
|
||||
length = await s.readVarint()
|
||||
@@ -232,7 +233,7 @@ proc readLp*(
|
||||
if length == 0:
|
||||
return
|
||||
|
||||
var res = newSeqUninitialized[byte](length)
|
||||
var res = newSeqUninit[byte](length)
|
||||
await s.readExactly(addr res[0], res.len)
|
||||
res
|
||||
|
||||
@@ -244,19 +245,23 @@ method write*(
|
||||
# Write `msg` to stream, waiting for the write to be finished
|
||||
raiseAssert("[LPStream.write] abstract method not implemented!")
|
||||
|
||||
proc writeLp*(
|
||||
method writeLp*(
|
||||
s: LPStream, msg: openArray[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
|
||||
): Future[void] {.
|
||||
base, async: (raises: [CancelledError, LPStreamError], raw: true), public
|
||||
.} =
|
||||
## Write `msg` with a varint-encoded length prefix
|
||||
let vbytes = PB.toBytes(msg.len().uint64)
|
||||
var buf = newSeqUninitialized[byte](msg.len() + vbytes.len)
|
||||
var buf = newSeqUninit[byte](msg.len() + vbytes.len)
|
||||
buf[0 ..< vbytes.len] = vbytes.toOpenArray()
|
||||
buf[vbytes.len ..< buf.len] = msg
|
||||
s.write(buf)
|
||||
|
||||
proc writeLp*(
|
||||
method writeLp*(
|
||||
s: LPStream, msg: string
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
|
||||
): Future[void] {.
|
||||
base, async: (raises: [CancelledError, LPStreamError], raw: true), public
|
||||
.} =
|
||||
writeLp(s, msg.toOpenArrayByte(0, msg.high))
|
||||
|
||||
proc write*(
|
||||
@@ -324,7 +329,7 @@ proc closeWithEOF*(s: LPStream): Future[void] {.async: (raises: []), public.} =
|
||||
debug "Unexpected bytes while waiting for EOF", s
|
||||
except CancelledError:
|
||||
discard
|
||||
except LPStreamEOFError:
|
||||
trace "Expected EOF came", s
|
||||
except LPStreamEOFError as e:
|
||||
trace "Expected EOF came", s, description = e.msg
|
||||
except LPStreamError as exc:
|
||||
debug "Unexpected error while waiting for EOF", s, description = exc.msg
|
||||
|
||||
@@ -1,94 +0,0 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import stew/bitops2

type StreamSeq* = object
  # Seq adapted to the stream use case where we add data at the back and
  # consume at the front in chunks. A bit like a deque but contiguous memory
  # area - will try to avoid moving data unless it has to, subject to buffer
  # space. The assumption is that data is typically consumed fully.
  #
  # See also asio::stream_buf
  buf: seq[byte] # Data store
  rpos: int # Reading position - valid data starts here
  wpos: int # Writing position - valid data ends here

template len*(v: StreamSeq): int =
  v.wpos - v.rpos

func grow(v: var StreamSeq, n: int) =
  if v.rpos == v.wpos:
    # All data has been consumed, reset positions
    v.rpos = 0
    v.wpos = 0

  if v.buf.len - v.wpos < n:
    if v.rpos > 0:
      # We've consumed some data so we'll try to move that data to the beginning
      # of the buffer, hoping that this will clear up enough capacity to avoid
      # reallocation
      moveMem(addr v.buf[0], addr v.buf[v.rpos], v.wpos - v.rpos)
      v.wpos -= v.rpos
      v.rpos = 0

      if v.buf.len - v.wpos >= n:
        return

    # TODO this is inefficient - `setLen` will copy all data of buf, even though
    # we know that only a part of it contains "valid" data
    v.buf.setLen(nextPow2(max(64, v.wpos + n).uint64).int)

template prepare*(v: var StreamSeq, n: int): var openArray[byte] =
  ## Return a buffer that is at least `n` bytes long
  mixin grow
  v.grow(n)

  v.buf.toOpenArray(v.wpos, v.buf.len - 1)

template commit*(v: var StreamSeq, n: int) =
  ## Mark `n` bytes in the buffer returned by `prepare` as ready for reading
  v.wpos += n

func add*(v: var StreamSeq, data: openArray[byte]) =
  ## Add data - the equivalent of `buf.prepare(n) = data; buf.commit(n)`
  if data.len > 0:
    v.grow(data.len)
    copyMem(addr v.buf[v.wpos], unsafeAddr data[0], data.len)
    v.commit(data.len)

template data*(v: StreamSeq): openArray[byte] =
  # Data that is ready to be consumed
  # TODO a double-hash comment here breaks compile (!)
  v.buf.toOpenArray(v.rpos, v.wpos - 1)

template toOpenArray*(v: StreamSeq, b, e: int): openArray[byte] =
  # Data that is ready to be consumed
  # TODO a double-hash comment here breaks compile (!)
  v.buf.toOpenArray(v.rpos + b, v.rpos + e - b)

func consume*(v: var StreamSeq, n: int) =
  ## Mark `n` bytes that were returned via `data` as consumed
  v.rpos += n

func consumeTo*(v: var StreamSeq, buf: var openArray[byte]): int =
  let bytes = min(buf.len, v.len)
  if bytes > 0:
    copyMem(addr buf[0], addr v.buf[v.rpos], bytes)
    v.consume(bytes)
  bytes

func clear*(v: var StreamSeq) =
  v.consume(v.len)

func assign*(v: var StreamSeq, buf: openArray[byte]) =
  v.clear()
  v.add(buf)
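For context, a minimal sketch of the prepare/commit/consume pattern offered by the StreamSeq type deleted above (illustration only, not part of the diff; it shows the API that ZeroQueue and BytesView replace):

  var ss: StreamSeq
  ss.add([1'u8, 2, 3])           # append data at the back
  var tmp: array[2, byte]
  let n = ss.consumeTo(tmp)      # copy out up to tmp.len bytes from the front
  doAssert n == 2 and ss.len == 1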
@@ -233,7 +233,7 @@ proc upgrader(
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
raise newException(UpgradeError, e.msg, e)
|
||||
raise newException(UpgradeError, "catchable error upgrader: " & e.msg, e)
|
||||
|
||||
proc upgradeMonitor(
|
||||
switch: Switch, trans: Transport, conn: Connection, upgrades: AsyncSemaphore
|
||||
@@ -275,7 +275,8 @@ proc accept(s: Switch, transport: Transport) {.async: (raises: []).} =
|
||||
await transport.accept()
|
||||
except CatchableError as exc:
|
||||
slot.release()
|
||||
raise exc
|
||||
raise
|
||||
newException(CatchableError, "failed to accept connection: " & exc.msg, exc)
|
||||
slot.trackConnection(conn)
|
||||
if isNil(conn):
|
||||
# A nil connection means that we might have hit a
|
||||
@@ -357,7 +358,9 @@ proc start*(s: Switch) {.public, async: (raises: [CancelledError, LPError]).} =
|
||||
for fut in startFuts:
|
||||
if fut.failed:
|
||||
await s.stop()
|
||||
raise newException(LPError, "starting transports failed", fut.error)
|
||||
raise newException(
|
||||
LPError, "starting transports failed: " & $fut.error.msg, fut.error
|
||||
)
|
||||
|
||||
for t in s.transports: # for each transport
|
||||
if t.addrs.len > 0 or t.running:
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
import std/sequtils
|
||||
import pkg/chronos
|
||||
import pkg/chronicles
|
||||
import pkg/quic
|
||||
import chronos
|
||||
import chronicles
|
||||
import metrics
|
||||
import quic
|
||||
import results
|
||||
import ../multiaddress
|
||||
import ../multicodec
|
||||
@@ -41,6 +42,9 @@ proc new(
|
||||
procCall P2PConnection(quicstream).initStream()
|
||||
quicstream
|
||||
|
||||
method getWrapped*(self: QuicStream): P2PConnection =
|
||||
nil
|
||||
|
||||
template mapExceptions(body: untyped) =
|
||||
try:
|
||||
body
|
||||
@@ -58,6 +62,7 @@ method readOnce*(
|
||||
result = min(nbytes, stream.cached.len)
|
||||
copyMem(pbytes, addr stream.cached[0], result)
|
||||
stream.cached = stream.cached[result ..^ 1]
|
||||
libp2p_network_bytes.inc(result.int64, labelValues = ["in"])
|
||||
except CatchableError as exc:
|
||||
raise newLPStreamEOFError()
|
||||
|
||||
@@ -66,6 +71,7 @@ method write*(
|
||||
stream: QuicStream, bytes: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
mapExceptions(await stream.stream.write(bytes))
|
||||
libp2p_network_bytes.inc(bytes.len.int64, labelValues = ["out"])
|
||||
|
||||
{.pop.}
|
||||
|
||||
@@ -98,7 +104,7 @@ proc getStream*(
|
||||
return QuicStream.new(stream, session.observedAddr, session.peerId)
|
||||
except CatchableError as exc:
|
||||
# TODO: incomingStream is using {.async.} with no raises
|
||||
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
|
||||
raise (ref QuicTransportError)(msg: "error in getStream: " & exc.msg, parent: exc)
|
||||
|
||||
method getWrapped*(self: QuicSession): P2PConnection =
|
||||
nil
|
||||
@@ -116,7 +122,7 @@ method newStream*(
|
||||
try:
|
||||
return await m.quicSession.getStream(Direction.Out)
|
||||
except CatchableError as exc:
|
||||
raise newException(MuxerError, exc.msg, exc)
|
||||
raise newException(MuxerError, "error in newStream: " & exc.msg, exc)
|
||||
|
||||
proc handleStream(m: QuicMuxer, chann: QuicStream) {.async: (raises: []).} =
|
||||
## call the muxer stream handler for this channel
|
||||
@@ -233,11 +239,16 @@ method start*(
|
||||
except QuicConfigError as exc:
|
||||
doAssert false, "invalid quic setup: " & $exc.msg
|
||||
except TLSCertificateError as exc:
|
||||
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
|
||||
raise (ref QuicTransportError)(
|
||||
msg: "tlscert error in quic start: " & exc.msg, parent: exc
|
||||
)
|
||||
except QuicError as exc:
|
||||
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
|
||||
raise
|
||||
(ref QuicTransportError)(msg: "quicerror in quic start: " & exc.msg, parent: exc)
|
||||
except TransportOsError as exc:
|
||||
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
|
||||
raise (ref QuicTransportError)(
|
||||
msg: "transport error in quic start: " & exc.msg, parent: exc
|
||||
)
|
||||
self.running = true
|
||||
|
||||
method stop*(transport: QuicTransport) {.async: (raises: []).} =
|
||||
@@ -315,7 +326,7 @@ method dial*(
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
raise newException(QuicTransportDialError, e.msg, e)
|
||||
raise newException(QuicTransportDialError, "error in quic dial:" & e.msg, e)
|
||||
|
||||
method upgrade*(
|
||||
self: QuicTransport, conn: P2PConnection, peerId: Opt[PeerId]
|
||||
|
||||
@@ -133,7 +133,9 @@ method start*(
|
||||
try:
|
||||
createStreamServer(ta, flags = self.flags)
|
||||
except common.TransportError as exc:
|
||||
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
|
||||
raise (ref TcpTransportError)(
|
||||
msg: "transport error in TcpTransport start:" & exc.msg, parent: exc
|
||||
)
|
||||
|
||||
self.servers &= server
|
||||
|
||||
@@ -250,9 +252,13 @@ method accept*(
|
||||
except TransportUseClosedError as exc:
|
||||
raise newTransportClosedError(exc)
|
||||
except TransportOsError as exc:
|
||||
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
|
||||
raise (ref TcpTransportError)(
|
||||
msg: "TransportOs error in accept:" & exc.msg, parent: exc
|
||||
)
|
||||
except common.TransportError as exc: # Needed for chronos 4.0.0 support
|
||||
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
|
||||
raise (ref TcpTransportError)(
|
||||
msg: "TransportError in accept: " & exc.msg, parent: exc
|
||||
)
|
||||
except CancelledError as exc:
|
||||
cancelAcceptFuts()
|
||||
raise exc
|
||||
@@ -302,7 +308,8 @@ method dial*(
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
|
||||
raise
|
||||
(ref TcpTransportError)(msg: "TcpTransport dial error: " & exc.msg, parent: exc)
|
||||
|
||||
# If `stop` is called after `connect` but before `await` returns, we might
|
||||
# end up with a race condition where `stop` returns but not all connections
|
||||
@@ -318,7 +325,7 @@ method dial*(
|
||||
MultiAddress.init(transp.remoteAddress).expect("remote address is valid")
|
||||
except TransportOsError as exc:
|
||||
safeCloseWait(transp)
|
||||
raise (ref TcpTransportError)(msg: exc.msg)
|
||||
raise (ref TcpTransportError)(msg: "MultiAddress.init error in dial: " & exc.msg)
|
||||
|
||||
self.connHandler(transp, Opt.some(observedAddr), Direction.Out)
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ import ../../crypto/crypto
|
||||
import ../../errors
|
||||
import ./certificate_ffi
|
||||
import ../../../libp2p/peerid
|
||||
import ../../utils/sequninit
|
||||
|
||||
logScope:
|
||||
topics = "libp2p tls certificate"
|
||||
@@ -98,7 +99,7 @@ func makeSignatureMessage(pubKey: seq[byte]): seq[byte] {.inline.} =
|
||||
##
|
||||
let P2P_SIGNING_PREFIX = "libp2p-tls-handshake:".toBytes()
|
||||
let prefixLen = P2P_SIGNING_PREFIX.len.int
|
||||
let msg = newSeq[byte](prefixLen + pubKey.len)
|
||||
let msg = newSeqUninit[byte](prefixLen + pubKey.len)
|
||||
copyMem(msg[0].unsafeAddr, P2P_SIGNING_PREFIX[0].unsafeAddr, prefixLen)
|
||||
copyMem(msg[prefixLen].unsafeAddr, pubKey[0].unsafeAddr, pubKey.len.int)
|
||||
|
||||
@@ -118,8 +119,8 @@ proc makeASN1Time(time: Time): string {.inline.} =
|
||||
try:
|
||||
let f = initTimeFormat("yyyyMMddhhmmss")
|
||||
format(time.utc(), f)
|
||||
except TimeFormatParseError:
|
||||
raiseAssert "time format is const and checked with test"
|
||||
except TimeFormatParseError as e:
|
||||
raiseAssert "time format is const and checked with test: " & e.msg
|
||||
|
||||
return str & "Z"
|
||||
|
||||
@@ -278,7 +279,7 @@ proc parse*(
|
||||
validTo = parseCertTime($certParsed.valid_to)
|
||||
except TimeParseError as e:
|
||||
raise newException(
|
||||
CertificateParsingError, "Failed to parse certificate validity time, " & $e.msg
|
||||
CertificateParsingError, "Failed to parse certificate validity time: " & $e.msg, e
|
||||
)
|
||||
|
||||
P2pCertificate(
|
||||
|
||||
@@ -18,6 +18,7 @@ import
|
||||
transport,
|
||||
tcptransport,
|
||||
../switch,
|
||||
../autotls/service,
|
||||
../builders,
|
||||
../stream/[lpstream, connection, chronosstream],
|
||||
../multiaddress,
|
||||
@@ -243,7 +244,9 @@ method dial*(
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
safeCloseWait(transp)
|
||||
raise newException(transport.TransportDialError, e.msg, e)
|
||||
raise newException(
|
||||
transport.TransportDialError, "error in dial TorTransport: " & e.msg, e
|
||||
)
|
||||
|
||||
method start*(
|
||||
self: TorTransport, addrs: seq[MultiAddress]
|
||||
@@ -301,7 +304,7 @@ proc new*(
|
||||
flags: set[ServerFlags] = {},
|
||||
): TorSwitch {.raises: [LPError], public.} =
|
||||
var builder = SwitchBuilder.new().withRng(rng).withTransport(
|
||||
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
|
||||
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
|
||||
TorTransport.new(torServer, flags, upgr)
|
||||
)
|
||||
if addresses.len != 0:
|
||||
|
||||
@@ -160,7 +160,9 @@ method start*(
|
||||
else:
|
||||
HttpServer.create(address, handshakeTimeout = self.handshakeTimeout)
|
||||
except CatchableError as exc:
|
||||
raise (ref WsTransportError)(msg: exc.msg, parent: exc)
|
||||
raise (ref WsTransportError)(
|
||||
msg: "error in WsTransport start: " & exc.msg, parent: exc
|
||||
)
|
||||
|
||||
self.httpservers &= httpserver
|
||||
|
||||
@@ -309,7 +311,9 @@ method accept*(
|
||||
debug "OS Error", description = exc.msg
|
||||
except CatchableError as exc:
|
||||
info "Unexpected error accepting connection", description = exc.msg
|
||||
raise newException(transport.TransportError, exc.msg, exc)
|
||||
raise newException(
|
||||
transport.TransportError, "Error in WsTransport accept: " & exc.msg, exc
|
||||
)
|
||||
|
||||
method dial*(
|
||||
self: WsTransport,
|
||||
@@ -338,7 +342,9 @@ method dial*(
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
safeClose(transp)
|
||||
raise newException(transport.TransportDialError, e.msg, e)
|
||||
raise newException(
|
||||
transport.TransportDialError, "error in WsTransport dial: " & e.msg, e
|
||||
)
|
||||
|
||||
method handles*(t: WsTransport, address: MultiAddress): bool {.gcsafe, raises: [].} =
|
||||
if procCall Transport(t).handles(address):
|
||||
|
||||
@@ -54,8 +54,9 @@ when defined(libp2p_agents_metrics):
|
||||
proc safeToLowerAscii*(s: string): Result[string, cstring] =
|
||||
try:
|
||||
ok(s.toLowerAscii())
|
||||
except CatchableError:
|
||||
err("toLowerAscii failed")
|
||||
except CatchableError as e:
|
||||
let errMsg = "toLowerAscii failed: " & e.msg
|
||||
err(errMsg.cstring)
|
||||
|
||||
const
|
||||
KnownLibP2PAgents* {.strdefine.} = "nim-libp2p"
|
||||
|
||||
libp2p/utils/bytesview.nim (new file, +28)
@@ -0,0 +1,28 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

type BytesView* = object
  data: seq[byte]
  rpos: int

proc init*(t: typedesc[BytesView], data: sink seq[byte]): BytesView =
  BytesView(data: data, rpos: 0)

func len*(v: BytesView): int {.inline.} =
  v.data.len - v.rpos

func consume*(v: var BytesView, n: int) {.inline.} =
  doAssert v.data.len >= v.rpos + n
  v.rpos += n

template toOpenArray*(v: BytesView, b, e: int): openArray[byte] =
  v.data.toOpenArray(v.rpos + b, v.rpos + e - b)

template data*(v: BytesView): openArray[byte] =
  v.data.toOpenArray(v.rpos, v.data.len - 1)
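A minimal usage sketch for the new BytesView helper above (illustration only, not part of the diff; the import path is assumed from the file location): it wraps a seq[byte] and advances a read position instead of copying the remainder on every consume.

  import libp2p/utils/bytesview

  var view = BytesView.init(@[1'u8, 2, 3, 4, 5])
  doAssert view.len == 5
  view.consume(2)                    # advance the read position, no copy
  doAssert @(view.data()) == @[3'u8, 4, 5]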
@@ -27,9 +27,9 @@ proc anyCompleted*[T](
      if raceFut.completed:
        return raceFut
      requests.del(requests.find(raceFut))
    except ValueError:
    except ValueError as e:
      raise newException(
        AllFuturesFailedError, "None of the futures completed successfully"
        AllFuturesFailedError, "None of the futures completed successfully: " & e.msg, e
      )
    except CancelledError as exc:
      raise exc
libp2p/utils/sequninit.nim (new file, +7)
@@ -0,0 +1,7 @@
{.used.}

when not declared(newSeqUninit):
  # newSeqUninit template avoids deprecated errors
  # for newSeqUninitialized in nim > 2.2
  template newSeqUninit*[T: byte](len: Natural): seq[byte] =
    newSeqUninitialized[byte](len)
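A tiny sketch of how the shim above is meant to be used (assumption: it behaves exactly like newSeqUninitialized on older Nim versions; the import path is assumed from the file location):

  import libp2p/utils/sequninit

  var buf = newSeqUninit[byte](4)    # allocated but not zero-filled
  for i in 0 ..< buf.len:
    buf[i] = byte(i)                 # caller must write every byte before reading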
libp2p/utils/zeroqueue.nim (new file, +85)
@@ -0,0 +1,85 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import std/deques
import ./sequninit

type Chunk = ref object
  data: seq[byte]
  size: int
  start: int

template clone(c: Chunk): Chunk =
  Chunk(data: c.data, size: c.size, start: c.start)

template newChunk(b: sink seq[byte]): Chunk =
  Chunk(data: b, size: b.len, start: 0)

template len(c: Chunk): int =
  c.size - c.start

type ZeroQueue* = object
  # ZeroQueue is a queue structure optimized for efficient pushing and popping of
  # byte sequences `seq[byte]` (called chunks). This type is useful for streaming or buffering
  # scenarios where chunks of binary data are accumulated and consumed incrementally.
  chunks: Deque[Chunk]

proc clear*(q: var ZeroQueue) =
  q.chunks.clear()

proc isEmpty*(q: ZeroQueue): bool =
  return q.chunks.len() == 0

proc len*(q: ZeroQueue): int64 =
  var l: int64
  for b in q.chunks.items():
    l += b.len()
  return l

proc push*(q: var ZeroQueue, b: sink seq[byte]) =
  if b.len > 0:
    q.chunks.addLast(newChunk(b))

proc popChunk(q: var ZeroQueue, count: int): Chunk {.inline.} =
  var first = q.chunks.popFirst()

  # first chunk has up to the requested count of elements,
  # queue will return this chunk (chunk might have fewer than requested)
  if first.len() <= count:
    return first

  # first chunk has more elements than the requested count,
  # queue will return a view of the first count elements, leaving the rest in the queue
  var ret = first.clone()
  ret.size = ret.start + count
  first.start += count
  q.chunks.addFirst(first)
  return ret

proc consumeTo*(q: var ZeroQueue, pbytes: pointer, nbytes: int): int =
  var consumed = 0
  while consumed < nbytes and not q.isEmpty():
    let chunk = q.popChunk(nbytes - consumed)
    let dest = cast[pointer](cast[ByteAddress](pbytes) + consumed)
    let offsetPtr = cast[ptr byte](cast[int](unsafeAddr chunk.data[0]) + chunk.start)
    copyMem(dest, offsetPtr, chunk.len())
    consumed += chunk.len()

  return consumed

proc popChunkSeq*(q: var ZeroQueue, count: int): seq[byte] =
  if q.isEmpty:
    return @[]

  let chunk = q.popChunk(count)
  var dest = newSeqUninit[byte](chunk.len())
  let offsetPtr = cast[ptr byte](cast[int](unsafeAddr chunk.data[0]) + chunk.start)
  copyMem(dest[0].addr, offsetPtr, chunk.len())

  return dest
|
||||
{.push raises: [].}
|
||||
|
||||
import varint, strutils
|
||||
import ./utils/sequninit
|
||||
|
||||
type VBuffer* = object
|
||||
buffer*: seq[byte]
|
||||
@@ -42,7 +43,7 @@ proc initVBuffer*(data: seq[byte], offset = 0): VBuffer =
|
||||
|
||||
proc initVBuffer*(data: openArray[byte], offset = 0): VBuffer =
|
||||
## Initialize VBuffer with copy of ``data``.
|
||||
result.buffer = newSeqUninitialized[byte](len(data))
|
||||
result.buffer = newSeqUninit[byte](len(data))
|
||||
if len(data) > 0:
|
||||
copyMem(addr result.buffer[0], unsafeAddr data[0], len(data))
|
||||
result.offset = offset
|
||||
|
||||
@@ -108,7 +108,9 @@ proc createStreamServer*[T](
|
||||
): StreamServer {.raises: [LPError, MaInvalidAddress].} =
|
||||
## Create new TCP stream server which bounds to ``ma`` address.
|
||||
if not (RTRANSPMA.match(ma)):
|
||||
raise newException(MaInvalidAddress, "Incorrect or unsupported address!")
|
||||
raise newException(
|
||||
MaInvalidAddress, "Incorrect or unsupported address in createStreamServer"
|
||||
)
|
||||
|
||||
try:
|
||||
return createStreamServer(
|
||||
@@ -123,7 +125,7 @@ proc createStreamServer*[T](
|
||||
init,
|
||||
)
|
||||
except CatchableError as exc:
|
||||
raise newException(LPError, exc.msg)
|
||||
raise newException(LPError, "failed createStreamServer: " & exc.msg, exc)
|
||||
|
||||
proc createStreamServer*[T](
|
||||
ma: MultiAddress,
|
||||
@@ -146,7 +148,7 @@ proc createStreamServer*[T](
|
||||
initTAddress(ma).tryGet(), flags, udata, sock, backlog, bufferSize, child, init
|
||||
)
|
||||
except CatchableError as exc:
|
||||
raise newException(LPError, exc.msg)
|
||||
raise newException(LPError, "failed simpler createStreamServer: " & exc.msg, exc)
|
||||
|
||||
proc createAsyncSocket*(ma: MultiAddress): AsyncFD {.raises: [ValueError, LPError].} =
|
||||
## Create new asynchronous socket using MultiAddress' ``ma`` socket type and
|
||||
@@ -178,7 +180,9 @@ proc createAsyncSocket*(ma: MultiAddress): AsyncFD {.raises: [ValueError, LPErro
|
||||
try:
|
||||
createAsyncSocket(address.getDomain(), socktype, protocol)
|
||||
except CatchableError as exc:
|
||||
raise newException(LPError, exc.msg)
|
||||
raise newException(
|
||||
LPError, "Convert exception to LPError in createAsyncSocket: " & exc.msg, exc
|
||||
)
|
||||
|
||||
proc bindAsyncSocket*(sock: AsyncFD, ma: MultiAddress): bool {.raises: [LPError].} =
|
||||
## Bind socket ``sock`` to MultiAddress ``ma``.
|
||||
|
||||
@@ -1,13 +1,17 @@
|
||||
import chronos, chronicles, stew/byteutils
|
||||
import helpers
|
||||
import ../libp2p
|
||||
import ../libp2p/[daemon/daemonapi, varint, transports/wstransport, crypto/crypto]
|
||||
import
|
||||
../libp2p/
|
||||
[autotls/service, daemon/daemonapi, varint, transports/wstransport, crypto/crypto]
|
||||
import ../libp2p/protocols/connectivity/relay/[relay, client, utils]
|
||||
|
||||
type
|
||||
SwitchCreator = proc(
|
||||
ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
|
||||
prov: TransportProvider = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
|
||||
prov: TransportProvider = proc(
|
||||
upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
|
||||
): Transport =
|
||||
TcpTransport.new({}, upgr),
|
||||
relay: Relay = Relay.new(circuitRelayV1 = true),
|
||||
): Switch {.gcsafe, raises: [LPError].}
|
||||
@@ -76,7 +80,7 @@ proc testPubSubDaemonPublish(
|
||||
|
||||
await nativeNode.connect(daemonPeer.peer, daemonPeer.addresses)
|
||||
|
||||
await sleepAsync(1.seconds)
|
||||
await sleepAsync(500.millis)
|
||||
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)
|
||||
|
||||
proc pubsubHandler(
|
||||
@@ -86,12 +90,12 @@ proc testPubSubDaemonPublish(
|
||||
|
||||
asyncDiscard daemonNode.pubsubSubscribe(testTopic, pubsubHandler)
|
||||
pubsub.subscribe(testTopic, nativeHandler)
|
||||
await sleepAsync(5.seconds)
|
||||
await sleepAsync(3.seconds)
|
||||
|
||||
proc publisher() {.async.} =
|
||||
while not finished:
|
||||
await daemonNode.pubsubPublish(testTopic, msgData)
|
||||
await sleepAsync(500.millis)
|
||||
await sleepAsync(250.millis)
|
||||
|
||||
await wait(publisher(), 5.minutes) # should be plenty of time
|
||||
|
||||
@@ -128,7 +132,7 @@ proc testPubSubNodePublish(
|
||||
|
||||
await nativeNode.connect(daemonPeer.peer, daemonPeer.addresses)
|
||||
|
||||
await sleepAsync(1.seconds)
|
||||
await sleepAsync(500.millis)
|
||||
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)
|
||||
|
||||
var times = 0
|
||||
@@ -148,12 +152,12 @@ proc testPubSubNodePublish(
|
||||
discard
|
||||
|
||||
pubsub.subscribe(testTopic, nativeHandler)
|
||||
await sleepAsync(5.seconds)
|
||||
await sleepAsync(3.seconds)
|
||||
|
||||
proc publisher() {.async.} =
|
||||
while not finished:
|
||||
discard await pubsub.publish(testTopic, msgData)
|
||||
await sleepAsync(500.millis)
|
||||
await sleepAsync(250.millis)
|
||||
|
||||
await wait(publisher(), 5.minutes) # should be plenty of time
|
||||
|
||||
@@ -206,7 +210,7 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
|
||||
await nativeNode.stop()
|
||||
await daemonNode.close()
|
||||
|
||||
await sleepAsync(1.seconds)
|
||||
await sleepAsync(500.millis)
|
||||
|
||||
asyncTest "native -> daemon connection":
|
||||
var protos = @["/test-stream"]
|
||||
@@ -288,7 +292,7 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
|
||||
await stream.close()
|
||||
await nativeNode.stop()
|
||||
await daemonNode.close()
|
||||
await sleepAsync(1.seconds)
|
||||
await sleepAsync(500.millis)
|
||||
|
||||
asyncTest "native -> daemon websocket connection":
|
||||
var protos = @["/test-stream"]
|
||||
@@ -318,7 +322,9 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
|
||||
|
||||
let nativeNode = swCreator(
|
||||
ma = wsAddress,
|
||||
prov = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
|
||||
prov = proc(
|
||||
upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
|
||||
): Transport =
|
||||
WsTransport.new(upgr),
|
||||
)
|
||||
|
||||
@@ -337,7 +343,7 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
|
||||
await stream.close()
|
||||
await nativeNode.stop()
|
||||
await daemonNode.close()
|
||||
await sleepAsync(1.seconds)
|
||||
await sleepAsync(500.millis)
|
||||
|
||||
asyncTest "daemon -> native websocket connection":
|
||||
var protos = @["/test-stream"]
|
||||
@@ -357,10 +363,7 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
|
||||
.withAddress(wsAddress)
|
||||
.withRng(crypto.newRng())
|
||||
.withMplex()
|
||||
.withTransport(
|
||||
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
|
||||
WsTransport.new(upgr)
|
||||
)
|
||||
.withWsTransport()
|
||||
.withNoise()
|
||||
.build()
|
||||
|
||||
|
||||
@@ -94,7 +94,7 @@ proc new*(T: typedesc[TestBufferStream], writeHandler: WriteHandler): T =
|
||||
testBufferStream.initStream()
|
||||
testBufferStream
|
||||
|
||||
macro checkUntilCustomTimeout*(
|
||||
macro checkUntilTimeoutCustom*(
|
||||
timeout: Duration, sleepInterval: Duration, code: untyped
|
||||
): untyped =
|
||||
## Periodically checks a given condition until it is true or a timeout occurs.
|
||||
@@ -105,17 +105,17 @@ macro checkUntilCustomTimeout*(
|
||||
## Examples:
|
||||
## ```nim
|
||||
## # Example 1:
|
||||
## asyncTest "checkUntilCustomTimeout should pass if the condition is true":
|
||||
## asyncTest "checkUntilTimeoutCustom should pass if the condition is true":
|
||||
## let a = 2
|
||||
## let b = 2
|
||||
## checkUntilCustomTimeout(2.seconds):
|
||||
## checkUntilTimeoutCustom(2.seconds):
|
||||
## a == b
|
||||
##
|
||||
## # Example 2: Multiple conditions
|
||||
## asyncTest "checkUntilCustomTimeout should pass if the conditions are true":
|
||||
## asyncTest "checkUntilTimeoutCustom should pass if the conditions are true":
|
||||
## let a = 2
|
||||
## let b = 2
|
||||
## checkUntilCustomTimeout(5.seconds)::
|
||||
## checkUntilTimeoutCustom(5.seconds)::
|
||||
## a == b
|
||||
## a == 2
|
||||
## b == 1
|
||||
@@ -154,7 +154,7 @@ macro checkUntilCustomTimeout*(
|
||||
await checkExpiringInternal()
|
||||
|
||||
macro checkUntilTimeout*(code: untyped): untyped =
|
||||
## Same as `checkUntilCustomTimeout` but with a default timeout of 10 seconds.
|
||||
## Same as `checkUntilTimeoutCustom` but with a default timeout of 2s with 50ms interval.
|
||||
##
|
||||
## Examples:
|
||||
## ```nim
|
||||
@@ -175,7 +175,7 @@ macro checkUntilTimeout*(code: untyped): untyped =
|
||||
## b == 1
|
||||
## ```
|
||||
result = quote:
|
||||
checkUntilCustomTimeout(10.seconds, 100.milliseconds, `code`)
|
||||
checkUntilTimeoutCustom(2.seconds, 50.milliseconds, `code`)
|
||||
|
||||
proc unorderedCompare*[T](a, b: seq[T]): bool =
|
||||
if a == b:
|
||||
|
||||
tests/kademlia/testencoding.nim (new file, +142)
@@ -0,0 +1,142 @@
{.used.}

# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import unittest2
import ../../libp2p/protobuf/minprotobuf
import ../../libp2p/protocols/kademlia/protobuf
import ../../libp2p/multiaddress
import options
import results

suite "kademlia protobuffers":
  const invalidType = uint32(999)

  proc valFromResultOption[T](res: ProtoResult[Option[T]]): T =
    assert res.isOk()
    assert res.value().isSome()
    return res.value().unsafeGet()

  test "record encode/decode":
    let rec = Record(
      key: some(@[1'u8, 2, 3]),
      value: some(@[4'u8, 5, 6]),
      timeReceived: some("2025-05-12T12:00:00Z"),
    )
    let encoded = rec.encode()
    let decoded = Record.decode(encoded).valFromResultOption
    check:
      decoded.key.get() == rec.key.get()
      decoded.value.get() == rec.value.get()
      decoded.timeReceived.get() == rec.timeReceived.get()

  test "peer encode/decode":
    let maddr = MultiAddress.init("/ip4/127.0.0.1/tcp/9000").tryGet()
    let peer =
      Peer(id: @[1'u8, 2, 3], addrs: @[maddr], connection: ConnectionType.connected)
    let encoded = peer.encode()
    var decoded = Peer.decode(initProtoBuffer(encoded.buffer)).valFromResultOption
    check:
      decoded == peer

  test "message encode/decode roundtrip":
    let maddr = MultiAddress.init("/ip4/10.0.0.1/tcp/4001").tryGet()
    let peer = Peer(id: @[9'u8], addrs: @[maddr], connection: canConnect)
    let r = Record(key: some(@[1'u8]), value: some(@[2'u8]), timeReceived: some("t"))
    let msg = Message(
      msgType: MessageType.findNode,
      key: some(@[7'u8]),
      record: some(r),
      closerPeers: @[peer],
      providerPeers: @[peer],
    )
    let encoded = msg.encode()
    let decoded = Message.decode(encoded.buffer).valFromResultOption
    check:
      decoded == msg

  test "decode record with missing fields":
    var pb = initProtoBuffer()
    # no fields written
    let rec = Record.decode(pb).valFromResultOption
    check:
      rec.key.isNone()
      rec.value.isNone()
      rec.timeReceived.isNone()

  test "decode peer with missing id (invalid)":
    var pb = initProtoBuffer()
    check:
      Peer.decode(pb).isErr()

  test "decode peer with invalid connection type":
    var pb = initProtoBuffer()
    pb.write(1, @[1'u8, 2, 3]) # id field
    pb.write(3, invalidType) # bogus connection type
    check:
      Peer.decode(pb).isErr()

  test "decode message with invalid msgType":
    var pb = initProtoBuffer()
    pb.write(1, invalidType) # invalid MessageType
    check:
      Message.decode(pb.buffer).isErr()

  test "decode message with invalid peer in closerPeers":
    let badPeerBuf = @[0'u8, 1, 2] # junk
    var pb = initProtoBuffer()
    pb.write(8, badPeerBuf) # closerPeers field
    check:
      Message.decode(pb.buffer).isErr()

  test "decode message with invalid embedded record":
    # encode junk data into field 3 (record)
    var pb = initProtoBuffer()
    pb.write(1, uint32(MessageType.putValue)) # valid msgType
    pb.write(3, @[0x00'u8, 0xFF, 0xAB]) # broken protobuf for record
    check:
      Message.decode(pb.buffer).isErr()

  test "decode message with empty embedded record":
    var recordPb = initProtoBuffer() # no fields
    var pb = initProtoBuffer()
    pb.write(1, uint32(MessageType.getValue))
    pb.write(3, recordPb.buffer)
    let decoded = Message.decode(pb.buffer).valFromResultOption
    check:
      decoded.record.isSome()
      decoded.record.get().key.isNone()

  test "peer with empty addr list and no connection":
    let peer = Peer(id: @[0x42'u8], addrs: @[], connection: ConnectionType.notConnected)
    let encoded = peer.encode()
    let decoded = Peer.decode(initProtoBuffer(encoded.buffer)).valFromResultOption
    check:
      decoded == peer

  test "message with empty closer/provider peers":
    let msg = Message(
      msgType: MessageType.ping,
      key: none[seq[byte]](),
      record: none[Record](),
      closerPeers: @[],
      providerPeers: @[],
    )
    let encoded = msg.encode()
    let decoded = Message.decode(encoded.buffer).valFromResultOption
    check:
      decoded == msg

  test "peer with addr but missing id":
    var pb = initProtoBuffer()
    let maddr = MultiAddress.init("/ip4/1.2.3.4/tcp/1234").tryGet()
    pb.write(2, maddr.data.buffer)
    check:
      Peer.decode(pb).isErr()
tests/kademlia/testroutingtable.nim (new file, +83)
@@ -0,0 +1,83 @@
{.used.}

# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import unittest
import chronos
import ../../libp2p/crypto/crypto
import ../../libp2p/protocols/kademlia/[routingtable, consts, keys]

proc testKey*(x: byte): Key =
  var buf: array[IdLength, byte]
  buf[31] = x
  return Key(kind: KeyType.Unhashed, data: buf)

let rng = crypto.newRng()

suite "routing table":
  test "inserts single key in correct bucket":
    let selfId = testKey(0)
    var rt = RoutingTable.init(selfId)
    let other = testKey(0b10000000)
    discard rt.insert(other)

    let idx = bucketIndex(selfId, other)
    check:
      rt.buckets.len > idx
      rt.buckets[idx].peers.len == 1
      rt.buckets[idx].peers[0].nodeId == other

  test "does not insert beyond capacity":
    let selfId = testKey(0)
    var rt = RoutingTable.init(selfId)
    let targetBucket = 6
    for _ in 0 ..< k + 5:
      var kid = randomKeyInBucketRange(selfId, targetBucket, rng)
      kid.kind = KeyType.Unhashed
      # Overriding so we don't use sha for comparing xor distances
      discard rt.insert(kid)

    check targetBucket < rt.buckets.len
    let bucket = rt.buckets[targetBucket]
    check bucket.peers.len <= k

  test "findClosest returns sorted keys":
    let selfId = testKey(0)
    var rt = RoutingTable.init(selfId)
    let ids = @[testKey(1), testKey(2), testKey(3), testKey(4), testKey(5)]
    for id in ids:
      discard rt.insert(id)

    let res = rt.findClosest(testKey(1), 3)

    check:
      res.len == 3
      res == @[testKey(1), testKey(3), testKey(2)]

  test "isStale returns true for empty or old keys":
    var bucket: Bucket
    check isStale(bucket) == true

    bucket.peers = @[NodeEntry(nodeId: testKey(1), lastSeen: Moment.now() - 40.minutes)]
    check isStale(bucket) == true

    bucket.peers = @[NodeEntry(nodeId: testKey(1), lastSeen: Moment.now())]
    check isStale(bucket) == false

  test "randomKeyInBucketRange returns id at correct distance":
    let selfId = testKey(0)
    let targetBucket = 3
    var rid = randomKeyInBucketRange(selfId, targetBucket, rng)
    rid.kind = KeyType.Unhashed
    # Overriding so we don't use sha for comparing xor distances
    let idx = bucketIndex(selfId, rid)
    check:
      idx == targetBucket
      rid != selfId
tests/kademlia/testxordistance.nim (new file, 54 lines)
@@ -0,0 +1,54 @@
{.used.}

# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import unittest
import chronos
import ../../libp2p/protocols/kademlia/[consts, keys, xordistance]

suite "xor distance":
  test "countLeadingZeroBits works":
    check countLeadingZeroBits(0b00000000'u8) == 8
    check countLeadingZeroBits(0b10000000'u8) == 0
    check countLeadingZeroBits(0b01000000'u8) == 1
    check countLeadingZeroBits(0b00000001'u8) == 7

  test "leadingZeros of xor distance":
    var d: XorDistance
    for i in 0 ..< IdLength:
      d[i] = 0
    check leadingZeros(d) == IdLength * 8

    d[0] = 0b00010000
    check leadingZeros(d) == 3

    d[0] = 0
    d[1] = 0b00100000
    check leadingZeros(d) == 10

  test "xorDistance of identical keys is zero":
    let k = @[
      1'u8, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6,
      7, 8, 9, 0, 1, 2,
    ].toKey()
    let dist = xorDistance(k, k)
    check:
      leadingZeros(dist) == IdLength * 8
      dist == default(XorDistance)

  test "cmp gives correct order":
    var a: XorDistance
    var b: XorDistance
    a[0] = 0x01
    b[0] = 0x02
    check a < b
    check cmp(a, b) == -1
    check cmp(b, a) == 1
    check cmp(a, a) == 0
@@ -12,8 +12,8 @@
|
||||
import sequtils, tables, sets
|
||||
import chronos, stew/byteutils
|
||||
import
|
||||
utils,
|
||||
../../libp2p/[
|
||||
../utils,
|
||||
../../../libp2p/[
|
||||
switch,
|
||||
stream/connection,
|
||||
crypto/crypto,
|
||||
@@ -23,9 +23,9 @@ import
|
||||
protocols/pubsub/peertable,
|
||||
protocols/pubsub/pubsubpeer,
|
||||
]
|
||||
import ../../libp2p/protocols/pubsub/errors as pubsub_errors
|
||||
import ../../../libp2p/protocols/pubsub/errors as pubsub_errors
|
||||
|
||||
import ../helpers
|
||||
import ../../helpers
|
||||
|
||||
proc waitSub(sender, receiver: auto, key: string) {.async.} =
|
||||
# turn things deterministic
|
||||
@@ -38,7 +38,7 @@ proc waitSub(sender, receiver: auto, key: string) {.async.} =
|
||||
dec ceil
|
||||
doAssert(ceil > 0, "waitSub timeout!")
|
||||
|
||||
suite "FloodSub":
|
||||
suite "FloodSub Integration":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
@@ -310,5 +310,5 @@ suite "FloodSub":
|
||||
|
||||
check (await bigNode1[0].publish("foo", bigMessage)) > 0
|
||||
|
||||
checkUntilTimeout:
|
||||
checkUntilTimeoutCustom(10.seconds, 100.milliseconds):
|
||||
messageReceived == 1
|
||||
@@ -1,105 +1,15 @@
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils]
|
||||
import utils
|
||||
import chronicles
|
||||
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../helpers
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../../helpers
|
||||
|
||||
suite "GossipSub Control Messages":
|
||||
suite "GossipSub Integration - Control Messages":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "handleIHave - peers with no budget should not request messages":
|
||||
let topic = "foobar"
|
||||
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.subscribe(topic, voidTopicHandler)
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
# Add message to `gossipSub`'s message cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[^1].incl(id)
|
||||
|
||||
# Build an IHAVE message that contains the same message ID three times
|
||||
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
|
||||
|
||||
# Given the peer has no budget to request messages
|
||||
peer.iHaveBudget = 0
|
||||
|
||||
# When a peer makes an IHAVE request for a message that `gossipSub` has
|
||||
let iwants = gossipSub.handleIHave(peer, @[msg])
|
||||
|
||||
# Then `gossipSub` should not generate an IWant message for the message,
|
||||
check:
|
||||
iwants.messageIDs.len == 0
|
||||
gossipSub.mcache.msgs.len == 1
|
||||
|
||||
asyncTest "handleIHave - peers with budget should request messages":
|
||||
let topic = "foobar"
|
||||
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.subscribe(topic, voidTopicHandler)
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
# Add message to `gossipSub`'s message cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[^1].incl(id)
|
||||
|
||||
# Build an IHAVE message that contains the same message ID three times
|
||||
# If ids are repeated, only one request should be generated
|
||||
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
|
||||
|
||||
# Given the budget is not 0 (because it's not been overridden)
|
||||
check:
|
||||
peer.iHaveBudget > 0
|
||||
|
||||
# When a peer makes an IHAVE request for a message that `gossipSub` does not have
|
||||
let iwants = gossipSub.handleIHave(peer, @[msg])
|
||||
|
||||
# Then `gossipSub` should generate an IWant message for the message
|
||||
check:
|
||||
iwants.messageIDs.len == 1
|
||||
gossipSub.mcache.msgs.len == 1
|
||||
|
||||
asyncTest "handleIWant - peers with budget should request messages":
|
||||
let topic = "foobar"
|
||||
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.subscribe(topic, voidTopicHandler)
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
# Add message to `gossipSub`'s message cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[^1].incl(id)
|
||||
|
||||
# Build an IWANT message that contains the same message ID three times
|
||||
# If ids are repeated, only one request should be generated
|
||||
let msg = ControlIWant(messageIDs: @[id, id, id])
|
||||
|
||||
# When a peer makes an IWANT request for a message that `gossipSub` has
|
||||
let messages = gossipSub.handleIWant(peer, @[msg])
|
||||
|
||||
# Then `gossipSub` should return the message
|
||||
check:
|
||||
messages.len == 1
|
||||
gossipSub.mcache.msgs.len == 1
|
||||
|
||||
asyncTest "GRAFT messages correctly add peers to mesh":
|
||||
# Given 2 nodes
|
||||
let
|
||||
@@ -148,9 +58,8 @@ suite "GossipSub Control Messages":
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
|
||||
n1.broadcast(@[p0], RPCMsg(control: some(graftMessage)), isHighPriority = false)
|
||||
|
||||
await waitForPeersInTable(
|
||||
nodes, topic, newSeqWith(numberOfNodes, 1), PeerTableType.Mesh
|
||||
)
|
||||
checkUntilTimeout:
|
||||
nodes.allIt(it.mesh.getOrDefault(topic).len == 1)
|
||||
|
||||
# Then the peers are GRAFTed
|
||||
check:
|
||||
@@ -477,38 +386,3 @@ suite "GossipSub Control Messages":
|
||||
# Then IDONTWANT is sent to B on publish
|
||||
checkUntilTimeout:
|
||||
nodes[1].mesh.getOrDefault(topic).anyIt(it.iDontWants.anyIt(it.len == 1))
|
||||
|
||||
asyncTest "IDONTWANT is sent only for 1.2":
|
||||
# 3 nodes: A <=> B <=> C (A & C are NOT connected)
|
||||
let
|
||||
topic = "foobar"
|
||||
nodeA = generateNodes(1, gossip = true).toGossipSub()[0]
|
||||
nodeB = generateNodes(1, gossip = true).toGossipSub()[0]
|
||||
nodeC = generateNodes(1, gossip = true, gossipSubVersion = GossipSubCodec_11)
|
||||
.toGossipSub()[0]
|
||||
|
||||
startNodesAndDeferStop(@[nodeA, nodeB, nodeC])
|
||||
|
||||
await connectNodes(nodeA, nodeB)
|
||||
await connectNodes(nodeB, nodeC)
|
||||
|
||||
let (bFinished, handlerB) = createCompleteHandler()
|
||||
|
||||
nodeA.subscribe(topic, voidTopicHandler)
|
||||
nodeB.subscribe(topic, handlerB)
|
||||
nodeC.subscribe(topic, voidTopicHandler)
|
||||
await waitSubGraph(@[nodeA, nodeB, nodeC], topic)
|
||||
|
||||
check:
|
||||
nodeC.mesh.peers(topic) == 1
|
||||
|
||||
# When A sends a message to the topic
|
||||
tryPublish await nodeA.publish(topic, newSeq[byte](10000)), 1
|
||||
|
||||
discard await bFinished
|
||||
|
||||
# Then B doesn't send IDONTWANT to both A and C (because C.gossipSubVersion == GossipSubCodec_11)
|
||||
await waitForHeartbeat()
|
||||
check:
|
||||
toSeq(nodeC.mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
|
||||
toSeq(nodeA.mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
|
||||
tests/pubsub/integration/testgossipsubcustomconn.nim (new file, 99 lines)
@@ -0,0 +1,99 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0 ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import chronos
|
||||
import stew/byteutils
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
|
||||
import ../../../libp2p/protocols/pubsub/rpc/[messages]
|
||||
import ../../../libp2p/stream/connection
|
||||
import ../../helpers
|
||||
|
||||
type DummyConnection* = ref object of Connection
|
||||
|
||||
method write*(
|
||||
self: DummyConnection, msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
return fut
|
||||
|
||||
proc new*(T: typedesc[DummyConnection]): DummyConnection =
|
||||
let instance = T()
|
||||
instance
|
||||
|
||||
suite "GossipSub Integration - Custom Connection Support":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "publish with useCustomConn triggers custom connection and peer selection":
|
||||
let
|
||||
topic = "test"
|
||||
nodes = generateNodes(2, gossip = true).toGossipSub()
|
||||
|
||||
var
|
||||
customConnCreated = false
|
||||
peerSelectionCalled = false
|
||||
|
||||
nodes[0].customConnCallbacks = some(
|
||||
CustomConnectionCallbacks(
|
||||
customConnCreationCB: proc(
|
||||
destAddr: Option[MultiAddress], destPeerId: PeerId, codec: string
|
||||
): Connection =
|
||||
customConnCreated = true
|
||||
return DummyConnection.new(),
|
||||
customPeerSelectionCB: proc(
|
||||
allPeers: HashSet[PubSubPeer],
|
||||
directPeers: HashSet[PubSubPeer],
|
||||
meshPeers: HashSet[PubSubPeer],
|
||||
fanoutPeers: HashSet[PubSubPeer],
|
||||
): HashSet[PubSubPeer] =
|
||||
peerSelectionCalled = true
|
||||
return allPeers,
|
||||
)
|
||||
)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe(topic, voidTopicHandler)
|
||||
await waitSub(nodes[0], nodes[1], topic)
|
||||
|
||||
tryPublish await nodes[0].publish(
|
||||
topic, "hello".toBytes(), publishParams = some(PublishParams(useCustomConn: true))
|
||||
), 1
|
||||
|
||||
check:
|
||||
peerSelectionCalled
|
||||
customConnCreated
|
||||
|
||||
asyncTest "publish with useCustomConn triggers assertion if custom callbacks not set":
|
||||
let
|
||||
topic = "test"
|
||||
nodes = generateNodes(2, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe(topic, voidTopicHandler)
|
||||
await waitSub(nodes[0], nodes[1], topic)
|
||||
|
||||
var raised = false
|
||||
try:
|
||||
discard await nodes[0].publish(
|
||||
topic,
|
||||
"hello".toBytes(),
|
||||
publishParams = some(PublishParams(useCustomConn: true)),
|
||||
)
|
||||
except Defect:
|
||||
raised = true
|
||||
|
||||
check raised
|
||||
@@ -9,66 +9,18 @@
|
||||
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils]
|
||||
import stew/byteutils
|
||||
import chronicles
|
||||
import utils
|
||||
import ../../libp2p/protocols/pubsub/[gossipsub, peertable]
|
||||
import ../../libp2p/protocols/pubsub/rpc/[messages]
|
||||
import ../helpers
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, peertable]
|
||||
import ../../../libp2p/protocols/pubsub/rpc/[messages]
|
||||
import ../../helpers
|
||||
|
||||
suite "GossipSub Fanout Management":
|
||||
suite "GossipSub Integration - Fanout Management":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "`replenishFanout` Degree Lo":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
check gossipSub.gossipsub[topic].len == 15
|
||||
gossipSub.replenishFanout(topic)
|
||||
check gossipSub.fanout[topic].len == gossipSub.parameters.d
|
||||
|
||||
asyncTest "`dropFanoutPeers` drop expired fanout topics":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(6, topic, populateGossipsub = true, populateFanout = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
|
||||
await sleepAsync(5.millis) # allow the topic to expire
|
||||
|
||||
check gossipSub.fanout[topic].len == gossipSub.parameters.d
|
||||
|
||||
gossipSub.dropFanoutPeers()
|
||||
check topic notin gossipSub.fanout
|
||||
|
||||
asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
|
||||
let
|
||||
topic1 = "foobar1"
|
||||
topic2 = "foobar2"
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
|
||||
6, @[topic1, topic2], populateGossipsub = true, populateFanout = true
|
||||
)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
|
||||
gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
|
||||
await sleepAsync(5.millis) # allow first topic to expire
|
||||
|
||||
check gossipSub.fanout[topic1].len == gossipSub.parameters.d
|
||||
check gossipSub.fanout[topic2].len == gossipSub.parameters.d
|
||||
|
||||
gossipSub.dropFanoutPeers()
|
||||
check topic1 notin gossipSub.fanout
|
||||
check topic2 in gossipSub.fanout
|
||||
|
||||
asyncTest "e2e - GossipSub send over fanout A -> B":
|
||||
asyncTest "GossipSub send over fanout A -> B":
|
||||
let (passed, handler) = createCompleteHandler()
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
@@ -107,7 +59,7 @@ suite "GossipSub Fanout Management":
|
||||
|
||||
check observed == 2
|
||||
|
||||
asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
|
||||
asyncTest "GossipSub send over fanout A -> B for subscribed topic":
|
||||
let (passed, handler) = createCompleteHandler()
|
||||
|
||||
let nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 10.minutes)
|
||||
@@ -12,129 +12,15 @@
|
||||
import std/[sequtils]
|
||||
import stew/byteutils
|
||||
import chronicles
|
||||
import utils
|
||||
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../../libp2p/protocols/pubsub/rpc/[message]
|
||||
import ../helpers, ../utils/[futures]
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../../../libp2p/protocols/pubsub/rpc/[message]
|
||||
import ../../helpers, ../../utils/[futures]
|
||||
|
||||
const MsgIdSuccess = "msg id gen success"
|
||||
|
||||
suite "GossipSub Gossip Protocol":
|
||||
suite "GossipSub Integration - Gossip Protocol":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(45, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i in 0 ..< 30:
|
||||
let peer = peers[i]
|
||||
if i mod 2 == 0:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
else:
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
# generate gossipsub (free standing) peers
|
||||
for i in 30 ..< 45:
|
||||
let peer = peers[i]
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = conns[i]
|
||||
inc seqno
|
||||
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
check gossipSub.fanout[topic].len == 15
|
||||
check gossipSub.mesh[topic].len == 15
|
||||
check gossipSub.gossipsub[topic].len == 15
|
||||
|
||||
let gossipPeers = gossipSub.getGossipPeers()
|
||||
check gossipPeers.len == gossipSub.parameters.d
|
||||
for p in gossipPeers.keys:
|
||||
check not gossipSub.fanout.hasPeerId(topic, p.peerId)
|
||||
check not gossipSub.mesh.hasPeerId(topic, p.peerId)
|
||||
|
||||
asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i, peer in peers:
|
||||
if i mod 2 == 0:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
else:
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = conns[i]
|
||||
inc seqno
|
||||
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let gossipPeers = gossipSub.getGossipPeers()
|
||||
check gossipPeers.len == gossipSub.parameters.d
|
||||
|
||||
asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i, peer in peers:
|
||||
if i mod 2 == 0:
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
gossipSub.grafted(peer, topic)
|
||||
else:
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = conns[i]
|
||||
inc seqno
|
||||
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let gossipPeers = gossipSub.getGossipPeers()
|
||||
check gossipPeers.len == gossipSub.parameters.d
|
||||
|
||||
asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i, peer in peers:
|
||||
if i mod 2 == 0:
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
gossipSub.grafted(peer, topic)
|
||||
else:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = conns[i]
|
||||
inc seqno
|
||||
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let gossipPeers = gossipSub.getGossipPeers()
|
||||
check gossipPeers.len == 0
|
||||
|
||||
asyncTest "messages sent to peers not in the mesh are propagated via gossip":
|
||||
let
|
||||
numberOfNodes = 5
|
||||
@@ -154,7 +40,7 @@ suite "GossipSub Gossip Protocol":
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
checkUntilCustomTimeout(500.milliseconds, 20.milliseconds):
|
||||
checkUntilTimeout:
|
||||
nodes.allIt(it.gossipsub.getOrDefault(topic).len == numberOfNodes - 1)
|
||||
|
||||
# When node 0 sends a message
|
||||
@@ -162,7 +48,7 @@ suite "GossipSub Gossip Protocol":
|
||||
|
||||
# At least one of the nodes should have received an iHave message
|
||||
# The check is made this way because the mesh structure changes from run to run
|
||||
checkUntilCustomTimeout(500.milliseconds, 20.milliseconds):
|
||||
checkUntilTimeout:
|
||||
messages[].mapIt(it[].len).anyIt(it > 0)
|
||||
|
||||
asyncTest "adaptive gossip dissemination, dLazy and gossipFactor to 0":
|
||||
@@ -193,7 +79,7 @@ suite "GossipSub Gossip Protocol":
|
||||
await waitForHeartbeat()
|
||||
|
||||
# When node 0 sends a message
|
||||
check (await nodes[0].publish(topic, "Hello!".toBytes())) == 3
|
||||
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
|
||||
await waitForHeartbeat()
|
||||
|
||||
# None of the nodes should have received an iHave message
|
||||
@@ -209,8 +95,12 @@ suite "GossipSub Gossip Protocol":
|
||||
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(4)
|
||||
)
|
||||
nodes = generateNodes(
|
||||
numberOfNodes, gossip = true, dValues = some(dValues), gossipFactor = some(0.5)
|
||||
)
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
dValues = some(dValues),
|
||||
gossipFactor = some(0.5),
|
||||
)
|
||||
.toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
@@ -223,10 +113,12 @@ suite "GossipSub Gossip Protocol":
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForPeersInTable(@[nodes[0]], topic, @[19], PeerTableType.Gossipsub)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
|
||||
|
||||
# When node 0 sends a message
|
||||
check (await nodes[0].publish(topic, "Hello!".toBytes())) in 2 .. 3
|
||||
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
|
||||
await waitForHeartbeat(2)
|
||||
|
||||
# At least 8 of the nodes should have received an iHave message
|
||||
@@ -243,11 +135,12 @@ suite "GossipSub Gossip Protocol":
|
||||
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(6)
|
||||
)
|
||||
nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
dValues = some(dValues),
|
||||
gossipFactor = some(0.float),
|
||||
)
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
dValues = some(dValues),
|
||||
gossipFactor = some(0.float),
|
||||
)
|
||||
.toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
@@ -260,10 +153,12 @@ suite "GossipSub Gossip Protocol":
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForPeersInTable(@[nodes[0]], topic, @[19], PeerTableType.Gossipsub)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
|
||||
|
||||
# When node 0 sends a message
|
||||
check (await nodes[0].publish(topic, "Hello!".toBytes())) in 2 .. 3
|
||||
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
|
||||
await waitForHeartbeat(2)
|
||||
|
||||
# At least 6 of the nodes should have received an iHave message
|
||||
@@ -276,7 +171,7 @@ suite "GossipSub Gossip Protocol":
|
||||
let
|
||||
numberOfNodes = 3
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true)
|
||||
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
@@ -289,21 +184,23 @@ suite "GossipSub Gossip Protocol":
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForPeersInTable(nodes, topic, @[1, 2, 1], PeerTableType.Gossipsub)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes[0].gossipsub.getOrDefault(topic).len == 1
|
||||
nodes[1].gossipsub.getOrDefault(topic).len == 2
|
||||
nodes[2].gossipsub.getOrDefault(topic).len == 1
|
||||
|
||||
# When node 0 sends a large message
|
||||
let largeMsg = newSeq[byte](1000)
|
||||
check (await nodes[0].publish(topic, largeMsg)) == 1
|
||||
await waitForHeartbeat()
|
||||
tryPublish await nodes[0].publish(topic, largeMsg), 1
|
||||
|
||||
# Only node 2 should have received the iDontWant message
|
||||
let receivedIDontWants = messages[].mapIt(it[].len)
|
||||
check:
|
||||
receivedIDontWants[0] == 0
|
||||
receivedIDontWants[1] == 0
|
||||
receivedIDontWants[2] == 1
|
||||
checkUntilTimeout:
|
||||
messages[].mapIt(it[].len)[2] == 1
|
||||
messages[].mapIt(it[].len)[1] == 0
|
||||
messages[].mapIt(it[].len)[0] == 0
|
||||
|
||||
asyncTest "e2e - GossipSub peer exchange":
|
||||
asyncTest "GossipSub peer exchange":
|
||||
# A, B & C are subscribed to something
|
||||
# B unsubscribes from it, it should send
|
||||
# PX to A & C
|
||||
@@ -354,25 +251,3 @@ suite "GossipSub Gossip Protocol":
|
||||
check:
|
||||
results[0].isCompleted()
|
||||
results[1].isCompleted()
|
||||
|
||||
asyncTest "Peer must send right gosspipsub version":
|
||||
let
|
||||
topic = "foobar"
|
||||
node0 = generateNodes(1, gossip = true)[0]
|
||||
node1 = generateNodes(1, gossip = true, gossipSubVersion = GossipSubCodec_10)[0]
|
||||
|
||||
startNodesAndDeferStop(@[node0, node1])
|
||||
|
||||
await connectNodes(node0, node1)
|
||||
|
||||
node0.subscribe(topic, voidTopicHandler)
|
||||
node1.subscribe(topic, voidTopicHandler)
|
||||
await waitSubGraph(@[node0, node1], topic)
|
||||
|
||||
var gossip0: GossipSub = GossipSub(node0)
|
||||
var gossip1: GossipSub = GossipSub(node1)
|
||||
|
||||
checkUntilTimeout:
|
||||
gossip0.mesh.getOrDefault(topic).toSeq[0].codec == GossipSubCodec_10
|
||||
checkUntilTimeout:
|
||||
gossip1.mesh.getOrDefault(topic).toSeq[0].codec == GossipSubCodec_10
|
||||
@@ -0,0 +1,91 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023-2025 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils]
|
||||
import chronicles
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../../helpers
|
||||
|
||||
suite "GossipSub Integration - Compatibility":
|
||||
const topic = "foobar"
|
||||
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "Protocol negotiation selects highest common version":
|
||||
let
|
||||
node0 = generateNodes(
|
||||
1,
|
||||
gossip = true,
|
||||
codecs = @[GossipSubCodec_12, GossipSubCodec_11, GossipSubCodec_10],
|
||||
# Order from highest to lowest version is required because
|
||||
# multistream protocol negotiation selects the first protocol
|
||||
# in the dialer's list that both peers support
|
||||
)
|
||||
.toGossipSub()[0]
|
||||
node1 = generateNodes(
|
||||
1, gossip = true, codecs = @[GossipSubCodec_11, GossipSubCodec_10]
|
||||
)
|
||||
.toGossipSub()[0]
|
||||
node2 =
|
||||
generateNodes(1, gossip = true, codecs = @[GossipSubCodec_10]).toGossipSub()[0]
|
||||
nodes = @[node0, node1, node2]
|
||||
node0PeerId = node0.peerInfo.peerId
|
||||
node1PeerId = node1.peerInfo.peerId
|
||||
node2PeerId = node2.peerInfo.peerId
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
nodes.subscribeAllNodes(topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
checkUntilTimeout:
|
||||
node0.getPeerByPeerId(topic, node1PeerId).codec == GossipSubCodec_11
|
||||
node0.getPeerByPeerId(topic, node2PeerId).codec == GossipSubCodec_10
|
||||
|
||||
node1.getPeerByPeerId(topic, node0PeerId).codec == GossipSubCodec_11
|
||||
node1.getPeerByPeerId(topic, node2PeerId).codec == GossipSubCodec_10
|
||||
|
||||
node2.getPeerByPeerId(topic, node0PeerId).codec == GossipSubCodec_10
|
||||
node2.getPeerByPeerId(topic, node1PeerId).codec == GossipSubCodec_10
|
||||
|
||||
asyncTest "IDONTWANT is sent only for GossipSubCodec_12":
|
||||
# 4 nodes: nodeCenter in the center connected to the rest
|
||||
var nodes = generateNodes(3, gossip = true).toGossipSub()
|
||||
let
|
||||
nodeCenter = nodes[0]
|
||||
nodeSender = nodes[1]
|
||||
nodeCodec12 = nodes[2]
|
||||
nodeCodec11 = generateNodes(
|
||||
1, gossip = true, codecs = @[GossipSubCodec_11, GossipSubCodec_10]
|
||||
)
|
||||
.toGossipSub()[0]
|
||||
|
||||
nodes &= nodeCodec11
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await connectNodes(nodeCenter, nodeSender)
|
||||
await connectNodes(nodeCenter, nodeCodec12)
|
||||
await connectNodes(nodeCenter, nodeCodec11)
|
||||
|
||||
nodes.subscribeAllNodes(topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# When A sends a message to the topic
|
||||
tryPublish await nodeSender.publish(topic, newSeq[byte](10000)), 1
|
||||
|
||||
# Then nodeCenter sends IDONTWANT only to nodeCodec12 (because nodeCodec11.codec == GossipSubCodec_11)
|
||||
checkUntilTimeout:
|
||||
nodeCodec12.mesh.getOrDefault(topic).toSeq()[0].iDontWants.anyIt(it.len == 1)
|
||||
nodeCodec11.mesh.getOrDefault(topic).toSeq()[0].iDontWants.allIt(it.len == 0)
|
||||
@@ -1,18 +1,14 @@
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils]
|
||||
import utils
|
||||
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../helpers
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../../helpers
|
||||
|
||||
suite "GossipSub Heartbeat":
|
||||
suite "GossipSub Integration - Heartbeat":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
const
|
||||
timeout = 1.seconds
|
||||
interval = 50.milliseconds
|
||||
|
||||
asyncTest "Mesh is rebalanced during heartbeat - pruning peers":
|
||||
const
|
||||
numberOfNodes = 10
|
||||
@@ -32,7 +28,7 @@ suite "GossipSub Heartbeat":
|
||||
await connectNodes(node0, nodes[i])
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
checkUntilTimeout:
|
||||
node0.mesh.getOrDefault(topic).len == numberOfNodes - 1
|
||||
|
||||
# When DValues of Node0 are updated to lower than defaults
|
||||
@@ -52,7 +48,7 @@ suite "GossipSub Heartbeat":
|
||||
node0.parameters.applyDValues(newDValues)
|
||||
|
||||
# Then mesh of Node0 is rebalanced and peers are pruned to adapt to new values
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
checkUntilTimeout:
|
||||
node0.mesh[topic].len >= newDLow and node0.mesh[topic].len <= newDHigh
|
||||
|
||||
asyncTest "Mesh is rebalanced during heartbeat - grafting new peers":
|
||||
@@ -82,7 +78,7 @@ suite "GossipSub Heartbeat":
|
||||
await connectNodes(node0, nodes[i])
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
checkUntilTimeout:
|
||||
node0.mesh.getOrDefault(topic).len >= dLow and
|
||||
node0.mesh.getOrDefault(topic).len <= dHigh
|
||||
|
||||
@@ -90,7 +86,7 @@ suite "GossipSub Heartbeat":
|
||||
let peersToDisconnect = node0.mesh[topic].toSeq()[1 .. ^1].mapIt(it.peerId)
|
||||
findAndUnsubscribePeers(nodes, peersToDisconnect, topic, voidTopicHandler)
|
||||
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
checkUntilTimeout:
|
||||
node0.mesh[topic].len >= dLow and node0.mesh[topic].len <= dHigh
|
||||
node0.mesh[topic].toSeq().allIt(it.peerId notin peersToDisconnect)
|
||||
|
||||
@@ -148,9 +144,8 @@ suite "GossipSub Heartbeat":
|
||||
await waitForHeartbeat(heartbeatInterval)
|
||||
|
||||
let actualGrafts = node0.mesh[topic].toSeq().filterIt(it notin startingMesh)
|
||||
const maxOpportunisticGraftsPerHeartbeat = 2
|
||||
check:
|
||||
actualGrafts.len == maxOpportunisticGraftsPerHeartbeat
|
||||
actualGrafts.len == MaxOpportunisticGraftPeers
|
||||
actualGrafts.allIt(it in expectedGrafts)
|
||||
|
||||
asyncTest "Fanout maintenance during heartbeat - expired peers are dropped":
|
||||
@@ -174,17 +169,20 @@ suite "GossipSub Heartbeat":
|
||||
node.subscribe(topic, voidTopicHandler)
|
||||
await waitForHeartbeat(heartbeatInterval)
|
||||
|
||||
# When Node0 sends a message to the topic
|
||||
let node0 = nodes[0]
|
||||
checkUntilTimeout:
|
||||
node0.gossipsub.hasKey(topic)
|
||||
|
||||
# When Node0 sends a message to the topic
|
||||
tryPublish await node0.publish(topic, newSeq[byte](10000)), 3
|
||||
|
||||
# Then Node0 fanout peers are populated
|
||||
let maxFanoutPeers = node0.parameters.d
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
node0.fanout.hasKey(topic) and node0.fanout[topic].len == maxFanoutPeers
|
||||
checkUntilTimeout:
|
||||
node0.fanout.hasKey(topic)
|
||||
node0.fanout[topic].len > 0
|
||||
|
||||
# And after heartbeat Node0 fanout peers are dropped (because fanoutTTL < heartbeatInterval)
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
checkUntilTimeout:
|
||||
not node0.fanout.hasKey(topic)
|
||||
|
||||
asyncTest "Fanout maintenance during heartbeat - fanout peers are replenished":
|
||||
@@ -212,7 +210,7 @@ suite "GossipSub Heartbeat":
|
||||
|
||||
# Then Node0 fanout peers are populated
|
||||
let maxFanoutPeers = node0.parameters.d
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
checkUntilTimeout:
|
||||
node0.fanout[topic].len == maxFanoutPeers
|
||||
|
||||
# When all peers but first one of Node0 fanout are disconnected
|
||||
@@ -222,7 +220,7 @@ suite "GossipSub Heartbeat":
|
||||
# Then Node0 fanout peers are replenished during heartbeat
|
||||
# expecting 10[numberOfNodes] - 1[Node0] - (6[maxFanoutPeers] - 1[first peer not disconnected]) = 4
|
||||
let expectedLen = numberOfNodes - 1 - (maxFanoutPeers - 1)
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
checkUntilTimeout:
|
||||
node0.fanout[topic].len == expectedLen
|
||||
node0.fanout[topic].toSeq().allIt(it.peerId notin peersToDisconnect)
|
||||
|
||||
@@ -250,22 +248,22 @@ suite "GossipSub Heartbeat":
|
||||
let peer = nodes[1].mesh[topic].toSeq()[0]
|
||||
|
||||
# Wait for history to populate
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
checkUntilTimeout:
|
||||
peer.iDontWants.len == historyLength
|
||||
|
||||
# When Node0 sends 5 messages to the topic
|
||||
# When Node0 sends 5 messages to the topic
|
||||
const msgCount = 5
|
||||
for i in 0 ..< msgCount:
|
||||
tryPublish await nodes[0].publish(topic, newSeq[byte](1000)), 1
|
||||
|
||||
# Then Node1 receives 5 iDontWant messages from Node0
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
checkUntilTimeoutCustom(3.seconds, 50.milliseconds):
|
||||
peer.iDontWants[0].len == msgCount
|
||||
|
||||
for i in 0 ..< historyLength:
|
||||
# When heartbeat happens
|
||||
# And history moves (new element added at start, last element pruned)
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
checkUntilTimeout:
|
||||
peer.iDontWants[i].len == 0
|
||||
|
||||
# Then iDontWant messages are moved to the next element
|
||||
@@ -275,7 +273,7 @@ suite "GossipSub Heartbeat":
|
||||
expectedHistory[nextIndex] = msgCount
|
||||
|
||||
# Until they reach last element and are pruned
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
checkUntilTimeout:
|
||||
peer.iDontWants.mapIt(it.len) == expectedHistory
|
||||
|
||||
asyncTest "sentIHaves history - last element is pruned during heartbeat":
|
||||
@@ -286,6 +284,7 @@ suite "GossipSub Heartbeat":
|
||||
topic = "foobar"
|
||||
heartbeatInterval = 200.milliseconds
|
||||
historyLength = 3
|
||||
gossipThreshold = -100.0
|
||||
let nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
@@ -293,6 +292,7 @@ suite "GossipSub Heartbeat":
|
||||
dValues =
|
||||
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
|
||||
heartbeatInterval = heartbeatInterval,
|
||||
gossipThreshold = gossipThreshold,
|
||||
)
|
||||
.toGossipSub()
|
||||
|
||||
@@ -304,25 +304,45 @@ suite "GossipSub Heartbeat":
|
||||
await waitForHeartbeat(heartbeatInterval)
|
||||
|
||||
# Find Peer outside of mesh to which Node 0 will send IHave
|
||||
let peer =
|
||||
let peerOutsideMesh =
|
||||
nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
|
||||
|
||||
# Wait for history to populate
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
peer.sentIHaves.len == historyLength
|
||||
checkUntilTimeout:
|
||||
peerOutsideMesh.sentIHaves.len == historyLength
|
||||
|
||||
# When Node0 sends a messages to the topic
|
||||
tryPublish await nodes[0].publish(topic, newSeq[byte](1000)), 1
|
||||
# When a nodeOutsideMesh receives an IHave message, it responds with an IWant to request the full message from Node0
|
||||
# Setting `peer.score < gossipThreshold` to prevent the nodeOutsideMesh from sending the IWant
|
||||
# As when IWant is processed, messages are removed from sentIHaves history
|
||||
let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)
|
||||
for p in nodeOutsideMesh.gossipsub[topic].toSeq():
|
||||
p.score = 2 * gossipThreshold
|
||||
|
||||
# When NodeInsideMesh sends a messages to the topic
|
||||
let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
|
||||
let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
|
||||
tryPublish await nodeInsideMesh.publish(topic, newSeq[byte](1000)), 1
|
||||
|
||||
# When next heartbeat occurs
|
||||
# Then IHave is sent and sentIHaves is populated
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
peer.sentIHaves[^1].len == 1
|
||||
checkUntilTimeout:
|
||||
peerOutsideMesh.sentIHaves[0].len == 1
|
||||
|
||||
# Need to clear mCache as node would keep populating sentIHaves
|
||||
# Need to clear mCache as node would keep populating sentIHaves until cache is shifted enough times
|
||||
nodes[0].clearMCache()
|
||||
|
||||
# When next heartbeat occurs
|
||||
# Then last element of sentIHaves history is pruned
|
||||
checkUntilCustomTimeout(timeout, interval):
|
||||
peer.sentIHaves[^1].len == 0
|
||||
for i in 0 ..< historyLength:
|
||||
# When heartbeat happens
|
||||
# And history moves (new element added at start, last element pruned)
|
||||
checkUntilTimeout:
|
||||
peerOutsideMesh.sentIHaves[i].len == 0
|
||||
|
||||
# Then sentIHaves messages are moved to the next element
|
||||
var expectedHistory = newSeqWith(historyLength, 0)
|
||||
let nextIndex = i + 1
|
||||
if nextIndex < historyLength:
|
||||
expectedHistory[nextIndex] = 1
|
||||
|
||||
# Until they reach last element and are pruned
|
||||
checkUntilTimeout:
|
||||
peerOutsideMesh.sentIHaves.mapIt(it.len) == expectedHistory
|
||||
tests/pubsub/integration/testgossipsubmeshmanagement.nim (new file, 347 lines)
@@ -0,0 +1,347 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import chronicles
|
||||
import std/[sequtils]
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../../helpers
|
||||
|
||||
suite "GossipSub Integration - Mesh Management":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "Nodes graft peers according to DValues - numberOfNodes < dHigh":
|
||||
let
|
||||
numberOfNodes = 5
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
let expectedNumberOfPeers = numberOfNodes - 1
|
||||
|
||||
for i in 0 ..< numberOfNodes:
|
||||
let node = nodes[i]
|
||||
checkUntilTimeout:
|
||||
node.gossipsub.getOrDefault(topic).len == expectedNumberOfPeers
|
||||
node.mesh.getOrDefault(topic).len == expectedNumberOfPeers
|
||||
node.fanout.len == 0
|
||||
|
||||
asyncTest "Nodes graft peers according to DValues - numberOfNodes > dHigh":
|
||||
let
|
||||
numberOfNodes = 15
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
let
|
||||
expectedNumberOfPeers = numberOfNodes - 1
|
||||
dHigh = 12
|
||||
d = 6
|
||||
dLow = 4
|
||||
|
||||
for i in 0 ..< numberOfNodes:
|
||||
let node = nodes[i]
|
||||
checkUntilTimeout:
|
||||
node.gossipsub.getOrDefault(topic).len == expectedNumberOfPeers
|
||||
node.mesh.getOrDefault(topic).len >= dLow and
|
||||
node.mesh.getOrDefault(topic).len <= dHigh
|
||||
node.fanout.len == 0
|
||||
|
||||
asyncTest "GossipSub should add remote peer topic subscriptions":
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
let gossip1 = GossipSub(nodes[0])
|
||||
let gossip2 = GossipSub(nodes[1])
|
||||
|
||||
checkUntilTimeout:
|
||||
"foobar" in gossip2.topics
|
||||
"foobar" in gossip1.gossipsub
|
||||
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
asyncTest "GossipSub should add remote peer topic subscriptions if both peers are subscribed":
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
var subs: seq[Future[void]]
|
||||
subs &= waitSub(nodes[1], nodes[0], "foobar")
|
||||
subs &= waitSub(nodes[0], nodes[1], "foobar")
|
||||
|
||||
await allFuturesThrowing(subs)
|
||||
|
||||
let
|
||||
gossip1 = GossipSub(nodes[0])
|
||||
gossip2 = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
"foobar" in gossip1.topics
|
||||
"foobar" in gossip2.topics
|
||||
|
||||
"foobar" in gossip1.gossipsub
|
||||
"foobar" in gossip2.gossipsub
|
||||
|
||||
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId) or
|
||||
gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
gossip2.gossipsub.hasPeerId("foobar", gossip1.peerInfo.peerId) or
|
||||
gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
|
||||
|
||||
asyncTest "GossipSub invalid topic subscription":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# We must subscribe before setting the validator
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
|
||||
var gossip = GossipSub(nodes[0])
|
||||
let invalidDetected = newFuture[void]()
|
||||
gossip.subscriptionValidator = proc(topic: string): bool =
|
||||
if topic == "foobar":
|
||||
try:
|
||||
invalidDetected.complete()
|
||||
except:
|
||||
raise newException(Defect, "Exception during subscriptionValidator")
|
||||
false
|
||||
else:
|
||||
true
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await invalidDetected.wait(10.seconds)
|
||||
|
||||
asyncTest "GossipSub test directPeers":
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await GossipSub(nodes[0]).addDirectPeer(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
let invalidDetected = newFuture[void]()
|
||||
GossipSub(nodes[0]).subscriptionValidator = proc(topic: string): bool =
|
||||
if topic == "foobar":
|
||||
try:
|
||||
invalidDetected.complete()
|
||||
except:
|
||||
raise newException(Defect, "Exception during subscriptionValidator")
|
||||
false
|
||||
else:
|
||||
true
|
||||
|
||||
# DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
|
||||
### await connectNodesStar(nodes)
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await invalidDetected.wait(10.seconds)
|
||||
|
||||
asyncTest "mesh and gossipsub updated when topic subscribed and unsubscribed":
|
||||
let
|
||||
numberOfNodes = 5
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# When all of them are connected and subscribed to the same topic
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then mesh and gossipsub should be populated
|
||||
for node in nodes:
|
||||
check node.topics.contains(topic)
|
||||
check node.gossipsub.hasKey(topic)
|
||||
check node.gossipsub[topic].len() == numberOfNodes - 1
|
||||
check node.mesh.hasKey(topic)
|
||||
check node.mesh[topic].len() == numberOfNodes - 1
|
||||
|
||||
# When all nodes unsubscribe from the topic
|
||||
unsubscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the topic should be removed from mesh and gossipsub
|
||||
for node in nodes:
|
||||
check topic notin node.topics
|
||||
check topic notin node.mesh
|
||||
check topic notin node.gossipsub
|
||||
|
||||
asyncTest "handle subscribe and unsubscribe for multiple topics":
|
||||
let
|
||||
numberOfNodes = 3
|
||||
topics = @["foobar1", "foobar2", "foobar3"]
|
||||
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# When nodes subscribe to multiple topics
|
||||
await connectNodesStar(nodes)
|
||||
for topic in topics:
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then all nodes should be subscribed to the topics initially
|
||||
for i in 0 ..< topics.len:
|
||||
let topic = topics[i]
|
||||
checkUntilTimeout:
|
||||
nodes.allIt(it.topics.contains(topic))
|
||||
nodes.allIt(it.gossipsub.getOrDefault(topic).len() == numberOfNodes - 1)
|
||||
nodes.allIt(it.mesh.getOrDefault(topic).len() == numberOfNodes - 1)
|
||||
|
||||
# When they unsubscribe from all topics
|
||||
for topic in topics:
|
||||
unsubscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
# Then topics should be removed from mesh and gossipsub
|
||||
for i in 0 ..< topics.len:
|
||||
let topic = topics[i]
|
||||
checkUntilTimeout:
|
||||
nodes.allIt(not it.topics.contains(topic))
|
||||
nodes.allIt(topic notin it.gossipsub)
|
||||
nodes.allIt(topic notin it.mesh)
|
||||
|
||||
asyncTest "Unsubscribe backoff":
|
||||
const
|
||||
numberOfNodes = 3
|
||||
topic = "foobar"
|
||||
unsubscribeBackoff = 1.seconds # 1s is the minimum
|
||||
let nodes = generateNodes(
|
||||
numberOfNodes, gossip = true, unsubscribeBackoff = unsubscribeBackoff
|
||||
)
|
||||
.toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Nodes are connected to Node0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(nodes[0], nodes[i])
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
nodes[0].mesh[topic].len == numberOfNodes - 1
|
||||
|
||||
# When Node0 unsubscribes from the topic
|
||||
nodes[0].unsubscribe(topic, voidTopicHandler)
|
||||
|
||||
# And subscribes back straight away
|
||||
nodes[0].subscribe(topic, voidTopicHandler)
|
||||
|
||||
# Then its mesh is pruned and peers have applied unsubscribeBackoff
|
||||
# Waiting more than one heartbeat (60ms) and less than unsubscribeBackoff (1s)
|
||||
await sleepAsync(unsubscribeBackoff.div(2))
|
||||
check:
|
||||
not nodes[0].mesh.hasKey(topic)
|
||||
|
||||
# When unsubscribeBackoff period is done
|
||||
await sleepAsync(unsubscribeBackoff)
|
||||
|
||||
# Then on the next heartbeat mesh is rebalanced and peers are regrafted
|
||||
check:
|
||||
nodes[0].mesh[topic].len == numberOfNodes - 1
|
||||
|
||||
asyncTest "Prune backoff":
|
||||
const
|
||||
numberOfNodes = 9
|
||||
topic = "foobar"
|
||||
pruneBackoff = 1.seconds # 1s is the minimum
|
||||
dValues = some(
|
||||
DValues(
|
||||
dLow: some(6),
|
||||
dHigh: some(8),
|
||||
d: some(6),
|
||||
dLazy: some(6),
|
||||
dScore: some(4),
|
||||
dOut: some(2),
|
||||
)
|
||||
)
|
||||
let
|
||||
nodes = generateNodes(
|
||||
numberOfNodes, gossip = true, dValues = dValues, pruneBackoff = pruneBackoff
|
||||
)
|
||||
.toGossipSub()
|
||||
node0 = nodes[0]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Nodes are connected to Node0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(node0, nodes[i])
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
checkUntilTimeout:
|
||||
node0.mesh.getOrDefault(topic).len == numberOfNodes - 1
|
||||
|
||||
# When DValues of Node0 are updated to lower than initial dValues
|
||||
const newDValues = some(
|
||||
DValues(
|
||||
dLow: some(2),
|
||||
dHigh: some(4),
|
||||
d: some(3),
|
||||
dLazy: some(3),
|
||||
dScore: some(2),
|
||||
dOut: some(2),
|
||||
)
|
||||
)
|
||||
node0.parameters.applyDValues(newDValues)
|
||||
|
||||
# Then Node0 mesh is pruned to newDValues.dHigh length
|
||||
# And pruned peers have applied pruneBackoff
|
||||
checkUntilTimeout:
|
||||
node0.mesh.getOrDefault(topic).len == newDValues.get.dHigh.get
|
||||
|
||||
# When DValues of Node0 are updated back to the initial dValues
|
||||
node0.parameters.applyDValues(dValues)
|
||||
|
||||
# Waiting more than one heartbeat (60ms) and less than pruneBackoff (1s)
|
||||
await sleepAsync(pruneBackoff.div(2))
|
||||
check:
|
||||
node0.mesh.getOrDefault(topic).len == newDValues.get.dHigh.get
|
||||
|
||||
# When pruneBackoff period is done
|
||||
await sleepAsync(pruneBackoff)
|
||||
|
||||
# Then on the next heartbeat mesh is rebalanced and peers are regrafted to the initial d value
|
||||
check:
|
||||
node0.mesh.getOrDefault(topic).len == dValues.get.d.get
|
||||
tests/pubsub/integration/testgossipsubmessagecache.nim (new file, 302 lines)
@@ -0,0 +1,302 @@
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils]
|
||||
import stew/byteutils
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, floodsub]
|
||||
import ../../../libp2p/protocols/pubsub/rpc/[messages, message]
|
||||
import ../../helpers
|
||||
|
||||
suite "GossipSub Integration - Message Cache":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "Received messages are added to the message cache":
|
||||
const
|
||||
numberOfNodes = 2
|
||||
topic = "foobar"
|
||||
let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# When Node0 publishes a message to the topic
|
||||
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
|
||||
|
||||
# Then Node1 receives the message and saves it in the cache
|
||||
checkUntilTimeout:
|
||||
nodes[1].mcache.window(topic).len == 1
|
||||
|
||||
asyncTest "Message cache history shifts on heartbeat and is cleared on shift":
|
||||
const
|
||||
numberOfNodes = 2
|
||||
topic = "foobar"
|
||||
historyGossip = 3 # mcache window
|
||||
historyLength = 5
|
||||
let nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
historyGossip = historyGossip,
|
||||
historyLength = historyLength,
|
||||
)
|
||||
.toGossipSub()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# When Node0 publishes a message to the topic
|
||||
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
|
||||
|
||||
# Then Node1 receives the message and saves it in the cache
|
||||
checkUntilTimeout:
|
||||
nodes[1].mcache.window(topic).len == 1
|
||||
|
||||
let messageId = nodes[1].mcache.window(topic).toSeq()[0]
|
||||
|
||||
# When heartbeat happens, circular history shifts to the next position
|
||||
# Waiting for 5(historyLength) heartbeats
|
||||
await waitForHeartbeat(historyLength)
|
||||
|
||||
# Then history is cleared when the position with the message is reached again
|
||||
# And message is removed
|
||||
check:
|
||||
nodes[1].mcache.window(topic).len == 0
|
||||
not nodes[1].mcache.contains(messageId)
|
||||
|
||||
  asyncTest "IHave propagation capped by history window":
    # 3 Nodes, Node 0 <==> Node 1 and Node 0 <==> Node 2
    # due to DValues: 1 peer in mesh and 1 peer only in gossip of Node 0
    const
      numberOfNodes = 3
      topic = "foobar"
      historyGossip = 3 # mcache window
      historyLength = 5
    let nodes = generateNodes(
        numberOfNodes,
        gossip = true,
        historyGossip = historyGossip,
        historyLength = historyLength,
        dValues =
          some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
      )
      .toGossipSub()

    startNodesAndDeferStop(nodes)

    for i in 1 ..< numberOfNodes:
      await connectNodes(nodes[0], nodes[i])
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForHeartbeat()

    # Add observer to NodeOutsideMesh for received IHave messages
    var (receivedIHaves, checkForIHaves) = createCheckForIHave()
    let peerOutsideMesh =
      nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
    let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)
    nodeOutsideMesh.addOnRecvObserver(checkForIHaves)

    # When NodeInsideMesh sends a message to the topic
    let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
    let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
    tryPublish await nodeInsideMesh.publish(topic, newSeq[byte](1000)), 1

    # On each heartbeat, Node0 retrieves messages in its mcache and sends IHave to NodeOutsideMesh
    # On heartbeat, Node0 mcache advances to the next position (rotating the message cache window)
    # Node0 will gossip about messages from the last few positions, depending on the mcache window size (historyGossip)
    # By waiting more than 'historyGossip' (2x3 = 6) heartbeats, we ensure Node0 does not send IHave messages for messages older than the window size
    await waitForHeartbeat(2 * historyGossip)

    # Then NodeOutsideMesh receives 3 (historyGossip) IHave messages
    check:
      receivedIHaves[].len == historyGossip

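  # For reference, a sketch of how these knobs map onto the parameters object,
  # assuming GossipSubParams.init() from libp2p/protocols/pubsub/gossipsub:
  #
  #   var params = GossipSubParams.init()
  #   params.historyLength = 5 # heartbeats a message stays in the mcache
  #   params.historyGossip = 3 # most recent cache windows advertised via IHave
  #   doAssert params.historyGossip <= params.historyLength
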
  asyncTest "Message is retrieved from cache when handling IWant and relayed to a peer outside the mesh":
    # 3 Nodes, Node 0 <==> Node 1 and Node 0 <==> Node 2
    # due to DValues: 1 peer in mesh and 1 peer only in gossip of Node 0
    const
      numberOfNodes = 3
      topic = "foobar"
      historyGossip = 3 # mcache window
      historyLength = 5
    let nodes = generateNodes(
        numberOfNodes,
        gossip = true,
        historyGossip = historyGossip,
        historyLength = historyLength,
        dValues =
          some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
      )
      .toGossipSub()

    startNodesAndDeferStop(nodes)

    for i in 1 ..< numberOfNodes:
      await connectNodes(nodes[0], nodes[i])
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForHeartbeat()

    # Add observer to Node0 for received IWant messages
    var (receivedIWantsNode0, checkForIWant) = createCheckForIWant()
    nodes[0].addOnRecvObserver(checkForIWant)

    # Find Peer outside of mesh to which Node 0 will relay received message
    let peerOutsideMesh =
      nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
    let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)

    # Add observer to NodeOutsideMesh for received messages
    var (receivedMessagesNodeOutsideMesh, checkForMessage) = createCheckForMessages()
    nodeOutsideMesh.addOnRecvObserver(checkForMessage)

    # When NodeInsideMesh publishes a message to the topic
    let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
    let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
    tryPublish await nodeInsideMesh.publish(topic, "Hello!".toBytes()), 1

    # Then Node0 receives the message from NodeInsideMesh and saves it in its cache
    checkUntilTimeout:
      nodes[0].mcache.window(topic).len == 1
    let messageId = nodes[0].mcache.window(topic).toSeq()[0]

    # When Node0 sends an IHave message to NodeOutsideMesh during a heartbeat
    # Then NodeOutsideMesh responds with an IWant message to Node0
    checkUntilTimeout:
      receivedIWantsNode0[].anyIt(messageId in it.messageIDs)

    # When Node0 handles the IWant message, it retrieves the message from its message cache using the MessageId
    # Then Node0 relays the original message to NodeOutsideMesh
    checkUntilTimeout:
      messageId in
        receivedMessagesNodeOutsideMesh[].mapIt(
          nodeOutsideMesh.msgIdProvider(it).value()
        )

  asyncTest "Published and received messages are added to the seen cache":
    const
      numberOfNodes = 2
      topic = "foobar"
    let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()

    startNodesAndDeferStop(nodes)

    await connectNodesStar(nodes)
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForHeartbeat()

    # When Node0 publishes a message to the topic
    tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1

    # Then Node1 receives the message
    # Get messageId from mcache
    checkUntilTimeout:
      nodes[1].mcache.window(topic).len == 1
    let messageId = nodes[1].mcache.window(topic).toSeq()[0]

    # And both nodes save it in their seen cache
    # Node0 when publishing, Node1 when receiving
    check:
      nodes[0].hasSeen(nodes[0].salt(messageId))
      nodes[1].hasSeen(nodes[1].salt(messageId))

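  # A minimal sketch of the seen-cache calls used above (salt/addSeen/hasSeen),
  # where the cache is keyed by salted message ids:
  #
  #   let salted = node.salt(messageId)
  #   if not node.addSeen(salted):
  #     discard # first time this id is seen; addSeen returns true if already known
  #   doAssert node.hasSeen(salted)
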
  asyncTest "Received messages are dropped if they are already in seen cache":
    # 3 Nodes, Node 0 <==> Node 1 and Node 2 not connected and not subscribed yet
    const
      numberOfNodes = 3
      topic = "foobar"
    let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()

    startNodesAndDeferStop(nodes)

    await connectNodes(nodes[0], nodes[1])
    nodes[0].subscribe(topic, voidTopicHandler)
    nodes[1].subscribe(topic, voidTopicHandler)
    await waitForHeartbeat()

    # When Node0 publishes two messages to the topic
    tryPublish await nodes[0].publish(topic, "Hello".toBytes()), 1
    tryPublish await nodes[0].publish(topic, "World".toBytes()), 1

    # Then Node1 receives the messages
    # Getting messageIds from mcache
    checkUntilTimeout:
      nodes[1].mcache.window(topic).len == 2

    let messageId1 = nodes[1].mcache.window(topic).toSeq()[0]
    let messageId2 = nodes[1].mcache.window(topic).toSeq()[1]

    # And Node2 doesn't receive the messages (it is not connected yet)
    check:
      nodes[2].mcache.window(topic).len == 0

    # When Node2 connects with Node0 and subscribes to the topic
    await connectNodes(nodes[0], nodes[2])
    nodes[2].subscribe(topic, voidTopicHandler)
    await waitForHeartbeat()

    # And messageIds are added to node0PeerNode2 sentIHaves to allow processing IWant
    let node0PeerNode2 = nodes[0].getPeerByPeerId(topic, nodes[2].peerInfo.peerId)
    node0PeerNode2.sentIHaves[0].incl(messageId1)
    node0PeerNode2.sentIHaves[0].incl(messageId2)

    # And messageId1 is added to seen messages cache of Node2
    check:
      not nodes[2].addSeen(nodes[2].salt(messageId1))

    # And Node2 sends IWant to Node0 requesting both messages
    let iWantMessage =
      ControlMessage(iwant: @[ControlIWant(messageIDs: @[messageId1, messageId2])])
    let node2PeerNode0 = nodes[2].getPeerByPeerId(topic, nodes[0].peerInfo.peerId)
    nodes[2].broadcast(
      @[node2PeerNode0], RPCMsg(control: some(iWantMessage)), isHighPriority = false
    )

    await waitForHeartbeat()

    # Then Node2 receives only messageId2 and messageId1 is dropped
    check:
      nodes[2].mcache.window(topic).len == 1
      nodes[2].mcache.window(topic).toSeq()[0] == messageId2

  asyncTest "Published messages are dropped if they are already in seen cache":
    func customMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
      ok("fixed_message_id_string".toBytes())

    const
      numberOfNodes = 2
      topic = "foobar"
    let nodes = generateNodes(
        numberOfNodes, gossip = true, msgIdProvider = customMsgIdProvider
      )
      .toGossipSub()

    startNodesAndDeferStop(nodes)

    await connectNodesStar(nodes)
    nodes.subscribeAllNodes(topic, voidTopicHandler)
    await waitForHeartbeat()

    # Given Node0 has msgId already in seen cache
    let data = "Hello".toBytes()
    let msg = Message.init(
      some(nodes[0].peerInfo), data, topic, some(nodes[0].msgSeqno), nodes[0].sign
    )
    let msgId = nodes[0].msgIdProvider(msg)

    check:
      not nodes[0].addSeen(nodes[0].salt(msgId.value()))

    # When Node0 publishes the message to the topic
    discard await nodes[0].publish(topic, data)

    await waitForHeartbeat()

    # Then Node1 doesn't receive the message
    check:
      nodes[1].mcache.window(topic).len == 0

@@ -11,12 +11,12 @@

import std/[sequtils, enumerate]
import stew/byteutils
import utils
import sugar
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, timedcache]
import ../../libp2p/protocols/pubsub/rpc/[message, protobuf]
import ../helpers, ../utils/[futures]
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, timedcache]
import ../../../libp2p/protocols/pubsub/rpc/[message]
import ../../helpers, ../../utils/[futures]

const MsgIdSuccess = "msg id gen success"

@@ -72,62 +72,11 @@ proc createMessages(

  return (iwantMessageIds, sentMessages)

suite "GossipSub Message Handling":
suite "GossipSub Integration - Message Handling":
  teardown:
    checkTrackers()

  asyncTest "Drop messages of topics without subscription":
    let topic = "foobar"
    var (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      let peer = peers[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("bar" & $i).toBytes(), topic, some(seqno))
      await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    check gossipSub.mcache.msgs.len == 0

  asyncTest "subscription limits":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    gossipSub.topicsHigh = 10

    var tooManyTopics: seq[string]
    for i in 0 .. gossipSub.topicsHigh + 10:
      tooManyTopics &= "topic" & $i
    let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)

    let conn = TestBufferStream.new(noop)
    let peerId = randomPeerId()
    conn.peerId = peerId
    let peer = gossipSub.getPubSubPeer(peerId)

    await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))

    check:
      gossipSub.gossipsub.len == gossipSub.topicsHigh
      peer.behaviourPenalty > 0.0

    await conn.close()
    await gossipSub.switch.stop()

  asyncTest "invalid message bytes":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let peerId = randomPeerId()
    let peer = gossipSub.getPubSubPeer(peerId)

    expect(CatchableError):
      await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])

    await gossipSub.switch.stop()

  asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
  asyncTest "Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
    # This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
    # Expected: Both messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -154,7 +103,7 @@ suite "GossipSub Message Handling":

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
  asyncTest "Discard IWANT replies when both messages individually exceed maxSize":
    # This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
    # Expected: No messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -181,7 +130,7 @@ suite "GossipSub Message Handling":

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
  asyncTest "Process IWANT replies when both messages are below maxSize":
    # This test checks if two messages, both below the maxSize, are correctly processed and sent.
    # Expected: Both messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -208,7 +157,7 @@ suite "GossipSub Message Handling":

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
  asyncTest "Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
    # This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
    # Expected: Only the smaller message should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -247,7 +196,7 @@ suite "GossipSub Message Handling":
    let
      numberOfNodes = 3
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true)
      nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()

    startNodesAndDeferStop(nodes)

@@ -262,9 +211,9 @@ suite "GossipSub Message Handling":

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, @[handler0, handler1, handler2])
    await waitForPeersInTable(
      nodes, topic, newSeqWith(numberOfNodes, 2), PeerTableType.Mesh
    )

    checkUntilTimeout:
      nodes.allIt(it.mesh.getOrDefault(topic).len == numberOfNodes - 1)

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) == 2

@@ -464,12 +413,12 @@ suite "GossipSub Message Handling":
    # Send message that will be rejected by the receiver's validator
    tryPublish await nodes[0].publish("bar", "Hello!".toBytes()), 1

    checkUntilCustomTimeout(500.milliseconds, 20.milliseconds):
    checkUntilTimeout:
      recvCounter == 2
      validatedCounter == 1
      sendCounter == 2

  asyncTest "e2e - GossipSub send over mesh A -> B":
  asyncTest "GossipSub send over mesh A -> B":
    var passed: Future[bool] = newFuture[bool]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
@@ -499,7 +448,7 @@ suite "GossipSub Message Handling":
      gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
      not gossip2.fanout.hasPeerId("foobar", gossip1.peerInfo.peerId)

  asyncTest "e2e - GossipSub should not send to source & peers who already seen":
  asyncTest "GossipSub should not send to source & peers who already seen":
    # 3 nodes: A, B, C
    # A publishes, C relays, B is having a long validation
    # so B should not send to anyone
@@ -565,7 +514,7 @@ suite "GossipSub Message Handling":

    await bFinished

  asyncTest "e2e - GossipSub send over floodPublish A -> B":
  asyncTest "GossipSub send over floodPublish A -> B":
    var passed: Future[bool] = newFuture[bool]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
@@ -595,7 +544,7 @@ suite "GossipSub Message Handling":
      "foobar" notin gossip2.gossipsub
      not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)

  asyncTest "e2e - GossipSub floodPublish limit":
  asyncTest "GossipSub floodPublish limit":
    let
      nodes = setupNodes(20)
      gossip1 = GossipSub(nodes[0])
@@ -607,7 +556,7 @@ suite "GossipSub Message Handling":
    await connectNodes(nodes[1 ..^ 1], nodes[0])
    await baseTestProcedure(nodes, gossip1, gossip1.parameters.dLow, 17)

  asyncTest "e2e - GossipSub floodPublish limit with bandwidthEstimatebps = 0":
  asyncTest "GossipSub floodPublish limit with bandwidthEstimatebps = 0":
    let
      nodes = setupNodes(20)
      gossip1 = GossipSub(nodes[0])
@@ -620,7 +569,7 @@ suite "GossipSub Message Handling":
    await connectNodes(nodes[1 ..^ 1], nodes[0])
    await baseTestProcedure(nodes, gossip1, nodes.len - 1, nodes.len - 1)

  asyncTest "e2e - GossipSub with multiple peers":
  asyncTest "GossipSub with multiple peers":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
@@ -662,7 +611,7 @@ suite "GossipSub Message Handling":
    check:
      "foobar" in gossip.gossipsub

  asyncTest "e2e - GossipSub with multiple peers (sparse)":
  asyncTest "GossipSub with multiple peers (sparse)":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
@@ -711,7 +660,7 @@ suite "GossipSub Message Handling":
      gossip.fanout.len == 0
      gossip.mesh["foobar"].len > 0

  asyncTest "e2e - GossipSub with multiple peers - control deliver (sparse)":
  asyncTest "GossipSub with multiple peers - control deliver (sparse)":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)

tests/pubsub/integration/testgossipsubscoring.nim (new file, 535 lines)
@@ -0,0 +1,535 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils, strutils]
import stew/byteutils
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, pubsubpeer]
import ../../../libp2p/protocols/pubsub/rpc/[messages]
import ../../helpers
import ../../utils/[futures]

suite "GossipSub Integration - Scoring":
  const topic = "foobar"

  teardown:
    checkTrackers()

  asyncTest "Flood publish to all peers with score above threshold, regardless of subscription":
    let
      numberOfNodes = 3
      nodes = generateNodes(numberOfNodes, gossip = true, floodPublish = true)
      g0 = GossipSub(nodes[0])

    startNodesAndDeferStop(nodes)

    # Nodes 1 and 2 are connected to node 0
    await connectNodes(nodes[0], nodes[1])
    await connectNodes(nodes[0], nodes[2])

    let (handlerFut1, handler1) = createCompleteHandler()
    let (handlerFut2, handler2) = createCompleteHandler()

    # Nodes are subscribed to the same topic
    nodes[1].subscribe(topic, handler1)
    nodes[2].subscribe(topic, handler2)
    await waitForHeartbeat()

    # Given node 2's score is below the threshold
    for peer in g0.gossipsub.getOrDefault(topic):
      if peer.peerId == nodes[2].peerInfo.peerId:
        peer.score = (g0.parameters.publishThreshold - 1)

    # When node 0 publishes a message to the topic
    let message = "Hello!".toBytes()
    tryPublish await nodes[0].publish(topic, message), 1

    # Then only node 1 should receive the message
    let results = await waitForStates(@[handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
    check:
      results[0].isCompleted(true)
      results[1].isPending()

  asyncTest "Should not rate limit decodable messages below the size allowed":
    let
      nodes = generateNodes(
          2,
          gossip = true,
          overheadRateLimit = Opt.some((20, 1.millis)),
          verifySignature = false,
          # Avoid being disconnected by failing signature verification
        )
        .toGossipSub()
      rateLimitHits = currentRateLimitHits()

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForHeartbeat()

    nodes[0].broadcast(
      nodes[0].mesh[topic],
      RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](10))]),
      isHighPriority = true,
    )
    await waitForHeartbeat()

    check:
      currentRateLimitHits() == rateLimitHits
      nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    nodes[1].parameters.disconnectPeerAboveRateLimit = true
    nodes[0].broadcast(
      nodes[0].mesh["foobar"],
      RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](12))]),
      isHighPriority = true,
    )
    await waitForHeartbeat()

    check:
      nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
      currentRateLimitHits() == rateLimitHits

  asyncTest "Should rate limit undecodable messages above the size allowed":
    let
      nodes = generateNodes(
          2,
          gossip = true,
          overheadRateLimit = Opt.some((20, 1.millis)),
          verifySignature = false,
          # Avoid being disconnected by failing signature verification
        )
        .toGossipSub()
      rateLimitHits = currentRateLimitHits()

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForHeartbeat()

    # Simulate sending an undecodable message
    await nodes[1].peers[nodes[0].switch.peerInfo.peerId].sendEncoded(
      newSeqWith(33, 1.byte), isHighPriority = true
    )
    await waitForHeartbeat()

    check:
      currentRateLimitHits() == rateLimitHits + 1
      nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    nodes[1].parameters.disconnectPeerAboveRateLimit = true
    await nodes[0].peers[nodes[1].switch.peerInfo.peerId].sendEncoded(
      newSeqWith(35, 1.byte), isHighPriority = true
    )

    checkUntilTimeout:
      nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
      currentRateLimitHits() == rateLimitHits + 2

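  # A rough reading of the sizes above, assuming the overheadRateLimit tuple is a
  # (budget in bytes, period) pair of (20, 1.millis): an undecodable payload counts
  # entirely as overhead, so 33 bytes > 20 bytes triggers one rate-limit hit, and
  # once disconnectPeerAboveRateLimit = true the 35-byte payload both increments
  # the counter and gets the offending peer disconnected.
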
  asyncTest "Should rate limit decodable messages above the size allowed":
    let
      nodes = generateNodes(
          2,
          gossip = true,
          overheadRateLimit = Opt.some((20, 1.millis)),
          verifySignature = false,
          # Avoid being disconnected by failing signature verification
        )
        .toGossipSub()
      rateLimitHits = currentRateLimitHits()

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForHeartbeat()

    let msg = RPCMsg(
      control: some(
        ControlMessage(
          prune:
            @[
              ControlPrune(
                topicID: topic,
                peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))],
                backoff: 123'u64,
              )
            ]
        )
      )
    )
    nodes[0].broadcast(nodes[0].mesh[topic], msg, isHighPriority = true)
    await waitForHeartbeat()

    check:
      currentRateLimitHits() == rateLimitHits + 1
      nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    nodes[1].parameters.disconnectPeerAboveRateLimit = true
    let msg2 = RPCMsg(
      control: some(
        ControlMessage(
          prune:
            @[
              ControlPrune(
                topicID: topic,
                peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))],
                backoff: 123'u64,
              )
            ]
        )
      )
    )
    nodes[0].broadcast(nodes[0].mesh[topic], msg2, isHighPriority = true)

    checkUntilTimeout:
      nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
      currentRateLimitHits() == rateLimitHits + 2

  asyncTest "Should rate limit invalid messages above the size allowed":
    let
      nodes = generateNodes(
          2,
          gossip = true,
          overheadRateLimit = Opt.some((20, 1.millis)),
          verifySignature = false,
          # Avoid being disconnected by failing signature verification
        )
        .toGossipSub()
      rateLimitHits = currentRateLimitHits()

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForHeartbeat()

    proc execValidator(
        topic: string, message: messages.Message
    ): Future[ValidationResult] {.async.} =
      return ValidationResult.Reject

    nodes[0].addValidator(topic, execValidator)
    nodes[1].addValidator(topic, execValidator)

    let msg = RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](40))])

    nodes[0].broadcast(nodes[0].mesh[topic], msg, isHighPriority = true)
    await waitForHeartbeat()

    check:
      currentRateLimitHits() == rateLimitHits + 1
      nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    nodes[1].parameters.disconnectPeerAboveRateLimit = true
    nodes[0].broadcast(
      nodes[0].mesh[topic],
      RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](35))]),
      isHighPriority = true,
    )

    checkUntilTimeout:
      nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
      currentRateLimitHits() == rateLimitHits + 2

  asyncTest "DirectPeers: don't kick direct peer with low score":
    let nodes = generateNodes(2, gossip = true).toGossipSub()

    startNodesAndDeferStop(nodes)
    await nodes.addDirectPeerStar()

    nodes[1].parameters.disconnectBadPeers = true
    nodes[1].parameters.graylistThreshold = 100000

    var (handlerFut, handler) = createCompleteHandler()
    nodes[0].subscribe(topic, voidTopicHandler)
    nodes[1].subscribe(topic, handler)
    await waitForHeartbeat()

    nodes[1].updateScores()

    # peer shouldn't be in our mesh
    check:
      topic notin nodes[1].mesh
      nodes[1].peerStats[nodes[0].switch.peerInfo.peerId].score <
        nodes[1].parameters.graylistThreshold

    tryPublish await nodes[0].publish(topic, toBytes("hellow")), 1

    # Without directPeers, this would fail
    var futResult = await waitForState(handlerFut)
    check:
      futResult.isCompleted(true)

  asyncTest "Peers disconnections mechanics":
    const numberOfNodes = 10
    let nodes =
      generateNodes(numberOfNodes, gossip = true, triggerSelf = true).toGossipSub()

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    var seen: Table[string, int]
    var seenFut = newFuture[void]()
    for i in 0 ..< numberOfNodes:
      let dialer = nodes[i]
      var handler: TopicHandler
      closureScope:
        var peerName = $dialer.peerInfo.peerId
        handler = proc(topicName: string, data: seq[byte]) {.async.} =
          seen.mgetOrPut(peerName, 0).inc()
          check topicName == topic
          if not seenFut.finished() and seen.len >= numberOfNodes:
            seenFut.complete()

      dialer.subscribe(topic, handler)

    await waitSubGraph(nodes, topic)

    # ensure peer stats are stored properly and kept properly
    check:
      nodes[0].peerStats.len == numberOfNodes - 1 # minus self

    tryPublish await nodes[0].publish(topic, toBytes("hello")), 1

    await seenFut.wait(2.seconds)
    check:
      seen.len >= numberOfNodes
    for k, v in seen.pairs:
      check:
        v >= 1

    for node in nodes:
      check:
        topic in node.gossipsub
        node.fanout.len == 0
        node.mesh[topic].len > 0

    # Removing some subscriptions

    for i in 0 ..< numberOfNodes:
      if i mod 3 != 0:
        nodes[i].unsubscribeAll(topic)

    # Waiting 2 heartbeats
    await nodes[0].waitForHeartbeatByEvent(2)

    # ensure peer stats are stored properly and kept properly
    check:
      nodes[0].peerStats.len == numberOfNodes - 1 # minus self

    # Adding subscriptions again
    for i in 0 ..< numberOfNodes:
      if i mod 3 != 0:
        nodes[i].subscribe(topic, voidTopicHandler)

    # Waiting 2 heartbeats
    await nodes[0].waitForHeartbeatByEvent(2)

    # ensure peer stats are stored properly and kept properly
    check:
      nodes[0].peerStats.len == numberOfNodes - 1 # minus self

  asyncTest "DecayInterval":
    const
      topic = "foobar"
      decayInterval = 50.milliseconds
    let nodes =
      generateNodes(2, gossip = true, decayInterval = decayInterval).toGossipSub()

    nodes.setDefaultTopicParams(topic)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    var (handlerFut, handler) = createCompleteHandler()
    nodes[0].subscribe(topic, voidTopicHandler)
    nodes[1].subscribe(topic, handler)

    tryPublish await nodes[0].publish(topic, toBytes("hello")), 1

    var futResult = await waitForState(handlerFut)
    check:
      futResult.isCompleted(true)

    nodes[0].peerStats[nodes[1].peerInfo.peerId].topicInfos[topic].meshMessageDeliveries =
      100
    nodes[0].topicParams[topic].meshMessageDeliveriesDecay = 0.9

    # We should have decayed 5 times, though allowing 4..6
    await sleepAsync(decayInterval * 5)
    check:
      nodes[0].peerStats[nodes[1].peerInfo.peerId].topicInfos[topic].meshMessageDeliveries in
        50.0 .. 66.0

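  # Worked value for the range checked above: five decay ticks at 0.9 give
  # 100 * 0.9^5 = 59.049; the 50.0 .. 66.0 window also tolerates four ticks
  # (100 * 0.9^4 = 65.61) or six ticks (100 * 0.9^6 = 53.14).
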
  asyncTest "Nodes publishing invalid messages are penalised and disconnected":
    # Given GossipSub nodes with Topic Params
    const numberOfNodes = 3

    let
      nodes = generateNodes(
          numberOfNodes,
          gossip = true,
          verifySignature = false,
          # Disable signature verification to isolate validation penalties
          decayInterval = 200.milliseconds, # scoring heartbeat interval
          heartbeatInterval = 5.seconds,
          # heartbeatInterval >>> decayInterval to prevent pruning peers with bad score
          publishThreshold = -150.0,
          graylistThreshold = -200.0,
          disconnectBadPeers = false,
        )
        .toGossipSub()
      centerNode = nodes[0]
      node1peerId = nodes[1].peerInfo.peerId
      node2peerId = nodes[2].peerInfo.peerId

    nodes.setDefaultTopicParams(topic)
    for node in nodes:
      node.topicParams[topic].invalidMessageDeliveriesWeight = -10.0
      node.topicParams[topic].invalidMessageDeliveriesDecay = 0.9

    startNodesAndDeferStop(nodes)

    # And Node 0 is center node, connected to others
    await connectNodes(nodes[0], nodes[1]) # center to Node 1 (valid messages)
    await connectNodes(nodes[0], nodes[2]) # center to Node 2 (invalid messages)

    nodes.subscribeAllNodes(topic, voidTopicHandler)

    # And center node has message validator: accept from node 1, reject from node 2
    var validatedMessageCount = 0
    proc validationHandler(
        topic: string, message: Message
    ): Future[ValidationResult] {.async.} =
      validatedMessageCount.inc
      if string.fromBytes(message.data).contains("invalid"):
        return ValidationResult.Reject # reject invalid messages
      else:
        return ValidationResult.Accept

    nodes[0].addValidator(topic, validationHandler)

    # 1st scoring heartbeat
    checkUntilTimeout:
      centerNode.gossipsub.getOrDefault(topic).len == numberOfNodes - 1
      centerNode.getPeerScore(node1peerId) > 0
      centerNode.getPeerScore(node2peerId) > 0

    # When messages are broadcast
    const messagesToSend = 5
    for i in 0 ..< messagesToSend:
      nodes[1].broadcast(
        nodes[1].mesh[topic],
        RPCMsg(messages: @[Message(topic: topic, data: ("valid_" & $i).toBytes())]),
        isHighPriority = true,
      )
      nodes[2].broadcast(
        nodes[2].mesh[topic],
        RPCMsg(messages: @[Message(topic: topic, data: ("invalid_" & $i).toBytes())]),
        isHighPriority = true,
      )

    # And messages are processed
    # Then invalidMessageDeliveries stats are applied
    checkUntilTimeout:
      validatedMessageCount == messagesToSend * (numberOfNodes - 1)
      centerNode.getPeerTopicInfo(node1peerId, topic).invalidMessageDeliveries == 0.0
        # valid messages
      centerNode.getPeerTopicInfo(node2peerId, topic).invalidMessageDeliveries == 5.0
        # invalid messages

    # When scoring heartbeat occurs (2nd scoring heartbeat)
    # Then peer scores are calculated
    checkUntilTimeout:
      # node1: p1 (time in mesh) + p2 (first message deliveries)
      centerNode.getPeerScore(node1peerId) > 5.0 and
        centerNode.getPeerScore(node1peerId) < 6.0
      # node2: p1 (time in mesh) - p4 (invalid message deliveries)
      centerNode.getPeerScore(node2peerId) < -249.0 and
        centerNode.getPeerScore(node2peerId) > -250.0
      # all peers are still connected
      centerNode.mesh[topic].toSeq().len == 2

    # When disconnecting peers with bad score (score < graylistThreshold) is enabled
    for node in nodes:
      node.parameters.disconnectBadPeers = true

    # Then peers with bad score are disconnected on scoring heartbeat (3rd scoring heartbeat)
    checkUntilTimeout:
      centerNode.mesh[topic].toSeq().len == 1

  asyncTest "Nodes not meeting Mesh Message Deliveries Threshold are penalised":
    # Given GossipSub nodes with Topic Params
    const numberOfNodes = 2

    let
      nodes = generateNodes(
          numberOfNodes,
          gossip = true,
          decayInterval = 200.milliseconds, # scoring heartbeat interval
          heartbeatInterval = 5.seconds,
          # heartbeatInterval >>> decayInterval to prevent pruning peers with bad score
          disconnectBadPeers = false,
        )
        .toGossipSub()
      node1PeerId = nodes[1].peerInfo.peerId

    nodes.setDefaultTopicParams(topic)
    for node in nodes:
      node.topicParams[topic].meshMessageDeliveriesThreshold = 5
      node.topicParams[topic].meshMessageDeliveriesActivation = 1.milliseconds
        # active from the start
      node.topicParams[topic].meshMessageDeliveriesDecay = 0.9
      node.topicParams[topic].meshMessageDeliveriesWeight = -10.0
      node.topicParams[topic].meshFailurePenaltyDecay = 0.9
      node.topicParams[topic].meshFailurePenaltyWeight = -5.0

    startNodesAndDeferStop(nodes)

    # And Nodes are connected and subscribed to the topic
    await connectNodes(nodes[0], nodes[1])
    nodes.subscribeAllNodes(topic, voidTopicHandler)

    # When scoring heartbeat occurs
    # Then Peer has negative score due to active meshMessageDeliveries deficit
    checkUntilTimeout:
      nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
      nodes[0].mesh.getOrDefault(topic).len == numberOfNodes - 1
      # p1 (time in mesh) - p3 (mesh message deliveries)
      nodes[0].getPeerScore(node1PeerId) < -249.0

    # When Peer is unsubscribed
    nodes[1].unsubscribe(topic, voidTopicHandler)

    # Then meshFailurePenalty is applied due to active meshMessageDeliveries deficit
    checkUntilTimeout:
      nodes[0].getPeerTopicInfo(node1PeerId, topic).meshFailurePenalty == 25

    # When next scoring heartbeat occurs
    # Then Peer has negative score
    checkUntilTimeout:
      # p3b (mesh failure penalty) [p1 and p3 not calculated when peer was pruned]
      nodes[0].getPeerScore(node1PeerId) == -125.0

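  # Worked values for the two checks above: the deliveries deficit is
  # meshMessageDeliveriesThreshold - 0 = 5, so meshFailurePenalty = 5^2 = 25,
  # and with meshFailurePenaltyWeight = -5.0 the retained score is 25 * -5.0 = -125.0.
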
    # When Peer subscribes again
    nodes[1].subscribe(topic, voidTopicHandler)

    # Then Peer is not grafted to the mesh due to negative score (score was retained)
    checkUntilTimeout:
      nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
      nodes[0].mesh.getOrDefault(topic).len == 0

tests/pubsub/integration/testgossipsubsignatureflags.nim (new file, 217 lines)
@@ -0,0 +1,217 @@
# Nim-LibP2P
# Copyright (c) 2023-2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import unittest2
import chronos
import stew/byteutils
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, pubsub]
import ../../../libp2p/protocols/pubsub/rpc/[messages]
import ../../helpers
import ../../utils/futures

suite "GossipSub Integration - Signature Flags":
  const
    topic = "foobar"
    testData = "test message".toBytes()

  teardown:
    checkTrackers()

  asyncTest "Default - messages are signed when sign=true and contain fromPeer and seqno when anonymize=false":
    let nodes = generateNodes(
      2, gossip = true, sign = true, verifySignature = true, anonymize = false
    )

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes.subscribeAllNodes(topic, voidTopicHandler)

    var (receivedMessages, checkForMessage) = createCheckForMessages()
    nodes[1].addOnRecvObserver(checkForMessage)

    tryPublish await nodes[0].publish(topic, testData), 1

    checkUntilTimeout:
      receivedMessages[].len > 0

    let receivedMessage = receivedMessages[][0]
    check:
      receivedMessage.data == testData
      receivedMessage.fromPeer.data.len > 0
      receivedMessage.seqno.len > 0
      receivedMessage.signature.len > 0
      receivedMessage.key.len > 0

  asyncTest "Sign flag - messages are not signed when sign=false":
    let nodes = generateNodes(
      2, gossip = true, sign = false, verifySignature = false, anonymize = false
    )

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes.subscribeAllNodes(topic, voidTopicHandler)

    var (receivedMessages, checkForMessage) = createCheckForMessages()
    nodes[1].addOnRecvObserver(checkForMessage)

    tryPublish await nodes[0].publish(topic, testData), 1

    checkUntilTimeout:
      receivedMessages[].len > 0

    let receivedMessage = receivedMessages[][0]
    check:
      receivedMessage.data == testData
      receivedMessage.signature.len == 0
      receivedMessage.key.len == 0

  asyncTest "Anonymize flag - messages are anonymous when anonymize=true":
    let nodes = generateNodes(
      2, gossip = true, sign = true, verifySignature = true, anonymize = true
    ) # anonymize = true takes precedence
    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes.subscribeAllNodes(topic, voidTopicHandler)

    var (receivedMessages, checkForMessage) = createCheckForMessages()
    nodes[1].addOnRecvObserver(checkForMessage)

    let testData = "anonymous message".toBytes()
    tryPublish await nodes[0].publish(topic, testData), 1

    checkUntilTimeout:
      receivedMessages[].len > 0

    let receivedMessage = receivedMessages[][0]
    check:
      receivedMessage.data == testData
      receivedMessage.fromPeer.data.len == 0
      receivedMessage.seqno.len == 0
      receivedMessage.signature.len == 0
      receivedMessage.key.len == 0

  type NodeConfig = object
    sign: bool
    verify: bool
    anonymize: bool

  type Scenario = object
    senderConfig: NodeConfig
    receiverConfig: NodeConfig
    shouldWork: bool

  let scenarios: seq[Scenario] =
    @[
      # valid combos
      # S default, R default
      Scenario(
        senderConfig: NodeConfig(sign: true, verify: true, anonymize: false),
        receiverConfig: NodeConfig(sign: true, verify: true, anonymize: false),
        shouldWork: true,
      ),
      # S default, R anonymous
      Scenario(
        senderConfig: NodeConfig(sign: true, verify: true, anonymize: false),
        receiverConfig: NodeConfig(sign: false, verify: false, anonymize: true),
        shouldWork: true,
      ),
      # S anonymous, R anonymous
      Scenario(
        senderConfig: NodeConfig(sign: false, verify: false, anonymize: true),
        receiverConfig: NodeConfig(sign: false, verify: false, anonymize: true),
        shouldWork: true,
      ),
      # S only sign, R only verify
      Scenario(
        senderConfig: NodeConfig(sign: true, verify: false, anonymize: false),
        receiverConfig: NodeConfig(sign: false, verify: true, anonymize: false),
        shouldWork: true,
      ),
      # S anonymous (anonymize overrides sign), R unsigned and not verifying
      Scenario(
        senderConfig: NodeConfig(sign: true, verify: true, anonymize: true),
        receiverConfig: NodeConfig(sign: false, verify: false, anonymize: false),
        shouldWork: true,
      ),
      # S anonymous (not signed despite the flag), R minimal
      Scenario(
        senderConfig: NodeConfig(sign: false, verify: true, anonymize: true),
        receiverConfig: NodeConfig(sign: true, verify: false, anonymize: false),
        shouldWork: true,
      ),
      # S unsigned, R unsigned
      Scenario(
        senderConfig: NodeConfig(sign: false, verify: false, anonymize: false),
        receiverConfig: NodeConfig(sign: false, verify: false, anonymize: false),
        shouldWork: true,
      ),

      # invalid combos
      # S anonymous, R default
      Scenario(
        senderConfig: NodeConfig(sign: false, verify: false, anonymize: true),
        receiverConfig: NodeConfig(sign: true, verify: true, anonymize: false),
        shouldWork: false,
      ),
      # S unsigned, R anonymous but verify
      Scenario(
        senderConfig: NodeConfig(sign: false, verify: false, anonymize: false),
        receiverConfig: NodeConfig(sign: true, verify: true, anonymize: true),
        shouldWork: false,
      ),
      # S unsigned, R default
      Scenario(
        senderConfig: NodeConfig(sign: false, verify: false, anonymize: false),
        receiverConfig: NodeConfig(sign: true, verify: true, anonymize: false),
        shouldWork: false,
      ),
    ]

  for scenario in scenarios:
    let title = "Compatibility matrix: " & $scenario
    asyncTest title:
      let
        sender = generateNodes(
            1,
            gossip = true,
            sign = scenario.senderConfig.sign,
            verifySignature = scenario.senderConfig.verify,
            anonymize = scenario.senderConfig.anonymize,
          )[0]
        receiver = generateNodes(
            1,
            gossip = true,
            sign = scenario.receiverConfig.sign,
            verifySignature = scenario.receiverConfig.verify,
            anonymize = scenario.receiverConfig.anonymize,
          )[0]
        nodes = @[sender, receiver]

      startNodesAndDeferStop(nodes)
      await connectNodesStar(nodes)

      let (messageReceivedFut, handler) = createCompleteHandler()

      nodes.subscribeAllNodes(topic, handler)
      await waitForHeartbeat()

      discard await sender.publish(topic, testData)

      let messageReceived = await waitForState(messageReceivedFut, HEARTBEAT_TIMEOUT)
      check:
        if scenario.shouldWork:
          messageReceived.isCompleted(true)
        else:
          messageReceived.isCancelled()