Mirror of https://github.com/vacp2p/nim-libp2p.git
Synced 2026-01-11 02:18:27 -05:00

Compare commits: move-testd...newchronos
31 Commits
| SHA1 |
|---|
| e18e552d45 |
| e4faec5570 |
| 41c9bf8e8c |
| 7ae366d979 |
| 9b33cea225 |
| f8077f7432 |
| 773fc67865 |
| 7e07ffc5a8 |
| aa1c33ffe9 |
| f1e220fba4 |
| 5ad656bf26 |
| cfd631457a |
| 4f8597609b |
| 4ed72a753c |
| 2a9abbe925 |
| ee61e234ac |
| 8f54367e3a |
| 61826a20e4 |
| 32951e1a68 |
| 1d13e405e4 |
| 729e879c1c |
| 64c9cf1b9e |
| 4d94892eb0 |
| 3ecb1744ce |
| 2f9c3fb3e2 |
| 2609c270b8 |
| 48b3e34cd3 |
| abb2c43667 |
| d1cfbb35d3 |
| 38a630eee0 |
| be1a2023ce |
.github/workflows/daily_common.yml (vendored, 10 changed lines)
@@ -85,7 +85,7 @@ jobs:
           nimble install_pinned

       - name: Install dependencies (latest)
-        if: ${{ inputs.pinned_deps != 'true' }}
+        if: ${{ inputs.pinned_deps == false }}
         run: |
           nimble install -y --depsOnly

@@ -96,4 +96,12 @@ jobs:
           export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
           nimble test
+
+      - name: Run integration tests
+        if: ${{ matrix.platform.os == 'linux' && matrix.cpu == 'amd64' }}
+        run: |
+          nim --version
+          nimble --version
+
+          export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
+          nimble testintegration
.github/workflows/interop.yml (vendored, 36 changed lines)
@@ -41,20 +41,22 @@ jobs:
          s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}

-  run-hole-punching-interop:
-    name: Run hole-punching interoperability tests
-    runs-on: ubuntu-22.04
-    steps:
-      - uses: actions/checkout@v4
-      - uses: docker/setup-buildx-action@v3
-      - name: Build image
-        run: docker buildx build --load -t nim-libp2p-head -f interop/hole-punching/Dockerfile .
-      - name: Run tests
-        uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
-        with:
-          test-filter: nim-libp2p-head
-          extra-versions: ${{ github.workspace }}/interop/hole-punching/version.json
-          s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
-          s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
-          s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
-          aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
+  # nim-libp2p#1367: hole punching tests are temporary disabled as they keep failing
+  # and issue does not seem to be on nim-libp2p side
+  # run-hole-punching-interop:
+  #   name: Run hole-punching interoperability tests
+  #   runs-on: ubuntu-22.04
+  #   steps:
+  #     - uses: actions/checkout@v4
+  #     - uses: docker/setup-buildx-action@v3
+  #     - name: Build image
+  #       run: docker buildx build --load -t nim-libp2p-head -f interop/hole-punching/Dockerfile .
+  #     - name: Run tests
+  #       uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
+  #       with:
+  #         test-filter: nim-libp2p-head
+  #         extra-versions: ${{ github.workspace }}/interop/hole-punching/version.json
+  #         s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
+  #         s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
+  #         s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
+  #         aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
.pinned (3 changed lines)
@@ -1,6 +1,6 @@
 bearssl;https://github.com/status-im/nim-bearssl@#34d712933a4e0f91f5e66bc848594a581504a215
 chronicles;https://github.com/status-im/nim-chronicles@#81a4a7a360c78be9c80c8f735c76b6d4a1517304
-chronos;https://github.com/status-im/nim-chronos@#b55e2816eb45f698ddaca8d8473e401502562db2
+chronos;https://github.com/status-im/nim-chronos@#4a3563595a0eedd2f84ddf52583a6d6bf6113d87
 dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
 faststreams;https://github.com/status-im/nim-faststreams@#c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d
 httputils;https://github.com/status-im/nim-http-utils@#79cbab1460f4c0cdde2084589d017c43a3d7b4f1
@@ -19,4 +19,3 @@ websock;https://github.com/status-im/nim-websock@#d5cd89062cd2d168ef35193c7d29d2
 zlib;https://github.com/status-im/nim-zlib@#daa8723fd32299d4ca621c837430c29a5a11e19a
 jwt;https://github.com/vacp2p/nim-jwt@#18f8378de52b241f321c1f9ea905456e89b95c6f
 bearssl_pkey_decoder;https://github.com/vacp2p/bearssl_pkey_decoder@#21dd3710df9345ed2ad8bf8f882761e07863b8e0
-bio;https://github.com/xzeshen/bio@#0f5ed58b31c678920b6b4f7c1783984e6660be97
@@ -9,8 +9,9 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]

 requires "nim >= 1.6.0",
   "nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
-  "chronicles >= 0.10.3 & < 0.11.0", "chronos >= 4.0.4", "metrics", "secp256k1",
-  "stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.7", "bio",
+  "chronicles >= 0.10.3 & < 0.11.0", "metrics", "secp256k1",
+  "stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.7",
+  "https://github.com/status-im/nim-chronos#4a3563595a0eedd2f84ddf52583a6d6bf6113d87",
+  "https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"

 let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
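The version bound on chronos (`chronos >= 4.0.4`) becomes a Git URL pin, matching the `.pinned` update above; presumably the branch (newchronos) tracks a chronos commit that is not yet in a release. For reference, a minimal sketch of how a `.nimble` requirement pins an exact revision (URL and commit taken from this diff):

```nim
# In a .nimble file: a URL requirement with a '#<commit>' suffix pins the
# dependency to that exact revision instead of a released version.
requires "https://github.com/status-im/nim-chronos#4a3563595a0eedd2f84ddf52583a6d6bf6113d87"
```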
@@ -1,6 +1,6 @@
-import options, base64, sequtils, strutils, json, uri
+import options, sequtils, strutils, json, uri
 from times import DateTime, parse
-import chronos/apps/http/httpclient, jwt, results, bearssl/pem
+import chronos/apps/http/httpclient, jwt, results, bearssl/pem, chronicles

 import ./utils
 import ../../crypto/crypto
@@ -8,6 +8,9 @@ import ../../crypto/rsa

 export ACMEError

+logScope:
+  topics = "libp2p acme api"
+
 const
   LetsEncryptURL* = "https://acme-v02.api.letsencrypt.org"
   LetsEncryptURLStaging* = "https://acme-staging-v02.api.letsencrypt.org"
@@ -78,15 +81,22 @@ type ACMERegisterResponse* = object
   status*: ACMEAccountStatus

 type ACMEChallengeStatus* {.pure.} = enum
-  pending = "pending"
-  processing = "processing"
-  valid = "valid"
-  invalid = "invalid"
+  PENDING = "pending"
+  PROCESSING = "processing"
+  VALID = "valid"
+  INVALID = "invalid"
+
+type ACMEOrderStatus* {.pure.} = enum
+  PENDING = "pending"
+  READY = "ready"
+  PROCESSING = "processing"
+  VALID = "valid"
+  INVALID = "invalid"

 type ACMEChallengeType* {.pure.} = enum
-  dns01 = "dns-01"
-  http01 = "http-01"
-  tlsalpn01 = "tls-alpn-01"
+  DNS01 = "dns-01"
+  HTTP01 = "http-01"
+  TLSALPN01 = "tls-alpn-01"

 type ACMEChallengeToken* = string
@@ -104,12 +114,12 @@ type ACMEChallengeRequest = object
   identifiers: seq[ACMEChallengeIdentifier]

 type ACMEChallengeResponseBody = object
-  status: ACMEChallengeStatus
+  status: ACMEOrderStatus
   authorizations: seq[Authorization]
   finalize: string

 type ACMEChallengeResponse* = object
-  status*: ACMEChallengeStatus
+  status*: ACMEOrderStatus
   authorizations*: seq[Authorization]
   finalize*: string
   order*: string
@@ -123,14 +133,7 @@ type ACMEAuthorizationsResponse* = object
   challenges*: seq[ACMEChallenge]

 type ACMECompletedResponse* = object
-  checkURL: string
-
-type ACMEOrderStatus* {.pure.} = enum
-  pending = "pending"
-  ready = "ready"
-  processing = "processing"
-  valid = "valid"
-  invalid = "invalid"
+  url: string

 type ACMECheckKind* = enum
   ACMEOrderCheck
@@ -152,8 +155,8 @@ type ACMEOrderResponse* = object
   expires: string

 type ACMECertificateResponse* = object
-  rawCertificate: string
-  certificateExpiry: DateTime
+  rawCertificate*: string
+  certificateExpiry*: DateTime

 template handleError*(msg: string, body: untyped): untyped =
   try:
@@ -213,7 +216,7 @@ method requestNonce*(
 proc acmeHeader(
     self: ACMEApi, uri: Uri, key: KeyPair, needsJwk: bool, kid: Opt[Kid]
 ): Future[ACMERequestHeader] {.async: (raises: [ACMEError, CancelledError]).} =
-  if not needsJwk and kid.isNone:
+  if not needsJwk and kid.isNone():
     raise newException(ACMEError, "kid not set")

   if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
@@ -318,7 +321,7 @@ proc requestNewOrder*(
   let acmeResponse =
     await self.post(parseUri((await self.getDirectory()).newOrder), payload)
   let challengeResponseBody = acmeResponse.body.to(ACMEChallengeResponseBody)
-  if challengeResponseBody.authorizations.len() == 0:
+  if challengeResponseBody.authorizations.len == 0:
     raise newException(ACMEError, "Authorizations field is empty")
   ACMEChallengeResponse(
     status: challengeResponseBody.status,
@@ -338,22 +341,22 @@ proc requestAuthorizations*(
 proc requestChallenge*(
     self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
 ): Future[ACMEChallengeResponseWrapper] {.async: (raises: [ACMEError, CancelledError]).} =
-  let challengeResponse = await self.requestNewOrder(domains, key, kid)
-  if challengeResponse.status != ACMEChallengeStatus.pending:
-    raise newException(
-      ACMEError, "Invalid new challenge status: " & $challengeResponse.status
-    )
+  let orderResponse = await self.requestNewOrder(domains, key, kid)
+  if orderResponse.status != ACMEOrderStatus.PENDING and
+      orderResponse.status != ACMEOrderStatus.READY:
+    # ready is a valid status when renewing certs before expiry
+    raise newException(ACMEError, "Invalid new order status: " & $orderResponse.status)

   let authorizationsResponse =
-    await self.requestAuthorizations(challengeResponse.authorizations, key, kid)
+    await self.requestAuthorizations(orderResponse.authorizations, key, kid)
   if authorizationsResponse.challenges.len == 0:
     raise newException(ACMEError, "No challenges received")

   return ACMEChallengeResponseWrapper(
-    finalize: challengeResponse.finalize,
-    order: challengeResponse.order,
+    finalize: orderResponse.finalize,
+    order: orderResponse.order,
     dns01: authorizationsResponse.challenges.filterIt(
-      it.`type` == ACMEChallengeType.dns01
+      it.`type` == ACMEChallengeType.DNS01
     )[0],
       # getting the first element is safe since we checked that authorizationsResponse.challenges.len != 0
   )
@@ -396,7 +399,7 @@ proc requestCheck*(
 proc sendChallengeCompleted*(
     self: ACMEApi, chalURL: Uri, key: KeyPair, kid: Kid
 ): Future[ACMECompletedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
-  handleError("sendChallengeCompleted (send notify)"):
+  handleError("sendChallengeCompleted"):
     let payload =
       await self.createSignedAcmeRequest(chalURL, %*{}, key, kid = Opt.some(kid))
     let acmeResponse = await self.post(chalURL, payload)
@@ -412,9 +415,9 @@ proc checkChallengeCompleted*(
   for i in 0 .. retries:
     let checkResponse = await self.requestCheck(checkURL, ACMEChallengeCheck, key, kid)
     case checkResponse.chalStatus
-    of ACMEChallengeStatus.pending:
+    of ACMEChallengeStatus.PENDING:
       await sleepAsync(checkResponse.retryAfter) # try again after some delay
-    of ACMEChallengeStatus.valid:
+    of ACMEChallengeStatus.VALID:
       return true
     else:
       raise newException(
@@ -438,12 +441,9 @@ proc completeChallenge*(
 proc requestFinalize*(
     self: ACMEApi, domain: Domain, finalize: Uri, key: KeyPair, kid: Kid
 ): Future[ACMEFinalizeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
-  let derCSR = createCSR(domain)
-  let b64CSR = base64.encode(derCSR.toSeq, safe = true)
-
   handleError("requestFinalize"):
     let payload = await self.createSignedAcmeRequest(
-      finalize, %*{"csr": b64CSR}, key, kid = Opt.some(kid)
+      finalize, %*{"csr": createCSR(domain)}, key, kid = Opt.some(kid)
     )
     let acmeResponse = await self.post(finalize, payload)
     # server responds with updated order response
@@ -459,17 +459,14 @@ proc checkCertFinalized*(
   for i in 0 .. retries:
     let checkResponse = await self.requestCheck(order, ACMEOrderCheck, key, kid)
     case checkResponse.orderStatus
-    of ACMEOrderStatus.valid:
+    of ACMEOrderStatus.VALID:
       return true
-    of ACMEOrderStatus.processing:
+    of ACMEOrderStatus.PROCESSING:
       await sleepAsync(checkResponse.retryAfter) # try again after some delay
     else:
-      raise newException(
-        ACMEError,
-        "Failed certificate finalization: expected 'valid', got '" &
-          $checkResponse.orderStatus & "'",
-      )
-  return false
+      error "Failed certificate finalization",
+        description = "expected 'valid', got '" & $checkResponse.orderStatus & "'"
+      return false # do not try again

   return false
@@ -508,5 +505,5 @@ proc downloadCertificate*(
     certificateExpiry: parse(orderResponse.expires, "yyyy-MM-dd'T'HH:mm:ss'Z'"),
   )

-proc close*(self: ACMEApi): Future[void] {.async: (raises: [CancelledError]).} =
+proc close*(self: ACMEApi) {.async: (raises: [CancelledError]).} =
   await self.session.closeWait()
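The enum rework splits challenge states from order states, matching RFC 8555, where challenge objects (pending/processing/valid/invalid) and order objects (pending/ready/processing/valid/invalid) have distinct state machines; the `{.pure.}` UPPERCASE members keep the two `PENDING`/`VALID` names from clashing. A minimal sketch of the polling shape used by `checkChallengeCompleted`/`checkCertFinalized` (the `StatusFetcher` type and `pollOrder` helper are hypothetical):

```nim
import chronos

type
  ACMEOrderStatus {.pure.} = enum
    PENDING = "pending"
    READY = "ready"
    PROCESSING = "processing"
    VALID = "valid"
    INVALID = "invalid"
  StatusFetcher = proc(): Future[ACMEOrderStatus] {.gcsafe.}

# Hypothetical poller: keep retrying while the order is in flight,
# stop on any terminal state (mirrors checkCertFinalized above).
proc pollOrder(fetch: StatusFetcher, retries = 10,
               delay = 1.seconds): Future[bool] {.async.} =
  for _ in 0 .. retries:
    let status = await fetch()
    case status
    of ACMEOrderStatus.VALID:
      return true
    of ACMEOrderStatus.PENDING, ACMEOrderStatus.PROCESSING:
      await sleepAsync(delay) # not terminal yet; poll again
    else:
      return false # INVALID (or READY, for this check) will never become VALID
  return false
```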
@@ -10,40 +10,50 @@
 {.push raises: [].}

 import uri
-import chronos, results, bio
+import chronos, results, chronicles, stew/byteutils

 import ./api, ./utils
 import ../../crypto/crypto
 import ../../crypto/rsa

 export api

 type KeyAuthorization* = string

-type ACMEClient* = object
+type ACMEClient* = ref object
   api: ACMEApi
   key*: KeyPair
   kid*: Kid

+logScope:
+  topics = "libp2p acme client"
+
 proc new*(
     T: typedesc[ACMEClient],
-    api: Opt[ACMEApi] = Opt.none(ACMEApi),
-    key: Opt[KeyPair] = Opt.none(KeyPair),
+    api: ACMEApi = ACMEApi.new(acmeServerURL = parseUri(LetsEncryptURL)),
+    key: Opt[KeyPair] = Opt.none(KeyPair),
+    kid: Kid = Kid(""),
     rng: ref HmacDrbgContext = newRng(),
-    acmeServerURL: Uri = parseUri(LetsEncryptURL),
-): Future[T] {.async: (raises: [ACMEError, CancelledError]).} =
-  let api = api.valueOr:
-    ACMEApi.new()
+): T {.raises: [].} =
   let key = key.valueOr:
     KeyPair.random(PKScheme.RSA, rng[]).get()
-  let registerResponse = await api.requestRegister(key)
-  T(api: api, key: key, kid: registerResponse.kid)
+  T(api: api, key: key, kid: kid)

-proc genKeyAuthorization(self: ACMEClient, token: string): KeyAuthorization =
-  base64UrlEncode(@(sha256.digest((token & "." & thumbprint(self.key)).toByteSeq).data))
+proc getOrInitKid*(
+    self: ACMEClient
+): Future[Kid] {.async: (raises: [ACMEError, CancelledError]).} =
+  if self.kid.len == 0:
+    let registerResponse = await self.api.requestRegister(self.key)
+    self.kid = registerResponse.kid
+  return self.kid
+
+proc genKeyAuthorization*(self: ACMEClient, token: string): KeyAuthorization =
+  base64UrlEncode(@(sha256.digest((token & "." & thumbprint(self.key)).toBytes).data))

 proc getChallenge*(
     self: ACMEClient, domains: seq[api.Domain]
-): Future[ACMEChallengeResponseWrapper] {.raises: [ACMEError, CancelledError].} =
-  self.api.requestChallenge(domains, self.key, self.kid)
+): Future[ACMEChallengeResponseWrapper] {.async: (raises: [ACMEError, CancelledError]).} =
+  await self.api.requestChallenge(domains, self.key, await self.getOrInitKid())

 proc getCertificate*(
     self: ACMEClient, domain: api.Domain, challenge: ACMEChallengeResponseWrapper
@@ -51,20 +61,26 @@ proc getCertificate*(
   let chalURL = parseUri(challenge.dns01.url)
   let orderURL = parseUri(challenge.order)
   let finalizeURL = parseUri(challenge.finalize)
-  discard await self.api.sendChallengeCompleted(chalURL, self.key, self.kid)
+  trace "sending challenge completed notification"
+  discard
+    await self.api.sendChallengeCompleted(chalURL, self.key, await self.getOrInitKid())

-  let completed = await self.api.checkChallengeCompleted(chalURL, self.key, self.kid)
+  trace "checking for completed challenge"
+  let completed =
+    await self.api.checkChallengeCompleted(chalURL, self.key, await self.getOrInitKid())
   if not completed:
     raise
       newException(ACMEError, "Failed to signal ACME server about challenge completion")

+  trace "waiting for certificate to be finalized"
   let finalized = await self.api.certificateFinalized(
-    domain, finalizeURL, orderURL, self.key, self.kid
+    domain, finalizeURL, orderURL, self.key, await self.getOrInitKid()
   )
   if not finalized:
     raise newException(ACMEError, "Failed to finalize certificate for domain " & domain)

+  trace "downloading certificate"
   await self.api.downloadCertificate(orderURL)

-proc close*(self: ACMEClient): Future[void] {.async: (raises: [CancelledError]).} =
+proc close*(self: ACMEClient) {.async: (raises: [CancelledError]).} =
   await self.api.close()
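Taken together, the lazy-registration rework means a caller can now construct the client synchronously; the ACME account is registered by `getOrInitKid()` on first use. A hedged usage sketch (the domain and flow ordering are illustrative, names follow this diff):

```nim
import chronos
# assumes the acme client module from this diff is imported

proc demo() {.async.} =
  # construction no longer awaits: kid starts empty and is fetched lazily
  let client = ACMEClient.new()
  let challenge = await client.getChallenge(@[Domain("example.com")])
  echo "dns-01 token: ", challenge.dns01.token
  # ...publish the TXT record for the challenge, then:
  let cert = await client.getCertificate(Domain("example.com"), challenge)
  echo "certificate expires: ", cert.certificateExpiry
  await client.close()

waitFor demo()
```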
@@ -1,6 +1,7 @@
 import base64, strutils, chronos/apps/http/httpclient, json
 import ../../errors
 import ../../transports/tls/certificate_ffi
+import ../../transports/tls/certificate
 import ../../crypto/crypto
 import ../../crypto/rsa
@@ -62,3 +63,5 @@ proc createCSR*(domain: string): string {.raises: [ACMEError].} =

   if cert_signing_req(domain.cstring, certKey, derCSR.addr) != CERT_SUCCESS:
     raise newException(ACMEError, "Failed to create CSR")
+
+  base64.encode(derCSR.toSeq, safe = true)
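`createCSR` now returns the DER CSR already base64url-encoded, which is exactly what the JSON `finalize` payload needs (see the `requestFinalize` change above). For reference, std `base64.encode` with `safe = true` emits the URL-safe alphabet of RFC 4648; a tiny illustrative snippet:

```nim
import std/base64

let der = @[byte 0xfb, 0xef, 0xff]   # stand-in for DER bytes
echo base64.encode(der)               # -> "++//" (standard alphabet: '+' and '/')
echo base64.encode(der, safe = true)  # -> "--__" (URL-safe alphabet: '-' and '_')
```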
libp2p/autotls/service.nim (new file, 223 lines)
@@ -0,0 +1,223 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}
{.push public.}

import net, results, json, sequtils

import chronos/apps/http/httpclient, chronos, chronicles, bearssl/rand

import
  ./acme/client,
  ./utils,
  ../crypto/crypto,
  ../nameresolving/dnsresolver,
  ../peeridauth/client,
  ../peerinfo,
  ../switch,
  ../utils/heartbeat,
  ../wire

logScope:
  topics = "libp2p autotls"

export LetsEncryptURL, AutoTLSError

const
  DefaultDnsServers* =
    @[
      initTAddress("1.1.1.1:53"),
      initTAddress("1.0.0.1:53"),
      initTAddress("[2606:4700:4700::1111]:53"),
    ]
  DefaultRenewCheckTime* = 1.hours
  DefaultRenewBufferTime = 1.hours

  AutoTLSBroker* = "registration.libp2p.direct"
  AutoTLSDNSServer* = "libp2p.direct"
  HttpOk* = 200
  HttpCreated* = 201
  # NoneIp is needed because nim 1.6.16 can't do proper generic inference
  NoneIp = Opt.none(IpAddress)

type SigParam = object
  k: string
  v: seq[byte]

type AutotlsCert* = ref object
  cert*: TLSCertificate
  expiry*: Moment

type AutotlsConfig* = ref object
  acmeServerURL*: Uri
  dnsResolver*: DnsResolver
  ipAddress: Opt[IpAddress]
  renewCheckTime*: Duration
  renewBufferTime*: Duration

type AutotlsService* = ref object of Service
  acmeClient: ACMEClient
  bearer*: Opt[BearerToken]
  brokerClient: PeerIDAuthClient
  cert*: Opt[AutotlsCert]
  certReady*: AsyncEvent
  config: AutotlsConfig
  managerFut: Future[void]
  peerInfo: PeerInfo
  rng: ref HmacDrbgContext

proc new*(T: typedesc[AutotlsCert], cert: TLSCertificate, expiry: Moment): T =
  T(cert: cert, expiry: expiry)

proc getCertWhenReady*(
    self: AutotlsService
): Future[TLSCertificate] {.async: (raises: [AutoTLSError, CancelledError]).} =
  await self.certReady.wait()
  return self.cert.get.cert

proc new*(
    T: typedesc[AutotlsConfig],
    ipAddress: Opt[IpAddress] = NoneIp,
    nameServers: seq[TransportAddress] = DefaultDnsServers,
    acmeServerURL: Uri = parseUri(LetsEncryptURL),
    renewCheckTime: Duration = DefaultRenewCheckTime,
    renewBufferTime: Duration = DefaultRenewBufferTime,
): T =
  T(
    dnsResolver: DnsResolver.new(nameServers),
    acmeServerURL: acmeServerURL,
    ipAddress: ipAddress,
    renewCheckTime: renewCheckTime,
    renewBufferTime: renewBufferTime,
  )

proc new*(
    T: typedesc[AutotlsService],
    rng: ref HmacDrbgContext = newRng(),
    config: AutotlsConfig = AutotlsConfig.new(),
): T =
  T(
    acmeClient: ACMEClient.new(api = ACMEApi.new(acmeServerURL = config.acmeServerURL)),
    brokerClient: PeerIDAuthClient.new(),
    bearer: Opt.none(BearerToken),
    cert: Opt.none(AutotlsCert),
    certReady: newAsyncEvent(),
    config: config,
    managerFut: nil,
    peerInfo: nil,
    rng: rng,
  )

method setup*(
    self: AutotlsService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
  trace "Setting up AutotlsService"
  let hasBeenSetup = await procCall Service(self).setup(switch)
  if hasBeenSetup:
    self.peerInfo = switch.peerInfo
    if self.config.ipAddress.isNone():
      try:
        self.config.ipAddress = Opt.some(getPublicIPAddress())
      except AutoTLSError as exc:
        error "Failed to get public IP address", err = exc.msg
        return false
    self.managerFut = self.run(switch)
  return hasBeenSetup

method issueCertificate(
    self: AutotlsService
) {.base, async: (raises: [AutoTLSError, ACMEError, PeerIDAuthError, CancelledError]).} =
  trace "Issuing certificate"

  assert not self.peerInfo.isNil(), "Cannot issue new certificate: peerInfo not set"

  # generate autotls domain string: "*.{peerID}.libp2p.direct"
  let baseDomain =
    api.Domain(encodePeerId(self.peerInfo.peerId) & "." & AutoTLSDNSServer)
  let domain = api.Domain("*." & baseDomain)

  let acmeClient = self.acmeClient

  trace "Requesting ACME challenge"
  let dns01Challenge = await acmeClient.getChallenge(@[domain])
  let keyAuth = acmeClient.genKeyAuthorization(dns01Challenge.dns01.token)
  let strMultiaddresses: seq[string] = self.peerInfo.addrs.mapIt($it)
  let payload = %*{"value": keyAuth, "addresses": strMultiaddresses}
  let registrationURL = parseUri("https://" & AutoTLSBroker & "/v1/_acme-challenge")

  trace "Sending challenge to AutoTLS broker"
  let (bearer, response) =
    await self.brokerClient.send(registrationURL, self.peerInfo, payload, self.bearer)
  if self.bearer.isNone():
    # save bearer token for future
    self.bearer = Opt.some(bearer)
  if response.status != HttpOk:
    raise newException(
      AutoTLSError, "Failed to authenticate with AutoTLS Broker at " & AutoTLSBroker
    )

  debug "Waiting for DNS record to be set"
  let dnsSet = await checkDNSRecords(
    self.config.dnsResolver, self.config.ipAddress.get(), baseDomain, keyAuth
  )
  if not dnsSet:
    raise newException(AutoTLSError, "DNS records not set")

  debug "Notifying challenge completion to ACME and downloading cert"
  let certResponse = await acmeClient.getCertificate(domain, dns01Challenge)

  debug "Installing certificate"
  let newCert =
    try:
      AutotlsCert.new(
        TLSCertificate.init(certResponse.rawCertificate),
        asMoment(certResponse.certificateExpiry),
      )
    except TLSStreamProtocolError:
      raise newException(AutoTLSError, "Could not parse downloaded certificates")
  self.cert = Opt.some(newCert)
  self.certReady.fire()
  debug "Certificate installed"

method run*(
    self: AutotlsService, switch: Switch
) {.async: (raises: [CancelledError]).} =
  heartbeat "Certificate Management", self.config.renewCheckTime:
    if self.cert.isNone():
      try:
        await self.issueCertificate()
      except CancelledError as exc:
        raise exc
      except CatchableError as exc:
        error "Failed to issue certificate", err = exc.msg
        break

    # AutotlsService will renew the cert 1h before it expires
    let cert = self.cert.get
    let waitTime = cert.expiry - Moment.now - self.config.renewBufferTime
    if waitTime <= self.config.renewBufferTime:
      try:
        await self.issueCertificate()
      except CancelledError as exc:
        raise exc
      except CatchableError as exc:
        error "Failed to renew certificate", err = exc.msg
        break

method stop*(
    self: AutotlsService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
  let hasBeenStopped = await procCall Service(self).stop(switch)
  if hasBeenStopped:
    await self.acmeClient.close()
    await self.brokerClient.close()
    await self.managerFut.cancelAndWait()
    self.managerFut = nil
  return hasBeenStopped
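As a usage sketch, callers tune the renewal cadence through `AutotlsConfig`; the values below are illustrative, and `LetsEncryptURLStaging` comes from the ACME api module shown earlier (the service is then attached to the switch via `withAutotls`, shown later in this diff):

```nim
import chronos, uri

# Hedged sketch: check certificates every 30 minutes and renew once the
# certificate is within 2 hours of expiry.
let config = AutotlsConfig.new(
  acmeServerURL = parseUri(LetsEncryptURLStaging), # staging server for testing
  renewCheckTime = 30.minutes,
  renewBufferTime = 2.hours,
)
let autotls = AutotlsService.new(config = config)
```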
libp2p/autotls/utils.nim (new file, 109 lines)
@@ -0,0 +1,109 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}
{.push public.}

import net, strutils
from times import DateTime, toTime, toUnix

import chronos, stew/base36, chronicles

import
  ./acme/client,
  ../errors,
  ../peerid,
  ../multihash,
  ../cid,
  ../multicodec,
  ../nameresolving/dnsresolver

const
  DefaultDnsRetries = 10
  DefaultDnsRetryTime = 1.seconds

type AutoTLSError* = object of LPError

proc checkedGetPrimaryIPAddr*(): IpAddress {.raises: [AutoTLSError].} =
  # This is so that we don't need to catch Exceptions directly
  # since we support 1.6.16 and getPrimaryIPAddr before nim 2 didn't have explicit .raises. pragmas
  try:
    return getPrimaryIPAddr()
  except Exception as exc:
    raise newException(AutoTLSError, "Error while getting primary IP address", exc)

proc isIPv4*(ip: IpAddress): bool =
  ip.family == IpAddressFamily.IPv4

proc isPublic*(ip: IpAddress): bool {.raises: [AutoTLSError].} =
  let ip = $ip
  try:
    not (
      ip.startsWith("10.") or
      (ip.startsWith("172.") and parseInt(ip.split(".")[1]) in 16 .. 31) or
      ip.startsWith("192.168.") or ip.startsWith("127.") or ip.startsWith("169.254.")
    )
  except ValueError as exc:
    raise newException(AutoTLSError, "Failed to parse IP address", exc)

proc getPublicIPAddress*(): IpAddress {.raises: [AutoTLSError].} =
  let ip = checkedGetPrimaryIPAddr()
  if not ip.isIPv4():
    raise newException(AutoTLSError, "Host does not have an IPv4 address")
  if not ip.isPublic():
    raise newException(AutoTLSError, "Host does not have a public IPv4 address")
  return ip

proc asMoment*(dt: DateTime): Moment =
  let unixTime: int64 = dt.toTime.toUnix
  return Moment.init(unixTime, Second)

proc encodePeerId*(peerId: PeerId): string {.raises: [AutoTLSError].} =
  var mh: MultiHash
  let decodeResult = MultiHash.decode(peerId.data, mh)
  if decodeResult.isErr() or decodeResult.get() == -1:
    raise
      newException(AutoTLSError, "Failed to decode PeerId: invalid multihash format")

  let cidResult = Cid.init(CIDv1, multiCodec("libp2p-key"), mh)
  if cidResult.isErr():
    raise newException(AutoTLSError, "Failed to initialize CID from multihash")

  return Base36.encode(cidResult.get().data.buffer)

proc checkDNSRecords*(
    dnsResolver: DnsResolver,
    ipAddress: IpAddress,
    baseDomain: api.Domain,
    keyAuth: KeyAuthorization,
    retries: int = DefaultDnsRetries,
): Future[bool] {.async: (raises: [AutoTLSError, CancelledError]).} =
  # if my ip address is 100.10.10.3 then the ip4Domain will be:
  # 100-10-10-3.{peerIdBase36}.libp2p.direct
  # and acme challenge TXT domain will be:
  # _acme-challenge.{peerIdBase36}.libp2p.direct
  let dashedIpAddr = ($ipAddress).replace(".", "-")
  let acmeChalDomain = api.Domain("_acme-challenge." & baseDomain)
  let ip4Domain = api.Domain(dashedIpAddr & "." & baseDomain)

  var txt: seq[string]
  var ip4: seq[TransportAddress]
  for _ in 0 .. retries:
    txt = await dnsResolver.resolveTxt(acmeChalDomain)
    try:
      ip4 = await dnsResolver.resolveIp(ip4Domain, 0.Port)
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      error "Failed to resolve IP", description = exc.msg # retry
    if txt.len > 0 and txt[0] == keyAuth and ip4.len > 0:
      return true
    await sleepAsync(DefaultDnsRetryTime)

  return false
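To make the domain construction in `checkDNSRecords` concrete, here is a small standalone sketch of the same string transforms (the peer ID value is truncated and purely illustrative):

```nim
import std/strutils

let peerIdBase36 = "k51qzi5uqu5d..." # truncated, illustrative
let baseDomain = peerIdBase36 & ".libp2p.direct"
let ip = "100.10.10.3"

# 100.10.10.3 -> 100-10-10-3.<peerIdBase36>.libp2p.direct
let ip4Domain = ip.replace(".", "-") & "." & baseDomain
# TXT record name the broker must publish for the dns-01 challenge
let acmeChalDomain = "_acme-challenge." & baseDomain

assert ip4Domain.startsWith("100-10-10-3.")
```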
@@ -15,7 +15,7 @@ runnableExamples:

 {.push raises: [].}

-import options, tables, chronos, chronicles, sequtils
+import options, tables, chronos, chronicles, sequtils, uri
 import
   switch,
   peerid,
@@ -30,6 +30,7 @@ import
   connmanager,
   upgrademngrs/muxedupgrade,
   observedaddrmanager,
+  autotls/service,
   nameresolving/nameresolver,
   errors,
   utility
@@ -42,8 +43,9 @@ export

 const MemoryAutoAddress* = memorytransport.MemoryAutoAddress

 type
-  TransportProvider* {.public.} =
-    proc(upgr: Upgrade, privateKey: PrivateKey): Transport {.gcsafe, raises: [].}
+  TransportProvider* {.public.} = proc(
+    upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
+  ): Transport {.gcsafe, raises: [].}

   SecureProtocol* {.pure.} = enum
     Noise
@@ -65,6 +67,7 @@ type
     nameResolver: NameResolver
     peerStoreCapacity: Opt[int]
     autonat: bool
+    autotls: AutotlsService
     circuitRelay: Relay
     rdv: RendezVous
     services: seq[Service]
@@ -156,7 +159,9 @@ proc withTransport*(
   let switch = SwitchBuilder
     .new()
     .withTransport(
-      proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
+      proc(
+          upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
+      ): Transport =
         TcpTransport.new(flags, upgr)
     )
     .build()
@@ -167,7 +172,7 @@ proc withTcpTransport*(
     b: SwitchBuilder, flags: set[ServerFlags] = {}
 ): SwitchBuilder {.public.} =
   b.withTransport(
-    proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
+    proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
       TcpTransport.new(flags, upgr)
   )
@@ -179,7 +184,7 @@ proc withWsTransport*(
     flags: set[ServerFlags] = {},
 ): SwitchBuilder =
   b.withTransport(
-    proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
+    proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
       WsTransport.new(upgr, tlsPrivateKey, tlsCertificate, tlsFlags, flags)
   )
@@ -188,13 +193,13 @@ when defined(libp2p_quic_support):

 proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
   b.withTransport(
-    proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
+    proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
       QuicTransport.new(upgr, privateKey)
   )

 proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
   b.withTransport(
-    proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
+    proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
       MemoryTransport.new(upgr)
   )
@@ -252,6 +257,12 @@ proc withAutonat*(b: SwitchBuilder): SwitchBuilder =
   b.autonat = true
   b

+proc withAutotls*(
+    b: SwitchBuilder, config: AutotlsConfig = AutotlsConfig.new()
+): SwitchBuilder {.public.} =
+  b.autotls = AutotlsService.new(config = config)
+  b
+
 proc withCircuitRelay*(b: SwitchBuilder, r: Relay = Relay.new()): SwitchBuilder =
   b.circuitRelay = r
   b
@@ -303,10 +314,13 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
     ms = MultistreamSelect.new()
     muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)

+  if not b.autotls.isNil():
+    b.services.insert(b.autotls, 0)
+
   let transports = block:
     var transports: seq[Transport]
     for tProvider in b.transports:
-      transports.add(tProvider(muxedUpgrade, seckey))
+      transports.add(tProvider(muxedUpgrade, seckey, b.autotls))
     transports

   if b.secureManagers.len == 0:
|
||||
return err(CidError.Incorrect)
|
||||
if len(data) == 46:
|
||||
if data[0] == 'Q' and data[1] == 'm':
|
||||
buffer = newSeq[byte](BTCBase58.decodedLength(len(data)))
|
||||
buffer = newSeqUninitialized[byte](BTCBase58.decodedLength(len(data)))
|
||||
if BTCBase58.decode(data, buffer, plen) != Base58Status.Success:
|
||||
return err(CidError.Incorrect)
|
||||
buffer.setLen(plen)
|
||||
@@ -131,7 +131,7 @@ proc decode(data: openArray[char]): Result[Cid, CidError] =
|
||||
let length = MultiBase.decodedLength(data[0], len(data))
|
||||
if length == -1:
|
||||
return err(CidError.Incorrect)
|
||||
buffer = newSeq[byte](length)
|
||||
buffer = newSeqUninitialized[byte](length)
|
||||
if MultiBase.decode(data, buffer, plen) != MultiBaseStatus.Success:
|
||||
return err(CidError.Incorrect)
|
||||
buffer.setLen(plen)
|
||||
|
||||
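A recurring change across this commit range replaces `newSeq[byte]` with `newSeqUninitialized[byte]` for buffers that are fully written (or truncated with `setLen`) before being read, skipping the redundant zero-fill. A hedged micro-example of the idiom; `produceInto` is a hypothetical writer:

```nim
# newSeqUninitialized allocates without zeroing, so the contract is:
# every byte must be written, or the tail cut off with setLen, before reads.
var buf = newSeqUninitialized[byte](1024)
let used = produceInto(buf)  # hypothetical: writes into buf, returns count
buf.setLen(used)             # drop the never-initialized tail
```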
@@ -873,7 +873,7 @@ proc stretchKeys*(
   var seed = "key expansion"
   result.macsize = 20
   let length = result.ivsize + result.keysize + result.macsize
-  result.data = newSeq[byte](2 * length)
+  result.data = newSeqUninitialized[byte](2 * length)

   if hashType == "SHA256":
     makeSecret(result.data, HMAC[sha256], sharedSecret, seed)
@@ -904,7 +904,7 @@ template macOpenArray*(secret: Secret, id: int): untyped =

 proc iv*(secret: Secret, id: int): seq[byte] {.inline.} =
   ## Get array of bytes with with initial vector.
-  result = newSeq[byte](secret.ivsize)
+  result = newSeqUninitialized[byte](secret.ivsize)
   var offset =
     if id == 0:
       0
@@ -913,7 +913,7 @@ proc iv*(secret: Secret, id: int): seq[byte] {.inline.} =
   copyMem(addr result[0], unsafeAddr secret.data[offset], secret.ivsize)

 proc key*(secret: Secret, id: int): seq[byte] {.inline.} =
-  result = newSeq[byte](secret.keysize)
+  result = newSeqUninitialized[byte](secret.keysize)
   var offset =
     if id == 0:
       0
@@ -923,7 +923,7 @@ proc key*(secret: Secret, id: int): seq[byte] {.inline.} =
   copyMem(addr result[0], unsafeAddr secret.data[offset], secret.keysize)

 proc mac*(secret: Secret, id: int): seq[byte] {.inline.} =
-  result = newSeq[byte](secret.macsize)
+  result = newSeqUninitialized[byte](secret.macsize)
   var offset =
     if id == 0:
       0
@@ -458,7 +458,7 @@ proc getBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
   if isNil(seckey):
     return err(EcKeyIncorrectError)
   if seckey.key.curve in EcSupportedCurvesCint:
-    var res = newSeq[byte]()
+    var res = newSeqUninitialized[byte](0)
     let length = ?seckey.toBytes(res)
     res.setLen(length)
     discard ?seckey.toBytes(res)
@@ -471,7 +471,7 @@ proc getBytes*(pubkey: EcPublicKey): EcResult[seq[byte]] =
   if isNil(pubkey):
     return err(EcKeyIncorrectError)
   if pubkey.key.curve in EcSupportedCurvesCint:
-    var res = newSeq[byte]()
+    var res = newSeqUninitialized[byte](0)
     let length = ?pubkey.toBytes(res)
     res.setLen(length)
     discard ?pubkey.toBytes(res)
@@ -483,7 +483,7 @@ proc getBytes*(sig: EcSignature): EcResult[seq[byte]] =
   ## Serialize EC signature ``sig`` to ASN.1 DER binary form and return it.
   if isNil(sig):
     return err(EcSignatureError)
-  var res = newSeq[byte]()
+  var res = newSeqUninitialized[byte](0)
   let length = ?sig.toBytes(res)
   res.setLen(length)
   discard ?sig.toBytes(res)
@@ -494,7 +494,7 @@ proc getRawBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
   if isNil(seckey):
     return err(EcKeyIncorrectError)
   if seckey.key.curve in EcSupportedCurvesCint:
-    var res = newSeq[byte]()
+    var res = newSeqUninitialized[byte](0)
     let length = ?seckey.toRawBytes(res)
     res.setLen(length)
     discard ?seckey.toRawBytes(res)
@@ -507,7 +507,7 @@ proc getRawBytes*(pubkey: EcPublicKey): EcResult[seq[byte]] =
   if isNil(pubkey):
     return err(EcKeyIncorrectError)
   if pubkey.key.curve in EcSupportedCurvesCint:
-    var res = newSeq[byte]()
+    var res = newSeqUninitialized[byte](0)
     let length = ?pubkey.toRawBytes(res)
     res.setLen(length)
     discard ?pubkey.toRawBytes(res)
@@ -519,7 +519,7 @@ proc getRawBytes*(sig: EcSignature): EcResult[seq[byte]] =
   ## Serialize EC signature ``sig`` to raw binary form and return it.
   if isNil(sig):
     return err(EcSignatureError)
-  var res = newSeq[byte]()
+  var res = newSeqUninitialized[byte](0)
   let length = ?sig.toBytes(res)
   res.setLen(length)
   discard ?sig.toBytes(res)
@@ -929,7 +929,7 @@ proc getSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey): seq[byte] =
   var data: array[Secret521Length, byte]
   let res = toSecret(pubkey, seckey, data)
   if res > 0:
-    result = newSeq[byte](res)
+    result = newSeqUninitialized[byte](res)
     copyMem(addr result[0], addr data[0], res)

 proc sign*[T: byte | char](
@@ -943,7 +943,7 @@ proc sign*[T: byte | char](
   var impl = ecGetDefault()
   if seckey.key.curve in EcSupportedCurvesCint:
     var sig = new EcSignature
-    sig.buffer = newSeq[byte](256)
+    sig.buffer = newSeqUninitialized[byte](256)
     var kv = addr sha256Vtable
     kv.init(addr hc.vtable)
     if len(message) > 0:
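The `getBytes`/`getRawBytes` family keeps its two-pass shape: the first `toBytes` call on an undersized buffer only reports the needed length, the second fills it. A hedged sketch of the pattern as a hypothetical wrapper around the same calls:

```nim
proc serializeKey(seckey: EcPrivateKey): EcResult[seq[byte]] =
  var res = newSeqUninitialized[byte](0)
  let length = ?seckey.toBytes(res) # pass 1: buffer too small, returns size
  res.setLen(length)
  discard ?seckey.toBytes(res)      # pass 2: writes the DER bytes
  ok(res)
```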
@@ -679,15 +679,15 @@ proc init*(t: typedesc[Asn1Buffer], data: string): Asn1Buffer =

 proc init*(t: typedesc[Asn1Buffer]): Asn1Buffer =
   ## Initialize empty ``Asn1Buffer``.
-  Asn1Buffer(buffer: newSeq[byte]())
+  Asn1Buffer(buffer: newSeqUninitialized[byte](0))

 proc init*(t: typedesc[Asn1Composite], tag: Asn1Tag): Asn1Composite =
   ## Initialize ``Asn1Composite`` with tag ``tag``.
-  Asn1Composite(tag: tag, buffer: newSeq[byte]())
+  Asn1Composite(tag: tag, buffer: newSeqUninitialized[byte](0))

 proc init*(t: typedesc[Asn1Composite], idx: int): Asn1Composite =
   ## Initialize ``Asn1Composite`` with tag context-specific id ``id``.
-  Asn1Composite(tag: Asn1Tag.Context, idx: idx, buffer: newSeq[byte]())
+  Asn1Composite(tag: Asn1Tag.Context, idx: idx, buffer: newSeqUninitialized[byte](0))

 proc `$`*(buffer: Asn1Buffer): string =
   ## Return string representation of ``buffer``.
@@ -124,7 +124,7 @@ proc random*[T: RsaKP](
     length = eko + ((bits + 7) shr 3)

   let res = new T
-  res.buffer = newSeq[byte](length)
+  res.buffer = newSeqUninitialized[byte](length)

   var keygen = rsaKeygenGetDefault()
@@ -169,7 +169,7 @@ proc copy*[T: RsaPKI](key: T): T =
       key.seck.dqlen.uint + key.seck.iqlen.uint + key.pubk.nlen.uint +
       key.pubk.elen.uint + key.pexplen.uint
     result = new RsaPrivateKey
-    result.buffer = newSeq[byte](length)
+    result.buffer = newSeqUninitialized[byte](length)
     let po: uint = 0
     let qo = po + key.seck.plen
     let dpo = qo + key.seck.qlen
@@ -207,7 +207,7 @@ proc copy*[T: RsaPKI](key: T): T =
     if len(key.buffer) > 0:
       let length = key.key.nlen + key.key.elen
       result = new RsaPublicKey
-      result.buffer = newSeq[byte](length)
+      result.buffer = newSeqUninitialized[byte](length)
       let no = 0
       let eo = no + key.key.nlen
       copyMem(addr result.buffer[no], key.key.n, key.key.nlen)
@@ -226,7 +226,7 @@ proc getPublicKey*(key: RsaPrivateKey): RsaPublicKey =
   doAssert(not isNil(key))
   let length = key.pubk.nlen + key.pubk.elen
   result = new RsaPublicKey
-  result.buffer = newSeq[byte](length)
+  result.buffer = newSeqUninitialized[byte](length)
   result.key.n = addr result.buffer[0]
   result.key.e = addr result.buffer[key.pubk.nlen]
   copyMem(addr result.buffer[0], cast[pointer](key.pubk.n), key.pubk.nlen)
@@ -357,7 +357,7 @@ proc getBytes*(key: RsaPrivateKey): RsaResult[seq[byte]] =
   ## return it.
   if isNil(key):
     return err(RsaKeyIncorrectError)
-  var res = newSeq[byte](4096)
+  var res = newSeqUninitialized[byte](4096)
   let length = ?key.toBytes(res)
   if length > 0:
     res.setLen(length)
@@ -370,7 +370,7 @@ proc getBytes*(key: RsaPublicKey): RsaResult[seq[byte]] =
   ## return it.
   if isNil(key):
     return err(RsaKeyIncorrectError)
-  var res = newSeq[byte](4096)
+  var res = newSeqUninitialized[byte](4096)
   let length = ?key.toBytes(res)
   if length > 0:
     res.setLen(length)
@@ -382,7 +382,7 @@ proc getBytes*(sig: RsaSignature): RsaResult[seq[byte]] =
   ## Serialize RSA signature ``sig`` to raw binary form and return it.
   if isNil(sig):
     return err(RsaSignatureError)
-  var res = newSeq[byte](4096)
+  var res = newSeqUninitialized[byte](4096)
   let length = ?sig.toBytes(res)
   if length > 0:
     res.setLen(length)
@@ -753,7 +753,7 @@ proc sign*[T: byte | char](
   var hash: array[32, byte]
   let impl = rsaPkcs1SignGetDefault()
   var res = new RsaSignature
-  res.buffer = newSeq[byte]((key.seck.nBitlen + 7) shr 3)
+  res.buffer = newSeqUninitialized[byte]((key.seck.nBitlen + 7) shr 3)
   var kv = addr sha256Vtable
   kv.init(addr hc.vtable)
   if len(message) > 0:
@@ -182,7 +182,7 @@ proc getBytes*(key: SkPublicKey): seq[byte] {.inline.} =

 proc getBytes*(sig: SkSignature): seq[byte] {.inline.} =
   ## Serialize Secp256k1 `signature` and return it.
-  result = newSeq[byte](72)
+  result = newSeqUninitialized[byte](72)
   let length = toBytes(sig, result)
   result.setLen(length)
@@ -496,7 +496,7 @@ proc recvMessage(
     size: uint
     length: int
     res: VarintResult[void]
-  var buffer = newSeq[byte](10)
+  var buffer = newSeqUninitialized[byte](10)
   try:
     for i in 0 ..< len(buffer):
       await conn.readExactly(addr buffer[i], 1)
@@ -957,8 +957,7 @@ proc openStream*(
   var res: seq[byte]
   if pb.getRequiredField(ResponseType.STREAMINFO.int, res).isOk():
     let resPb = initProtoBuffer(res)
-    # stream.peer = newSeq[byte]()
-    var raddress = newSeq[byte]()
+    var raddress = newSeqUninitialized[byte](0)
     stream.protocol = ""
     resPb.getRequiredField(1, stream.peer).tryGet()
     resPb.getRequiredField(2, raddress).tryGet()
@@ -977,7 +976,7 @@ proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
   var message = await transp.recvMessage()
   var pb = initProtoBuffer(message)
   var stream = new P2PStream
-  var raddress = newSeq[byte]()
+  var raddress = newSeqUninitialized[byte](0)
   stream.protocol = ""
   pb.getRequiredField(1, stream.peer).tryGet()
   pb.getRequiredField(2, raddress).tryGet()
@@ -1116,7 +1115,7 @@ proc dhtGetSinglePeerInfo(pb: ProtoBuffer): PeerInfo {.raises: [DaemonLocalError
     raise newException(DaemonLocalError, "Missing required field `peer`!")

 proc dhtGetSingleValue(pb: ProtoBuffer): seq[byte] {.raises: [DaemonLocalError].} =
-  result = newSeq[byte]()
+  result = newSeqUninitialized[byte](0)
   if pb.getRequiredField(3, result).isErr():
     raise newException(DaemonLocalError, "Missing field `value`!")
@@ -1453,8 +1452,8 @@ proc pubsubPublish*(
   await api.closeConnection(transp)

 proc getPubsubMessage*(pb: ProtoBuffer): PubSubMessage =
-  result.data = newSeq[byte]()
-  result.seqno = newSeq[byte]()
+  result.data = newSeqUninitialized[byte](0)
+  result.seqno = newSeqUninitialized[byte](0)
   discard pb.getField(1, result.peer)
   discard pb.getField(2, result.data)
   discard pb.getField(3, result.seqno)
@@ -223,7 +223,7 @@ proc p2pStB(s: string, vb: var VBuffer): bool =

 proc p2pBtS(vb: var VBuffer, s: var string): bool =
   ## P2P address bufferToString() implementation.
-  var address = newSeq[byte]()
+  var address = newSeqUninitialized[byte](0)
   if vb.readSeq(address) > 0:
     var mh: MultiHash
     if MultiHash.decode(address, mh).isOk:
@@ -232,7 +232,7 @@ proc p2pBtS(vb: var VBuffer, s: var string): bool =

 proc p2pVB(vb: var VBuffer): bool =
   ## P2P address validateBuffer() implementation.
-  var address = newSeq[byte]()
+  var address = newSeqUninitialized[byte](0)
   if vb.readSeq(address) > 0:
     var mh: MultiHash
     if MultiHash.decode(address, mh).isOk:
@@ -555,7 +555,7 @@ proc protoAddress*(ma: MultiAddress): MaResult[seq[byte]] =
   ##
   ## If current MultiAddress do not have argument value, then result array will
   ## be empty.
-  var buffer = newSeq[byte](len(ma.data.buffer))
+  var buffer = newSeqUninitialized[byte](len(ma.data.buffer))
   let res = ?protoArgument(ma, buffer)
   buffer.setLen(res)
   ok(buffer)
@@ -569,7 +569,7 @@ proc protoArgument*(ma: MultiAddress): MaResult[seq[byte]] =

 proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
   var header: uint64
-  var data = newSeq[byte]()
+  var data = newSeqUninitialized[byte](0)
   var offset = 0
   var vb = ma
   var res: MultiAddress
@@ -643,7 +643,7 @@ proc `[]`*(ma: MultiAddress, slice: HSlice): MaResult[MultiAddress] {.inline.} =

 iterator items*(ma: MultiAddress): MaResult[MultiAddress] =
   ## Iterates over all addresses inside of MultiAddress ``ma``.
   var header: uint64
-  var data = newSeq[byte]()
+  var data = newSeqUninitialized[byte](0)
   var vb = ma
   while true:
     if vb.data.isEmpty():
@@ -533,7 +533,7 @@ proc decode*(
       let empty: seq[byte] = @[]
       ok(empty) # empty
     else:
-      var buffer = newSeq[byte](mb.decl(length - 1))
+      var buffer = newSeqUninitialized[byte](mb.decl(length - 1))
      var outlen = 0
      let res = mb.decr(inbytes.toOpenArray(1, length - 1), buffer, outlen)
      if res != MultiBaseStatus.Success:
@@ -11,8 +11,7 @@

 import std/[oids, strformat]
 import pkg/[chronos, chronicles, metrics]
-import
-  ./coder, ../muxer, ../../stream/[bufferstream, connection, streamseq], ../../peerinfo
+import ./coder, ../muxer, ../../stream/[bufferstream, connection], ../../peerinfo

 export connection
@@ -12,6 +12,7 @@
 import sequtils, std/[tables]
 import chronos, chronicles, metrics, stew/[endians2, byteutils, objects]
 import ../muxer, ../../stream/connection
+import ../../utils/zeroqueue

 export muxer
@@ -151,7 +152,7 @@ type
     opened: bool
     isSending: bool
     sendQueue: seq[ToSend]
-    recvQueue: seq[byte]
+    recvQueue: ZeroQueue
     isReset: bool
     remoteReset: bool
     closedRemotely: AsyncEvent
@@ -229,7 +230,6 @@ proc reset(channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).}
   for (d, s, fut) in channel.sendQueue:
     fut.fail(newLPStreamEOFError())
   channel.sendQueue = @[]
-  channel.recvQueue = @[]
   channel.sendWindow = 0
   if not channel.closedLocally:
     if isLocal and not channel.isSending:
@@ -257,7 +257,7 @@ proc updateRecvWindow(
     return

   let delta = channel.maxRecvWindow - inWindow
-  channel.recvWindow.inc(delta)
+  channel.recvWindow.inc(delta.int)
   await channel.conn.write(YamuxHeader.windowUpdate(channel.id, delta.uint32))
   trace "increasing the recvWindow", delta
@@ -279,7 +279,7 @@ method readOnce*(
       newLPStreamConnDownError()
   if channel.isEof:
     raise newLPStreamRemoteClosedError()
-  if channel.recvQueue.len == 0:
+  if channel.recvQueue.isEmpty():
     channel.receivedData.clear()
     let
       closedRemotelyFut = channel.closedRemotely.wait()
@@ -290,28 +290,23 @@ method readOnce*(
     if not receivedDataFut.finished():
       await receivedDataFut.cancelAndWait()
     await closedRemotelyFut or receivedDataFut
-    if channel.closedRemotely.isSet() and channel.recvQueue.len == 0:
+    if channel.closedRemotely.isSet() and channel.recvQueue.isEmpty():
       channel.isEof = true
       return
         0 # we return 0 to indicate that the channel is closed for reading from now on

-  let toRead = min(channel.recvQueue.len, nbytes)
-
-  var p = cast[ptr UncheckedArray[byte]](pbytes)
-  toOpenArray(p, 0, nbytes - 1)[0 ..< toRead] =
-    channel.recvQueue.toOpenArray(0, toRead - 1)
-  channel.recvQueue = channel.recvQueue[toRead ..^ 1]
+  let consumed = channel.recvQueue.consumeTo(pbytes, nbytes)

   # We made some room in the recv buffer let the peer know
   await channel.updateRecvWindow()
   channel.activity = true
-  return toRead
+  return consumed

 proc gotDataFromRemote(
     channel: YamuxChannel, b: seq[byte]
 ) {.async: (raises: [CancelledError, LPStreamError]).} =
   channel.recvWindow -= b.len
-  channel.recvQueue = channel.recvQueue.concat(b)
+  channel.recvQueue.push(b)
   channel.receivedData.fire()
   when defined(libp2p_yamux_metrics):
     libp2p_yamux_recv_queue.observe(channel.recvQueue.len.int64)
@@ -517,6 +512,7 @@ method close*(m: Yamux) {.async: (raises: []).} =
       channel.sendQueue = @[]
       channel.sendWindow = 0
      channel.closedLocally = true
+      channel.isReset = true
       channel.opened = false
       await channel.remoteClosed()
       channel.receivedData.fire()
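The `ZeroQueue` itself is not shown in this diff; judging from the call sites (`push`, `isEmpty`, `len`, `consumeTo`), it is a chunk list that avoids the O(n) reallocation of `recvQueue = recvQueue[toRead ..^ 1]` on every read. A hedged sketch of the interface the yamux code relies on (the implementation is illustrative only):

```nim
type ZeroQueue = object
  chunks: seq[seq[byte]] # received chunks, oldest first
  offset: int            # read position within chunks[0]

proc push(q: var ZeroQueue, b: seq[byte]) =
  if b.len > 0:
    q.chunks.add(b)

proc isEmpty(q: ZeroQueue): bool =
  q.chunks.len == 0

proc len(q: ZeroQueue): int =
  for c in q.chunks:
    result += c.len
  result -= q.offset

proc consumeTo(q: var ZeroQueue, pbytes: pointer, nbytes: int): int =
  ## Copies up to nbytes into pbytes, dropping whole consumed chunks
  ## instead of rebuilding one flat seq on every read.
  let dst = cast[ptr UncheckedArray[byte]](pbytes)
  while result < nbytes and q.chunks.len > 0:
    let take = min(nbytes - result, q.chunks[0].len - q.offset)
    copyMem(addr dst[result], addr q.chunks[0][q.offset], take)
    result += take
    q.offset += take
    if q.offset == q.chunks[0].len:
      q.chunks.delete(0)
      q.offset = 0
```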
@@ -37,18 +37,18 @@ proc questionToBuf(address: string, kind: QKind): seq[byte] =
     let dataLen = requestStream.getPosition()
     requestStream.setPosition(0)

-    var buf = newSeq[byte](dataLen)
+    var buf = newSeqUninitialized[byte](dataLen)
     discard requestStream.readData(addr buf[0], dataLen)
     buf
   except IOError as exc:
     info "Failed to created DNS buffer", description = exc.msg
-    newSeq[byte](0)
+    newSeqUninitialized[byte](0)
   except OSError as exc:
     info "Failed to created DNS buffer", description = exc.msg
-    newSeq[byte](0)
+    newSeqUninitialized[byte](0)
   except ValueError as exc:
     info "Failed to created DNS buffer", description = exc.msg
-    newSeq[byte](0)
+    newSeqUninitialized[byte](0)

 proc getDnsResponse(
     dnsServer: TransportAddress, address: string, kind: QKind
@@ -142,7 +142,7 @@ func init*(pid: var PeerId, data: string): bool =
   ## Initialize peer id from base58 encoded string representation.
   ##
   ## Returns ``true`` if peer was successfully initialiazed.
-  var p = newSeq[byte](len(data) + 4)
+  var p = newSeqUninitialized[byte](len(data) + 4)
   var length = 0
   if Base58.decode(data, p, length) == Base58Status.Success:
     p.setLen(length)
@@ -9,8 +9,8 @@

{.push raises: [].}

import base64, json, strutils, uri, times
import chronos, chronos/apps/http/httpclient, results, chronicles, bio
import base64, json, strutils, uri, times, stew/byteutils
import chronos, chronos/apps/http/httpclient, results, chronicles
import ../peerinfo, ../crypto/crypto, ../varint.nim

logScope:
@@ -22,6 +22,8 @@ const
ChallengeCharset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
ChallengeDefaultLen = 48

export Domain

type PeerIDAuthClient* = ref object of RootObj
session: HttpSessionRef
rng: ref HmacDrbgContext
@@ -55,7 +57,9 @@ type SigParam = object
k: string
v: seq[byte]

proc new*(T: typedesc[PeerIDAuthClient], rng: ref HmacDrbgContext): PeerIDAuthClient =
proc new*(
T: typedesc[PeerIDAuthClient], rng: ref HmacDrbgContext = newRng()
): PeerIDAuthClient =
PeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)

proc sampleChar(
@@ -90,12 +94,12 @@ proc extractField(data, key: string): string {.raises: [PeerIDAuthError].} =
proc genDataToSign(
parts: seq[SigParam], prefix: string = PeerIDAuthPrefix
): seq[byte] {.raises: [PeerIDAuthError].} =
var buf: seq[byte] = prefix.toByteSeq()
var buf: seq[byte] = prefix.toBytes()
for p in parts:
let varintLen = PB.encodeVarint(hint(p.k.len + p.v.len + 1)).valueOr:
raise newException(PeerIDAuthError, "could not encode fields length to varint")
buf.add varintLen
buf.add (p.k & "=").toByteSeq()
buf.add (p.k & "=").toBytes()
buf.add p.v
return buf

@@ -104,15 +108,15 @@ proc getSigParams(
): seq[SigParam] =
if clientSender:
@[
SigParam(k: "challenge-client", v: challenge.toByteSeq()),
SigParam(k: "hostname", v: hostname.toByteSeq()),
SigParam(k: "challenge-client", v: challenge.toBytes()),
SigParam(k: "hostname", v: hostname.toBytes()),
SigParam(k: "server-public-key", v: publicKey.getBytes().get()),
]
else:
@[
SigParam(k: "challenge-server", v: challenge.toByteSeq()),
SigParam(k: "challenge-server", v: challenge.toBytes()),
SigParam(k: "client-public-key", v: publicKey.getBytes().get()),
SigParam(k: "hostname", v: hostname.toByteSeq()),
SigParam(k: "hostname", v: hostname.toBytes()),
]

proc sign(
@@ -139,7 +143,7 @@ proc checkSignature*(
getSigParams(false, hostname, challengeServer, clientPublicKey).genDataToSign()
var serverSignature: Signature
try:
if not serverSignature.init(base64.decode(serverSig).toByteSeq()):
if not serverSignature.init(base64.decode(serverSig).toBytes()):
raise newException(
PeerIDAuthError, "Failed to initialize Signature from base64 encoded sig"
)
@@ -151,12 +155,12 @@ proc checkSignature*(
)

method post*(
self: PeerIDAuthClient, uri: string, payload: string, authHeader: string
self: PeerIDAuthClient, uri: Uri, payload: string, authHeader: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
let rawResponse = await HttpClientRequestRef
.post(
self.session,
uri,
$uri,
body = payload,
headers = [
("Content-Type", "application/json"),
@@ -174,9 +178,15 @@ method post*(
)

method get*(
self: PeerIDAuthClient, uri: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
self: PeerIDAuthClient, uri: Uri
): Future[PeerIDAuthResponse] {.
async: (raises: [PeerIDAuthError, HttpError, CancelledError]), base
.} =
if self.session.isNil():
raise newException(PeerIDAuthError, "Session is nil")
let req = HttpClientRequestRef.get(self.session, $uri).valueOr:
raise newException(PeerIDAuthError, "Could not get request obj")
let rawResponse = await req.send()
PeerIDAuthResponse(
status: rawResponse.status,
headers: rawResponse.headers,
@@ -190,7 +200,7 @@ proc requestAuthentication*(
.} =
let response =
try:
await self.get($uri)
await self.get(uri)
except HttpError as exc:
raise newException(PeerIDAuthError, "Failed to start PeerID Auth", exc)

@@ -200,7 +210,7 @@ proc requestAuthentication*(

let serverPubkey: PublicKey =
try:
PublicKey.init(decode(extractField(wwwAuthenticate, "public-key")).toByteSeq()).valueOr:
PublicKey.init(decode(extractField(wwwAuthenticate, "public-key")).toBytes()).valueOr:
raise newException(PeerIDAuthError, "Failed to initialize server public-key")
except ValueError as exc:
raise newException(PeerIDAuthError, "Failed to decode server public-key", exc)
@@ -248,7 +258,7 @@ proc requestAuthorization*(
"\"" & ", challenge-server=\"" & challengeServer & "\"" & ", sig=\"" & sig & "\""
let response =
try:
await self.post($uri, $payload, authHeader)
await self.post(uri, $payload, authHeader)
except HttpError as exc:
raise newException(
PeerIDAuthError, "Failed to send Authorization for PeerID Auth", exc
@@ -303,12 +313,12 @@ proc sendWithBearer(
): Future[(BearerToken, PeerIDAuthResponse)] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
if bearer.expires.isSome and DateTime(bearer.expires.get) <= now():
if bearer.expires.isSome() and DateTime(bearer.expires.get) <= now():
raise newException(PeerIDAuthError, "Bearer expired")
let authHeader = PeerIDAuthPrefix & " bearer=\"" & bearer.token & "\""
let response =
try:
await self.post($uri, $payload, authHeader)
await self.post(uri, $payload, authHeader)
except HttpError as exc:
raise newException(
PeerIDAuthError, "Failed to send request with bearer token for PeerID Auth", exc
@@ -320,14 +330,14 @@ proc send*(
uri: Uri,
peerInfo: PeerInfo,
payload: auto,
bearer: BearerToken = BearerToken(),
bearer: Opt[BearerToken] = Opt.none(BearerToken),
): Future[(BearerToken, PeerIDAuthResponse)] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
if bearer.token == "":
await self.sendWithoutBearer(uri, peerInfo, payload)
if bearer.isSome():
await self.sendWithBearer(uri, peerInfo, payload, bearer.get)
else:
await self.sendWithBearer(uri, peerInfo, payload, bearer)
await self.sendWithoutBearer(uri, peerInfo, payload)

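With `bearer` now an `Opt[BearerToken]`, callers state explicitly whether a previously issued token is being reused. A hedged usage sketch; `uri`, `peerInfo`, and `payload` are placeholder values, not from the diff:

let client = PeerIDAuthClient.new() # rng defaults to newRng()
# first request: no bearer yet, so the full challenge/response exchange runs
let (bearer, res) = await client.send(uri, peerInfo, payload)
# later requests reuse the token until it expires
let (_, res2) = await client.send(uri, peerInfo, payload, Opt.some(bearer))
await client.close()
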
proc close*(
self: PeerIDAuthClient

@@ -9,10 +9,9 @@

{.push raises: [].}

import uri
import chronos, chronos/apps/http/httpclient
import ../crypto/crypto

import ./client
import ../crypto/crypto, ./client

export client

@@ -27,14 +26,14 @@ proc new*(
MockPeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)

method post*(
self: MockPeerIDAuthClient, uri: string, payload: string, authHeader: string
self: MockPeerIDAuthClient, uri: Uri, payload: string, authHeader: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
PeerIDAuthResponse(
status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
)

method get*(
self: MockPeerIDAuthClient, uri: string
self: MockPeerIDAuthClient, uri: Uri
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
PeerIDAuthResponse(
status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody

@@ -142,18 +142,17 @@ proc initProtoBuffer*(
result.options = options

proc initProtoBuffer*(options: set[ProtoFlags] = {}): ProtoBuffer =
## Initialize ProtoBuffer with new sequence of capacity ``cap``.
result.buffer = newSeq[byte]()
## Initialize ProtoBuffer with new sequence of capacity ``cap``
result.options = options
if WithVarintLength in options:
# Our buffer will start from position 10, so we can store length of buffer
# in [0, 9].
result.buffer.setLen(10)
result.buffer = newSeqUninitialized[byte](10)
result.offset = 10
elif {WithUint32LeLength, WithUint32BeLength} * options != {}:
# Our buffer will start from position 4, so we can store length of buffer
# in [0, 3].
result.buffer.setLen(4)
result.buffer = newSeqUninitialized[byte](4)
result.offset = 4

proc write*[T: ProtoScalar](pb: var ProtoBuffer, field: int, value: T) =

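Reserving the prefix up front lets the encoder patch the length in afterwards without shifting the payload. A short sketch of how the flag is used, assuming minprotobuf's `finish` fills the reserved prefix (an assumption on my part, not shown in this hunk):

var pb = initProtoBuffer({WithVarintLength})
pb.write(1, 42'u64) # payload bytes start at offset 10
pb.finish()         # the varint length is written into bytes [0, 9]
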
@@ -112,7 +112,7 @@ proc randomKeyInBucketRange*(
let totalBits = raw.len * 8
let lsbStart = bucketIndex + 1
let lsbBytes = (totalBits - lsbStart + 7) div 8
var randomBuf = newSeq[byte](lsbBytes)
var randomBuf = newSeqUninitialized[byte](lsbBytes)
hmacDrbgGenerate(rng[], randomBuf)

for i in lsbStart ..< totalBits:

@@ -192,7 +192,10 @@ method init*(f: FloodSub) =
f.codec = FloodSubCodec

method publish*(
f: FloodSub, topic: string, data: seq[byte], useCustomConn: bool = false
f: FloodSub,
topic: string,
data: seq[byte],
publishParams: Option[PublishParams] = none(PublishParams),
): Future[int] {.async: (raises: []).} =
# base always returns 0
discard await procCall PubSub(f).publish(topic, data)

@@ -775,7 +775,10 @@ proc makePeersForPublishDefault(
return peers

method publish*(
g: GossipSub, topic: string, data: seq[byte], useCustomConn: bool = false
g: GossipSub,
topic: string,
data: seq[byte],
publishParams: Option[PublishParams] = none(PublishParams),
): Future[int] {.async: (raises: []).} =
logScope:
topic
@@ -789,8 +792,10 @@ method publish*(

trace "Publishing message on topic", data = data.shortLog

let pubParams = publishParams.get(PublishParams())

let peers =
if useCustomConn:
if pubParams.useCustomConn:
g.makePeersForPublishUsingCustomConn(topic)
else:
g.makePeersForPublishDefault(topic, data)
@@ -828,7 +833,8 @@ method publish*(
trace "Dropping already-seen message"
return 0

g.mcache.put(msgId, msg)
if not pubParams.skipMCache:
g.mcache.put(msgId, msg)

if g.parameters.sendIDontWantOnPublish and isLargeMessage(msg, msgId):
g.sendIDontWant(msg, msgId, peers)
@@ -837,7 +843,7 @@ method publish*(
peers,
RPCMsg(messages: @[msg]),
isHighPriority = true,
useCustomConn = useCustomConn,
useCustomConn = pubParams.useCustomConn,
)

if g.knownTopics.contains(topic):

@@ -145,6 +145,10 @@ type
## we have to store it, which may be an attack vector.
## This callback can be used to reject topics we're not interested in

PublishParams* = object
useCustomConn*: bool
skipMCache*: bool

PubSub* {.public.} = ref object of LPProtocol
switch*: Switch # the switch used to dial/connect to peers
peerInfo*: PeerInfo # this peer's info
@@ -570,7 +574,10 @@ proc subscribe*(p: PubSub, topic: string, handler: TopicHandler) {.public.} =
p.updateTopicMetrics(topic)

method publish*(
p: PubSub, topic: string, data: seq[byte], useCustomConn: bool = false
p: PubSub,
topic: string,
data: seq[byte],
publishParams: Option[PublishParams] = none(PublishParams),
): Future[int] {.base, async: (raises: []), public.} =
## publish to a ``topic``
##

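`PublishParams` bundles per-call publish options so new flags can be added without changing every `publish` override again. A hedged usage sketch mirroring the tests later in this changeset (`gossipSub`, `topic`, and `data` are placeholders):

# default behaviour: no params object needed
discard await gossipSub.publish(topic, data)

# opt into a custom connection and skip the message cache
discard await gossipSub.publish(
  topic,
  data,
  publishParams = some(PublishParams(useCustomConn: true, skipMCache: true)),
)
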
@@ -221,7 +221,7 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async: (raises: []).} =
conn, peer = p, closed = conn.closed, data = data.shortLog

await p.handler(p, data)
data = newSeq[byte]() # Release memory
data = newSeqUninitialized[byte](0) # Release memory
except PeerRateLimitError as exc:
debug "Peer rate limit exceeded, exiting read while",
conn, peer = p, description = exc.msg

@@ -15,11 +15,12 @@ import chronicles
import bearssl/[rand, hash]
import stew/[endians2, byteutils]
import nimcrypto/[utils, sha2, hmac]
import ../../stream/[connection, streamseq]
import ../../stream/[connection]
import ../../peerid
import ../../peerinfo
import ../../protobuf/minprotobuf
import ../../utility
import ../../utils/bytesview

import secure, ../../crypto/[crypto, chacha20poly1305, curve25519, hkdf]

@@ -237,13 +238,14 @@ template write_e(): untyped =
# Sets e (which must be empty) to GENERATE_KEYPAIR().
# Appends e.public_key to the buffer. Calls MixHash(e.public_key).
hs.e = genKeyPair(p.rng[])
msg.add hs.e.publicKey
hs.ss.mixHash(hs.e.publicKey)

hs.e.publicKey.getBytes

template write_s(): untyped =
trace "noise write s"
# Appends EncryptAndHash(s.public_key) to the buffer.
msg.add hs.ss.encryptAndHash(hs.s.publicKey)
hs.ss.encryptAndHash(hs.s.publicKey)

template dh_ee(): untyped =
trace "noise dh ee"
@@ -281,9 +283,10 @@ template read_e(): untyped =
# Sets re (which must be empty) to the next DHLEN bytes from the message.
# Calls MixHash(re.public_key).
hs.re[0 .. Curve25519Key.high] = msg.toOpenArray(0, Curve25519Key.high)
msg.consume(Curve25519Key.len)
hs.ss.mixHash(hs.re)

Curve25519Key.len

template read_s(): untyped =
trace "noise read s", size = msg.len
# Sets temp to the next DHLEN + 16 bytes of the message if HasKey() == True,
@@ -300,7 +303,7 @@ template read_s(): untyped =
Curve25519Key.len
hs.rs[0 .. Curve25519Key.high] = hs.ss.decryptAndHash(msg.toOpenArray(0, rsLen - 1))

msg.consume(rsLen)
rsLen

proc readFrame(
sconn: Connection
@@ -316,28 +319,25 @@ proc readFrame(
await sconn.readExactly(addr buffer[0], buffer.len)
return buffer

proc writeFrame(
sconn: Connection, buf: openArray[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
doAssert buf.len <= uint16.high.int
var
lesize = buf.len.uint16
besize = lesize.toBytesBE
outbuf = newSeqOfCap[byte](besize.len + buf.len)
trace "writeFrame", sconn, size = lesize, data = shortLog(buf)
outbuf &= besize
outbuf &= buf
sconn.write(outbuf)

proc receiveHSMessage(
sconn: Connection
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
readFrame(sconn)

proc sendHSMessage(
sconn: Connection, buf: openArray[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
writeFrame(sconn, buf)
template sendHSMessage(sconn: Connection, parts: varargs[seq[byte]]): untyped =
# Sends a message (message frame) as multiple seq[byte] parts that,
# concatenated, represent the entire message.

var msgSize: int
for p in parts:
msgSize += p.len

trace "sendHSMessage", sconn, size = msgSize
doAssert msgSize <= uint16.high.int

await sconn.write(@(msgSize.uint16.toBytesBE))
for p in parts:
await sconn.write(p)

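The varargs template puts the same bytes on the wire as the removed single-buffer path, without concatenating the parts first. An illustrative comparison (the equivalence claim is mine, not stated in the diff):

# frame layout: [2-byte big-endian length][part0][part1]...
conn.sendHSMessage(ebytes, hbytes)       # new: parts written one by one
await writeFrame(conn, ebytes & hbytes)  # old: one concatenated buffer, same bytes
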
proc handshakeXXOutbound(
p: Noise, conn: Connection, p2pSecret: seq[byte]
@@ -348,38 +348,30 @@ proc handshakeXXOutbound(
try:
hs.ss.mixHash(p.commonPrologue)
hs.s = p.noiseKeys
var remoteP2psecret: seq[byte]

# -> e
var msg: StreamSeq
block: # -> e
let ebytes = write_e()
# IK might use this btw!
let hbytes = hs.ss.encryptAndHash([])

write_e()
conn.sendHSMessage(ebytes, hbytes)

# IK might use this btw!
msg.add hs.ss.encryptAndHash([])
block: # <- e, ee, s, es
var msg = BytesView.init(await conn.receiveHSMessage())
msg.consume(read_e())
dh_ee()
msg.consume(read_s())
dh_es()
remoteP2psecret = hs.ss.decryptAndHash(msg.data())

await conn.sendHSMessage(msg.data)
block: # -> s, se
let sbytes = write_s()
dh_se()
# last payload must follow the encrypted way of sending
let hbytes = hs.ss.encryptAndHash(p2pSecret)

# <- e, ee, s, es

msg.assign(await conn.receiveHSMessage())

read_e()
dh_ee()
read_s()
dh_es()

let remoteP2psecret = hs.ss.decryptAndHash(msg.data)
msg.clear()

# -> s, se

write_s()
dh_se()

# last payload must follow the encrypted way of sending
msg.add hs.ss.encryptAndHash(p2pSecret)

await conn.sendHSMessage(msg.data)
conn.sendHSMessage(sbytes, hbytes)

let (cs1, cs2) = hs.ss.split()
return
@@ -397,41 +389,30 @@ proc handshakeXXInbound(
try:
hs.ss.mixHash(p.commonPrologue)
hs.s = p.noiseKeys
var remoteP2psecret: seq[byte]

# -> e
block: # <- e
var msg = BytesView.init(await conn.receiveHSMessage())
msg.consume(read_e())
# we might use this early data one day, keeping it here for clarity
let earlyData {.used.} = hs.ss.decryptAndHash(msg.data())

var msg: StreamSeq
msg.add(await conn.receiveHSMessage())
block: # -> e, ee, s, es
let ebytes = write_e()
dh_ee()
let sbytes = write_s()
dh_es()
let hbytes = hs.ss.encryptAndHash(p2pSecret)

read_e()
conn.sendHSMessage(ebytes, sbytes, hbytes)

# we might use this early data one day, keeping it here for clarity
let earlyData {.used.} = hs.ss.decryptAndHash(msg.data)
block: # <- s, se
var msg = BytesView.init(await conn.receiveHSMessage())
msg.consume(read_s())
dh_se()
remoteP2psecret = hs.ss.decryptAndHash(msg.data())

# <- e, ee, s, es

msg.consume(msg.len)

write_e()
dh_ee()
write_s()
dh_es()

msg.add hs.ss.encryptAndHash(p2pSecret)

await conn.sendHSMessage(msg.data)
msg.clear()

# -> s, se

msg.add(await conn.receiveHSMessage())

read_s()
dh_se()

let
remoteP2psecret = hs.ss.decryptAndHash(msg.data)
(cs1, cs2) = hs.ss.split()
let (cs1, cs2) = hs.ss.split()
return
HandshakeResult(cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
finally:

@@ -15,7 +15,7 @@ import results
import chronos, chronicles
import
../protocol,
../../stream/streamseq,
../../utils/zeroqueue,
../../stream/connection,
../../multiaddress,
../../peerinfo
@@ -32,7 +32,7 @@ type

SecureConn* = ref object of Connection
stream*: Connection
buf: StreamSeq
buf: ZeroQueue

func shortLog*(conn: SecureConn): auto =
try:
@@ -174,11 +174,11 @@ method readOnce*(
if s.isEof:
raise newLPStreamEOFError()

if s.buf.data().len() == 0:
if s.buf.isEmpty:
try:
let buf = await s.readMessage() # Always returns >0 bytes or raises
s.activity = true
s.buf.add(buf)
s.buf.push(buf)
except LPStreamEOFError as err:
s.isEof = true
await s.close()
@@ -191,5 +191,4 @@ method readOnce*(
await s.close()
raise newException(LPStreamError, "Secure connection read error: " & err.msg, err)

var p = cast[ptr UncheckedArray[byte]](pbytes)
return s.buf.consumeTo(toOpenArray(p, 0, nbytes - 1))
return s.buf.consumeTo(pbytes, nbytes)

@@ -10,10 +10,9 @@
{.push raises: [].}

import std/strformat
import stew/byteutils
import chronos, chronicles, metrics
import ../stream/connection
import ./streamseq
import ../utils/zeroqueue

export connection

@@ -24,7 +23,7 @@ const BufferStreamTrackerName* = "BufferStream"

type BufferStream* = ref object of Connection
readQueue*: AsyncQueue[seq[byte]] # read queue for managing backpressure
readBuf*: StreamSeq # overflow buffer for readOnce
readBuf: ZeroQueue # zero queue buffer for readOnce
pushing*: bool # number of ongoing push operations
reading*: bool # is there an ongoing read? (only allow one)
pushedEof*: bool # eof marker has been put on readQueue
@@ -43,7 +42,7 @@ chronicles.formatIt(BufferStream):
shortLog(it)

proc len*(s: BufferStream): int =
s.readBuf.len + (if s.readQueue.len > 0: s.readQueue[0].len()
s.readBuf.len.int + (if s.readQueue.len > 0: s.readQueue[0].len()
else: 0)

method initStream*(s: BufferStream) =
@@ -62,7 +61,7 @@ proc new*(T: typedesc[BufferStream], timeout: Duration = DefaultConnectionTimeou
bufferStream

method pushData*(
s: BufferStream, data: seq[byte]
s: BufferStream, data: sink seq[byte]
) {.base, async: (raises: [CancelledError, LPStreamError]).} =
## Write bytes to internal read buffer, use this to fill up the
## buffer with data.
@@ -107,7 +106,7 @@ method pushEof*(
s.pushing = false

method atEof*(s: BufferStream): bool =
s.isEof and s.readBuf.len == 0
s.isEof and s.readBuf.isEmpty

method readOnce*(
s: BufferStream, pbytes: pointer, nbytes: int
@@ -118,20 +117,12 @@ method readOnce*(
if s.returnedEof:
raise newLPStreamEOFError()

var p = cast[ptr UncheckedArray[byte]](pbytes)

# First consume leftovers from previous read
var rbytes = s.readBuf.consumeTo(toOpenArray(p, 0, nbytes - 1))

if rbytes < nbytes and not s.isEof:
# There's space in the buffer - consume some data from the read queue
s.reading = true
if not s.isEof and s.readBuf.len < nbytes:
let buf =
try:
s.reading = true
await s.readQueue.popFirst()
except CancelledError as exc:
# Not very efficient, but shouldn't happen often
s.readBuf.assign(@(p.toOpenArray(0, rbytes - 1)) & @(s.readBuf.data))
raise exc
finally:
s.reading = false
@@ -141,28 +132,18 @@ method readOnce*(
trace "EOF", s
s.isEof = true
else:
let remaining = min(buf.len, nbytes - rbytes)
toOpenArray(p, rbytes, nbytes - 1)[0 ..< remaining] =
buf.toOpenArray(0, remaining - 1)
rbytes += remaining

if remaining < buf.len:
trace "add leftovers", s, len = buf.len - remaining
s.readBuf.add(buf.toOpenArray(remaining, buf.high))

if s.isEof and s.readBuf.len() == 0:
# We can clear the readBuf memory since it won't be used any more
s.readBuf = StreamSeq()
s.readBuf.push(buf)

let consumed = s.readBuf.consumeTo(pbytes, nbytes)
s.activity = true

# We want to return 0 exactly once - after that, we'll start raising instead -
# this is a bit nuts in a mixed exception / return value world, but allows the
# consumer of the stream to rely on the 0-byte read as a "regular" EOF marker
# (instead of _sometimes_ getting an exception).
s.returnedEof = rbytes == 0
s.returnedEof = consumed == 0

return rbytes
return consumed

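The reworked `readOnce` keeps the stream's EOF contract: exactly one zero-byte read is returned, after which further reads raise. A hedged consumer sketch; `stream` and `process` are hypothetical placeholders:

var buf = newSeq[byte](1024)
while true:
  let n = await stream.readOnce(addr buf[0], buf.len)
  if n == 0:
    break # the single "regular" EOF marker
  process(buf.toOpenArray(0, n - 1)) # hypothetical consumer
# calling readOnce again here would raise LPStreamEOFError
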
method closeImpl*(s: BufferStream): Future[void] {.async: (raises: [], raw: true).} =
## close the stream and clear the buffer
@@ -171,7 +152,6 @@ method closeImpl*(s: BufferStream): Future[void] {.async: (raises: [], raw: true
# First, make sure any new calls to `readOnce` and `pushData` etc will fail -
# there may already be such calls in the event queue however
s.isEof = true
s.readBuf = StreamSeq()
s.pushedEof = true

# Essentially we need to handle the following cases

@@ -358,7 +358,9 @@ proc start*(s: Switch) {.public, async: (raises: [CancelledError, LPError]).} =
for fut in startFuts:
if fut.failed:
await s.stop()
raise newException(LPError, "starting transports failed", fut.error)
raise newException(
LPError, "starting transports failed: " & $fut.error.msg, fut.error
)

for t in s.transports: # for each transport
if t.addrs.len > 0 or t.running:

@@ -98,7 +98,7 @@ func makeSignatureMessage(pubKey: seq[byte]): seq[byte] {.inline.} =
##
let P2P_SIGNING_PREFIX = "libp2p-tls-handshake:".toBytes()
let prefixLen = P2P_SIGNING_PREFIX.len.int
let msg = newSeq[byte](prefixLen + pubKey.len)
let msg = newSeqUninitialized[byte](prefixLen + pubKey.len)
copyMem(msg[0].unsafeAddr, P2P_SIGNING_PREFIX[0].unsafeAddr, prefixLen)
copyMem(msg[prefixLen].unsafeAddr, pubKey[0].unsafeAddr, pubKey.len.int)


@@ -18,6 +18,7 @@ import
transport,
tcptransport,
../switch,
../autotls/service,
../builders,
../stream/[lpstream, connection, chronosstream],
../multiaddress,
@@ -303,7 +304,7 @@ proc new*(
flags: set[ServerFlags] = {},
): TorSwitch {.raises: [LPError], public.} =
var builder = SwitchBuilder.new().withRng(rng).withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
TorTransport.new(torServer, flags, upgr)
)
if addresses.len != 0:

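`TransportProvider` callbacks now receive an `AutotlsService` parameter; providers that do not need it, like the Tor transport above, simply ignore it. A hedged builder sketch (other builder options are omitted for brevity):

let switch = SwitchBuilder
  .new()
  .withRng(newRng())
  .withTransport(
    proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
      TcpTransport.new({}, upgr) # autotls is unused by this transport
  )
  # ... addresses, muxer, and security setup omitted
  .build()
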
libp2p/utils/bytesview.nim (new file, 28 lines)
@@ -0,0 +1,28 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

type BytesView* = object
data: seq[byte]
rpos: int

proc init*(t: typedesc[BytesView], data: sink seq[byte]): BytesView =
BytesView(data: data, rpos: 0)

func len*(v: BytesView): int {.inline.} =
v.data.len - v.rpos

func consume*(v: var BytesView, n: int) {.inline.} =
doAssert v.data.len >= v.rpos + n
v.rpos += n

template toOpenArray*(v: BytesView, b, e: int): openArray[byte] =
v.data.toOpenArray(v.rpos + b, v.rpos + e - b)

template data*(v: BytesView): openArray[byte] =
v.data.toOpenArray(v.rpos, v.data.len - 1)
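`BytesView` gives the Noise handshake, earlier in this changeset, a consuming cursor over a received buffer without copying it. A minimal usage sketch:

var v = BytesView.init(@[1'u8, 2, 3, 4])
assert v.len == 4
v.consume(2) # advance the read position past the first two bytes
assert v.len == 2
assert @(v.data) == @[3'u8, 4]
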
libp2p/utils/zeroqueue.nim (new file, 84 lines)
@@ -0,0 +1,84 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import std/deques

type Chunk = ref object
data: seq[byte]
size: int
start: int

template clone(c: Chunk): Chunk =
Chunk(data: c.data, size: c.size, start: c.start)

template newChunk(b: sink seq[byte]): Chunk =
Chunk(data: b, size: b.len, start: 0)

template len(c: Chunk): int =
c.size - c.start

type ZeroQueue* = object
# ZeroQueue is a queue structure optimized for efficient pushing and popping of
# byte sequences `seq[byte]` (called chunks). This type is useful for streaming or buffering
# scenarios where chunks of binary data are accumulated and consumed incrementally.
chunks: Deque[Chunk]

proc clear*(q: var ZeroQueue) =
q.chunks.clear()

proc isEmpty*(q: ZeroQueue): bool =
return q.chunks.len() == 0

proc len*(q: ZeroQueue): int64 =
var l: int64
for b in q.chunks.items():
l += b.len()
return l

proc push*(q: var ZeroQueue, b: sink seq[byte]) =
if b.len > 0:
q.chunks.addLast(newChunk(b))

proc popChunk(q: var ZeroQueue, count: int): Chunk {.inline.} =
var first = q.chunks.popFirst()

# the first chunk has at most the requested count of elements;
# the queue returns this chunk (it might have fewer than requested)
if first.len() <= count:
return first

# the first chunk has more elements than requested;
# the queue returns a view of the first count elements, leaving the rest queued
var ret = first.clone()
ret.size = ret.start + count
first.start += count
q.chunks.addFirst(first)
return ret

proc consumeTo*(q: var ZeroQueue, pbytes: pointer, nbytes: int): int =
var consumed = 0
while consumed < nbytes and not q.isEmpty():
let chunk = q.popChunk(nbytes - consumed)
let dest = cast[pointer](cast[ByteAddress](pbytes) + consumed)
let offsetPtr = cast[ptr byte](cast[int](unsafeAddr chunk.data[0]) + chunk.start)
copyMem(dest, offsetPtr, chunk.len())
consumed += chunk.len()

return consumed

proc popChunkSeq*(q: var ZeroQueue, count: int): seq[byte] =
if q.isEmpty:
return @[]

let chunk = q.popChunk(count)
var dest = newSeqUninitialized[byte](chunk.len())
let offsetPtr = cast[ptr byte](cast[int](unsafeAddr chunk.data[0]) + chunk.start)
copyMem(dest[0].addr, offsetPtr, chunk.len())

return dest

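A minimal sketch of the queue's consumption pattern, mirroring how `SecureConn.readOnce` uses it earlier in this changeset:

var q: ZeroQueue
q.push(@[1'u8, 2, 3])
q.push(@[4'u8, 5])
var buf = newSeq[byte](4)
let n = q.consumeTo(addr buf[0], buf.len)
assert n == 4 and buf == @[1'u8, 2, 3, 4]
assert q.len == 1 # byte 5 remains queued
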
@@ -1,13 +1,17 @@
import chronos, chronicles, stew/byteutils
import helpers
import ../libp2p
import ../libp2p/[daemon/daemonapi, varint, transports/wstransport, crypto/crypto]
import
../libp2p/
[autotls/service, daemon/daemonapi, varint, transports/wstransport, crypto/crypto]
import ../libp2p/protocols/connectivity/relay/[relay, client, utils]

type
SwitchCreator = proc(
ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
prov: TransportProvider = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
prov: TransportProvider = proc(
upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
): Transport =
TcpTransport.new({}, upgr),
relay: Relay = Relay.new(circuitRelayV1 = true),
): Switch {.gcsafe, raises: [LPError].}
@@ -76,7 +80,7 @@ proc testPubSubDaemonPublish(

await nativeNode.connect(daemonPeer.peer, daemonPeer.addresses)

await sleepAsync(1.seconds)
await sleepAsync(500.millis)
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)

proc pubsubHandler(
@@ -86,12 +90,12 @@ proc testPubSubDaemonPublish(

asyncDiscard daemonNode.pubsubSubscribe(testTopic, pubsubHandler)
pubsub.subscribe(testTopic, nativeHandler)
await sleepAsync(5.seconds)
await sleepAsync(3.seconds)

proc publisher() {.async.} =
while not finished:
await daemonNode.pubsubPublish(testTopic, msgData)
await sleepAsync(500.millis)
await sleepAsync(250.millis)

await wait(publisher(), 5.minutes) # should be plenty of time

@@ -128,7 +132,7 @@ proc testPubSubNodePublish(

await nativeNode.connect(daemonPeer.peer, daemonPeer.addresses)

await sleepAsync(1.seconds)
await sleepAsync(500.millis)
await daemonNode.connect(nativePeer.peerId, nativePeer.addrs)

var times = 0
@@ -148,12 +152,12 @@ proc testPubSubNodePublish(
discard

pubsub.subscribe(testTopic, nativeHandler)
await sleepAsync(5.seconds)
await sleepAsync(3.seconds)

proc publisher() {.async.} =
while not finished:
discard await pubsub.publish(testTopic, msgData)
await sleepAsync(500.millis)
await sleepAsync(250.millis)

await wait(publisher(), 5.minutes) # should be plenty of time

@@ -206,7 +210,7 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
await nativeNode.stop()
await daemonNode.close()

await sleepAsync(1.seconds)
await sleepAsync(500.millis)

asyncTest "native -> daemon connection":
var protos = @["/test-stream"]
@@ -288,7 +292,7 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
await stream.close()
await nativeNode.stop()
await daemonNode.close()
await sleepAsync(1.seconds)
await sleepAsync(500.millis)

asyncTest "native -> daemon websocket connection":
var protos = @["/test-stream"]
@@ -318,7 +322,9 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =

let nativeNode = swCreator(
ma = wsAddress,
prov = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
prov = proc(
upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
): Transport =
WsTransport.new(upgr),
)

@@ -337,7 +343,7 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
await stream.close()
await nativeNode.stop()
await daemonNode.close()
await sleepAsync(1.seconds)
await sleepAsync(500.millis)

asyncTest "daemon -> native websocket connection":
var protos = @["/test-stream"]

@@ -386,38 +386,3 @@ suite "GossipSub Integration - Control Messages":
# Then IDONTWANT is sent to B on publish
checkUntilTimeout:
nodes[1].mesh.getOrDefault(topic).anyIt(it.iDontWants.anyIt(it.len == 1))

asyncTest "IDONTWANT is sent only for 1.2":
# 3 nodes: A <=> B <=> C (A & C are NOT connected)
let
topic = "foobar"
nodeA = generateNodes(1, gossip = true).toGossipSub()[0]
nodeB = generateNodes(1, gossip = true).toGossipSub()[0]
nodeC = generateNodes(1, gossip = true, gossipSubVersion = GossipSubCodec_11)
.toGossipSub()[0]

startNodesAndDeferStop(@[nodeA, nodeB, nodeC])

await connectNodes(nodeA, nodeB)
await connectNodes(nodeB, nodeC)

let (bFinished, handlerB) = createCompleteHandler()

nodeA.subscribe(topic, voidTopicHandler)
nodeB.subscribe(topic, handlerB)
nodeC.subscribe(topic, voidTopicHandler)
await waitSubGraph(@[nodeA, nodeB, nodeC], topic)

check:
nodeC.mesh.peers(topic) == 1

# When A sends a message to the topic
tryPublish await nodeA.publish(topic, newSeq[byte](10000)), 1

discard await bFinished

# Then B doesn't send IDONTWANT to either A or C (because C.gossipSubVersion == GossipSubCodec_11)
await waitForHeartbeat()
check:
toSeq(nodeC.mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
toSeq(nodeA.mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))

@@ -37,15 +37,13 @@ suite "GossipSub Integration - Custom Connection Support":
asyncTest "publish with useCustomConn triggers custom connection and peer selection":
let
topic = "test"
handler = proc(topic: string, data: seq[byte]) {.async.} =
discard
nodes = generateNodes(2, gossip = true)
nodes = generateNodes(2, gossip = true).toGossipSub()

var
customConnCreated = false
peerSelectionCalled = false

GossipSub(nodes[0]).customConnCallbacks = some(
nodes[0].customConnCallbacks = some(
CustomConnectionCallbacks(
customConnCreationCB: proc(
destAddr: Option[MultiAddress], destPeerId: PeerId, codec: string
@@ -66,10 +64,12 @@ suite "GossipSub Integration - Custom Connection Support":
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)

nodes[1].subscribe(topic, handler)
nodes[1].subscribe(topic, voidTopicHandler)
await waitSub(nodes[0], nodes[1], topic)

tryPublish await nodes[0].publish(topic, "hello".toBytes(), useCustomConn = true), 1
tryPublish await nodes[0].publish(
topic, "hello".toBytes(), publishParams = some(PublishParams(useCustomConn: true))
), 1

check:
peerSelectionCalled
@@ -78,19 +78,21 @@ suite "GossipSub Integration - Custom Connection Support":
asyncTest "publish with useCustomConn triggers assertion if custom callbacks not set":
let
topic = "test"
handler = proc(topic: string, data: seq[byte]) {.async.} =
discard
nodes = generateNodes(2, gossip = true)
nodes = generateNodes(2, gossip = true).toGossipSub()

startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)

nodes[1].subscribe(topic, handler)
nodes[1].subscribe(topic, voidTopicHandler)
await waitSub(nodes[0], nodes[1], topic)

var raised = false
try:
discard await nodes[0].publish(topic, "hello".toBytes(), useCustomConn = true)
discard await nodes[0].publish(
topic,
"hello".toBytes(),
publishParams = some(PublishParams(useCustomConn: true)),
)
except Defect:
raised = true


@@ -251,25 +251,3 @@ suite "GossipSub Integration - Gossip Protocol":
check:
results[0].isCompleted()
results[1].isCompleted()

asyncTest "Peer must send right gossipsub version":
let
topic = "foobar"
node0 = generateNodes(1, gossip = true)[0]
node1 = generateNodes(1, gossip = true, gossipSubVersion = GossipSubCodec_10)[0]

startNodesAndDeferStop(@[node0, node1])

await connectNodes(node0, node1)

node0.subscribe(topic, voidTopicHandler)
node1.subscribe(topic, voidTopicHandler)
await waitSubGraph(@[node0, node1], topic)

var gossip0: GossipSub = GossipSub(node0)
var gossip1: GossipSub = GossipSub(node1)

checkUntilTimeout:
gossip0.mesh.getOrDefault(topic).toSeq[0].codec == GossipSubCodec_10
checkUntilTimeout:
gossip1.mesh.getOrDefault(topic).toSeq[0].codec == GossipSubCodec_10

@@ -0,0 +1,91 @@
# Nim-LibP2P
# Copyright (c) 2023-2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils]
import chronicles
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../helpers

suite "GossipSub Integration - Compatibility":
const topic = "foobar"

teardown:
checkTrackers()

asyncTest "Protocol negotiation selects highest common version":
let
node0 = generateNodes(
1,
gossip = true,
codecs = @[GossipSubCodec_12, GossipSubCodec_11, GossipSubCodec_10],
# Order from highest to lowest version is required because
# multistream protocol negotiation selects the first protocol
# in the dialer's list that both peers support
)
.toGossipSub()[0]
node1 = generateNodes(
1, gossip = true, codecs = @[GossipSubCodec_11, GossipSubCodec_10]
)
.toGossipSub()[0]
node2 =
generateNodes(1, gossip = true, codecs = @[GossipSubCodec_10]).toGossipSub()[0]
nodes = @[node0, node1, node2]
node0PeerId = node0.peerInfo.peerId
node1PeerId = node1.peerInfo.peerId
node2PeerId = node2.peerInfo.peerId

startNodesAndDeferStop(nodes)

await connectNodesStar(nodes)
nodes.subscribeAllNodes(topic, voidTopicHandler)
await waitForHeartbeat()

checkUntilTimeout:
node0.getPeerByPeerId(topic, node1PeerId).codec == GossipSubCodec_11
node0.getPeerByPeerId(topic, node2PeerId).codec == GossipSubCodec_10

node1.getPeerByPeerId(topic, node0PeerId).codec == GossipSubCodec_11
node1.getPeerByPeerId(topic, node2PeerId).codec == GossipSubCodec_10

node2.getPeerByPeerId(topic, node0PeerId).codec == GossipSubCodec_10
node2.getPeerByPeerId(topic, node1PeerId).codec == GossipSubCodec_10

asyncTest "IDONTWANT is sent only for GossipSubCodec_12":
# 4 nodes: nodeCenter in the center, connected to the rest
var nodes = generateNodes(3, gossip = true).toGossipSub()
let
nodeCenter = nodes[0]
nodeSender = nodes[1]
nodeCodec12 = nodes[2]
nodeCodec11 = generateNodes(
1, gossip = true, codecs = @[GossipSubCodec_11, GossipSubCodec_10]
)
.toGossipSub()[0]

nodes &= nodeCodec11

startNodesAndDeferStop(nodes)

await connectNodes(nodeCenter, nodeSender)
await connectNodes(nodeCenter, nodeCodec12)
await connectNodes(nodeCenter, nodeCodec11)

nodes.subscribeAllNodes(topic, voidTopicHandler)
await waitForHeartbeat()

# When nodeSender sends a message to the topic
tryPublish await nodeSender.publish(topic, newSeq[byte](10000)), 1

# Then nodeCenter sends IDONTWANT only to nodeCodec12 (because nodeCodec11.codec == GossipSubCodec_11)
checkUntilTimeout:
nodeCodec12.mesh.getOrDefault(topic).toSeq()[0].iDontWants.anyIt(it.len == 1)
nodeCodec11.mesh.getOrDefault(topic).toSeq()[0].iDontWants.allIt(it.len == 0)
@@ -9,7 +9,7 @@

{.used.}

import std/[sequtils]
import std/[sequtils, strutils]
import stew/byteutils
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, pubsubpeer]
@@ -18,13 +18,14 @@ import ../../helpers
import ../../utils/[futures]

suite "GossipSub Integration - Scoring":
const topic = "foobar"

teardown:
checkTrackers()

asyncTest "Flood publish to all peers with score above threshold, regardless of subscription":
let
numberOfNodes = 3
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true, floodPublish = true)
g0 = GossipSub(nodes[0])

@@ -58,7 +59,6 @@ suite "GossipSub Integration - Scoring":
results[1].isPending()

asyncTest "Should not rate limit decodable messages below the size allowed":
const topic = "foobar"
let
nodes = generateNodes(
2,
@@ -101,7 +101,6 @@ suite "GossipSub Integration - Scoring":
currentRateLimitHits() == rateLimitHits

asyncTest "Should rate limit undecodable messages above the size allowed":
const topic = "foobar"
let
nodes = generateNodes(
2,
@@ -140,7 +139,6 @@ suite "GossipSub Integration - Scoring":
currentRateLimitHits() == rateLimitHits + 2

asyncTest "Should rate limit decodable messages above the size allowed":
const topic = "foobar"
let
nodes = generateNodes(
2,
@@ -202,7 +200,6 @@ suite "GossipSub Integration - Scoring":
currentRateLimitHits() == rateLimitHits + 2

asyncTest "Should rate limit invalid messages above the size allowed":
const topic = "foobar"
let
nodes = generateNodes(
2,
@@ -250,7 +247,6 @@ suite "GossipSub Integration - Scoring":
currentRateLimitHits() == rateLimitHits + 2

asyncTest "DirectPeers: don't kick direct peer with low score":
const topic = "foobar"
let nodes = generateNodes(2, gossip = true).toGossipSub()

startNodesAndDeferStop(nodes)
@@ -280,9 +276,7 @@ suite "GossipSub Integration - Scoring":
futResult.isCompleted(true)

asyncTest "Peers disconnections mechanics":
const
numberOfNodes = 10
topic = "foobar"
const numberOfNodes = 10
let nodes =
generateNodes(numberOfNodes, gossip = true, triggerSelf = true).toGossipSub()

@@ -357,6 +351,8 @@ suite "GossipSub Integration - Scoring":
let nodes =
generateNodes(2, gossip = true, decayInterval = decayInterval).toGossipSub()

nodes.setDefaultTopicParams(topic)

startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)

@@ -379,3 +375,161 @@ suite "GossipSub Integration - Scoring":
check:
nodes[0].peerStats[nodes[1].peerInfo.peerId].topicInfos[topic].meshMessageDeliveries in
50.0 .. 66.0

asyncTest "Nodes publishing invalid messages are penalised and disconnected":
# Given GossipSub nodes with Topic Params
const numberOfNodes = 3

let
nodes = generateNodes(
numberOfNodes,
gossip = true,
verifySignature = false,
# Disable signature verification to isolate validation penalties
decayInterval = 200.milliseconds, # scoring heartbeat interval
heartbeatInterval = 5.seconds,
# heartbeatInterval >>> decayInterval to prevent pruning peers with bad score
publishThreshold = -150.0,
graylistThreshold = -200.0,
disconnectBadPeers = false,
)
.toGossipSub()
centerNode = nodes[0]
node1peerId = nodes[1].peerInfo.peerId
node2peerId = nodes[2].peerInfo.peerId

nodes.setDefaultTopicParams(topic)
for node in nodes:
node.topicParams[topic].invalidMessageDeliveriesWeight = -10.0
node.topicParams[topic].invalidMessageDeliveriesDecay = 0.9

startNodesAndDeferStop(nodes)

# And Node 0 is center node, connected to others
await connectNodes(nodes[0], nodes[1]) # center to Node 1 (valid messages)
await connectNodes(nodes[0], nodes[2]) # center to Node 2 (invalid messages)

nodes.subscribeAllNodes(topic, voidTopicHandler)

# And center node has message validator: accept from node 1, reject from node 2
var validatedMessageCount = 0
proc validationHandler(
topic: string, message: Message
): Future[ValidationResult] {.async.} =
validatedMessageCount.inc
if string.fromBytes(message.data).contains("invalid"):
return ValidationResult.Reject # reject invalid messages
else:
return ValidationResult.Accept

nodes[0].addValidator(topic, validationHandler)

# 1st scoring heartbeat
checkUntilTimeout:
centerNode.gossipsub.getOrDefault(topic).len == numberOfNodes - 1
centerNode.getPeerScore(node1peerId) > 0
centerNode.getPeerScore(node2peerId) > 0

# When messages are broadcast
const messagesToSend = 5
for i in 0 ..< messagesToSend:
nodes[1].broadcast(
nodes[1].mesh[topic],
RPCMsg(messages: @[Message(topic: topic, data: ("valid_" & $i).toBytes())]),
isHighPriority = true,
)
nodes[2].broadcast(
nodes[2].mesh[topic],
RPCMsg(messages: @[Message(topic: topic, data: ("invalid_" & $i).toBytes())]),
isHighPriority = true,
)

# And messages are processed
# Then invalidMessageDeliveries stats are applied
checkUntilTimeout:
validatedMessageCount == messagesToSend * (numberOfNodes - 1)
centerNode.getPeerTopicInfo(node1peerId, topic).invalidMessageDeliveries == 0.0
# valid messages
centerNode.getPeerTopicInfo(node2peerId, topic).invalidMessageDeliveries == 5.0
# invalid messages

# When the scoring heartbeat occurs (2nd scoring heartbeat)
# Then peer scores are calculated
checkUntilTimeout:
# node1: p1 (time in mesh) + p2 (first message deliveries)
centerNode.getPeerScore(node1peerId) > 5.0 and
centerNode.getPeerScore(node1peerId) < 6.0
# node2: p1 (time in mesh) - p4 (invalid message deliveries)
centerNode.getPeerScore(node2peerId) < -249.0 and
centerNode.getPeerScore(node2peerId) > -250.0
# all peers are still connected
centerNode.mesh[topic].toSeq().len == 2

# When disconnecting peers with bad score (score < graylistThreshold) is enabled
for node in nodes:
node.parameters.disconnectBadPeers = true

# Then peers with bad score are disconnected on scoring heartbeat (3rd scoring heartbeat)
checkUntilTimeout:
centerNode.mesh[topic].toSeq().len == 1

asyncTest "Nodes not meeting Mesh Message Deliveries Threshold are penalised":
# Given GossipSub nodes with Topic Params
const numberOfNodes = 2

let
nodes = generateNodes(
numberOfNodes,
gossip = true,
decayInterval = 200.milliseconds, # scoring heartbeat interval
heartbeatInterval = 5.seconds,
# heartbeatInterval >>> decayInterval to prevent pruning peers with bad score
disconnectBadPeers = false,
)
.toGossipSub()
node1PeerId = nodes[1].peerInfo.peerId

nodes.setDefaultTopicParams(topic)
for node in nodes:
node.topicParams[topic].meshMessageDeliveriesThreshold = 5
node.topicParams[topic].meshMessageDeliveriesActivation = 1.milliseconds
# active from the start
node.topicParams[topic].meshMessageDeliveriesDecay = 0.9
node.topicParams[topic].meshMessageDeliveriesWeight = -10.0
node.topicParams[topic].meshFailurePenaltyDecay = 0.9
node.topicParams[topic].meshFailurePenaltyWeight = -5.0

startNodesAndDeferStop(nodes)

# And Nodes are connected and subscribed to the topic
await connectNodes(nodes[0], nodes[1])
nodes.subscribeAllNodes(topic, voidTopicHandler)

# When the scoring heartbeat occurs
# Then Peer has negative score due to active meshMessageDeliveries deficit
checkUntilTimeout:
nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
nodes[0].mesh.getOrDefault(topic).len == numberOfNodes - 1
# p1 (time in mesh) - p3 (mesh message deliveries)
nodes[0].getPeerScore(node1PeerId) < -249.0

# When Peer is unsubscribed
nodes[1].unsubscribe(topic, voidTopicHandler)

# Then meshFailurePenalty is applied due to active meshMessageDeliveries deficit
checkUntilTimeout:
nodes[0].getPeerTopicInfo(node1PeerId, topic).meshFailurePenalty == 25

# When the next scoring heartbeat occurs
# Then Peer has negative score
checkUntilTimeout:
# p3b (mesh failure penalty) [p1 and p3 not calculated when peer was pruned]
nodes[0].getPeerScore(node1PeerId) == -125.0

# When Peer subscribes again
nodes[1].subscribe(topic, voidTopicHandler)

# Then Peer is not grafted to the mesh due to negative score (score was retained)
checkUntilTimeout:
nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
nodes[0].mesh.getOrDefault(topic).len == 0

217
tests/pubsub/integration/testgossipsubsignatureflags.nim
Normal file
217
tests/pubsub/integration/testgossipsubsignatureflags.nim
Normal file
@@ -0,0 +1,217 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023-2025 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import unittest2
|
||||
import chronos
|
||||
import stew/byteutils
|
||||
import ../utils
|
||||
import ../../../libp2p/protocols/pubsub/[gossipsub, pubsub]
|
||||
import ../../../libp2p/protocols/pubsub/rpc/[messages]
|
||||
import ../../helpers
|
||||
import ../../utils/futures
|
||||
|
||||
suite "GossipSub Integration - Signature Flags":
|
||||
const
|
||||
topic = "foobar"
|
||||
testData = "test message".toBytes()
|
||||
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "Default - messages are signed when sign=true and contain fromPeer and seqno when anonymize=false":
|
||||
let nodes = generateNodes(
|
||||
2, gossip = true, sign = true, verifySignature = true, anonymize = false
|
||||
)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes.subscribeAllNodes(topic, voidTopicHandler)
|
||||
|
||||
var (receivedMessages, checkForMessage) = createCheckForMessages()
|
||||
nodes[1].addOnRecvObserver(checkForMessage)
|
||||
|
||||
tryPublish await nodes[0].publish(topic, testData), 1
|
||||
|
||||
checkUntilTimeout:
|
||||
receivedMessages[].len > 0
|
||||
|
||||
let receivedMessage = receivedMessages[][0]
|
||||
check:
|
||||
receivedMessage.data == testData
|
||||
receivedMessage.fromPeer.data.len > 0
|
||||
receivedMessage.seqno.len > 0
|
||||
receivedMessage.signature.len > 0
|
||||
receivedMessage.key.len > 0
|
||||
|
||||
asyncTest "Sign flag - messages are not signed when sign=false":
|
||||
let nodes = generateNodes(
|
||||
2, gossip = true, sign = false, verifySignature = false, anonymize = false
|
||||
)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes.subscribeAllNodes(topic, voidTopicHandler)
|
||||
|
||||
var (receivedMessages, checkForMessage) = createCheckForMessages()
|
||||
nodes[1].addOnRecvObserver(checkForMessage)
|
||||
|
||||
tryPublish await nodes[0].publish(topic, testData), 1
|
||||
|
||||
checkUntilTimeout:
|
||||
receivedMessages[].len > 0
|
||||
|
||||
let receivedMessage = receivedMessages[][0]
|
||||
check:
|
||||
receivedMessage.data == testData
|
||||
receivedMessage.signature.len == 0
|
||||
receivedMessage.key.len == 0
|
||||
|
||||
asyncTest "Anonymize flag - messages are anonymous when anonymize=true":
|
||||
let nodes = generateNodes(
|
||||
2, gossip = true, sign = true, verifySignature = true, anonymize = true
|
||||
) # anonymize = true takes precedence
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes.subscribeAllNodes(topic, voidTopicHandler)
|
||||
|
||||
var (receivedMessages, checkForMessage) = createCheckForMessages()
|
||||
nodes[1].addOnRecvObserver(checkForMessage)
|
||||
|
||||
let testData = "anonymous message".toBytes()
|
||||
tryPublish await nodes[0].publish(topic, testData), 1
|
||||
|
||||
checkUntilTimeout:
|
||||
receivedMessages[].len > 0
|
||||
|
||||
let receivedMessage = receivedMessages[][0]
|
||||
check:
|
||||
receivedMessage.data == testData
|
||||
receivedMessage.fromPeer.data.len == 0
|
||||
receivedMessage.seqno.len == 0
|
||||
receivedMessage.signature.len == 0
|
||||
receivedMessage.key.len == 0
|
||||
|
||||
  type NodeConfig = object
    sign: bool
    verify: bool
    anonymize: bool

  type Scenario = object
    senderConfig: NodeConfig
    receiverConfig: NodeConfig
    shouldWork: bool

  let scenarios: seq[Scenario] =
    @[
      # valid combos
      # S default, R default
      Scenario(
        senderConfig: NodeConfig(sign: true, verify: true, anonymize: false),
        receiverConfig: NodeConfig(sign: true, verify: true, anonymize: false),
        shouldWork: true,
      ),
      # S default, R anonymous
      Scenario(
        senderConfig: NodeConfig(sign: true, verify: true, anonymize: false),
        receiverConfig: NodeConfig(sign: false, verify: false, anonymize: true),
        shouldWork: true,
      ),
      # S anonymous, R anonymous
      Scenario(
        senderConfig: NodeConfig(sign: false, verify: false, anonymize: true),
        receiverConfig: NodeConfig(sign: false, verify: false, anonymize: true),
        shouldWork: true,
      ),
      # S only sign, R only verify
      Scenario(
        senderConfig: NodeConfig(sign: true, verify: false, anonymize: false),
        receiverConfig: NodeConfig(sign: false, verify: true, anonymize: false),
        shouldWork: true,
      ),
      # S anonymous (anonymize overrides sign and verify), R minimal
      Scenario(
        senderConfig: NodeConfig(sign: true, verify: true, anonymize: true),
        receiverConfig: NodeConfig(sign: false, verify: false, anonymize: false),
        shouldWork: true,
      ),
      # S anonymous (not signed despite the flag), R minimal
      Scenario(
        senderConfig: NodeConfig(sign: false, verify: true, anonymize: true),
        receiverConfig: NodeConfig(sign: true, verify: false, anonymize: false),
        shouldWork: true,
      ),
      # S unsigned, R unsigned
      Scenario(
        senderConfig: NodeConfig(sign: false, verify: false, anonymize: false),
        receiverConfig: NodeConfig(sign: false, verify: false, anonymize: false),
        shouldWork: true,
      ),

      # invalid combos
      # S anonymous, R default
      Scenario(
        senderConfig: NodeConfig(sign: false, verify: false, anonymize: true),
        receiverConfig: NodeConfig(sign: true, verify: true, anonymize: false),
        shouldWork: false,
      ),
      # S unsigned, R anonymous but verify
      Scenario(
        senderConfig: NodeConfig(sign: false, verify: false, anonymize: false),
        receiverConfig: NodeConfig(sign: true, verify: true, anonymize: true),
        shouldWork: false,
      ),
      # S unsigned, R default
      Scenario(
        senderConfig: NodeConfig(sign: false, verify: false, anonymize: false),
        receiverConfig: NodeConfig(sign: true, verify: true, anonymize: false),
        shouldWork: false,
      ),
    ]
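
  # Illustrative reading of the matrix above (a hypothetical helper, not part
  # of the test suite): delivery fails exactly when the receiver verifies
  # signatures while the sender emits unsigned messages, i.e.
  #   func expectedDelivery(s: Scenario): bool =
  #     not s.receiverConfig.verify or
  #       (s.senderConfig.sign and not s.senderConfig.anonymize)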

  for scenario in scenarios:
    let title = "Compatibility matrix: " & $scenario
    asyncTest title:
      let
        sender = generateNodes(
          1,
          gossip = true,
          sign = scenario.senderConfig.sign,
          verifySignature = scenario.senderConfig.verify,
          anonymize = scenario.senderConfig.anonymize,
        )[0]
        receiver = generateNodes(
          1,
          gossip = true,
          sign = scenario.receiverConfig.sign,
          verifySignature = scenario.receiverConfig.verify,
          anonymize = scenario.receiverConfig.anonymize,
        )[0]
        nodes = @[sender, receiver]

      startNodesAndDeferStop(nodes)
      await connectNodesStar(nodes)

      let (messageReceivedFut, handler) = createCompleteHandler()

      nodes.subscribeAllNodes(topic, handler)
      await waitForHeartbeat()

      discard await sender.publish(topic, testData)

      let messageReceived = await waitForState(messageReceivedFut, HEARTBEAT_TIMEOUT)
      check:
        if scenario.shouldWork:
          messageReceived.isCompleted(true)
        else:
          messageReceived.isCancelled()
61 tests/pubsub/integration/testgossipsubskipmcache.nim Normal file
@@ -0,0 +1,61 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0 ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import chronos
import stew/byteutils
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, peertable]
import ../../../libp2p/protocols/pubsub/rpc/[messages]
import ../../helpers

suite "GossipSub Integration - Skip MCache Support":
  teardown:
    checkTrackers()

  asyncTest "publish with skipMCache prevents message from being added to mcache":
    let
      topic = "test"
      nodes = generateNodes(2, gossip = true).toGossipSub()

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes[1].subscribe(topic, voidTopicHandler)
    await waitSub(nodes[0], nodes[1], topic)

    let publishData = "hello".toBytes()

    tryPublish await nodes[0].publish(
      topic, publishData, publishParams = some(PublishParams(skipMCache: true))
    ), 1

    check:
      nodes[0].mcache.msgs.len == 0
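    # mcache is the cache that backs gossip IHAVE/IWANT exchanges, so a message
    # published with skipMCache cannot later be re-served to peers requesting it.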

  asyncTest "publish without skipMCache adds message to mcache":
    let
      topic = "test"
      nodes = generateNodes(2, gossip = true).toGossipSub()

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes[1].subscribe(topic, voidTopicHandler)
    await waitSub(nodes[0], nodes[1], topic)

    let publishData = "hello".toBytes()

    tryPublish await nodes[0].publish(
      topic, publishData, publishParams = none(PublishParams)
    ), 1

    check:
      nodes[0].mcache.msgs.len == 1

@@ -2,6 +2,7 @@

import
  testfloodsub, testgossipsubcontrolmessages, testgossipsubcustomconn,
  testgossipsubfanout, testgossipsubgossip, testgossipsubheartbeat,
  testgossipsubmeshmanagement, testgossipsubmessagecache, testgossipsubmessagehandling,
  testgossipsubscoring
  testgossipsubfanout, testgossipsubgossip, testgossipsubgossipcompatibility,
  testgossipsubheartbeat, testgossipsubmeshmanagement, testgossipsubmessagecache,
  testgossipsubmessagehandling, testgossipsubscoring, testgossipsubsignatureflags,
  testgossipsubskipmcache

File diff suppressed because it is too large
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Copyright (c) 2023-2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -9,19 +9,270 @@

{.used.}

import chronicles
import chronos/rateLimit
import stew/byteutils
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import
  ../../libp2p/protocols/pubsub/[floodsub, gossipsub, mcache, peertable, pubsubpeer]
import ../../libp2p/protocols/pubsub/rpc/[message, protobuf]
import ../../libp2p/muxers/muxer
import ../helpers

suite "GossipSub":
  const topic = "foobar"

  teardown:
    checkTrackers()

  asyncTest "subscribe/unsubscribeAll":
    let topic = "foobar"
  asyncTest "onNewPeer - sets peer stats and budgets and disconnects if bad score":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
      peerStats = PeerStats(
        score: gossipSub.parameters.graylistThreshold - 1.0,
        appScore: 10.0,
        behaviourPenalty: 5.0,
      )
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And existing peer stats are set
    gossipSub.peerStats[peer.peerId] = peerStats

    # And the peer is connected
    gossipSub.switch.connManager.storeMuxer(Muxer(connection: conns[0]))
    check:
      gossipSub.switch.isConnected(peer.peerId)

    # When onNewPeer is called
    gossipSub.parameters.disconnectBadPeers = true
    gossipSub.onNewPeer(peer)

    # Then peer stats are updated
    check:
      peer.score == peerStats.score
      peer.appScore == peerStats.appScore
      peer.behaviourPenalty == peerStats.behaviourPenalty

    # And peer budgets are set to default values
    check:
      peer.iHaveBudget == IHavePeerBudget
      peer.pingBudget == PingsPeerBudget

    # And peer is disconnected because score < graylistThreshold
    checkUntilTimeout:
      not gossipSub.switch.isConnected(peer.peerId)

  asyncTest "onPubSubPeerEvent - StreamClosed removes peer from mesh and fanout":
    # Given a GossipSub instance with one peer in both mesh and fanout
    let
      (gossipSub, conns, peers) =
        setupGossipSubWithPeers(1, topic, populateMesh = true, populateFanout = true)
      peer = peers[0]
      event = PubSubPeerEvent(kind: PubSubPeerEventKind.StreamClosed)
    defer:
      await teardownGossipSub(gossipSub, conns)

    check:
      gossipSub.mesh.hasPeerId(topic, peer.peerId)
      gossipSub.fanout.hasPeerId(topic, peer.peerId)

    # When StreamClosed event is handled
    gossipSub.onPubSubPeerEvent(peer, event)

    # Then peer is removed from both mesh and fanout
    check:
      not gossipSub.mesh.hasPeerId(topic, peer.peerId)
      not gossipSub.fanout.hasPeerId(topic, peer.peerId)

  asyncTest "onPubSubPeerEvent - DisconnectionRequested disconnects peer":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(
        1, topic, populateGossipsub = true, populateMesh = true, populateFanout = true
      )
      peer = peers[0]
      event = PubSubPeerEvent(kind: PubSubPeerEventKind.DisconnectionRequested)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And the peer is connected
    gossipSub.switch.connManager.storeMuxer(Muxer(connection: conns[0]))
    check:
      gossipSub.switch.isConnected(peer.peerId)
      gossipSub.mesh.hasPeerId(topic, peer.peerId)
      gossipSub.fanout.hasPeerId(topic, peer.peerId)
      gossipSub.gossipsub.hasPeerId(topic, peer.peerId)

    # When DisconnectionRequested event is handled
    gossipSub.onPubSubPeerEvent(peer, event)

    # Then peer should be disconnected
    checkUntilTimeout:
      not gossipSub.switch.isConnected(peer.peerId)
      not gossipSub.mesh.hasPeerId(topic, peer.peerId)
      not gossipSub.fanout.hasPeerId(topic, peer.peerId)
      not gossipSub.gossipsub.hasPeerId(topic, peer.peerId)

  asyncTest "unsubscribePeer - handles nil peer gracefully":
    # Given a GossipSub instance
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(0, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And a non-existent peer ID
    let nonExistentPeerId = randomPeerId()

    # When unsubscribePeer is called with non-existent peer
    gossipSub.unsubscribePeer(nonExistentPeerId)

    # Then no errors occur (method returns early for nil peers)
    check:
      true

  asyncTest "unsubscribePeer - removes peer from mesh, gossipsub, fanout and subscribedDirectPeers":
    # Given a GossipSub instance with one peer in mesh
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(
        1, topic, populateGossipsub = true, populateMesh = true, populateFanout = true
      )
      peer = peers[0]
      peerId = peer.peerId
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And peer is configured as a direct peer
    gossipSub.parameters.directPeers[peerId] = @[]
    discard gossipSub.subscribedDirectPeers.addPeer(topic, peer)

    check:
      gossipSub.mesh.hasPeerId(topic, peerId)
      gossipSub.gossipsub.hasPeerId(topic, peerId)
      gossipSub.fanout.hasPeerId(topic, peerId)
      gossipSub.subscribedDirectPeers.hasPeerId(topic, peerId)

    # When unsubscribePeer is called
    gossipSub.unsubscribePeer(peerId)

    # Then peer is removed from mesh
    check:
      not gossipSub.mesh.hasPeerId(topic, peerId)
      not gossipSub.gossipsub.hasPeerId(topic, peerId)
      not gossipSub.fanout.hasPeerId(topic, peerId)
      not gossipSub.subscribedDirectPeers.hasPeerId(topic, peerId)

  asyncTest "unsubscribePeer - resets firstMessageDeliveries in peerStats":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
      peerId = peer.peerId
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And peer stats with firstMessageDeliveries set
    gossipSub.peerStats[peerId] = PeerStats()
    gossipSub.peerStats[peerId].topicInfos[topic] =
      TopicInfo(firstMessageDeliveries: 5.0)
    check:
      gossipSub.peerStats[peerId].topicInfos[topic].firstMessageDeliveries == 5.0

    # When unsubscribePeer is called
    gossipSub.unsubscribePeer(peerId)

    # Then firstMessageDeliveries is reset to 0
    gossipSub.peerStats.withValue(peerId, stats):
      check:
        stats[].topicInfos[topic].firstMessageDeliveries == 0.0

  asyncTest "unsubscribePeer - removes peer from peersInIP":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
      peerId = peer.peerId
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And peer has an address and is in peersInIP
    let testAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()
    peer.address = some(testAddress)
    gossipSub.peersInIP[testAddress] = initHashSet[PeerId]()
    gossipSub.peersInIP[testAddress].incl(peerId)

    # And verify peer is initially in peersInIP
    check:
      peerId in gossipSub.peersInIP[testAddress]

    # When unsubscribePeer is called
    gossipSub.unsubscribePeer(peerId)

    # Then peer is removed from peersInIP
    check:
      testAddress notin gossipSub.peersInIP

  asyncTest "handleSubscribe via rpcHandler - subscribe and unsubscribe with direct peer":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And the peer is configured as a direct peer
    gossipSub.parameters.directPeers[peer.peerId] = @[]

    # When a subscribe message is sent via RPC handler
    await gossipSub.rpcHandler(
      peer, encodeRpcMsg(RPCMsg.withSubs(@[topic], true), false)
    )

    # Then the peer is added to gossipsub for the topic
    # And the peer is added to subscribedDirectPeers
    check:
      gossipSub.gossipsub.hasPeer(topic, peer)
      gossipSub.subscribedDirectPeers.hasPeer(topic, peer)

    # When Peer is added to the mesh and fanout
    discard gossipSub.mesh.addPeer(topic, peer)
    discard gossipSub.fanout.addPeer(topic, peer)

    # And an unsubscribe message is sent via RPC handler
    await gossipSub.rpcHandler(
      peer, encodeRpcMsg(RPCMsg.withSubs(@[topic], false), false)
    )

    # Then the peer is removed from gossipsub, mesh and fanout
    # And the peer is removed from subscribedDirectPeers
    check:
      not gossipSub.gossipsub.hasPeer(topic, peer)
      not gossipSub.mesh.hasPeer(topic, peer)
      not gossipSub.fanout.hasPeer(topic, peer)
      not gossipSub.subscribedDirectPeers.hasPeer(topic, peer)

  asyncTest "handleSubscribe via rpcHandler - subscribe unknown peer":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And peer is not in gossipSub.peers
    let nonExistentPeerId = randomPeerId()
    peer.peerId = nonExistentPeerId # override PeerId

    # When a subscribe message is sent via RPC handler
    await gossipSub.rpcHandler(
      peer, encodeRpcMsg(RPCMsg.withSubs(@[topic], true), false)
    )

    # Then the peer is ignored
    check:
      not gossipSub.gossipsub.hasPeer(topic, peer)

  asyncTest "subscribe and unsubscribeAll":
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
    defer:
@@ -43,8 +294,7 @@ suite "GossipSub":
      topic notin gossipSub.mesh # not in mesh
      topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)

  asyncTest "Drop messages of topics without subscription":
    let topic = "foobar"
  asyncTest "rpcHandler - drop messages of topics without subscription":
    var (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)
@@ -60,13 +310,13 @@ suite "GossipSub":

    check gossipSub.mcache.msgs.len == 0

  asyncTest "subscription limits":
  asyncTest "rpcHandler - subscription limits":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    gossipSub.topicsHigh = 10

    var tooManyTopics: seq[string]
    for i in 0 .. gossipSub.topicsHigh + 10:
      tooManyTopics &= "topic" & $i
      tooManyTopics &= topic & $i
    let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)

    let conn = TestBufferStream.new(noop)
@@ -81,9 +331,8 @@ suite "GossipSub":
      peer.behaviourPenalty > 0.0

    await conn.close()
    await gossipSub.switch.stop()

  asyncTest "invalid message bytes":
  asyncTest "rpcHandler - invalid message bytes":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let peerId = randomPeerId()
@@ -92,4 +341,355 @@ suite "GossipSub":
    expect(CatchableError):
      await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])

    await gossipSub.switch.stop()
  asyncTest "rpcHandler - peer is disconnected and rate limit is hit when overhead rate limit is exceeded":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
      rateLimitHits = currentRateLimitHits("unknown")
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And signature verification disabled to avoid message being dropped
    gossipSub.verifySignature = false

    # And peer disconnection is enabled when rate limit is exceeded
    gossipSub.parameters.disconnectPeerAboveRateLimit = true

    # And low overheadRateLimit is set
    const
      bytes = 1
      interval = 1.millis
      overheadRateLimit = Opt.some((bytes, interval))

    gossipSub.parameters.overheadRateLimit = overheadRateLimit
    peer.overheadRateLimitOpt = Opt.some(TokenBucket.new(bytes, interval))
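    # A budget of 1 byte per 1 ms guarantees that any realistic RPC payload
    # overflows the bucket immediately, which is exactly what this test needs.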

    # And a message is created that will exceed the overhead rate limit
    var msg = Message.init(peer.peerId, ("bar").toBytes(), topic, some(1'u64))

    # When the GossipSub processes the message
    # Then it throws an exception due to peer disconnection
    expect(PeerRateLimitError):
      await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    # And the rate limit hit counter is incremented
    check:
      currentRateLimitHits("unknown") == rateLimitHits + 1

  asyncTest "rpcHandler - peer is disconnected and rate limit is hit when overhead rate limit is exceeded when decodeRpcMsg fails":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
      rateLimitHits = currentRateLimitHits("unknown")
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And peer disconnection is enabled when rate limit is exceeded
    gossipSub.parameters.disconnectPeerAboveRateLimit = true

    # And low overheadRateLimit is set
    const
      bytes = 1
      interval = 1.millis
      overheadRateLimit = Opt.some((bytes, interval))

    gossipSub.parameters.overheadRateLimit = overheadRateLimit
    peer.overheadRateLimitOpt = Opt.some(TokenBucket.new(bytes, interval))

    # When invalid RPC data is sent that fails to decode
    expect(PeerRateLimitError):
      await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])

    # And the rate limit hit counter is incremented
    check:
      currentRateLimitHits("unknown") == rateLimitHits + 1

  asyncTest "rpcHandler - peer is punished and rate limit is hit when overhead rate limit is exceeded when decodeRpcMsg fails":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
      rateLimitHits = currentRateLimitHits("unknown")
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And peer disconnection is disabled when rate limit is exceeded to not raise PeerRateLimitError
    gossipSub.parameters.disconnectPeerAboveRateLimit = false

    # And low overheadRateLimit is set
    const
      bytes = 1
      interval = 1.millis
      overheadRateLimit = Opt.some((bytes, interval))

    gossipSub.parameters.overheadRateLimit = overheadRateLimit
    peer.overheadRateLimitOpt = Opt.some(TokenBucket.new(bytes, interval))

    # And initial behavior penalty is zero
    check:
      peer.behaviourPenalty == 0.0

    # When invalid RPC data is sent that fails to decode
    expect(PeerMessageDecodeError):
      await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])

    # And the rate limit hit counter is incremented
    check:
      currentRateLimitHits("unknown") == rateLimitHits + 1
      peer.behaviourPenalty == 0.1

  asyncTest "rpcHandler - peer is punished when decodeRpcMsg fails":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And initial behavior penalty is zero
    check:
      peer.behaviourPenalty == 0.0

    # When invalid RPC data is sent that fails to decode
    expect(PeerMessageDecodeError):
      await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])

    # Then the peer is penalized with behavior penalty
    check:
      peer.behaviourPenalty == 0.1
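    # Both decode-failure tests above observe the same fixed increment: one
    # failed decode adds 0.1 to behaviourPenalty, with or without rate limiting.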

  asyncTest "rpcHandler - message already seen - valid message dropped when ID already in seenMsgs":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And signature verification disabled to focus on seen message logic
    gossipSub.verifySignature = false

    # And a message is created
    let msg = Message.init(peer.peerId, "bar".toBytes, topic, some(1'u64))
    let data = encodeRpcMsg(RPCMsg(messages: @[msg]), false)

    # And the message ID is marked as already seen
    let messageId = gossipSub.msgIdProvider(msg).get
    let saltedMessageId = gossipSub.salt(messageId)
    check:
      not gossipSub.addSeen(saltedMessageId)
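    # addSeen reports whether the ID was already present, so `false` here both
    # confirms a first insertion and marks the message as seen for the next step.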

    # When the message is processed again
    await gossipSub.rpcHandler(peer, data)

    # Then the message should be dropped (not cached)
    check:
      gossipSub.mcache.msgs.len == 0

  asyncTest "rpcHandler - peer is punished when message contains invalid sequence number":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And signature verification disabled to avoid message being dropped
    gossipSub.verifySignature = false

    # And a message is created with invalid sequence number
    var msg = Message.init(peer.peerId, ("bar").toBytes(), topic, some(1'u64))
    msg.seqno = ("1").toBytes()

    # When the GossipSub processes the message
    await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    # Then the peer's invalidMessageDeliveries counter is incremented
    check:
      gossipSub.getPeerTopicInfo(peer.peerId, topic).invalidMessageDeliveries == 1.0

  asyncTest "rpcHandler - peer is punished when message id generation fails":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And signature verification disabled to avoid message being dropped
    gossipSub.verifySignature = false

    # And a custom msgIdProvider is set that always returns an error
    func customMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
      err(ValidationResult.Reject)
    gossipSub.msgIdProvider = customMsgIdProvider

    # And a message is created
    var msg = Message.init(peer.peerId, ("bar").toBytes(), topic, some(1'u64))

    # When the GossipSub processes the message
    await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    # Then the peer's invalidMessageDeliveries counter is incremented
    check:
      gossipSub.getPeerTopicInfo(peer.peerId, topic).invalidMessageDeliveries == 1.0

  asyncTest "rpcHandler - peer is punished when signature verification fails":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And signature verification enabled
    gossipSub.verifySignature = true

    # And a message without signature is created
    var msg = Message.init(peer.peerId, ("bar").toBytes(), topic, some(1'u64))

    # When the GossipSub processes the message
    await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    # Then the peer's invalidMessageDeliveries counter is incremented
    check:
      gossipSub.getPeerTopicInfo(peer.peerId, topic).invalidMessageDeliveries == 1.0

  asyncTest "rpcHandler - peer is punished when message validation is rejected":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And signature verification disabled to avoid message being dropped earlier
    gossipSub.verifySignature = false

    # And a custom validator that always rejects messages
    proc rejectingValidator(
        topic: string, message: Message
    ): Future[ValidationResult] {.async.} =
      return ValidationResult.Reject

    # Register the rejecting validator for the topic
    gossipSub.addValidator(topic, rejectingValidator)

    # And a message is created
    var msg = Message.init(peer.peerId, ("bar").toBytes(), topic, some(1'u64))

    # When the GossipSub processes the message
    await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    # Then the peer's invalidMessageDeliveries counter is incremented
    check:
      gossipSub.getPeerTopicInfo(peer.peerId, topic).invalidMessageDeliveries == 1.0

  asyncTest "rpcHandler - message validation ignore drops message":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And signature verification disabled to avoid message being dropped
    gossipSub.verifySignature = false

    # And a custom validator that ignores messages
    proc ignoringValidator(
        topic: string, message: Message
    ): Future[ValidationResult] {.async.} =
      return ValidationResult.Ignore

    gossipSub.addValidator(topic, ignoringValidator)

    # And a message is created
    let msg = Message.init(peer.peerId, ("bar").toBytes(), topic, some(1'u64))
    let msgId = gossipSub.msgIdProvider(msg).tryGet()

    # When the message is processed via rpcHandler
    await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    # Then the message should not be cached
    check:
      gossipSub.mcache.get(msgId).isNone

    # And the peer should not be punished
    check:
      gossipSub.getPeerTopicInfo(peer.peerId, topic).invalidMessageDeliveries == 0.0

  asyncTest "rpcHandler - message validation accept and successful relay":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) =
        setupGossipSubWithPeers(5, topic, populateGossipsub = true, populateMesh = true)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And signature verification disabled to avoid message being dropped
    gossipSub.verifySignature = false

    # And a custom validator that accepts messages
    proc acceptingValidator(
        topic: string, message: Message
    ): Future[ValidationResult] {.async.} =
      return ValidationResult.Accept

    gossipSub.addValidator(topic, acceptingValidator)

    # And a message is created
    let msg = Message.init(peer.peerId, ("bar").toBytes(), topic, some(1'u64))
    let msgId = gossipSub.msgIdProvider(msg).tryGet()

    # When the message is processed via rpcHandler
    await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    # Then the message should be cached
    checkUntilTimeout:
      gossipSub.mcache.get(msgId).isSome

    # And the peer should be rewarded for delivery
    check:
      gossipSub.getPeerTopicInfo(peer.peerId, topic).firstMessageDeliveries == 1.0

  asyncTest "onTopicSubscription - subscribe removes topic from fanout and rebalances mesh":
    # Given a GossipSub instance with peers in gossipsub and fanout
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(5, topic, populateGossipsub = true, populateFanout = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And the topic is in fanout with peers
    check:
      gossipSub.fanout[topic].len == peers.len
      gossipSub.mesh[topic].len == 0

    # When onTopicSubscription is called with subscribed = true
    gossipSub.onTopicSubscription(topic, true)

    check:
      # Then the topic should be removed from fanout
      topic notin gossipSub.fanout
      # And mesh should be populated with peers (rebalanced)
      gossipSub.mesh[topic].len == peers.len

  asyncTest "onTopicSubscription - unsubscribe removes topic from mesh":
    # Given a GossipSub instance with peers in mesh
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(3, topic, populateGossipsub = true, populateMesh = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    check:
      gossipSub.mesh[topic].len == peers.len

    # When onTopicSubscription is called with subscribed = false
    gossipSub.onTopicSubscription(topic, false)

    # Then the topic should be removed from mesh
    check:
      topic notin gossipSub.mesh

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Copyright (c) 2023-2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -9,18 +9,24 @@

{.used.}

import chronos
import math
import std/[options, tables, sets]
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, pubsubpeer]
import ../../libp2p/protocols/pubsub/gossipsub/[types, scoring]
import ../../libp2p/muxers/muxer
import ../../libp2p/[multiaddress, peerid]
import ../helpers

suite "GossipSub Scoring":
  const topic = "foobar"

  teardown:
    checkTrackers()

  asyncTest "Disconnect bad peers":
    let topic = "foobar"
    var (gossipSub, conns, peers) =
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(30, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)
@@ -35,10 +41,437 @@ suite "GossipSub Scoring":

    gossipSub.updateScores()

    await sleepAsync(100.millis)
    await sleepAsync(50.millis)

    check:
      # test our disconnect mechanics
      gossipSub.gossipsub.peers(topic) == 0
      # also ensure we cleanup properly the peersInIP table
      gossipSub.peersInIP.len == 0

  asyncTest "Time in mesh scoring (P1)":
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(3, topic, populateMesh = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.topicParams[topic] = TopicParams(
      topicWeight: 1.0,
      timeInMeshWeight: 1.0,
      timeInMeshQuantum: 1.seconds,
      timeInMeshCap: 10.0,
    )

    let now = Moment.now()

    # Set different mesh times for peers
    gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(
        inMesh: true, graftTime: now - 2.seconds # seconds in mesh
      )

    gossipSub.withPeerStats(peers[1].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(
        inMesh: true,
        graftTime: now - 12.seconds,
          # seconds in mesh (should be capped at timeInMeshCap)
      )

    gossipSub.withPeerStats(peers[2].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(
        inMesh: false # Not in mesh
      )

    gossipSub.updateScores()

    # Score calculation breakdown:
    # P1 formula: min(meshTime / timeInMeshQuantum, timeInMeshCap) * timeInMeshWeight * topicWeight

    check:
      # Peer 0: min(2.0s / 1s, 10.0) * 1.0 * 1.0 = 2.0
      round(peers[0].score, 1) == 2.0
      # Peer 1: min(12.0s / 1s, 10.0) * 1.0 * 1.0 = 10.0 (capped at timeInMeshCap)
      round(peers[1].score, 1) == 10.0
      # Peer 2: not in mesh, score should be 0
      round(peers[2].score, 1) == 0.0

  asyncTest "First message deliveries scoring (P2)":
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(3, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.topicParams[topic] = TopicParams(
      topicWeight: 1.0,
      firstMessageDeliveriesWeight: 2.0,
      firstMessageDeliveriesDecay: 0.5,
    )

    # Set different first message delivery counts
    gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(firstMessageDeliveries: 4.0)

    gossipSub.withPeerStats(peers[1].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(firstMessageDeliveries: 0.0)

    gossipSub.withPeerStats(peers[2].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(firstMessageDeliveries: 2.0)

    gossipSub.updateScores()

    # Check scores: firstMessageDeliveries * weight
    check:
      round(peers[0].score, 1) == 8.0 # 4.0 * 2.0
      round(peers[1].score, 1) == 0.0 # 0.0 * 2.0
      round(peers[2].score, 1) == 4.0 # 2.0 * 2.0

    # Check decay was applied
    gossipSub.peerStats.withValue(peers[0].peerId, stats):
      check:
        round(stats[].topicInfos[topic].firstMessageDeliveries, 1) == 2.0 # 4.0 * 0.5

  asyncTest "Mesh message deliveries scoring (P3)":
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(3, topic, populateMesh = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    let now = Moment.now()
    gossipSub.topicParams[topic] = TopicParams(
      topicWeight: 1.0,
      meshMessageDeliveriesWeight: -1.0,
      meshMessageDeliveriesThreshold: 4.0,
      meshMessageDeliveriesActivation: 1.seconds,
      meshMessageDeliveriesDecay: 0.5,
    )

    # Set up peers with different mesh message delivery counts
    gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(
        inMesh: true,
        graftTime: now - 2.seconds,
        meshMessageDeliveries: 2.0, # Below threshold
        meshMessageDeliveriesActive: true,
      )

    gossipSub.withPeerStats(peers[1].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(
        inMesh: true,
        graftTime: now - 2.seconds,
        meshMessageDeliveries: 6.0, # Above threshold
        meshMessageDeliveriesActive: true,
      )

    gossipSub.withPeerStats(peers[2].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(
        inMesh: true,
        graftTime: now - 500.milliseconds, # Recently grafted, not active yet
        meshMessageDeliveries: 2.0,
      )

    gossipSub.updateScores()

    check:
      # Peer 0: deficit = 4 - 2 = 2, penalty = 2^2 * -1 = -4
      round(peers[0].score, 1) == -4.0
      # Peer 1: above threshold, no penalty
      round(peers[1].score, 1) == 0.0
      # Peer 2: not active yet, no penalty
      round(peers[2].score, 1) == 0.0

  asyncTest "Mesh failure penalty scoring (P3b)":
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(2, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.topicParams[topic] = TopicParams(
      topicWeight: 1.0, meshFailurePenaltyWeight: -2.0, meshFailurePenaltyDecay: 0.5
    )

    # Set mesh failure penalty
    gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(meshFailurePenalty: 2.0)

    gossipSub.withPeerStats(peers[1].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(meshFailurePenalty: 0.0)

    gossipSub.updateScores()

    # Check penalty application
    check:
      round(peers[0].score, 1) == -4.0 # 2.0 * -2.0
      round(peers[1].score, 1) == 0.0

    # Check decay was applied
    gossipSub.peerStats.withValue(peers[0].peerId, stats):
      check:
        round(stats[].topicInfos[topic].meshFailurePenalty, 1) == 1.0 # 2.0 * 0.5

  asyncTest "Invalid message deliveries scoring (P4)":
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(2, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.topicParams[topic] = TopicParams(
      topicWeight: 1.0,
      invalidMessageDeliveriesWeight: -4.0,
      invalidMessageDeliveriesDecay: 0.5,
    )

    # Set invalid message deliveries
    gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(invalidMessageDeliveries: 2.0)

    gossipSub.withPeerStats(peers[1].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(invalidMessageDeliveries: 0.0)

    gossipSub.updateScores()

    # Check penalty: 2^2 * -4 = -16
    check:
      round(peers[0].score, 1) == -16.0
      round(peers[1].score, 1) == 0.0

    # Check decay was applied
    gossipSub.peerStats.withValue(peers[0].peerId, stats):
      check:
        round(stats[].topicInfos[topic].invalidMessageDeliveries, 1) == 1.0 # 2.0 * 0.5

  asyncTest "App-specific scoring":
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(3, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.parameters.appSpecificWeight = 0.5

    # Set different app scores
    peers[0].appScore = 8.0
    peers[1].appScore = -6.0
    peers[2].appScore = 0.0

    gossipSub.updateScores()

    check:
      round(peers[0].score, 1) == 4.0 # 8.0 * 0.5
      round(peers[1].score, 1) == -3.0 # -6.0 * 0.5
      round(peers[2].score, 1) == 0.0 # 0.0 * 0.5

  asyncTest "Behaviour penalty scoring":
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(3, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.parameters.behaviourPenaltyWeight = -0.25
    gossipSub.parameters.behaviourPenaltyDecay = 0.5

    # Set different behaviour penalties
    peers[0].behaviourPenalty = 4.0
    peers[1].behaviourPenalty = 2.0
    peers[2].behaviourPenalty = 0.0

    gossipSub.updateScores()

    # Check penalty: penalty^2 * weight
    check:
      round(peers[0].score, 1) == -4.0 # 4^2 * -0.25 = -4.0
      round(peers[1].score, 1) == -1.0 # 2^2 * -0.25 = -1.0
      round(peers[2].score, 1) == 0.0 # 0^2 * -0.25 = 0.0

    # Check decay was applied
    check:
      round(peers[0].behaviourPenalty, 1) == 2.0 # 4.0 * 0.5
      round(peers[1].behaviourPenalty, 1) == 1.0 # 2.0 * 0.5
      round(peers[2].behaviourPenalty, 1) == 0.0

  asyncTest "Colocation factor scoring":
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(5, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.parameters.ipColocationFactorWeight = -1.0
    gossipSub.parameters.ipColocationFactorThreshold = 2.0

    # Simulate peers from same IP
    let sharedAddress = MultiAddress.init("/ip4/192.168.1.1/tcp/4001").tryGet()
    peers[0].address = some(sharedAddress)
    peers[1].address = some(sharedAddress)
    peers[2].address = some(sharedAddress)

    # Add to peersInIP to simulate colocation detection
    gossipSub.peersInIP[sharedAddress] =
      toHashSet([peers[0].peerId, peers[1].peerId, peers[2].peerId])

    # Different IP for other peers
    peers[3].address = some(MultiAddress.init("/ip4/192.168.1.2/tcp/4001").tryGet())
    peers[4].address = some(MultiAddress.init("/ip4/192.168.1.3/tcp/4001").tryGet())

    gossipSub.updateScores()

    check:
      # First 3 peers should have colocation penalty
      # over = 3 - 2 = 1, penalty = 1^2 * -1.0 = -1.0
      round(peers[0].score, 1) == -1.0
      round(peers[1].score, 1) == -1.0
      round(peers[2].score, 1) == -1.0
      # Other peers should have no penalty
      round(peers[3].score, 1) == 0.0
      round(peers[4].score, 1) == 0.0

  asyncTest "Score decay to zero":
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.parameters.decayToZero = 0.01
    gossipSub.topicParams[topic] = TopicParams(
      topicWeight: 1.0,
      firstMessageDeliveriesDecay: 0.1,
      meshMessageDeliveriesDecay: 0.1,
      meshFailurePenaltyDecay: 0.1,
      invalidMessageDeliveriesDecay: 0.1,
    )

    # Set small values that should decay to zero
    gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(
        firstMessageDeliveries: 0.02,
        meshMessageDeliveries: 0.04,
        meshFailurePenalty: 0.06,
        invalidMessageDeliveries: 0.08,
      )

    gossipSub.updateScores()

    # All values should be decayed to zero
    gossipSub.peerStats.withValue(peers[0].peerId, stats):
      let info = stats[].topicInfos[topic]
      check:
        round(info.firstMessageDeliveries, 1) == 0.0
        round(info.meshMessageDeliveries, 1) == 0.0
        round(info.meshFailurePenalty, 1) == 0.0
        round(info.invalidMessageDeliveries, 1) == 0.0

  asyncTest "Peer stats expiration and eviction":
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    let now = Moment.now()

    # Create expired peer stats for disconnected peer
    let expiredPeerId = randomPeerId()
    gossipSub.peerStats[expiredPeerId] = PeerStats(
      expire: now - 1.seconds, # Already expired
      score: -5.0,
    )

    # Create non-expired stats for connected peer
    gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
      stats.expire = now + 10.seconds
      stats.score = 2.0

    check:
      gossipSub.peerStats.len == 2 # Before cleanup: expired + connected peer

    gossipSub.updateScores()

    # Expired peer should be evicted, connected peer should remain
    check:
      gossipSub.peerStats.len == 1
      expiredPeerId notin gossipSub.peerStats
      peers[0].peerId in gossipSub.peerStats

  asyncTest "Combined scoring":
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(1, topic, populateMesh = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # Set up all topic parameters
    let now = Moment.now()
    gossipSub.topicParams[topic] = TopicParams(
      topicWeight: 2.0,
      timeInMeshWeight: 0.25, # P1
      timeInMeshQuantum: 1.seconds,
      timeInMeshCap: 10.0,
      firstMessageDeliveriesWeight: 1.0, # P2
      meshMessageDeliveriesWeight: -1.0, # P3
      meshMessageDeliveriesThreshold: 4.0,
      meshMessageDeliveriesActivation: 1.seconds,
      meshFailurePenaltyWeight: -2.0, # P3b
      invalidMessageDeliveriesWeight: -1.0, # P4
    )

    gossipSub.parameters.appSpecificWeight = 0.5
    gossipSub.parameters.behaviourPenaltyWeight = -0.25

    # Set up peer state
    let peer = peers[0]
    peer.appScore = 6.0
    peer.behaviourPenalty = 2.0

    gossipSub.withPeerStats(peer.peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(
        inMesh: true,
        graftTime: now - 4.seconds, # seconds in mesh
        meshMessageDeliveriesActive: true,
        firstMessageDeliveries: 3.0, # P2 component
        meshMessageDeliveries: 2.0, # P3 component (below threshold)
        meshFailurePenalty: 1.0, # P3b component
        invalidMessageDeliveries: 2.0, # P4 component
      )

    gossipSub.updateScores()

    # Calculate expected score step by step:
    #
    # P1 (time in mesh): meshTime / timeInMeshQuantum * timeInMeshWeight
    #   = 4.0s / 1s * 0.25 = 1.0
    #
    # P2 (first message deliveries): firstMessageDeliveries * firstMessageDeliveriesWeight
    #   = 3.0 * 1.0 = 3.0
    #
    # P3 (mesh message deliveries): deficit = max(0, threshold - deliveries)
    #   deficit = max(0, 4.0 - 2.0) = 2.0
    #   penalty = deficit^2 * weight = 2.0^2 * -1.0 = -4.0
    #
    # P3b (mesh failure penalty): meshFailurePenalty * meshFailurePenaltyWeight
    #   = 1.0 * -2.0 = -2.0
    #
    # P4 (invalid message deliveries): invalidMessageDeliveries^2 * invalidMessageDeliveriesWeight
    #   = 2.0^2 * -1.0 = -4.0
    #
    # Topic score = (P1 + P2 + P3 + P3b + P4) * topicWeight
    #   = (1.0 + 3.0 + (-4.0) + (-2.0) + (-4.0)) * 2.0
    #   = (1.0 + 3.0 - 4.0 - 2.0 - 4.0) * 2.0
    #   = -6.0 * 2.0 = -12.0
    #
    # App score = appScore * appSpecificWeight = 6.0 * 0.5 = 3.0
    #
    # Behaviour penalty = behaviourPenalty^2 * behaviourPenaltyWeight
    #   = 2.0^2 * -0.25 = 4.0 * -0.25 = -1.0
    #
    # Final score = topicScore + appScore + behaviourPenalty
    #   = -12.0 + 3.0 + (-1.0) = -10.0

    check:
      round(peer.score, 1) == -10.0

  asyncTest "Zero topic weight skips scoring":
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # Set topic weight to zero
    gossipSub.topicParams[topic] = TopicParams(
      topicWeight: 0.0,
      firstMessageDeliveriesWeight: 100.0, # High weight but should be ignored
    )

    gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
      stats.topicInfos[topic] = TopicInfo(firstMessageDeliveries: 10.0)

    gossipSub.updateScores()

    # Score should be zero since topic weight is zero
    check:
      round(peers[0].score, 1) == 0.0

@@ -1,9 +1,3 @@
# compile time options here
const
  libp2p_pubsub_sign {.booldefine.} = true
  libp2p_pubsub_verify {.booldefine.} = true
  libp2p_pubsub_anonymize {.booldefine.} = false

import hashes, random, tables, sets, sequtils
import chronos, results, stew/byteutils, chronos/ratelimit
import
@@ -173,9 +167,9 @@ proc generateNodes*(
    msgIdProvider: MsgIdProvider = defaultMsgIdProvider,
    gossip: bool = false,
    triggerSelf: bool = false,
    verifySignature: bool = libp2p_pubsub_verify,
    anonymize: bool = libp2p_pubsub_anonymize,
    sign: bool = libp2p_pubsub_sign,
    sign: bool = true,
    verifySignature: bool = true,
    anonymize: bool = false,
    sendSignedPeerRecord = false,
    unsubscribeBackoff = 1.seconds,
    pruneBackoff = 1.minutes,
@@ -184,7 +178,7 @@ proc generateNodes*(
    enablePX: bool = false,
    overheadRateLimit: Opt[tuple[bytes: int, interval: Duration]] =
      Opt.none(tuple[bytes: int, interval: Duration]),
    gossipSubVersion: string = "",
    codecs: seq[string] = @[],
    sendIDontWantOnPublish: bool = false,
    heartbeatInterval: Duration = TEST_GOSSIPSUB_HEARTBEAT_INTERVAL,
    floodPublish: bool = false,
@@ -195,6 +189,9 @@ proc generateNodes*(
    historyGossip = 5,
    gossipThreshold = -100.0,
    decayInterval = 1.seconds,
    publishThreshold = -1000.0,
    graylistThreshold = -10000.0,
    disconnectBadPeers: bool = false,
): seq[PubSub] =
  for i in 0 ..< num:
    let switch = newStandardSwitch(
@@ -225,17 +222,16 @@ proc generateNodes*(
          p.opportunisticGraftThreshold = opportunisticGraftThreshold
          p.gossipThreshold = gossipThreshold
          p.decayInterval = decayInterval
          p.publishThreshold = publishThreshold
          p.graylistThreshold = graylistThreshold
          p.disconnectBadPeers = disconnectBadPeers
          if gossipFactor.isSome: p.gossipFactor = gossipFactor.get
          applyDValues(p, dValues)
          p
        ),
      )
      # set some testing params, to enable scores
      g.topicParams.mgetOrPut("foobar", TopicParams.init()).topicWeight = 1.0
      g.topicParams.mgetOrPut("foo", TopicParams.init()).topicWeight = 1.0
      g.topicParams.mgetOrPut("bar", TopicParams.init()).topicWeight = 1.0
      if gossipSubVersion != "":
        g.codecs = @[gossipSubVersion]
      if codecs.len != 0:
        g.codecs = codecs
      g.PubSub
    else:
      FloodSub.init(
@@ -254,6 +250,10 @@ proc generateNodes*(
proc toGossipSub*(nodes: seq[PubSub]): seq[GossipSub] =
  return nodes.mapIt(GossipSub(it))

proc setDefaultTopicParams*(nodes: seq[GossipSub], topic: string): void =
  for node in nodes:
    node.topicParams.mgetOrPut(topic, TopicParams.init()).topicWeight = 1.0

proc getNodeByPeerId*[T: PubSub](nodes: seq[T], peerId: PeerId): GossipSub =
  let filteredNodes = nodes.filterIt(it.peerInfo.peerId == peerId)
  check:
@@ -261,11 +261,22 @@ proc getNodeByPeerId*[T: PubSub](nodes: seq[T], peerId: PeerId): GossipSub =
  return filteredNodes[0]

proc getPeerByPeerId*[T: PubSub](node: T, topic: string, peerId: PeerId): PubSubPeer =
  let filteredPeers = node.gossipsub[topic].toSeq().filterIt(it.peerId == peerId)
  let filteredPeers =
    node.gossipsub.getOrDefault(topic).toSeq().filterIt(it.peerId == peerId)
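  # getOrDefault yields an empty peer set when the topic has no entry yet, so
  # lookups against unknown topics no longer raise KeyError.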
  check:
    filteredPeers.len == 1
  return filteredPeers[0]

proc getPeerStats*(node: GossipSub, peerId: PeerId): PeerStats =
  node.peerStats.withValue(peerId, stats):
    return stats[]

proc getPeerScore*(node: GossipSub, peerId: PeerId): float64 =
  return node.getPeerStats(peerId).score

proc getPeerTopicInfo*(node: GossipSub, peerId: PeerId, topic: string): TopicInfo =
  return node.getPeerStats(peerId).topicInfos.getOrDefault(topic)

proc connectNodes*[T: PubSub](dialer: T, target: T) {.async.} =
  doAssert dialer.switch.peerInfo.peerId != target.switch.peerInfo.peerId,
    "Could not connect same peer"
@@ -542,10 +553,10 @@ proc baseTestProcedure*(
proc `$`*(peer: PubSubPeer): string =
  shortLog(peer)

proc currentRateLimitHits*(): float64 =
proc currentRateLimitHits*(label: string = "nim-libp2p"): float64 =
  try:
    libp2p_gossipsub_peers_rate_limit_hits.valueByName(
      "libp2p_gossipsub_peers_rate_limit_hits_total", @["nim-libp2p"]
      "libp2p_gossipsub_peers_rate_limit_hits_total", @[label]
    )
  except KeyError:
    0
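# The metric appears to be labelled per peer agent: the gossipsub tests above
# query the "unknown" label because their fixture peers never identify an
# agent, while "nim-libp2p" stays the default for regular callers.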

@@ -481,7 +481,7 @@ suite "Autonat Service":
    awaiter = newFuture[void]()
    await autonatService.run(switch1)

    await sleepAsync(200.millis)
    await sleepAsync(100.millis)

    check autonatService.networkReachability == NetworkReachability.Reachable
    check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 1
@@ -510,6 +510,6 @@ suite "Autonat Service":

  await switch2.connect(switch1.peerInfo.peerId, switch1.peerInfo.addrs)

  await sleepAsync(500.milliseconds)
  await sleepAsync(250.milliseconds)

  await allFuturesThrowing(switch1.stop(), switch2.stop())

@@ -86,7 +86,7 @@ suite "Autorelay":
    let autorelay = AutoRelayService.new(3, relayClient, checkMA, newRng())
    switchClient = createSwitch(relayClient, autorelay)
    await allFutures(switchClient.start(), switchRelay.start())
    await sleepAsync(500.millis)
    await sleepAsync(250.millis)
    await switchClient.connect(switchRelay.peerInfo.peerId, switchRelay.peerInfo.addrs)
    await fut.wait(1.seconds)
    let addresses = autorelay.getAddresses()

@@ -63,7 +63,7 @@ suite "AutoTLS ACME API":

    let challengeResponse =
      await api.requestNewOrder(@["some.dummy.domain.com"], key, "kid")
    check challengeResponse.status == ACMEChallengeStatus.pending
    check challengeResponse.status == ACMEOrderStatus.PENDING
    check challengeResponse.authorizations ==
      ["http://example.com/expected-authorizations-url"]
    check challengeResponse.finalize == "http://example.com/expected-finalize-url"
@@ -93,12 +93,12 @@ suite "AutoTLS ACME API":
    check authorizationsResponse.challenges.len > 0

    let dns01 = authorizationsResponse.challenges.filterIt(
      it.`type` == ACMEChallengeType.dns01
      it.`type` == ACMEChallengeType.DNS01
    )[0]
    check dns01.url == "http://example.com/expected-dns01-url"
    check dns01.`type` == ACMEChallengeType.dns01
    check dns01.`type` == ACMEChallengeType.DNS01
    check dns01.token == ACMEChallengeToken("expected-dns01-token")
    check dns01.status == ACMEChallengeStatus.pending
    check dns01.status == ACMEChallengeStatus.PENDING

  asyncTest "register with unsupported keys":
    let unsupportedSchemes = [PKScheme.Ed25519, PKScheme.Secp256k1, PKScheme.ECDSA]
@@ -110,8 +110,7 @@ suite "AutoTLS ACME API":
  asyncTest "challenge completed successful":
    api.mockedResponses.add(
      HTTPResponse(
        body: %*{"checkURL": "http://example.com/some-check-url"},
        headers: HttpTable.init(),
        body: %*{"url": "http://example.com/some-check-url"}, headers: HttpTable.init()
      )
    )
    discard await api.sendChallengeCompleted(
@@ -131,8 +130,7 @@ suite "AutoTLS ACME API":
  asyncTest "challenge completed max retries reached":
    api.mockedResponses.add(
      HTTPResponse(
        body: %*{"checkURL": "http://example.com/some-check-url"},
        headers: HttpTable.init(),
        body: %*{"url": "http://example.com/some-check-url"}, headers: HttpTable.init()
      )
    )
    discard await api.sendChallengeCompleted(
@@ -155,8 +153,7 @@ suite "AutoTLS ACME API":
  asyncTest "challenge completed invalid":
    api.mockedResponses.add(
      HTTPResponse(
        body: %*{"checkURL": "http://example.com/some-check-url"},
        headers: HttpTable.init(),
        body: %*{"url": "http://example.com/some-check-url"}, headers: HttpTable.init()
      )
    )
    discard await api.sendChallengeCompleted(
@@ -231,14 +228,14 @@ suite "AutoTLS ACME API":
        body: %*{"status": "invalid"}, headers: HttpTable.init(@[("Retry-After", "0")])
      )
    )
    expect(ACMEError):
      discard await api.certificateFinalized(
        "some-domain",
        parseUri("http://example.com/some-finalize-url"),
        parseUri("http://example.com/some-order-url"),
        key,
        "kid",
      )
    let finalized = await api.certificateFinalized(
      "some-domain",
      parseUri("http://example.com/some-finalize-url"),
      parseUri("http://example.com/some-order-url"),
      key,
      "kid",
    )
    check finalized == false

asyncTest "expect error on invalid JSON response":
|
||||
# add a couple invalid responses as they get popped by every get or post call
|
||||
@@ -313,8 +310,9 @@ suite "AutoTLS ACME Client":
       )
     )

-    acme = await ACMEClient.new(api = Opt.some(ACMEApi(acmeApi)))
-    check acme.kid == "some-expected-kid"
+    acme = ACMEClient.new(api = ACMEApi(acmeApi))
+    let kid = await acme.getOrInitKid()
+    check kid == "some-expected-kid"

   asyncTest "getCertificate succeeds on sendChallengeCompleted but fails on requestFinalize":
     # register successful

@@ -327,8 +325,7 @@ suite "AutoTLS ACME Client":
     # request completed successful
     acmeApi.mockedResponses.add(
       HTTPResponse(
-        body: %*{"checkURL": "http://example.com/some-check-url"},
-        headers: HttpTable.init(),
+        body: %*{"url": "http://example.com/some-check-url"}, headers: HttpTable.init()
       )
     )
     # finalize is invalid

@@ -344,16 +341,17 @@ suite "AutoTLS ACME Client":
         body: %*{"status": "invalid"}, headers: HttpTable.init(@[("Retry-After", "0")])
       )
     )
-    acme = await ACMEClient.new(api = Opt.some(ACMEApi(acmeApi)))
-    check acme.kid == "some-expected-kid"
+    acme = ACMEClient.new(api = ACMEApi(acmeApi))
+    let kid = await acme.getOrInitKid()
+    check kid == "some-expected-kid"

     let challenge = ACMEChallengeResponseWrapper(
       finalize: "https://finalize.com",
       order: "https://order.com",
       dns01: ACMEChallenge(
         url: "https://some.domain",
-        `type`: ACMEChallengeType.dns01,
-        status: ACMEChallengeStatus.valid,
+        `type`: ACMEChallengeType.DNS01,
+        status: ACMEChallengeStatus.VALID,
         token: ACMEChallengeToken("some-token"),
       ),
     )

@@ -12,57 +12,140 @@
 import chronos
 import chronos/apps/http/httpclient
 import
-  ../libp2p/
-  [
-    stream/connection,
-    upgrademngrs/upgrade,
-    autotls/acme/api,
-    autotls/acme/client,
-    wire,
-  ]
+  ../libp2p/[
+    stream/connection,
+    upgrademngrs/upgrade,
+    autotls/acme/api,
+    autotls/acme/client,
+    autotls/service,
+    autotls/utils,
+    multiaddress,
+    switch,
+    builders,
+    nameresolving/dnsresolver,
+    wire,
+  ]

 import ./helpers

+when defined(linux) and defined(amd64):
+  {.used.}
+
-suite "AutoTLS Integration":
-  asyncTeardown:
-    checkTrackers()
+  suite "AutoTLS Integration":
+    asyncTeardown:
+      checkTrackers()

-  asyncTest "request challenge without ACMEClient (ACMEApi only)":
-    let key = KeyPair.random(PKScheme.RSA, newRng()[]).get()
-    let acmeApi = await ACMEApi.new(acmeServerURL = parseUri(LetsEncryptURLStaging))
-    defer:
-      await acmeApi.close()
-    let registerResponse = await acmeApi.requestRegister(key)
-    # account was registered (kid set)
-    check registerResponse.kid != ""
-    if registerResponse.kid == "":
-      raiseAssert "unable to register acme account"
+    asyncTest "request challenge without ACMEClient (ACMEApi only)":
+      let key = KeyPair.random(PKScheme.RSA, newRng()[]).get()
+      let acmeApi = ACMEApi.new(acmeServerURL = parseUri(LetsEncryptURLStaging))
+      defer:
+        await acmeApi.close()
+      let registerResponse = await acmeApi.requestRegister(key)
+      # account was registered (kid set)
+      check registerResponse.kid != ""
+      if registerResponse.kid == "":
+        raiseAssert "unable to register acme account"

-    # challenge requested
-    let challenge = await acmeApi.requestChallenge(
-      @["some.dummy.domain.com"], key, registerResponse.kid
-    )
-    check challenge.finalize.len() > 0
-    check challenge.order.len() > 0
+      # challenge requested
+      let challenge = await acmeApi.requestChallenge(
+        @["some.dummy.domain.com"], key, registerResponse.kid
+      )
+      check challenge.finalize.len > 0
+      check challenge.order.len > 0

-    check challenge.dns01.url.len() > 0
-    check challenge.dns01.`type` == ACMEChallengeType.dns01
-    check challenge.dns01.status == ACMEChallengeStatus.pending
-    check challenge.dns01.token.len() > 0
+      check challenge.dns01.url.len > 0
+      check challenge.dns01.`type` == ACMEChallengeType.DNS01
+      check challenge.dns01.status == ACMEChallengeStatus.PENDING
+      check challenge.dns01.token.len > 0

-  asyncTest "request challenge with ACMEClient":
-    let acme = await ACMEClient.new(acmeServerURL = parseUri(LetsEncryptURLStaging))
-    defer:
-      await acme.close()
+    asyncTest "request challenge with ACMEClient":
+      let acme = ACMEClient.new(
+        api = ACMEApi.new(acmeServerURL = parseUri(LetsEncryptURLStaging))
+      )
+      defer:
+        await acme.close()

-    let challenge = await acme.getChallenge(@["some.dummy.domain.com"])
+      let challenge = await acme.getChallenge(@["some.dummy.domain.com"])

-    check challenge.finalize.len() > 0
-    check challenge.order.len() > 0
-    check challenge.dns01.url.len() > 0
-    check challenge.dns01.`type` == ACMEChallengeType.dns01
-    check challenge.dns01.status == ACMEChallengeStatus.pending
-    check challenge.dns01.token.len() > 0
+      check:
+        challenge.finalize.len > 0
+        challenge.order.len > 0
+        challenge.dns01.url.len > 0
+        challenge.dns01.`type` == ACMEChallengeType.DNS01
+        challenge.dns01.status == ACMEChallengeStatus.PENDING
+        challenge.dns01.token.len > 0

+    asyncTest "AutotlsService correctly downloads challenges":
+      let ip =
+        try:
+          getPublicIPAddress()
+        except:
+          skip() # host doesn't have public IPv4 address
+          return
+
+      let switch = SwitchBuilder
+        .new()
+        .withRng(newRng())
+        .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
+        .withTcpTransport()
+        .withAutotls(
+          config = AutotlsConfig.new(
+            acmeServerURL = parseUri(LetsEncryptURLStaging), renewCheckTime = 1.seconds
+          )
+        )
+        .withYamux()
+        .withNoise()
+        .build()
+
+      await switch.start()
+      defer:
+        await switch.stop()
+
+      # find autotls in list of services
+      var autotls: AutotlsService = nil
+      for service in switch.services:
+        try:
+          autotls = AutotlsService(service)
+          break
+        except:
+          continue
+
+      if autotls.isNil():
+        raiseAssert "autotls service not found in switch"
+
+      # wait for cert to be ready
+      await autotls.certReady.wait()
+      # clear since we'll use it again for renewal
+      autotls.certReady.clear()
+
+      let dnsResolver = DnsResolver.new(DefaultDnsServers)
+      let base36PeerId = encodePeerId(switch.peerInfo.peerId)
+      let dnsTXTRecord = (
+        await dnsResolver.resolveTxt(
+          "_acme-challenge." & base36PeerId & "." & AutoTLSDNSServer
+        )
+      )[0]
+
+      # check if DNS TXT record is set
+      check dnsTXTRecord.len > 0
+
+      # certificate was downloaded and parsed
+      let cert = autotls.cert.valueOr:
+        raiseAssert "certificate not found"
+      let certBefore = cert
+
+      # invalidate certificate
+      let invalidCert = AutotlsCert.new(cert.cert, Moment.now - 2.hours)
+      autotls.cert = Opt.some(invalidCert)
+
+      # wait for cert to be renewed
+      await autotls.certReady.wait()
+
+      # certificate was indeed renewed
+      let certAfter = autotls.cert.valueOr:
+        raiseAssert "certificate not found"
+
+      check certBefore != certAfter
+
+      # cert is valid
+      check certAfter.expiry > Moment.now

33
tests/testbytesview.nim
Normal file
@@ -0,0 +1,33 @@
+{.used.}
+
+# Nim-Libp2p
+# Copyright (c) 2025 Status Research & Development GmbH
+# Licensed under either of
+# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+# * MIT license ([LICENSE-MIT](LICENSE-MIT))
+# at your option.
+# This file may not be copied, modified, or distributed except according to
+# those terms.
+
+import unittest2
+import ../libp2p/utils/bytesview
+
+suite "BytesView":
+  test "basics":
+    var b = BytesView.init(@[byte 1, 2, 3, 4, 5, 6])
+    check b.len() == 6
+    check @(b.data()) == @([byte 1, 2, 3, 4, 5, 6])
+    check @(b.toOpenArray(1, 3)) == @([byte 2, 3])
+
+    b.consume(2)
+    check b.len() == 4
+    check @(b.data()) == @([byte 3, 4, 5, 6])
+    check @(b.toOpenArray(1, 3)) == @([byte 4, 5])
+
+    b.consume(2)
+    check b.len() == 2
+    check @(b.data()) == @([byte 5, 6])
+
+    b.consume(2)
+    check b.len() == 0
+    check b.data().len == 0

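Note: for readers without the new module at hand, a BytesView-like type consistent with the checks above can be sketched as follows; the real implementation lives in libp2p/utils/bytesview, and this illustration infers the contract (notably the exclusive upper bound of toOpenArray) purely from the assertions in the test:

type BytesView = object
  buf: seq[byte] # underlying bytes; consume only advances an offset
  offset: int

proc init(T: type BytesView, data: seq[byte]): T =
  T(buf: data, offset: 0)

func len(v: BytesView): int =
  v.buf.len - v.offset

proc consume(v: var BytesView, n: int) =
  v.offset += min(n, v.len)

template data(v: BytesView): untyped =
  # view over the unconsumed bytes, no copy
  v.buf.toOpenArray(v.offset, v.buf.len - 1)

template toOpenArray(v: BytesView, a, b: int): untyped =
  # upper bound exclusive, as the checks above imply
  v.buf.toOpenArray(v.offset + a, v.offset + b - 1)
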
@@ -88,7 +88,7 @@ proc pubsubTest(f: set[P2PDaemonFlags]): Future[bool] {.async.} =
   var ticket1 = await api1.pubsubSubscribe("test-topic", pubsubHandler1)
   var ticket2 = await api2.pubsubSubscribe("test-topic", pubsubHandler2)

-  await sleepAsync(2.seconds)
+  await sleepAsync(1.seconds)

   var topics1 = await api1.pubsubGetTopics()
   var topics2 = await api2.pubsubGetTopics()

@@ -98,10 +98,10 @@ proc pubsubTest(f: set[P2PDaemonFlags]): Future[bool] {.async.} =
   var peers2 = await api2.pubsubListPeers("test-topic")
   if len(peers1) == 1 and len(peers2) == 1:
     # Publish test data via api1.
-    await sleepAsync(500.milliseconds)
+    await sleepAsync(250.milliseconds)
     await api1.pubsubPublish("test-topic", msgData)
     var res =
-      await one(allFutures(handlerFuture1, handlerFuture2), sleepAsync(10.seconds))
+      await one(allFutures(handlerFuture1, handlerFuture2), sleepAsync(5.seconds))

   await api1.close()
   await api2.close()

@@ -115,7 +115,7 @@ suite "Dcutr":
         reuseConnection = true,
         dir = Direction.Out,
     ): Future[void] {.async: (raises: [DialFailedError, CancelledError]).} =
-      await sleepAsync(100.millis)
+      await sleepAsync(50.millis)

     let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectTimeoutProc)
     let publicSwitch = newStandardSwitch()

@@ -194,7 +194,7 @@ suite "Dcutr":
         reuseConnection = true,
         dir = Direction.Out,
     ): Future[void] {.async: (raises: [DialFailedError, CancelledError]).} =
-      await sleepAsync(100.millis)
+      await sleepAsync(50.millis)

     await ductrServerTest(connectProc)

@@ -17,10 +17,10 @@ import ./helpers
 suite "Future":
   asyncTest "anyCompleted must complete with first completed future":
     proc fut1() {.async.} =
-      await sleepAsync(100.milliseconds)
+      await sleepAsync(50.milliseconds)

     proc fut2() {.async.} =
-      await sleepAsync(200.milliseconds)
+      await sleepAsync(100.milliseconds)

     proc fut3() {.async.} =
       raise newException(CatchableError, "fut3")

@@ -55,10 +55,10 @@ suite "Future":
   asyncTest "anyCompleted with timeout":
     proc fut1() {.async.} =
-      await sleepAsync(100.milliseconds)
+      await sleepAsync(50.milliseconds)

     proc fut2() {.async.} =
-      await sleepAsync(200.milliseconds)
+      await sleepAsync(100.milliseconds)

     proc fut3() {.async: (raises: [ValueError]).} =
       # fut3 intentionally specifies raised ValueError

@@ -32,7 +32,7 @@ suite "checkUntilTimeout helpers":
     var a = 1
     let b = 2
     proc makeConditionTrueLater() {.async.} =
-      await sleepAsync(100.milliseconds)
+      await sleepAsync(50.milliseconds)
       a = 2

     asyncSpawn makeConditionTrueLater()

@@ -57,7 +57,7 @@ suite "checkUntilTimeout helpers":
     var a = 1
     let b = 2
    proc makeConditionTrueLater() {.async.} =
-      await sleepAsync(100.milliseconds)
+      await sleepAsync(50.milliseconds)
       a = 2

     asyncSpawn makeConditionTrueLater()

@@ -241,7 +241,7 @@ suite "Hole Punching":
       switchAux.peerInfo.peerId, switchAux.peerInfo.addrs
     )

-    await sleepAsync(200.millis)
+    await sleepAsync(100.millis)

     await privatePeerSwitch1.connect(
       switchAux2.peerInfo.peerId, switchAux2.peerInfo.addrs

@@ -270,7 +270,7 @@ suite "Hole Punching":
     privatePeerSwitch2.connectStub = rcvConnectStub

     # wait for hole punching to finish in the background
-    await sleepAsync(600.millis)
+    await sleepAsync(300.millis)

     await allFuturesThrowing(
       privatePeerSwitch1.stop(),

@@ -1,4 +1,5 @@
-{.used.}
+when defined(linux) and defined(amd64):
+  {.used.}

 # Nim-Libp2p
 # Copyright (c) 2023 Status Research & Development GmbH

@@ -2,11 +2,14 @@

 import helpers, commoninterop
 import ../libp2p
+import ../libp2p/autotls/service
 import ../libp2p/crypto/crypto, ../libp2p/protocols/connectivity/relay/relay

 proc switchMplexCreator(
     ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
-    prov: TransportProvider = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
+    prov: TransportProvider = proc(
+      upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
+    ): Transport =
       TcpTransport.new({}, upgr),
     relay: Relay = Relay.new(circuitRelayV1 = true),
 ): Switch {.raises: [LPError].} =

@@ -29,7 +32,9 @@ proc switchMplexCreator(
 proc switchYamuxCreator(
     ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
-    prov: TransportProvider = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
+    prov: TransportProvider = proc(
+      upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
+    ): Transport =
       TcpTransport.new({}, upgr),
     relay: Relay = Relay.new(circuitRelayV1 = true),
 ): Switch {.raises: [LPError].} =

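Note: this TransportProvider change recurs throughout the comparison: provider procs now also receive the switch's AutotlsService, so that transports which need certificates can obtain them. As a standalone sketch of the new shape (types as imported in this file; a transport that does not use certificates simply ignores the parameter):

let provider: TransportProvider = proc(
    upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
): Transport =
  TcpTransport.new({}, upgr) # autotls unused by plain TCP
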
@@ -556,7 +556,7 @@ suite "Mplex":
       listenJob.complete()

     await mplexListen.handle()
-    await sleepAsync(1.seconds) # give chronos some slack to process things
+    await sleepAsync(500.millis) # give chronos some slack to process things
     await mplexListen.close()
   except CancelledError as exc:
     raise exc

@@ -11,7 +11,7 @@

 import
   testvarint, testconnection, testbridgestream, testminprotobuf, teststreamseq,
-  testsemaphore, testheartbeat, testfuture
+  testsemaphore, testheartbeat, testfuture, testzeroqueue, testbytesview

 import testminasn1, testrsa, testecnist, tested25519, testsecp256k1, testcrypto

@@ -54,5 +54,5 @@ suite "PeerID Auth":
     doAssert bearer.token.len > 0

     let (_, responseWithBearer) =
-      await client.send(parseUri(AuthPeerURL), peerInfo, payload, bearer)
+      await client.send(parseUri(AuthPeerURL), peerInfo, payload, Opt.some(bearer))
     check responseWithBearer.status != HttpPeerAuthFailed

@@ -16,18 +16,29 @@ import
 import ./helpers

 proc createSwitch(
-    isServer: bool = false, useMplex: bool = false, useYamux: bool = false
+    isServer: bool = false,
+    useQuic: bool = false,
+    useMplex: bool = false,
+    useYamux: bool = false,
 ): Switch =
-  var builder = SwitchBuilder
-    .new()
-    .withRng(newRng())
-    .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
-    .withTcpTransport()
-    .withNoise()
-  if useMplex:
-    builder = builder.withMplex()
-  if useYamux:
-    builder = builder.withYamux()
+  var builder = SwitchBuilder.new()
+  builder = builder.withRng(newRng()).withNoise()
+
+  if useQuic:
+    builder = builder.withQuicTransport().withAddresses(
+      @[MultiAddress.init("/ip4/127.0.0.1/udp/0/quic-v1").tryGet()]
+    )
+  else:
+    builder = builder.withTcpTransport().withAddresses(
+      @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+    )
+
+  if useMplex:
+    builder = builder.withMplex()
+  elif useYamux:
+    builder = builder.withYamux()
+  else:
+    raiseAssert "must use mplex or yamux"

   var switch = builder.build()

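Note: both call forms of the reworked helper appear in the tests below, e.g.:

let server = createSwitch(isServer = true, useQuic = true) # QUIC transport
let client = createSwitch(useYamux = true) # TCP transport with Yamux
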
@@ -43,13 +54,12 @@ proc runTest(server: Switch, client: Switch) {.async.} =

   await server.start()
   await client.start()
-
   defer:
     await client.stop()
     await server.stop()

   let conn = await client.dial(server.peerInfo.peerId, server.peerInfo.addrs, PerfCodec)
-  var perfClient = PerfClient.new()
+  let perfClient = PerfClient.new()
   discard await perfClient.perf(conn, bytesToUpload, bytesToDownload)

   let stats = perfClient.currentStats()

@@ -58,10 +68,52 @@ proc runTest(server: Switch, client: Switch) {.async.} =
     stats.uploadBytes == bytesToUpload
     stats.downloadBytes == bytesToDownload

+proc runTestWithException(server: Switch, client: Switch) {.async.} =
+  const
+    bytesToUpload = 1.uint64
+    bytesToDownload = 10000000000.uint64
+    # use a large download request, which will make perf execute for longer,
+    # giving us a chance to stop it
+
+  await server.start()
+  await client.start()
+  defer:
+    await client.stop()
+    await server.stop()
+
+  let conn = await client.dial(server.peerInfo.peerId, server.peerInfo.addrs, PerfCodec)
+  let perfClient = PerfClient.new()
+  let perfFut = perfClient.perf(conn, bytesToUpload, bytesToDownload)
+
+  # after some time upload should be finished and download should be ongoing
+  await sleepAsync(200.milliseconds)
+  var stats = perfClient.currentStats()
+  check:
+    stats.isFinal == false
+    stats.uploadBytes == bytesToUpload
+    stats.downloadBytes > 0
+
+  perfFut.cancel() # cancelling the future will raise an exception in perfClient
+  await sleepAsync(10.milliseconds)
+
+  # after cancelling perf, stats must indicate that it is the final one
+  stats = perfClient.currentStats()
+  check:
+    stats.isFinal == true
+    stats.uploadBytes == bytesToUpload
+    stats.downloadBytes > 0
+    stats.downloadBytes < bytesToDownload # download must not be completed

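Note: the cancellation flow exercised by runTestWithException follows the usual chronos pattern: cancelling a pending Future is delivered as CancelledError at the task's current await point. A minimal self-contained sketch of that pattern (generic chronos code, not part of this diff; it assumes the chronos version in use still exposes Future.cancel, as the tests here do):

import chronos

proc slowTask() {.async.} =
  try:
    await sleepAsync(10.seconds)
  except CancelledError:
    # cancel() surfaces here, giving the task a chance to clean up
    echo "task cancelled"
    raise

proc main() {.async.} =
  let fut = slowTask()
  await sleepAsync(10.milliseconds)
  fut.cancel() # request cancellation of the pending future
  await sleepAsync(10.milliseconds)
  doAssert fut.cancelled()

waitFor main()
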
suite "Perf protocol":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "quic":
|
||||
return # nim-libp2p#1482: currently it does not work with quic
|
||||
let server = createSwitch(isServer = true, useQuic = true)
|
||||
let client = createSwitch(useQuic = true)
|
||||
await runTest(server, client)
|
||||
|
||||
asyncTest "tcp::yamux":
|
||||
let server = createSwitch(isServer = true, useYamux = true)
|
||||
let client = createSwitch(useYamux = true)
|
||||
@@ -72,40 +124,12 @@ suite "Perf protocol":
     let client = createSwitch(useMplex = true)
     await runTest(server, client)

-  asyncTest "perf with exception":
+  asyncTest "perf with exception::yamux":
+    let server = createSwitch(isServer = true, useYamux = true)
+    let client = createSwitch(useYamux = true)
+    await runTestWithException(server, client)
+
+  asyncTest "perf with exception::mplex":
     let server = createSwitch(isServer = true, useMplex = true)
     let client = createSwitch(useMplex = true)
-
-    await server.start()
-    await client.start()
-
-    defer:
-      await client.stop()
-      await server.stop()
-
-    let conn =
-      await client.dial(server.peerInfo.peerId, server.peerInfo.addrs, PerfCodec)
-    var perfClient = PerfClient.new()
-    var perfFut: Future[Duration]
-    try:
-      # start perf future with large download request
-      # this will make perf execute for longer so we can cancel it
-      perfFut = perfClient.perf(conn, 1.uint64, 1000000000000.uint64)
-    except CatchableError:
-      discard
-
-    # after some time upload should be finished
-    await sleepAsync(50.milliseconds)
-    var stats = perfClient.currentStats()
-    check:
-      stats.isFinal == false
-      stats.uploadBytes == 1
-
-    perfFut.cancel() # cancelling future will raise exception
-    await sleepAsync(50.milliseconds)
-
-    # after cancelling perf, stats must indicate that it is final one
-    stats = perfClient.currentStats()
-    check:
-      stats.isFinal == true
-      stats.uploadBytes == 1
+    await runTestWithException(server, client)

@@ -14,6 +14,7 @@ import chronos
 import stew/byteutils
 import
   ../libp2p/[
+    autotls/service,
     errors,
     dial,
     switch,

@@ -994,7 +995,9 @@ suite "Switch":
       .withRng(crypto.newRng())
       .withMplex()
       .withTransport(
-        proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
+        proc(
+            upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
+        ): Transport =
           WsTransport.new(upgr)
       )
       .withNameResolver(resolver)

@@ -1007,7 +1010,9 @@ suite "Switch":
       .withRng(crypto.newRng())
       .withMplex()
       .withTransport(
-        proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
+        proc(
+            upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
+        ): Transport =
           WsTransport.new(upgr)
       )
       .withTcpTransport()

115
tests/testzeroqueue.nim
Normal file
@@ -0,0 +1,115 @@
+{.used.}
+
+# Nim-Libp2p
+# Copyright (c) 2025 Status Research & Development GmbH
+# Licensed under either of
+# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+# * MIT license ([LICENSE-MIT](LICENSE-MIT))
+# at your option.
+# This file may not be copied, modified, or distributed except according to
+# those terms.
+
+import unittest2
+import ../libp2p/utils/zeroqueue
+
+proc toSeq(p: pointer, length: int): seq[byte] =
+  var res = newSeq[byte](length)
+  copyMem(res[0].addr, p, length)
+  return res
+
+suite "ZeroQueue":
+  test "push-pop":
+    var q: ZeroQueue
+    check q.len() == 0
+    check q.isEmpty()
+    check q.popChunkSeq(1).len == 0 # pop empty seq when queue is empty
+
+    q.push(@[1'u8, 2, 3])
+    q.push(@[4'u8, 5])
+    check q.len() == 5
+    check not q.isEmpty()
+
+    check q.popChunkSeq(3) == @[1'u8, 2, 3] # pop exactly the size of the chunk
+    check q.popChunkSeq(1) == @[4'u8] # pop less than the size of the chunk
+    check q.popChunkSeq(5) == @[5'u8] # pop more than the size of the chunk
+    check q.isEmpty()
+
+    # should not push empty seq
+    q.push(@[])
+    q.push(@[])
+    check q.isEmpty()
+
+  test "clear":
+    var q: ZeroQueue
+    q.push(@[1'u8, 2, 3])
+    check not q.isEmpty()
+    q.clear()
+    check q.isEmpty()
+    check q.len() == 0
+
+  test "consumeTo":
+    var q: ZeroQueue
+    let nbytes = 20
+    var pbytes = alloc(nbytes)
+    defer:
+      dealloc(pbytes)
+
+    # consumeTo: on empty queue
+    check q.consumeTo(pbytes, nbytes) == 0
+
+    # consumeTo: emptying whole queue (multiple pushes)
+    q.push(@[1'u8, 2, 3])
+    q.push(@[4'u8, 5])
+    q.push(@[6'u8, 7])
+    check q.consumeTo(pbytes, nbytes) == 7
+    check toSeq(pbytes, 7) == @[1'u8, 2, 3, 4, 5, 6, 7]
+    check q.isEmpty()
+
+    # consumeTo: consuming one chunk of data in two steps
+    q.push(@[1'u8, 2, 3])
+    # first consume
+    check q.consumeTo(pbytes, 1) == 1
+    check toSeq(pbytes, 1) == @[1'u8]
+    check q.len() == 2
+    # second consume
+    check q.consumeTo(pbytes, nbytes) == 2
+    check toSeq(pbytes, 2) == @[2'u8, 3]
+    check q.isEmpty()
+
+    # consumeTo: consuming multiple chunks of data in two steps
+    q.clear()
+    q.push(@[4'u8, 5])
+    q.push(@[1'u8, 2, 3])
+    # first consume
+    check q.consumeTo(pbytes, 3) == 3
+    check toSeq(pbytes, 3) == @[4'u8, 5, 1]
+    check q.len() == 2
+    # second consume
+    check q.consumeTo(pbytes, nbytes) == 2
+    check toSeq(pbytes, 2) == @[2'u8, 3]
+    check q.isEmpty()
+
+    # consumeTo: partially consume a big push multiple times
+    q.clear()
+    q.push(newSeq[byte](20))
+    for i in 1 .. 10:
+      check q.consumeTo(pbytes, 2) == 2
+    check q.isEmpty()
+    check q.consumeTo(pbytes, 2) == 0
+
+    # consumeTo: partially consuming while pushing
+    q.push(@[1'u8, 2, 3])
+    check q.consumeTo(pbytes, 2) == 2
+    check toSeq(pbytes, 2) == @[1'u8, 2]
+    q.push(@[1'u8, 2, 3])
+    check q.consumeTo(pbytes, 2) == 2
+    check toSeq(pbytes, 2) == @[3'u8, 1]
+    q.push(@[1'u8, 2, 3])
+    check q.consumeTo(pbytes, 2) == 2
+    check toSeq(pbytes, 2) == @[2'u8, 3]
+    check q.consumeTo(pbytes, 2) == 2
+    check toSeq(pbytes, 2) == @[1'u8, 2]
+    check q.consumeTo(pbytes, 2) == 1
+    check toSeq(pbytes, 1) == @[3'u8]
+    check q.isEmpty()

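Note: to make the contract above concrete, here is a deliberately simple queue with the same observable behaviour as these checks; the real libp2p/utils/zeroqueue is the zero-copy implementation, while this sketch copies freely and exists only to illustrate the push/popChunkSeq/consumeTo semantics:

import std/deques

type ZeroQueue = object
  chunks: Deque[seq[byte]]

proc push(q: var ZeroQueue, data: seq[byte]) =
  if data.len > 0: # empty pushes are ignored
    q.chunks.addLast(data)

func len(q: ZeroQueue): int =
  for c in q.chunks:
    result += c.len

func isEmpty(q: ZeroQueue): bool =
  q.chunks.len == 0

proc clear(q: var ZeroQueue) =
  q.chunks.clear()

proc popChunkSeq(q: var ZeroQueue, maxBytes: int): seq[byte] =
  # returns at most maxBytes, taken from the front chunk only
  if q.chunks.len == 0:
    return @[]
  let chunk = q.chunks.popFirst()
  if chunk.len <= maxBytes:
    return chunk
  result = chunk[0 ..< maxBytes]
  q.chunks.addFirst(chunk[maxBytes .. ^1])

proc consumeTo(q: var ZeroQueue, pbytes: pointer, nbytes: int): int =
  # copies up to nbytes into pbytes, crossing chunk boundaries
  let dst = cast[ptr UncheckedArray[byte]](pbytes)
  while result < nbytes and not q.isEmpty():
    let piece = q.popChunkSeq(nbytes - result)
    copyMem(dst[result].addr, piece[0].unsafeAddr, piece.len)
    result += piece.len
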