Mirror of https://github.com/vacp2p/nim-libp2p.git (synced 2026-01-10 11:48:15 -05:00)
Compare commits
25 Commits
autotls-ma ... fix-quic-a
d4eccd1259
7c121323c0
7e04051b0a
b80e950456
3fd08d169c
f73c293992
b71285f0ae
4d94892eb0
3ecb1744ce
2f9c3fb3e2
2609c270b8
48b3e34cd3
abb2c43667
d1cfbb35d3
38a630eee0
be1a2023ce
021d0c1700
f49cd377ce
fc80840784
7742d06a58
e0ea1d48a4
f028ad8c12
9c153c822b
d803352bd6
2eafac47e8
@@ -1,7 +1,7 @@
 mode = ScriptMode.Verbose
 
 packageName = "libp2p"
-version = "1.10.1"
+version = "1.11.0"
 author = "Status Research & Development GmbH"
 description = "LibP2P implementation"
 license = "MIT"
@@ -10,7 +10,8 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
 requires "nim >= 1.6.0",
   "nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
   "chronicles >= 0.10.3 & < 0.11.0", "chronos >= 4.0.4", "metrics", "secp256k1",
-  "stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.7", "bio",
+  "stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "bio",
+  "https://github.com/vacp2p/nim-quic.git#62f6ca38b6363a47e1ba43643e25cca7398bf605",
+  "https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"
 
 let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
@@ -1,4 +1,4 @@
-import options, base64, sequtils, strutils, json
+import options, sequtils, strutils, json, uri
 from times import DateTime, parse
 import chronos/apps/http/httpclient, jwt, results, bearssl/pem
 
@@ -19,8 +19,10 @@ const
   DefaultRandStringSize = 256
   ACMEHttpHeaders = [("Content-Type", "application/jose+json")]
 
+type Nonce* = string
+type Authorization* = string
+type Domain* = string
 type Kid* = string
-type Nonce* = string
 
 type ACMEDirectory* = object
   newNonce*: string
@@ -28,9 +30,9 @@ type ACMEDirectory* = object
   newAccount*: string
 
 type ACMEApi* = ref object of RootObj
-  directory: ACMEDirectory
+  directory: Opt[ACMEDirectory]
   session: HttpSessionRef
-  acmeServerURL*: string
+  acmeServerURL*: Uri
 
 type HTTPResponse* = object
   body*: JsonNode
@@ -49,7 +51,7 @@ type ACMERequestType = enum
 type ACMERequestHeader = object
   alg: string
   typ: string
-  nonce: string
+  nonce: Nonce
   url: string
   case kind: ACMERequestType
   of ACMEJwkRequest:
@@ -57,14 +59,16 @@ type ACMERequestHeader = object
   of ACMEKidRequest:
     kid: Kid
 
+type Email = string
+
 type ACMERegisterRequest* = object
   termsOfServiceAgreed: bool
-  contact: seq[string]
+  contact: seq[Email]
 
 type ACMEAccountStatus = enum
-  valid
-  deactivated
-  revoked
+  valid = "valid"
+  deactivated = "deactivated"
+  revoked = "revoked"
 
 type ACMERegisterResponseBody = object
   status*: ACMEAccountStatus
@@ -74,16 +78,30 @@ type ACMERegisterResponse* = object
   status*: ACMEAccountStatus
 
 type ACMEChallengeStatus* {.pure.} = enum
-  pending = "pending"
-  processing = "processing"
-  valid = "valid"
-  invalid = "invalid"
+  PENDING = "pending"
+  PROCESSING = "processing"
+  VALID = "valid"
+  INVALID = "invalid"
 
-type ACMEChallenge = object
+type ACMEOrderStatus* {.pure.} = enum
+  PENDING = "pending"
+  READY = "ready"
+  PROCESSING = "processing"
+  VALID = "valid"
+  INVALID = "invalid"
+
+type ACMEChallengeType* {.pure.} = enum
+  DNS01 = "dns-01"
+  HTTP01 = "http-01"
+  TLSALPN01 = "tls-alpn-01"
+
+type ACMEChallengeToken* = string
+
+type ACMEChallenge* = object
   url*: string
-  `type`*: string
+  `type`*: ACMEChallengeType
   status*: ACMEChallengeStatus
-  token*: string
+  token*: ACMEChallengeToken
 
 type ACMEChallengeIdentifier = object
   `type`: string
@@ -93,33 +111,26 @@ type ACMEChallengeRequest = object
   identifiers: seq[ACMEChallengeIdentifier]
 
 type ACMEChallengeResponseBody = object
-  status: ACMEChallengeStatus
-  authorizations: seq[string]
+  status: ACMEOrderStatus
+  authorizations: seq[Authorization]
   finalize: string
 
 type ACMEChallengeResponse* = object
-  status*: ACMEChallengeStatus
-  authorizations*: seq[string]
+  status*: ACMEOrderStatus
+  authorizations*: seq[Authorization]
   finalize*: string
-  orderURL*: string
+  order*: string
 
 type ACMEChallengeResponseWrapper* = object
-  finalizeURL*: string
-  orderURL*: string
+  finalize*: string
+  order*: string
   dns01*: ACMEChallenge
 
 type ACMEAuthorizationsResponse* = object
   challenges*: seq[ACMEChallenge]
 
 type ACMECompletedResponse* = object
-  checkURL: string
-
-type ACMEOrderStatus* {.pure.} = enum
-  pending = "pending"
-  ready = "ready"
-  processing = "processing"
-  valid = "valid"
-  invalid = "invalid"
+  url: string
 
 type ACMECheckKind* = enum
   ACMEOrderCheck
@@ -141,8 +152,8 @@ type ACMEOrderResponse* = object
   expires: string
 
 type ACMECertificateResponse* = object
-  rawCertificate: string
-  certificateExpiry: DateTime
+  rawCertificate*: string
+  certificateExpiry*: DateTime
 
 template handleError*(msg: string, body: untyped): untyped =
   try:
@@ -161,41 +172,48 @@ template handleError*(msg: string, body: untyped): untyped =
     raise newException(ACMEError, msg & ": Unexpected error", exc)
 
 method post*(
-    self: ACMEApi, url: string, payload: string
+    self: ACMEApi, uri: Uri, payload: string
 ): Future[HTTPResponse] {.
   async: (raises: [ACMEError, HttpError, CancelledError]), base
 .}
 
 method get*(
-    self: ACMEApi, url: string
+    self: ACMEApi, uri: Uri
 ): Future[HTTPResponse] {.
   async: (raises: [ACMEError, HttpError, CancelledError]), base
 .}
 
 proc new*(
-    T: typedesc[ACMEApi], acmeServerURL: string = LetsEncryptURL
-): Future[ACMEApi] {.async: (raises: [ACMEError, CancelledError]).} =
+    T: typedesc[ACMEApi], acmeServerURL: Uri = parseUri(LetsEncryptURL)
+): ACMEApi =
   let session = HttpSessionRef.new()
-  let directory = handleError("new API"):
-    let rawResponse =
-      await HttpClientRequestRef.get(session, acmeServerURL & "/directory").get().send()
-    let body = await rawResponse.getResponseBody()
-    body.to(ACMEDirectory)
-
-  ACMEApi(session: session, directory: directory, acmeServerURL: acmeServerURL)
+  ACMEApi(
+    session: session, directory: Opt.none(ACMEDirectory), acmeServerURL: acmeServerURL
+  )
+
+proc getDirectory(
+    self: ACMEApi
+): Future[ACMEDirectory] {.async: (raises: [ACMEError, CancelledError]).} =
+  handleError("getDirectory"):
+    self.directory.valueOr:
+      let acmeResponse = await self.get(self.acmeServerURL / "directory")
+      let directory = acmeResponse.body.to(ACMEDirectory)
+      self.directory = Opt.some(directory)
+      directory
 
 method requestNonce*(
     self: ACMEApi
 ): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]), base.} =
   handleError("requestNonce"):
-    let acmeResponse = await self.get(self.directory.newNonce)
+    let acmeResponse = await self.get(parseUri((await self.getDirectory()).newNonce))
     Nonce(acmeResponse.headers.keyOrError("Replay-Nonce"))
 
 # TODO: save n and e in account so we don't have to recalculate every time
 proc acmeHeader(
-    self: ACMEApi, url: string, key: KeyPair, needsJwk: bool, kid: Opt[Kid]
+    self: ACMEApi, uri: Uri, key: KeyPair, needsJwk: bool, kid: Opt[Kid]
 ): Future[ACMERequestHeader] {.async: (raises: [ACMEError, CancelledError]).} =
-  if not needsJwk and kid.isNone:
+  if not needsJwk and kid.isNone():
     raise newException(ACMEError, "kid not set")
 
   if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
@@ -211,7 +229,7 @@ proc acmeHeader(
       alg: Alg,
       typ: "JWT",
       nonce: newNonce,
-      url: url,
+      url: $uri,
       jwk: JWK(kty: "RSA", n: base64UrlEncode(nArray), e: base64UrlEncode(eArray)),
     )
   else:
@@ -220,34 +238,34 @@ proc acmeHeader(
       alg: Alg,
       typ: "JWT",
       nonce: newNonce,
-      url: url,
+      url: $uri,
       kid: kid.get(),
     )
 
 method post*(
-    self: ACMEApi, url: string, payload: string
+    self: ACMEApi, uri: Uri, payload: string
 ): Future[HTTPResponse] {.
   async: (raises: [ACMEError, HttpError, CancelledError]), base
 .} =
   let rawResponse = await HttpClientRequestRef
-    .post(self.session, url, body = payload, headers = ACMEHttpHeaders)
+    .post(self.session, $uri, body = payload, headers = ACMEHttpHeaders)
     .get()
     .send()
   let body = await rawResponse.getResponseBody()
   HTTPResponse(body: body, headers: rawResponse.headers)
 
 method get*(
-    self: ACMEApi, url: string
+    self: ACMEApi, uri: Uri
 ): Future[HTTPResponse] {.
   async: (raises: [ACMEError, HttpError, CancelledError]), base
 .} =
-  let rawResponse = await HttpClientRequestRef.get(self.session, url).get().send()
+  let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
   let body = await rawResponse.getResponseBody()
   HTTPResponse(body: body, headers: rawResponse.headers)
 
 proc createSignedAcmeRequest(
     self: ACMEApi,
-    url: string,
+    uri: Uri,
     payload: auto,
     key: KeyPair,
     needsJwk: bool = false,
@@ -256,7 +274,7 @@ proc createSignedAcmeRequest(
   if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
     raise newException(ACMEError, "Unsupported signing key type")
 
-  let acmeHeader = await self.acmeHeader(url, key, needsJwk, kid)
+  let acmeHeader = await self.acmeHeader(uri, key, needsJwk, kid)
   handleError("createSignedAcmeRequest"):
     var token = toJWT(%*{"header": acmeHeader, "claims": payload})
     let derPrivKey = key.seckey.rsakey.getBytes.get
@@ -270,9 +288,13 @@ proc requestRegister*(
   let registerRequest = ACMERegisterRequest(termsOfServiceAgreed: true)
   handleError("acmeRegister"):
     let payload = await self.createSignedAcmeRequest(
-      self.directory.newAccount, registerRequest, key, needsJwk = true
+      parseUri((await self.getDirectory()).newAccount),
+      registerRequest,
+      key,
+      needsJwk = true,
     )
-    let acmeResponse = await self.post(self.directory.newAccount, payload)
+    let acmeResponse =
+      await self.post(parseUri((await self.getDirectory()).newAccount), payload)
     let acmeResponseBody = acmeResponse.body.to(ACMERegisterResponseBody)
 
     ACMERegisterResponse(
@@ -280,7 +302,7 @@ proc requestRegister*(
   )
 
 proc requestNewOrder*(
-    self: ACMEApi, domains: seq[string], key: KeyPair, kid: Kid
+    self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
 ): Future[ACMEChallengeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
   # request challenge from ACME server
   let orderRequest = ACMEChallengeRequest(
@@ -288,44 +310,56 @@ proc requestNewOrder*(
   )
   handleError("requestNewOrder"):
     let payload = await self.createSignedAcmeRequest(
-      self.directory.newOrder, orderRequest, key, kid = Opt.some(kid)
+      parseUri((await self.getDirectory()).newOrder),
+      orderRequest,
+      key,
+      kid = Opt.some(kid),
    )
-    let acmeResponse = await self.post(self.directory.newOrder, payload)
+
+    let acmeResponse =
+      await self.post(parseUri((await self.getDirectory()).newOrder), payload)
     let challengeResponseBody = acmeResponse.body.to(ACMEChallengeResponseBody)
-    if challengeResponseBody.authorizations.len() == 0:
+    if challengeResponseBody.authorizations.len == 0:
      raise newException(ACMEError, "Authorizations field is empty")
     ACMEChallengeResponse(
       status: challengeResponseBody.status,
       authorizations: challengeResponseBody.authorizations,
       finalize: challengeResponseBody.finalize,
-      orderURL: acmeResponse.headers.keyOrError("location"),
+      order: acmeResponse.headers.keyOrError("location"),
     )
 
 proc requestAuthorizations*(
-    self: ACMEApi, authorizations: seq[string], key: KeyPair, kid: Kid
+    self: ACMEApi, authorizations: seq[Authorization], key: KeyPair, kid: Kid
 ): Future[ACMEAuthorizationsResponse] {.async: (raises: [ACMEError, CancelledError]).} =
   handleError("requestAuthorizations"):
     doAssert authorizations.len > 0
-    let acmeResponse = await self.get(authorizations[0])
+    let acmeResponse = await self.get(parseUri(authorizations[0]))
     acmeResponse.body.to(ACMEAuthorizationsResponse)
 
 proc requestChallenge*(
-    self: ACMEApi, domains: seq[string], key: KeyPair, kid: Kid
+    self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
 ): Future[ACMEChallengeResponseWrapper] {.async: (raises: [ACMEError, CancelledError]).} =
-  let challengeResponse = await self.requestNewOrder(domains, key, kid)
+  let orderResponse = await self.requestNewOrder(domains, key, kid)
+  if orderResponse.status != ACMEOrderStatus.PENDING and
+      orderResponse.status != ACMEOrderStatus.READY:
+    # ready is a valid status when renewing certs before expiry
+    raise newException(ACMEError, "Invalid new order status: " & $orderResponse.status)
 
   let authorizationsResponse =
-    await self.requestAuthorizations(challengeResponse.authorizations, key, kid)
+    await self.requestAuthorizations(orderResponse.authorizations, key, kid)
   if authorizationsResponse.challenges.len == 0:
     raise newException(ACMEError, "No challenges received")
 
   return ACMEChallengeResponseWrapper(
-    finalizeURL: challengeResponse.finalize,
-    orderURL: challengeResponse.orderURL,
-    dns01: authorizationsResponse.challenges.filterIt(it.`type` == "dns-01")[0],
+    finalize: orderResponse.finalize,
+    order: orderResponse.order,
+    dns01: authorizationsResponse.challenges.filterIt(
+      it.`type` == ACMEChallengeType.DNS01
+    )[0],
+    # getting the first element is safe since we checked that authorizationsResponse.challenges.len != 0
  )
 
 proc requestCheck*(
-    self: ACMEApi, checkURL: string, checkKind: ACMECheckKind, key: KeyPair, kid: Kid
+    self: ACMEApi, checkURL: Uri, checkKind: ACMECheckKind, key: KeyPair, kid: Kid
 ): Future[ACMECheckResponse] {.async: (raises: [ACMEError, CancelledError]).} =
   handleError("requestCheck"):
     let acmeResponse = await self.get(checkURL)
@@ -359,10 +393,10 @@ proc requestCheck*(
         ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
       )
 
-proc requestCompleted*(
-    self: ACMEApi, chalURL: string, key: KeyPair, kid: Kid
+proc sendChallengeCompleted*(
+    self: ACMEApi, chalURL: Uri, key: KeyPair, kid: Kid
 ): Future[ACMECompletedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
-  handleError("requestCompleted (send notify)"):
+  handleError("sendChallengeCompleted"):
     let payload =
       await self.createSignedAcmeRequest(chalURL, %*{}, key, kid = Opt.some(kid))
     let acmeResponse = await self.post(chalURL, payload)
@@ -370,7 +404,7 @@ proc requestCompleted*(
 
 proc checkChallengeCompleted*(
     self: ACMEApi,
-    checkURL: string,
+    checkURL: Uri,
     key: KeyPair,
     kid: Kid,
     retries: int = DefaultChalCompletedRetries,
@@ -378,9 +412,9 @@ proc checkChallengeCompleted*(
   for i in 0 .. retries:
     let checkResponse = await self.requestCheck(checkURL, ACMEChallengeCheck, key, kid)
     case checkResponse.chalStatus
-    of ACMEChallengeStatus.pending:
+    of ACMEChallengeStatus.PENDING:
       await sleepAsync(checkResponse.retryAfter) # try again after some delay
-    of ACMEChallengeStatus.valid:
+    of ACMEChallengeStatus.VALID:
      return true
    else:
      raise newException(
@@ -392,42 +426,39 @@ proc checkChallengeCompleted*(
 
 proc completeChallenge*(
     self: ACMEApi,
-    chalURL: string,
+    chalURL: Uri,
     key: KeyPair,
     kid: Kid,
     retries: int = DefaultChalCompletedRetries,
 ): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
-  let completedResponse = await self.requestCompleted(chalURL, key, kid)
+  let completedResponse = await self.sendChallengeCompleted(chalURL, key, kid)
   # check until acme server is done (poll validation)
   return await self.checkChallengeCompleted(chalURL, key, kid, retries = retries)
 
 proc requestFinalize*(
-    self: ACMEApi, domain: string, finalizeURL: string, key: KeyPair, kid: Kid
+    self: ACMEApi, domain: Domain, finalize: Uri, key: KeyPair, kid: Kid
 ): Future[ACMEFinalizeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
-  let derCSR = createCSR(domain)
-  let b64CSR = base64.encode(derCSR.toSeq, safe = true)
-
   handleError("requestFinalize"):
     let payload = await self.createSignedAcmeRequest(
-      finalizeURL, %*{"csr": b64CSR}, key, kid = Opt.some(kid)
+      finalize, %*{"csr": createCSR(domain)}, key, kid = Opt.some(kid)
     )
-    let acmeResponse = await self.post(finalizeURL, payload)
+    let acmeResponse = await self.post(finalize, payload)
     # server responds with updated order response
     acmeResponse.body.to(ACMEFinalizeResponse)
 
 proc checkCertFinalized*(
     self: ACMEApi,
-    orderURL: string,
+    order: Uri,
     key: KeyPair,
     kid: Kid,
     retries: int = DefaultChalCompletedRetries,
 ): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
   for i in 0 .. retries:
-    let checkResponse = await self.requestCheck(orderURL, ACMEOrderCheck, key, kid)
+    let checkResponse = await self.requestCheck(order, ACMEOrderCheck, key, kid)
     case checkResponse.orderStatus
-    of ACMEOrderStatus.valid:
+    of ACMEOrderStatus.VALID:
       return true
-    of ACMEOrderStatus.processing:
+    of ACMEOrderStatus.PROCESSING:
       await sleepAsync(checkResponse.retryAfter) # try again after some delay
     else:
       raise newException(
@@ -441,28 +472,28 @@ proc checkCertFinalized*(
 
 proc certificateFinalized*(
     self: ACMEApi,
-    domain: string,
-    finalizeURL: string,
-    orderURL: string,
+    domain: Domain,
+    finalize: Uri,
+    order: Uri,
     key: KeyPair,
     kid: Kid,
     retries: int = DefaultFinalizeRetries,
 ): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
-  let finalizeResponse = await self.requestFinalize(domain, finalizeURL, key, kid)
+  let finalizeResponse = await self.requestFinalize(domain, finalize, key, kid)
   # keep checking order until cert is valid (done)
-  return await self.checkCertFinalized(orderURL, key, kid, retries = retries)
+  return await self.checkCertFinalized(order, key, kid, retries = retries)
 
 proc requestGetOrder*(
-    self: ACMEApi, orderURL: string
+    self: ACMEApi, order: Uri
 ): Future[ACMEOrderResponse] {.async: (raises: [ACMEError, CancelledError]).} =
   handleError("requestGetOrder"):
-    let acmeResponse = await self.get(orderURL)
+    let acmeResponse = await self.get(order)
     acmeResponse.body.to(ACMEOrderResponse)
 
 proc downloadCertificate*(
-    self: ACMEApi, orderURL: string
+    self: ACMEApi, order: Uri
 ): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
-  let orderResponse = await self.requestGetOrder(orderURL)
+  let orderResponse = await self.requestGetOrder(order)
 
   handleError("downloadCertificate"):
     let rawResponse = await HttpClientRequestRef
@@ -474,5 +505,5 @@ proc downloadCertificate*(
       certificateExpiry: parse(orderResponse.expires, "yyyy-MM-dd'T'HH:mm:ss'Z'"),
     )
 
-proc close*(self: ACMEApi): Future[void] {.async: (raises: [CancelledError]).} =
+proc close*(self: ACMEApi) {.async: (raises: [CancelledError]).} =
   await self.session.closeWait()
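The directory change above (ACMEDirectory to Opt[ACMEDirectory]) moves the directory fetch out of new and into getDirectory, which fetches on first use and caches the result. A minimal sketch of that lazy-caching pattern, assuming only the results package; Directory, Api, and fetchDirectory here are illustrative stand-ins, not the real types:

import results

type
  Directory = object
    newNonce: string
  Api = ref object
    directory: Opt[Directory]

proc fetchDirectory(): Directory =
  # stand-in for the HTTP GET of <acmeServerURL>/directory
  Directory(newNonce: "https://example.org/new-nonce")

proc getDirectory(api: Api): Directory =
  # return the cached value, or fetch and cache it on first use
  api.directory.valueOr:
    let d = fetchDirectory()
    api.directory = Opt.some(d)
    d

let api = Api(directory: Opt.none(Directory))
echo api.getDirectory().newNonce # first call fetches and caches
echo api.getDirectory().newNonce # second call is served from the cache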
libp2p/autotls/acme/client.nim (new file, 72 lines)
@@ -0,0 +1,72 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import uri
import chronos, results, bio

import ./api, ./utils
import ../../crypto/crypto
import ../../crypto/rsa

export api

type KeyAuthorization* = string

type ACMEClient* = object
  api: ACMEApi
  key*: KeyPair
  kid*: Kid

proc new*(
    T: typedesc[ACMEClient],
    api: Opt[ACMEApi] = Opt.none(ACMEApi),
    key: Opt[KeyPair] = Opt.none(KeyPair),
    rng: ref HmacDrbgContext = newRng(),
    acmeServerURL: Uri = parseUri(LetsEncryptURL),
): Future[T] {.async: (raises: [ACMEError, CancelledError]).} =
  let api = api.valueOr:
    ACMEApi.new()
  let key = key.valueOr:
    KeyPair.random(PKScheme.RSA, rng[]).get()
  let registerResponse = await api.requestRegister(key)
  T(api: api, key: key, kid: registerResponse.kid)

proc genKeyAuthorization*(self: ACMEClient, token: string): KeyAuthorization =
  base64UrlEncode(@(sha256.digest((token & "." & thumbprint(self.key)).toByteSeq).data))

proc getChallenge*(
    self: ACMEClient, domains: seq[api.Domain]
): Future[ACMEChallengeResponseWrapper] {.async: (raises: [ACMEError, CancelledError]).} =
  await self.api.requestChallenge(domains, self.key, self.kid)

proc getCertificate*(
    self: ACMEClient, domain: api.Domain, challenge: ACMEChallengeResponseWrapper
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  let chalURL = parseUri(challenge.dns01.url)
  let orderURL = parseUri(challenge.order)
  let finalizeURL = parseUri(challenge.finalize)
  discard await self.api.sendChallengeCompleted(chalURL, self.key, self.kid)

  let completed = await self.api.checkChallengeCompleted(chalURL, self.key, self.kid)
  if not completed:
    raise
      newException(ACMEError, "Failed to signal ACME server about challenge completion")

  let finalized = await self.api.certificateFinalized(
    domain, finalizeURL, orderURL, self.key, self.kid
  )
  if not finalized:
    raise newException(ACMEError, "Failed to finalize certificate for domain " & domain)

  await self.api.downloadCertificate(orderURL)

proc close*(self: ACMEClient) {.async: (raises: [CancelledError]).} =
  await self.api.close()
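genKeyAuthorization above hashes token & "." & thumbprint(key) and base64url-encodes the SHA-256 digest, which is the value published for the DNS-01 challenge (RFC 8555). A standalone sketch of that computation, assuming the nimcrypto package; the token and thumbprint strings are placeholders:

import base64, strutils
import nimcrypto/sha2

proc base64UrlEncode(data: openArray[byte]): string =
  # URL-safe alphabet with the padding stripped
  var encoded = base64.encode(data, safe = true)
  encoded.removeSuffix({'='})
  encoded

let token = "example-challenge-token"     # placeholder ACME challenge token
let thumbprint = "example-jwk-thumbprint" # placeholder RFC 7638 thumbprint
let digest = sha256.digest(token & "." & thumbprint)
echo base64UrlEncode(digest.data)         # value for the TXT record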
@@ -1,37 +1,39 @@
+import uri
 import chronos, chronos/apps/http/httpclient, json
 
 import ./api, ./utils
 
 export api
 
 type MockACMEApi* = ref object of ACMEApi
   parent*: ACMEApi
-  mockedHeaders*: HttpTable
-  mockedBody*: JsonNode
+  mockedResponses*: seq[HTTPResponse]
 
 proc new*(
     T: typedesc[MockACMEApi]
-): Future[MockACMEApi] {.async: (raises: [ACMEError, CancelledError]).} =
+): Future[T] {.async: (raises: [ACMEError, CancelledError]).} =
   let directory = ACMEDirectory(
     newNonce: LetsEncryptURL & "/new-nonce",
     newOrder: LetsEncryptURL & "/new-order",
     newAccount: LetsEncryptURL & "/new-account",
   )
   MockACMEApi(
-    session: HttpSessionRef.new(), directory: directory, acmeServerURL: LetsEncryptURL
+    session: HttpSessionRef.new(),
+    directory: Opt.some(directory),
+    acmeServerURL: parseUri(LetsEncryptURL),
   )
 
 method requestNonce*(
     self: MockACMEApi
 ): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
-  return self.acmeServerURL & "/acme/1234"
+  return $self.acmeServerURL & "/acme/1234"
 
 method post*(
-    self: MockACMEApi, url: string, payload: string
+    self: MockACMEApi, uri: Uri, payload: string
 ): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
-  HTTPResponse(body: self.mockedBody, headers: self.mockedHeaders)
+  result = self.mockedResponses[0]
+  self.mockedResponses.delete(0)
 
 method get*(
-    self: MockACMEApi, url: string
+    self: MockACMEApi, uri: Uri
 ): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
-  HTTPResponse(body: self.mockedBody, headers: self.mockedHeaders)
+  result = self.mockedResponses[0]
+  self.mockedResponses.delete(0)
@@ -1,6 +1,9 @@
 import base64, strutils, chronos/apps/http/httpclient, json
+import ../../errors
 import ../../transports/tls/certificate_ffi
 import ../../transports/tls/certificate
+import ../../crypto/crypto
+import ../../crypto/rsa
 
 type ACMEError* = object of LPError
 
@@ -16,12 +19,26 @@ proc base64UrlEncode*(data: seq[byte]): string =
   encoded.removeSuffix("=")
   return encoded
 
+proc thumbprint*(key: KeyPair): string =
+  doAssert key.seckey.scheme == PKScheme.RSA, "unsupported keytype"
+  let pubkey = key.pubkey.rsakey
+  let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
+  let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
+
+  let n = base64UrlEncode(nArray)
+  let e = base64UrlEncode(eArray)
+  let keyJson = %*{"e": e, "kty": "RSA", "n": n}
+  let digest = sha256.digest($keyJson)
+  return base64UrlEncode(@(digest.data))
+
 proc getResponseBody*(
     response: HttpClientResponseRef
 ): Future[JsonNode] {.async: (raises: [ACMEError, CancelledError]).} =
   try:
-    let responseBody = bytesToString(await response.getBodyBytes()).parseJson()
-    return responseBody
+    let bodyBytes = await response.getBodyBytes()
+    if bodyBytes.len > 0:
+      return bytesToString(bodyBytes).parseJson()
+    return %*{} # empty body
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
@@ -46,3 +63,5 @@ proc createCSR*(domain: string): string {.raises: [ACMEError].} =
 
   if cert_signing_req(domain.cstring, certKey, derCSR.addr) != CERT_SUCCESS:
     raise newException(ACMEError, "Failed to create CSR")
+
+  base64.encode(derCSR.toSeq, safe = true)
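The thumbprint proc above follows RFC 7638: hash the JWK JSON with its required members in lexicographic order ("e", "kty", "n"). std/json's %* preserves insertion order, which is why the object is built in exactly that order. A toy check with placeholder values, assuming nimcrypto:

import json
import nimcrypto/sha2

# RFC 7638 wants exactly these members, sorted, with no extra whitespace;
# $ on a JsonNode prints compactly in member order.
let keyJson = %*{"e": "AQAB", "kty": "RSA", "n": "placeholder-modulus"}
echo $keyJson                # {"e":"AQAB","kty":"RSA","n":"placeholder-modulus"}
echo sha256.digest($keyJson) # base64url-encode this digest to get the thumbprint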
libp2p/autotls/manager.nim (new file, 203 lines)
@@ -0,0 +1,203 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}
{.push public.}

import net, results, json, sequtils

import chronos/apps/http/httpclient, chronos, chronicles, bearssl/rand

import
  ./acme/client,
  ./utils,
  ../crypto/crypto,
  ../nameresolving/dnsresolver,
  ../peeridauth/client,
  ../peerinfo,
  ../utils/heartbeat,
  ../wire

logScope:
  topics = "libp2p autotls"

export LetsEncryptURL, AutoTLSError

const
  DefaultDnsServers* =
    @[
      initTAddress("1.1.1.1:53"),
      initTAddress("1.0.0.1:53"),
      initTAddress("[2606:4700:4700::1111]:53"),
    ]
  DefaultRenewCheckTime = 1.hours
  DefaultRenewBufferTime = 1.hours

  AutoTLSBroker* = "registration.libp2p.direct"
  AutoTLSDNSServer* = "libp2p.direct"
  HttpOk* = 200
  HttpCreated* = 201

type SigParam = object
  k: string
  v: seq[byte]

type AutoTLSManager* = ref object
  rng: ref HmacDrbgContext
  managerFut: Future[void]
  cert*: Opt[TLSCertificate]
  certExpiry*: Opt[Moment]
  certReady*: AsyncEvent
  acmeClient: Opt[ACMEClient]
  brokerClient: PeerIDAuthClient
  dnsResolver*: DnsResolver
  bearer*: Opt[BearerToken]
  renewCheckTime*: Duration
  renewBufferTime*: Duration
  peerInfo: Opt[PeerInfo]
  acmeServerURL: Uri
  ipAddress: Opt[IpAddress]

proc new*(
    T: typedesc[AutoTLSManager],
    rng: ref HmacDrbgContext = newRng(),
    acmeClient: Opt[ACMEClient] = Opt.none(ACMEClient),
    brokerClient: PeerIDAuthClient = PeerIDAuthClient.new(),
    dnsResolver: DnsResolver = DnsResolver.new(DefaultDnsServers),
    acmeServerURL: Uri = parseUri(LetsEncryptURL),
    ipAddress: Opt[IpAddress] = Opt.none(IpAddress),
    renewCheckTime: Duration = DefaultRenewCheckTime,
    renewBufferTime: Duration = DefaultRenewBufferTime,
): AutoTLSManager =
  T(
    rng: rng,
    managerFut: nil,
    cert: Opt.none(TLSCertificate),
    certExpiry: Opt.none(Moment),
    certReady: newAsyncEvent(),
    acmeClient: acmeClient,
    brokerClient: brokerClient,
    dnsResolver: dnsResolver,
    bearer: Opt.none(BearerToken),
    renewCheckTime: renewCheckTime,
    renewBufferTime: renewBufferTime,
    peerInfo: Opt.none(PeerInfo),
    acmeServerURL: acmeServerURL,
    ipAddress: ipAddress,
  )

proc getIpAddress(self: AutoTLSManager): IpAddress {.raises: [AutoTLSError].} =
  return self.ipAddress.valueOr:
    getPublicIPAddress()

method issueCertificate(
    self: AutoTLSManager
) {.base, async: (raises: [AutoTLSError, ACMEError, PeerIDAuthError, CancelledError]).} =
  trace "Issuing new certificate"

  assert self.peerInfo.isSome(), "Cannot issue new certificate: peerInfo not set"

  let peerInfo = self.peerInfo.get()

  # generate autotls domain string: "*.{peerID}.libp2p.direct"
  let base36PeerId = encodePeerId(peerInfo.peerId)
  let baseDomain = api.Domain(base36PeerId & "." & AutoTLSDNSServer)
  let domain = api.Domain("*." & baseDomain)

  let acmeClient = self.acmeClient.valueOr:
    raise newException(AutoTLSError, "Cannot find ACMEClient on manager")

  trace "Requesting ACME challenge"
  let dns01Challenge = await acmeClient.getChallenge(@[domain])
  let keyAuth = acmeClient.genKeyAuthorization(dns01Challenge.dns01.token)
  let strMultiaddresses: seq[string] = peerInfo.addrs.mapIt($it)
  let payload = %*{"value": keyAuth, "addresses": strMultiaddresses}
  let registrationURL = parseUri("https://" & AutoTLSBroker & "/v1/_acme-challenge")

  trace "Sending challenge to AutoTLS broker"
  let (bearer, response) =
    await self.brokerClient.send(registrationURL, peerInfo, payload, self.bearer)
  if self.bearer.isNone():
    # save bearer token for future
    self.bearer = Opt.some(bearer)
  if response.status != HttpOk:
    raise newException(
      AutoTLSError, "Failed to authenticate with AutoTLS Broker at " & AutoTLSBroker
    )

  debug "Waiting for DNS record to be set"
  let dnsSet =
    await checkDNSRecords(self.dnsResolver, self.getIpAddress(), baseDomain, keyAuth)
  if not dnsSet:
    raise newException(AutoTLSError, "DNS records not set")

  debug "Notifying challenge completion to ACME and downloading cert"
  let certResponse = await acmeClient.getCertificate(domain, dns01Challenge)

  trace "Installing certificate"
  try:
    self.cert = Opt.some(TLSCertificate.init(certResponse.rawCertificate))
    self.certExpiry = Opt.some(asMoment(certResponse.certificateExpiry))
  except TLSStreamProtocolError:
    raise newException(AutoTLSError, "Could not parse downloaded certificates")
  self.certReady.fire()

proc manageCertificate(
    self: AutoTLSManager
) {.async: (raises: [AutoTLSError, ACMEError, CancelledError]).} =
  debug "Registering ACME account"
  if self.acmeClient.isNone():
    self.acmeClient = Opt.some(await ACMEClient.new(acmeServerURL = self.acmeServerURL))

  heartbeat "Certificate Management", self.renewCheckTime:
    if self.cert.isNone() or self.certExpiry.isNone():
      try:
        await self.issueCertificate()
      except CancelledError as exc:
        raise exc
      except CatchableError as exc:
        error "Failed to issue certificate", err = exc.msg
        break

    # AutoTLSManager will renew the cert 1h before it expires
    let expiry = self.certExpiry.get
    let waitTime = expiry - Moment.now - self.renewBufferTime
    if waitTime <= self.renewBufferTime:
      try:
        await self.issueCertificate()
      except CancelledError as exc:
        raise exc
      except CatchableError as exc:
        error "Failed to renew certificate", err = exc.msg
        break

method start*(
    self: AutoTLSManager, peerInfo: PeerInfo
) {.base, async: (raises: [CancelledError]).} =
  if not self.managerFut.isNil():
    warn "Trying to start AutoTLSManager twice"
    return

  self.peerInfo = Opt.some(peerInfo)
  trace "Starting AutoTLS manager"
  self.managerFut = self.manageCertificate()

method stop*(self: AutoTLSManager) {.base, async: (raises: [CancelledError]).} =
  trace "AutoTLS stop"
  if self.managerFut.isNil():
    warn "AutoTLS manager not running"
    return

  await self.managerFut.cancelAndWait()
  self.managerFut = nil

  if self.acmeClient.isSome():
    await self.acmeClient.get.close()

  await self.brokerClient.close()
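A small sketch of the renewal rule in manageCertificate above: the heartbeat wakes every renewCheckTime and reissues once the remaining certificate lifetime falls within renewBufferTime. The times here are illustrative:

import chronos

let renewBufferTime = 1.hours
let expiry = Moment.now() + 30.minutes # pretend the cert expires in 30 minutes
let waitTime = expiry - Moment.now() - renewBufferTime
if waitTime <= renewBufferTime:
  echo "within the renew buffer: reissue the certificate"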
libp2p/autotls/utils.nim (new file, 116 lines)
@@ -0,0 +1,116 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}
{.push public.}

import net, strutils
from times import DateTime, toTime, toUnix

import chronos, stew/base36, chronicles

import
  ./acme/client,
  ../errors,
  ../peerid,
  ../multihash,
  ../cid,
  ../multicodec,
  ../nameresolving/dnsresolver

const
  DefaultDnsRetries = 10
  DefaultDnsRetryTime = 1.seconds

type AutoTLSError* = object of LPError

proc checkedGetPrimaryIPAddr*(): IpAddress {.raises: [AutoTLSError].} =
  # This is so that we don't need to catch Exceptions directly
  # since we support 1.6.16 and getPrimaryIPAddr before nim 2 didn't have explicit .raises. pragmas
  try:
    return getPrimaryIPAddr()
  except Exception as exc:
    raise newException(AutoTLSError, "Error while getting primary IP address", exc)

proc isIPv4*(ip: IpAddress): bool =
  ip.family == IpAddressFamily.IPv4

proc isPublic*(ip: IpAddress): bool {.raises: [AutoTLSError].} =
  let ip = $ip
  try:
    not (
      ip.startsWith("10.") or
      (ip.startsWith("172.") and parseInt(ip.split(".")[1]) in 16 .. 31) or
      ip.startsWith("192.168.") or ip.startsWith("127.") or ip.startsWith("169.254.")
    )
  except ValueError as exc:
    raise newException(AutoTLSError, "Failed to parse IP address", exc)

proc getPublicIPAddress*(): IpAddress {.raises: [AutoTLSError].} =
  try:
    let ip = checkedGetPrimaryIPAddr()
    if not ip.isIPv4():
      raise newException(AutoTLSError, "Host does not have an IPv4 address")
    if not ip.isPublic():
      raise newException(AutoTLSError, "Host does not have a public IPv4 address")
    return ip
  except AutoTLSError as exc:
    raise exc
  except CatchableError as exc:
    raise newException(
      AutoTLSError, "Unexpected error while getting primary IP address for host", exc
    )

proc asMoment*(dt: DateTime): Moment =
  let unixTime: int64 = dt.toTime.toUnix
  return Moment.init(unixTime, Second)

proc encodePeerId*(peerId: PeerId): string {.raises: [AutoTLSError].} =
  var mh: MultiHash
  let decodeResult = MultiHash.decode(peerId.data, mh)
  if decodeResult.isErr() or decodeResult.get() == -1:
    raise
      newException(AutoTLSError, "Failed to decode PeerId: invalid multihash format")

  let cidResult = Cid.init(CIDv1, multiCodec("libp2p-key"), mh)
  if cidResult.isErr():
    raise newException(AutoTLSError, "Failed to initialize CID from multihash")

  return Base36.encode(cidResult.get().data.buffer)

proc checkDNSRecords*(
    dnsResolver: DnsResolver,
    ipAddress: IpAddress,
    baseDomain: api.Domain,
    keyAuth: KeyAuthorization,
    retries: int = DefaultDnsRetries,
): Future[bool] {.async: (raises: [AutoTLSError, CancelledError]).} =
  # if my ip address is 100.10.10.3 then the ip4Domain will be:
  # 100-10-10-3.{peerIdBase36}.libp2p.direct
  # and acme challenge TXT domain will be:
  # _acme-challenge.{peerIdBase36}.libp2p.direct
  let dashedIpAddr = ($ipAddress).replace(".", "-")
  let acmeChalDomain = api.Domain("_acme-challenge." & baseDomain)
  let ip4Domain = api.Domain(dashedIpAddr & "." & baseDomain)

  var txt: seq[string]
  var ip4: seq[TransportAddress]
  for _ in 0 .. retries:
    txt = await dnsResolver.resolveTxt(acmeChalDomain)
    try:
      ip4 = await dnsResolver.resolveIp(ip4Domain, 0.Port)
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      error "Failed to resolve IP", description = exc.msg # retry
    if txt.len > 0 and txt[0] == keyAuth and ip4.len > 0:
      return true
    await sleepAsync(DefaultDnsRetryTime)

  return false
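For reference, the two names checkDNSRecords polls, built from an illustrative IP address and a placeholder base36 peer ID:

import strutils

let base36PeerId = "k51exampleid" # placeholder, produced by encodePeerId
let baseDomain = base36PeerId & ".libp2p.direct"
echo "_acme-challenge." & baseDomain                    # TXT record carrying keyAuth
echo "100.10.10.3".replace(".", "-") & "." & baseDomain # A record for the host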
@@ -15,7 +15,7 @@ runnableExamples:
 
 {.push raises: [].}
 
-import options, tables, chronos, chronicles, sequtils
+import options, tables, chronos, chronicles, sequtils, uri
 import
   switch,
   peerid,
@@ -30,6 +30,7 @@ import
   connmanager,
   upgrademngrs/muxedupgrade,
   observedaddrmanager,
+  autotls/manager,
   nameresolving/nameresolver,
   errors,
   utility
@@ -65,6 +66,7 @@ type
     nameResolver: NameResolver
     peerStoreCapacity: Opt[int]
     autonat: bool
+    autotls: AutoTLSManager
     circuitRelay: Relay
     rdv: RendezVous
     services: seq[Service]
@@ -252,6 +254,16 @@ proc withAutonat*(b: SwitchBuilder): SwitchBuilder =
   b.autonat = true
   b
 
+proc withAutotls*(
+    b: SwitchBuilder,
+    acmeServerURL: Uri = parseUri(LetsEncryptURL),
+    ipAddress = Opt.none(IpAddress),
+): SwitchBuilder {.public.} =
+  b.autotls = AutoTLSManager.new(
+    rng = b.rng, acmeServerURL = acmeServerURL, ipAddress = ipAddress
+  )
+  b
+
 proc withCircuitRelay*(b: SwitchBuilder, r: Relay = Relay.new()): SwitchBuilder =
   b.circuitRelay = r
   b
@@ -330,6 +342,7 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
     secureManagers = secureManagerInstances,
     connManager = connManager,
     ms = ms,
+    autotls = b.autotls,
     nameResolver = b.nameResolver,
     peerStore = peerStore,
     services = b.services,
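A hedged usage sketch for the new withAutotls option, following the SwitchBuilder chaining used elsewhere in nim-libp2p; the surrounding builder calls are the usual setup steps, not part of this diff:

import libp2p

let switch = SwitchBuilder
  .new()
  .withRng(newRng())
  .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
  .withTcpTransport()
  .withYamux()
  .withNoise()
  .withAutotls() # defaults: LetsEncryptURL, auto-detected public IPv4
  .build()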
@@ -342,7 +342,7 @@ proc getOutgoingSlot*(
   if forceDial:
     c.outSema.forceAcquire()
   elif not c.outSema.tryAcquire():
-    trace "Too many outgoing connections!",
+    debug "Too many outgoing connections!",
       available = c.outSema.count, max = c.outSema.size
     raise newTooManyConnectionsError()
   return ConnectionSlot(connManager: c, direction: Out)
@@ -12,6 +12,7 @@
 import sequtils, std/[tables]
 import chronos, chronicles, metrics, stew/[endians2, byteutils, objects]
 import ../muxer, ../../stream/connection
+import ../../utils/zeroqueue
 
 export muxer
 
@@ -151,7 +152,7 @@ type
     opened: bool
     isSending: bool
     sendQueue: seq[ToSend]
-    recvQueue: seq[byte]
+    recvQueue: ZeroQueue
     isReset: bool
     remoteReset: bool
     closedRemotely: AsyncEvent
@@ -229,7 +230,6 @@ proc reset(channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).}
     for (d, s, fut) in channel.sendQueue:
       fut.fail(newLPStreamEOFError())
     channel.sendQueue = @[]
-    channel.recvQueue = @[]
     channel.sendWindow = 0
     if not channel.closedLocally:
       if isLocal and not channel.isSending:
@@ -257,7 +257,7 @@ proc updateRecvWindow(
     return
 
   let delta = channel.maxRecvWindow - inWindow
-  channel.recvWindow.inc(delta)
+  channel.recvWindow.inc(delta.int)
   await channel.conn.write(YamuxHeader.windowUpdate(channel.id, delta.uint32))
   trace "increasing the recvWindow", delta
 
@@ -279,7 +279,7 @@ method readOnce*(
       newLPStreamConnDownError()
   if channel.isEof:
     raise newLPStreamRemoteClosedError()
-  if channel.recvQueue.len == 0:
+  if channel.recvQueue.isEmpty():
     channel.receivedData.clear()
     let
       closedRemotelyFut = channel.closedRemotely.wait()
@@ -290,28 +290,23 @@ method readOnce*(
       if not receivedDataFut.finished():
         await receivedDataFut.cancelAndWait()
     await closedRemotelyFut or receivedDataFut
-    if channel.closedRemotely.isSet() and channel.recvQueue.len == 0:
+    if channel.closedRemotely.isSet() and channel.recvQueue.isEmpty():
       channel.isEof = true
       return
         0 # we return 0 to indicate that the channel is closed for reading from now on
 
-  let toRead = min(channel.recvQueue.len, nbytes)
-
-  var p = cast[ptr UncheckedArray[byte]](pbytes)
-  toOpenArray(p, 0, nbytes - 1)[0 ..< toRead] =
-    channel.recvQueue.toOpenArray(0, toRead - 1)
-  channel.recvQueue = channel.recvQueue[toRead ..^ 1]
+  let consumed = channel.recvQueue.consumeTo(pbytes, nbytes)
 
   # We made some room in the recv buffer let the peer know
   await channel.updateRecvWindow()
   channel.activity = true
-  return toRead
+  return consumed
 
 proc gotDataFromRemote(
     channel: YamuxChannel, b: seq[byte]
 ) {.async: (raises: [CancelledError, LPStreamError]).} =
   channel.recvWindow -= b.len
-  channel.recvQueue = channel.recvQueue.concat(b)
+  channel.recvQueue.push(b)
   channel.receivedData.fire()
   when defined(libp2p_yamux_metrics):
     libp2p_yamux_recv_queue.observe(channel.recvQueue.len.int64)
@@ -512,7 +507,15 @@ method close*(m: Yamux) {.async: (raises: []).} =
   trace "Closing yamux"
   let channels = toSeq(m.channels.values())
   for channel in channels:
-    await channel.reset(isLocal = true)
+    for (d, s, fut) in channel.sendQueue:
+      fut.fail(newLPStreamEOFError())
+    channel.sendQueue = @[]
+    channel.sendWindow = 0
+    channel.closedLocally = true
+    channel.isReset = true
+    channel.opened = false
+    await channel.remoteClosed()
+    channel.receivedData.fire()
   try:
     await m.connection.write(YamuxHeader.goAway(NormalTermination))
   except CancelledError as exc:
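readOnce above now delegates buffer copying to ZeroQueue.consumeTo. A toy illustration of that contract (copy up to nbytes from an internal byte queue into the caller's buffer and return the count); this is a sketch, not the real ZeroQueue implementation:

type ByteQueue = object
  data: seq[byte]

proc push(q: var ByteQueue, b: seq[byte]) =
  q.data.add(b)

proc isEmpty(q: ByteQueue): bool =
  q.data.len == 0

proc consumeTo(q: var ByteQueue, pbytes: pointer, nbytes: int): int =
  # copy up to nbytes from the queue into the caller's buffer
  let toRead = min(q.data.len, nbytes)
  if toRead > 0:
    copyMem(pbytes, addr q.data[0], toRead)
    q.data = q.data[toRead ..^ 1]
  toRead

var q: ByteQueue
q.push(@[1'u8, 2, 3, 4])
var buf: array[2, byte]
echo q.consumeTo(addr buf[0], buf.len) # 2
echo q.isEmpty()                       # false: two bytes remain queued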
@@ -22,6 +22,8 @@ const
   ChallengeCharset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
   ChallengeDefaultLen = 48
 
+export Domain
+
 type PeerIDAuthClient* = ref object of RootObj
   session: HttpSessionRef
   rng: ref HmacDrbgContext
@@ -55,7 +57,9 @@ type SigParam = object
   k: string
   v: seq[byte]
 
-proc new*(T: typedesc[PeerIDAuthClient], rng: ref HmacDrbgContext): PeerIDAuthClient =
+proc new*(
+    T: typedesc[PeerIDAuthClient], rng: ref HmacDrbgContext = newRng()
+): PeerIDAuthClient =
   PeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)
 
 proc sampleChar(
@@ -151,12 +155,12 @@ proc checkSignature*(
   )
 
 method post*(
-    self: PeerIDAuthClient, uri: string, payload: string, authHeader: string
+    self: PeerIDAuthClient, uri: Uri, payload: string, authHeader: string
 ): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
   let rawResponse = await HttpClientRequestRef
     .post(
       self.session,
-      uri,
+      $uri,
       body = payload,
       headers = [
         ("Content-Type", "application/json"),
@@ -174,9 +178,15 @@ method post*(
   )
 
 method get*(
-    self: PeerIDAuthClient, uri: string
-): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
-  let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
+    self: PeerIDAuthClient, uri: Uri
+): Future[PeerIDAuthResponse] {.
+  async: (raises: [PeerIDAuthError, HttpError, CancelledError]), base
+.} =
+  if self.session.isNil():
+    raise newException(PeerIDAuthError, "Session is nil")
+  let req = HttpClientRequestRef.get(self.session, $uri).valueOr:
+    raise newException(PeerIDAuthError, "Could not get request obj")
+  let rawResponse = await req.send()
   PeerIDAuthResponse(
     status: rawResponse.status,
     headers: rawResponse.headers,
@@ -190,7 +200,7 @@ proc requestAuthentication*(
 .} =
   let response =
     try:
-      await self.get($uri)
+      await self.get(uri)
     except HttpError as exc:
       raise newException(PeerIDAuthError, "Failed to start PeerID Auth", exc)
 
@@ -248,7 +258,7 @@ proc requestAuthorization*(
     "\"" & ", challenge-server=\"" & challengeServer & "\"" & ", sig=\"" & sig & "\""
   let response =
     try:
-      await self.post($uri, $payload, authHeader)
+      await self.post(uri, $payload, authHeader)
     except HttpError as exc:
       raise newException(
         PeerIDAuthError, "Failed to send Authorization for PeerID Auth", exc
@@ -303,12 +313,12 @@ proc sendWithBearer(
 ): Future[(BearerToken, PeerIDAuthResponse)] {.
   async: (raises: [PeerIDAuthError, CancelledError])
 .} =
-  if bearer.expires.isSome and DateTime(bearer.expires.get) <= now():
+  if bearer.expires.isSome() and DateTime(bearer.expires.get) <= now():
     raise newException(PeerIDAuthError, "Bearer expired")
   let authHeader = PeerIDAuthPrefix & " bearer=\"" & bearer.token & "\""
   let response =
     try:
-      await self.post($uri, $payload, authHeader)
+      await self.post(uri, $payload, authHeader)
     except HttpError as exc:
       raise newException(
         PeerIDAuthError, "Failed to send request with bearer token for PeerID Auth", exc
@@ -320,14 +330,14 @@ proc send*(
     uri: Uri,
     peerInfo: PeerInfo,
     payload: auto,
-    bearer: BearerToken = BearerToken(),
+    bearer: Opt[BearerToken] = Opt.none(BearerToken),
 ): Future[(BearerToken, PeerIDAuthResponse)] {.
   async: (raises: [PeerIDAuthError, CancelledError])
 .} =
-  if bearer.token == "":
-    await self.sendWithoutBearer(uri, peerInfo, payload)
+  if bearer.isSome():
+    await self.sendWithBearer(uri, peerInfo, payload, bearer.get)
   else:
-    await self.sendWithBearer(uri, peerInfo, payload, bearer)
+    await self.sendWithoutBearer(uri, peerInfo, payload)

proc close*(
    self: PeerIDAuthClient
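The send change above replaces the empty-string sentinel (bearer.token == "") with Opt[BearerToken], so the dispatch reads directly off presence. Reduced to the pattern, with illustrative types and assuming the results package:

import results

type BearerToken = object
  token: string

proc send(bearer: Opt[BearerToken]) =
  if bearer.isSome():
    echo "reuse bearer: ", bearer.get().token
  else:
    echo "no bearer yet: run the full PeerID-auth handshake"

send(Opt.none(BearerToken))                  # first request
send(Opt.some(BearerToken(token: "abc123"))) # subsequent requests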
@@ -9,10 +9,9 @@
 
 {.push raises: [].}
 
+import uri
 import chronos, chronos/apps/http/httpclient
-import ../crypto/crypto
 
-import ./client
+import ../crypto/crypto, ./client
 
 export client
 
@@ -27,14 +26,14 @@ proc new*(
   MockPeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)
 
 method post*(
-    self: MockPeerIDAuthClient, uri: string, payload: string, authHeader: string
+    self: MockPeerIDAuthClient, uri: Uri, payload: string, authHeader: string
 ): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
   PeerIDAuthResponse(
     status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
   )
 
 method get*(
-    self: MockPeerIDAuthClient, uri: string
+    self: MockPeerIDAuthClient, uri: Uri
 ): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
   PeerIDAuthResponse(
     status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
libp2p/protocols/kademlia.nim (new file, 3 lines)
@@ -0,0 +1,3 @@
import ./kademlia/kademlia

export kademlia
libp2p/protocols/kademlia/consts.nim (new file, 6 lines)
@@ -0,0 +1,6 @@
const
  IdLength* = 32 # 256-bit IDs
  k* = 20 # replication parameter
  maxBuckets* = 256

const KadCodec* = "/ipfs/kad/1.0.0"
libp2p/protocols/kademlia/kademlia.nim (new file, 74 lines)
@@ -0,0 +1,74 @@
import chronos
import chronicles
import ../../peerid
import ./consts
import ./routingtable
import ../protocol
import ../../switch
import ./protobuf
import ../../utils/heartbeat

logScope:
  topics = "kad-dht"

type KadDHT* = ref object of LPProtocol
  switch: Switch
  rng: ref HmacDrbgContext
  rtable*: RoutingTable
  maintenanceLoop: Future[void]

proc maintainBuckets(kad: KadDHT) {.async: (raises: [CancelledError]).} =
  heartbeat "refresh buckets", 10.minutes:
    debug "TODO: implement bucket maintenance"

proc new*(
    T: typedesc[KadDHT], switch: Switch, rng: ref HmacDrbgContext = newRng()
): T {.raises: [].} =
  var rtable = RoutingTable.init(switch.peerInfo.peerId)
  let kad = T(rng: rng, switch: switch, rtable: rtable)

  kad.codec = KadCodec
  kad.handler = proc(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    try:
      while not conn.atEof:
        let
          buf = await conn.readLp(4096)
          msg = Message.decode(buf).tryGet()

        # TODO: handle msg.msgType
    except CancelledError as exc:
      raise exc
    except CatchableError:
      error "could not handle request",
        peerId = conn.PeerId, err = getCurrentExceptionMsg()
    finally:
      await conn.close()

  return kad

method start*(
    kad: KadDHT
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
  let fut = newFuture[void]()
  fut.complete()
  if kad.started:
    warn "Starting kad-dht twice"
    return fut

  kad.maintenanceLoop = kad.maintainBuckets()
  kad.started = true

  info "kad-dht started"

  fut

method stop*(kad: KadDHT): Future[void] {.async: (raises: [], raw: true).} =
  if not kad.started:
    return

  kad.started = false
  kad.maintenanceLoop.cancelSoon()
  kad.maintenanceLoop = nil
  return
libp2p/protocols/kademlia/keys.nim (new file, 48 lines)
@@ -0,0 +1,48 @@
import ../../peerid
import ./consts
import chronicles
import stew/byteutils

type
  KeyType* {.pure.} = enum
    Unhashed
    Raw
    PeerId

  Key* = object
    case kind*: KeyType
    of KeyType.PeerId:
      peerId*: PeerId
    of KeyType.Raw, KeyType.Unhashed:
      data*: array[IdLength, byte]

proc toKey*(s: seq[byte]): Key =
  doAssert s.len == IdLength
  var data: array[IdLength, byte]
  for i in 0 ..< IdLength:
    data[i] = s[i]
  return Key(kind: KeyType.Raw, data: data)

proc toKey*(p: PeerId): Key =
  return Key(kind: KeyType.PeerId, peerId: p)

proc getBytes*(k: Key): seq[byte] =
  return
    case k.kind
    of KeyType.PeerId:
      k.peerId.getBytes()
    of KeyType.Raw, KeyType.Unhashed:
      @(k.data)

template `==`*(a, b: Key): bool =
  a.getBytes() == b.getBytes() and a.kind == b.kind

proc shortLog*(k: Key): string =
  case k.kind
  of KeyType.PeerId:
    "PeerId:" & $k.peerId
  of KeyType.Raw, KeyType.Unhashed:
    $k.kind & ":" & toHex(k.data)

chronicles.formatIt(Key):
  shortLog(it)
129
libp2p/protocols/kademlia/routingtable.nim
Normal file
129
libp2p/protocols/kademlia/routingtable.nim
Normal file
@@ -0,0 +1,129 @@
import algorithm
import bearssl/rand
import chronos
import chronicles
import ./consts
import ./keys
import ./xordistance
import ../../peerid
import sequtils

logScope:
  topics = "kad-dht rtable"

type
  NodeEntry* = object
    nodeId*: Key
    lastSeen*: Moment

  Bucket* = object
    peers*: seq[NodeEntry]

  RoutingTable* = ref object
    selfId*: Key
    buckets*: seq[Bucket]

proc init*(T: typedesc[RoutingTable], selfId: Key): T =
  return RoutingTable(selfId: selfId, buckets: @[])

proc bucketIndex*(selfId, key: Key): int =
  return xorDistance(selfId, key).leadingZeros

proc peerIndexInBucket(bucket: var Bucket, nodeId: Key): Opt[int] =
  for i, p in bucket.peers:
    if p.nodeId == nodeId:
      return Opt.some(i)
  return Opt.none(int)

proc insert*(rtable: var RoutingTable, nodeId: Key): bool =
  if nodeId == rtable.selfId:
    return false # No self insertion

  let idx = bucketIndex(rtable.selfId, nodeId)
  if idx >= maxBuckets:
    trace "cannot insert node. max buckets have been reached",
      nodeId, bucketIdx = idx, maxBuckets
    return false

  if idx >= rtable.buckets.len:
    # expand buckets lazily if needed
    rtable.buckets.setLen(idx + 1)

  var bucket = rtable.buckets[idx]
  let keyx = peerIndexInBucket(bucket, nodeId)
  if keyx.isSome:
    bucket.peers[keyx.unsafeValue].lastSeen = Moment.now()
  elif bucket.peers.len < k:
    bucket.peers.add(NodeEntry(nodeId: nodeId, lastSeen: Moment.now()))
  else:
    # TODO: eviction policy goes here; for now we drop the node
    trace "cannot insert node in bucket, dropping node",
      nodeId, bucket = k, bucketIdx = idx
    return false

  rtable.buckets[idx] = bucket
  return true

proc insert*(rtable: var RoutingTable, peerId: PeerId): bool =
  insert(rtable, peerId.toKey())

proc findClosest*(rtable: RoutingTable, targetId: Key, count: int): seq[Key] =
  var allNodes: seq[Key] = @[]

  for bucket in rtable.buckets:
    for p in bucket.peers:
      allNodes.add(p.nodeId)

  allNodes.sort(
    proc(a, b: Key): int =
      cmp(xorDistance(a, targetId), xorDistance(b, targetId))
  )

  return allNodes[0 ..< min(count, allNodes.len)]

proc findClosestPeers*(rtable: RoutingTable, targetId: Key, count: int): seq[PeerId] =
  findClosest(rtable, targetId, count).mapIt(it.peerId)

proc isStale*(bucket: Bucket): bool =
  if bucket.peers.len == 0:
    return true
  for p in bucket.peers:
    if Moment.now() - p.lastSeen > 30.minutes:
      return true
  return false

proc randomKeyInBucketRange*(
    selfId: Key, bucketIndex: int, rng: ref HmacDrbgContext
): Key =
  var raw = selfId.getBytes()

  # zero out higher bits
  for i in 0 ..< bucketIndex:
    let byteIdx = i div 8
    let bitInByte = 7 - (i mod 8)
    raw[byteIdx] = raw[byteIdx] and not (1'u8 shl bitInByte)

  # flip the target bit
  let tgtByte = bucketIndex div 8
  let tgtBitInByte = 7 - (bucketIndex mod 8)
  raw[tgtByte] = raw[tgtByte] xor (1'u8 shl tgtBitInByte)

  # randomize all less significant bits
  let totalBits = raw.len * 8
  let lsbStart = bucketIndex + 1
  let lsbBytes = (totalBits - lsbStart + 7) div 8
  var randomBuf = newSeq[byte](lsbBytes)
  hmacDrbgGenerate(rng[], randomBuf)

  for i in lsbStart ..< totalBits:
    let byteIdx = i div 8
    let bitInByte = 7 - (i mod 8)
    let lsbByte = (i - lsbStart) div 8
    let lsbBit = 7 - ((i - lsbStart) mod 8)
    let randBit = (randomBuf[lsbByte] shr lsbBit) and 1
    if randBit == 1:
      raw[byteIdx] = raw[byteIdx] or (1'u8 shl bitInByte)
    else:
      raw[byteIdx] = raw[byteIdx] and not (1'u8 shl bitInByte)

  return raw.toKey()
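Read together with keys.nim, the table is used roughly as below. This is a sketch with hypothetical `selfKey`/`otherKey` values; `k`, `maxBuckets`, and `rng` are assumed to come from `consts` and the caller:

var rt = RoutingTable.init(selfKey)
# Peers land in the bucket indexed by the length of their shared XOR prefix:
if rt.insert(otherKey):
  echo "stored in bucket ", bucketIndex(rt.selfId, otherKey)
# Lookups flatten every bucket and sort by XOR distance to the target:
let closest = rt.findClosest(otherKey, count = 3)
# A stale bucket can be refreshed by probing a random key in its range,
# built by the zero/flip/randomize bit manipulation shown above:
let probe = randomKeyInBucketRange(selfKey, bucketIndex = 6, rng = rng)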
libp2p/protocols/kademlia/xordistance.nim (new file, 55 lines)
@@ -0,0 +1,55 @@
import ./consts
import ./keys
import nimcrypto/sha2
import ../../peerid

type XorDistance* = array[IdLength, byte]

proc countLeadingZeroBits*(b: byte): int =
  for i in 0 .. 7:
    if (b and (0x80'u8 shr i)) != 0:
      return i
  return 8

proc leadingZeros*(dist: XorDistance): int =
  for i in 0 ..< dist.len:
    if dist[i] != 0:
      return i * 8 + countLeadingZeroBits(dist[i])
  return dist.len * 8

proc cmp*(a, b: XorDistance): int =
  for i in 0 ..< IdLength:
    if a[i] < b[i]:
      return -1
    elif a[i] > b[i]:
      return 1
  return 0

proc `<`*(a, b: XorDistance): bool =
  cmp(a, b) < 0

proc `<=`*(a, b: XorDistance): bool =
  cmp(a, b) <= 0

proc hashFor(k: Key): seq[byte] =
  return
    @(
      case k.kind
      of KeyType.PeerId:
        sha256.digest(k.peerId.getBytes()).data
      of KeyType.Raw:
        sha256.digest(k.data).data
      of KeyType.Unhashed:
        k.data
    )

proc xorDistance*(a, b: Key): XorDistance =
  let hashA = a.hashFor()
  let hashB = b.hashFor()
  var response: XorDistance
  for i in 0 ..< hashA.len:
    response[i] = hashA[i] xor hashB[i]
  return response

proc xorDistance*(a: PeerId, b: Key): XorDistance =
  xorDistance(a.toKey(), b)
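A worked example of the metric (note that the `Unhashed` kind bypasses the sha256 step, so the raw bytes are XORed directly):

var a, b: array[IdLength, byte]
a[0] = 0b1010_0000
b[0] = 0b1000_0000
let d = xorDistance(
  Key(kind: KeyType.Unhashed, data: a), Key(kind: KeyType.Unhashed, data: b)
)
doAssert d[0] == 0b0010_0000 # bytes are XORed pairwise
doAssert d.leadingZeros == 2 # i.e. the keys share a 2-bit prefix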
@@ -369,10 +369,15 @@ method getOrCreatePeer*(
       async: (raises: [CancelledError, GetConnDialError])
   .} =
     try:
-      return await p.switch.dial(peerId, protosToDial)
+      echo "DIALING PEER!!!!!!!!!!!!!!!", peerId
+      let x = await p.switch.dial(peerId, protosToDial)
+      echo "DIALED PEER!", peerId
+      return x
     except CancelledError as exc:
+      debug "CANCELLED DIAL PEER", peerId
       raise exc
     except DialFailedError as e:
+      debug "DIAL FAILED", peerId, err = e.msg
       raise (ref GetConnDialError)(parent: e)

 proc onEvent(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
@@ -245,6 +245,7 @@ proc closeSendConn(
     await p.sendConn.close()
     p.sendConn = nil

+  debug "CLOSE SEND CONN", fin = p.connectedFut.finished
   if not p.connectedFut.finished:
     p.connectedFut.complete()

@@ -263,16 +264,19 @@ proc connectOnce(
   p.connectedFut = newFuture[void]()
   let newConn =
     try:
-      await p.getConn().wait(5.seconds)
+      debug "TRYING TO GET CONN"
+      let x = await p.getConn().wait(5.seconds)
+      debug "GOT THE CONN!!!"
+      x
     except AsyncTimeoutError as error:
-      trace "getConn timed out", description = error.msg
+      debug "getConn timed out", description = error.msg
       raise (ref LPError)(msg: "Cannot establish send connection: " & error.msg)

   # When the send channel goes up, subscriptions need to be sent to the
   # remote peer - if we had multiple channels up and one goes down, all
   # stop working so we make an effort to only keep a single channel alive

-  trace "Get new send connection", p, newConn
+  debug "Get new send connection", p, newConn

   # Careful to race conditions here.
   # Topic subscription relies on either connectedFut
@@ -300,6 +304,7 @@ proc connectImpl(p: PubSubPeer) {.async: (raises: []).} =
     while true:
       if p.disconnected:
         if not p.connectedFut.finished:
+          debug "CONNECT COMPLETE 2"
           p.connectedFut.complete()
         return
       await connectOnce(p)
@@ -311,7 +316,9 @@ proc connectImpl(p: PubSubPeer) {.async: (raises: []).} =
     debug "Could not establish send connection", description = exc.msg

 proc connect*(p: PubSubPeer) =
+  debug "CONNECT..."
   if p.connected:
+    echo "Already connected"
     return

   asyncSpawn connectImpl(p)
@@ -362,11 +369,15 @@ proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async: (raises: [CancelledErro
   if p.sendConn == nil:
     # Wait for a send conn to be setup. `connectOnce` will
     # complete this even if the sendConn setup failed
+    debug "await connected fut"
     discard await race(p.connectedFut)

   var conn = p.sendConn
   if conn == nil or conn.closed():
-    debug "No send connection", p, payload = shortLog(msg)
+    if conn == nil:
+      debug "No send connection - nil", p, payload = shortLog(msg)
+    else:
+      debug "No send connection - closed", p, payload = shortLog(msg)
     return

   trace "sending encoded msg to peer", conn, encoded = shortLog(msg)

@@ -32,6 +32,7 @@ import
   nameresolving/nameresolver,
   peerid,
   peerstore,
+  autotls/manager,
   errors,
   utility,
   dialer
@@ -58,6 +59,7 @@ type
     acceptFuts: seq[Future[void]]
     dialer*: Dial
     peerStore*: PeerStore
+    autotls*: AutoTLSManager
     nameResolver*: NameResolver
     started: bool
    services*: seq[Service]
@@ -332,6 +334,9 @@ proc stop*(s: Switch) {.public, async: (raises: [CancelledError]).} =
     except CatchableError as exc:
       warn "error cleaning up transports", description = exc.msg

+  if not s.autotls.isNil:
+    await s.autotls.stop()
+
   await s.ms.stop()

   trace "Switch stopped"
@@ -370,6 +375,8 @@ proc start*(s: Switch) {.public, async: (raises: [CancelledError, LPError]).} =

   await s.peerInfo.update()
   await s.ms.start()
+  if not s.autotls.isNil:
+    await s.autotls.start(s.peerInfo)
   s.started = true

   debug "Started libp2p node", peer = s.peerInfo
@@ -381,6 +388,7 @@ proc newSwitch*(
     connManager: ConnManager,
     ms: MultistreamSelect,
     peerStore: PeerStore,
+    autotls: AutoTLSManager = nil,
     nameResolver: NameResolver = nil,
     services = newSeq[Service](),
 ): Switch {.raises: [LPError].} =
@@ -396,6 +404,7 @@ proc newSwitch*(
     dialer:
       Dialer.new(peerInfo.peerId, connManager, peerStore, transports, nameResolver),
     nameResolver: nameResolver,
+    autotls: autotls,
     services: services,
   )

@@ -45,9 +45,11 @@ proc new(
 template mapExceptions(body: untyped) =
   try:
     body
-  except QuicError:
+  except QuicError as ex:
+    debug "QUIC ERROR", err = ex.msg
     raise newLPStreamEOFError()
-  except CatchableError:
+  except CatchableError as ex:
+    debug "QUIC ERROR2", err = ex.msg
     raise newLPStreamEOFError()

 method readOnce*(
@@ -61,6 +63,7 @@ method readOnce*(
     stream.cached = stream.cached[result ..^ 1]
     libp2p_network_bytes.inc(result.int64, labelValues = ["in"])
   except CatchableError as exc:
+    debug "QUIC ERROR", err = exc.msg
     raise newLPStreamEOFError()

 {.push warning[LockLevel]: off.}
@@ -97,7 +100,6 @@ proc getStream*(
       stream = await session.connection.incomingStream()
     of Direction.Out:
       stream = await session.connection.openStream()
-      await stream.write(@[]) # QUIC streams do not exist until data is sent
     return QuicStream.new(stream, session.observedAddr, session.peerId)
   except CatchableError as exc:
     # TODO: incomingStream is using {.async.} with no raises
@@ -113,7 +115,7 @@ type QuicMuxer = ref object of Muxer

 method newStream*(
     m: QuicMuxer, name: string = "", lazy: bool = false
-): Future[P2PConnection] {.
+): Future[connection.Connection] {.
     async: (raises: [CancelledError, LPStreamError, MuxerError])
 .} =
   try:

libp2p/utils/zeroqueue.nim (new file, 84 lines)
@@ -0,0 +1,84 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import std/deques

type Chunk = ref object
  data: seq[byte]
  size: int
  start: int

template clone(c: Chunk): Chunk =
  Chunk(data: c.data, size: c.size, start: c.start)

template newChunk(b: sink seq[byte]): Chunk =
  Chunk(data: b, size: b.len, start: 0)

template len(c: Chunk): int =
  c.size - c.start

type ZeroQueue* = object
  # ZeroQueue is a queue structure optimized for efficient pushing and popping of
  # byte sequences `seq[byte]` (called chunks). This type is useful for streaming or
  # buffering scenarios where chunks of binary data are accumulated and consumed
  # incrementally.
  chunks: Deque[Chunk]

proc clear*(q: var ZeroQueue) =
  q.chunks.clear()

proc isEmpty*(q: ZeroQueue): bool =
  return q.chunks.len() == 0

proc len*(q: ZeroQueue): int64 =
  var l: int64
  for b in q.chunks.items():
    l += b.len()
  return l

proc push*(q: var ZeroQueue, b: sink seq[byte]) =
  if b.len > 0:
    q.chunks.addLast(newChunk(b))

proc popChunk(q: var ZeroQueue, count: int): Chunk {.inline.} =
  var first = q.chunks.popFirst()

  # the first chunk has at most the requested count of elements;
  # the queue returns this chunk as-is (it may hold fewer than requested)
  if first.len() <= count:
    return first

  # the first chunk has more elements than requested;
  # the queue returns a view of the first count elements, leaving the rest queued
  var ret = first.clone()
  ret.size = ret.start + count
  first.start += count
  q.chunks.addFirst(first)
  return ret

proc consumeTo*(q: var ZeroQueue, pbytes: pointer, nbytes: int): int =
  var consumed = 0
  while consumed < nbytes and not q.isEmpty():
    let chunk = q.popChunk(nbytes - consumed)
    let dest = cast[pointer](cast[ByteAddress](pbytes) + consumed)
    let offsetPtr = cast[ptr byte](cast[int](unsafeAddr chunk.data[0]) + chunk.start)
    copyMem(dest, offsetPtr, chunk.len())
    consumed += chunk.len()

  return consumed

proc popChunkSeq*(q: var ZeroQueue, count: int): seq[byte] =
  if q.isEmpty:
    return @[]

  let chunk = q.popChunk(count)
  var dest = newSeq[byte](chunk.len())
  let offsetPtr = cast[ptr byte](cast[int](unsafeAddr chunk.data[0]) + chunk.start)
  copyMem(dest[0].addr, offsetPtr, chunk.len())

  return dest
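A short sketch of the intended pattern (hypothetical values): producers push whole chunks, and consumers drain arbitrary byte counts; a partially consumed chunk is re-queued by bumping its `start` offset rather than copying the remainder:

var q: ZeroQueue
q.push(@[1'u8, 2, 3])
q.push(@[4'u8, 5])
var buf: array[4, byte]
# Drains across the chunk boundary: 3 bytes from the first chunk, 1 from the second.
doAssert q.consumeTo(addr buf[0], 4) == 4
doAssert q.len == 1 # the second chunk's tail (byte 5) is still queued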
tests/kademlia/testroutingtable.nim (new file, 83 lines)
@@ -0,0 +1,83 @@
{.used.}

# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import unittest
import chronos
import ../../libp2p/crypto/crypto
import ../../libp2p/protocols/kademlia/[routingtable, consts, keys]

proc testKey*(x: byte): Key =
  var buf: array[IdLength, byte]
  buf[31] = x
  return Key(kind: KeyType.Unhashed, data: buf)

let rng = crypto.newRng()

suite "routing table":
  test "inserts single key in correct bucket":
    let selfId = testKey(0)
    var rt = RoutingTable.init(selfId)
    let other = testKey(0b10000000)
    discard rt.insert(other)

    let idx = bucketIndex(selfId, other)
    check:
      rt.buckets.len > idx
      rt.buckets[idx].peers.len == 1
      rt.buckets[idx].peers[0].nodeId == other

  test "does not insert beyond capacity":
    let selfId = testKey(0)
    var rt = RoutingTable.init(selfId)
    let targetBucket = 6
    for _ in 0 ..< k + 5:
      var kid = randomKeyInBucketRange(selfId, targetBucket, rng)
      kid.kind = KeyType.Unhashed
      # Overriding so we don't use sha for comparing xor distances
      discard rt.insert(kid)

    check targetBucket < rt.buckets.len
    let bucket = rt.buckets[targetBucket]
    check bucket.peers.len <= k

  test "findClosest returns sorted keys":
    let selfId = testKey(0)
    var rt = RoutingTable.init(selfId)
    let ids = @[testKey(1), testKey(2), testKey(3), testKey(4), testKey(5)]
    for id in ids:
      discard rt.insert(id)

    let res = rt.findClosest(testKey(1), 3)

    check:
      res.len == 3
      res == @[testKey(1), testKey(3), testKey(2)]

  test "isStale returns true for empty or old keys":
    var bucket: Bucket
    check isStale(bucket) == true

    bucket.peers = @[NodeEntry(nodeId: testKey(1), lastSeen: Moment.now() - 40.minutes)]
    check isStale(bucket) == true

    bucket.peers = @[NodeEntry(nodeId: testKey(1), lastSeen: Moment.now())]
    check isStale(bucket) == false

  test "randomKeyInBucketRange returns id at correct distance":
    let selfId = testKey(0)
    let targetBucket = 3
    var rid = randomKeyInBucketRange(selfId, targetBucket, rng)
    rid.kind = KeyType.Unhashed
    # Overriding so we don't use sha for comparing xor distances
    let idx = bucketIndex(selfId, rid)
    check:
      idx == targetBucket
      rid != selfId
tests/kademlia/testxordistance.nim (new file, 54 lines)
@@ -0,0 +1,54 @@
{.used.}

# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import unittest
import chronos
import ../../libp2p/protocols/kademlia/[consts, keys, xordistance]

suite "xor distance":
  test "countLeadingZeroBits works":
    check countLeadingZeroBits(0b00000000'u8) == 8
    check countLeadingZeroBits(0b10000000'u8) == 0
    check countLeadingZeroBits(0b01000000'u8) == 1
    check countLeadingZeroBits(0b00000001'u8) == 7

  test "leadingZeros of xor distance":
    var d: XorDistance
    for i in 0 ..< IdLength:
      d[i] = 0
    check leadingZeros(d) == IdLength * 8

    d[0] = 0b00010000
    check leadingZeros(d) == 3

    d[0] = 0
    d[1] = 0b00100000
    check leadingZeros(d) == 10

  test "xorDistance of identical keys is zero":
    let k = @[
      1'u8, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6,
      7, 8, 9, 0, 1, 2,
    ].toKey()
    let dist = xorDistance(k, k)
    check:
      leadingZeros(dist) == IdLength * 8
      dist == default(XorDistance)

  test "cmp gives correct order":
    var a: XorDistance
    var b: XorDistance
    a[0] = 0x01
    b[0] = 0x02
    check a < b
    check cmp(a, b) == -1
    check cmp(b, a) == 1
    check cmp(a, a) == 0
@@ -12,8 +12,8 @@
 import sequtils, tables, sets
 import chronos, stew/byteutils
 import
-  utils,
-  ../../libp2p/[
+  ../utils,
+  ../../../libp2p/[
     switch,
     stream/connection,
     crypto/crypto,
@@ -23,9 +23,9 @@ import
     protocols/pubsub/peertable,
     protocols/pubsub/pubsubpeer,
   ]
-import ../../libp2p/protocols/pubsub/errors as pubsub_errors
+import ../../../libp2p/protocols/pubsub/errors as pubsub_errors

-import ../helpers
+import ../../helpers

 proc waitSub(sender, receiver: auto, key: string) {.async.} =
   # turn things deterministic
@@ -38,7 +38,7 @@ proc waitSub(sender, receiver: auto, key: string) {.async.} =
     dec ceil
   doAssert(ceil > 0, "waitSub timeout!")

-suite "FloodSub":
+suite "FloodSub Integration":
   teardown:
     checkTrackers()

@@ -1,106 +1,15 @@
 {.used.}

 import std/[sequtils]
 import stew/byteutils
-import utils
 import chronicles
-import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
-import ../helpers
+import ../utils
+import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
+import ../../helpers

-suite "GossipSub Control Messages":
+suite "GossipSub Integration - Control Messages":
   teardown:
     checkTrackers()

-  asyncTest "handleIHave - peers with no budget should not request messages":
-    let topic = "foobar"
-    var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    gossipSub.subscribe(topic, voidTopicHandler)
-
-    let peerId = randomPeerId()
-    let peer = gossipSub.getPubSubPeer(peerId)
-
-    # Add message to `gossipSub`'s message cache
-    let id = @[0'u8, 1, 2, 3]
-    gossipSub.mcache.put(id, Message())
-    peer.sentIHaves[^1].incl(id)
-
-    # Build an IHAVE message that contains the same message ID three times
-    let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
-
-    # Given the peer has no budget to request messages
-    peer.iHaveBudget = 0
-
-    # When a peer makes an IHAVE request for the a message that `gossipSub` has
-    let iwants = gossipSub.handleIHave(peer, @[msg])
-
-    # Then `gossipSub` should not generate an IWant message for the message,
-    check:
-      iwants.messageIDs.len == 0
-      gossipSub.mcache.msgs.len == 1
-
-  asyncTest "handleIHave - peers with budget should request messages":
-    let topic = "foobar"
-    var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    gossipSub.subscribe(topic, voidTopicHandler)
-
-    let peerId = randomPeerId()
-    let peer = gossipSub.getPubSubPeer(peerId)
-
-    # Add message to `gossipSub`'s message cache
-    let id = @[0'u8, 1, 2, 3]
-    gossipSub.mcache.put(id, Message())
-    peer.sentIHaves[^1].incl(id)
-
-    # Build an IHAVE message that contains the same message ID three times
-    # If ids are repeated, only one request should be generated
-    let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
-
-    # Given the budget is not 0 (because it's not been overridden)
-    check:
-      peer.iHaveBudget > 0
-
-    # When a peer makes an IHAVE request for the a message that `gossipSub` does not have
-    let iwants = gossipSub.handleIHave(peer, @[msg])
-
-    # Then `gossipSub` should generate an IWant message for the message
-    check:
-      iwants.messageIDs.len == 1
-      gossipSub.mcache.msgs.len == 1
-
-  asyncTest "handleIWant - peers with budget should request messages":
-    let topic = "foobar"
-    var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    gossipSub.subscribe(topic, voidTopicHandler)
-
-    let peerId = randomPeerId()
-    let peer = gossipSub.getPubSubPeer(peerId)
-
-    # Add message to `gossipSub`'s message cache
-    let id = @[0'u8, 1, 2, 3]
-    gossipSub.mcache.put(id, Message())
-    peer.sentIHaves[^1].incl(id)
-
-    # Build an IWANT message that contains the same message ID three times
-    # If ids are repeated, only one request should be generated
-    let msg = ControlIWant(messageIDs: @[id, id, id])
-
-    # When a peer makes an IWANT request for the a message that `gossipSub` has
-    let messages = gossipSub.handleIWant(peer, @[msg])
-
-    # Then `gossipSub` should return the message
-    check:
-      messages.len == 1
-      gossipSub.mcache.msgs.len == 1
-
   asyncTest "GRAFT messages correctly add peers to mesh":
     # Given 2 nodes
     let
@@ -512,32 +421,3 @@ suite "GossipSub Control Messages":
     check:
       toSeq(nodeC.mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
       toSeq(nodeA.mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
-
-  asyncTest "Max IDONTWANT messages per heartbeat per peer":
-    # Given GossipSub node with 1 peer
-    let
-      topic = "foobar"
-      totalPeers = 1
-
-    let (gossipSub, conns, peers) = setupGossipSubWithPeers(totalPeers, topic)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    let peer = peers[0]
-
-    # And sequence of iDontWants with more messages than max number (1200)
-    proc generateMessageIds(count: int): seq[MessageId] =
-      return (0 ..< count).mapIt(("msg_id_" & $it & $Moment.now()).toBytes())
-
-    let iDontWants =
-      @[
-        ControlIWant(messageIDs: generateMessageIds(600)),
-        ControlIWant(messageIDs: generateMessageIds(600)),
-      ]
-
-    # When node handles iDontWants
-    gossipSub.handleIDontWant(peer, iDontWants)
-
-    # Then it saves max IDontWantMaxCount messages in the history and the rest is dropped
-    check:
-      peer.iDontWants[0].len == IDontWantMaxCount
@@ -11,11 +11,11 @@

 import chronos
 import stew/byteutils
-import utils
-import ../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
-import ../../libp2p/protocols/pubsub/rpc/[messages]
-import ../../libp2p/stream/connection
-import ../helpers
+import ../utils
+import ../../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
+import ../../../libp2p/protocols/pubsub/rpc/[messages]
+import ../../../libp2p/stream/connection
+import ../../helpers

 type DummyConnection* = ref object of Connection

@@ -30,7 +30,7 @@ proc new*(T: typedesc[DummyConnection]): DummyConnection =
   let instance = T()
   instance

-suite "GossipSub Custom Connection Support":
+suite "GossipSub Integration - Custom Connection Support":
   teardown:
     checkTrackers()

@@ -9,66 +9,18 @@

 {.used.}

 import std/[sequtils]
 import stew/byteutils
 import chronicles
-import utils
-import ../../libp2p/protocols/pubsub/[gossipsub, peertable]
-import ../../libp2p/protocols/pubsub/rpc/[messages]
-import ../helpers
+import ../utils
+import ../../../libp2p/protocols/pubsub/[gossipsub, peertable]
+import ../../../libp2p/protocols/pubsub/rpc/[messages]
+import ../../helpers

-suite "GossipSub Fanout Management":
+suite "GossipSub Integration - Fanout Management":
   teardown:
     checkTrackers()

-  asyncTest "`replenishFanout` Degree Lo":
-    let topic = "foobar"
-    let (gossipSub, conns, peers) =
-      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    check gossipSub.gossipsub[topic].len == 15
-    gossipSub.replenishFanout(topic)
-    check gossipSub.fanout[topic].len == gossipSub.parameters.d
-
-  asyncTest "`dropFanoutPeers` drop expired fanout topics":
-    let topic = "foobar"
-    let (gossipSub, conns, peers) =
-      setupGossipSubWithPeers(6, topic, populateGossipsub = true, populateFanout = true)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
-    await sleepAsync(5.millis) # allow the topic to expire
-
-    check gossipSub.fanout[topic].len == gossipSub.parameters.d
-
-    gossipSub.dropFanoutPeers()
-    check topic notin gossipSub.fanout
-
-  asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
-    let
-      topic1 = "foobar1"
-      topic2 = "foobar2"
-    let (gossipSub, conns, peers) = setupGossipSubWithPeers(
-      6, @[topic1, topic2], populateGossipsub = true, populateFanout = true
-    )
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
-    gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
-    await sleepAsync(5.millis) # allow first topic to expire
-
-    check gossipSub.fanout[topic1].len == gossipSub.parameters.d
-    check gossipSub.fanout[topic2].len == gossipSub.parameters.d
-
-    gossipSub.dropFanoutPeers()
-    check topic1 notin gossipSub.fanout
-    check topic2 in gossipSub.fanout
-
-  asyncTest "e2e - GossipSub send over fanout A -> B":
+  asyncTest "GossipSub send over fanout A -> B":
     let (passed, handler) = createCompleteHandler()

     let nodes = generateNodes(2, gossip = true)
@@ -107,7 +59,7 @@ suite "GossipSub Fanout Management":

     check observed == 2

-  asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
+  asyncTest "GossipSub send over fanout A -> B for subscribed topic":
     let (passed, handler) = createCompleteHandler()

     let nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 10.minutes)
@@ -12,129 +12,15 @@
 import std/[sequtils]
 import stew/byteutils
 import chronicles
-import utils
-import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
-import ../../libp2p/protocols/pubsub/rpc/[message]
-import ../helpers, ../utils/[futures]
+import ../utils
+import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
+import ../../../libp2p/protocols/pubsub/rpc/[message]
+import ../../helpers, ../../utils/[futures]

 const MsgIdSuccess = "msg id gen success"

-suite "GossipSub Gossip Protocol":
+suite "GossipSub Integration - Gossip Protocol":
   teardown:
     checkTrackers()

-  asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
-    let topic = "foobar"
-    let (gossipSub, conns, peers) = setupGossipSubWithPeers(45, topic)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    # generate mesh and fanout peers
-    for i in 0 ..< 30:
-      let peer = peers[i]
-      if i mod 2 == 0:
-        gossipSub.fanout[topic].incl(peer)
-      else:
-        gossipSub.grafted(peer, topic)
-        gossipSub.mesh[topic].incl(peer)
-
-    # generate gossipsub (free standing) peers
-    for i in 30 ..< 45:
-      let peer = peers[i]
-      gossipSub.gossipsub[topic].incl(peer)
-
-    # generate messages
-    var seqno = 0'u64
-    for i in 0 .. 5:
-      let conn = conns[i]
-      inc seqno
-      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
-      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
-
-    check gossipSub.fanout[topic].len == 15
-    check gossipSub.mesh[topic].len == 15
-    check gossipSub.gossipsub[topic].len == 15
-
-    let gossipPeers = gossipSub.getGossipPeers()
-    check gossipPeers.len == gossipSub.parameters.d
-    for p in gossipPeers.keys:
-      check not gossipSub.fanout.hasPeerId(topic, p.peerId)
-      check not gossipSub.mesh.hasPeerId(topic, p.peerId)
-
-  asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
-    let topic = "foobar"
-    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    # generate mesh and fanout peers
-    for i, peer in peers:
-      if i mod 2 == 0:
-        gossipSub.fanout[topic].incl(peer)
-      else:
-        gossipSub.gossipsub[topic].incl(peer)
-
-    # generate messages
-    var seqno = 0'u64
-    for i in 0 .. 5:
-      let conn = conns[i]
-      inc seqno
-      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
-      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
-
-    let gossipPeers = gossipSub.getGossipPeers()
-    check gossipPeers.len == gossipSub.parameters.d
-
-  asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
-    let topic = "foobar"
-    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    # generate mesh and fanout peers
-    for i, peer in peers:
-      if i mod 2 == 0:
-        gossipSub.mesh[topic].incl(peer)
-        gossipSub.grafted(peer, topic)
-      else:
-        gossipSub.gossipsub[topic].incl(peer)
-
-    # generate messages
-    var seqno = 0'u64
-    for i in 0 .. 5:
-      let conn = conns[i]
-      inc seqno
-      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
-      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
-
-    let gossipPeers = gossipSub.getGossipPeers()
-    check gossipPeers.len == gossipSub.parameters.d
-
-  asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
-    let topic = "foobar"
-    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    # generate mesh and fanout peers
-    for i, peer in peers:
-      if i mod 2 == 0:
-        gossipSub.mesh[topic].incl(peer)
-        gossipSub.grafted(peer, topic)
-      else:
-        gossipSub.fanout[topic].incl(peer)
-
-    # generate messages
-    var seqno = 0'u64
-    for i in 0 .. 5:
-      let conn = conns[i]
-      inc seqno
-      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
-      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
-
-    let gossipPeers = gossipSub.getGossipPeers()
-    check gossipPeers.len == 0
-
   asyncTest "messages sent to peers not in the mesh are propagated via gossip":
     let
       numberOfNodes = 5
@@ -314,7 +200,7 @@ suite "GossipSub Gossip Protocol":
       messages[].mapIt(it[].len)[1] == 0
       messages[].mapIt(it[].len)[0] == 0

-  asyncTest "e2e - GossipSub peer exchange":
+  asyncTest "GossipSub peer exchange":
     # A, B & C are subscribed to something
     # B unsubcribe from it, it should send
     # PX to A & C
@@ -1,11 +1,11 @@
 {.used.}

 import std/[sequtils]
-import utils
-import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
-import ../helpers
+import ../utils
+import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
+import ../../helpers

-suite "GossipSub Heartbeat":
+suite "GossipSub Integration - Heartbeat":
   teardown:
     checkTrackers()

@@ -11,194 +11,14 @@

 import chronicles
 import std/[sequtils]
-import utils
-import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
-import ../helpers
+import ../utils
+import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
+import ../../helpers

-suite "GossipSub Mesh Management":
+suite "GossipSub Integration - Mesh Management":
   teardown:
     checkTrackers()

-  asyncTest "subscribe/unsubscribeAll":
-    let topic = "foobar"
-    let (gossipSub, conns, peers) =
-      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    # test via dynamic dispatch
-    gossipSub.PubSub.subscribe(topic, voidTopicHandler)
-
-    check:
-      gossipSub.topics.contains(topic)
-      gossipSub.gossipsub[topic].len() > 0
-      gossipSub.mesh[topic].len() > 0
-
-    # test via dynamic dispatch
-    gossipSub.PubSub.unsubscribeAll(topic)
-
-    check:
-      topic notin gossipSub.topics # not in local topics
-      topic notin gossipSub.mesh # not in mesh
-      topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)
-
-  asyncTest "`rebalanceMesh` Degree Lo":
-    let topic = "foobar"
-    let (gossipSub, conns, peers) =
-      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    check gossipSub.peers.len == 15
-    gossipSub.rebalanceMesh(topic)
-    check gossipSub.mesh[topic].len == gossipSub.parameters.d
-
-  asyncTest "rebalanceMesh - bad peers":
-    let topic = "foobar"
-    let (gossipSub, conns, peers) =
-      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    var scoreLow = -11'f64
-    for peer in peers:
-      peer.score = scoreLow
-      scoreLow += 1.0
-
-    check gossipSub.peers.len == 15
-    gossipSub.rebalanceMesh(topic)
-    # low score peers should not be in mesh, that's why the count must be 4
-    check gossipSub.mesh[topic].len == 4
-    for peer in gossipSub.mesh[topic]:
-      check peer.score >= 0.0
-
-  asyncTest "`rebalanceMesh` Degree Hi":
-    let topic = "foobar"
-    let (gossipSub, conns, peers) =
-      setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    check gossipSub.mesh[topic].len == 15
-    gossipSub.rebalanceMesh(topic)
-    check gossipSub.mesh[topic].len ==
-      gossipSub.parameters.d + gossipSub.parameters.dScore
-
-  asyncTest "rebalanceMesh fail due to backoff":
-    let topic = "foobar"
-    let (gossipSub, conns, peers) =
-      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    for peer in peers:
-      gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
-        peer.peerId, Moment.now() + 1.hours
-      )
-      let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
-      # there must be a control prune due to violation of backoff
-      check prunes.len != 0
-
-    check gossipSub.peers.len == 15
-    gossipSub.rebalanceMesh(topic)
-    # expect 0 since they are all backing off
-    check gossipSub.mesh[topic].len == 0
-
-  asyncTest "rebalanceMesh fail due to backoff - remote":
-    let topic = "foobar"
-    let (gossipSub, conns, peers) =
-      setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    check gossipSub.peers.len == 15
-    gossipSub.rebalanceMesh(topic)
-    check gossipSub.mesh[topic].len != 0
-
-    for peer in peers:
-      gossipSub.handlePrune(
-        peer,
-        @[
-          ControlPrune(
-            topicID: topic,
-            peers: @[],
-            backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
-          )
-        ],
-      )
-
-    # expect topic cleaned up since they are all pruned
-    check topic notin gossipSub.mesh
-
-  asyncTest "rebalanceMesh Degree Hi - audit scenario":
-    let
-      topic = "foobar"
-      numInPeers = 6
-      numOutPeers = 7
-      totalPeers = numInPeers + numOutPeers
-
-    let (gossipSub, conns, peers) = setupGossipSubWithPeers(
-      totalPeers, topic, populateGossipsub = true, populateMesh = true
-    )
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    gossipSub.parameters.dScore = 4
-    gossipSub.parameters.d = 6
-    gossipSub.parameters.dOut = 3
-    gossipSub.parameters.dHigh = 12
-    gossipSub.parameters.dLow = 4
-
-    for i in 0 ..< numInPeers:
-      let conn = conns[i]
-      let peer = peers[i]
-      conn.transportDir = Direction.In
-      peer.score = 40.0
-
-    for i in numInPeers ..< totalPeers:
-      let conn = conns[i]
-      let peer = peers[i]
-      conn.transportDir = Direction.Out
-      peer.score = 10.0
-
-    check gossipSub.mesh[topic].len == 13
-    gossipSub.rebalanceMesh(topic)
-    # ensure we are above dlow
-    check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
-    var outbound = 0
-    for peer in gossipSub.mesh[topic]:
-      if peer.sendConn.transportDir == Direction.Out:
-        inc outbound
-    # ensure we give priority and keep at least dOut outbound peers
-    check outbound >= gossipSub.parameters.dOut
-
-  asyncTest "rebalanceMesh Degree Hi - dScore controls number of peers to retain by score when pruning":
-    # Given GossipSub node starting with 13 peers in mesh
-    let
-      topic = "foobar"
-      totalPeers = 13
-
-    let (gossipSub, conns, peers) = setupGossipSubWithPeers(
-      totalPeers, topic, populateGossipsub = true, populateMesh = true
-    )
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    # And mesh is larger than dHigh
-    gossipSub.parameters.dLow = 4
-    gossipSub.parameters.d = 6
-    gossipSub.parameters.dHigh = 8
-    gossipSub.parameters.dOut = 3
-    gossipSub.parameters.dScore = 13
-
-    check gossipSub.mesh[topic].len == totalPeers
-
-    # When mesh is rebalanced
-    gossipSub.rebalanceMesh(topic)
-
-    # Then prunning is not triggered when mesh is not larger than dScore
-    check gossipSub.mesh[topic].len == totalPeers
-
   asyncTest "Nodes graft peers according to DValues - numberOfNodes < dHigh":
     let
       numberOfNodes = 5
@@ -242,7 +62,7 @@ suite "GossipSub Mesh Management":
       node.mesh.getOrDefault(topic).len <= dHigh
       node.fanout.len == 0

-  asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
+  asyncTest "GossipSub should add remote peer topic subscriptions":
     proc handler(topic: string, data: seq[byte]) {.async.} =
       discard

@@ -261,7 +81,7 @@ suite "GossipSub Mesh Management":
       "foobar" in gossip1.gossipsub
       gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)

-  asyncTest "e2e - GossipSub should add remote peer topic subscriptions if both peers are subscribed":
+  asyncTest "GossipSub should add remote peer topic subscriptions if both peers are subscribed":
     proc handler(topic: string, data: seq[byte]) {.async.} =
       discard

@@ -2,12 +2,12 @@

 import std/[sequtils]
 import stew/byteutils
-import utils
-import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, floodsub]
-import ../../libp2p/protocols/pubsub/rpc/[messages, message]
-import ../helpers
+import ../utils
+import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, floodsub]
+import ../../../libp2p/protocols/pubsub/rpc/[messages, message]
+import ../../helpers

-suite "GossipSub Message Cache":
+suite "GossipSub Integration - Message Cache":
   teardown:
     checkTrackers()

@@ -11,12 +11,12 @@

 import std/[sequtils, enumerate]
 import stew/byteutils
-import utils
 import sugar
 import chronicles
-import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, timedcache]
-import ../../libp2p/protocols/pubsub/rpc/[message, protobuf]
-import ../helpers, ../utils/[futures]
+import ../utils
+import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, timedcache]
+import ../../../libp2p/protocols/pubsub/rpc/[message]
+import ../../helpers, ../../utils/[futures]

 const MsgIdSuccess = "msg id gen success"

@@ -72,62 +72,11 @@ proc createMessages(

   return (iwantMessageIds, sentMessages)

-suite "GossipSub Message Handling":
+suite "GossipSub Integration - Message Handling":
   teardown:
     checkTrackers()

-  asyncTest "Drop messages of topics without subscription":
-    let topic = "foobar"
-    var (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    # generate messages
-    var seqno = 0'u64
-    for i in 0 .. 5:
-      let conn = conns[i]
-      let peer = peers[i]
-      inc seqno
-      let msg = Message.init(conn.peerId, ("bar" & $i).toBytes(), topic, some(seqno))
-      await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
-
-    check gossipSub.mcache.msgs.len == 0
-
-  asyncTest "subscription limits":
-    let gossipSub = TestGossipSub.init(newStandardSwitch())
-    gossipSub.topicsHigh = 10
-
-    var tooManyTopics: seq[string]
-    for i in 0 .. gossipSub.topicsHigh + 10:
-      tooManyTopics &= "topic" & $i
-    let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)
-
-    let conn = TestBufferStream.new(noop)
-    let peerId = randomPeerId()
-    conn.peerId = peerId
-    let peer = gossipSub.getPubSubPeer(peerId)
-
-    await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))
-
-    check:
-      gossipSub.gossipsub.len == gossipSub.topicsHigh
-      peer.behaviourPenalty > 0.0
-
-    await conn.close()
-    await gossipSub.switch.stop()
-
-  asyncTest "invalid message bytes":
-    let gossipSub = TestGossipSub.init(newStandardSwitch())
-
-    let peerId = randomPeerId()
-    let peer = gossipSub.getPubSubPeer(peerId)
-
-    expect(CatchableError):
-      await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])
-
-    await gossipSub.switch.stop()
-
-  asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
+  asyncTest "Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
     # This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
     # Expected: Both messages should be received.
     let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -154,7 +103,7 @@ suite "GossipSub Message Handling":

     await teardownTest(gossip0, gossip1)

-  asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
+  asyncTest "Discard IWANT replies when both messages individually exceed maxSize":
     # This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
     # Expected: No messages should be received.
     let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -181,7 +130,7 @@ suite "GossipSub Message Handling":

     await teardownTest(gossip0, gossip1)

-  asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
+  asyncTest "Process IWANT replies when both messages are below maxSize":
     # This test checks if two messages, both below the maxSize, are correctly processed and sent.
     # Expected: Both messages should be received.
     let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -208,7 +157,7 @@ suite "GossipSub Message Handling":

     await teardownTest(gossip0, gossip1)

-  asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
+  asyncTest "Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
     # This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
     # Expected: Only the smaller message should be received.
     let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -469,7 +418,7 @@ suite "GossipSub Message Handling":
       validatedCounter == 1
       sendCounter == 2

-  asyncTest "e2e - GossipSub send over mesh A -> B":
+  asyncTest "GossipSub send over mesh A -> B":
     var passed: Future[bool] = newFuture[bool]()
     proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"
@@ -499,7 +448,7 @@ suite "GossipSub Message Handling":
       gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
       not gossip2.fanout.hasPeerId("foobar", gossip1.peerInfo.peerId)

-  asyncTest "e2e - GossipSub should not send to source & peers who already seen":
+  asyncTest "GossipSub should not send to source & peers who already seen":
     # 3 nodes: A, B, C
     # A publishes, C relays, B is having a long validation
     # so B should not send to anyone
@@ -565,7 +514,7 @@ suite "GossipSub Message Handling":

     await bFinished

-  asyncTest "e2e - GossipSub send over floodPublish A -> B":
+  asyncTest "GossipSub send over floodPublish A -> B":
     var passed: Future[bool] = newFuture[bool]()
     proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"
@@ -595,7 +544,7 @@ suite "GossipSub Message Handling":
       "foobar" notin gossip2.gossipsub
       not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)

-  asyncTest "e2e - GossipSub floodPublish limit":
+  asyncTest "GossipSub floodPublish limit":
     let
       nodes = setupNodes(20)
       gossip1 = GossipSub(nodes[0])
@@ -607,7 +556,7 @@ suite "GossipSub Message Handling":
     await connectNodes(nodes[1 ..^ 1], nodes[0])
     await baseTestProcedure(nodes, gossip1, gossip1.parameters.dLow, 17)

-  asyncTest "e2e - GossipSub floodPublish limit with bandwidthEstimatebps = 0":
+  asyncTest "GossipSub floodPublish limit with bandwidthEstimatebps = 0":
     let
       nodes = setupNodes(20)
       gossip1 = GossipSub(nodes[0])
@@ -620,7 +569,7 @@ suite "GossipSub Message Handling":
     await connectNodes(nodes[1 ..^ 1], nodes[0])
     await baseTestProcedure(nodes, gossip1, nodes.len - 1, nodes.len - 1)

-  asyncTest "e2e - GossipSub with multiple peers":
+  asyncTest "GossipSub with multiple peers":
     var runs = 10

     let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
@@ -662,7 +611,7 @@ suite "GossipSub Message Handling":
     check:
       "foobar" in gossip.gossipsub

-  asyncTest "e2e - GossipSub with multiple peers (sparse)":
+  asyncTest "GossipSub with multiple peers (sparse)":
     var runs = 10

     let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
@@ -711,7 +660,7 @@ suite "GossipSub Message Handling":
     gossip.fanout.len == 0
     gossip.mesh["foobar"].len > 0

-  asyncTest "e2e - GossipSub with multiple peers - control deliver (sparse)":
+  asyncTest "GossipSub with multiple peers - control deliver (sparse)":
     var runs = 10

     let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
@@ -11,41 +11,16 @@

 import std/[sequtils]
 import stew/byteutils
-import utils
-import ../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
-import ../../libp2p/protocols/pubsub/rpc/[messages]
-import ../../libp2p/muxers/muxer
-import ../helpers, ../utils/[futures]
+import ../utils
+import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, pubsubpeer]
+import ../../../libp2p/protocols/pubsub/rpc/[messages]
+import ../../helpers
+import ../../utils/[futures]

-suite "GossipSub Scoring":
+suite "GossipSub Integration - Scoring":
   teardown:
     checkTrackers()

-  asyncTest "Disconnect bad peers":
-    let topic = "foobar"
-    var (gossipSub, conns, peers) =
-      setupGossipSubWithPeers(30, topic, populateGossipsub = true)
-    defer:
-      await teardownGossipSub(gossipSub, conns)
-
-    gossipSub.parameters.disconnectBadPeers = true
-    gossipSub.parameters.appSpecificWeight = 1.0
-
-    for i, peer in peers:
-      peer.appScore = gossipSub.parameters.graylistThreshold - 1
-      let conn = conns[i]
-      gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))
-
-    gossipSub.updateScores()
-
-    await sleepAsync(100.millis)
-
-    check:
-      # test our disconnect mechanics
-      gossipSub.gossipsub.peers(topic) == 0
-      # also ensure we cleanup properly the peersInIP table
-      gossipSub.peersInIP.len == 0
-
   asyncTest "Flood publish to all peers with score above threshold, regardless of subscription":
     let
       numberOfNodes = 3
tests/pubsub/integration/testpubsubintegration.nim (new file, 7 lines)
@@ -0,0 +1,7 @@
{.used.}

import
  testfloodsub, testgossipsubcontrolmessages, testgossipsubcustomconn,
  testgossipsubfanout, testgossipsubgossip, testgossipsubheartbeat,
  testgossipsubmeshmanagement, testgossipsubmessagecache, testgossipsubmessagehandling,
  testgossipsubscoring
tests/pubsub/testbehavior.nim (new file, 609 lines)
@@ -0,0 +1,609 @@
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils, tables]
|
||||
import stew/byteutils
|
||||
import utils
|
||||
import chronicles
|
||||
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
|
||||
import ../../libp2p/protocols/pubsub/rpc/[message]
|
||||
import ../helpers
|
||||
import ../utils/[futures]
|
||||
|
||||
suite "GossipSub Behavior":
|
||||
const
|
||||
topic = "foobar"
|
||||
MsgIdSuccess = "msg id gen success"
|
||||
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "handleIHave - peers with no budget should not request messages":
|
||||
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.subscribe(topic, voidTopicHandler)
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
# Add message to `gossipSub`'s message cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[^1].incl(id)
|
||||
|
||||
# Build an IHAVE message that contains the same message ID three times
|
||||
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
|
||||
|
||||
# Given the peer has no budget to request messages
|
||||
peer.iHaveBudget = 0
|
||||
|
||||
# When a peer makes an IHAVE request for the a message that `gossipSub` has
|
||||
let iwants = gossipSub.handleIHave(peer, @[msg])
|
||||
|
||||
# Then `gossipSub` should not generate an IWant message for the message,
|
||||
check:
|
||||
iwants.messageIDs.len == 0
|
||||
gossipSub.mcache.msgs.len == 1
|
||||
|
||||
asyncTest "handleIHave - peers with budget should request messages":
|
||||
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.subscribe(topic, voidTopicHandler)
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
# Add message to `gossipSub`'s message cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[^1].incl(id)
|
||||
|
||||
# Build an IHAVE message that contains the same message ID three times
|
||||
# If ids are repeated, only one request should be generated
|
||||
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
|
||||
|
||||
# Given the budget is not 0 (because it's not been overridden)
|
||||
check:
|
||||
peer.iHaveBudget > 0
|
||||
|
||||
# When a peer makes an IHAVE request for the a message that `gossipSub` does not have
|
||||
let iwants = gossipSub.handleIHave(peer, @[msg])
|
||||
|
||||
# Then `gossipSub` should generate an IWant message for the message
|
||||
check:
|
||||
iwants.messageIDs.len == 1
|
||||
gossipSub.mcache.msgs.len == 1
|
||||
|
||||
asyncTest "handleIHave - do not handle IHave if peer score is below GossipThreshold threshold":
|
||||
const gossipThreshold = -100.0
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# Given peer with score below GossipThreshold
|
||||
gossipSub.parameters.gossipThreshold = gossipThreshold
|
||||
peer.score = gossipThreshold - 100.0
|
||||
|
||||
# and IHave message
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
let msg = ControlIHave(topicID: topic, messageIDs: @[id])
|
||||
|
||||
# When IHave is handled
|
||||
let iWant = gossipSub.handleIHave(peer, @[msg])
|
||||
|
||||
# Then IHave is ignored
|
||||
check:
|
||||
iWant.messageIDs.len == 0
|
||||
|
||||
asyncTest "handleIWant - peers with budget should request messages":
|
||||
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.subscribe(topic, voidTopicHandler)
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
# Add message to `gossipSub`'s message cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[^1].incl(id)
|
||||
|
||||
# Build an IWANT message that contains the same message ID three times
|
||||
# If ids are repeated, only one request should be generated
|
||||
let msg = ControlIWant(messageIDs: @[id, id, id])
|
||||
|
||||
# When a peer makes an IWANT request for the a message that `gossipSub` has
|
||||
let messages = gossipSub.handleIWant(peer, @[msg])
|
||||
|
||||
# Then `gossipSub` should return the message
|
||||
check:
|
||||
messages.len == 1
|
||||
gossipSub.mcache.msgs.len == 1
|
||||
|
||||
asyncTest "handleIWant - do not handle IWant if peer score is below GossipThreshold threshold":
|
||||
const gossipThreshold = -100.0
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# Given peer with score below GossipThreshold
|
||||
gossipSub.parameters.gossipThreshold = gossipThreshold
|
||||
peer.score = gossipThreshold - 100.0
|
||||
|
||||
# and IWant message with MsgId in mcache and sentIHaves
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message())
|
||||
peer.sentIHaves[0].incl(id)
|
||||
let msg = ControlIWant(messageIDs: @[id])
|
||||
|
||||
# When IWant is handled
|
||||
let messages = gossipSub.handleIWant(peer, @[msg])
|
||||
|
||||
# Then IWant is ignored
|
||||
check:
|
||||
messages.len == 0
|
||||
|
||||
asyncTest "handleIDontWant - Max IDONTWANT messages per heartbeat per peer":
|
||||
# Given GossipSub node with 1 peer
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
let peer = peers[0]
|
||||
|
||||
# And sequence of iDontWants with more messages than max number (1200)
|
||||
proc generateMessageIds(count: int): seq[MessageId] =
|
||||
return (0 ..< count).mapIt(("msg_id_" & $it & $Moment.now()).toBytes())
|
||||
|
||||
let iDontWants =
|
||||
@[
|
||||
ControlIWant(messageIDs: generateMessageIds(600)),
|
||||
ControlIWant(messageIDs: generateMessageIds(600)),
|
||||
]
|
||||
|
||||
# When node handles iDontWants
|
||||
gossipSub.handleIDontWant(peer, iDontWants)
|
||||
|
||||
# Then it saves max IDontWantMaxCount messages in the history and the rest is dropped
|
||||
check:
|
||||
peer.iDontWants[0].len == IDontWantMaxCount
|
||||
|
||||
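The cap itself is simple bookkeeping. A minimal sketch of the intended behavior, assuming a cap of 1200 like `IDontWantMaxCount` above (illustrative, not the library code):

import std/sequtils

const maxCount = 1200 # stands in for IDontWantMaxCount

proc recordIDontWants(history: var seq[int], incoming: openArray[int]) =
  # stop saving ids once the per-heartbeat window is full
  for id in incoming:
    if history.len >= maxCount:
      break
    history.add(id)

var window: seq[int]
recordIDontWants(window, toSeq(0 ..< 600))
recordIDontWants(window, toSeq(600 ..< 1300))
doAssert window.len == maxCount
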
asyncTest "handlePrune - do not trigger PeerExchange on Prune if peer score is below GossipThreshold threshold":
|
||||
const gossipThreshold = -100.0
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# Given peer with score below GossipThreshold
|
||||
gossipSub.parameters.gossipThreshold = gossipThreshold
|
||||
peer.score = gossipThreshold - 100.0
|
||||
|
||||
# and RoutingRecordsHandler added
|
||||
var routingRecordsFut = newFuture[void]()
|
||||
gossipSub.routingRecordsHandler.add(
|
||||
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
|
||||
routingRecordsFut.complete()
|
||||
)
|
||||
|
||||
# and Prune message
|
||||
let msg = ControlPrune(
|
||||
topicID: topic, peers: @[PeerInfoMsg(peerId: peer.peerId)], backoff: 123'u64
|
||||
)
|
||||
|
||||
# When Prune is handled
|
||||
gossipSub.handlePrune(peer, @[msg])
|
||||
|
||||
# Then handler is not triggered
|
||||
let result = await waitForState(routingRecordsFut, HEARTBEAT_TIMEOUT)
|
||||
check:
|
||||
result.isCancelled()
|
||||
|
||||
asyncTest "handleGraft - do not graft when peer score below PublishThreshold threshold":
|
||||
const publishThreshold = -100.0
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# Given peer with score below publishThreshold
|
||||
gossipSub.parameters.publishThreshold = publishThreshold
|
||||
peer.score = publishThreshold - 100.0
|
||||
|
||||
# and Graft message
|
||||
let msg = ControlGraft(topicID: topic)
|
||||
|
||||
# When Graft is handled
|
||||
let prunes = gossipSub.handleGraft(peer, @[msg])
|
||||
|
||||
# Then peer is ignored and not added to prunes
|
||||
check:
|
||||
gossipSub.mesh[topic].len == 0
|
||||
prunes.len == 0
|
||||
|
||||
asyncTest "handleGraft - penalizes direct peer attempting to graft":
|
||||
# Given a GossipSub instance with one direct peer
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# And the peer is configured as a direct peer
|
||||
gossipSub.parameters.directPeers[peer.peerId] = @[]
|
||||
|
||||
# And initial behavior penalty is zero
|
||||
check:
|
||||
peer.behaviourPenalty == 0.0
|
||||
|
||||
# When a GRAFT message is handled
|
||||
let graftMsg = ControlGraft(topicID: topic)
|
||||
let prunes = gossipSub.handleGraft(peer, @[graftMsg])
|
||||
|
||||
# Then the peer is penalized with behavior penalty
|
||||
# And receives PRUNE in response
|
||||
check:
|
||||
peer.behaviourPenalty == 0.1
|
||||
prunes.len == 1
|
||||
|
||||
asyncTest "handleGraft - penalizes peer for grafting during backoff period":
|
||||
# Given a GossipSub instance with one peer
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# And the peer is in backoff period for the topic
|
||||
gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] =
|
||||
Moment.now() + 1.hours
|
||||
|
||||
# And initial behavior penalty is zero
|
||||
check:
|
||||
peer.behaviourPenalty == 0.0
|
||||
|
||||
# When a GRAFT message is handled
|
||||
let graftMsg = ControlGraft(topicID: topic)
|
||||
let prunes = gossipSub.handleGraft(peer, @[graftMsg])
|
||||
|
||||
# Then the peer is penalized with behavior penalty
|
||||
# And receives PRUNE in response
|
||||
check:
|
||||
peer.behaviourPenalty == 0.1
|
||||
prunes.len == 1
|
||||
|
||||
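Both GRAFT misbehaviors above land in the same counter. A minimal sketch of the accumulate-then-square pattern, assuming the 0.1 per-offense increment these tests assert and a behaviourPenaltyWeight of -0.25 (both values are test assumptions, not universal defaults):

const graftPenalty = 0.1 # per-offense increment, as asserted above

proc penalize(behaviourPenalty: var float) =
  behaviourPenalty += graftPenalty

var penalty = 0.0
penalize(penalty) # direct-peer graft
penalize(penalty) # graft during backoff
doAssert abs(penalty - 0.2) < 1e-9
# at scoring time the accumulated value is squared and weighted:
let term = penalty * penalty * -0.25 # behaviourPenaltyWeight assumed -0.25
doAssert abs(term + 0.01) < 1e-9
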
asyncTest "replenishFanout - Degree Lo":
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
check gossipSub.gossipsub[topic].len == 15
|
||||
gossipSub.replenishFanout(topic)
|
||||
check gossipSub.fanout[topic].len == gossipSub.parameters.d
|
||||
|
||||
asyncTest "dropFanoutPeers - drop expired fanout topics":
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(6, topic, populateGossipsub = true, populateFanout = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
|
||||
await sleepAsync(5.millis) # allow the topic to expire
|
||||
|
||||
check gossipSub.fanout[topic].len == gossipSub.parameters.d
|
||||
|
||||
gossipSub.dropFanoutPeers()
|
||||
check topic notin gossipSub.fanout
|
||||
|
||||
asyncTest "dropFanoutPeers - leave unexpired fanout topics":
|
||||
const
|
||||
topic1 = "foobar1"
|
||||
topic2 = "foobar2"
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
|
||||
6, @[topic1, topic2], populateGossipsub = true, populateFanout = true
|
||||
)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
|
||||
gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
|
||||
await sleepAsync(5.millis) # allow first topic to expire
|
||||
|
||||
check gossipSub.fanout[topic1].len == gossipSub.parameters.d
|
||||
check gossipSub.fanout[topic2].len == gossipSub.parameters.d
|
||||
|
||||
gossipSub.dropFanoutPeers()
|
||||
check topic1 notin gossipSub.fanout
|
||||
check topic2 in gossipSub.fanout
|
||||
|
||||
asyncTest "getGossipPeers - should gather up to degree D non intersecting peers":
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(45, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i in 0 ..< 30:
|
||||
let peer = peers[i]
|
||||
if i mod 2 == 0:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
else:
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
# generate gossipsub (free standing) peers
|
||||
for i in 30 ..< 45:
|
||||
let peer = peers[i]
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = conns[i]
|
||||
inc seqno
|
||||
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
check gossipSub.fanout[topic].len == 15
|
||||
check gossipSub.mesh[topic].len == 15
|
||||
check gossipSub.gossipsub[topic].len == 15
|
||||
|
||||
let gossipPeers = gossipSub.getGossipPeers()
|
||||
check gossipPeers.len == gossipSub.parameters.d
|
||||
for p in gossipPeers.keys:
|
||||
check not gossipSub.fanout.hasPeerId(topic, p.peerId)
|
||||
check not gossipSub.mesh.hasPeerId(topic, p.peerId)
|
||||
|
||||
asyncTest "getGossipPeers - should not crash on missing topics in mesh":
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i, peer in peers:
|
||||
if i mod 2 == 0:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
else:
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = conns[i]
|
||||
inc seqno
|
||||
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let gossipPeers = gossipSub.getGossipPeers()
|
||||
check gossipPeers.len == gossipSub.parameters.d
|
||||
|
||||
asyncTest "getGossipPeers - should not crash on missing topics in fanout":
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i, peer in peers:
|
||||
if i mod 2 == 0:
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
gossipSub.grafted(peer, topic)
|
||||
else:
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = conns[i]
|
||||
inc seqno
|
||||
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let gossipPeers = gossipSub.getGossipPeers()
|
||||
check gossipPeers.len == gossipSub.parameters.d
|
||||
|
||||
asyncTest "getGossipPeers - should not crash on missing topics in gossip":
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i, peer in peers:
|
||||
if i mod 2 == 0:
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
gossipSub.grafted(peer, topic)
|
||||
else:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = conns[i]
|
||||
inc seqno
|
||||
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let gossipPeers = gossipSub.getGossipPeers()
|
||||
check gossipPeers.len == 0
|
||||
|
||||
asyncTest "getGossipPeers - do not select peer for IHave broadcast if peer score is below GossipThreshold threshold":
|
||||
const gossipThreshold = -100.0
|
||||
let
|
||||
(gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(1, topic, populateGossipsub = true)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# Given peer with score below GossipThreshold
|
||||
gossipSub.parameters.gossipThreshold = gossipThreshold
|
||||
peer.score = gossipThreshold - 100.0
|
||||
|
||||
# and message in cache
|
||||
let id = @[0'u8, 1, 2, 3]
|
||||
gossipSub.mcache.put(id, Message(topic: topic))
|
||||
|
||||
# When Node selects peers for IHave broadcast
|
||||
let gossipPeers = gossipSub.getGossipPeers()
|
||||
|
||||
# Then peer is not selected
|
||||
check:
|
||||
gossipPeers.len == 0
|
||||
|
||||
asyncTest "rebalanceMesh - Degree Lo":
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len == gossipSub.parameters.d
|
||||
|
||||
asyncTest "rebalanceMesh - bad peers":
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
var scoreLow = -11'f64
|
||||
for peer in peers:
|
||||
peer.score = scoreLow
|
||||
scoreLow += 1.0
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# low score peers should not be in mesh, that's why the count must be 4
|
||||
check gossipSub.mesh[topic].len == 4
|
||||
for peer in gossipSub.mesh[topic]:
|
||||
check peer.score >= 0.0
|
||||
|
||||
asyncTest "rebalanceMesh - Degree Hi":
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
check gossipSub.mesh[topic].len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len ==
|
||||
gossipSub.parameters.d + gossipSub.parameters.dScore
|
||||
|
||||
asyncTest "rebalanceMesh - fail due to backoff":
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
for peer in peers:
|
||||
gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
|
||||
peer.peerId, Moment.now() + 1.hours
|
||||
)
|
||||
let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
|
||||
# there must be a control prune due to violation of backoff
|
||||
check prunes.len != 0
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# expect 0 since they are all backing off
|
||||
check gossipSub.mesh[topic].len == 0
|
||||
|
||||
asyncTest "rebalanceMesh - fail due to backoff - remote":
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len != 0
|
||||
|
||||
for peer in peers:
|
||||
gossipSub.handlePrune(
|
||||
peer,
|
||||
@[
|
||||
ControlPrune(
|
||||
topicID: topic,
|
||||
peers: @[],
|
||||
backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
# expect topic cleaned up since they are all pruned
|
||||
check topic notin gossipSub.mesh
|
||||
|
||||
asyncTest "rebalanceMesh - Degree Hi - audit scenario":
|
||||
const
|
||||
numInPeers = 6
|
||||
numOutPeers = 7
|
||||
totalPeers = numInPeers + numOutPeers
|
||||
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
|
||||
totalPeers, topic, populateGossipsub = true, populateMesh = true
|
||||
)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.parameters.dScore = 4
|
||||
gossipSub.parameters.d = 6
|
||||
gossipSub.parameters.dOut = 3
|
||||
gossipSub.parameters.dHigh = 12
|
||||
gossipSub.parameters.dLow = 4
|
||||
|
||||
for i in 0 ..< numInPeers:
|
||||
let conn = conns[i]
|
||||
let peer = peers[i]
|
||||
conn.transportDir = Direction.In
|
||||
peer.score = 40.0
|
||||
|
||||
for i in numInPeers ..< totalPeers:
|
||||
let conn = conns[i]
|
||||
let peer = peers[i]
|
||||
conn.transportDir = Direction.Out
|
||||
peer.score = 10.0
|
||||
|
||||
check gossipSub.mesh[topic].len == 13
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# ensure we are above dlow
|
||||
check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
|
||||
var outbound = 0
|
||||
for peer in gossipSub.mesh[topic]:
|
||||
if peer.sendConn.transportDir == Direction.Out:
|
||||
inc outbound
|
||||
# ensure we give priority and keep at least dOut outbound peers
|
||||
check outbound >= gossipSub.parameters.dOut
|
||||
|
||||
asyncTest "rebalanceMesh - Degree Hi - dScore controls number of peers to retain by score when pruning":
|
||||
# Given GossipSub node starting with 13 peers in mesh
|
||||
const totalPeers = 13
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
|
||||
totalPeers, topic, populateGossipsub = true, populateMesh = true
|
||||
)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# And mesh is larger than dHigh
|
||||
gossipSub.parameters.dLow = 4
|
||||
gossipSub.parameters.d = 6
|
||||
gossipSub.parameters.dHigh = 8
|
||||
gossipSub.parameters.dOut = 3
|
||||
gossipSub.parameters.dScore = 13
|
||||
|
||||
check gossipSub.mesh[topic].len == totalPeers
|
||||
|
||||
# When mesh is rebalanced
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
|
||||
# Then prunning is not triggered when mesh is not larger than dScore
|
||||
check gossipSub.mesh[topic].len == totalPeers
|
||||
314 tests/pubsub/testgossipsub.nim Normal file
@@ -0,0 +1,314 @@
# Nim-LibP2P
# Copyright (c) 2023-2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import chronos/rateLimit
import stew/byteutils
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, pubsubpeer]
import ../../libp2p/protocols/pubsub/rpc/[message, protobuf]
import ../helpers

suite "GossipSub":
  const topic = "foobar"

  teardown:
    checkTrackers()

  asyncTest "subscribe/unsubscribeAll":
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # test via dynamic dispatch
    gossipSub.PubSub.subscribe(topic, voidTopicHandler)

    check:
      gossipSub.topics.contains(topic)
      gossipSub.gossipsub[topic].len() > 0
      gossipSub.mesh[topic].len() > 0

    # test via dynamic dispatch
    gossipSub.PubSub.unsubscribeAll(topic)

    check:
      topic notin gossipSub.topics # not in local topics
      topic notin gossipSub.mesh # not in mesh
      topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)

  asyncTest "Drop messages of topics without subscription":
    var (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      let peer = peers[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("bar" & $i).toBytes(), topic, some(seqno))
      await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    check gossipSub.mcache.msgs.len == 0

  asyncTest "subscription limits":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    gossipSub.topicsHigh = 10

    var tooManyTopics: seq[string]
    for i in 0 .. gossipSub.topicsHigh + 10:
      tooManyTopics &= topic & $i
    let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)

    let conn = TestBufferStream.new(noop)
    let peerId = randomPeerId()
    conn.peerId = peerId
    let peer = gossipSub.getPubSubPeer(peerId)

    await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))

    check:
      gossipSub.gossipsub.len == gossipSub.topicsHigh
      peer.behaviourPenalty > 0.0

    await conn.close()

  asyncTest "invalid message bytes":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let peerId = randomPeerId()
    let peer = gossipSub.getPubSubPeer(peerId)

    expect(CatchableError):
      await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])

  asyncTest "Peer is disconnected and rate limit is hit when overhead rate limit is exceeded":
    # Given a GossipSub instance with one peer
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
      rateLimitHits = currentRateLimitHits("unknown")
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And signature verification disabled to avoid message being dropped
    gossipSub.verifySignature = false

    # And peer disconnection is enabled when rate limit is exceeded
    gossipSub.parameters.disconnectPeerAboveRateLimit = true

    # And low overheadRateLimit is set
    const
      bytes = 1
      interval = 1.millis
      overheadRateLimit = Opt.some((bytes, interval))

    gossipSub.parameters.overheadRateLimit = overheadRateLimit
    peer.overheadRateLimitOpt = Opt.some(TokenBucket.new(bytes, interval))

    # And a message is created that will exceed the overhead rate limit
    var msg = Message.init(peer.peerId, ("bar").toBytes(), topic, some(1'u64))

    # When the GossipSub processes the message
    # Then it throws an exception due to peer disconnection
    expect(PeerRateLimitError):
      await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    # And the rate limit hit counter is incremented
    check:
      currentRateLimitHits("unknown") == rateLimitHits + 1

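The overhead limiter in these tests is a plain token bucket. A minimal sketch of how such a budget behaves, assuming chronos' TokenBucket API (`TokenBucket.new(budget, fillDuration)` and `tryConsume`); treat the exact signatures as an assumption:

import chronos, chronos/ratelimit

# budget of 1 byte refilled every millisecond, as configured above
let bucket = TokenBucket.new(1, 1.millis)
doAssert bucket.tryConsume(1)      # the first byte fits the budget
doAssert not bucket.tryConsume(50) # a larger frame immediately overdraws it
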
asyncTest "Peer is disconnected and rate limit is hit when overhead rate limit is exceeded when decodeRpcMsg fails":
|
||||
# Given a GossipSub instance with one peer
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
rateLimitHits = currentRateLimitHits("unknown")
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# And peer disconnection is enabled when rate limit is exceeded
|
||||
gossipSub.parameters.disconnectPeerAboveRateLimit = true
|
||||
|
||||
# And low overheadRateLimit is set
|
||||
const
|
||||
bytes = 1
|
||||
interval = 1.millis
|
||||
overheadRateLimit = Opt.some((bytes, interval))
|
||||
|
||||
gossipSub.parameters.overheadRateLimit = overheadRateLimit
|
||||
peer.overheadRateLimitOpt = Opt.some(TokenBucket.new(bytes, interval))
|
||||
|
||||
# When invalid RPC data is sent that fails to decode
|
||||
expect(PeerRateLimitError):
|
||||
await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])
|
||||
|
||||
# And the rate limit hit counter is incremented
|
||||
check:
|
||||
currentRateLimitHits("unknown") == rateLimitHits + 1
|
||||
|
||||
asyncTest "Peer is punished and rate limit is hit when overhead rate limit is exceeded when decodeRpcMsg fails":
|
||||
# Given a GossipSub instance with one peer
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
rateLimitHits = currentRateLimitHits("unknown")
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# And peer disconnection is disabled when rate limit is exceeded to not raise PeerRateLimitError
|
||||
gossipSub.parameters.disconnectPeerAboveRateLimit = false
|
||||
|
||||
# And low overheadRateLimit is set
|
||||
const
|
||||
bytes = 1
|
||||
interval = 1.millis
|
||||
overheadRateLimit = Opt.some((bytes, interval))
|
||||
|
||||
gossipSub.parameters.overheadRateLimit = overheadRateLimit
|
||||
peer.overheadRateLimitOpt = Opt.some(TokenBucket.new(bytes, interval))
|
||||
|
||||
# And initial behavior penalty is zero
|
||||
check:
|
||||
peer.behaviourPenalty == 0.0
|
||||
|
||||
# When invalid RPC data is sent that fails to decode
|
||||
expect(PeerMessageDecodeError):
|
||||
await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])
|
||||
|
||||
# And the rate limit hit counter is incremented
|
||||
check:
|
||||
currentRateLimitHits("unknown") == rateLimitHits + 1
|
||||
peer.behaviourPenalty == 0.1
|
||||
|
||||
asyncTest "Peer is punished when decodeRpcMsg fails":
|
||||
# Given a GossipSub instance with one peer
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# And initial behavior penalty is zero
|
||||
check:
|
||||
peer.behaviourPenalty == 0.0
|
||||
|
||||
# When invalid RPC data is sent that fails to decode
|
||||
expect(PeerMessageDecodeError):
|
||||
await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])
|
||||
|
||||
# Then the peer is penalized with behavior penalty
|
||||
check:
|
||||
peer.behaviourPenalty == 0.1
|
||||
|
||||
asyncTest "Peer is punished when message contains invalid sequence number":
|
||||
# Given a GossipSub instance with one peer
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# And signature verification disabled to avoid message being dropped
|
||||
gossipSub.verifySignature = false
|
||||
|
||||
# And a message is created with invalid sequence number
|
||||
var msg = Message.init(peer.peerId, ("bar").toBytes(), topic, some(1'u64))
|
||||
msg.seqno = ("1").toBytes()
|
||||
|
||||
# When the GossipSub processes the message
|
||||
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
|
||||
|
||||
# Then the peer's invalidMessageDeliveries counter is incremented
|
||||
gossipSub.peerStats.withValue(peer.peerId, stats):
|
||||
check:
|
||||
stats[].topicInfos[topic].invalidMessageDeliveries == 1.0
|
||||
|
||||
asyncTest "Peer is punished when message id generation fails":
|
||||
# Given a GossipSub instance with one peer
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# And signature verification disabled to avoid message being dropped
|
||||
gossipSub.verifySignature = false
|
||||
|
||||
# And a custom msgIdProvider is set that always returns an error
|
||||
func customMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
|
||||
err(ValidationResult.Reject)
|
||||
gossipSub.msgIdProvider = customMsgIdProvider
|
||||
|
||||
# And a message is created
|
||||
var msg = Message.init(peer.peerId, ("bar").toBytes(), topic, some(1'u64))
|
||||
|
||||
# When the GossipSub processes the message
|
||||
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
|
||||
|
||||
# Then the peer's invalidMessageDeliveries counter is incremented
|
||||
gossipSub.peerStats.withValue(peer.peerId, stats):
|
||||
check:
|
||||
stats[].topicInfos[topic].invalidMessageDeliveries == 1.0
|
||||
|
||||
asyncTest "Peer is punished when signature verification fails":
|
||||
# Given a GossipSub instance with one peer
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# And signature verification enabled
|
||||
gossipSub.verifySignature = true
|
||||
|
||||
# And a message without signature is created
|
||||
var msg = Message.init(peer.peerId, ("bar").toBytes(), topic, some(1'u64))
|
||||
|
||||
# When the GossipSub processes the message
|
||||
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
|
||||
|
||||
# Then the peer's invalidMessageDeliveries counter is incremented
|
||||
gossipSub.peerStats.withValue(peer.peerId, stats):
|
||||
check:
|
||||
stats[].topicInfos[topic].invalidMessageDeliveries == 1.0
|
||||
|
||||
asyncTest "Peer is punished when message validation is rejected":
|
||||
# Given a GossipSub instance with one peer
|
||||
let
|
||||
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
peer = peers[0]
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# And signature verification disabled to avoid message being dropped earlier
|
||||
gossipSub.verifySignature = false
|
||||
|
||||
# And a custom validator that always rejects messages
|
||||
proc rejectingValidator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
return ValidationResult.Reject
|
||||
|
||||
# Register the rejecting validator for the topic
|
||||
gossipSub.addValidator(topic, rejectingValidator)
|
||||
|
||||
# And a message is created
|
||||
var msg = Message.init(peer.peerId, ("bar").toBytes(), topic, some(1'u64))
|
||||
|
||||
# When the GossipSub processes the message
|
||||
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
|
||||
|
||||
# Then the peer's invalidMessageDeliveries counter is incremented
|
||||
gossipSub.peerStats.withValue(peer.peerId, stats):
|
||||
check:
|
||||
stats[].topicInfos[topic].invalidMessageDeliveries == 1.0
|
||||
@@ -1,7 +1,7 @@
{.used.}

import
  testgossipsubcontrolmessages, testgossipsubfanout, testgossipsubcustomconn,
  testgossipsubgossip, testgossipsubheartbeat, testgossipsubmeshmanagement,
  testgossipsubmessagecache, testgossipsubmessagehandling, testgossipsubparams,
  testgossipsubscoring, testfloodsub, testmcache, testtimedcache, testmessage
  testbehavior, testgossipsub, testgossipsubparams, testmcache, testmessage,
  testscoring, testtimedcache

import ./integration/testpubsubintegration

477 tests/pubsub/testscoring.nim Normal file
@@ -0,0 +1,477 @@
# Nim-LibP2P
# Copyright (c) 2023-2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import chronos
import math
import std/[options, tables, sets]
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, pubsubpeer]
import ../../libp2p/protocols/pubsub/gossipsub/[types, scoring]
import ../../libp2p/muxers/muxer
import ../../libp2p/[multiaddress, peerid]
import ../helpers

suite "GossipSub Scoring":
  const topic = "foobar"

  teardown:
    checkTrackers()

  asyncTest "Disconnect bad peers":
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(30, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.parameters.disconnectBadPeers = true
    gossipSub.parameters.appSpecificWeight = 1.0

    for i, peer in peers:
      peer.appScore = gossipSub.parameters.graylistThreshold - 1
      let conn = conns[i]
      gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))

    gossipSub.updateScores()

    await sleepAsync(100.millis)

    check:
      # test our disconnect mechanics
      gossipSub.gossipsub.peers(topic) == 0
      # also ensure we cleanup properly the peersInIP table
      gossipSub.peersInIP.len == 0

asyncTest "Time in mesh scoring (P1)":
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(3, topic, populateMesh = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.topicParams[topic] = TopicParams(
|
||||
topicWeight: 1.0,
|
||||
timeInMeshWeight: 1.0,
|
||||
timeInMeshQuantum: 1.seconds,
|
||||
timeInMeshCap: 10.0,
|
||||
)
|
||||
|
||||
let now = Moment.now()
|
||||
|
||||
# Set different mesh times for peers
|
||||
gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(
|
||||
inMesh: true, graftTime: now - 2.seconds # seconds in mesh
|
||||
)
|
||||
|
||||
gossipSub.withPeerStats(peers[1].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(
|
||||
inMesh: true,
|
||||
graftTime: now - 12.seconds,
|
||||
# seconds in mesh (should be capped at timeInMeshCap)
|
||||
)
|
||||
|
||||
gossipSub.withPeerStats(peers[2].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(
|
||||
inMesh: false # Not in mesh
|
||||
)
|
||||
|
||||
gossipSub.updateScores()
|
||||
|
||||
# Score calculation breakdown:
|
||||
# P1 formula: min(meshTime / timeInMeshQuantum, timeInMeshCap) * timeInMeshWeight * topicWeight
|
||||
|
||||
check:
|
||||
# Peer 0: min(2.0s / 1s, 10.0) * 1.0 * 1.0 = 2.0
|
||||
round(peers[0].score, 1) == 2.0
|
||||
# Peer 1: min(12.0s / 1s, 10.0) * 1.0 * 1.0 = 10.0 (capped at timeInMeshCap)
|
||||
round(peers[1].score, 1) == 10.0
|
||||
# Peer 2: not in mesh, score should be 0
|
||||
round(peers[2].score, 1) == 0.0
|
||||
|
||||
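The P1 term above is a pure function of grafted time. A runnable sketch of the formula from the comment, with plain float seconds standing in for chronos Durations:

func p1(meshTimeSec, quantumSec, cap, weight, topicWeight: float): float =
  min(meshTimeSec / quantumSec, cap) * weight * topicWeight

doAssert p1(2.0, 1.0, 10.0, 1.0, 1.0) == 2.0   # peer 0
doAssert p1(12.0, 1.0, 10.0, 1.0, 1.0) == 10.0 # peer 1, capped at timeInMeshCap
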
asyncTest "First message deliveries scoring (P2)":
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(3, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.topicParams[topic] = TopicParams(
|
||||
topicWeight: 1.0,
|
||||
firstMessageDeliveriesWeight: 2.0,
|
||||
firstMessageDeliveriesDecay: 0.5,
|
||||
)
|
||||
|
||||
# Set different first message delivery counts
|
||||
gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(firstMessageDeliveries: 4.0)
|
||||
|
||||
gossipSub.withPeerStats(peers[1].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(firstMessageDeliveries: 0.0)
|
||||
|
||||
gossipSub.withPeerStats(peers[2].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(firstMessageDeliveries: 2.0)
|
||||
|
||||
gossipSub.updateScores()
|
||||
|
||||
# Check scores: firstMessageDeliveries * weight
|
||||
check:
|
||||
round(peers[0].score, 1) == 8.0 # 4.0 * 2.0
|
||||
round(peers[1].score, 1) == 0.0 # 0.0 * 2.0
|
||||
round(peers[2].score, 1) == 4.0 # 2.0 * 2.0
|
||||
|
||||
# Check decay was applied
|
||||
gossipSub.peerStats.withValue(peers[0].peerId, stats):
|
||||
check:
|
||||
round(stats[].topicInfos[topic].firstMessageDeliveries, 1) == 2.0 # 4.0 * 0.5
|
||||
|
||||
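P2 is linear in the delivery counter, and the counter itself decays multiplicatively on every scoring pass. A minimal sketch of both steps, using the parameters from this test:

func p2(firstDeliveries, weight, topicWeight: float): float =
  firstDeliveries * weight * topicWeight

doAssert p2(4.0, 2.0, 1.0) == 8.0 # peer 0 above

var deliveries = 4.0
deliveries *= 0.5 # firstMessageDeliveriesDecay applied after scoring
doAssert deliveries == 2.0
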
asyncTest "Mesh message deliveries scoring (P3)":
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(3, topic, populateMesh = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
let now = Moment.now()
|
||||
gossipSub.topicParams[topic] = TopicParams(
|
||||
topicWeight: 1.0,
|
||||
meshMessageDeliveriesWeight: -1.0,
|
||||
meshMessageDeliveriesThreshold: 4.0,
|
||||
meshMessageDeliveriesActivation: 1.seconds,
|
||||
meshMessageDeliveriesDecay: 0.5,
|
||||
)
|
||||
|
||||
# Set up peers with different mesh message delivery counts
|
||||
gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(
|
||||
inMesh: true,
|
||||
graftTime: now - 2.seconds,
|
||||
meshMessageDeliveries: 2.0, # Below threshold
|
||||
meshMessageDeliveriesActive: true,
|
||||
)
|
||||
|
||||
gossipSub.withPeerStats(peers[1].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(
|
||||
inMesh: true,
|
||||
graftTime: now - 2.seconds,
|
||||
meshMessageDeliveries: 6.0, # Above threshold
|
||||
meshMessageDeliveriesActive: true,
|
||||
)
|
||||
|
||||
gossipSub.withPeerStats(peers[2].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(
|
||||
inMesh: true,
|
||||
graftTime: now - 500.milliseconds, # Recently grafted, not active yet
|
||||
meshMessageDeliveries: 2.0,
|
||||
)
|
||||
|
||||
gossipSub.updateScores()
|
||||
|
||||
check:
|
||||
# Peer 0: deficit = 4 - 2 = 2, penalty = 2^2 * -1 = -4
|
||||
round(peers[0].score, 1) == -4.0
|
||||
# Peer 1: above threshold, no penalty
|
||||
round(peers[1].score, 1) == 0.0
|
||||
# Peer 2: not active yet, no penalty
|
||||
round(peers[2].score, 1) == 0.0
|
||||
|
||||
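The quadratic deficit from the comments above can be written out directly. A sketch, assuming the activation gate has already passed:

func p3(deliveries, threshold, weight, topicWeight: float): float =
  # only a deficit below the threshold is penalized, quadratically
  if deliveries >= threshold:
    0.0
  else:
    let deficit = threshold - deliveries
    deficit * deficit * weight * topicWeight

doAssert p3(2.0, 4.0, -1.0, 1.0) == -4.0 # peer 0: deficit of 2, squared
doAssert p3(6.0, 4.0, -1.0, 1.0) == 0.0  # peer 1: above threshold
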
asyncTest "Mesh failure penalty scoring (P3b)":
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(2, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.topicParams[topic] = TopicParams(
|
||||
topicWeight: 1.0, meshFailurePenaltyWeight: -2.0, meshFailurePenaltyDecay: 0.5
|
||||
)
|
||||
|
||||
# Set mesh failure penalty
|
||||
gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(meshFailurePenalty: 2.0)
|
||||
|
||||
gossipSub.withPeerStats(peers[1].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(meshFailurePenalty: 0.0)
|
||||
|
||||
gossipSub.updateScores()
|
||||
|
||||
# Check penalty application
|
||||
check:
|
||||
round(peers[0].score, 1) == -4.0 # 2.0 * -2.0
|
||||
round(peers[1].score, 1) == 0.0
|
||||
|
||||
# Check decay was applied
|
||||
gossipSub.peerStats.withValue(peers[0].peerId, stats):
|
||||
check:
|
||||
round(stats[].topicInfos[topic].meshFailurePenalty, 1) == 1.0 # 2.0 * 0.5
|
||||
|
||||
asyncTest "Invalid message deliveries scoring (P4)":
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(2, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.topicParams[topic] = TopicParams(
|
||||
topicWeight: 1.0,
|
||||
invalidMessageDeliveriesWeight: -4.0,
|
||||
invalidMessageDeliveriesDecay: 0.5,
|
||||
)
|
||||
|
||||
# Set invalid message deliveries
|
||||
gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(invalidMessageDeliveries: 2.0)
|
||||
|
||||
gossipSub.withPeerStats(peers[1].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(invalidMessageDeliveries: 0.0)
|
||||
|
||||
gossipSub.updateScores()
|
||||
|
||||
# Check penalty: 2^2 * -4 = -16
|
||||
check:
|
||||
round(peers[0].score, 1) == -16.0
|
||||
round(peers[1].score, 1) == 0.0
|
||||
|
||||
# Check decay was applied
|
||||
gossipSub.peerStats.withValue(peers[0].peerId, stats):
|
||||
check:
|
||||
round(stats[].topicInfos[topic].invalidMessageDeliveries, 1) == 1.0 # 2.0 * 0.5
|
||||
|
||||
asyncTest "App-specific scoring":
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(3, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.parameters.appSpecificWeight = 0.5
|
||||
|
||||
# Set different app scores
|
||||
peers[0].appScore = 8.0
|
||||
peers[1].appScore = -6.0
|
||||
peers[2].appScore = 0.0
|
||||
|
||||
gossipSub.updateScores()
|
||||
|
||||
check:
|
||||
round(peers[0].score, 1) == 4.0 # 8.0 * 0.5
|
||||
round(peers[1].score, 1) == -3.0 # -6.0 * 0.5
|
||||
round(peers[2].score, 1) == 0.0 # 0.0 * 0.5
|
||||
|
||||
asyncTest "Behaviour penalty scoring":
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(3, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.parameters.behaviourPenaltyWeight = -0.25
|
||||
gossipSub.parameters.behaviourPenaltyDecay = 0.5
|
||||
|
||||
# Set different behaviour penalties
|
||||
peers[0].behaviourPenalty = 4.0
|
||||
peers[1].behaviourPenalty = 2.0
|
||||
peers[2].behaviourPenalty = 0.0
|
||||
|
||||
gossipSub.updateScores()
|
||||
|
||||
# Check penalty: penalty^2 * weight
|
||||
check:
|
||||
round(peers[0].score, 1) == -4.0 # 4^2 * -0.25 = -4.0
|
||||
round(peers[1].score, 1) == -1.0 # 2^2 * -0.25 = -1.0
|
||||
round(peers[2].score, 1) == 0.0 # 0^2 * -0.25 = 0.0
|
||||
|
||||
# Check decay was applied
|
||||
check:
|
||||
round(peers[0].behaviourPenalty, 1) == 2.0 # 4.0 * 0.5
|
||||
round(peers[1].behaviourPenalty, 1) == 1.0 # 2.0 * 0.5
|
||||
round(peers[2].behaviourPenalty, 1) == 0.0
|
||||
|
||||
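P4 and the behaviour penalty share the same shape: square the counter, multiply by a negative weight, then decay the counter. A compact sketch of that family, using the weights from the two tests above:

func squaredPenalty(counter, weight: float): float =
  counter * counter * weight

doAssert squaredPenalty(2.0, -4.0) == -16.0 # P4 example above
doAssert squaredPenalty(4.0, -0.25) == -4.0 # behaviour penalty, peer 0
doAssert squaredPenalty(2.0, -0.25) == -1.0 # behaviour penalty, peer 1
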
asyncTest "Colocation factor scoring":
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(5, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.parameters.ipColocationFactorWeight = -1.0
|
||||
gossipSub.parameters.ipColocationFactorThreshold = 2.0
|
||||
|
||||
# Simulate peers from same IP
|
||||
let sharedAddress = MultiAddress.init("/ip4/192.168.1.1/tcp/4001").tryGet()
|
||||
peers[0].address = some(sharedAddress)
|
||||
peers[1].address = some(sharedAddress)
|
||||
peers[2].address = some(sharedAddress)
|
||||
|
||||
# Add to peersInIP to simulate colocation detection
|
||||
gossipSub.peersInIP[sharedAddress] =
|
||||
toHashSet([peers[0].peerId, peers[1].peerId, peers[2].peerId])
|
||||
|
||||
# Different IP for other peers
|
||||
peers[3].address = some(MultiAddress.init("/ip4/192.168.1.2/tcp/4001").tryGet())
|
||||
peers[4].address = some(MultiAddress.init("/ip4/192.168.1.3/tcp/4001").tryGet())
|
||||
|
||||
gossipSub.updateScores()
|
||||
|
||||
check:
|
||||
# First 3 peers should have colocation penalty
|
||||
# over = 3 - 2 = 1, penalty = 1^2 * -1.0 = -1.0
|
||||
round(peers[0].score, 1) == -1.0
|
||||
round(peers[1].score, 1) == -1.0
|
||||
round(peers[2].score, 1) == -1.0
|
||||
# Other peers should have no penalty
|
||||
round(peers[3].score, 1) == 0.0
|
||||
round(peers[4].score, 1) == 0.0
|
||||
|
||||
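The colocation factor follows the same quadratic-surplus idea, keyed by the number of peers behind one IP. A sketch of the formula from the comments above:

func colocationPenalty(peersOnIp: int, threshold, weight: float): float =
  # only the surplus above the threshold is penalized, quadratically
  let over = peersOnIp.float - threshold
  if over <= 0.0:
    0.0
  else:
    over * over * weight

doAssert colocationPenalty(3, 2.0, -1.0) == -1.0 # the three shared-IP peers above
doAssert colocationPenalty(1, 2.0, -1.0) == 0.0  # unique IPs stay unpenalized
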
asyncTest "Score decay to zero":
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
gossipSub.parameters.decayToZero = 0.01
|
||||
gossipSub.topicParams[topic] = TopicParams(
|
||||
topicWeight: 1.0,
|
||||
firstMessageDeliveriesDecay: 0.1,
|
||||
meshMessageDeliveriesDecay: 0.1,
|
||||
meshFailurePenaltyDecay: 0.1,
|
||||
invalidMessageDeliveriesDecay: 0.1,
|
||||
)
|
||||
|
||||
# Set small values that should decay to zero
|
||||
gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(
|
||||
firstMessageDeliveries: 0.02,
|
||||
meshMessageDeliveries: 0.04,
|
||||
meshFailurePenalty: 0.06,
|
||||
invalidMessageDeliveries: 0.08,
|
||||
)
|
||||
|
||||
gossipSub.updateScores()
|
||||
|
||||
# All values should be decayed to zero
|
||||
gossipSub.peerStats.withValue(peers[0].peerId, stats):
|
||||
let info = stats[].topicInfos[topic]
|
||||
check:
|
||||
round(info.firstMessageDeliveries, 1) == 0.0
|
||||
round(info.meshMessageDeliveries, 1) == 0.0
|
||||
round(info.meshFailurePenalty, 1) == 0.0
|
||||
round(info.invalidMessageDeliveries, 1) == 0.0
|
||||
|
||||
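`decayToZero` is a cutoff that keeps counters from shrinking forever without ever reaching zero. A sketch of one decay step under the parameters above:

proc decayStep(counter: var float, decay, decayToZero: float) =
  counter *= decay
  if counter < decayToZero:
    counter = 0.0 # snap tiny residues to zero instead of decaying forever

var firstDeliveries = 0.02
decayStep(firstDeliveries, 0.1, 0.01) # 0.002 < 0.01, so it snaps
doAssert firstDeliveries == 0.0
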
asyncTest "Peer stats expiration and eviction":
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
let now = Moment.now()
|
||||
|
||||
# Create expired peer stats for disconnected peer
|
||||
let expiredPeerId = randomPeerId()
|
||||
gossipSub.peerStats[expiredPeerId] = PeerStats(
|
||||
expire: now - 1.seconds, # Already expired
|
||||
score: -5.0,
|
||||
)
|
||||
|
||||
# Create non-expired stats for connected peer
|
||||
gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
|
||||
stats.expire = now + 10.seconds
|
||||
stats.score = 2.0
|
||||
|
||||
check:
|
||||
gossipSub.peerStats.len == 2 # Before cleanup: expired + connected peer
|
||||
|
||||
gossipSub.updateScores()
|
||||
|
||||
# Expired peer should be evicted, connected peer should remain
|
||||
check:
|
||||
gossipSub.peerStats.len == 1
|
||||
expiredPeerId notin gossipSub.peerStats
|
||||
peers[0].peerId in gossipSub.peerStats
|
||||
|
||||
asyncTest "Combined scoring":
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(1, topic, populateMesh = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# Set up all topic parameters
|
||||
let now = Moment.now()
|
||||
gossipSub.topicParams[topic] = TopicParams(
|
||||
topicWeight: 2.0,
|
||||
timeInMeshWeight: 0.25, # P1
|
||||
timeInMeshQuantum: 1.seconds,
|
||||
timeInMeshCap: 10.0,
|
||||
firstMessageDeliveriesWeight: 1.0, # P2
|
||||
meshMessageDeliveriesWeight: -1.0, # P3
|
||||
meshMessageDeliveriesThreshold: 4.0,
|
||||
meshMessageDeliveriesActivation: 1.seconds,
|
||||
meshFailurePenaltyWeight: -2.0, # P3b
|
||||
invalidMessageDeliveriesWeight: -1.0, # P4
|
||||
)
|
||||
|
||||
gossipSub.parameters.appSpecificWeight = 0.5
|
||||
gossipSub.parameters.behaviourPenaltyWeight = -0.25
|
||||
|
||||
# Set up peer state
|
||||
let peer = peers[0]
|
||||
peer.appScore = 6.0
|
||||
peer.behaviourPenalty = 2.0
|
||||
|
||||
gossipSub.withPeerStats(peer.peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(
|
||||
inMesh: true,
|
||||
graftTime: now - 4.seconds, # seconds in mesh
|
||||
meshMessageDeliveriesActive: true,
|
||||
firstMessageDeliveries: 3.0, # P2 component
|
||||
meshMessageDeliveries: 2.0, # P3 component (below threshold)
|
||||
meshFailurePenalty: 1.0, # P3b component
|
||||
invalidMessageDeliveries: 2.0, # P4 component
|
||||
)
|
||||
|
||||
gossipSub.updateScores()
|
||||
|
||||
# Calculate expected score step by step:
|
||||
#
|
||||
# P1 (time in mesh): meshTime / timeInMeshQuantum * timeInMeshWeight
|
||||
# = 4.0s / 1s * 0.25 = 1.0
|
||||
#
|
||||
# P2 (first message deliveries): firstMessageDeliveries * firstMessageDeliveriesWeight
|
||||
# = 3.0 * 1.0 = 3.0
|
||||
#
|
||||
# P3 (mesh message deliveries): deficit = max(0, threshold - deliveries)
|
||||
# deficit = max(0, 4.0 - 2.0) = 2.0
|
||||
# penalty = deficit^2 * weight = 2.0^2 * -1.0 = -4.0
|
||||
#
|
||||
# P3b (mesh failure penalty): meshFailurePenalty * meshFailurePenaltyWeight
|
||||
# = 1.0 * -2.0 = -2.0
|
||||
#
|
||||
# P4 (invalid message deliveries): invalidMessageDeliveries^2 * invalidMessageDeliveriesWeight
|
||||
# = 2.0^2 * -1.0 = -4.0
|
||||
#
|
||||
# Topic score = (P1 + P2 + P3 + P3b + P4) * topicWeight
|
||||
# = (1.0 + 3.0 + (-4.0) + (-2.0) + (-4.0)) * 2.0
|
||||
# = (1.0 + 3.0 - 4.0 - 2.0 - 4.0) * 2.0
|
||||
# = -6.0 * 2.0 = -12.0
|
||||
#
|
||||
# App score = appScore * appSpecificWeight = 6.0 * 0.5 = 3.0
|
||||
#
|
||||
# Behaviour penalty = behaviourPenalty^2 * behaviourPenaltyWeight
|
||||
# = 2.0^2 * -0.25 = 4.0 * -0.25 = -1.0
|
||||
#
|
||||
# Final score = topicScore + appScore + behaviourPenalty
|
||||
# = -12.0 + 3.0 + (-1.0) = -10.0
|
||||
|
||||
check:
|
||||
round(peer.score, 1) == -10.0
|
||||
|
||||
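The whole breakdown above can be checked mechanically. A sketch that recomputes the expected -10.0 from the same constants:

let
  p1 = min(4.0 / 1.0, 10.0) * 0.25            # 1.0
  p2 = 3.0 * 1.0                               # 3.0
  p3 = (4.0 - 2.0) * (4.0 - 2.0) * -1.0        # -4.0
  p3b = 1.0 * -2.0                             # -2.0
  p4 = 2.0 * 2.0 * -1.0                        # -4.0
  topicScore = (p1 + p2 + p3 + p3b + p4) * 2.0 # -12.0
  appScore = 6.0 * 0.5                         # 3.0
  behaviour = 2.0 * 2.0 * -0.25                # -1.0

doAssert topicScore + appScore + behaviour == -10.0
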
asyncTest "Zero topic weight skips scoring":
|
||||
let (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
# Set topic weight to zero
|
||||
gossipSub.topicParams[topic] = TopicParams(
|
||||
topicWeight: 0.0,
|
||||
firstMessageDeliveriesWeight: 100.0, # High weight but should be ignored
|
||||
)
|
||||
|
||||
gossipSub.withPeerStats(peers[0].peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos[topic] = TopicInfo(firstMessageDeliveries: 10.0)
|
||||
|
||||
gossipSub.updateScores()
|
||||
|
||||
# Score should be zero since topic weight is zero
|
||||
check:
|
||||
round(peers[0].score, 1) == 0.0
|
||||
@@ -96,6 +96,7 @@ proc setupGossipSubWithPeers*(
  let gossipSub = TestGossipSub.init(newStandardSwitch())

  for topic in topics:
    gossipSub.subscribe(topic, voidTopicHandler)
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
@@ -541,10 +542,10 @@ proc baseTestProcedure*(
proc `$`*(peer: PubSubPeer): string =
  shortLog(peer)

proc currentRateLimitHits*(): float64 =
proc currentRateLimitHits*(label: string = "nim-libp2p"): float64 =
  try:
    libp2p_gossipsub_peers_rate_limit_hits.valueByName(
      "libp2p_gossipsub_peers_rate_limit_hits_total", @["nim-libp2p"]
      "libp2p_gossipsub_peers_rate_limit_hits_total", @[label]
    )
  except KeyError:
    0

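With the new `label` parameter, callers can read the hit counter for any metric label instead of the hard-coded one; the tests above query the "unknown" label. A quick property implied by the defaulted parameter (usage sketch only):

# default label preserves the old behavior:
doAssert currentRateLimitHits() == currentRateLimitHits("nim-libp2p")
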
@@ -11,13 +11,20 @@

{.push raises: [].}

import sequtils, json
import sequtils, json, uri
import chronos, chronos/apps/http/httpclient
import ../libp2p/[stream/connection, upgrademngrs/upgrade, autotls/acme/mockapi, wire]
import
  ../libp2p/[
    stream/connection,
    upgrademngrs/upgrade,
    autotls/acme/mockapi,
    autotls/acme/client,
    wire,
  ]

import ./helpers

suite "AutoTLS ACME Client":
suite "AutoTLS ACME API":
  var api {.threadvar.}: MockACMEApi
  var key {.threadvar.}: KeyPair

@@ -27,54 +34,71 @@ suite "AutoTLS ACME Client":

  asyncSetup:
    api = await MockACMEApi.new()
    api.mockedHeaders = HttpTable.init()
    key = KeyPair.random(PKScheme.RSA, newRng()[]).get()

  asyncTest "register to acme server":
    api.mockedBody = %*{"status": "valid"}
    api.mockedHeaders.add("location", "some-expected-kid")
    api.mockedResponses.add(
      HTTPResponse(
        body: %*{"status": "valid"},
        headers: HttpTable.init(@[("location", "some-expected-kid")]),
      )
    )

    let registerResponse = await api.requestRegister(key)
    check registerResponse.kid == "some-expected-kid"

  asyncTest "request challenge for a domain":
    api.mockedBody =
      %*{
        "status": "pending",
        "authorizations": ["expected-authorizations-url"],
        "finalize": "expected-finalize-url",
      }
    api.mockedHeaders.set("location", "expected-order-url")
    api.mockedResponses.add(
      HTTPResponse(
        body:
          %*{
            "status": "pending",
            "authorizations": ["http://example.com/expected-authorizations-url"],
            "finalize": "http://example.com/expected-finalize-url",
          },
        headers:
          HttpTable.init(@[("location", "http://example.com/expected-order-url")]),
      )
    )

    let challengeResponse =
      await api.requestNewOrder(@["some.dummy.domain.com"], key, "kid")
    check challengeResponse.status == ACMEChallengeStatus.pending
    check challengeResponse.authorizations == ["expected-authorizations-url"]
    check challengeResponse.finalize == "expected-finalize-url"
    check challengeResponse.orderURL == "expected-order-url"
    check challengeResponse.status == ACMEOrderStatus.PENDING
    check challengeResponse.authorizations ==
      ["http://example.com/expected-authorizations-url"]
    check challengeResponse.finalize == "http://example.com/expected-finalize-url"
    check challengeResponse.order == "http://example.com/expected-order-url"

    # reset mocked object for second request
    api.mockedBody =
      %*{
        "challenges": [
          {
            "url": "expected-dns01-url",
            "type": "dns-01",
            "status": "pending",
            "token": "expected-dns01-token",
          }
        ]
      }
    api.mockedResponses.add(
      HTTPResponse(
        body:
          %*{
            "challenges": [
              {
                "url": "http://example.com/expected-dns01-url",
                "type": "dns-01",
                "status": "pending",
                "token": "expected-dns01-token",
              }
            ]
          },
        headers:
          HttpTable.init(@[("location", "http://example.com/expected-order-url")]),
      )
    )

    let authorizationsResponse =
      await api.requestAuthorizations(challengeResponse.authorizations, key, "kid")
    check authorizationsResponse.challenges.len > 0

    let dns01 = authorizationsResponse.challenges.filterIt(it.`type` == "dns-01")[0]
    check dns01.url == "expected-dns01-url"
    check dns01.`type` == "dns-01"
    check dns01.token == "expected-dns01-token"
    check dns01.status == ACMEChallengeStatus.pending
    let dns01 = authorizationsResponse.challenges.filterIt(
      it.`type` == ACMEChallengeType.DNS01
    )[0]
    check dns01.url == "http://example.com/expected-dns01-url"
    check dns01.`type` == ACMEChallengeType.DNS01
    check dns01.token == ACMEChallengeToken("expected-dns01-token")
    check dns01.status == ACMEChallengeStatus.PENDING

  asyncTest "register with unsupported keys":
    let unsupportedSchemes = [PKScheme.Ed25519, PKScheme.Secp256k1, PKScheme.ECDSA]
@@ -83,64 +107,144 @@ suite "AutoTLS ACME Client":
    expect(ACMEError):
      discard await api.requestRegister(unsupportedKey)

  asyncTest "request challenge with invalid kid":
    expect(ACMEError):
      discard await api.requestChallenge(@["domain.com"], key, "invalid_kid_here")

asyncTest "challenge completed successful":
|
||||
api.mockedBody = %*{"checkURL": "some-check-url"}
|
||||
discard await api.requestCompleted("some-chal-url", key, "kid")
|
||||
api.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"url": "http://example.com/some-check-url"}, headers: HttpTable.init()
|
||||
)
|
||||
)
|
||||
discard await api.sendChallengeCompleted(
|
||||
parseUri("http://example.com/some-chal-url"), key, "kid"
|
||||
)
|
||||
|
||||
api.mockedBody = %*{"status": "valid"}
|
||||
api.mockedHeaders.add("Retry-After", "1")
|
||||
let completed = await api.checkChallengeCompleted("some-chal-url", key, "kid")
|
||||
api.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"status": "valid"}, headers: HttpTable.init(@[("Retry-After", "0")])
|
||||
)
|
||||
)
|
||||
let completed = await api.checkChallengeCompleted(
|
||||
parseUri("http://example.com/some-chal-url"), key, "kid"
|
||||
)
|
||||
check completed == true
|
||||
|
||||
asyncTest "challenge completed max retries reached":
|
||||
api.mockedBody = %*{"checkURL": "some-check-url"}
|
||||
discard await api.requestCompleted("some-chal-url", key, "kid")
|
||||
api.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"url": "http://example.com/some-check-url"}, headers: HttpTable.init()
|
||||
)
|
||||
)
|
||||
discard await api.sendChallengeCompleted(
|
||||
parseUri("http://example.com/some-chal-url"), key, "kid"
|
||||
)
|
||||
|
||||
api.mockedBody = %*{"status": "pending"}
|
||||
api.mockedHeaders.add("Retry-After", "1")
|
||||
let completed =
|
||||
await api.checkChallengeCompleted("some-chal-url", key, "kid", retries = 1)
|
||||
# add this mocked response a few times since checkChallengeCompleted might get more than once
|
||||
for _ in 0 .. 5:
|
||||
api.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"status": "pending"},
|
||||
headers: HttpTable.init(@[("Retry-After", "0")]),
|
||||
)
|
||||
)
|
||||
let completed = await api.checkChallengeCompleted(
|
||||
parseUri("http://example.com/some-chal-url"), key, "kid", retries = 1
|
||||
)
|
||||
check completed == false
|
||||
|
||||
asyncTest "challenge completed invalid":
|
||||
api.mockedBody = %*{"checkURL": "some-check-url"}
|
||||
discard await api.requestCompleted("some-chal-url", key, "kid")
|
||||
api.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"url": "http://example.com/some-check-url"}, headers: HttpTable.init()
|
||||
)
|
||||
)
|
||||
discard await api.sendChallengeCompleted(
|
||||
parseUri("http://example.com/some-chal-url"), key, "kid"
|
||||
)
|
||||
|
||||
# add this mocked response a few times since checkChallengeCompleted might get more than once
|
||||
for _ in 0 .. 5:
|
||||
api.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"status": "invalid"},
|
||||
headers: HttpTable.init(@[("Retry-After", "0")]),
|
||||
)
|
||||
)
|
||||
|
||||
api.mockedBody = %*{"status": "invalid"}
|
||||
api.mockedHeaders.add("Retry-After", "1")
|
||||
expect(ACMEError):
|
||||
discard await api.checkChallengeCompleted("some-chal-url", key, "kid")
|
||||
discard await api.checkChallengeCompleted(
|
||||
parseUri("http://example.com/some-chal-url"), key, "kid"
|
||||
)
|
||||
|
||||
asyncTest "finalize certificate successful":
|
||||
api.mockedBody = %*{"status": "valid"}
|
||||
api.mockedHeaders.add("Retry-After", "1")
|
||||
# first status is processing, then valid
|
||||
api.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"status": "processing"},
|
||||
headers: HttpTable.init(@[("Retry-After", "0")]),
|
||||
)
|
||||
)
|
||||
api.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"status": "valid"}, headers: HttpTable.init(@[("Retry-After", "0")])
|
||||
)
|
||||
)
|
||||
let finalized = await api.certificateFinalized(
|
||||
"some-domain", "some-finalize-url", "some-order-url", key, "kid"
|
||||
"some-domain",
|
||||
parseUri("http://example.com/some-finalize-url"),
|
||||
parseUri("http://example.com/some-order-url"),
|
||||
key,
|
||||
"kid",
|
||||
)
|
||||
check finalized == true
|
||||
|
||||
asyncTest "finalize certificate max retries reached":
|
||||
api.mockedBody = %*{"status": "processing"}
|
||||
api.mockedHeaders.add("Retry-After", "1")
|
||||
# add this mocked response a few times since checkCertFinalized might get more than once
|
||||
for _ in 0 .. 5:
|
||||
api.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"status": "processing"},
|
||||
headers: HttpTable.init(@[("Retry-After", "0")]),
|
||||
)
|
||||
)
|
||||
let finalized = await api.certificateFinalized(
|
||||
"some-domain", "some-finalize-url", "some-order-url", key, "kid", retries = 1
|
||||
"some-domain",
|
||||
parseUri("http://example.com/some-finalize-url"),
|
||||
parseUri("http://example.com/some-order-url"),
|
||||
key,
|
||||
"kid",
|
||||
retries = 1,
|
||||
)
|
||||
check finalized == false
|
||||
|
||||
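The retries parameter bounds a poll loop that re-reads order status until it leaves "processing", waiting out the advertised Retry-After between attempts. A rough self-contained sketch of that loop shape (a simulated status source, not the library's implementation):

import chronos

# simulated server: reports "processing" twice, then "valid"
var remaining = 2
proc fetchStatus(): Future[string] {.async.} =
  if remaining > 0:
    dec remaining
    return "processing"
  return "valid"

proc pollUntilValid(retryAfter: Duration, retries: int): Future[bool] {.async.} =
  # true once the server reports "valid"; false when retries run out
  for _ in 0 ..< retries:
    let status = await fetchStatus()
    if status == "valid":
      return true
    await sleepAsync(retryAfter) # honor the server's Retry-After hint
  return false

assert waitFor(pollUntilValid(1.milliseconds, retries = 5))
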
asyncTest "finalize certificate invalid":
|
||||
api.mockedBody = %*{"status": "invalid"}
|
||||
api.mockedHeaders.add("Retry-After", "1")
|
||||
# first request is processing, then invalid
|
||||
api.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"status": "processing"},
|
||||
headers: HttpTable.init(@[("Retry-After", "0")]),
|
||||
)
|
||||
)
|
||||
api.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"status": "invalid"}, headers: HttpTable.init(@[("Retry-After", "0")])
|
||||
)
|
||||
)
|
||||
expect(ACMEError):
|
||||
discard await api.certificateFinalized(
|
||||
"some-domain", "some-finalize-url", "some-order-url", key, "kid"
|
||||
"some-domain",
|
||||
parseUri("http://example.com/some-finalize-url"),
|
||||
parseUri("http://example.com/some-order-url"),
|
||||
key,
|
||||
"kid",
|
||||
)
|
||||
|
||||
asyncTest "expect error on invalid JSON response":
|
||||
api.mockedBody = %*{"inexistent field": "invalid value"}
|
||||
# add a couple invalid responses as they get popped by every get or post call
|
||||
for _ in 0 .. 20:
|
||||
api.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"inexistent field": "invalid value"}, headers: HttpTable.init()
|
||||
)
|
||||
)
|
||||
|
||||
expect(ACMEError):
|
||||
# avoid calling overloaded mock method requestNonce here since we want to test the actual thing
|
||||
@@ -160,19 +264,94 @@ suite "AutoTLS ACME Client":
|
||||
|
||||
expect(ACMEError):
|
||||
discard await api.requestCheck(
|
||||
"some-check-url", ACMECheckKind.ACMEOrderCheck, key, "kid"
|
||||
parseUri("http://example.com/some-check-url"),
|
||||
ACMECheckKind.ACMEOrderCheck,
|
||||
key,
|
||||
"kid",
|
||||
)
|
||||
|
||||
expect(ACMEError):
|
||||
discard await api.requestCheck(
|
||||
"some-check-url", ACMECheckKind.ACMEChallengeCheck, key, "kid"
|
||||
parseUri("http://example.com/some-check-url"),
|
||||
ACMECheckKind.ACMEChallengeCheck,
|
||||
key,
|
||||
"kid",
|
||||
)
|
||||
|
||||
expect(ACMEError):
|
||||
discard await api.requestCompleted("some-chal-url", key, "kid")
|
||||
discard await api.sendChallengeCompleted(
|
||||
parseUri("http://example.com/some-chal-url"), key, "kid"
|
||||
)
|
||||
|
||||
expect(ACMEError):
|
||||
discard await api.requestFinalize("some-domain", "some-finalize-url", key, "kid")
|
||||
discard await api.requestFinalize(
|
||||
"some-domain", parseUri("http://example.com/some-finalize-url"), key, "kid"
|
||||
)
|
||||
|
||||
expect(ACMEError):
|
||||
discard await api.requestGetOrder("some-order-url")
|
||||
discard await api.requestGetOrder(parseUri("http://example.com/some-order-url"))
|
||||
|
||||
suite "AutoTLS ACME Client":
|
||||
var acmeApi {.threadvar.}: MockACMEApi
|
||||
var acme {.threadvar.}: ACMEClient
|
||||
|
||||
asyncSetup:
|
||||
acmeApi = await MockACMEApi.new()
|
||||
|
||||
asyncTeardown:
|
||||
await acme.close()
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "client registers new account when instantiated":
|
||||
acmeApi.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"status": "valid"},
|
||||
headers: HttpTable.init(@[("location", "some-expected-kid")]),
|
||||
)
|
||||
)
|
||||
|
||||
acme = await ACMEClient.new(api = Opt.some(ACMEApi(acmeApi)))
|
||||
check acme.kid == "some-expected-kid"
|
||||
|
||||
asyncTest "getCertificate succeeds on sendChallengeCompleted but fails on requestFinalize":
|
||||
# register successful
|
||||
acmeApi.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"status": "valid"},
|
||||
headers: HttpTable.init(@[("location", "some-expected-kid")]),
|
||||
)
|
||||
)
|
||||
# request completed successful
|
||||
acmeApi.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"url": "http://example.com/some-check-url"}, headers: HttpTable.init()
|
||||
)
|
||||
)
|
||||
# finalize is invalid
|
||||
# first request is processing, then invalid
|
||||
acmeApi.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"status": "processing"},
|
||||
headers: HttpTable.init(@[("Retry-After", "0")]),
|
||||
)
|
||||
)
|
||||
acmeApi.mockedResponses.add(
|
||||
HTTPResponse(
|
||||
body: %*{"status": "invalid"}, headers: HttpTable.init(@[("Retry-After", "0")])
|
||||
)
|
||||
)
|
||||
acme = await ACMEClient.new(api = Opt.some(ACMEApi(acmeApi)))
|
||||
check acme.kid == "some-expected-kid"
|
||||
|
||||
let challenge = ACMEChallengeResponseWrapper(
|
||||
finalize: "https://finalize.com",
|
||||
order: "https://order.com",
|
||||
dns01: ACMEChallenge(
|
||||
url: "https://some.domain",
|
||||
`type`: ACMEChallengeType.DNS01,
|
||||
status: ACMEChallengeStatus.VALID,
|
||||
token: ACMEChallengeToken("some-token"),
|
||||
),
|
||||
)
|
||||
expect(ACMEError):
|
||||
discard await acme.getCertificate(api.Domain("some.domain"), challenge)
|
||||
|
||||
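The failure path above inverts the happy path that the integration suite further down walks end to end; roughly, under the same imports as that suite (the domain is a placeholder, and the DNS-01 TXT record must be published out of band before finalization can succeed):

proc issueCertificate() {.async.} =
  let acme = await ACMEClient.new(acmeServerURL = parseUri(LetsEncryptURLStaging))
  defer:
    await acme.close()
  # ask the ACME server for a DNS-01 challenge for our (placeholder) domain
  let challenge = await acme.getChallenge(@["some.dummy.domain.com"])
  # ...publish the DNS-01 TXT record derived from challenge.dns01.token here...
  # then drive finalization and certificate download
  discard await acme.getCertificate(Domain("some.dummy.domain.com"), challenge)
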
@@ -9,9 +9,23 @@

{.push raises: [].}

import uri
import chronos
import chronos/apps/http/httpclient
import ../libp2p/[stream/connection, upgrademngrs/upgrade, autotls/acme/api, wire]
import
../libp2p/[
stream/connection,
upgrademngrs/upgrade,
autotls/acme/api,
autotls/acme/client,
autotls/manager,
autotls/utils,
multiaddress,
switch,
builders,
nameresolving/dnsresolver,
wire,
]

import ./helpers

@@ -19,31 +33,104 @@ when defined(linux) and defined(amd64):
{.used.}

suite "AutoTLS Integration":
|
||||
var api {.threadvar.}: ACMEApi
|
||||
var key {.threadvar.}: KeyPair
|
||||
|
||||
asyncTeardown:
|
||||
await api.close()
|
||||
checkTrackers()
|
||||
|
||||
asyncSetup:
|
||||
api = await ACMEApi.new(acmeServerURL = LetsEncryptURLStaging)
|
||||
key = KeyPair.random(PKScheme.RSA, newRng()[]).get()
|
||||
|
||||
asyncTest "test request challenge":
|
||||
let registerResponse = await api.requestRegister(key)
|
||||
asyncTest "request challenge without ACMEClient (ACMEApi only)":
|
||||
let key = KeyPair.random(PKScheme.RSA, newRng()[]).get()
|
||||
let acmeApi = ACMEApi.new(acmeServerURL = parseUri(LetsEncryptURLStaging))
|
||||
defer:
|
||||
await acmeApi.close()
|
||||
let registerResponse = await acmeApi.requestRegister(key)
|
||||
# account was registered (kid set)
|
||||
check registerResponse.kid != ""
|
||||
if registerResponse.kid == "":
|
||||
raiseAssert "unable to register acme account"
|
||||
|
||||
# challenge requested
|
||||
let challenge =
|
||||
await api.requestChallenge(@["some.dummy.domain.com"], key, registerResponse.kid)
|
||||
check challenge.finalizeURL.len() > 0
|
||||
check challenge.orderURL.len() > 0
|
||||
let challenge = await acmeApi.requestChallenge(
|
||||
@["some.dummy.domain.com"], key, registerResponse.kid
|
||||
)
|
||||
check challenge.finalize.len > 0
|
||||
check challenge.order.len > 0
|
||||
|
||||
check challenge.dns01.url.len() > 0
|
||||
check challenge.dns01.`type`.len() > 0
|
||||
check challenge.dns01.status == ACMEChallengeStatus.pending
|
||||
check challenge.dns01.token.len() > 0
|
||||
check challenge.dns01.url.len > 0
|
||||
check challenge.dns01.`type` == ACMEChallengeType.DNS01
|
||||
check challenge.dns01.status == ACMEChallengeStatus.PENDING
|
||||
check challenge.dns01.token.len > 0
|
||||
|
||||
asyncTest "request challenge with ACMEClient":
|
||||
let acme = await ACMEClient.new(acmeServerURL = parseUri(LetsEncryptURLStaging))
|
||||
defer:
|
||||
await acme.close()
|
||||
|
||||
let challenge = await acme.getChallenge(@["some.dummy.domain.com"])
|
||||
|
||||
check challenge.finalize.len > 0
|
||||
check challenge.order.len > 0
|
||||
check challenge.dns01.url.len > 0
|
||||
check challenge.dns01.`type` == ACMEChallengeType.DNS01
|
||||
check challenge.dns01.status == ACMEChallengeStatus.PENDING
|
||||
check challenge.dns01.token.len > 0
|
||||
|
||||
asyncTest "AutoTLSManager correctly downloads challenges":
|
||||
let ip = checkedGetPrimaryIPAddr()
|
||||
if not ip.isIPv4() or not ip.isPublic():
|
||||
skip() # host doesn't have public IPv4 address
|
||||
return
|
||||
|
||||
let switch = SwitchBuilder
|
||||
.new()
|
||||
.withRng(newRng())
|
||||
.withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
|
||||
.withTcpTransport()
|
||||
.withAutotls(acmeServerURL = parseUri(LetsEncryptURLStaging))
|
||||
.withYamux()
|
||||
.withNoise()
|
||||
.build()
|
||||
|
||||
# this is to quickly renew cert for testing
|
||||
switch.autotls.renewCheckTime = 1.seconds
|
||||
|
||||
await switch.start()
|
||||
defer:
|
||||
await switch.stop()
|
||||
|
||||
# wait for cert to be ready
|
||||
await switch.autotls.certReady.wait()
|
||||
# clear since we'll use it again for renewal
|
||||
switch.autotls.certReady.clear()
|
||||
|
||||
let dnsResolver = DnsResolver.new(DefaultDnsServers)
|
||||
let base36PeerId = encodePeerId(switch.peerInfo.peerId)
|
||||
let dnsTXTRecord = (
|
||||
await dnsResolver.resolveTxt(
|
||||
"_acme-challenge." & base36PeerId & "." & AutoTLSDNSServer
|
||||
)
|
||||
)[0]
|
||||
|
||||
# check if DNS TXT record is set
|
||||
check dnsTXTRecord.len > 0
|
||||
|
||||
# certificate was downloaded and parsed
|
||||
let cert = switch.autotls.cert.valueOr:
|
||||
raiseAssert "certificate not found"
|
||||
let certBefore = cert
|
||||
|
||||
# invalidate certificate
|
||||
switch.autotls.certExpiry = Opt.some(Moment.now - 2.hours)
|
||||
|
||||
# wait for cert to be renewed
|
||||
await switch.autotls.certReady.wait()
|
||||
|
||||
# certificate was indeed renewed
|
||||
let certAfter = switch.autotls.cert.valueOr:
|
||||
raiseAssert "certificate not found"
|
||||
|
||||
check certBefore != certAfter
|
||||
|
||||
let certExpiry = switch.autotls.certExpiry.valueOr:
|
||||
raiseAssert "certificate expiry not found"
|
||||
|
||||
# cert is valid
|
||||
check certExpiry > Moment.now
|
||||
|
||||
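The TXT query above is ordinary DNS resolution; a standalone sketch of the same lookup (the peer ID is supplied by the caller, and the record name mirrors the one built in the test):

proc lookupChallenge(peerId: PeerId) {.async.} =
  let resolver = DnsResolver.new(DefaultDnsServers)
  # same name shape as above: "_acme-challenge.<base36 peer id>.<AutoTLS DNS server>"
  let name = "_acme-challenge." & encodePeerId(peerId) & "." & AutoTLSDNSServer
  let records = await resolver.resolveTxt(name)
  echo "TXT records: ", records
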
@@ -11,7 +11,7 @@

import
testvarint, testconnection, testbridgestream, testminprotobuf, teststreamseq,
testsemaphore, testheartbeat, testfuture
testsemaphore, testheartbeat, testfuture, testzeroqueue

import testminasn1, testrsa, testecnist, tested25519, testsecp256k1, testcrypto

@@ -34,4 +34,4 @@ import
testdiscovery, testyamux, testautonat, testautonatservice, testautorelay, testdcutr,
testhpservice, testutility, testhelpers, testwildcardresolverservice, testperf

import kademlia/testencoding
import kademlia/[testencoding, testroutingtable]

@@ -12,7 +12,7 @@
import json, uri
import chronos
import chronos/apps/http/httpclient
import ../libp2p/[stream/connection, upgrademngrs/upgrade, peeridauth, wire]
import ../libp2p/[stream/connection, upgrademngrs/upgrade, peeridauth/client, wire]

import ./helpers

@@ -54,5 +54,5 @@ suite "PeerID Auth":
doAssert bearer.token.len > 0

let (_, responseWithBearer) =
await client.send(parseUri(AuthPeerURL), peerInfo, payload, bearer)
await client.send(parseUri(AuthPeerURL), peerInfo, payload, Opt.some(bearer))
check responseWithBearer.status != HttpPeerAuthFailed

@@ -16,18 +16,29 @@ import
import ./helpers

proc createSwitch(
isServer: bool = false, useMplex: bool = false, useYamux: bool = false
isServer: bool = false,
useQuic: bool = false,
useMplex: bool = false,
useYamux: bool = false,
): Switch =
var builder = SwitchBuilder
.new()
.withRng(newRng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withTcpTransport()
.withNoise()
if useMplex:
builder = builder.withMplex()
if useYamux:
builder = builder.withYamux()
var builder = SwitchBuilder.new()
builder = builder.withRng(newRng()).withNoise()

if useQuic:
builder = builder.withQuicTransport().withAddresses(
@[MultiAddress.init("/ip4/127.0.0.1/udp/0/quic-v1").tryGet()]
)
else:
builder = builder.withTcpTransport().withAddresses(
@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
)

if useMplex:
builder = builder.withMplex()
elif useYamux:
builder = builder.withYamux()
else:
raiseAssert "must use mplex or yamux"

var switch = builder.build()

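Call sites now choose transport and muxer per invocation. Note the helper asserts unless useMplex or useYamux is set, so even a QUIC switch (which multiplexes natively) has to pass one of them; a couple of illustrative invocations:

# TCP with yamux, as in the tcp::yamux test below
let server = createSwitch(isServer = true, useYamux = true)
let client = createSwitch(useYamux = true)

# QUIC transport; the yamux flag here only satisfies the helper's muxer assertion
let quicServer = createSwitch(isServer = true, useQuic = true, useYamux = true)
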
@@ -43,13 +54,12 @@ proc runTest(server: Switch, client: Switch) {.async.} =

await server.start()
await client.start()

defer:
await client.stop()
await server.stop()

let conn = await client.dial(server.peerInfo.peerId, server.peerInfo.addrs, PerfCodec)
var perfClient = PerfClient.new()
let perfClient = PerfClient.new()
discard await perfClient.perf(conn, bytesToUpload, bytesToDownload)

let stats = perfClient.currentStats()
@@ -58,12 +68,53 @@ proc runTest(server: Switch, client: Switch) {.async.} =
stats.uploadBytes == bytesToUpload
stats.downloadBytes == bytesToDownload

proc runTestWithException(server: Switch, client: Switch) {.async.} =
const
bytesToUpload = 1.uint64
bytesToDownload = 10000000000.uint64
# use a large download request which will make perf execute for longer,
# giving us a chance to stop it

await server.start()
await client.start()
defer:
await client.stop()
await server.stop()

let conn = await client.dial(server.peerInfo.peerId, server.peerInfo.addrs, PerfCodec)
let perfClient = PerfClient.new()
let perfFut = perfClient.perf(conn, bytesToUpload, bytesToDownload)

# after some time upload should be finished and download should be ongoing
await sleepAsync(200.milliseconds)
var stats = perfClient.currentStats()
check:
stats.isFinal == false
stats.uploadBytes == bytesToUpload
stats.downloadBytes > 0

perfFut.cancel() # cancelling the future will raise an exception in perfClient
await sleepAsync(10.milliseconds)

# after cancelling perf, stats must indicate that it is the final one
stats = perfClient.currentStats()
check:
stats.isFinal == true
stats.uploadBytes == bytesToUpload
stats.downloadBytes > 0
stats.downloadBytes < bytesToDownload # download must not be completed

|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "quic":
|
||||
return # nim-libp2p#1482: currently it does not work with quic
|
||||
let server = createSwitch(isServer = true, useQuic = true)
|
||||
let client = createSwitch(useQuic = true)
|
||||
await runTest(server, client)
|
||||
|
||||
asyncTest "tcp::yamux":
|
||||
return # nim-libp2p#1462 test fails with stream closed error
|
||||
let server = createSwitch(isServer = true, useYamux = true)
|
||||
let client = createSwitch(useYamux = true)
|
||||
await runTest(server, client)
|
||||
@@ -73,40 +124,12 @@ suite "Perf protocol":
|
||||
let client = createSwitch(useMplex = true)
|
||||
await runTest(server, client)
|
||||
|
||||
asyncTest "perf with exception":
|
||||
asyncTest "perf with exception::yamux":
|
||||
let server = createSwitch(isServer = true, useYamux = true)
|
||||
let client = createSwitch(useYamux = true)
|
||||
await runTestWithException(server, client)
|
||||
|
||||
asyncTest "perf with exception::mplex":
|
||||
let server = createSwitch(isServer = true, useMplex = true)
|
||||
let client = createSwitch(useMplex = true)
|
||||
|
||||
await server.start()
|
||||
await client.start()
|
||||
|
||||
defer:
|
||||
await client.stop()
|
||||
await server.stop()
|
||||
|
||||
let conn =
|
||||
await client.dial(server.peerInfo.peerId, server.peerInfo.addrs, PerfCodec)
|
||||
var perfClient = PerfClient.new()
|
||||
var perfFut: Future[Duration]
|
||||
try:
|
||||
# start perf future with large download request
|
||||
# this will make perf execute for longer so we can cancel it
|
||||
perfFut = perfClient.perf(conn, 1.uint64, 1000000000000.uint64)
|
||||
except CatchableError:
|
||||
discard
|
||||
|
||||
# after some time upload should be finished
|
||||
await sleepAsync(50.milliseconds)
|
||||
var stats = perfClient.currentStats()
|
||||
check:
|
||||
stats.isFinal == false
|
||||
stats.uploadBytes == 1
|
||||
|
||||
perfFut.cancel() # cancelling future will raise exception
|
||||
await sleepAsync(50.milliseconds)
|
||||
|
||||
# after cancelling perf, stats must indicate that it is final one
|
||||
stats = perfClient.currentStats()
|
||||
check:
|
||||
stats.isFinal == true
|
||||
stats.uploadBytes == 1
|
||||
await runTestWithException(server, client)
|
||||
|
||||
@@ -1037,35 +1037,36 @@ suite "Switch":
await srcWsSwitch.stop()
await srcTcpSwitch.stop()

asyncTest "e2e quic transport":
let
quicAddress1 = MultiAddress.init("/ip4/127.0.0.1/udp/0/quic-v1").tryGet()
quicAddress2 = MultiAddress.init("/ip4/127.0.0.1/udp/0/quic-v1").tryGet()
when defined(libp2p_quic_support):
asyncTest "e2e quic transport":
let
quicAddress1 = MultiAddress.init("/ip4/127.0.0.1/udp/0/quic-v1").tryGet()
quicAddress2 = MultiAddress.init("/ip4/127.0.0.1/udp/0/quic-v1").tryGet()

srcSwitch = SwitchBuilder
.new()
.withAddress(quicAddress1)
.withRng(crypto.newRng())
.withQuicTransport()
.withNoise()
.build()
srcSwitch = SwitchBuilder
.new()
.withAddress(quicAddress1)
.withRng(crypto.newRng())
.withQuicTransport()
.withNoise()
.build()

destSwitch = SwitchBuilder
.new()
.withAddress(quicAddress2)
.withRng(crypto.newRng())
.withQuicTransport()
.withNoise()
.build()
destSwitch = SwitchBuilder
.new()
.withAddress(quicAddress2)
.withRng(crypto.newRng())
.withQuicTransport()
.withNoise()
.build()

await destSwitch.start()
await srcSwitch.start()
await destSwitch.start()
await srcSwitch.start()

await srcSwitch.connect(destSwitch.peerInfo.peerId, destSwitch.peerInfo.addrs)
check srcSwitch.isConnected(destSwitch.peerInfo.peerId)
await srcSwitch.connect(destSwitch.peerInfo.peerId, destSwitch.peerInfo.addrs)
check srcSwitch.isConnected(destSwitch.peerInfo.peerId)

await destSwitch.stop()
await srcSwitch.stop()
await destSwitch.stop()
await srcSwitch.stop()

asyncTest "mount unstarted protocol":
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =

115
tests/testzeroqueue.nim
Normal file
@@ -0,0 +1,115 @@
{.used.}

# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import unittest2
import ../libp2p/utils/zeroqueue

proc toSeq(p: pointer, length: int): seq[byte] =
var res = newSeq[byte](length)
copyMem(res[0].addr, p, length)
return res

suite "ZeroQueue":
test "push-pop":
var q: ZeroQueue
check q.len() == 0
check q.isEmpty()
check q.popChunkSeq(1).len == 0 # pops an empty seq when the queue is empty

q.push(@[1'u8, 2, 3])
q.push(@[4'u8, 5])
check q.len() == 5
check not q.isEmpty()

check q.popChunkSeq(3) == @[1'u8, 2, 3] # pop exactly the size of the chunk
check q.popChunkSeq(1) == @[4'u8] # pop less than the size of the chunk
check q.popChunkSeq(5) == @[5'u8] # pop more than the size of the chunk
check q.isEmpty()

# empty seqs should not be enqueued
q.push(@[])
q.push(@[])
check q.isEmpty()

test "clear":
var q: ZeroQueue
q.push(@[1'u8, 2, 3])
check not q.isEmpty()
q.clear()
check q.isEmpty()
check q.len() == 0

test "consumeTo":
var q: ZeroQueue
let nbytes = 20
var pbytes = alloc(nbytes)
defer:
dealloc(pbytes)

# consumeTo: on empty queue
check q.consumeTo(pbytes, nbytes) == 0

# consumeTo: emptying whole queue (multiple pushes)
q.push(@[1'u8, 2, 3])
q.push(@[4'u8, 5])
q.push(@[6'u8, 7])
check q.consumeTo(pbytes, nbytes) == 7
check toSeq(pbytes, 7) == @[1'u8, 2, 3, 4, 5, 6, 7]
check q.isEmpty()

# consumeTo: consuming one chunk of data in two steps
q.push(@[1'u8, 2, 3])
# first consume
check q.consumeTo(pbytes, 1) == 1
check toSeq(pbytes, 1) == @[1'u8]
check q.len() == 2
# second consume
check q.consumeTo(pbytes, nbytes) == 2
check toSeq(pbytes, 2) == @[2'u8, 3]
check q.isEmpty()

# consumeTo: consuming multiple chunks of data in two steps
q.clear()
q.push(@[4'u8, 5])
q.push(@[1'u8, 2, 3])
# first consume
check q.consumeTo(pbytes, 3) == 3
check toSeq(pbytes, 3) == @[4'u8, 5, 1]
check q.len() == 2
# second consume
check q.consumeTo(pbytes, nbytes) == 2
check toSeq(pbytes, 2) == @[2'u8, 3]
check q.isEmpty()

# consumeTo: partially consume a big push multiple times
q.clear()
q.push(newSeq[byte](20))
for i in 1 .. 10:
check q.consumeTo(pbytes, 2) == 2
check q.isEmpty()
check q.consumeTo(pbytes, 2) == 0

# consumeTo: partially consuming while pushing
q.push(@[1'u8, 2, 3])
check q.consumeTo(pbytes, 2) == 2
check toSeq(pbytes, 2) == @[1'u8, 2]
q.push(@[1'u8, 2, 3])
check q.consumeTo(pbytes, 2) == 2
check toSeq(pbytes, 2) == @[3'u8, 1]
q.push(@[1'u8, 2, 3])
check q.consumeTo(pbytes, 2) == 2
check toSeq(pbytes, 2) == @[2'u8, 3]
check q.consumeTo(pbytes, 2) == 2
check toSeq(pbytes, 2) == @[1'u8, 2]
check q.consumeTo(pbytes, 2) == 1
check toSeq(pbytes, 1) == @[3'u8]
check q.isEmpty()

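Taken together these tests pin down the ZeroQueue contract: push ignores empty chunks, popChunkSeq returns at most the requested number of bytes (crossing chunk boundaries as needed), and consumeTo copies into caller-owned memory, returning how many bytes were written. A minimal consumer sketch against that same API:

import ../libp2p/utils/zeroqueue # same import path the test uses

var q: ZeroQueue
q.push(@[104'u8, 105]) # "hi"
q.push(@[33'u8]) # "!"

var buf = alloc(2)
# drain two bytes at a time into caller-owned memory
while not q.isEmpty():
  let n = q.consumeTo(buf, 2)
  echo "consumed ", n, " bytes"
dealloc(buf)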