Mirror of https://github.com/vacp2p/nim-libp2p.git, synced 2026-01-10 12:58:05 -05:00

Compare commits: fix-daily-... → v1110 (9 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 2c0d4b873e |  |
|  | d803352bd6 |  |
|  | 2eafac47e8 |  |
|  | 848fdde0a8 |  |
|  | 31e7dc68e2 |  |
|  | 08299a2059 |  |
|  | 2f3156eafb |  |
|  | 72e85101b0 |  |
|  | d205260a3e |  |
.github/workflows/daily_common.yml (vendored, 2 changes)
@@ -96,4 +96,4 @@ jobs:
         export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
         nimble test
-        nimble testintegraion
+        nimble testintegration
@@ -84,8 +84,8 @@ proc main() {.async.} =
     debug "Dialing relay...", relayMA
     let relayId = await switch.connect(relayMA).wait(30.seconds)
     debug "Connected to relay", relayId
-  except AsyncTimeoutError:
-    raise newException(CatchableError, "Connection to relay timed out")
+  except AsyncTimeoutError as e:
+    raise newException(CatchableError, "Connection to relay timed out: " & e.msg, e)

   # Wait for our relay address to be published
   while not switch.peerInfo.addrs.anyIt(it.contains(multiCodec("p2p-circuit")).tryGet()):
@@ -103,7 +103,7 @@ proc main() {.async.} =
     try:
       PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
     except Exception as e:
-      raise newException(CatchableError, e.msg)
+      raise newException(CatchableError, "Exception init peer: " & e.msg, e)

   debug "Got listener peer id", listenerId
   let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
@@ -130,8 +130,8 @@ try:
     return "done"

   discard waitFor(mainAsync().wait(4.minutes))
-except AsyncTimeoutError:
-  error "Program execution timed out."
+except AsyncTimeoutError as e:
+  error "Program execution timed out", description = e.msg
   quit(-1)
 except CatchableError as e:
   error "Unexpected error", description = e.msg
@@ -80,7 +80,7 @@ proc main() {.async.} =
     try:
       redisClient.bLPop(@["listenerAddr"], testTimeout.seconds.int)[1]
     except Exception as e:
-      raise newException(CatchableError, e.msg)
+      raise newException(CatchableError, "Exception calling bLPop: " & e.msg, e)
   let
     remoteAddr = MultiAddress.init(listenerAddr).tryGet()
     dialingStart = Moment.now()
@@ -105,8 +105,8 @@ try:
     return "done"

   discard waitFor(mainAsync().wait(testTimeout))
-except AsyncTimeoutError:
-  error "Program execution timed out."
+except AsyncTimeoutError as e:
+  error "Program execution timed out", description = e.msg
   quit(-1)
 except CatchableError as e:
   error "Unexpected error", description = e.msg
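All of these hunks apply the same Nim idiom: instead of dropping the low-level exception, re-raise with the original message appended and the original exception passed as `parent`. A minimal standalone sketch of the idiom (not from the diff; `MyError` and `fetch` are hypothetical):

type MyError = object of CatchableError

proc fetch() {.raises: [MyError].} =
  try:
    raise newException(ValueError, "low-level failure")
  except ValueError as e:
    # the third argument sets `parent`, chaining the original exception,
    # and the message keeps e.msg so logs stay informative
    raise newException(MyError, "fetch failed: " & e.msg, e)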
@@ -1,7 +1,7 @@
 mode = ScriptMode.Verbose

 packageName = "libp2p"
-version = "1.10.1"
+version = "1.11.0"
 author = "Status Research & Development GmbH"
 description = "LibP2P implementation"
 license = "MIT"
@@ -19,18 +19,22 @@ const
   DefaultRandStringSize = 256
   ACMEHttpHeaders = [("Content-Type", "application/jose+json")]

-type Nonce = string
-type Kid = string
+type Nonce* = string
+type Kid* = string

-type ACMEDirectory = object
-  newNonce: string
-  newOrder: string
-  newAccount: string
+type ACMEDirectory* = object
+  newNonce*: string
+  newOrder*: string
+  newAccount*: string

-type ACMEApi* = object
+type ACMEApi* = ref object of RootObj
   directory: ACMEDirectory
   session: HttpSessionRef
-  acmeServerURL: string
+  acmeServerURL*: string
+
+type HTTPResponse* = object
+  body*: JsonNode
+  headers*: HttpTable

 type JWK = object
   kty: string
@@ -94,10 +98,10 @@ type ACMEChallengeResponseBody = object
   finalize: string

 type ACMEChallengeResponse* = object
-  status: ACMEChallengeStatus
-  authorizations: seq[string]
-  finalize: string
-  orderURL: string
+  status*: ACMEChallengeStatus
+  authorizations*: seq[string]
+  finalize*: string
+  orderURL*: string

 type ACMEChallengeResponseWrapper* = object
   finalizeURL*: string
@@ -105,7 +109,7 @@ type ACMEChallengeResponseWrapper* = object
   dns01*: ACMEChallenge

 type ACMEAuthorizationsResponse* = object
-  challenges: seq[ACMEChallenge]
+  challenges*: seq[ACMEChallenge]

 type ACMECompletedResponse* = object
   checkURL: string
@@ -117,7 +121,7 @@ type ACMEOrderStatus* {.pure.} = enum
   valid = "valid"
   invalid = "invalid"

-type ACMECheckKind = enum
+type ACMECheckKind* = enum
   ACMEOrderCheck
   ACMEChallengeCheck
@@ -129,7 +133,8 @@ type ACMECheckResponse* = object
   chalStatus: ACMEChallengeStatus
   retryAfter: Duration

-type ACMEFinalizedResponse* = object
+type ACMEFinalizeResponse* = object
   status: ACMEOrderStatus

 type ACMEOrderResponse* = object
   certificate: string
@@ -139,7 +144,7 @@ type ACMECertificateResponse* = object
   rawCertificate: string
   certificateExpiry: DateTime

-template handleError(msg: string, body: untyped): untyped =
+template handleError*(msg: string, body: untyped): untyped =
   try:
     body
   except ACMEError as exc:
@@ -155,6 +160,18 @@ template handleError(msg: string, body: untyped): untyped =
   except CatchableError as exc:
     raise newException(ACMEError, msg & ": Unexpected error", exc)

+method post*(
+  self: ACMEApi, url: string, payload: string
+): Future[HTTPResponse] {.
+  async: (raises: [ACMEError, HttpError, CancelledError]), base
+.}
+
+method get*(
+  self: ACMEApi, url: string
+): Future[HTTPResponse] {.
+  async: (raises: [ACMEError, HttpError, CancelledError]), base
+.}
+
 proc new*(
   T: typedesc[ACMEApi], acmeServerURL: string = LetsEncryptURL
 ): Future[ACMEApi] {.async: (raises: [ACMEError, CancelledError]).} =
@@ -167,15 +184,12 @@ proc new*(

   ACMEApi(session: session, directory: directory, acmeServerURL: acmeServerURL)

-proc requestNonce(
+method requestNonce*(
   self: ACMEApi
-): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
-  try:
-    let resp =
-      await HttpClientRequestRef.get(self.session, self.directory.newNonce).get().send()
-    return resp.headers.getString("Replay-Nonce")
-  except HttpError as exc:
-    raise newException(ACMEError, "Failed to request new nonce from ACME server", exc)
+): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]), base.} =
+  handleError("requestNonce"):
+    let acmeResponse = await self.get(self.directory.newNonce)
+    Nonce(acmeResponse.headers.keyOrError("Replay-Nonce"))

 # TODO: save n and e in account so we don't have to recalculate every time
 proc acmeHeader(
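For orientation: `requestNonce` now routes through the overridable `get` method and wraps the body in `handleError`, which converts any failure into an `ACMEError` carrying the label and the original exception as `parent`. A hedged sketch of a call site (assuming a reachable ACME directory):

let api = await ACMEApi.new(acmeServerURL = LetsEncryptURL)
# any HTTP or header failure inside requestNonce surfaces as
# ACMEError("requestNonce: ..."), with the cause chained as parent
let nonce: Nonce = await api.requestNonce()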
@@ -210,15 +224,26 @@ proc acmeHeader(
     kid: kid.get(),
   )

-proc post(
+method post*(
   self: ACMEApi, url: string, payload: string
-): Future[HttpClientResponseRef] {.
-  async: (raises: [ACMEError, HttpError, CancelledError])
+): Future[HTTPResponse] {.
+  async: (raises: [ACMEError, HttpError, CancelledError]), base
 .} =
-  await HttpClientRequestRef
+  let rawResponse = await HttpClientRequestRef
     .post(self.session, url, body = payload, headers = ACMEHttpHeaders)
     .get()
     .send()
+  let body = await rawResponse.getResponseBody()
+  HTTPResponse(body: body, headers: rawResponse.headers)
+
+method get*(
+  self: ACMEApi, url: string
+): Future[HTTPResponse] {.
+  async: (raises: [ACMEError, HttpError, CancelledError]), base
+.} =
+  let rawResponse = await HttpClientRequestRef.get(self.session, url).get().send()
+  let body = await rawResponse.getResponseBody()
+  HTTPResponse(body: body, headers: rawResponse.headers)

 proc createSignedAcmeRequest(
   self: ACMEApi,
@@ -247,16 +272,14 @@ proc requestRegister*(
   let payload = await self.createSignedAcmeRequest(
     self.directory.newAccount, registerRequest, key, needsJwk = true
   )
-  let rawResponse = await self.post(self.directory.newAccount, payload)
-  let body = await rawResponse.getResponseBody()
-  let headers = rawResponse.headers
-  let acmeResponseBody = body.to(ACMERegisterResponseBody)
+  let acmeResponse = await self.post(self.directory.newAccount, payload)
+  let acmeResponseBody = acmeResponse.body.to(ACMERegisterResponseBody)

   ACMERegisterResponse(
-    status: acmeResponseBody.status, kid: headers.getString("location")
+    status: acmeResponseBody.status, kid: acmeResponse.headers.keyOrError("location")
   )

-proc requestNewOrder(
+proc requestNewOrder*(
   self: ACMEApi, domains: seq[string], key: KeyPair, kid: Kid
 ): Future[ACMEChallengeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
   # request challenge from ACME server
@@ -267,29 +290,25 @@ proc requestNewOrder(
   let payload = await self.createSignedAcmeRequest(
     self.directory.newOrder, orderRequest, key, kid = Opt.some(kid)
   )
-  let rawResponse = await self.post(self.directory.newOrder, payload)
-  let body = await rawResponse.getResponseBody()
-  let headers = rawResponse.headers
+  let acmeResponse = await self.post(self.directory.newOrder, payload)

-  let challengeResponseBody = body.to(ACMEChallengeResponseBody)
+  let challengeResponseBody = acmeResponse.body.to(ACMEChallengeResponseBody)
   if challengeResponseBody.authorizations.len() == 0:
     raise newException(ACMEError, "Authorizations field is empty")
   ACMEChallengeResponse(
     status: challengeResponseBody.status,
     authorizations: challengeResponseBody.authorizations,
     finalize: challengeResponseBody.finalize,
-    orderURL: headers.getString("location"),
+    orderURL: acmeResponse.headers.keyOrError("location"),
   )

-proc requestAuthorizations(
+proc requestAuthorizations*(
   self: ACMEApi, authorizations: seq[string], key: KeyPair, kid: Kid
 ): Future[ACMEAuthorizationsResponse] {.async: (raises: [ACMEError, CancelledError]).} =
   handleError("requestAuthorizations"):
     doAssert authorizations.len > 0
-    let rawResponse =
-      await HttpClientRequestRef.get(self.session, authorizations[0]).get().send()
-    let body = await rawResponse.getResponseBody()
-    body.to(ACMEAuthorizationsResponse)
+    let acmeResponse = await self.get(authorizations[0])
+    acmeResponse.body.to(ACMEAuthorizationsResponse)

 proc requestChallenge*(
   self: ACMEApi, domains: seq[string], key: KeyPair, kid: Kid
@@ -305,17 +324,14 @@ proc requestChallenge*(
     dns01: authorizationsResponse.challenges.filterIt(it.`type` == "dns-01")[0],
   )

-proc requestCheck(
+proc requestCheck*(
   self: ACMEApi, checkURL: string, checkKind: ACMECheckKind, key: KeyPair, kid: Kid
 ): Future[ACMECheckResponse] {.async: (raises: [ACMEError, CancelledError]).} =
   handleError("requestCheck"):
-    let rawResponse =
-      await HttpClientRequestRef.get(self.session, checkURL).get().send()
-    let body = await rawResponse.getResponseBody()
-    let headers = rawResponse.headers
+    let acmeResponse = await self.get(checkURL)
     let retryAfter =
       try:
-        parseInt(rawResponse.headers.getString("Retry-After")).seconds
+        parseInt(acmeResponse.headers.keyOrError("Retry-After")).seconds
       except ValueError:
         DefaultChalCompletedRetryTime
@@ -324,58 +340,70 @@ proc requestCheck(
     try:
       ACMECheckResponse(
         kind: checkKind,
-        orderStatus: parseEnum[ACMEOrderStatus](body["status"].getStr),
+        orderStatus: parseEnum[ACMEOrderStatus](acmeResponse.body["status"].getStr),
         retryAfter: retryAfter,
       )
     except ValueError:
-      raise newException(ACMEError, "Invalid order status: " & body["status"].getStr)
+      raise newException(
+        ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
+      )
   of ACMEChallengeCheck:
     try:
       ACMECheckResponse(
         kind: checkKind,
-        chalStatus: parseEnum[ACMEChallengeStatus](body["status"].getStr),
+        chalStatus: parseEnum[ACMEChallengeStatus](acmeResponse.body["status"].getStr),
         retryAfter: retryAfter,
       )
     except ValueError:
-      raise newException(ACMEError, "Invalid order status: " & body["status"].getStr)
+      raise newException(
+        ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
+      )

-proc requestCompleted(
+proc requestCompleted*(
   self: ACMEApi, chalURL: string, key: KeyPair, kid: Kid
 ): Future[ACMECompletedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
-  handleError("challengeCompleted (send notify)"):
+  handleError("requestCompleted (send notify)"):
     let payload =
       await self.createSignedAcmeRequest(chalURL, %*{}, key, kid = Opt.some(kid))
-    let rawResponse = await self.post(chalURL, payload)
-    let body = await rawResponse.getResponseBody()
-    body.to(ACMECompletedResponse)
+    let acmeResponse = await self.post(chalURL, payload)
+    acmeResponse.body.to(ACMECompletedResponse)

-proc challengeCompleted*(
+proc checkChallengeCompleted*(
   self: ACMEApi,
-  chalURL: string,
+  checkURL: string,
   key: KeyPair,
   kid: Kid,
   retries: int = DefaultChalCompletedRetries,
-): Future[void] {.async: (raises: [ACMEError, CancelledError]).} =
-  let completedResponse = await self.requestCompleted(chalURL, key, kid)
-  # check until acme server is done (poll validation)
+): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
   for i in 0 .. retries:
-    let checkResponse =
-      await self.requestCheck(completedResponse.checkURL, ACMEChallengeCheck, key, kid)
+    let checkResponse = await self.requestCheck(checkURL, ACMEChallengeCheck, key, kid)
     case checkResponse.chalStatus
     of ACMEChallengeStatus.pending:
       await sleepAsync(checkResponse.retryAfter) # try again after some delay
     of ACMEChallengeStatus.valid:
-      return
+      return true
     else:
       raise newException(
         ACMEError,
         "Failed challenge completion: expected 'valid', got '" &
           $checkResponse.chalStatus & "'",
       )
+  return false
+
+proc completeChallenge*(
+  self: ACMEApi,
+  chalURL: string,
+  key: KeyPair,
+  kid: Kid,
+  retries: int = DefaultChalCompletedRetries,
+): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
+  let completedResponse = await self.requestCompleted(chalURL, key, kid)
+  # check until acme server is done (poll validation)
+  return await self.checkChallengeCompleted(chalURL, key, kid, retries = retries)

 proc requestFinalize*(
   self: ACMEApi, domain: string, finalizeURL: string, key: KeyPair, kid: Kid
-): Future[ACMEFinalizedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
+): Future[ACMEFinalizeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
   let derCSR = createCSR(domain)
   let b64CSR = base64.encode(derCSR.toSeq, safe = true)
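The refactor splits the old blocking `challengeCompleted` into `requestCompleted` (notify the server) plus a pollable `checkChallengeCompleted`, composed by `completeChallenge`. A hedged usage sketch (assuming `chalURL`, `key` and `kid` were obtained from the register/order steps above):

# notifies the ACME server, then polls until the challenge leaves `pending`;
# false means the retry budget ran out before the server reported `valid`
let validated = await api.completeChallenge(chalURL, key, kid)
if not validated:
  raise newException(ACMEError, "challenge did not validate in time")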
@@ -383,11 +411,35 @@ proc requestFinalize*(
   let payload = await self.createSignedAcmeRequest(
     finalizeURL, %*{"csr": b64CSR}, key, kid = Opt.some(kid)
   )
-  let rawResponse = await self.post(finalizeURL, payload)
-  let body = await rawResponse.getResponseBody()
-  body.to(ACMEFinalizedResponse)
+  let acmeResponse = await self.post(finalizeURL, payload)
+  # server responds with updated order response
+  acmeResponse.body.to(ACMEFinalizeResponse)

-proc finalizeCertificate*(
+proc checkCertFinalized*(
   self: ACMEApi,
+  orderURL: string,
+  key: KeyPair,
+  kid: Kid,
+  retries: int = DefaultChalCompletedRetries,
+): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
+  for i in 0 .. retries:
+    let checkResponse = await self.requestCheck(orderURL, ACMEOrderCheck, key, kid)
+    case checkResponse.orderStatus
+    of ACMEOrderStatus.valid:
+      return true
+    of ACMEOrderStatus.processing:
+      await sleepAsync(checkResponse.retryAfter) # try again after some delay
+    else:
+      raise newException(
+        ACMEError,
+        "Failed certificate finalization: expected 'valid', got '" &
+          $checkResponse.orderStatus & "'",
+      )
+  return false
+
+proc certificateFinalized*(
+  self: ACMEApi,
   domain: string,
   finalizeURL: string,
@@ -396,31 +448,16 @@ proc finalizeCertificate*(
   kid: Kid,
   retries: int = DefaultFinalizeRetries,
 ): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
-  # call finalize and keep checking order until cert is valid (done)
   let finalizeResponse = await self.requestFinalize(domain, finalizeURL, key, kid)

-  handleError("finalizeCertificate (check finalized)"):
-    var checkResponse: ACMECheckResponse
-    for i in 0 .. retries:
-      let checkResponse = await self.requestCheck(orderURL, ACMEOrderCheck, key, kid)
-      case checkResponse.orderStatus
-      of ACMEOrderStatus.valid:
-        return true
-      of ACMEOrderStatus.processing:
-        await sleepAsync(checkResponse.retryAfter) # try again after some delay
-      else:
-        return false
-
-    return false
+  # keep checking order until cert is valid (done)
+  return await self.checkCertFinalized(orderURL, key, kid, retries = retries)

 proc requestGetOrder*(
   self: ACMEApi, orderURL: string
 ): Future[ACMEOrderResponse] {.async: (raises: [ACMEError, CancelledError]).} =
   handleError("requestGetOrder"):
-    let rawResponse =
-      await HttpClientRequestRef.get(self.session, orderURL).get().send()
-    let body = await rawResponse.getResponseBody()
-    body.to(ACMEOrderResponse)
+    let acmeResponse = await self.get(orderURL)
+    acmeResponse.body.to(ACMEOrderResponse)

 proc downloadCertificate*(
   self: ACMEApi, orderURL: string
libp2p/autotls/acme/mockapi.nim (new file, 37 lines)

@@ -0,0 +1,37 @@
+import chronos, chronos/apps/http/httpclient, json
+
+import ./api, ./utils
+
+export api
+
+type MockACMEApi* = ref object of ACMEApi
+  parent*: ACMEApi
+  mockedHeaders*: HttpTable
+  mockedBody*: JsonNode
+
+proc new*(
+  T: typedesc[MockACMEApi]
+): Future[MockACMEApi] {.async: (raises: [ACMEError, CancelledError]).} =
+  let directory = ACMEDirectory(
+    newNonce: LetsEncryptURL & "/new-nonce",
+    newOrder: LetsEncryptURL & "/new-order",
+    newAccount: LetsEncryptURL & "/new-account",
+  )
+  MockACMEApi(
+    session: HttpSessionRef.new(), directory: directory, acmeServerURL: LetsEncryptURL
+  )
+
+method requestNonce*(
+  self: MockACMEApi
+): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
+  return self.acmeServerURL & "/acme/1234"
+
+method post*(
+  self: MockACMEApi, url: string, payload: string
+): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
+  HTTPResponse(body: self.mockedBody, headers: self.mockedHeaders)
+
+method get*(
+  self: MockACMEApi, url: string
+): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
+  HTTPResponse(body: self.mockedBody, headers: self.mockedHeaders)
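With `post`, `get` and `requestNonce` declared as base methods on the now-`ref object of RootObj` ACMEApi, the mock can substitute canned responses without any network I/O. A hedged test sketch (header and body values are hypothetical, and `key` is assumed to be a previously generated KeyPair):

let mockApi = await MockACMEApi.new()
var headers = HttpTable.init()
headers.add("location", "https://example.com/acct/1")
mockApi.mockedHeaders = headers
mockApi.mockedBody = %*{"status": "valid"}
# requestRegister runs unchanged, but its post() call hits the mock
let registerResponse = await mockApi.requestRegister(key)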
@@ -4,6 +4,11 @@ import ../../transports/tls/certificate_ffi

 type ACMEError* = object of LPError

+proc keyOrError*(table: HttpTable, key: string): string {.raises: [ValueError].} =
+  if not table.contains(key):
+    raise newException(ValueError, "key " & key & " not present in headers")
+  table.getString(key)
+
 proc base64UrlEncode*(data: seq[byte]): string =
   ## Encodes data using base64url (RFC 4648 §5) — no padding, URL-safe
   var encoded = base64.encode(data, safe = true)
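`keyOrError` is what the API code above now uses instead of `getString`: a missing header such as "Replay-Nonce" or "location" raises `ValueError` (which `handleError` converts to `ACMEError`) rather than silently yielding an empty string. Sketch:

# raises ValueError if the ACME server omitted the header
let nonce = acmeResponse.headers.keyOrError("Replay-Nonce")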
@@ -140,7 +140,7 @@ proc triggerConnEvent*(
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
-    warn "Exception in triggerConnEvents",
+    warn "Exception in triggerConnEvent",
       description = exc.msg, peer = peerId, event = $event

 proc addPeerEventHandler*(
@@ -186,7 +186,7 @@ proc expectConnection*(
   if key in c.expectedConnectionsOverLimit:
     raise newException(
       AlreadyExpectingConnectionError,
-      "Already expecting an incoming connection from that peer",
+      "Already expecting an incoming connection from that peer: " & shortLog(p),
     )

   let future = Future[Muxer].Raising([CancelledError]).init()
@@ -85,8 +85,9 @@ proc init*(sig: var SkSignature, data: string): SkResult[void] =
   var buffer: seq[byte]
   try:
     buffer = hexToSeqByte(data)
-  except ValueError:
-    return err("secp: Hex to bytes failed")
+  except ValueError as e:
+    let errMsg = "secp: Hex to bytes failed: " & e.msg
+    return err(errMsg.cstring)
   init(sig, buffer)

 proc init*(t: typedesc[SkPrivateKey], data: openArray[byte]): SkResult[SkPrivateKey] =
@@ -595,13 +595,13 @@ template exceptionToAssert(body: untyped): untyped =
   try:
     res = body
   except OSError as exc:
-    raise exc
+    raise newException(OSError, "failure in exceptionToAssert: " & exc.msg, exc)
   except IOError as exc:
-    raise exc
+    raise newException(IOError, "failure in exceptionToAssert: " & exc.msg, exc)
   except Defect as exc:
-    raise exc
+    raise newException(Defect, "failure in exceptionToAssert: " & exc.msg, exc)
   except Exception as exc:
-    raiseAssert exc.msg
+    raiseAssert "Exception captured in exceptionToAssert: " & exc.msg
   when defined(nimHasWarnBareExcept):
     {.pop.}
   res
@@ -967,9 +967,9 @@ proc openStream*(
     stream.flags.incl(Outbound)
     stream.transp = transp
     result = stream
-  except ResultError[ProtoError]:
+  except ResultError[ProtoError] as e:
     await api.closeConnection(transp)
-    raise newException(DaemonLocalError, "Wrong message type!")
+    raise newException(DaemonLocalError, "Wrong message type: " & e.msg, e)

 proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
   # must not specify raised exceptions as this is StreamCallback from chronos
@@ -1023,10 +1023,10 @@ proc addHandler*(
     api.servers.add(P2PServer(server: server, address: maddress))
   except DaemonLocalError as e:
     await removeHandler()
-    raise e
+    raise newException(DaemonLocalError, "Could not add stream handler: " & e.msg, e)
   except TransportError as e:
     await removeHandler()
-    raise e
+    raise newException(TransportError, "Could not add stream handler: " & e.msg, e)
   except CancelledError as e:
     await removeHandler()
     raise e
@@ -1503,10 +1503,14 @@ proc pubsubSubscribe*(
     result = ticket
   except DaemonLocalError as exc:
     await api.closeConnection(transp)
-    raise exc
+    raise newException(
+      DaemonLocalError, "Could not subscribe to topic '" & topic & "': " & exc.msg, exc
+    )
   except TransportError as exc:
     await api.closeConnection(transp)
-    raise exc
+    raise newException(
+      TransportError, "Could not subscribe to topic '" & topic & "': " & exc.msg, exc
+    )
   except CancelledError as exc:
     await api.closeConnection(transp)
     raise exc
@@ -127,8 +127,8 @@ proc expandDnsAddr(
     var peerIdBytes: seq[byte]
     try:
       peerIdBytes = lastPart.protoArgument().tryGet()
-    except ResultError[string]:
-      raiseAssert "expandDnsAddr failed in protoArgument: " & getCurrentExceptionMsg()
+    except ResultError[string] as e:
+      raiseAssert "expandDnsAddr failed in expandDnsAddr protoArgument: " & e.msg

     let addrPeerId = PeerId.init(peerIdBytes).tryGet()
     result.add((resolvedAddress[0 ..^ 2].tryGet(), Opt.some(addrPeerId)))
@@ -178,7 +178,7 @@ proc internalConnect(
   dir = Direction.Out,
 ): Future[Muxer] {.async: (raises: [DialFailedError, CancelledError]).} =
   if Opt.some(self.localPeerId) == peerId:
-    raise newException(DialFailedError, "can't dial self!")
+    raise newException(DialFailedError, "internalConnect can't dial self!")

   # Ensure there's only one in-flight attempt per peer
   let lock = self.dialLock.mgetOrPut(peerId.get(default(PeerId)), newAsyncLock())
@@ -186,8 +186,8 @@ proc internalConnect(
   defer:
     try:
       lock.release()
-    except AsyncLockError:
-      raiseAssert "lock must have been acquired in line above"
+    except AsyncLockError as e:
+      raiseAssert "lock must have been acquired in line above: " & e.msg

   if reuseConnection:
     peerId.withValue(peerId):
@@ -198,7 +198,9 @@ proc internalConnect(
     try:
       self.connManager.getOutgoingSlot(forceDial)
     except TooManyConnectionsError as exc:
-      raise newException(DialFailedError, exc.msg)
+      raise newException(
+        DialFailedError, "failed getOutgoingSlot in internalConnect: " & exc.msg, exc
+      )

   let muxed =
     try:
@@ -208,11 +210,15 @@ proc internalConnect(
       raise exc
     except CatchableError as exc:
       slot.release()
-      raise newException(DialFailedError, exc.msg)
+      raise newException(
+        DialFailedError, "failed dialAndUpgrade in internalConnect: " & exc.msg, exc
+      )

   slot.trackMuxer(muxed)
   if isNil(muxed): # None of the addresses connected
-    raise newException(DialFailedError, "Unable to establish outgoing link")
+    raise newException(
+      DialFailedError, "Unable to establish outgoing link in internalConnect"
+    )

   try:
     self.connManager.storeMuxer(muxed)
@@ -228,7 +234,11 @@ proc internalConnect(
   except CatchableError as exc:
     trace "Failed to finish outgoing upgrade", description = exc.msg
     await muxed.close()
-    raise newException(DialFailedError, "Failed to finish outgoing upgrade")
+    raise newException(
+      DialFailedError,
+      "Failed to finish outgoing upgrade in internalConnect: " & exc.msg,
+      exc,
+    )

 method connect*(
   self: Dialer,
@@ -260,7 +270,7 @@ method connect*(

   if allowUnknownPeerId == false:
     raise newException(
-      DialFailedError, "Address without PeerID and unknown peer id disabled!"
+      DialFailedError, "Address without PeerID and unknown peer id disabled in connect"
     )

   return
@@ -273,7 +283,7 @@ proc negotiateStream(
   let selected = await MultistreamSelect.select(conn, protos)
   if not protos.contains(selected):
     await conn.closeWithEOF()
-    raise newException(DialFailedError, "Unable to select sub-protocol " & $protos)
+    raise newException(DialFailedError, "Unable to select sub-protocol: " & $protos)

   return conn

@@ -289,13 +299,13 @@ method tryDial*(
   try:
     let mux = await self.dialAndUpgrade(Opt.some(peerId), addrs)
     if mux.isNil():
-      raise newException(DialFailedError, "No valid multiaddress")
+      raise newException(DialFailedError, "No valid multiaddress in tryDial")
     await mux.close()
     return mux.connection.observedAddr
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
-    raise newException(DialFailedError, exc.msg)
+    raise newException(DialFailedError, "tryDial failed: " & exc.msg, exc)

 method dial*(
   self: Dialer, peerId: PeerId, protos: seq[string]
@@ -309,14 +319,17 @@ method dial*(
   try:
     let stream = await self.connManager.getStream(peerId)
     if stream.isNil:
-      raise newException(DialFailedError, "Couldn't get muxed stream")
+      raise newException(
+        DialFailedError,
+        "Couldn't get muxed stream in dial for peer_id: " & shortLog(peerId),
+      )
     return await self.negotiateStream(stream, protos)
   except CancelledError as exc:
-    trace "Dial canceled"
+    trace "Dial canceled", description = exc.msg
     raise exc
   except CatchableError as exc:
     trace "Error dialing", description = exc.msg
-    raise newException(DialFailedError, exc.msg)
+    raise newException(DialFailedError, "failed dial existing: " & exc.msg)

 method dial*(
   self: Dialer,
@@ -347,17 +360,20 @@ method dial*(
     stream = await self.connManager.getStream(conn)

     if isNil(stream):
-      raise newException(DialFailedError, "Couldn't get muxed stream")
+      raise newException(
+        DialFailedError,
+        "Couldn't get muxed stream in new dial for remote_peer_id: " & shortLog(peerId),
+      )

     return await self.negotiateStream(stream, protos)
   except CancelledError as exc:
-    trace "Dial canceled", conn
+    trace "Dial canceled", conn, description = exc.msg
     await cleanup()
     raise exc
   except CatchableError as exc:
     debug "Error dialing", conn, description = exc.msg
     await cleanup()
-    raise newException(DialFailedError, exc.msg)
+    raise newException(DialFailedError, "failed new dial: " & exc.msg, exc)

 method addTransport*(self: Dialer, t: Transport) =
   self.transports &= t
@@ -113,7 +113,7 @@ proc add*(dm: DiscoveryManager, di: DiscoveryInterface) =
       try:
         query.peers.putNoWait(pa)
       except AsyncQueueFullError as exc:
-        debug "Cannot push discovered peer to queue"
+        debug "Cannot push discovered peer to queue", description = exc.msg

 proc request*(dm: DiscoveryManager, pa: PeerAttributes): DiscoveryQuery =
   var query = DiscoveryQuery(attr: pa, peers: newAsyncQueue[PeerAttributes]())
@@ -567,7 +567,7 @@ proc init*(mhtype: typedesc[MultiHash], data: string): MhResult[MultiHash] {.inl
 proc init58*(mhtype: typedesc[MultiHash], data: string): MultiHash {.inline.} =
   ## Create MultiHash from BASE58 encoded string representation ``data``.
   if MultiHash.decode(Base58.decode(data), result) == -1:
-    raise newException(MultihashError, "Incorrect MultiHash binary format")
+    raise newException(MultihashError, "Incorrect MultiHash binary format in init58")

 proc cmp(a: openArray[byte], b: openArray[byte]): bool {.inline.} =
   if len(a) != len(b):
@@ -87,7 +87,7 @@ proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
     raise exc
   except LPStreamError as exc:
     await s.conn.close()
-    raise exc
+    raise newException(LPStreamError, "Opening LPChannel failed: " & exc.msg, exc)

 method closed*(s: LPChannel): bool =
   s.closedLocal
@@ -587,10 +587,12 @@ method handle*(m: Yamux) {.async: (raises: []).} =
         let channel =
           try:
             m.channels[header.streamId]
-          except KeyError:
+          except KeyError as e:
             raise newException(
               YamuxError,
-              "Stream was cleaned up before handling data: " & $header.streamId,
+              "Stream was cleaned up before handling data: " & $header.streamId & " : " &
+                e.msg,
+              e,
             )

         if header.msgType == WindowUpdate:
@@ -78,23 +78,23 @@ proc getDnsResponse(

   try:
     await receivedDataFuture.wait(5.seconds) #unix default
-  except AsyncTimeoutError:
-    raise newException(IOError, "DNS server timeout")
+  except AsyncTimeoutError as e:
+    raise newException(IOError, "DNS server timeout: " & e.msg, e)

   let rawResponse = sock.getMessage()
   try:
     parseResponse(string.fromBytes(rawResponse))
   except IOError as exc:
-    raise exc
+    raise newException(IOError, "Failed to parse DNS response: " & exc.msg, exc)
   except OSError as exc:
-    raise exc
+    raise newException(OSError, "Failed to parse DNS response: " & exc.msg, exc)
   except ValueError as exc:
-    raise exc
+    raise newException(ValueError, "Failed to parse DNS response: " & exc.msg, exc)
   except Exception as exc:
     # Nim 1.6: parseResponse can has a raises: [Exception, ..] because of
     # https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
     # it can't actually raise though
-    raiseAssert exc.msg
+    raiseAssert "Exception parsing DN response: " & exc.msg
   finally:
     await sock.closeWait()
@@ -10,48 +10,53 @@
 {.push raises: [].}

 import base64, json, strutils, uri, times
-import chronos/apps/http/httpclient, results, chronicles, bio
-import ./peerinfo, ./crypto/crypto, ./varint.nim
+import chronos, chronos/apps/http/httpclient, results, chronicles, bio
+import ../peerinfo, ../crypto/crypto, ../varint.nim

 logScope:
   topics = "libp2p peeridauth"

 const
   NimLibp2pUserAgent = "nim-libp2p"
-  PeerIDAuthPrefix = "libp2p-PeerID"
+  PeerIDAuthPrefix* = "libp2p-PeerID"
   ChallengeCharset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
   ChallengeDefaultLen = 48

+type PeerIDAuthClient* = ref object of RootObj
+  session: HttpSessionRef
+  rng: ref HmacDrbgContext
+
 type PeerIDAuthError* = object of LPError

+type PeerIDAuthResponse* = object
+  status*: int
+  headers*: HttpTable
+  body*: seq[byte]
+
 type BearerToken* = object
   token*: string
   expires*: Opt[DateTime]

-type PeerIDAuthOpaque = string
-type PeerIDAuthSignature = string
-type PeerIDAuthChallenge = string
-
-type PeerIDAuthApi* = object
-  session: HttpSessionRef
-  rng: ref HmacDrbgContext
+type PeerIDAuthOpaque* = string
+type PeerIDAuthSignature* = string
+type PeerIDAuthChallenge* = string

 type PeerIDAuthAuthenticationResponse* = object
-  challengeClient: PeerIDAuthChallenge
-  opaque: PeerIDAuthOpaque
-  serverPubkey: PublicKey
+  challengeClient*: PeerIDAuthChallenge
+  opaque*: PeerIDAuthOpaque
+  serverPubkey*: PublicKey

 type PeerIDAuthAuthorizationResponse* = object
-  sig: PeerIDAuthSignature
-  bearer: BearerToken
-  rawResponse: HttpClientResponseRef
+  sig*: PeerIDAuthSignature
+  bearer*: BearerToken
+  response*: PeerIDAuthResponse

 type SigParam = object
   k: string
   v: seq[byte]

-proc new*(T: typedesc[PeerIDAuthApi], rng: ref HmacDrbgContext): PeerIDAuthApi =
-  PeerIDAuthApi(session: HttpSessionRef.new(), rng: rng)
+proc new*(T: typedesc[PeerIDAuthClient], rng: ref HmacDrbgContext): PeerIDAuthClient =
+  PeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)

 proc sampleChar(
   ctx: var HmacDrbgContext, choices: string
@@ -123,7 +128,7 @@ proc sign(
     base64.encode(privateKey.sign(bytesToSign).get().getBytes(), safe = true)
   )

-proc checkSignature(
+proc checkSignature*(
   serverSig: PeerIDAuthSignature,
   serverPublicKey: PublicKey,
   challengeServer: PeerIDAuthChallenge,
@@ -145,10 +150,10 @@ proc checkSignature(
     bytesToSign.toOpenArray(0, bytesToSign.len - 1), serverPublicKey
   )

-proc post(
-  self: PeerIDAuthApi, uri: string, payload: string, authHeader: string
-): Future[HttpClientResponseRef] {.async: (raises: [HttpError, CancelledError]).} =
-  await HttpClientRequestRef
+method post*(
+  self: PeerIDAuthClient, uri: string, payload: string, authHeader: string
+): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
+  let rawResponse = await HttpClientRequestRef
     .post(
       self.session,
       uri,
@@ -162,18 +167,34 @@ proc post(
     .get()
     .send()

-proc requestAuthentication(
-  self: PeerIDAuthApi, uri: Uri
+  PeerIDAuthResponse(
+    status: rawResponse.status,
+    headers: rawResponse.headers,
+    body: await rawResponse.getBodyBytes(),
+  )
+
+method get*(
+  self: PeerIDAuthClient, uri: string
+): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
+  let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
+  PeerIDAuthResponse(
+    status: rawResponse.status,
+    headers: rawResponse.headers,
+    body: await rawResponse.getBodyBytes(),
+  )
+
+proc requestAuthentication*(
+  self: PeerIDAuthClient, uri: Uri
 ): Future[PeerIDAuthAuthenticationResponse] {.
   async: (raises: [PeerIDAuthError, CancelledError])
 .} =
-  let rawResponse =
+  let response =
     try:
-      await HttpClientRequestRef.get(self.session, $uri).get().send()
+      await self.get($uri)
     except HttpError as exc:
       raise newException(PeerIDAuthError, "Failed to start PeerID Auth", exc)

-  let wwwAuthenticate = rawResponse.headers.getString("WWW-Authenticate")
+  let wwwAuthenticate = response.headers.getString("WWW-Authenticate")
   if wwwAuthenticate == "":
     raise newException(PeerIDAuthError, "WWW-authenticate not present in response")

@@ -190,7 +211,7 @@ proc requestAuthentication(
     serverPubkey: serverPubkey,
   )

-proc pubkeyBytes(pubkey: PublicKey): seq[byte] {.raises: [PeerIDAuthError].} =
+proc pubkeyBytes*(pubkey: PublicKey): seq[byte] {.raises: [PeerIDAuthError].} =
   try:
     pubkey.getBytes().valueOr:
       raise
@@ -208,8 +229,8 @@ proc parse3339DateTime(
   let millis = parseInt(parts[1].strip(chars = {'Z'}))
   result = base + initDuration(milliseconds = millis)

-proc requestAuthorization(
-  self: PeerIDAuthApi,
+proc requestAuthorization*(
+  self: PeerIDAuthClient,
   peerInfo: PeerInfo,
   uri: Uri,
   challengeClient: PeerIDAuthChallenge,
@@ -225,7 +246,7 @@ proc requestAuthorization(
   let authHeader =
     PeerIDAuthPrefix & " public-key=\"" & clientPubkeyB64 & "\"" & ", opaque=\"" & opaque &
     "\"" & ", challenge-server=\"" & challengeServer & "\"" & ", sig=\"" & sig & "\""
-  let rawResponse =
+  let response =
     try:
       await self.post($uri, $payload, authHeader)
     except HttpError as exc:
@@ -233,7 +254,7 @@ proc requestAuthorization(
         PeerIDAuthError, "Failed to send Authorization for PeerID Auth", exc
       )

-  let authenticationInfo = rawResponse.headers.getString("authentication-info")
+  let authenticationInfo = response.headers.getString("authentication-info")

   let bearerExpires =
     try:
@@ -246,12 +267,12 @@ proc requestAuthorization(
     bearer: BearerToken(
       token: extractField(authenticationInfo, "bearer"), expires: bearerExpires
     ),
-    rawResponse: rawResponse,
+    response: response,
   )

 proc sendWithoutBearer(
-  self: PeerIDAuthApi, uri: Uri, peerInfo: PeerInfo, payload: auto
-): Future[(BearerToken, HttpClientResponseRef)] {.
+  self: PeerIDAuthClient, uri: Uri, peerInfo: PeerInfo, payload: auto
+): Future[(BearerToken, PeerIDAuthResponse)] {.
   async: (raises: [PeerIDAuthError, CancelledError])
 .} =
   # Authenticate in three ways as per the PeerID Auth spec
@@ -271,36 +292,36 @@ proc sendWithoutBearer(
   ):
     raise newException(PeerIDAuthError, "Failed to validate server's signature")

-  return (authorizationResponse.bearer, authorizationResponse.rawResponse)
+  return (authorizationResponse.bearer, authorizationResponse.response)

 proc sendWithBearer(
-  self: PeerIDAuthApi,
+  self: PeerIDAuthClient,
   uri: Uri,
   peerInfo: PeerInfo,
   payload: auto,
   bearer: BearerToken,
-): Future[(BearerToken, HttpClientResponseRef)] {.
+): Future[(BearerToken, PeerIDAuthResponse)] {.
   async: (raises: [PeerIDAuthError, CancelledError])
 .} =
   if bearer.expires.isSome and DateTime(bearer.expires.get) <= now():
     raise newException(PeerIDAuthError, "Bearer expired")
   let authHeader = PeerIDAuthPrefix & " bearer=\"" & bearer.token & "\""
-  let rawResponse =
+  let response =
     try:
       await self.post($uri, $payload, authHeader)
     except HttpError as exc:
       raise newException(
         PeerIDAuthError, "Failed to send request with bearer token for PeerID Auth", exc
       )
-  return (bearer, rawResponse)
+  return (bearer, response)

 proc send*(
-  self: PeerIDAuthApi,
+  self: PeerIDAuthClient,
   uri: Uri,
   peerInfo: PeerInfo,
   payload: auto,
   bearer: BearerToken = BearerToken(),
-): Future[(BearerToken, HttpClientResponseRef)] {.
+): Future[(BearerToken, PeerIDAuthResponse)] {.
   async: (raises: [PeerIDAuthError, CancelledError])
 .} =
   if bearer.token == "":
@@ -308,5 +329,7 @@ proc send*(
   else:
     await self.sendWithBearer(uri, peerInfo, payload, bearer)

-proc close*(self: PeerIDAuthApi): Future[void] {.async: (raises: [CancelledError]).} =
+proc close*(
+  self: PeerIDAuthClient
+): Future[void] {.async: (raises: [CancelledError]).} =
   await self.session.closeWait()
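A hedged sketch of a round trip with the renamed client (URL and payload are hypothetical; `rng` and `peerInfo` come from the usual switch setup):

let client = PeerIDAuthClient.new(rng)
# first send: no bearer yet, so the client runs the WWW-Authenticate
# challenge flow and returns a fresh bearer token with the response
let (bearer, response) = await client.send(
  parseUri("https://auth.example.com"), peerInfo, %*{"hello": "world"}
)
# later sends can reuse the bearer until it expires
let (_, response2) = await client.send(
  parseUri("https://auth.example.com"), peerInfo, %*{"hello": "again"}, bearer
)
await client.close()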
libp2p/peeridauth/mockclient.nim (new file, 41 lines)

@@ -0,0 +1,41 @@
+# Nim-Libp2p
+# Copyright (c) 2025 Status Research & Development GmbH
+# Licensed under either of
+#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
+# at your option.
+# This file may not be copied, modified, or distributed except according to
+# those terms.
+
+{.push raises: [].}
+
+import chronos, chronos/apps/http/httpclient
+import ../crypto/crypto
+
+import ./client
+
+export client
+
+type MockPeerIDAuthClient* = ref object of PeerIDAuthClient
+  mockedStatus*: int
+  mockedHeaders*: HttpTable
+  mockedBody*: seq[byte]
+
+proc new*(
+  T: typedesc[MockPeerIDAuthClient], rng: ref HmacDrbgContext
+): MockPeerIDAuthClient {.raises: [PeerIDAuthError].} =
+  MockPeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)
+
+method post*(
+  self: MockPeerIDAuthClient, uri: string, payload: string, authHeader: string
+): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
+  PeerIDAuthResponse(
+    status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
+  )
+
+method get*(
+  self: MockPeerIDAuthClient, uri: string
+): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
+  PeerIDAuthResponse(
+    status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
+  )
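As with MockACMEApi, tests can stub the HTTP layer entirely. A hedged sketch (status, headers and body values hypothetical):

let mockClient = MockPeerIDAuthClient.new(rng)
mockClient.mockedStatus = 200
mockClient.mockedHeaders = HttpTable.init()
mockClient.mockedBody = @[]
# both get and post now return the canned PeerIDAuthResponse
let response = await mockClient.get("https://auth.example.com")
doAssert response.status == 200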
@@ -101,8 +101,10 @@ proc new*(
   let pubkey =
     try:
       key.getPublicKey().tryGet()
-    except CatchableError:
-      raise newException(PeerInfoError, "invalid private key")
+    except CatchableError as e:
+      raise newException(
+        PeerInfoError, "invalid private key creating PeerInfo: " & e.msg, e
+      )

   let peerId = PeerId.init(key).tryGet()
@@ -87,7 +87,7 @@ method dialMe*(
   except CancelledError as e:
     raise e
   except CatchableError as e:
-    raise newException(AutonatError, "read Dial response failed", e)
+    raise newException(AutonatError, "read Dial response failed: " & e.msg, e)

   let response = getResponseOrRaise(AutonatMsg.decode(respBytes))
@@ -107,7 +107,9 @@ proc startSync*(
       description = err.msg
     raise newException(
       DcutrError,
-      "Unexpected error when Dcutr initiator tried to connect to the remote peer", err,
+      "Unexpected error when Dcutr initiator tried to connect to the remote peer: " &
+        err.msg,
+      err,
     )
   finally:
     if stream != nil:
@@ -148,7 +148,7 @@ proc dialPeerV1*(
     raise exc
   except LPStreamError as exc:
     trace "error writing hop request", description = exc.msg
-    raise newException(RelayV1DialError, "error writing hop request", exc)
+    raise newException(RelayV1DialError, "error writing hop request: " & exc.msg, exc)

   let msgRcvFromRelayOpt =
     try:
@@ -158,7 +158,8 @@ proc dialPeerV1*(
     except LPStreamError as exc:
       trace "error reading stop response", description = exc.msg
       await sendStatus(conn, StatusV1.HopCantOpenDstStream)
-      raise newException(RelayV1DialError, "error reading stop response", exc)
+      raise
+        newException(RelayV1DialError, "error reading stop response: " & exc.msg, exc)

   try:
     let msgRcvFromRelay = msgRcvFromRelayOpt.valueOr:
@@ -173,10 +174,16 @@ proc dialPeerV1*(
       )
   except RelayV1DialError as exc:
     await sendStatus(conn, StatusV1.HopCantOpenDstStream)
-    raise exc
+    raise newException(
+      RelayV1DialError,
+      "Hop can't open destination stream after sendStatus: " & exc.msg,
+      exc,
+    )
   except ValueError as exc:
     await sendStatus(conn, StatusV1.HopCantOpenDstStream)
-    raise newException(RelayV1DialError, exc.msg)
+    raise newException(
+      RelayV1DialError, "Exception reading msg in dialPeerV1: " & exc.msg, exc
+    )
   result = conn

 proc dialPeerV2*(
@@ -199,7 +206,8 @@ proc dialPeerV2*(
     raise exc
   except CatchableError as exc:
     trace "error reading stop response", description = exc.msg
-    raise newException(RelayV2DialError, exc.msg)
+    raise
+      newException(RelayV2DialError, "Exception decoding HopMessage: " & exc.msg, exc)

   if msgRcvFromRelay.msgType != HopMessageType.Status:
     raise newException(RelayV2DialError, "Unexpected stop response")
@@ -76,7 +76,7 @@ proc dial*(
     if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
       raise newException(RelayDialError, "Destination doesn't exist")
   except RelayDialError as e:
-    raise e
+    raise newException(RelayDialError, "dial address not valid: " & e.msg, e)
   except CatchableError:
     raise newException(RelayDialError, "dial address not valid")
@@ -100,13 +100,13 @@ proc dial*(
     raise e
   except DialFailedError as e:
     safeClose(rc)
-    raise newException(RelayDialError, "dial relay peer failed", e)
+    raise newException(RelayDialError, "dial relay peer failed: " & e.msg, e)
   except RelayV1DialError as e:
     safeClose(rc)
-    raise e
+    raise newException(RelayV1DialError, "dial relay v1 failed: " & e.msg, e)
   except RelayV2DialError as e:
     safeClose(rc)
-    raise e
+    raise newException(RelayV2DialError, "dial relay v2 failed: " & e.msg, e)

 method dial*(
   self: RelayTransport,
@@ -121,7 +121,8 @@ method dial*(
   except CancelledError as e:
     raise e
   except CatchableError as e:
-    raise newException(transport.TransportDialError, e.msg, e)
+    raise
+      newException(transport.TransportDialError, "Caught error in dial: " & e.msg, e)

 method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe.} =
   try:
@@ -69,8 +69,8 @@ proc bridge*(
   while not connSrc.closed() and not connDst.closed():
     try: # https://github.com/status-im/nim-chronos/issues/516
       discard await race(futSrc, futDst)
-    except ValueError:
-      raiseAssert("Futures list is not empty")
+    except ValueError as e:
+      raiseAssert("Futures list is not empty: " & e.msg)
     if futSrc.finished():
       bufRead = await futSrc
      if bufRead > 0:
@@ -16,35 +16,68 @@ import ./core, ../../stream/connection
 logScope:
   topics = "libp2p perf"

-type PerfClient* = ref object of RootObj
+type Stats* = object
+  isFinal*: bool
+  uploadBytes*: uint
+  downloadBytes*: uint
+  duration*: Duration
+
+type PerfClient* = ref object
+  stats: Stats
+
+proc new*(T: typedesc[PerfClient]): T =
+  return T()
+
+proc currentStats*(p: PerfClient): Stats =
+  return p.stats

 proc perf*(
-  _: typedesc[PerfClient],
-  conn: Connection,
-  sizeToWrite: uint64 = 0,
-  sizeToRead: uint64 = 0,
+  p: PerfClient, conn: Connection, sizeToWrite: uint64 = 0, sizeToRead: uint64 = 0
 ): Future[Duration] {.public, async: (raises: [CancelledError, LPStreamError]).} =
-  var
-    size = sizeToWrite
-    buf: array[PerfSize, byte]
-  let start = Moment.now()
   trace "starting performance benchmark", conn, sizeToWrite, sizeToRead
+  p.stats = Stats()

-  await conn.write(toSeq(toBytesBE(sizeToRead)))
-  while size > 0:
-    let toWrite = min(size, PerfSize)
-    await conn.write(buf[0 ..< toWrite])
-    size -= toWrite
+  try:
+    var
+      size = sizeToWrite
+      buf: array[PerfSize, byte]

-  await conn.close()
+    let start = Moment.now()

-  size = sizeToRead
+    await conn.write(toSeq(toBytesBE(sizeToRead)))
+    while size > 0:
+      let toWrite = min(size, PerfSize)
+      await conn.write(buf[0 ..< toWrite])
+      size -= toWrite.uint

-  while size > 0:
-    let toRead = min(size, PerfSize)
-    await conn.readExactly(addr buf[0], toRead.int)
-    size = size - toRead
+      # set stats using copy value to avoid race condition
+      var statsCopy = p.stats
+      statsCopy.duration = Moment.now() - start
+      statsCopy.uploadBytes += toWrite.uint
+      p.stats = statsCopy

-  let duration = Moment.now() - start
-  trace "finishing performance benchmark", duration
-  return duration
+    await conn.close()
+
+    size = sizeToRead
+
+    while size > 0:
+      let toRead = min(size, PerfSize)
+      await conn.readExactly(addr buf[0], toRead.int)
+      size = size - toRead.uint
+
+      # set stats using copy value to avoid race condition
+      var statsCopy = p.stats
+      statsCopy.duration = Moment.now() - start
+      statsCopy.downloadBytes += toRead.uint
+      p.stats = statsCopy
+  except CancelledError as e:
+    raise e
+  except LPStreamError as e:
+    raise e
+  finally:
+    p.stats.isFinal = true
+
+  trace "finishing performance benchmark", duration = p.stats.duration
+
+  return p.stats.duration
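Because the stats now live on the PerfClient instance, another task can poll progress while the benchmark runs. A hedged sketch (sizes and log fields hypothetical; `conn` is an established Connection):

let client = PerfClient.new()
let fut = client.perf(conn, sizeToWrite = 10 * 1024 * 1024)
while not fut.finished():
  let s = client.currentStats()
  trace "perf progress", uploaded = s.uploadBytes, downloaded = s.downloadBytes
  await sleepAsync(250.milliseconds)
# isFinal is set in the proc's finally block, so the last snapshot is marked
let duration = await fut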
@@ -185,7 +185,7 @@ method init*(f: FloodSub) =
     try:
       await f.handleConn(conn, proto)
     except CancelledError as exc:
-      trace "Unexpected cancellation in floodsub handler", conn
+      trace "Unexpected cancellation in floodsub handler", conn, description = exc.msg
       raise exc

   f.handler = handler
@@ -218,7 +218,7 @@ method init*(g: GossipSub) =
     try:
       await g.handleConn(conn, proto)
     except CancelledError as exc:
-      trace "Unexpected cancellation in gossipsub handler", conn
+      trace "Unexpected cancellation in gossipsub handler", conn, description = exc.msg
      raise exc

   g.handler = handler
@@ -457,8 +457,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
       prunes = toSeq(
         try:
           g.mesh[topic]
-        except KeyError:
-          raiseAssert "have peers"
+        except KeyError as e:
+          raiseAssert "have peers: " & e.msg
       )
     # avoid pruning peers we are currently grafting in this heartbeat
     prunes.keepIf do(x: PubSubPeer) -> bool:
@@ -513,8 +513,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
     var peers = toSeq(
       try:
         g.mesh[topic]
-      except KeyError:
-        raiseAssert "have peers"
+      except KeyError as e:
+        raiseAssert "have peers: " & e.msg
     )
     # grafting so high score has priority
     peers.sort(byScore, SortOrder.Descending)
@@ -230,10 +230,10 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async: (raises: []).} =
           conn, peer = p, closed = conn.closed, description = exc.msg
       finally:
         await conn.close()
-  except CancelledError:
+  except CancelledError as e:
     # This is top-level procedure which will work as separate task, so it
     # do not need to propagate CancelledError.
-    trace "Unexpected cancellation in PubSubPeer.handle"
+    trace "Unexpected cancellation in PubSubPeer.handle", description = e.msg
   finally:
     debug "exiting pubsub read loop", conn, peer = p, closed = conn.closed

@@ -266,7 +266,7 @@ proc connectOnce(
       await p.getConn().wait(5.seconds)
     except AsyncTimeoutError as error:
       trace "getConn timed out", description = error.msg
-      raise (ref LPError)(msg: "Cannot establish send connection")
+      raise (ref LPError)(msg: "Cannot establish send connection: " & error.msg)

   # When the send channel goes up, subscriptions need to be sent to the
   # remote peer - if we had multiple channels up and one goes down, all
@@ -419,8 +419,8 @@ proc save(
     )
     rdv.namespaces[nsSalted].add(rdv.registered.high)
     # rdv.registerEvent.fire()
-  except KeyError:
-    doAssert false, "Should have key"
+  except KeyError as e:
+    doAssert false, "Should have key: " & e.msg

 proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
   trace "Received Register", peerId = conn.peerId, ns = r.ns
@@ -110,8 +110,8 @@ proc handleConn(
     fut2 = sconn.join()
   try: # https://github.com/status-im/nim-chronos/issues/516
     discard await race(fut1, fut2)
-  except ValueError:
-    raiseAssert("Futures list is not empty")
+  except ValueError as e:
+    raiseAssert("Futures list is not empty: " & e.msg)
   # at least one join() completed, cancel pending one, if any
   if not fut1.finished:
     await fut1.cancelAndWait()
@@ -182,14 +182,14 @@ method readOnce*(
   except LPStreamEOFError as err:
     s.isEof = true
     await s.close()
-    raise err
+    raise newException(LPStreamEOFError, "Secure connection EOF: " & err.msg, err)
   except CancelledError as exc:
     raise exc
   except LPStreamError as err:
     debug "Error while reading message from secure connection, closing.",
       error = err.name, message = err.msg, connection = s
     await s.close()
-    raise err
+    raise newException(LPStreamError, "Secure connection read error: " & err.msg, err)

   var p = cast[ptr UncheckedArray[byte]](pbytes)
   return s.buf.consumeTo(toOpenArray(p, 0, nbytes - 1))
@@ -55,7 +55,7 @@ proc tryStartingDirectConn(
       if not isRelayed.get(false) and address.isPublicMA():
         return await tryConnect(address)
     except CatchableError as err:
-      debug "Failed to create direct connection.", err = err.msg
+      debug "Failed to create direct connection.", description = err.msg
      continue
   return false

@@ -91,7 +91,7 @@ proc newConnectedPeerHandler(
   except CancelledError as err:
     raise err
   except CatchableError as err:
-    debug "Hole punching failed during dcutr", err = err.msg
+    debug "Hole punching failed during dcutr", description = err.msg

 method setup*(
   self: HPService, switch: Switch
@@ -104,7 +104,7 @@ method setup*(
     let dcutrProto = Dcutr.new(switch)
     switch.mount(dcutrProto)
   except LPError as err:
-    error "Failed to mount Dcutr", err = err.msg
+    error "Failed to mount Dcutr", description = err.msg

   self.newConnectedPeerHandler = proc(
     peerId: PeerId, event: PeerEvent
@@ -199,8 +199,10 @@ method closeImpl*(s: BufferStream): Future[void] {.async: (raises: [], raw: true
|
||||
elif s.pushing:
|
||||
if not s.readQueue.empty():
|
||||
discard s.readQueue.popFirstNoWait()
|
||||
except AsyncQueueFullError, AsyncQueueEmptyError:
|
||||
raiseAssert(getCurrentExceptionMsg())
|
||||
except AsyncQueueFullError as e:
|
||||
raiseAssert("closeImpl failed queue full: " & e.msg)
|
||||
except AsyncQueueEmptyError as e:
|
||||
raiseAssert("closeImpl failed queue empty: " & e.msg)
|
||||
|
||||
trace "Closed BufferStream", s
|
||||
|
||||
|
||||
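The BufferStream hunk also splits the combined `except AsyncQueueFullError, AsyncQueueEmptyError:` branch in two, presumably because a multi-type `except` branch cannot bind the exception with `as e`, so `getCurrentExceptionMsg()` was previously the only way to reach the message. An isolated sketch of the same transformation, under that assumption:

import std/tables

proc lookup(t: Table[string, int], key: string): int =
  try:
    t[key]
  # `except KeyError, ValueError as e:` would not compile; once the
  # message is needed, each exception type gets its own branch.
  except KeyError as e:
    raiseAssert "missing key: " & e.msg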
@@ -328,7 +328,7 @@ proc closeWithEOF*(s: LPStream): Future[void] {.async: (raises: []), public.} =
      debug "Unexpected bytes while waiting for EOF", s
  except CancelledError:
    discard
  except LPStreamEOFError:
    trace "Expected EOF came", s
  except LPStreamEOFError as e:
    trace "Expected EOF came", s, description = e.msg
  except LPStreamError as exc:
    debug "Unexpected error while waiting for EOF", s, description = exc.msg

@@ -233,7 +233,7 @@ proc upgrader(
  except CancelledError as e:
    raise e
  except CatchableError as e:
    raise newException(UpgradeError, e.msg, e)
    raise newException(UpgradeError, "catchable error upgrader: " & e.msg, e)

proc upgradeMonitor(
    switch: Switch, trans: Transport, conn: Connection, upgrades: AsyncSemaphore
@@ -275,7 +275,8 @@ proc accept(s: Switch, transport: Transport) {.async: (raises: []).} =
        await transport.accept()
      except CatchableError as exc:
        slot.release()
        raise exc
        raise
          newException(CatchableError, "failed to accept connection: " & exc.msg, exc)
    slot.trackConnection(conn)
    if isNil(conn):
      # A nil connection means that we might have hit a

@@ -101,7 +101,7 @@ proc getStream*(
    return QuicStream.new(stream, session.observedAddr, session.peerId)
  except CatchableError as exc:
    # TODO: incomingStream is using {.async.} with no raises
    raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
    raise (ref QuicTransportError)(msg: "error in getStream: " & exc.msg, parent: exc)

method getWrapped*(self: QuicSession): P2PConnection =
  nil
@@ -119,7 +119,7 @@ method newStream*(
  try:
    return await m.quicSession.getStream(Direction.Out)
  except CatchableError as exc:
    raise newException(MuxerError, exc.msg, exc)
    raise newException(MuxerError, "error in newStream: " & exc.msg, exc)

proc handleStream(m: QuicMuxer, chann: QuicStream) {.async: (raises: []).} =
  ## call the muxer stream handler for this channel
@@ -236,11 +236,16 @@ method start*(
  except QuicConfigError as exc:
    doAssert false, "invalid quic setup: " & $exc.msg
  except TLSCertificateError as exc:
    raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
    raise (ref QuicTransportError)(
      msg: "tlscert error in quic start: " & exc.msg, parent: exc
    )
  except QuicError as exc:
    raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
    raise
      (ref QuicTransportError)(msg: "quicerror in quic start: " & exc.msg, parent: exc)
  except TransportOsError as exc:
    raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
    raise (ref QuicTransportError)(
      msg: "transport error in quic start: " & exc.msg, parent: exc
    )
  self.running = true

method stop*(transport: QuicTransport) {.async: (raises: []).} =
@@ -318,7 +323,7 @@ method dial*(
  except CancelledError as e:
    raise e
  except CatchableError as e:
    raise newException(QuicTransportDialError, e.msg, e)
    raise newException(QuicTransportDialError, "error in quic dial:" & e.msg, e)

method upgrade*(
    self: QuicTransport, conn: P2PConnection, peerId: Opt[PeerId]

@@ -133,7 +133,9 @@ method start*(
      try:
        createStreamServer(ta, flags = self.flags)
      except common.TransportError as exc:
        raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
        raise (ref TcpTransportError)(
          msg: "transport error in TcpTransport start:" & exc.msg, parent: exc
        )

    self.servers &= server

@@ -250,9 +252,13 @@ method accept*(
  except TransportUseClosedError as exc:
    raise newTransportClosedError(exc)
  except TransportOsError as exc:
    raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
    raise (ref TcpTransportError)(
      msg: "TransportOs error in accept:" & exc.msg, parent: exc
    )
  except common.TransportError as exc: # Needed for chronos 4.0.0 support
    raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
    raise (ref TcpTransportError)(
      msg: "TransportError in accept: " & exc.msg, parent: exc
    )
  except CancelledError as exc:
    cancelAcceptFuts()
    raise exc
@@ -302,7 +308,8 @@ method dial*(
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
    raise
      (ref TcpTransportError)(msg: "TcpTransport dial error: " & exc.msg, parent: exc)

  # If `stop` is called after `connect` but before `await` returns, we might
  # end up with a race condition where `stop` returns but not all connections
@@ -318,7 +325,7 @@ method dial*(
      MultiAddress.init(transp.remoteAddress).expect("remote address is valid")
    except TransportOsError as exc:
      safeCloseWait(transp)
      raise (ref TcpTransportError)(msg: exc.msg)
      raise (ref TcpTransportError)(msg: "MultiAddress.init error in dial: " & exc.msg)

  self.connHandler(transp, Opt.some(observedAddr), Direction.Out)

@@ -118,8 +118,8 @@ proc makeASN1Time(time: Time): string {.inline.} =
    try:
      let f = initTimeFormat("yyyyMMddhhmmss")
      format(time.utc(), f)
    except TimeFormatParseError:
      raiseAssert "time format is const and checked with test"
    except TimeFormatParseError as e:
      raiseAssert "time format is const and checked with test: " & e.msg

  return str & "Z"

@@ -278,7 +278,7 @@ proc parse*(
      validTo = parseCertTime($certParsed.valid_to)
    except TimeParseError as e:
      raise newException(
        CertificateParsingError, "Failed to parse certificate validity time, " & $e.msg
        CertificateParsingError, "Failed to parse certificate validity time: " & $e.msg, e
      )

    P2pCertificate(

@@ -243,7 +243,9 @@ method dial*(
    raise e
  except CatchableError as e:
    safeCloseWait(transp)
    raise newException(transport.TransportDialError, e.msg, e)
    raise newException(
      transport.TransportDialError, "error in dial TorTransport: " & e.msg, e
    )

method start*(
    self: TorTransport, addrs: seq[MultiAddress]

@@ -160,7 +160,9 @@ method start*(
        else:
          HttpServer.create(address, handshakeTimeout = self.handshakeTimeout)
      except CatchableError as exc:
        raise (ref WsTransportError)(msg: exc.msg, parent: exc)
        raise (ref WsTransportError)(
          msg: "error in WsTransport start: " & exc.msg, parent: exc
        )

      self.httpservers &= httpserver

@@ -309,7 +311,9 @@ method accept*(
    debug "OS Error", description = exc.msg
  except CatchableError as exc:
    info "Unexpected error accepting connection", description = exc.msg
    raise newException(transport.TransportError, exc.msg, exc)
    raise newException(
      transport.TransportError, "Error in WsTransport accept: " & exc.msg, exc
    )

method dial*(
    self: WsTransport,
@@ -338,7 +342,9 @@ method dial*(
    raise e
  except CatchableError as e:
    safeClose(transp)
    raise newException(transport.TransportDialError, e.msg, e)
    raise newException(
      transport.TransportDialError, "error in WsTransport dial: " & e.msg, e
    )

method handles*(t: WsTransport, address: MultiAddress): bool {.gcsafe, raises: [].} =
  if procCall Transport(t).handles(address):

@@ -54,8 +54,8 @@ when defined(libp2p_agents_metrics):
  proc safeToLowerAscii*(s: string): Result[string, cstring] =
    try:
      ok(s.toLowerAscii())
    except CatchableError:
      err("toLowerAscii failed")
    except CatchableError as e:
      let errMsg = "toLowerAscii failed: " & e.msg
      err(errMsg.cstring)

const
  KnownLibP2PAgents* {.strdefine.} = "nim-libp2p"

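`safeToLowerAscii` shows the codebase's Result-based alternative to raising: build the message, then hand it back through `err`. A minimal sketch of the same idiom, using `string` as the error type for simplicity where the helper above uses `cstring`; `parsePort` is an illustrative name:

import std/strutils
import results

proc parsePort(s: string): Result[int, string] =
  try:
    ok(parseInt(s))
  except ValueError as e:
    # No exception escapes; the failure travels inside the Result.
    err("invalid port: " & e.msg)

assert parsePort("4040").get() == 4040
assert parsePort("not-a-port").isErr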
@@ -27,9 +27,9 @@ proc anyCompleted*[T](
      if raceFut.completed:
        return raceFut
      requests.del(requests.find(raceFut))
    except ValueError:
    except ValueError as e:
      raise newException(
        AllFuturesFailedError, "None of the futures completed successfully"
        AllFuturesFailedError, "None of the futures completed successfully: " & e.msg, e
      )
    except CancelledError as exc:
      raise exc

@@ -108,7 +108,9 @@ proc createStreamServer*[T](
): StreamServer {.raises: [LPError, MaInvalidAddress].} =
  ## Create new TCP stream server which binds to ``ma`` address.
  if not (RTRANSPMA.match(ma)):
    raise newException(MaInvalidAddress, "Incorrect or unsupported address!")
    raise newException(
      MaInvalidAddress, "Incorrect or unsupported address in createStreamServer"
    )

  try:
    return createStreamServer(
@@ -123,7 +125,7 @@ proc createStreamServer*[T](
      init,
    )
  except CatchableError as exc:
    raise newException(LPError, exc.msg)
    raise newException(LPError, "failed createStreamServer: " & exc.msg, exc)

proc createStreamServer*[T](
    ma: MultiAddress,
@@ -146,7 +148,7 @@ proc createStreamServer*[T](
      initTAddress(ma).tryGet(), flags, udata, sock, backlog, bufferSize, child, init
    )
  except CatchableError as exc:
    raise newException(LPError, exc.msg)
    raise newException(LPError, "failed simpler createStreamServer: " & exc.msg, exc)

proc createAsyncSocket*(ma: MultiAddress): AsyncFD {.raises: [ValueError, LPError].} =
  ## Create new asynchronous socket using MultiAddress' ``ma`` socket type and
@@ -178,7 +180,9 @@ proc createAsyncSocket*(ma: MultiAddress): AsyncFD {.raises: [ValueError, LPErro
  try:
    createAsyncSocket(address.getDomain(), socktype, protocol)
  except CatchableError as exc:
    raise newException(LPError, exc.msg)
    raise newException(
      LPError, "Convert exception to LPError in createAsyncSocket: " & exc.msg, exc
    )

proc bindAsyncSocket*(sock: AsyncFD, ma: MultiAddress): bool {.raises: [LPError].} =
  ## Bind socket ``sock`` to MultiAddress ``ma``.

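All of these helpers funnel a libp2p MultiAddress into chronos primitives, and the conversion at their core is the `initTAddress` call visible in the hunk above. A minimal usage sketch, assuming this repository's module layout; the address literal is illustrative:

import chronos
import libp2p/[multiaddress, wire]

let ma = MultiAddress.init("/ip4/127.0.0.1/tcp/4040").tryGet()
# initTAddress maps the MultiAddress onto a chronos TransportAddress,
# which createStreamServer and createAsyncSocket then consume.
let ta = initTAddress(ma).tryGet()
echo ta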
@@ -12,8 +12,8 @@
import sequtils, tables, sets
import chronos, stew/byteutils
import
  utils,
  ../../libp2p/[
  ../utils,
  ../../../libp2p/[
    switch,
    stream/connection,
    crypto/crypto,
@@ -23,9 +23,9 @@ import
    protocols/pubsub/peertable,
    protocols/pubsub/pubsubpeer,
  ]
import ../../libp2p/protocols/pubsub/errors as pubsub_errors
import ../../../libp2p/protocols/pubsub/errors as pubsub_errors

import ../helpers
import ../../helpers

proc waitSub(sender, receiver: auto, key: string) {.async.} =
  # turn things deterministic
@@ -38,7 +38,7 @@ proc waitSub(sender, receiver: auto, key: string) {.async.} =
    dec ceil
    doAssert(ceil > 0, "waitSub timeout!")

suite "FloodSub":
suite "FloodSub Integration":
  teardown:
    checkTrackers()

@@ -1,106 +1,15 @@
{.used.}

import std/[sequtils]
import stew/byteutils
import utils
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../helpers
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../helpers

suite "GossipSub Control Messages":
suite "GossipSub Integration - Control Messages":
  teardown:
    checkTrackers()

  asyncTest "handleIHave - peers with no budget should not request messages":
    let topic = "foobar"
    var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.subscribe(topic, voidTopicHandler)

    let peerId = randomPeerId()
    let peer = gossipSub.getPubSubPeer(peerId)

    # Add message to `gossipSub`'s message cache
    let id = @[0'u8, 1, 2, 3]
    gossipSub.mcache.put(id, Message())
    peer.sentIHaves[^1].incl(id)

    # Build an IHAVE message that contains the same message ID three times
    let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])

    # Given the peer has no budget to request messages
    peer.iHaveBudget = 0

    # When a peer makes an IHAVE request for a message that `gossipSub` has
    let iwants = gossipSub.handleIHave(peer, @[msg])

    # Then `gossipSub` should not generate an IWant message for the message
    check:
      iwants.messageIDs.len == 0
      gossipSub.mcache.msgs.len == 1

  asyncTest "handleIHave - peers with budget should request messages":
    let topic = "foobar"
    var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.subscribe(topic, voidTopicHandler)

    let peerId = randomPeerId()
    let peer = gossipSub.getPubSubPeer(peerId)

    # Add message to `gossipSub`'s message cache
    let id = @[0'u8, 1, 2, 3]
    gossipSub.mcache.put(id, Message())
    peer.sentIHaves[^1].incl(id)

    # Build an IHAVE message that contains the same message ID three times
    # If ids are repeated, only one request should be generated
    let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])

    # Given the budget is not 0 (because it's not been overridden)
    check:
      peer.iHaveBudget > 0

    # When a peer makes an IHAVE request for a message that `gossipSub` does not have
    let iwants = gossipSub.handleIHave(peer, @[msg])

    # Then `gossipSub` should generate an IWant message for the message
    check:
      iwants.messageIDs.len == 1
      gossipSub.mcache.msgs.len == 1

  asyncTest "handleIWant - peers with budget should request messages":
    let topic = "foobar"
    var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.subscribe(topic, voidTopicHandler)

    let peerId = randomPeerId()
    let peer = gossipSub.getPubSubPeer(peerId)

    # Add message to `gossipSub`'s message cache
    let id = @[0'u8, 1, 2, 3]
    gossipSub.mcache.put(id, Message())
    peer.sentIHaves[^1].incl(id)

    # Build an IWANT message that contains the same message ID three times
    # If ids are repeated, only one request should be generated
    let msg = ControlIWant(messageIDs: @[id, id, id])

    # When a peer makes an IWANT request for a message that `gossipSub` has
    let messages = gossipSub.handleIWant(peer, @[msg])

    # Then `gossipSub` should return the message
    check:
      messages.len == 1
      gossipSub.mcache.msgs.len == 1

  asyncTest "GRAFT messages correctly add peers to mesh":
    # Given 2 nodes
    let
@@ -512,32 +421,3 @@ suite "GossipSub Control Messages":
    check:
      toSeq(nodeC.mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
      toSeq(nodeA.mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))

  asyncTest "Max IDONTWANT messages per heartbeat per peer":
    # Given GossipSub node with 1 peer
    let
      topic = "foobar"
      totalPeers = 1

    let (gossipSub, conns, peers) = setupGossipSubWithPeers(totalPeers, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    let peer = peers[0]

    # And a sequence of iDontWants with more messages than the max number (1200)
    proc generateMessageIds(count: int): seq[MessageId] =
      return (0 ..< count).mapIt(("msg_id_" & $it & $Moment.now()).toBytes())

    let iDontWants =
      @[
        ControlIWant(messageIDs: generateMessageIds(600)),
        ControlIWant(messageIDs: generateMessageIds(600)),
      ]

    # When node handles iDontWants
    gossipSub.handleIDontWant(peer, iDontWants)

    # Then it saves max IDontWantMaxCount messages in the history and the rest is dropped
    check:
      peer.iDontWants[0].len == IDontWantMaxCount
@@ -11,11 +11,11 @@

import chronos
import stew/byteutils
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../../libp2p/stream/connection
import ../helpers
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
import ../../../libp2p/protocols/pubsub/rpc/[messages]
import ../../../libp2p/stream/connection
import ../../helpers

type DummyConnection* = ref object of Connection

@@ -30,7 +30,7 @@ proc new*(T: typedesc[DummyConnection]): DummyConnection =
  let instance = T()
  instance

suite "GossipSub Custom Connection Support":
suite "GossipSub Integration - Custom Connection Support":
  teardown:
    checkTrackers()

@@ -9,66 +9,18 @@

{.used.}

import std/[sequtils]
import stew/byteutils
import chronicles
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../helpers
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, peertable]
import ../../../libp2p/protocols/pubsub/rpc/[messages]
import ../../helpers

suite "GossipSub Fanout Management":
suite "GossipSub Integration - Fanout Management":
  teardown:
    checkTrackers()

  asyncTest "`replenishFanout` Degree Lo":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    check gossipSub.gossipsub[topic].len == 15
    gossipSub.replenishFanout(topic)
    check gossipSub.fanout[topic].len == gossipSub.parameters.d

  asyncTest "`dropFanoutPeers` drop expired fanout topics":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(6, topic, populateGossipsub = true, populateFanout = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
    await sleepAsync(5.millis) # allow the topic to expire

    check gossipSub.fanout[topic].len == gossipSub.parameters.d

    gossipSub.dropFanoutPeers()
    check topic notin gossipSub.fanout

  asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
    let
      topic1 = "foobar1"
      topic2 = "foobar2"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(
      6, @[topic1, topic2], populateGossipsub = true, populateFanout = true
    )
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
    gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
    await sleepAsync(5.millis) # allow first topic to expire

    check gossipSub.fanout[topic1].len == gossipSub.parameters.d
    check gossipSub.fanout[topic2].len == gossipSub.parameters.d

    gossipSub.dropFanoutPeers()
    check topic1 notin gossipSub.fanout
    check topic2 in gossipSub.fanout

  asyncTest "e2e - GossipSub send over fanout A -> B":
  asyncTest "GossipSub send over fanout A -> B":
    let (passed, handler) = createCompleteHandler()

    let nodes = generateNodes(2, gossip = true)
@@ -107,7 +59,7 @@ suite "GossipSub Fanout Management":

    check observed == 2

  asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
  asyncTest "GossipSub send over fanout A -> B for subscribed topic":
    let (passed, handler) = createCompleteHandler()

    let nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 10.minutes)
@@ -12,129 +12,15 @@
import std/[sequtils]
import stew/byteutils
import chronicles
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../libp2p/protocols/pubsub/rpc/[message]
import ../helpers, ../utils/[futures]
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../../libp2p/protocols/pubsub/rpc/[message]
import ../../helpers, ../../utils/[futures]

const MsgIdSuccess = "msg id gen success"

suite "GossipSub Gossip Protocol":
suite "GossipSub Integration - Gossip Protocol":
  teardown:
    checkTrackers()

  asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(45, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate mesh and fanout peers
    for i in 0 ..< 30:
      let peer = peers[i]
      if i mod 2 == 0:
        gossipSub.fanout[topic].incl(peer)
      else:
        gossipSub.grafted(peer, topic)
        gossipSub.mesh[topic].incl(peer)

    # generate gossipsub (free standing) peers
    for i in 30 ..< 45:
      let peer = peers[i]
      gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    check gossipSub.fanout[topic].len == 15
    check gossipSub.mesh[topic].len == 15
    check gossipSub.gossipsub[topic].len == 15

    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == gossipSub.parameters.d
    for p in gossipPeers.keys:
      check not gossipSub.fanout.hasPeerId(topic, p.peerId)
      check not gossipSub.mesh.hasPeerId(topic, p.peerId)

  asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate mesh and fanout peers
    for i, peer in peers:
      if i mod 2 == 0:
        gossipSub.fanout[topic].incl(peer)
      else:
        gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == gossipSub.parameters.d

  asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate mesh and fanout peers
    for i, peer in peers:
      if i mod 2 == 0:
        gossipSub.mesh[topic].incl(peer)
        gossipSub.grafted(peer, topic)
      else:
        gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == gossipSub.parameters.d

  asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate mesh and fanout peers
    for i, peer in peers:
      if i mod 2 == 0:
        gossipSub.mesh[topic].incl(peer)
        gossipSub.grafted(peer, topic)
      else:
        gossipSub.fanout[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == 0

  asyncTest "messages sent to peers not in the mesh are propagated via gossip":
    let
      numberOfNodes = 5
@@ -314,7 +200,7 @@ suite "GossipSub Gossip Protocol":
      messages[].mapIt(it[].len)[1] == 0
      messages[].mapIt(it[].len)[0] == 0

  asyncTest "e2e - GossipSub peer exchange":
  asyncTest "GossipSub peer exchange":
    # A, B & C are subscribed to something
    # B unsubscribes from it, it should send
    # PX to A & C
@@ -1,11 +1,11 @@
{.used.}

import std/[sequtils]
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../helpers
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../helpers

suite "GossipSub Heartbeat":
suite "GossipSub Integration - Heartbeat":
  teardown:
    checkTrackers()

@@ -11,194 +11,14 @@

import chronicles
import std/[sequtils]
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../helpers
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../helpers

suite "GossipSub Mesh Management":
suite "GossipSub Integration - Mesh Management":
  teardown:
    checkTrackers()

  asyncTest "subscribe/unsubscribeAll":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # test via dynamic dispatch
    gossipSub.PubSub.subscribe(topic, voidTopicHandler)

    check:
      gossipSub.topics.contains(topic)
      gossipSub.gossipsub[topic].len() > 0
      gossipSub.mesh[topic].len() > 0

    # test via dynamic dispatch
    gossipSub.PubSub.unsubscribeAll(topic)

    check:
      topic notin gossipSub.topics # not in local topics
      topic notin gossipSub.mesh # not in mesh
      topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)

  asyncTest "`rebalanceMesh` Degree Lo":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    check gossipSub.mesh[topic].len == gossipSub.parameters.d

  asyncTest "rebalanceMesh - bad peers":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    var scoreLow = -11'f64
    for peer in peers:
      peer.score = scoreLow
      scoreLow += 1.0

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    # low score peers should not be in mesh, that's why the count must be 4
    check gossipSub.mesh[topic].len == 4
    for peer in gossipSub.mesh[topic]:
      check peer.score >= 0.0

  asyncTest "`rebalanceMesh` Degree Hi":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    check gossipSub.mesh[topic].len == 15
    gossipSub.rebalanceMesh(topic)
    check gossipSub.mesh[topic].len ==
      gossipSub.parameters.d + gossipSub.parameters.dScore

  asyncTest "rebalanceMesh fail due to backoff":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    for peer in peers:
      gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
        peer.peerId, Moment.now() + 1.hours
      )
      let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
      # there must be a control prune due to violation of backoff
      check prunes.len != 0

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    # expect 0 since they are all backing off
    check gossipSub.mesh[topic].len == 0

  asyncTest "rebalanceMesh fail due to backoff - remote":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    check gossipSub.mesh[topic].len != 0

    for peer in peers:
      gossipSub.handlePrune(
        peer,
        @[
          ControlPrune(
            topicID: topic,
            peers: @[],
            backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
          )
        ],
      )

    # expect topic cleaned up since they are all pruned
    check topic notin gossipSub.mesh

  asyncTest "rebalanceMesh Degree Hi - audit scenario":
    let
      topic = "foobar"
      numInPeers = 6
      numOutPeers = 7
      totalPeers = numInPeers + numOutPeers

    let (gossipSub, conns, peers) = setupGossipSubWithPeers(
      totalPeers, topic, populateGossipsub = true, populateMesh = true
    )
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.parameters.dScore = 4
    gossipSub.parameters.d = 6
    gossipSub.parameters.dOut = 3
    gossipSub.parameters.dHigh = 12
    gossipSub.parameters.dLow = 4

    for i in 0 ..< numInPeers:
      let conn = conns[i]
      let peer = peers[i]
      conn.transportDir = Direction.In
      peer.score = 40.0

    for i in numInPeers ..< totalPeers:
      let conn = conns[i]
      let peer = peers[i]
      conn.transportDir = Direction.Out
      peer.score = 10.0

    check gossipSub.mesh[topic].len == 13
    gossipSub.rebalanceMesh(topic)
    # ensure we are above dlow
    check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
    var outbound = 0
    for peer in gossipSub.mesh[topic]:
      if peer.sendConn.transportDir == Direction.Out:
        inc outbound
    # ensure we give priority and keep at least dOut outbound peers
    check outbound >= gossipSub.parameters.dOut

  asyncTest "rebalanceMesh Degree Hi - dScore controls number of peers to retain by score when pruning":
    # Given GossipSub node starting with 13 peers in mesh
    let
      topic = "foobar"
      totalPeers = 13

    let (gossipSub, conns, peers) = setupGossipSubWithPeers(
      totalPeers, topic, populateGossipsub = true, populateMesh = true
    )
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And mesh is larger than dHigh
    gossipSub.parameters.dLow = 4
    gossipSub.parameters.d = 6
    gossipSub.parameters.dHigh = 8
    gossipSub.parameters.dOut = 3
    gossipSub.parameters.dScore = 13

    check gossipSub.mesh[topic].len == totalPeers

    # When mesh is rebalanced
    gossipSub.rebalanceMesh(topic)

    # Then pruning is not triggered when mesh is not larger than dScore
    check gossipSub.mesh[topic].len == totalPeers

  asyncTest "Nodes graft peers according to DValues - numberOfNodes < dHigh":
    let
      numberOfNodes = 5
@@ -242,7 +62,7 @@ suite "GossipSub Mesh Management":
        node.mesh.getOrDefault(topic).len <= dHigh
        node.fanout.len == 0

  asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
  asyncTest "GossipSub should add remote peer topic subscriptions":
    proc handler(topic: string, data: seq[byte]) {.async.} =
      discard

@@ -261,7 +81,7 @@ suite "GossipSub Mesh Management":
      "foobar" in gossip1.gossipsub
      gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)

  asyncTest "e2e - GossipSub should add remote peer topic subscriptions if both peers are subscribed":
  asyncTest "GossipSub should add remote peer topic subscriptions if both peers are subscribed":
    proc handler(topic: string, data: seq[byte]) {.async.} =
      discard

@@ -2,12 +2,12 @@

import std/[sequtils]
import stew/byteutils
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, floodsub]
import ../../libp2p/protocols/pubsub/rpc/[messages, message]
import ../helpers
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, floodsub]
import ../../../libp2p/protocols/pubsub/rpc/[messages, message]
import ../../helpers

suite "GossipSub Message Cache":
suite "GossipSub Integration - Message Cache":
  teardown:
    checkTrackers()

@@ -11,12 +11,12 @@

import std/[sequtils, enumerate]
import stew/byteutils
import utils
import sugar
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, timedcache]
import ../../libp2p/protocols/pubsub/rpc/[message, protobuf]
import ../helpers, ../utils/[futures]
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, timedcache]
import ../../../libp2p/protocols/pubsub/rpc/[message]
import ../../helpers, ../../utils/[futures]

const MsgIdSuccess = "msg id gen success"

@@ -72,62 +72,11 @@ proc createMessages(

  return (iwantMessageIds, sentMessages)

suite "GossipSub Message Handling":
suite "GossipSub Integration - Message Handling":
  teardown:
    checkTrackers()

  asyncTest "Drop messages of topics without subscription":
    let topic = "foobar"
    var (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      let peer = peers[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("bar" & $i).toBytes(), topic, some(seqno))
      await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    check gossipSub.mcache.msgs.len == 0

  asyncTest "subscription limits":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    gossipSub.topicsHigh = 10

    var tooManyTopics: seq[string]
    for i in 0 .. gossipSub.topicsHigh + 10:
      tooManyTopics &= "topic" & $i
    let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)

    let conn = TestBufferStream.new(noop)
    let peerId = randomPeerId()
    conn.peerId = peerId
    let peer = gossipSub.getPubSubPeer(peerId)

    await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))

    check:
      gossipSub.gossipsub.len == gossipSub.topicsHigh
      peer.behaviourPenalty > 0.0

    await conn.close()
    await gossipSub.switch.stop()

  asyncTest "invalid message bytes":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let peerId = randomPeerId()
    let peer = gossipSub.getPubSubPeer(peerId)

    expect(CatchableError):
      await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])

    await gossipSub.switch.stop()

  asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
  asyncTest "Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
    # This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
    # Expected: Both messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -154,7 +103,7 @@ suite "GossipSub Message Handling":

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
  asyncTest "Discard IWANT replies when both messages individually exceed maxSize":
    # This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
    # Expected: No messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -181,7 +130,7 @@ suite "GossipSub Message Handling":

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
  asyncTest "Process IWANT replies when both messages are below maxSize":
    # This test checks if two messages, both below the maxSize, are correctly processed and sent.
    # Expected: Both messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -208,7 +157,7 @@ suite "GossipSub Message Handling":

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
  asyncTest "Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
    # This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
    # Expected: Only the smaller message should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -469,7 +418,7 @@ suite "GossipSub Message Handling":
      validatedCounter == 1
      sendCounter == 2

  asyncTest "e2e - GossipSub send over mesh A -> B":
  asyncTest "GossipSub send over mesh A -> B":
    var passed: Future[bool] = newFuture[bool]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
@@ -499,7 +448,7 @@ suite "GossipSub Message Handling":
      gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
      not gossip2.fanout.hasPeerId("foobar", gossip1.peerInfo.peerId)

  asyncTest "e2e - GossipSub should not send to source & peers who already seen":
  asyncTest "GossipSub should not send to source & peers who already seen":
    # 3 nodes: A, B, C
    # A publishes, C relays, B is having a long validation
    # so B should not send to anyone
@@ -565,7 +514,7 @@ suite "GossipSub Message Handling":

    await bFinished

  asyncTest "e2e - GossipSub send over floodPublish A -> B":
  asyncTest "GossipSub send over floodPublish A -> B":
    var passed: Future[bool] = newFuture[bool]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
@@ -595,7 +544,7 @@ suite "GossipSub Message Handling":
      "foobar" notin gossip2.gossipsub
      not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)

  asyncTest "e2e - GossipSub floodPublish limit":
  asyncTest "GossipSub floodPublish limit":
    let
      nodes = setupNodes(20)
      gossip1 = GossipSub(nodes[0])
@@ -607,7 +556,7 @@ suite "GossipSub Message Handling":
    await connectNodes(nodes[1 ..^ 1], nodes[0])
    await baseTestProcedure(nodes, gossip1, gossip1.parameters.dLow, 17)

  asyncTest "e2e - GossipSub floodPublish limit with bandwidthEstimatebps = 0":
  asyncTest "GossipSub floodPublish limit with bandwidthEstimatebps = 0":
    let
      nodes = setupNodes(20)
      gossip1 = GossipSub(nodes[0])
@@ -620,7 +569,7 @@ suite "GossipSub Message Handling":
    await connectNodes(nodes[1 ..^ 1], nodes[0])
    await baseTestProcedure(nodes, gossip1, nodes.len - 1, nodes.len - 1)

  asyncTest "e2e - GossipSub with multiple peers":
  asyncTest "GossipSub with multiple peers":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
@@ -662,7 +611,7 @@ suite "GossipSub Message Handling":
    check:
      "foobar" in gossip.gossipsub

  asyncTest "e2e - GossipSub with multiple peers (sparse)":
  asyncTest "GossipSub with multiple peers (sparse)":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
@@ -711,7 +660,7 @@ suite "GossipSub Message Handling":
      gossip.fanout.len == 0
      gossip.mesh["foobar"].len > 0

  asyncTest "e2e - GossipSub with multiple peers - control deliver (sparse)":
  asyncTest "GossipSub with multiple peers - control deliver (sparse)":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
381 tests/pubsub/integration/testgossipsubscoring.nim Normal file
@@ -0,0 +1,381 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils]
import stew/byteutils
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, pubsubpeer]
import ../../../libp2p/protocols/pubsub/rpc/[messages]
import ../../helpers
import ../../utils/[futures]

suite "GossipSub Integration - Scoring":
  teardown:
    checkTrackers()

  asyncTest "Flood publish to all peers with score above threshold, regardless of subscription":
    let
      numberOfNodes = 3
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true, floodPublish = true)
      g0 = GossipSub(nodes[0])

    startNodesAndDeferStop(nodes)

    # Nodes 1 and 2 are connected to node 0
    await connectNodes(nodes[0], nodes[1])
    await connectNodes(nodes[0], nodes[2])

    let (handlerFut1, handler1) = createCompleteHandler()
    let (handlerFut2, handler2) = createCompleteHandler()

    # Nodes are subscribed to the same topic
    nodes[1].subscribe(topic, handler1)
    nodes[2].subscribe(topic, handler2)
    await waitForHeartbeat()

    # Given node 2's score is below the threshold
    for peer in g0.gossipsub.getOrDefault(topic):
      if peer.peerId == nodes[2].peerInfo.peerId:
        peer.score = (g0.parameters.publishThreshold - 1)

    # When node 0 publishes a message to topic "foobar"
    let message = "Hello!".toBytes()
    tryPublish await nodes[0].publish(topic, message), 1

    # Then only node 1 should receive the message
    let results = await waitForStates(@[handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
    check:
      results[0].isCompleted(true)
      results[1].isPending()

asyncTest "Should not rate limit decodable messages below the size allowed":
|
||||
const topic = "foobar"
|
||||
let
|
||||
nodes = generateNodes(
|
||||
2,
|
||||
gossip = true,
|
||||
overheadRateLimit = Opt.some((20, 1.millis)),
|
||||
verifySignature = false,
|
||||
# Avoid being disconnected by failing signature verification
|
||||
)
|
||||
.toGossipSub()
|
||||
rateLimitHits = currentRateLimitHits()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
nodes[0].broadcast(
|
||||
nodes[0].mesh[topic],
|
||||
RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](10))]),
|
||||
isHighPriority = true,
|
||||
)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
currentRateLimitHits() == rateLimitHits
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
|
||||
|
||||
# Disconnect peer when rate limiting is enabled
|
||||
nodes[1].parameters.disconnectPeerAboveRateLimit = true
|
||||
nodes[0].broadcast(
|
||||
nodes[0].mesh["foobar"],
|
||||
RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](12))]),
|
||||
isHighPriority = true,
|
||||
)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
|
||||
currentRateLimitHits() == rateLimitHits
|
||||
|
||||
asyncTest "Should rate limit undecodable messages above the size allowed":
|
||||
const topic = "foobar"
|
||||
let
|
||||
nodes = generateNodes(
|
||||
2,
|
||||
gossip = true,
|
||||
overheadRateLimit = Opt.some((20, 1.millis)),
|
||||
verifySignature = false,
|
||||
# Avoid being disconnected by failing signature verification
|
||||
)
|
||||
.toGossipSub()
|
||||
rateLimitHits = currentRateLimitHits()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Simulate sending an undecodable message
|
||||
await nodes[1].peers[nodes[0].switch.peerInfo.peerId].sendEncoded(
|
||||
newSeqWith(33, 1.byte), isHighPriority = true
|
||||
)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
currentRateLimitHits() == rateLimitHits + 1
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
|
||||
|
||||
# Disconnect peer when rate limiting is enabled
|
||||
nodes[1].parameters.disconnectPeerAboveRateLimit = true
|
||||
await nodes[0].peers[nodes[1].switch.peerInfo.peerId].sendEncoded(
|
||||
newSeqWith(35, 1.byte), isHighPriority = true
|
||||
)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
|
||||
currentRateLimitHits() == rateLimitHits + 2
|
||||
|
||||
asyncTest "Should rate limit decodable messages above the size allowed":
|
||||
const topic = "foobar"
|
||||
let
|
||||
nodes = generateNodes(
|
||||
2,
|
||||
gossip = true,
|
||||
overheadRateLimit = Opt.some((20, 1.millis)),
|
||||
verifySignature = false,
|
||||
# Avoid being disconnected by failing signature verification
|
||||
)
|
||||
.toGossipSub()
|
||||
rateLimitHits = currentRateLimitHits()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
let msg = RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(
|
||||
prune:
|
||||
@[
|
||||
ControlPrune(
|
||||
topicID: topic,
|
||||
peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))],
|
||||
backoff: 123'u64,
|
||||
)
|
||||
]
|
||||
)
|
||||
)
|
||||
)
|
||||
nodes[0].broadcast(nodes[0].mesh[topic], msg, isHighPriority = true)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
currentRateLimitHits() == rateLimitHits + 1
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
|
||||
|
||||
# Disconnect peer when rate limiting is enabled
|
||||
nodes[1].parameters.disconnectPeerAboveRateLimit = true
|
||||
let msg2 = RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(
|
||||
prune:
|
||||
@[
|
||||
ControlPrune(
|
||||
topicID: topic,
|
||||
peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))],
|
||||
backoff: 123'u64,
|
||||
)
|
||||
]
|
||||
)
|
||||
)
|
||||
)
|
||||
nodes[0].broadcast(nodes[0].mesh[topic], msg2, isHighPriority = true)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
|
||||
currentRateLimitHits() == rateLimitHits + 2
|
||||
|
||||
asyncTest "Should rate limit invalid messages above the size allowed":
|
||||
const topic = "foobar"
|
||||
let
|
||||
nodes = generateNodes(
|
||||
2,
|
||||
gossip = true,
|
||||
overheadRateLimit = Opt.some((20, 1.millis)),
|
||||
verifySignature = false,
|
||||
# Avoid being disconnected by failing signature verification
|
||||
)
|
||||
.toGossipSub()
|
||||
rateLimitHits = currentRateLimitHits()
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
proc execValidator(
|
||||
topic: string, message: messages.Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
return ValidationResult.Reject
|
||||
|
||||
nodes[0].addValidator(topic, execValidator)
|
||||
nodes[1].addValidator(topic, execValidator)
|
||||
|
||||
let msg = RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](40))])
|
||||
|
||||
nodes[0].broadcast(nodes[0].mesh[topic], msg, isHighPriority = true)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
currentRateLimitHits() == rateLimitHits + 1
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
|
||||
|
||||
# Disconnect peer when rate limiting is enabled
|
||||
nodes[1].parameters.disconnectPeerAboveRateLimit = true
|
||||
nodes[0].broadcast(
|
||||
nodes[0].mesh[topic],
|
||||
RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](35))]),
|
||||
isHighPriority = true,
|
||||
)
|
||||
|
||||
checkUntilTimeout:
|
||||
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
|
||||
currentRateLimitHits() == rateLimitHits + 2
|
||||
|
||||
asyncTest "DirectPeers: don't kick direct peer with low score":
|
||||
const topic = "foobar"
|
||||
let nodes = generateNodes(2, gossip = true).toGossipSub()
|
||||
|
||||
    startNodesAndDeferStop(nodes)
    await nodes.addDirectPeerStar()

    nodes[1].parameters.disconnectBadPeers = true
    nodes[1].parameters.graylistThreshold = 100000

    var (handlerFut, handler) = createCompleteHandler()
    nodes[0].subscribe(topic, voidTopicHandler)
    nodes[1].subscribe(topic, handler)
    await waitForHeartbeat()

    nodes[1].updateScores()

    # peer shouldn't be in our mesh
    check:
      topic notin nodes[1].mesh
      nodes[1].peerStats[nodes[0].switch.peerInfo.peerId].score <
        nodes[1].parameters.graylistThreshold

    tryPublish await nodes[0].publish(topic, toBytes("hellow")), 1

    # Without directPeers, this would fail
    var futResult = await waitForState(handlerFut)
    check:
      futResult.isCompleted(true)

  asyncTest "Peers disconnections mechanics":
    const
      numberOfNodes = 10
      topic = "foobar"
    let nodes =
      generateNodes(numberOfNodes, gossip = true, triggerSelf = true).toGossipSub()

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    var seen: Table[string, int]
    var seenFut = newFuture[void]()
    for i in 0 ..< numberOfNodes:
      let dialer = nodes[i]
      var handler: TopicHandler
      closureScope:
        var peerName = $dialer.peerInfo.peerId
        handler = proc(topicName: string, data: seq[byte]) {.async.} =
          seen.mgetOrPut(peerName, 0).inc()
          check topicName == topic
          if not seenFut.finished() and seen.len >= numberOfNodes:
            seenFut.complete()

      dialer.subscribe(topic, handler)

    await waitSubGraph(nodes, topic)

    # ensure peer stats are stored properly and kept properly
    check:
      nodes[0].peerStats.len == numberOfNodes - 1 # minus self

    tryPublish await nodes[0].publish(topic, toBytes("hello")), 1

    await seenFut.wait(2.seconds)
    check:
      seen.len >= numberOfNodes
    for k, v in seen.pairs:
      check:
        v >= 1

    for node in nodes:
      check:
        topic in node.gossipsub
        node.fanout.len == 0
        node.mesh[topic].len > 0

    # Removing some subscriptions

    for i in 0 ..< numberOfNodes:
      if i mod 3 != 0:
        nodes[i].unsubscribeAll(topic)

    # Waiting 2 heartbeats
    await nodes[0].waitForHeartbeatByEvent(2)

    # ensure peer stats are stored properly and kept properly
    check:
      nodes[0].peerStats.len == numberOfNodes - 1 # minus self

    # Adding again subscriptions
    for i in 0 ..< numberOfNodes:
      if i mod 3 != 0:
        nodes[i].subscribe(topic, voidTopicHandler)

    # Waiting 2 heartbeats
    await nodes[0].waitForHeartbeatByEvent(2)

    # ensure peer stats are stored properly and kept properly
    check:
      nodes[0].peerStats.len == numberOfNodes - 1 # minus self

  asyncTest "DecayInterval":
    const
      topic = "foobar"
      decayInterval = 50.milliseconds
    let nodes =
      generateNodes(2, gossip = true, decayInterval = decayInterval).toGossipSub()

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    var (handlerFut, handler) = createCompleteHandler()
    nodes[0].subscribe(topic, voidTopicHandler)
    nodes[1].subscribe(topic, handler)

    tryPublish await nodes[0].publish(topic, toBytes("hello")), 1

    var futResult = await waitForState(handlerFut)
    check:
      futResult.isCompleted(true)

    nodes[0].peerStats[nodes[1].peerInfo.peerId].topicInfos[topic].meshMessageDeliveries =
      100
    nodes[0].topicParams[topic].meshMessageDeliveriesDecay = 0.9

    # We should have decayed 5 times, though allowing 4..6
    await sleepAsync(decayInterval * 5)
    check:
      nodes[0].peerStats[nodes[1].peerInfo.peerId].topicInfos[topic].meshMessageDeliveries in
        50.0 .. 66.0
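    # Why the 50.0 .. 66.0 window: each decayInterval multiplies the
    # counter by meshMessageDeliveriesDecay (0.9 here), and the test
    # tolerates 4..6 decay rounds. A standalone sketch of that arithmetic:
    #   var x = 100.0
    #   for _ in 0 ..< 5: x *= 0.9  # 4 rounds -> 65.61, 5 -> 59.05, 6 -> 53.14
    #   doAssert x >= 50.0 and x <= 66.0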

7
tests/pubsub/integration/testpubsubintegration.nim
Normal file
@@ -0,0 +1,7 @@
{.used.}

import
  testfloodsub, testgossipsubcontrolmessages, testgossipsubcustomconn,
  testgossipsubfanout, testgossipsubgossip, testgossipsubheartbeat,
  testgossipsubmeshmanagement, testgossipsubmessagecache, testgossipsubmessagehandling,
  testgossipsubscoring

588
tests/pubsub/testbehavior.nim
Normal file
@@ -0,0 +1,588 @@
{.used.}

import std/[sequtils]
import stew/byteutils
import utils
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../libp2p/protocols/pubsub/rpc/[message]
import ../helpers
import ../utils/[futures]

const MsgIdSuccess = "msg id gen success"

suite "GossipSub Behavior":
  teardown:
    checkTrackers()

  asyncTest "handleIHave - peers with no budget should not request messages":
    let topic = "foobar"
    var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.subscribe(topic, voidTopicHandler)

    let peerId = randomPeerId()
    let peer = gossipSub.getPubSubPeer(peerId)

    # Add message to `gossipSub`'s message cache
    let id = @[0'u8, 1, 2, 3]
    gossipSub.mcache.put(id, Message())
    peer.sentIHaves[^1].incl(id)

    # Build an IHAVE message that contains the same message ID three times
    let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])

    # Given the peer has no budget to request messages
    peer.iHaveBudget = 0

    # When a peer makes an IHAVE request for a message that `gossipSub` has
    let iwants = gossipSub.handleIHave(peer, @[msg])

    # Then `gossipSub` should not generate an IWant message for the message
    check:
      iwants.messageIDs.len == 0
      gossipSub.mcache.msgs.len == 1

  asyncTest "handleIHave - peers with budget should request messages":
    let topic = "foobar"
    var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.subscribe(topic, voidTopicHandler)

    let peerId = randomPeerId()
    let peer = gossipSub.getPubSubPeer(peerId)

    # Add message to `gossipSub`'s message cache
    let id = @[0'u8, 1, 2, 3]
    gossipSub.mcache.put(id, Message())
    peer.sentIHaves[^1].incl(id)

    # Build an IHAVE message that contains the same message ID three times
    # If ids are repeated, only one request should be generated
    let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])

    # Given the budget is not 0 (because it's not been overridden)
    check:
      peer.iHaveBudget > 0

    # When a peer makes an IHAVE request for a message that `gossipSub` does not have
    let iwants = gossipSub.handleIHave(peer, @[msg])

    # Then `gossipSub` should generate an IWant message for the message
    check:
      iwants.messageIDs.len == 1
      gossipSub.mcache.msgs.len == 1
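  # The two handleIHave tests above pin down dedup plus budgeting; a
  # minimal standalone sketch of that rule (plain Nim sets, not the
  # actual gossipsub implementation):
  #   import std/sets
  #   var budget = 1
  #   var wanted = initHashSet[seq[byte]]()
  #   for id in @[@[0'u8, 1, 2, 3], @[0'u8, 1, 2, 3], @[0'u8, 1, 2, 3]]:
  #     if budget > 0 and id notin wanted:
  #       wanted.incl(id)
  #       dec budget
  #   doAssert wanted.len == 1  # three repeats, one IWANT entry; zero budget -> none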

  asyncTest "handleIWant - peers with budget should request messages":
    let topic = "foobar"
    var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.subscribe(topic, voidTopicHandler)

    let peerId = randomPeerId()
    let peer = gossipSub.getPubSubPeer(peerId)

    # Add message to `gossipSub`'s message cache
    let id = @[0'u8, 1, 2, 3]
    gossipSub.mcache.put(id, Message())
    peer.sentIHaves[^1].incl(id)

    # Build an IWANT message that contains the same message ID three times
    # If ids are repeated, only one request should be generated
    let msg = ControlIWant(messageIDs: @[id, id, id])

    # When a peer makes an IWANT request for a message that `gossipSub` has
    let messages = gossipSub.handleIWant(peer, @[msg])

    # Then `gossipSub` should return the message
    check:
      messages.len == 1
      gossipSub.mcache.msgs.len == 1

  asyncTest "Max IDONTWANT messages per heartbeat per peer":
    # Given GossipSub node with 1 peer
    let
      topic = "foobar"
      totalPeers = 1

    let (gossipSub, conns, peers) = setupGossipSubWithPeers(totalPeers, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    let peer = peers[0]

    # And a sequence of iDontWants carrying more message ids than the max (1200)
    proc generateMessageIds(count: int): seq[MessageId] =
      return (0 ..< count).mapIt(("msg_id_" & $it & $Moment.now()).toBytes())

    let iDontWants =
      @[
        ControlIWant(messageIDs: generateMessageIds(600)),
        ControlIWant(messageIDs: generateMessageIds(600)),
      ]

    # When node handles iDontWants
    gossipSub.handleIDontWant(peer, iDontWants)

    # Then it saves at most IDontWantMaxCount messages in the history and the rest is dropped
    check:
      peer.iDontWants[0].len == IDontWantMaxCount
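  # Sketch of the cap checked above (assuming IDontWantMaxCount is the
  # 1200 mentioned in the comment): ids past the per-heartbeat cap are
  # dropped rather than queued.
  #   import std/sets
  #   const cap = 1200
  #   var bucket = initHashSet[int]()
  #   for id in 0 ..< 1800:
  #     if bucket.len < cap:
  #       bucket.incl(id)
  #   doAssert bucket.len == cap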

  asyncTest "`replenishFanout` Degree Lo":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    check gossipSub.gossipsub[topic].len == 15
    gossipSub.replenishFanout(topic)
    check gossipSub.fanout[topic].len == gossipSub.parameters.d

  asyncTest "`dropFanoutPeers` drop expired fanout topics":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(6, topic, populateGossipsub = true, populateFanout = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
    await sleepAsync(5.millis) # allow the topic to expire

    check gossipSub.fanout[topic].len == gossipSub.parameters.d

    gossipSub.dropFanoutPeers()
    check topic notin gossipSub.fanout

  asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
    let
      topic1 = "foobar1"
      topic2 = "foobar2"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(
      6, @[topic1, topic2], populateGossipsub = true, populateFanout = true
    )
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
    gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
    await sleepAsync(5.millis) # allow first topic to expire

    check gossipSub.fanout[topic1].len == gossipSub.parameters.d
    check gossipSub.fanout[topic2].len == gossipSub.parameters.d

    gossipSub.dropFanoutPeers()
    check topic1 notin gossipSub.fanout
    check topic2 in gossipSub.fanout
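  # The expiry rule both dropFanoutPeers tests rely on, as a sketch
  # (field names taken from the tests; the real cleanup runs on heartbeat):
  #   for topic, expiry in gossipSub.lastFanoutPubSub:
  #     if Moment.now() > expiry:
  #       gossipSub.fanout.del(topic)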

  asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(45, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate mesh and fanout peers
    for i in 0 ..< 30:
      let peer = peers[i]
      if i mod 2 == 0:
        gossipSub.fanout[topic].incl(peer)
      else:
        gossipSub.grafted(peer, topic)
        gossipSub.mesh[topic].incl(peer)

    # generate gossipsub (free standing) peers
    for i in 30 ..< 45:
      let peer = peers[i]
      gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    check gossipSub.fanout[topic].len == 15
    check gossipSub.mesh[topic].len == 15
    check gossipSub.gossipsub[topic].len == 15

    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == gossipSub.parameters.d
    for p in gossipPeers.keys:
      check not gossipSub.fanout.hasPeerId(topic, p.peerId)
      check not gossipSub.mesh.hasPeerId(topic, p.peerId)

  asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate mesh and fanout peers
    for i, peer in peers:
      if i mod 2 == 0:
        gossipSub.fanout[topic].incl(peer)
      else:
        gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == gossipSub.parameters.d

  asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate mesh and fanout peers
    for i, peer in peers:
      if i mod 2 == 0:
        gossipSub.mesh[topic].incl(peer)
        gossipSub.grafted(peer, topic)
      else:
        gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == gossipSub.parameters.d

  asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
    let topic = "foobar"
    let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate mesh and fanout peers
    for i, peer in peers:
      if i mod 2 == 0:
        gossipSub.mesh[topic].incl(peer)
        gossipSub.grafted(peer, topic)
      else:
        gossipSub.fanout[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let gossipPeers = gossipSub.getGossipPeers()
    check gossipPeers.len == 0

  asyncTest "`rebalanceMesh` Degree Lo":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    check gossipSub.mesh[topic].len == gossipSub.parameters.d
asyncTest "rebalanceMesh - bad peers":
|
||||
let topic = "foobar"
|
||||
let (gossipSub, conns, peers) =
|
||||
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
|
||||
defer:
|
||||
await teardownGossipSub(gossipSub, conns)
|
||||
|
||||
var scoreLow = -11'f64
|
||||
for peer in peers:
|
||||
peer.score = scoreLow
|
||||
scoreLow += 1.0
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# low score peers should not be in mesh, that's why the count must be 4
|
||||
check gossipSub.mesh[topic].len == 4
|
||||
for peer in gossipSub.mesh[topic]:
|
||||
check peer.score >= 0.0

  asyncTest "`rebalanceMesh` Degree Hi":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    check gossipSub.mesh[topic].len == 15
    gossipSub.rebalanceMesh(topic)
    check gossipSub.mesh[topic].len ==
      gossipSub.parameters.d + gossipSub.parameters.dScore

  asyncTest "rebalanceMesh fail due to backoff":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    for peer in peers:
      gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
        peer.peerId, Moment.now() + 1.hours
      )
      let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
      # there must be a control prune due to violation of backoff
      check prunes.len != 0

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    # expect 0 since they are all backing off
    check gossipSub.mesh[topic].len == 0

  asyncTest "rebalanceMesh fail due to backoff - remote":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    check gossipSub.mesh[topic].len != 0

    for peer in peers:
      gossipSub.handlePrune(
        peer,
        @[
          ControlPrune(
            topicID: topic,
            peers: @[],
            backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
          )
        ],
      )

    # expect topic cleaned up since they are all pruned
    check topic notin gossipSub.mesh

  asyncTest "rebalanceMesh Degree Hi - audit scenario":
    let
      topic = "foobar"
      numInPeers = 6
      numOutPeers = 7
      totalPeers = numInPeers + numOutPeers

    let (gossipSub, conns, peers) = setupGossipSubWithPeers(
      totalPeers, topic, populateGossipsub = true, populateMesh = true
    )
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.parameters.dScore = 4
    gossipSub.parameters.d = 6
    gossipSub.parameters.dOut = 3
    gossipSub.parameters.dHigh = 12
    gossipSub.parameters.dLow = 4

    for i in 0 ..< numInPeers:
      let conn = conns[i]
      let peer = peers[i]
      conn.transportDir = Direction.In
      peer.score = 40.0

    for i in numInPeers ..< totalPeers:
      let conn = conns[i]
      let peer = peers[i]
      conn.transportDir = Direction.Out
      peer.score = 10.0

    check gossipSub.mesh[topic].len == 13
    gossipSub.rebalanceMesh(topic)
    # ensure we are above dLow
    check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
    var outbound = 0
    for peer in gossipSub.mesh[topic]:
      if peer.sendConn.transportDir == Direction.Out:
        inc outbound
    # ensure we give priority to and keep at least dOut outbound peers
    check outbound >= gossipSub.parameters.dOut

  asyncTest "rebalanceMesh Degree Hi - dScore controls number of peers to retain by score when pruning":
    # Given GossipSub node starting with 13 peers in mesh
    let
      topic = "foobar"
      totalPeers = 13

    let (gossipSub, conns, peers) = setupGossipSubWithPeers(
      totalPeers, topic, populateGossipsub = true, populateMesh = true
    )
    defer:
      await teardownGossipSub(gossipSub, conns)

    # And mesh is larger than dHigh
    gossipSub.parameters.dLow = 4
    gossipSub.parameters.d = 6
    gossipSub.parameters.dHigh = 8
    gossipSub.parameters.dOut = 3
    gossipSub.parameters.dScore = 13

    check gossipSub.mesh[topic].len == totalPeers

    # When mesh is rebalanced
    gossipSub.rebalanceMesh(topic)

    # Then pruning is not triggered, since the mesh is not larger than dScore
    check gossipSub.mesh[topic].len == totalPeers

  asyncTest "GossipThreshold - do not handle IHave if peer score is below threshold":
    const
      topic = "foobar"
      gossipThreshold = -100.0
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # Given peer with score below GossipThreshold
    gossipSub.parameters.gossipThreshold = gossipThreshold
    peer.score = gossipThreshold - 100.0

    # and IHave message
    let id = @[0'u8, 1, 2, 3]
    let msg = ControlIHave(topicID: topic, messageIDs: @[id])

    # When IHave is handled
    let iWant = gossipSub.handleIHave(peer, @[msg])

    # Then IHave is ignored
    check:
      iWant.messageIDs.len == 0
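  # The same gate recurs in the threshold tests below; as a standalone
  # sketch (parameter names from the tests):
  #   proc gossipAllowed(score, gossipThreshold: float): bool =
  #     score >= gossipThreshold
  #   doAssert not gossipAllowed(-200.0, -100.0)  # ignored, as above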

  asyncTest "GossipThreshold - do not handle IWant if peer score is below threshold":
    const
      topic = "foobar"
      gossipThreshold = -100.0
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # Given peer with score below GossipThreshold
    gossipSub.parameters.gossipThreshold = gossipThreshold
    peer.score = gossipThreshold - 100.0

    # and IWant message with MsgId in mcache and sentIHaves
    let id = @[0'u8, 1, 2, 3]
    gossipSub.mcache.put(id, Message())
    peer.sentIHaves[0].incl(id)
    let msg = ControlIWant(messageIDs: @[id])

    # When IWant is handled
    let messages = gossipSub.handleIWant(peer, @[msg])

    # Then IWant is ignored
    check:
      messages.len == 0

  asyncTest "GossipThreshold - do not trigger PeerExchange on Prune":
    const
      topic = "foobar"
      gossipThreshold = -100.0
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # Given peer with score below GossipThreshold
    gossipSub.parameters.gossipThreshold = gossipThreshold
    peer.score = gossipThreshold - 100.0

    # and RoutingRecordsHandler added
    var routingRecordsFut = newFuture[void]()
    gossipSub.routingRecordsHandler.add(
      proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
        routingRecordsFut.complete()
    )

    # and Prune message
    let msg = ControlPrune(
      topicID: topic, peers: @[PeerInfoMsg(peerId: peer.peerId)], backoff: 123'u64
    )

    # When Prune is handled
    gossipSub.handlePrune(peer, @[msg])

    # Then handler is not triggered
    let result = await waitForState(routingRecordsFut, HEARTBEAT_TIMEOUT)
    check:
      result.isCancelled()

  asyncTest "GossipThreshold - do not select peer for IHave broadcast if peer score is below threshold":
    const
      topic = "foobar"
      gossipThreshold = -100.0
    let
      (gossipSub, conns, peers) =
        setupGossipSubWithPeers(1, topic, populateGossipsub = true)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # Given peer with score below GossipThreshold
    gossipSub.parameters.gossipThreshold = gossipThreshold
    peer.score = gossipThreshold - 100.0

    # and message in cache
    let id = @[0'u8, 1, 2, 3]
    gossipSub.mcache.put(id, Message(topic: topic))

    # When Node selects peers for IHave broadcast
    let gossipPeers = gossipSub.getGossipPeers()

    # Then peer is not selected
    check:
      gossipPeers.len == 0

  asyncTest "PublishThreshold - do not graft when peer score below threshold":
    const
      topic = "foobar"
      publishThreshold = -100.0
    let
      (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
      peer = peers[0]
    defer:
      await teardownGossipSub(gossipSub, conns)

    # Given peer with score below publishThreshold
    gossipSub.parameters.publishThreshold = publishThreshold
    peer.score = publishThreshold - 100.0

    # and Graft message
    let msg = ControlGraft(topicID: topic)

    # When Graft is handled
    let prunes = gossipSub.handleGraft(peer, @[msg])

    # Then peer is ignored and not added to prunes
    check:
      gossipSub.mesh[topic].len == 0
      prunes.len == 0

95
tests/pubsub/testgossipsub.nim
Normal file
@@ -0,0 +1,95 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import chronicles
import stew/byteutils
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../libp2p/protocols/pubsub/rpc/[message, protobuf]
import ../helpers

suite "GossipSub":
  teardown:
    checkTrackers()

  asyncTest "subscribe/unsubscribeAll":
    let topic = "foobar"
    let (gossipSub, conns, peers) =
      setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # test via dynamic dispatch
    gossipSub.PubSub.subscribe(topic, voidTopicHandler)

    check:
      gossipSub.topics.contains(topic)
      gossipSub.gossipsub[topic].len() > 0
      gossipSub.mesh[topic].len() > 0

    # test via dynamic dispatch
    gossipSub.PubSub.unsubscribeAll(topic)

    check:
      topic notin gossipSub.topics # not in local topics
      topic notin gossipSub.mesh # not in mesh
      topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)

  asyncTest "Drop messages of topics without subscription":
    let topic = "foobar"
    var (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
    defer:
      await teardownGossipSub(gossipSub, conns)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = conns[i]
      let peer = peers[i]
      inc seqno
      let msg = Message.init(conn.peerId, ("bar" & $i).toBytes(), topic, some(seqno))
      await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    check gossipSub.mcache.msgs.len == 0

  asyncTest "subscription limits":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    gossipSub.topicsHigh = 10

    var tooManyTopics: seq[string]
    for i in 0 .. gossipSub.topicsHigh + 10:
      tooManyTopics &= "topic" & $i
    let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)

    let conn = TestBufferStream.new(noop)
    let peerId = randomPeerId()
    conn.peerId = peerId
    let peer = gossipSub.getPubSubPeer(peerId)

    await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))

    check:
      gossipSub.gossipsub.len == gossipSub.topicsHigh
      peer.behaviourPenalty > 0.0

    await conn.close()
    await gossipSub.switch.stop()
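  # Sketch of the cap checked above (names from the test): only the first
  # topicsHigh subscriptions create gossipsub entries; the overflow is
  # dropped and the offending peer picks up a behaviour penalty instead.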

  asyncTest "invalid message bytes":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let peerId = randomPeerId()
    let peer = gossipSub.getPubSubPeer(peerId)

    expect(CatchableError):
      await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])

    await gossipSub.switch.stop()

@@ -1,409 +0,0 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils]
import stew/byteutils
import metrics
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../../libp2p/muxers/muxer
import ../helpers, ../utils/[futures]

suite "GossipSub Scoring":
  teardown:
    checkTrackers()

  asyncTest "Disconnect bad peers":
    let topic = "foobar"
    var (gossipSub, conns, peers) =
      setupGossipSubWithPeers(30, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.parameters.disconnectBadPeers = true
    gossipSub.parameters.appSpecificWeight = 1.0

    for i, peer in peers:
      peer.appScore = gossipSub.parameters.graylistThreshold - 1
      let conn = conns[i]
      gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))

    gossipSub.updateScores()

    await sleepAsync(100.millis)

    check:
      # test our disconnect mechanics
      gossipSub.gossipsub.peers(topic) == 0
      # also ensure we cleanup properly the peersInIP table
      gossipSub.peersInIP.len == 0

  asyncTest "flood publish to all peers with score above threshold, regardless of subscription":
    let
      numberOfNodes = 3
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true, floodPublish = true)
      g0 = GossipSub(nodes[0])

    startNodesAndDeferStop(nodes)

    # Nodes 1 and 2 are connected to node 0
    await connectNodes(nodes[0], nodes[1])
    await connectNodes(nodes[0], nodes[2])

    let (handlerFut1, handler1) = createCompleteHandler()
    let (handlerFut2, handler2) = createCompleteHandler()

    # Nodes are subscribed to the same topic
    nodes[1].subscribe(topic, handler1)
    nodes[2].subscribe(topic, handler2)
    await waitForHeartbeat()

    # Given node 2's score is below the threshold
    for peer in g0.gossipsub.getOrDefault(topic):
      if peer.peerId == nodes[2].peerInfo.peerId:
        peer.score = (g0.parameters.publishThreshold - 1)

    # When node 0 publishes a message to topic "foo"
    let message = "Hello!".toBytes()
    check (await nodes[0].publish(topic, message)) == 1
    await waitForHeartbeat(2)

    # Then only node 1 should receive the message
    let results = await waitForStates(@[handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
    check:
      results[0].isCompleted(true)
      results[1].isPending()

  proc initializeGossipTest(): Future[(seq[PubSub], GossipSub, GossipSub)] {.async.} =
    let nodes =
      generateNodes(2, gossip = true, overheadRateLimit = Opt.some((20, 1.millis)))

    await startNodes(nodes)
    await connectNodesStar(nodes)

    proc handle(topic: string, data: seq[byte]) {.async.} =
      discard

    let gossip0 = GossipSub(nodes[0])
    let gossip1 = GossipSub(nodes[1])

    gossip0.subscribe("foobar", handle)
    gossip1.subscribe("foobar", handle)
    await waitSubGraph(nodes, "foobar")

    # Avoid being disconnected by failing signature verification
    gossip0.verifySignature = false
    gossip1.verifySignature = false

    return (nodes, gossip0, gossip1)

  proc currentRateLimitHits(): float64 =
    try:
      libp2p_gossipsub_peers_rate_limit_hits.valueByName(
        "libp2p_gossipsub_peers_rate_limit_hits_total", @["nim-libp2p"]
      )
    except KeyError:
      0

  asyncTest "e2e - GossipSub should not rate limit decodable messages below the size allowed":
    let rateLimitHits = currentRateLimitHits()
    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    gossip0.broadcast(
      gossip0.mesh["foobar"],
      RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](10))]),
      isHighPriority = true,
    )
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    gossip0.broadcast(
      gossip0.mesh["foobar"],
      RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](12))]),
      isHighPriority = true,
    )
    await waitForHeartbeat()

    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
    check currentRateLimitHits() == rateLimitHits

    await stopNodes(nodes)

  asyncTest "e2e - GossipSub should rate limit undecodable messages above the size allowed":
    let rateLimitHits = currentRateLimitHits()

    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    # Simulate sending an undecodable message
    await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(
      newSeqWith(33, 1.byte), isHighPriority = true
    )
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits + 1
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(
      newSeqWith(35, 1.byte), isHighPriority = true
    )

    checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
    check currentRateLimitHits() == rateLimitHits + 2

    await stopNodes(nodes)

  asyncTest "e2e - GossipSub should rate limit decodable messages above the size allowed":
    let rateLimitHits = currentRateLimitHits()
    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    let msg = RPCMsg(
      control: some(
        ControlMessage(
          prune:
            @[
              ControlPrune(
                topicID: "foobar",
                peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))],
                backoff: 123'u64,
              )
            ]
        )
      )
    )
    gossip0.broadcast(gossip0.mesh["foobar"], msg, isHighPriority = true)
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits + 1
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    let msg2 = RPCMsg(
      control: some(
        ControlMessage(
          prune:
            @[
              ControlPrune(
                topicID: "foobar",
                peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))],
                backoff: 123'u64,
              )
            ]
        )
      )
    )
    gossip0.broadcast(gossip0.mesh["foobar"], msg2, isHighPriority = true)

    checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
    check currentRateLimitHits() == rateLimitHits + 2

    await stopNodes(nodes)

  asyncTest "e2e - GossipSub should rate limit invalid messages above the size allowed":
    let rateLimitHits = currentRateLimitHits()
    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    let topic = "foobar"
    proc execValidator(
        topic: string, message: messages.Message
    ): Future[ValidationResult] {.async: (raw: true).} =
      let res = newFuture[ValidationResult]()
      res.complete(ValidationResult.Reject)
      res

    gossip0.addValidator(topic, execValidator)
    gossip1.addValidator(topic, execValidator)

    let msg = RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](40))])

    gossip0.broadcast(gossip0.mesh[topic], msg, isHighPriority = true)
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits + 1
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    gossip0.broadcast(
      gossip0.mesh[topic],
      RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](35))]),
      isHighPriority = true,
    )

    checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
    check currentRateLimitHits() == rateLimitHits + 2

    await stopNodes(nodes)

  asyncTest "GossipSub directPeers: don't kick direct peer with low score":
    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)

    await GossipSub(nodes[0]).addDirectPeer(
      nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
    )
    await GossipSub(nodes[1]).addDirectPeer(
      nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
    )

    GossipSub(nodes[1]).parameters.disconnectBadPeers = true
    GossipSub(nodes[1]).parameters.graylistThreshold = 100000

    var handlerFut = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      handlerFut.complete()

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1

    await handlerFut

    GossipSub(nodes[1]).updateScores()
    # peer shouldn't be in our mesh
    check:
      GossipSub(nodes[1]).peerStats[nodes[0].switch.peerInfo.peerId].score <
        GossipSub(nodes[1]).parameters.graylistThreshold
    GossipSub(nodes[1]).updateScores()

    handlerFut = newFuture[void]()
    tryPublish await nodes[0].publish("foobar", toBytes("hellow2")), 1

    # Without directPeers, this would fail
    await handlerFut.wait(1.seconds)

  asyncTest "GossipSub peers disconnections mechanics":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    var seen: Table[string, int]
    var seenFut = newFuture[void]()
    for i in 0 ..< nodes.len:
      let dialer = nodes[i]
      var handler: TopicHandler
      closureScope:
        var peerName = $dialer.peerInfo.peerId
        handler = proc(topic: string, data: seq[byte]) {.async.} =
          seen.mgetOrPut(peerName, 0).inc()
          check topic == "foobar"
          if not seenFut.finished() and seen.len >= runs:
            seenFut.complete()

      dialer.subscribe("foobar", handler)

    await waitSubGraph(nodes, "foobar")

    # ensure peer stats are stored properly and kept properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

    tryPublish await wait(
      nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
      1.minutes,
    ), 1, 5.seconds, 3.minutes

    await wait(seenFut, 5.minutes)
    check:
      seen.len >= runs
    for k, v in seen.pairs:
      check:
        v >= 1

    for node in nodes:
      var gossip = GossipSub(node)
      check:
        "foobar" in gossip.gossipsub
        gossip.fanout.len == 0
        gossip.mesh["foobar"].len > 0

    # Removing some subscriptions

    for i in 0 ..< runs:
      if i mod 3 != 0:
        nodes[i].unsubscribeAll("foobar")

    # Waiting 2 heartbeats

    for _ in 0 .. 1:
      let evnt = newAsyncEvent()
      GossipSub(nodes[0]).heartbeatEvents &= evnt
      await evnt.wait()

    # ensure peer stats are stored properly and kept properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

    # Adding again subscriptions

    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"

    for i in 0 ..< runs:
      if i mod 3 != 0:
        nodes[i].subscribe("foobar", handler)

    # Waiting 2 heartbeats

    for _ in 0 .. 1:
      let evnt = newAsyncEvent()
      GossipSub(nodes[0]).heartbeatEvents &= evnt
      await evnt.wait()

    # ensure peer stats are stored properly and kept properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

  asyncTest "GossipSub scoring - decayInterval":
    let nodes = generateNodes(2, gossip = true)

    var gossip = GossipSub(nodes[0])
    const testDecayInterval = 50.milliseconds
    gossip.parameters.decayInterval = testDecayInterval

    startNodesAndDeferStop(nodes)

    var handlerFut = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      handlerFut.complete()

    await connectNodesStar(nodes)

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    tryPublish await nodes[0].publish("foobar", toBytes("hello")), 1

    await handlerFut

    gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries =
      100
    gossip.topicParams["foobar"].meshMessageDeliveriesDecay = 0.9

    # We should have decayed 5 times, though allowing 4..6
    await sleepAsync(testDecayInterval * 5)
    check:
      gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries in
        50.0 .. 66.0

@@ -1,7 +1,7 @@
{.used.}

import
  testgossipsubcontrolmessages, testgossipsubfanout, testgossipsubcustomconn,
  testgossipsubgossip, testgossipsubheartbeat, testgossipsubmeshmanagement,
  testgossipsubmessagecache, testgossipsubmessagehandling, testgossipsubparams,
  testgossipsubscoring, testfloodsub, testmcache, testtimedcache, testmessage
  testbehavior, testgossipsub, testgossipsubparams, testmcache, testmessage,
  testscoring, testtimedcache

import ./integration/testpubsubintegration

44
tests/pubsub/testscoring.nim
Normal file
@@ -0,0 +1,44 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, pubsubpeer]
import ../../libp2p/muxers/muxer
import ../helpers

suite "GossipSub Scoring":
  teardown:
    checkTrackers()

  asyncTest "Disconnect bad peers":
    let topic = "foobar"
    var (gossipSub, conns, peers) =
      setupGossipSubWithPeers(30, topic, populateGossipsub = true)
    defer:
      await teardownGossipSub(gossipSub, conns)

    gossipSub.parameters.disconnectBadPeers = true
    gossipSub.parameters.appSpecificWeight = 1.0

    for i, peer in peers:
      peer.appScore = gossipSub.parameters.graylistThreshold - 1
      let conn = conns[i]
      gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))

    gossipSub.updateScores()

    await sleepAsync(100.millis)

    check:
      # test our disconnect mechanics
      gossipSub.gossipsub.peers(topic) == 0
      # also ensure we cleanup properly the peersInIP table
      gossipSub.peersInIP.len == 0

@@ -20,6 +20,7 @@ import
  ]
import ../helpers
import chronicles
import metrics

export builders

@@ -35,6 +36,12 @@ proc waitForHeartbeat*(multiplier: int = 1) {.async.} =
proc waitForHeartbeat*(timeout: Duration) {.async.} =
  await sleepAsync(timeout)

proc waitForHeartbeatByEvent*[T: PubSub](node: T, multiplier: int = 1) {.async.} =
  for _ in 0 ..< multiplier:
    let evnt = newAsyncEvent()
    node.heartbeatEvents &= evnt
    await evnt.wait()
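# Usage note (sketch): unlike the sleep-based waitForHeartbeat above, this
# helper observes actual heartbeat completions, e.g.
#   await nodes[0].waitForHeartbeatByEvent(2) # block until 2 heartbeats ran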

type
  TestGossipSub* = ref object of GossipSub
  DValues* = object

@@ -89,6 +96,7 @@ proc setupGossipSubWithPeers*(
  let gossipSub = TestGossipSub.init(newStandardSwitch())

  for topic in topics:
    gossipSub.subscribe(topic, voidTopicHandler)
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()

@@ -186,6 +194,7 @@ proc generateNodes*(
    historyLength = 20,
    historyGossip = 5,
    gossipThreshold = -100.0,
    decayInterval = 1.seconds,
): seq[PubSub] =
  for i in 0 ..< num:
    let switch = newStandardSwitch(

@@ -215,6 +224,7 @@ proc generateNodes*(
      p.sendIDontWantOnPublish = sendIDontWantOnPublish
      p.opportunisticGraftThreshold = opportunisticGraftThreshold
      p.gossipThreshold = gossipThreshold
      p.decayInterval = decayInterval
      if gossipFactor.isSome: p.gossipFactor = gossipFactor.get
      applyDValues(p, dValues)
      p

@@ -531,3 +541,22 @@ proc baseTestProcedure*(

proc `$`*(peer: PubSubPeer): string =
  shortLog(peer)

proc currentRateLimitHits*(): float64 =
  try:
    libp2p_gossipsub_peers_rate_limit_hits.valueByName(
      "libp2p_gossipsub_peers_rate_limit_hits_total", @["nim-libp2p"]
    )
  except KeyError:
    0

proc addDirectPeer*[T: PubSub](node: T, target: T) {.async.} =
  doAssert node.switch.peerInfo.peerId != target.switch.peerInfo.peerId,
    "Could not add same peer"
  await node.addDirectPeer(target.switch.peerInfo.peerId, target.switch.peerInfo.addrs)

proc addDirectPeerStar*[T: PubSub](nodes: seq[T]) {.async.} =
  for node in nodes:
    for target in nodes:
      if node.switch.peerInfo.peerId != target.switch.peerInfo.peerId:
        await addDirectPeer(node, target)
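# Usage note (sketch): wires every pair of nodes as direct peers, as the
# direct-peer tests above do with `await nodes.addDirectPeerStar()`.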

178
tests/testautotls.nim
Normal file
@@ -0,0 +1,178 @@
{.used.}

# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import sequtils, json
import chronos, chronos/apps/http/httpclient
import ../libp2p/[stream/connection, upgrademngrs/upgrade, autotls/acme/mockapi, wire]

import ./helpers

suite "AutoTLS ACME Client":
  var api {.threadvar.}: MockACMEApi
  var key {.threadvar.}: KeyPair

  asyncTeardown:
    await api.close()
    checkTrackers()

  asyncSetup:
    api = await MockACMEApi.new()
    api.mockedHeaders = HttpTable.init()
    key = KeyPair.random(PKScheme.RSA, newRng()[]).get()

  asyncTest "register to acme server":
    api.mockedBody = %*{"status": "valid"}
    api.mockedHeaders.add("location", "some-expected-kid")

    let registerResponse = await api.requestRegister(key)
    check registerResponse.kid == "some-expected-kid"

  asyncTest "request challenge for a domain":
    api.mockedBody =
      %*{
        "status": "pending",
        "authorizations": ["expected-authorizations-url"],
        "finalize": "expected-finalize-url",
      }
    api.mockedHeaders.set("location", "expected-order-url")

    let challengeResponse =
      await api.requestNewOrder(@["some.dummy.domain.com"], key, "kid")
    check challengeResponse.status == ACMEChallengeStatus.pending
    check challengeResponse.authorizations == ["expected-authorizations-url"]
    check challengeResponse.finalize == "expected-finalize-url"
    check challengeResponse.orderURL == "expected-order-url"

    # reset mocked obj for second request
    api.mockedBody =
      %*{
        "challenges": [
          {
            "url": "expected-dns01-url",
            "type": "dns-01",
            "status": "pending",
            "token": "expected-dns01-token",
          }
        ]
      }

    let authorizationsResponse =
      await api.requestAuthorizations(challengeResponse.authorizations, key, "kid")
    check authorizationsResponse.challenges.len > 0

    let dns01 = authorizationsResponse.challenges.filterIt(it.`type` == "dns-01")[0]
    check dns01.url == "expected-dns01-url"
    check dns01.`type` == "dns-01"
    check dns01.token == "expected-dns01-token"
    check dns01.status == ACMEChallengeStatus.pending

  asyncTest "register with unsupported keys":
    let unsupportedSchemes = [PKScheme.Ed25519, PKScheme.Secp256k1, PKScheme.ECDSA]
    for scheme in unsupportedSchemes:
      let unsupportedKey = KeyPair.random(scheme, newRng()[]).get()
      expect(ACMEError):
        discard await api.requestRegister(unsupportedKey)

  asyncTest "request challenge with invalid kid":
    expect(ACMEError):
      discard await api.requestChallenge(@["domain.com"], key, "invalid_kid_here")

  asyncTest "challenge completed successful":
    api.mockedBody = %*{"checkURL": "some-check-url"}
    discard await api.requestCompleted("some-chal-url", key, "kid")

    api.mockedBody = %*{"status": "valid"}
    api.mockedHeaders.add("Retry-After", "1")
    let completed = await api.checkChallengeCompleted("some-chal-url", key, "kid")
    check completed == true

  asyncTest "challenge completed max retries reached":
    api.mockedBody = %*{"checkURL": "some-check-url"}
    discard await api.requestCompleted("some-chal-url", key, "kid")

    api.mockedBody = %*{"status": "pending"}
    api.mockedHeaders.add("Retry-After", "1")
    let completed =
      await api.checkChallengeCompleted("some-chal-url", key, "kid", retries = 1)
    check completed == false
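  # Sketch of the retry loop the two tests above exercise (shape assumed
  # from the mock: poll the check URL, honour Retry-After between
  # attempts, give up after `retries` polls while status stays "pending"):
  #   proc pollStatus(next: proc(): string, retries: int): bool =
  #     for _ in 0 ..< retries:
  #       if next() == "valid":
  #         return true
  #     false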

  asyncTest "challenge completed invalid":
    api.mockedBody = %*{"checkURL": "some-check-url"}
    discard await api.requestCompleted("some-chal-url", key, "kid")

    api.mockedBody = %*{"status": "invalid"}
    api.mockedHeaders.add("Retry-After", "1")
    expect(ACMEError):
      discard await api.checkChallengeCompleted("some-chal-url", key, "kid")

  asyncTest "finalize certificate successful":
    api.mockedBody = %*{"status": "valid"}
    api.mockedHeaders.add("Retry-After", "1")
    let finalized = await api.certificateFinalized(
      "some-domain", "some-finalize-url", "some-order-url", key, "kid"
    )
    check finalized == true

  asyncTest "finalize certificate max retries reached":
    api.mockedBody = %*{"status": "processing"}
    api.mockedHeaders.add("Retry-After", "1")
    let finalized = await api.certificateFinalized(
      "some-domain", "some-finalize-url", "some-order-url", key, "kid", retries = 1
    )
    check finalized == false

  asyncTest "finalize certificate invalid":
    api.mockedBody = %*{"status": "invalid"}
    api.mockedHeaders.add("Retry-After", "1")
    expect(ACMEError):
      discard await api.certificateFinalized(
        "some-domain", "some-finalize-url", "some-order-url", key, "kid"
      )

  asyncTest "expect error on invalid JSON response":
    api.mockedBody = %*{"inexistent field": "invalid value"}

    expect(ACMEError):
      # avoid calling overloaded mock method requestNonce here since we want to test the actual thing
      discard await procCall requestNonce(ACMEApi(api))

    expect(ACMEError):
      discard await api.requestRegister(key)

    expect(ACMEError):
      discard await api.requestNewOrder(@["some-domain"], key, "kid")

    expect(ACMEError):
      discard await api.requestAuthorizations(@["auth-1", "auth-2"], key, "kid")

    expect(ACMEError):
      discard await api.requestChallenge(@["domain-1", "domain-2"], key, "kid")

    expect(ACMEError):
      discard await api.requestCheck(
        "some-check-url", ACMECheckKind.ACMEOrderCheck, key, "kid"
      )

    expect(ACMEError):
      discard await api.requestCheck(
        "some-check-url", ACMECheckKind.ACMEChallengeCheck, key, "kid"
      )

    expect(ACMEError):
      discard await api.requestCompleted("some-chal-url", key, "kid")

    expect(ACMEError):
      discard await api.requestFinalize("some-domain", "some-finalize-url", key, "kid")

    expect(ACMEError):
      discard await api.requestGetOrder("some-order-url")

@@ -47,14 +47,3 @@ suite "AutoTLS Integration":
    check challenge.dns01.`type`.len() > 0
    check challenge.dns01.status == ACMEChallengeStatus.pending
    check challenge.dns01.token.len() > 0

  asyncTest "test register with unsupported keys":
    let unsupportedSchemes = [PKScheme.Ed25519, PKScheme.Secp256k1, PKScheme.ECDSA]
    for scheme in unsupportedSchemes:
      let unsupportedKey = KeyPair.random(scheme, newRng()[]).get()
      expect(ACMEError):
        discard await api.requestRegister(unsupportedKey)

  asyncTest "test request challenge with invalid kid":
    expect(ACMEError):
      discard await api.requestChallenge(@["domain.com"], key, "invalid_kid_here")

@@ -28,10 +28,10 @@ import
  transports/tls/testcertificate

import
  testnameresolve, testmultistream, testbufferstream, testidentify,
  testautotls, testnameresolve, testmultistream, testbufferstream, testidentify,
  testobservedaddrmanager, testconnmngr, testswitch, testnoise, testpeerinfo,
  testpeerstore, testping, testmplex, testrelayv1, testrelayv2, testrendezvous,
  testdiscovery, testyamux, testautonat, testautonatservice, testautorelay, testdcutr,
  testhpservice, testutility, testhelpers, testwildcardresolverservice
  testhpservice, testutility, testhelpers, testwildcardresolverservice, testperf

import kademlia/testencoding

113
tests/testpeeridauth.nim
Normal file
@@ -0,0 +1,113 @@
{.used.}

# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import uri, base64, times
import chronos, chronos/apps/http/httpclient
import
  ../libp2p/
    [
      stream/connection,
      upgrademngrs/upgrade,
      peeridauth/mockclient,
      wire,
      crypto/crypto,
    ]

import ./helpers

suite "PeerID Auth Client":
  var client {.threadvar.}: MockPeerIDAuthClient
  var rng {.threadvar.}: ref HmacDrbgContext
  var peerInfo {.threadvar.}: PeerInfo

  asyncTeardown:
    await client.close()
    checkTrackers()

  asyncSetup:
    rng = newRng()
    client = MockPeerIDAuthClient.new(rng)
    client.mockedHeaders = HttpTable.init()
    peerInfo = PeerInfo.new(PrivateKey.random(PKScheme.RSA, rng[]).get())

  asyncTest "request authentication":
    let serverPrivateKey = PrivateKey.random(PKScheme.RSA, rng[]).get()
    let serverPubkey = serverPrivateKey.getPublicKey().get()
    let b64serverPubkey = serverPubkey.pubkeyBytes().encode(safe = true)
    client.mockedHeaders.add(
      "WWW-Authenticate",
      "libp2p-PeerID " & "challenge-client=\"somechallengeclient\", public-key=\"" &
        b64serverPubkey & "\", opaque=\"someopaque\"",
    )

    let authenticationResponse =
      await client.requestAuthentication(parseUri("https://example.com/some/uri"))

    check authenticationResponse.challengeClient ==
      PeerIDAuthChallenge("somechallengeclient")
    check authenticationResponse.opaque == PeerIDAuthOpaque("someopaque")
    check authenticationResponse.serverPubkey == serverPubkey

  asyncTest "request authorization":
    let sig = PeerIDAuthSignature("somesig")
    let bearer = BearerToken(token: "somebearer", expires: Opt.none(DateTime))
    client.mockedHeaders.add(
      "Authentication-Info",
      "libp2p-PeerID " & "sig=\"" & sig & "\", " & "bearer=\"" & bearer.token & "\"",
    )

    let uri = parseUri("https://example.com/some/uri")
    let serverPrivateKey = PrivateKey.random(PKScheme.RSA, rng[]).get()
    let serverPubkey = serverPrivateKey.getPublicKey().get()
    let authorizationResponse = await client.requestAuthorization(
      peerInfo, uri, "some-challenge-client", "some-challenge-server", serverPubkey,
      "some-opaque", "some-payload",
    )
    check authorizationResponse.bearer == bearer
    check authorizationResponse.sig == sig

  asyncTest "checkSignature successful":
    # example from peer-id-auth spec
    let serverPrivateKey = PrivateKey
      .init(
        "0801124001010101010101010101010101010101010101010101010101010101010101018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c"
      )
      .get()

    let serverPublicKey = serverPrivateKey.getPublicKey().get()
    let challenge = "ERERERERERERERERERERERERERERERERERERERERERE="
    let hostname = "example.com"
    let sig =
      "UA88qZbLUzmAxrD9KECbDCgSKAUBAvBHrOCF2X0uPLR1uUCF7qGfLPc7dw3Olo-LaFCDpk5sXN7TkLWPVvuXAA=="
    let clientPublicKey = PublicKey
      .init("080112208139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394")
      .get()
    check checkSignature(sig, serverPublicKey, challenge, clientPublicKey, hostname)

  asyncTest "checkSignature failed":
    # example from peer-id-auth spec (but with sig altered)
    let serverPrivateKey = PrivateKey
      .init(
        "0801124001010101010101010101010101010101010101010101010101010101010101018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c"
      )
      .get()
    let serverPublicKey = serverPrivateKey.getPublicKey().get()
    let challenge = "ERERERERERERERERERERERERERERERERERERERERERE="
    let hostname = "example.com"
    let sig =
      "ZZZZZZZZZZZZZZZ9KECbDCgSKAUBAvBHrOCF2X0uPLR1uUCF7qGfLPc7dw3Olo-LaFCDpk5sXN7TkLWPVvuXAA=="
    let clientPublicKey = PublicKey
      .init("080112208139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394")
      .get()
    check checkSignature(sig, serverPublicKey, challenge, clientPublicKey, hostname) ==
      false
|
||||
@@ -24,16 +24,16 @@ const
  HttpPeerAuthFailed = 401

suite "PeerID Auth":
  var api {.threadvar.}: PeerIDAuthApi
  var client {.threadvar.}: PeerIDAuthClient
  var peerInfo {.threadvar.}: PeerInfo

  asyncTeardown:
    await api.close()
    await client.close()
    checkTrackers()

  asyncSetup:
    let rng = newRng()
    api = PeerIDAuthApi.new(rng)
    client = PeerIDAuthClient.new(rng)
    peerInfo = PeerInfo.new(PrivateKey.random(PKScheme.RSA, rng[]).get())

  asyncTest "test peerID send":
@@ -49,10 +49,10 @@ suite "PeerID Auth":
    }

    let (bearer, responseWithoutBearer) =
      await api.send(parseUri(AuthPeerURL), peerInfo, payload)
      await client.send(parseUri(AuthPeerURL), peerInfo, payload)
    check responseWithoutBearer.status != HttpPeerAuthFailed
    doAssert bearer.token.len > 0

    let (_, responseWithBearer) =
      await api.send(parseUri(AuthPeerURL), peerInfo, payload, bearer)
      await client.send(parseUri(AuthPeerURL), peerInfo, payload, bearer)
    check responseWithBearer.status != HttpPeerAuthFailed

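A minimal sketch of the bearer flow the test above exercises (not part of the diff; authUri, myPeerInfo and payload stand in for real values): the first send performs the full PeerID Auth handshake and yields a bearer token, and passing that token back on later calls skips the handshake.

# first request: full handshake, returns a bearer token
let (bearer, firstResponse) = await client.send(authUri, myPeerInfo, payload)
# later requests: reuse the bearer instead of re-running the handshake
let (_, laterResponse) = await client.send(authUri, myPeerInfo, payload, bearer)
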
112
tests/testperf.nim
Normal file
112
tests/testperf.nim
Normal file
@@ -0,0 +1,112 @@
{.used.}

# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import chronos
import ../libp2p
import
  ../libp2p/[switch, protocols/perf/client, protocols/perf/server, protocols/perf/core]
import ./helpers

proc createSwitch(
    isServer: bool = false, useMplex: bool = false, useYamux: bool = false
): Switch =
  var builder = SwitchBuilder
    .new()
    .withRng(newRng())
    .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
    .withTcpTransport()
    .withNoise()
  if useMplex:
    builder = builder.withMplex()
  if useYamux:
    builder = builder.withYamux()

  var switch = builder.build()

  # only the server side mounts the perf protocol handler
  if isServer:
    switch.mount(Perf.new())

  return switch

proc runTest(server: Switch, client: Switch) {.async.} =
  const
    bytesToUpload = 100000
    bytesToDownload = 10000000

  await server.start()
  await client.start()

  defer:
    await client.stop()
    await server.stop()

  let conn = await client.dial(server.peerInfo.peerId, server.peerInfo.addrs, PerfCodec)
  var perfClient = PerfClient.new()
  discard await perfClient.perf(conn, bytesToUpload, bytesToDownload)

  # once perf completes, the final stats must match the requested byte counts
  let stats = perfClient.currentStats()
  check:
    stats.isFinal == true
    stats.uploadBytes == bytesToUpload
    stats.downloadBytes == bytesToDownload

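# runTest above is shared by the transport-specific cases below: each builds a
# server/client pair with a different stream muxer and runs the same perf check
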
suite "Perf protocol":
  teardown:
    checkTrackers()

  asyncTest "tcp::yamux":
    return # nim-libp2p#1462: test fails with a stream closed error
    let server = createSwitch(isServer = true, useYamux = true)
    let client = createSwitch(useYamux = true)
    await runTest(server, client)

  asyncTest "tcp::mplex":
    let server = createSwitch(isServer = true, useMplex = true)
    let client = createSwitch(useMplex = true)
    await runTest(server, client)

  asyncTest "perf with exception":
    let server = createSwitch(isServer = true, useMplex = true)
    let client = createSwitch(useMplex = true)

    await server.start()
    await client.start()

    defer:
      await client.stop()
      await server.stop()

    let conn =
      await client.dial(server.peerInfo.peerId, server.peerInfo.addrs, PerfCodec)
    var perfClient = PerfClient.new()
    var perfFut: Future[Duration]
    try:
      # start the perf future with a large download request so it runs long
      # enough for us to cancel it
      perfFut = perfClient.perf(conn, 1.uint64, 1000000000000.uint64)
    except CatchableError:
      discard

    # after some time the upload side should be finished
    await sleepAsync(50.milliseconds)
    var stats = perfClient.currentStats()
    check:
      stats.isFinal == false
      stats.uploadBytes == 1

    perfFut.cancel() # cancelling the future raises inside the perf call
    await sleepAsync(50.milliseconds)

    # after cancellation, the stats must be marked as the final snapshot
    stats = perfClient.currentStats()
    check:
      stats.isFinal == true
      stats.uploadBytes == 1

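A minimal sketch of driving PerfClient directly, using only the API shown above (mySwitch, serverPeerId and serverAddrs are placeholders for values obtained elsewhere): perf() uploads and then downloads the requested byte counts and returns the measured duration, while currentStats() can be polled concurrently for progress.

proc measureOnce(
    mySwitch: Switch, serverPeerId: PeerId, serverAddrs: seq[MultiAddress]
) {.async.} =
  let conn = await mySwitch.dial(serverPeerId, serverAddrs, PerfCodec)
  var perfClient = PerfClient.new()
  # upload 1 MB, then download 1 MB, measuring the total duration
  let duration = await perfClient.perf(conn, 1_000_000.uint64, 1_000_000.uint64)
  echo "perf round trip took ", duration
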
@@ -87,5 +87,5 @@ when isMainModule:
    let dump = if cmd == "dump": true else: false
    try:
      echo parseFile(paramStr(2), dump)
    except:
      fatal "Could not read pbcap file", filename = path
    except CatchableError as e:
      fatal "Could not read pbcap file", description = e.msg, filename = path