Compare commits

...

158 Commits

Author SHA1 Message Date
Gabriel Cruz
faff912465 Merge branch 'master' into move-testdaemon 2025-06-23 23:26:34 +00:00
Gabriel Cruz
021d0c1700 chore(acme): add ACMEClient to hold information about an ACME account (#1470) 2025-06-23 19:24:33 +00:00
Gabriel Cruz
f49cd377ce fix(peeridauth): fix peeridauth_integration import (#1478) 2025-06-23 15:13:22 +00:00
Gabriel Cruz
effe1d08ab chore(tests): move testdaemon out of testall 2025-06-23 09:39:04 -03:00
richΛrd
fc80840784 feat(kad-dht): handler (#1455) 2025-06-20 21:08:06 +00:00
richΛrd
7742d06a58 feat(kad-dht): routing table (#1454) 2025-06-20 16:47:48 -04:00
richΛrd
e0ea1d48a4 fix: make quic test optional (#1475) 2025-06-20 15:39:40 -04:00
richΛrd
f028ad8c12 fix: force close of streams instead of reset when closing connection (#1466) 2025-06-20 12:57:20 +00:00
richΛrd
9c153c822b chore(version): update libp2p.nimble to 1.11.0 (#1433) 2025-06-18 16:39:45 -04:00
Radosław Kamiński
d803352bd6 test(gossipsub): split unit and integration tests (#1465) 2025-06-16 15:18:18 +00:00
Radosław Kamiński
2eafac47e8 test(gossipsub): GossipThreshold and PublishThreshold tests (#1464) 2025-06-16 14:46:25 +00:00
vladopajic
848fdde0a8 feat(perf): add stats (#1452) 2025-06-13 10:16:45 +00:00
Gabriel Cruz
31e7dc68e2 chore(peeridauth): add mocked client (#1458) 2025-06-12 21:11:36 +00:00
Ivan FB
08299a2059 chore: Add some more context when an exception is caught (#1432)
Co-authored-by: richΛrd <info@richardramos.me>
2025-06-12 14:38:25 +00:00
Gabriel Cruz
2f3156eafb fix(daily): fix typo in testintegration (#1463) 2025-06-12 09:26:46 -03:00
Radosław Kamiński
72e85101b0 test(gossipsub): refactor and unify scoring tests (#1461) 2025-06-12 08:18:01 +00:00
Gabriel Cruz
d205260a3e chore(acme): add MockACMEApi for testing (#1457) 2025-06-11 18:59:29 +00:00
Radosław Kamiński
97e576d146 test: increase timeout (#1460) 2025-06-11 14:19:33 +00:00
richΛrd
888cb78331 feat(kad-dht): protobuffers (#1453) 2025-06-11 12:56:02 +00:00
richΛrd
1d4c261d2a feat: withWsTransport (#1398) 2025-06-10 22:32:55 +00:00
Gabriel Cruz
83de0c0abd feat(peeridauth): add peeridauth (#1445) 2025-06-10 10:25:34 -03:00
AkshayaMani
c501adc9ab feat(gossipsub): Add support for custom connection handling (Mix protocol integration) (#1420)
Co-authored-by: Ben-PH <benphawke@gmail.com>
2025-06-09 13:36:06 -04:00
Radosław Kamiński
f9fc24cc08 test(gossipsub): flaky tests (#1451) 2025-06-09 17:20:49 +01:00
richΛrd
cd26244ccc chore(quic): add libp2p_network_bytes metric (#1439)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-06-09 09:42:52 -03:00
vladopajic
cabab6aafe chore(gossipsub): add consts (#1447)
Co-authored-by: Radoslaw Kaminski <radoslaw@status.im>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-06-06 14:33:38 +00:00
Radosław Kamiński
fb42a9b4aa test(gossipsub): parameters (#1442)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-06-06 14:09:55 +00:00
Radosław Kamiński
141f4d9116 fix(GossipSub): save sent iHave in first element (#1405) 2025-06-06 10:27:59 +00:00
Gabriel Cruz
cb31152b53 feat(autotls): add acme client (#1436) 2025-06-05 17:47:02 +00:00
Radosław Kamiński
3a7745f920 test(gossipsub): message cache (#1431) 2025-06-03 15:18:29 +01:00
Radosław Kamiński
a89916fb1a test: checkUntilTimeout refactor (#1437) 2025-06-03 13:31:34 +01:00
vladopajic
c6cf46c904 fix(ci-daily): delete cache action will continue on error (#1435) 2025-06-02 17:08:31 +02:00
Gabriel Cruz
b28a71ab13 chore(readme): improve README's development section (#1427) 2025-05-29 17:51:29 +00:00
vladopajic
95b9859bcd chore(interop): move interop code to separate folder (#1413) 2025-05-29 16:14:12 +00:00
vladopajic
9e599753af ci(daily): add pinned dependencies variant (#1418) 2025-05-29 15:27:06 +00:00
richΛrd
2e924906bb chore: bump quic (#1428) 2025-05-29 14:25:02 +00:00
Radosław Kamiński
e811c1ad32 fix(gossipsub): save iDontWants messages in the first element of history (#1393) 2025-05-29 13:33:51 +01:00
Radosław Kamiński
86695b55bb test(gossipsub): include missing test files and handle flaky tests (#1416)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-05-29 12:44:21 +01:00
vladopajic
8c3a4d882a ci(dependencies): fix access to tokens (#1421) 2025-05-29 00:27:36 +00:00
richΛrd
4bad343ddc fix: limit chronicles version to < 0.11.0 (#1423) 2025-05-28 21:00:41 -03:00
vladopajic
47b8a05c32 ci(daily): improvements (#1404) 2025-05-27 14:41:53 +00:00
Radosław Kamiński
4e6f4af601 test(gossipsub): heartbeat tests (#1391) 2025-05-27 10:28:12 +01:00
Miran
7275f6f9c3 chore: unused imports are now errors (#1399) 2025-05-26 21:36:08 +02:00
richΛrd
c3dae6a7d4 fix(quic): reset and mm for interop tests (#1397) 2025-05-26 12:16:17 -04:00
vladopajic
bb404eda4a fix(ci-daily): remove --solver flag (#1400) 2025-05-26 16:48:51 +02:00
richΛrd
584710bd80 chore: move -d:libp2p_quic_support flag to .nimble (#1392) 2025-05-26 08:57:26 -04:00
Radosław Kamiński
ad5eae9adf test(gossipsub): move and refactor control messages tests (#1380) 2025-05-22 15:10:37 +00:00
richΛrd
26fae7cd2d chore: bump quic (#1387) 2025-05-21 22:30:35 +00:00
Miran
87d6655368 chore: update more dependencies (#1374) 2025-05-21 21:46:09 +00:00
richΛrd
cd60b254a0 chore(version): update libp2p.nimble to 1.10.1 (#1390) 2025-05-21 07:40:11 -04:00
richΛrd
b88cdcdd4b chore: make quic optional (#1389) 2025-05-20 21:04:30 -04:00
vladopajic
4a5e06cb45 revert: disable transport interop with zig-v0.0.1 (#1372) (#1383) 2025-05-20 14:20:42 +02:00
vladopajic
fff3a7ad1f chore(hp): add timeout on dial (#1378) 2025-05-20 11:10:01 +00:00
Miran
05c894d487 fix(ci): test Nim 2.2 (#1385) 2025-05-19 15:51:56 -03:00
vladopajic
8850e9ccd9 ci(test): reduce timeout (#1376) 2025-05-19 15:34:16 +00:00
Ivan FB
2746531851 chore(dialer): capture possible exception (#1381) 2025-05-19 10:57:04 -04:00
vladopajic
2856db5490 ci(interop): disable transport interop with zig-v0.0.1 (#1372) 2025-05-15 20:04:41 +00:00
AYAHASSAN287
b29e78ccae test(gossipsub): block5 protobuf test cases (#1204)
Co-authored-by: Radoslaw Kaminski <radoslaw@status.im>
2025-05-15 16:32:03 +01:00
Gabriel Cruz
c9761c3588 chore: improve README.md text (#1373) 2025-05-15 12:35:01 +00:00
richΛrd
e4ef21e07c chore: bump quic (#1371)
Co-authored-by: Gabriel Cruz <8129788+gmelodie@users.noreply.github.com>
2025-05-14 21:06:38 +00:00
Miran
61429aa0d6 chore: fix import warnings (#1370) 2025-05-14 19:08:46 +00:00
Radosław Kamiński
c1ef011556 test(gossipsub): refactor testgossipinternal (#1366) 2025-05-14 17:15:31 +01:00
vladopajic
cd1424c09f chore(interop): use the same redis dependency (#1364) 2025-05-14 15:49:51 +00:00
Miran
878d627f93 chore: update dependencies (#1368) 2025-05-14 10:51:08 -03:00
richΛrd
1d6385ddc5 chore: bump quic (#1361)
Co-authored-by: Gabriel Cruz <8129788+gmelodie@users.noreply.github.com>
2025-05-14 11:40:13 +00:00
Gabriel Cruz
873f730b4e chore: change nim-stew dep tagging (#1362) 2025-05-13 21:46:07 -04:00
Radosław Kamiński
1c1547b137 test(gossipsub): Topic Membership Tests - updated (#1363) 2025-05-13 16:22:49 +01:00
Álex
9997f3e3d3 test(gossipsub): control message (#1191)
Co-authored-by: Radoslaw Kaminski <radoslaw@status.im>
2025-05-13 10:54:07 -04:00
richΛrd
4d0b4ecc22 feat: interop (#1303) 2025-05-06 19:27:33 -04:00
Gabriel Cruz
ccb24b5f1f feat(cert): add certificate signing request (CSR) generation (#1355) 2025-05-06 18:56:51 +00:00
Marko Burčul
5cb493439d fix(ci): secrets token typo (#1357) 2025-05-05 09:49:42 -03:00
Ivan FB
24b284240a chore: add gcsafe pragma to removeValidator (#1356) 2025-05-02 18:39:00 +02:00
richΛrd
b0f77d24f9 chore(version): update libp2p.nimble to 1.10.0 (#1351) 2025-05-01 05:39:58 -04:00
richΛrd
e32ac492d3 chore: set @vacp2p/p2p team as codeowners of repo (#1352) 2025-05-01 05:03:54 -03:00
Gabriel Cruz
470a7f8cc5 chore: add libp2p CID codec (#1348) 2025-04-27 09:45:40 +00:00
Radosław Kamiński
b269fce289 test(gossipsub): reorganize tests by feature category (#1350) 2025-04-25 16:48:50 +01:00
vladopajic
bc4febe92c fix: git ignore for tests (#1349) 2025-04-24 15:36:46 +02:00
Radosław Kamiński
b5f9bfe0f4 test(gossipsub): optimise heartbeat interval and sleepAsync (#1342) 2025-04-23 18:10:16 +01:00
Gabriel Cruz
4ce1e8119b chore(readme): add gabe as a maintainer (#1346) 2025-04-23 15:57:32 +02:00
Miran
65136b38e2 chore: fix warnings (#1341)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-04-22 19:45:53 +00:00
Gabriel Cruz
ffc114e8d9 chore: fix broken old status-im links (#1332) 2025-04-22 09:14:26 +00:00
Radosław Kamiński
f2be2d6ed5 test: include missing tests in testall (#1338) 2025-04-22 09:45:38 +01:00
Radosław Kamiński
ab690a06a6 test: combine tests (#1335) 2025-04-21 17:39:42 +01:00
Radosław Kamiński
10cdaf14c5 chore(ci): decouple examples from unit tests (#1334) 2025-04-21 16:31:50 +01:00
Radosław Kamiński
ebbfb63c17 chore(test): remove unused flags and simplify testpubsub (#1328) 2025-04-17 13:38:27 +02:00
Álex
ac25da6cea test(gossipsub): message propagation (#1184)
Co-authored-by: Radoslaw Kaminski <radoslaw@status.im>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-04-14 15:49:13 +01:00
Gabriel Cruz
fb41972ba3 chore: rendezvous improvements (#1319) 2025-04-11 13:31:24 +00:00
Richard Ramos
504d1618af fix: bump nim-quic 2025-04-10 17:54:20 -04:00
richΛrd
0f91b23f12 fix: do not use while loop for quic transport errors (#1317) 2025-04-10 21:47:42 +00:00
vladopajic
5ddd62a8b9 chore(git): ignore auto generated test binaries (#1320) 2025-04-10 13:39:04 +00:00
vladopajic
e7f13a7e73 refactor: utilize singe bridgedConnections (#1309) 2025-04-10 08:37:26 -04:00
vladopajic
89e825fb0d fix(quic): continue accept when client certificate is incorrect (#1312) 2025-04-08 21:03:47 +02:00
richΛrd
1b706e84fa chore: bump nim-quic (#1314) 2025-04-08 10:04:31 -04:00
richΛrd
5cafcb70dc chore: remove range checks from rendezvous (#1306) 2025-04-07 12:16:18 +00:00
vladopajic
8c71266058 chore(readme): add quic and memory transports (#1311) 2025-04-04 15:07:31 +00:00
vladopajic
9c986c5c13 feat(transport): add memory transport (#1304) 2025-04-04 15:43:34 +02:00
vladopajic
3d0451d7f2 chore(protocols): remove deprecated utilities (#1305) 2025-04-04 08:44:36 +00:00
richΛrd
b1f65c97ae fix: unsafe string usage (#1308) 2025-04-03 15:33:08 -04:00
vladopajic
5584809fca chore(certificate): update test vectors (#1294) 2025-04-01 17:15:26 +02:00
richΛrd
7586f17b15 fix: set peerId on incoming Quic connection (#1302) 2025-03-31 09:38:30 -04:00
richΛrd
0e16d873c8 feat: withQuicTransport (#1301) 2025-03-30 04:44:49 +00:00
richΛrd
b11acd2118 chore: update quic and expect exception in test (#1300)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-03-27 12:19:49 -04:00
vladopajic
1376f5b077 chore(quic): add tests with invalid certs (#1297) 2025-03-27 15:19:14 +01:00
richΛrd
340ea05ae5 feat: quic (#1265)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-03-26 10:17:15 -04:00
vladopajic
024ec51f66 feat(certificate): add date verification (#1299) 2025-03-25 11:50:25 +01:00
richΛrd
efe453df87 refactor: use openssl instead of mbedtls (#1298) 2025-03-24 10:22:52 -04:00
vladopajic
c0f4d903ba feat(certificate): set distinguishable issuer name with peer id (#1296) 2025-03-21 12:38:02 +00:00
vladopajic
28f2b268ae chore(certificate): cosmetics (#1293) 2025-03-19 17:02:14 +00:00
vladopajic
5abb6916b6 feat: X.509 certificate validation (#1292) 2025-03-19 15:40:14 +00:00
richΛrd
e6aec94c0c chore: use token per repo in autobump task (#1288) 2025-03-18 17:12:52 +00:00
vladopajic
9eddc7c662 chore: specify exceptions (#1284) 2025-03-17 13:09:18 +00:00
richΛrd
028c730a4f chore: remove python dependency (#1287) 2025-03-17 08:04:30 -04:00
richΛrd
3c93bdaf80 chore(version): update libp2p.nimble to 1.9.0 (#1282)
Co-authored-by: kaiserd <1684595+kaiserd@users.noreply.github.com>
2025-03-14 15:56:51 +00:00
vladopajic
037b99997e chore(ci): remove AppVeyor config (#1281) 2025-03-10 21:09:40 +00:00
vladopajic
e67744bf2a chore(connmanager): propagate CancelledError (#1276) 2025-03-05 17:31:46 +00:00
vladopajic
5843e6fb4f chore(protocol): handler to propagate CancelledError (#1275) 2025-03-05 15:04:29 +00:00
vladopajic
f0ff7e4c69 fix(ci): transport interoperability action (#1277) 2025-03-04 19:00:08 +01:00
diegomrsantos
24808ad534 feat(tls-certificate): generate and parse libp2p tls certificate (#1209)
Co-authored-by: Richard Ramos <info@richardramos.me>
2025-03-04 08:05:40 -04:00
vladopajic
c4bccef138 chore(relay): specify raised exceptions (#1274) 2025-03-03 15:18:27 +01:00
vladopajic
adf2345adb chore: specify raised exceptions in miscellaneous places (#1269) 2025-02-28 09:19:53 -04:00
vladopajic
f7daad91e6 chore(protocols): specify raised exceptions (part 2) (#1268) 2025-02-28 08:06:51 -04:00
vladopajic
65052d7b59 chore(transports): specify raised exceptions (#1266) 2025-02-27 10:42:05 +01:00
vladopajic
b07ec5c0c6 chore(protocol): list raised exceptions (#1260) 2025-02-25 17:33:24 -04:00
vladopajic
f4c94ddba1 chore(dialer): list raised exceptions (#1264) 2025-02-24 20:10:20 +01:00
vladopajic
a7ec485ca9 chore(transports): list raised exceptions (#1255) 2025-02-24 16:42:27 +01:00
vladopajic
86b6469e35 chore: list raised exceptions in switch services (#1251)
Co-authored-by: richΛrd <info@richardramos.me>
2025-02-24 15:33:06 +01:00
Ivan FB
3e16ca724d chore: add trace log to analyse remote stream reset (#1253) 2025-02-24 08:24:27 -04:00
vladopajic
93dd5a6768 chore(connmanager): specify raised exceptions (#1263) 2025-02-19 15:55:36 +00:00
richΛrd
ec43d0cb9f chore: refactors to remove .closure., .gcsafe for .async. procs, and added callback compatibility to daemonapi (#1240) 2025-02-19 15:00:44 +00:00
vladopajic
8469a750e7 chore: add description of public pragma (#1262) 2025-02-19 14:14:50 +01:00
vladopajic
fc6ac07ce8 ci: utilize github action for nph check (#1257) 2025-02-17 15:42:58 +00:00
vladopajic
79cdc31b37 ci: remove commit message check (#1256) 2025-02-17 12:35:42 +00:00
vladopajic
be33ad6ac7 chore: specify raising exceptions in daemon module (#1249) 2025-02-14 15:36:29 +01:00
vladopajic
a6e45d6157 chore: list raised exceptions in utils module (#1252)
Part of the https://github.com/vacp2p/nim-libp2p/issues/962 effort.
2025-02-13 11:27:29 -04:00
richΛrd
37e0f61679 chore: update maintainers (#1248) 2025-02-11 14:32:12 -04:00
vladopajic
5d382b6423 style: fix code style with nph (#1246) 2025-02-11 15:39:08 +00:00
richΛrd
78a4344054 refactor(pubsub): do not raise exceptions on publish (#1244) 2025-02-07 01:30:21 +00:00
Ivan FB
a4f0a638e7 chore: add isClosedRemotely public bool attribute in lpstream (#1242)
Also adds the gcsafe pragma to the getStreams method so that it can be invoked from async procs
2025-02-06 09:54:00 -04:00
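
A rough sketch of why that matters, assuming made-up names rather than the actual lpstream API: chronos async procs are implicitly gcsafe, so a `getStreams`-style accessor that touches a global registry must be gcsafe as well.

```nim
import chronos

type LPStreamish = ref object
  isClosedRemotely*: bool # sketch of the new public attribute

var allStreams: seq[LPStreamish] # illustrative global stream registry

proc getStreams(): seq[LPStreamish] {.gcsafe.} =
  # the gcsafe override asserts that this global access is safe,
  # which is what lets async (gcsafe) procs call getStreams
  {.gcsafe.}:
    result = allStreams

proc monitor() {.async.} =
  for s in getStreams():
    if s.isClosedRemotely:
      echo "remote side closed this stream"

when isMainModule:
  allStreams.add LPStreamish(isClosedRemotely: true)
  waitFor monitor()
```
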
richΛrd
c5aa3736f9 chore(version): update libp2p.nimble to 1.8.0 (#1236)
This PR's commit is planned to be tagged as v1.8.0
2025-01-22 08:45:56 -04:00
richΛrd
b0f83fd48c fix: use target repository branch as suffix for nim-libp2p-auto-bump- (#1238)
The branch `nim-libp2p-auto-bump-unstable` was not getting autobumped
because at one point the nim-libp2p `unstable` branch was renamed to
`master`. Since the workflow file depended on `GITHUB_REF`, it was
updating `nim-libp2p-auto-bump-master` instead of
`nim-libp2p-auto-bump-unstable`
2025-01-15 14:25:21 -04:00
richΛrd
d6e5094095 fix(pubsub): revert async: raises: [] annotation for TopicHandler and ValidatorHandler (#1237) 2025-01-15 03:35:24 +00:00
richΛrd
483e1d91ba feat: idontwant on publish (#1230)
Closes #1224 

The following changes were done:
1. Once a message is broadcast, if it exceeds the threshold for being
considered a large message, an IDONTWANT message is sent before the
message is published

4f9c21f699/libp2p/protocols/pubsub/gossipsub.nim (L800-L801)
2. Extracts the logic to broadcast an IDONTWANT message and to verify
the size of a message into separate functions
3. Adds a template to filter values of a hashset without modifying the
original
2025-01-14 16:59:38 -04:00
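
A minimal, self-contained Nim sketch of that flow, with stand-in names and a stand-in threshold (the real logic lives in gossipsub.nim at the permalink above):

```nim
import std/sets

const largeMsgThreshold = 1000 # stand-in value; gossipsub defines the real constant

proc sendIDontWant(peers: HashSet[string], msgId: string) =
  # point 2: the IDONTWANT broadcast extracted into its own proc
  for p in peers:
    echo "IDONTWANT ", msgId, " -> ", p

template filteredPeers(s, pred: untyped): untyped =
  # point 3: filter a hashset without modifying the original
  var res = initHashSet[string]()
  for it {.inject.} in s:
    if pred:
      res.incl it
  res

proc publish(peers: HashSet[string], msgId: string, data: seq[byte]) =
  # point 1: large messages are announced via IDONTWANT before being published
  if data.len > largeMsgThreshold:
    sendIDontWant(peers, msgId)
  echo "publishing ", msgId, " (", data.len, " bytes)"

when isMainModule:
  let peers = toHashSet(["peerA", "peerB", "peerC"])
  let others = peers.filteredPeers(it != "peerA")
  publish(others, "msg-1", newSeq[byte](2048))
```
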
richΛrd
d215bb21e0 fix(multiaddress): don't re-export minprotobuf (#1235) 2025-01-14 15:47:06 -04:00
richΛrd
61ac0c5b95 feat(pubsub): add {.async: (raises).} annotations (#1233)
This PR adds `{.async: (raises).}` annotations to the pubsub package.
The cases in which a `raises: [CatchableError]` was added involve code
that is not part of the package and should probably be changed in a
separate PR
2025-01-14 17:01:02 +01:00
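
For context, a minimal chronos example of the annotation style being introduced here (the proc names are illustrative):

```nim
import chronos

proc fetchValue(n: int): Future[int] {.async: (raises: [CancelledError, ValueError]).} =
  # the raises list declares exactly which exceptions may escape this proc
  await sleepAsync(10.milliseconds)
  if n < 0:
    raise newException(ValueError, "negative input")
  return n * 2

proc caller() {.async: (raises: [CancelledError]).} =
  # ValueError is not in caller's raises list, so it must be handled here
  try:
    echo await fetchValue(21)
  except ValueError as e:
    echo "handled: ", e.msg

when isMainModule:
  waitFor caller()
```
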
diegomrsantos
1fa30f07e8 chore(ci): add arm64 for macOS (#1212)
This PR adds the macOS 14 GitHub runner that uses the arm64 cpu.
2024-12-20 21:18:56 -04:00
richΛrd
39d0451a10 chore: validate PR titles and commits, and autoassign PRs (#1227) 2024-12-20 15:42:54 +01:00
richΛrd
4dc7a89f45 chore: use latest nimpng and nico (#1229) 2024-12-17 14:37:06 -04:00
richΛrd
fd26f93b80 fix(ci): use nim 2.0 branch (#1228) 2024-12-09 19:41:08 +00:00
Etan Kissling
dd2c74d413 feat(nameresolving): Add {.async: (raises).} annotations (#1214)
Modernize `nameresolving` modules to track `{.async: (raises).}`.

---------

Co-authored-by: kaiserd <1684595+kaiserd@users.noreply.github.com>
Co-authored-by: richΛrd <info@richardramos.me>
2024-12-09 12:43:21 +01:00
Eugene Kabanov
b7e0df127f Fix PeerStore missing remote endpoints of established connection. (#1226) 2024-11-27 14:49:41 +02:00
kaiserd
f591e692fc chore(version): update libp2p.nimble to 1.7.1 (#1223)
minor version for rendezvous fix
2024-11-09 10:51:33 +07:00
NagyZoltanPeter
8855bce085 fix:missing raises pragma for one of RendezVous.new (#1222) 2024-11-08 11:11:51 +01:00
kaiserd
ed5670408b chore(version): update libp2p.nimble to 1.7.0 (#1213)
This commit is planned to be tagged with v1.7.0.
The only feature added in this version is configurable min and max TTL
for rendezvous.

Co-authored-by: ksr <kaiserd@users.noreply.github.com>
2024-11-03 18:15:33 +00:00
Álex
97192a3c80 fix(ci): nim 2.0 dependency failure (#1216)
Broken due to an update to Nimble 0.16.2 for the version-2-0 branch.
This PR sets the ref to the previous commit. It also updates the key
naming so it fits better.

Nim's commit list: https://github.com/nim-lang/Nim/commits/version-2-0/

# Important Note
In this PR, some required jobs' names were changed. They need to be
updated at the repo level so this PR can be merged.
2024-10-31 17:56:13 +00:00
Álex
294d06323c docs(test): handle IHAVE / IWANT tests (#1202)
Add documentation as requested.
2024-10-31 17:23:24 +00:00
Álex
a3b8729cbe fix(ci): Daily workflows report (#1200)
Fix daily workflows' green tick when jobs failed.

Based off of: https://stackoverflow.com/a/58859404

Closes:  https://github.com/vacp2p/nim-libp2p/issues/1197

---------

Co-authored-by: Diego <diego@status.im>
2024-10-10 12:10:49 +00:00
Álex
6c970911f2 fix(CI): free disk space on interop transport job (#1206)
Re-add the free disk space job, as space issues still happen when
building images.

---------

Co-authored-by: Diego <diego@status.im>
2024-10-10 09:42:25 +00:00
Álex
5d48776b02 chore(ci): Enable S3 caching for interop (#1193)
- Adds our S3 bucket for caching docker images as Protocol Labs shut
down their shared one.
- Removes the free disk space workaround that prevented the jobs from
failing due to using too much space for the images.

---------

Co-authored-by: diegomrsantos <diego@status.im>
2024-09-26 09:56:09 +00:00
Simon-Pierre Vivier
d389d96789 feat: rendezvous refactor (#1183)
Hello!

This PR aims to refactor the rendezvous code so that it is easier to
implement the Waku rendezvous strategy. The hardcoded min and max TTL
were out of range for what we needed, and specifying which peers to
interact with is also needed, since Waku deals with peers on multiple
separate shards.

I tried to keep the changes to a minimum; specifically, I did not change
the name of any public procs, which results in less-than-descriptive
names in some cases. I also wanted to return results instead of raising
exceptions but didn't. Would it be acceptable to do so?

Please advise on best practices, thank you.

---------

Co-authored-by: Ludovic Chenut <ludovic@status.im>
2024-09-25 09:11:57 +00:00
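
A rough Nim sketch of the shape this enables, with illustrative types and names rather than the actual nim-libp2p API: the TTL bounds become constructor parameters instead of hardcoded constants, and operations take an explicit peer subset.

```nim
type
  PeerId = string
  RendezVous = object
    minTtl, maxTtl: int # seconds; previously hardcoded constants

proc new(T: typedesc[RendezVous], minTtl = 2 * 3600, maxTtl = 72 * 3600): T =
  T(minTtl: minTtl, maxTtl: maxTtl)

proc advertise(rdv: RendezVous, namespace: string, ttl: int, peers: seq[PeerId]) =
  # reject TTLs outside the configured bounds and contact only the given
  # peers, e.g. those of a single Waku shard
  doAssert rdv.minTtl <= ttl and ttl <= rdv.maxTtl, "TTL out of configured bounds"
  for p in peers:
    echo "advertise ", namespace, " (ttl=", ttl, "s) -> ", p

when isMainModule:
  let rdv = RendezVous.new(minTtl = 60, maxTtl = 600) # custom bounds
  rdv.advertise("waku/rs/1/3", ttl = 300, peers = @["peerA", "peerB"])
```
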
220 changed files with 13665 additions and 4997 deletions


@@ -1,52 +0,0 @@
version: '{build}'
image: Visual Studio 2015
cache:
- NimBinaries
- p2pdCache
matrix:
# We always want 32 and 64-bit compilation
fast_finish: false
platform:
- x86
- x64
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
clone_depth: 10
install:
- git submodule update --init --recursive
# use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
- IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
- IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%
# build nim from our own branch - this to avoid the day-to-day churn and
# regressions of the fast-paced Nim development while maintaining the
# flexibility to apply patches
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
- env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries
- SET PATH=%CD%\Nim\bin;%PATH%
# set path for produced Go binaries
- MKDIR goblin
- CD goblin
- SET GOPATH=%CD%
- SET PATH=%GOPATH%\bin;%PATH%
- CD ..
# install and build go-libp2p-daemon
- bash scripts/build_p2pd.sh p2pdCache v0.3.0
build_script:
- nimble install -y --depsOnly
test_script:
- nimble test
- nimble examples_build
deploy: off

.github/CODEOWNERS (new file)

@@ -0,0 +1 @@
* @vacp2p/p2p


@@ -6,7 +6,7 @@ inputs:
cpu:
description: "CPU to build for"
default: "amd64"
nim_branch:
nim_ref:
description: "Nim version"
default: "version-1-6"
shell:
@@ -88,6 +88,8 @@ runs:
run: |
if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
PLATFORM=x64
elif [[ '${{ inputs.cpu }}' == 'arm64' ]]; then
PLATFORM=arm64
else
PLATFORM=x86
fi
@@ -117,7 +119,7 @@ runs:
uses: actions/cache@v4
with:
path: '${{ github.workspace }}/nim'
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_ref }}-cache-${{ env.cache_nonce }}
- name: Build Nim and Nimble
shell: ${{ inputs.shell }}
@@ -126,6 +128,6 @@ runs:
# We don't want partial matches of the cache restored
rm -rf nim
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_branch }} \
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_ref }} \
QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
bash build_nim.sh nim csources dist/nimble NimBinaries

.github/workflows/auto_assign_pr.yml (new file)

@@ -0,0 +1,12 @@
name: Auto Assign PR to Creator
on:
pull_request:
types:
- opened
jobs:
assign_creator:
runs-on: ubuntu-latest
steps:
- uses: toshimaru/auto-author-assign@v1.6.2


@@ -14,7 +14,7 @@ concurrency:
jobs:
test:
timeout-minutes: 90
timeout-minutes: 40
strategy:
fail-fast: false
matrix:
@@ -27,12 +27,16 @@ jobs:
cpu: amd64
- os: macos
cpu: amd64
- os: macos-14
cpu: arm64
- os: windows
cpu: amd64
nim:
- branch: version-1-6
nim:
- ref: version-1-6
memory_management: refc
- branch: version-2-0
- ref: version-2-0
memory_management: refc
- ref: version-2-2
memory_management: refc
include:
- platform:
@@ -47,6 +51,10 @@ jobs:
os: macos
builder: macos-13
shell: bash
- platform:
os: macos-14
builder: macos-14
shell: bash
- platform:
os: windows
builder: windows-2022
@@ -56,7 +64,7 @@ jobs:
run:
shell: ${{ matrix.shell }}
name: '${{ matrix.platform.os }}-${{ matrix.platform.cpu }} (Nim ${{ matrix.nim.branch }})'
name: '${{ matrix.platform.os }}-${{ matrix.platform.cpu }} (Nim ${{ matrix.nim.ref }})'
runs-on: ${{ matrix.builder }}
steps:
- name: Checkout
@@ -70,12 +78,12 @@ jobs:
os: ${{ matrix.platform.os }}
cpu: ${{ matrix.platform.cpu }}
shell: ${{ matrix.shell }}
nim_branch: ${{ matrix.nim.branch }}
nim_ref: ${{ matrix.nim.ref }}
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '~1.15.5'
go-version: '~1.16.0' # That's the minimum Go version that works with arm.
- name: Install p2pd
run: |
@@ -86,9 +94,9 @@ jobs:
uses: actions/cache@v3
with:
path: nimbledeps
# Using nim.branch as a simple way to differentiate between nimble using the "pkgs" or "pkgs2" directories.
# The change happened on Nimble v0.14.0.
key: nimbledeps-${{ matrix.nim.branch }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows
# Using nim.ref as a simple way to differentiate between nimble using the "pkgs" or "pkgs2" directories.
# The change happened on Nimble v0.14.0. Also forcing the deps to be reinstalled on each os and cpu.
key: nimbledeps-${{ matrix.nim.ref }}-${{ matrix.builder }}-${{ matrix.platform.cpu }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
@@ -109,5 +117,7 @@ jobs:
nim --version
nimble --version
gcc --version
NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble test
nimble testdaemon


@@ -6,9 +6,26 @@ on:
workflow_dispatch:
jobs:
test_amd64:
name: Daily amd64
test_amd64_latest:
name: Daily amd64 (latest dependencies)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'branch': 'version-1-6', 'memory_management': 'refc'}, {'branch': 'version-2-0', 'memory_management': 'refc'}]"
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['amd64']"
test_amd64_pinned:
name: Daily amd64 (pinned dependencies)
uses: ./.github/workflows/daily_common.yml
with:
pinned_deps: true
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['amd64']"


@@ -4,10 +4,15 @@ name: Daily Common
on:
workflow_call:
inputs:
pinned_deps:
description: 'Should dependencies be installed from pinned file or use latest versions'
required: false
type: boolean
default: false
nim:
description: 'Nim Configuration'
required: true
type: string # Following this format: [{"branch": ..., "memory_management": ...}, ...]
type: string # Following this format: [{"ref": ..., "memory_management": ...}, ...]
cpu:
description: 'CPU'
required: true
@@ -17,26 +22,18 @@ on:
required: false
type: string
default: "[]"
use_sat_solver:
description: 'Install dependencies with SAT Solver'
required: false
type: boolean
default: false
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
delete_cache:
name: Delete github action's branch cache
runs-on: ubuntu-latest
continue-on-error: true
steps:
- uses: snnaplab/delete-branch-cache-action@v1
test:
needs: delete_cache
timeout-minutes: 90
timeout-minutes: 40
strategy:
fail-fast: false
matrix:
@@ -58,9 +55,8 @@ jobs:
run:
shell: ${{ matrix.platform.shell }}
name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.nim.branch }})'
name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.nim.ref }})'
runs-on: ${{ matrix.platform.builder }}
continue-on-error: ${{ matrix.nim.branch == 'devel' || matrix.nim.branch == 'version-2-0' }}
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -70,20 +66,26 @@ jobs:
with:
os: ${{ matrix.platform.os }}
shell: ${{ matrix.platform.shell }}
nim_branch: ${{ matrix.nim.branch }}
nim_ref: ${{ matrix.nim.ref }}
cpu: ${{ matrix.cpu }}
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '~1.15.5'
go-version: '~1.16.0'
cache: false
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Install dependencies
- name: Install dependencies (pinned)
if: ${{ inputs.pinned_deps }}
run: |
nimble install_pinned
- name: Install dependencies (latest)
if: ${{ inputs.pinned_deps != 'true' }}
run: |
nimble install -y --depsOnly
@@ -92,11 +94,7 @@ jobs:
nim --version
nimble --version
if [[ "${{ inputs.use_sat_solver }}" == "true" ]]; then
dependency_solver="sat"
else
dependency_solver="legacy"
fi
NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }} --solver:${dependency_solver}"
export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble test
nimble testintegration
nimble testdaemon


@@ -1,14 +0,0 @@
name: Daily Nim Devel
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
test_nim_devel:
name: Daily Nim Devel
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'branch': 'devel', 'memory_management': 'orc'}]"
cpu: "['amd64']"


@@ -10,6 +10,14 @@ jobs:
name: Daily i386 (Linux)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'branch': 'version-1-6', 'memory_management': 'refc'}, {'branch': 'version-2-0', 'memory_management': 'refc'}, {'branch': 'devel', 'memory_management': 'orc'}]"
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['i386']"
exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"
exclude: "[
{'platform': {'os':'macos'}},
{'platform': {'os':'windows'}},
]"


@@ -1,15 +0,0 @@
name: Daily SAT
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
test_amd64:
name: Daily SAT
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'branch': 'version-2-0', 'memory_management': 'refc'}]"
cpu: "['amd64']"
use_sat_solver: true


@@ -17,10 +17,13 @@ jobs:
target:
- repository: status-im/nimbus-eth2
ref: unstable
secret: ACTIONS_GITHUB_TOKEN_NIMBUS_ETH2
- repository: waku-org/nwaku
ref: master
secret: ACTIONS_GITHUB_TOKEN_NWAKU
- repository: codex-storage/nim-codex
ref: master
secret: ACTIONS_GITHUB_TOKEN_NIM_CODEX
steps:
- name: Clone target repository
uses: actions/checkout@v4
@@ -29,7 +32,7 @@ jobs:
ref: ${{ matrix.target.ref}}
path: nbc
fetch-depth: 0
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
token: ${{ secrets[matrix.target.secret] }}
- name: Checkout this ref in target repository
run: |
@@ -44,7 +47,7 @@ jobs:
git config --global user.email "${{ github.actor }}@users.noreply.github.com"
git config --global user.name = "${{ github.actor }}"
git commit --allow-empty -a -m "auto-bump nim-libp2p"
git branch -D nim-libp2p-auto-bump-${GITHUB_REF##*/} || true
git switch -c nim-libp2p-auto-bump-${GITHUB_REF##*/}
git push -f origin nim-libp2p-auto-bump-${GITHUB_REF##*/}
git branch -D nim-libp2p-auto-bump-${{ matrix.target.ref }} || true
git switch -c nim-libp2p-auto-bump-${{ matrix.target.ref }}
git push -f origin nim-libp2p-auto-bump-${{ matrix.target.ref }}

.github/workflows/examples.yml (new file)

@@ -0,0 +1,60 @@
name: Examples
on:
push:
branches:
- master
pull_request:
merge_group:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
examples:
timeout-minutes: 30
strategy:
fail-fast: false
defaults:
run:
shell: bash
name: "Build Examples"
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: true
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
shell: bash
os: linux
cpu: amd64
nim_ref: version-1-6
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v3
with:
path: nimbledeps
key: nimbledeps-${{ hashFiles('.pinned') }}
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
run: |
nimble install_pinned
- name: Build and run examples
run: |
nim --version
nimble --version
gcc --version
NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble examples


@@ -17,8 +17,9 @@ jobs:
name: Run transport interoperability tests
runs-on: ubuntu-22.04
steps:
- name: Free Disk Space (Ubuntu)
# For some reason the original job (libp2p/test-plans) has enough disk space, but this one doesn't.
- name: Free Disk Space
# For some reason we have space issues while running this action. Likely while building the image.
# This action will free up some space to avoid the issue.
uses: jlumbroso/free-disk-space@v1.3.1
with:
tool-cache: true
@@ -26,12 +27,19 @@ jobs:
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f tests/transport-interop/Dockerfile .
run: docker buildx build --load -t nim-libp2p-head -f interop/transport/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/tests/transport-interop/version.json
# without suffix action fails because "hole-punching-interop" artifacts have
# the same name as "transport-interop" artifacts
test-results-suffix: transport-interop
extra-versions: ${{ github.workspace }}/interop/transport/version.json
s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
run-hole-punching-interop:
name: Run hole-punching interoperability tests
@@ -40,9 +48,13 @@ jobs:
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
run: docker buildx build --load -t nim-libp2p-head -f interop/hole-punching/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json
extra-versions: ${{ github.workspace }}/interop/hole-punching/version.json
s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}


@@ -18,17 +18,10 @@ jobs:
with:
fetch-depth: 2 # In PR, has extra merge commit: ^1 = PR, ^2 = base
- name: Setup NPH
# Pin nph to a specific version to avoid sudden style differences.
# Updating nph version should be accompanied with running the new version on the fluffy directory.
run: |
VERSION="v0.5.1"
ARCHIVE="nph-linux_x64.tar.gz"
curl -L "https://github.com/arnetheduck/nph/releases/download/${VERSION}/${ARCHIVE}" -o ${ARCHIVE}
tar -xzf ${ARCHIVE}
- name: Check style
run: |
shopt -s extglob # Enable extended globbing
./nph examples libp2p tests tools *.@(nim|nims|nimble)
git diff --exit-code
- name: Check `nph` formatting
uses: arnetheduck/nph-action@v1
with:
version: 0.6.1
options: "examples libp2p tests interop tools *.nim*"
fail: true
suggest: true

.github/workflows/pr_lint.yml (new file)

@@ -0,0 +1,35 @@
name: "Conventional Commits"
on:
pull_request:
types:
- opened
- edited
- reopened
- synchronize
jobs:
main:
name: Validate PR title
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- uses: amannn/action-semantic-pull-request@v5
id: lint_pr_title
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- uses: marocchino/sticky-pull-request-comment@v2
# When the previous steps fails, the workflow would stop. By adding this
# condition you can continue the execution with the populated error message.
if: always() && (steps.lint_pr_title.outputs.error_message != null)
with:
header: pr-title-lint-error
message: |
Pull requests titles must follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/)
# Delete a previous comment when the issue has been resolved
- if: ${{ steps.lint_pr_title.outputs.error_message == null }}
uses: marocchino/sticky-pull-request-comment@v2
with:
header: pr-title-lint-error
delete: true

.gitignore

@@ -17,3 +17,11 @@ examples/*.md
nimble.develop
nimble.paths
go-libp2p-daemon/
# Ignore all test build files in tests folder (auto generated when running tests).
# First rule (`tests/**/test*[^.]*`) will ignore all binaries: has prefix test + does not have dot in name.
# Second and third rules are here to un-ignore all files with an extension and Dockerfiles,
# because it appears that VS Code skips text search in some test files without these rules.
tests/**/test*[^.]*
!tests/**/*.*
!tests/**/Dockerfile

.pinned

@@ -1,19 +1,22 @@
bearssl;https://github.com/status-im/nim-bearssl@#667b40440a53a58e9f922e29e20818720c62d9ac
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#c04576d829b8a0a1b12baaa8bc92037501b3a4a0
bearssl;https://github.com/status-im/nim-bearssl@#34d712933a4e0f91f5e66bc848594a581504a215
chronicles;https://github.com/status-im/nim-chronicles@#81a4a7a360c78be9c80c8f735c76b6d4a1517304
chronos;https://github.com/status-im/nim-chronos@#b55e2816eb45f698ddaca8d8473e401502562db2
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
json_serialization;https://github.com/status-im/nim-json-serialization@#85b7ea093cb85ee4f433a617b97571bd709d30df
faststreams;https://github.com/status-im/nim-faststreams@#c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d
httputils;https://github.com/status-im/nim-http-utils@#79cbab1460f4c0cdde2084589d017c43a3d7b4f1
json_serialization;https://github.com/status-im/nim-json-serialization@#2b1c5eb11df3647a2cee107cd4cce3593cbb8bcf
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
ngtcp2;https://github.com/status-im/nim-ngtcp2@#6834f4756b6af58356ac9c4fef3d71db3c3ae5fe
nimcrypto;https://github.com/cheatfate/nimcrypto@#1c8d6e3caf3abc572136ae9a1da81730c4eb4288
quic;https://github.com/status-im/nim-quic.git@#ddcb31ffb74b5460ab37fd13547eca90594248bc
results;https://github.com/arnetheduck/nim-results@#f3c666a272c69d70cb41e7245e7f6844797303ad
secp256k1;https://github.com/status-im/nim-secp256k1@#7246d91c667f4cc3759fdd50339caa45a2ecd8be
serialization;https://github.com/status-im/nim-serialization@#4bdbc29e54fe54049950e352bb969aab97173b35
stew;https://github.com/status-im/nim-stew@#3159137d9a3110edb4024145ce0ba778975de40e
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
unittest2;https://github.com/status-im/nim-unittest2@#2300fa9924a76e6c96bc4ea79d043e3a0f27120c
websock;https://github.com/status-im/nim-websock@#f8ed9b40a5ff27ad02a3c237c4905b0924e3f982
zlib;https://github.com/status-im/nim-zlib@#38b72eda9d70067df4a953f56b5ed59630f2a17b
ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
nimcrypto;https://github.com/cheatfate/nimcrypto@#19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
quic;https://github.com/status-im/nim-quic.git@#ca3eda53bee9cef7379be195738ca1490877432f
results;https://github.com/arnetheduck/nim-results@#df8113dda4c2d74d460a8fa98252b0b771bf1f27
secp256k1;https://github.com/status-im/nim-secp256k1@#f808ed5e7a7bfc42204ec7830f14b7a42b63c284
serialization;https://github.com/status-im/nim-serialization@#548d0adc9797a10b2db7f788b804330306293088
stew;https://github.com/status-im/nim-stew@#0db179256cf98eb9ce9ee7b9bc939f219e621f77
testutils;https://github.com/status-im/nim-testutils@#9e842bd58420d23044bc55e16088e8abbe93ce51
unittest2;https://github.com/status-im/nim-unittest2@#8b51e99b4a57fcfb31689230e75595f024543024
websock;https://github.com/status-im/nim-websock@#d5cd89062cd2d168ef35193c7d29d2102921d97e
zlib;https://github.com/status-im/nim-zlib@#daa8723fd32299d4ca621c837430c29a5a11e19a
jwt;https://github.com/vacp2p/nim-jwt@#18f8378de52b241f321c1f9ea905456e89b95c6f
bearssl_pkey_decoder;https://github.com/vacp2p/bearssl_pkey_decoder@#21dd3710df9345ed2ad8bf8f882761e07863b8e0
bio;https://github.com/xzeshen/bio@#0f5ed58b31c678920b6b4f7c1783984e6660be97

README.md

@@ -20,39 +20,120 @@
- [Background](#background)
- [Install](#install)
- [Getting Started](#getting-started)
- [Go-libp2p-daemon](#go-libp2p-daemon)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
- [Development](#development)
- [Contribute](#contribute)
- [Contributors](#contributors)
- [Core Maintainers](#core-maintainers)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
- [License](#license)
## Background
libp2p is a [Peer-to-Peer](https://en.wikipedia.org/wiki/Peer-to-peer) networking stack, with [implementations](https://github.com/libp2p/libp2p#implementations) in multiple languages derived from the same [specifications.](https://github.com/libp2p/specs)
Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It's striving to be a modular stack, with sane and secure defaults, useful protocols, while remain open and extensible.
This implementation in native Nim, relying on [chronos](https://github.com/status-im/nim-chronos) for async. It's used in production by a few [projects](#users)
Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It strives to be a modular stack with secure defaults and useful protocols, while remaining open and extensible.
This is a native Nim implementation, using [chronos](https://github.com/status-im/nim-chronos) for asynchronous execution. It's used in production by a few [projects](#users)
Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p's documentation [**docs.libp2p.io**](https://docs.libp2p.io).
## Install
**Prerequisite**
- [Nim](https://nim-lang.org/install.html)
> The currently supported Nim version is 1.6.18.
> The currently supported Nim versions are 1.6, 2.0 and 2.2.
```
nimble install libp2p
```
You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/). See [examples](./examples) for simple usage patterns.
## Getting Started
You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/).
Try out the chat example. For this you'll need to have [`go-libp2p-daemon`](examples/go-daemon/daemonapi.md) running. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
```bash
nim c -r --threads:on examples/directchat.nim
```
This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.
```bash
./examples/directchat
/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu # replace this hash with the one you were given
```
You can now chat between the instances!
![Chat example](https://imgur.com/caYRu8K.gif)
## Development
Clone the repository and install the dependencies:
```sh
git clone https://github.com/vacp2p/nim-libp2p
cd nim-libp2p
nimble install -dy
```
### Testing
Remember you'll need to build the `go-libp2p-daemon` binary to run the `nim-libp2p` tests.
To do so, please follow the installation instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
Run unit tests:
```sh
# run all the unit tests
nimble test
```
**Note:** Running all tests requires the [`go-libp2p-daemon` to be installed and running](examples/go-daemon/daemonapi.md).
If you only want to run tests that don't require `go-libp2p-daemon`, use:
```
nimble testnative
```
For a list of all available test suites, use:
```
nimble tasks
```
### Contribute
The libp2p implementation in Nim is a work in progress. We welcome contributors to help out! Specifically, you can:
- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
- **Code format**. Code should be formatted with [nph](https://github.com/arnetheduck/nph) and follow the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
### Contributors
<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
### Core Maintainers
<table>
<tbody>
<tr>
<td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
<td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
<td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
</tr>
</tbody>
</table>
### Compile time flags
Enable quic transport support
```bash
nim c -d:libp2p_quic_support some_file.nim
```
Enable expensive metrics (ie, metrics with per-peer cardinality):
```bash
nim c -d:libp2p_expensive_metrics some_file.nim
```
Set list of known libp2p agents for metrics:
```bash
nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku some_file.nim
```
Specify gossipsub specific topics to measure in the metrics:
```bash
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
```
## Modules
List of packages modules implemented in nim-libp2p:
@@ -70,6 +151,8 @@ List of packages modules implemented in nim-libp2p:
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
| [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
| [libp2p-quic](libp2p/transports/quictransport.nim) | Quic Transport |
| [libp2p-memory](libp2p/transports/memorytransport.nim) | Memory Transport |
| **Secure Channels** | |
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
@@ -78,10 +161,10 @@ List of packages modules implemented in nim-libp2p:
| [libp2p-yamux](libp2p/muxers/yamux/yamux.nim) | [Yamux](https://docs.libp2p.io/concepts/multiplex/yamux/) multiplexer |
| **Data Types** | |
| [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id) |
| [peer-store](libp2p/peerstore.nim) | ["Address book" of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
| [peer-store](libp2p/peerstore.nim) | [Address book of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
| [multiaddress](libp2p/multiaddress.nim) | [Composable network addresses](https://github.com/multiformats/multiaddr) |
| [signed envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
| [routing record](libp2p/routing_record.nim) | [Signed peer dialing informations](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
| [signed-envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
| [routing-record](libp2p/routing_record.nim) | [Signed peer dialing informations](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
| [discovery manager](libp2p/discovery/discoverymngr.nim) | Discovery Manager |
| **Utilities** | |
| [libp2p-crypto](libp2p/crypto) | Cryptographic backend |
@@ -109,66 +192,6 @@ The versioning follows [semver](https://semver.org/), with some additions:
We aim to be compatible at all time with at least 2 Nim `MINOR` versions, currently `1.6 & 2.0`
## Development
Clone and Install dependencies:
```sh
git clone https://github.com/vacp2p/nim-libp2p
cd nim-libp2p
# to use dependencies computed by nimble
nimble install -dy
# OR to install the dependencies versions used in CI
nimble install_pinned
```
Run unit tests:
```sh
# run all the unit tests
nimble test
```
This requires the go daemon to be available. To only run native tests, use `nimble testnative`.
Or use `nimble tasks` to show all available tasks.
### Contribute
The libp2p implementation in Nim is a work in progress. We welcome contributors to help out! Specifically, you can:
- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
- **Code format**. Please format code using [nph](https://github.com/arnetheduck/nph) v0.5.1. This will ensure a consistent codebase and make PRs easier to review. A CI rule has been added to ensure that future commits are all formatted using the same nph version.
The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
### Contributors
<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
### Core Maintainers
<table>
<tbody>
<tr>
<td align="center"><a href="https://github.com/Menduist"><img src="https://avatars.githubusercontent.com/u/13471753?v=4?s=100" width="100px;" alt="Tanguy"/><br /><sub><b>Tanguy (Menduist)</b></sub></a></td>
<td align="center"><a href="https://github.com/lchenut"><img src="https://avatars.githubusercontent.com/u/11214565?v=4?s=100" width="100px;" alt="Ludovic"/><br /><sub><b>Ludovic</b></sub></a></td>
<td align="center"><a href="https://github.com/diegomrsantos"><img src="https://avatars.githubusercontent.com/u/7316595?v=4?s=100" width="100px;" alt="Diego"/><br /><sub><b>Diego</b></sub></a></td>
</tr>
</tbody>
</table>
### Compile time flags
Enable expensive metrics (ie, metrics with per-peer cardinality):
```bash
nim c -d:libp2p_expensive_metrics some_file.nim
```
Set list of known libp2p agents for metrics:
```bash
nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku some_file.nim
```
Specify gossipsub specific topics to measure in the metrics:
```bash
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
```
## License
Licensed and distributed under either of


@@ -4,6 +4,7 @@ if dirExists("nimbledeps/pkgs"):
if dirExists("nimbledeps/pkgs2"):
switch("NimblePath", "nimbledeps/pkgs2")
switch("warningAsError", "UnusedImport:on")
switch("warning", "CaseTransition:off")
switch("warning", "ObservableStores:off")
switch("warning", "LockLevel:off")


@@ -26,15 +26,22 @@ proc main() {.async.} =
let customProtoCodec = "/test"
var proto = new LPProtocol
proto.codec = customProtoCodec
proto.handler = proc(conn: Connection, proto: string) {.async.} =
var msg = string.fromBytes(await conn.readLp(1024))
echo "1 - Dst Received: ", msg
assert "test1" == msg
await conn.writeLp("test2")
msg = string.fromBytes(await conn.readLp(1024))
echo "2 - Dst Received: ", msg
assert "test3" == msg
await conn.writeLp("test4")
proto.handler = proc(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
var msg = string.fromBytes(await conn.readLp(1024))
echo "1 - Dst Received: ", msg
assert "test1" == msg
await conn.writeLp("test2")
msg = string.fromBytes(await conn.readLp(1024))
echo "2 - Dst Received: ", msg
assert "test3" == msg
await conn.writeLp("test4")
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
let
relay = Relay.new()


@@ -43,12 +43,17 @@ proc new(T: typedesc[ChatProto], c: Chat): T =
let chatproto = T()
# create handler for incoming connection
proc handle(stream: Connection, proto: string) {.async.} =
if c.connected and not c.conn.closed:
c.writeStdout "a chat session is already in progress - refusing incoming peer!"
await stream.close()
else:
await c.handlePeer(stream)
proc handle(stream: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
if c.connected and not c.conn.closed:
c.writeStdout "a chat session is already in progress - refusing incoming peer!"
else:
await c.handlePeer(stream)
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await stream.close()
# assign the new handler


@@ -0,0 +1,3 @@
{.used.}
import directchat, tutorial_6_game


@@ -0,0 +1,5 @@
{.used.}
import
helloworld, circuitrelay, tutorial_1_connect, tutorial_2_customproto,
tutorial_3_protobuf, tutorial_4_gossipsub, tutorial_5_discovery


@@ -93,8 +93,8 @@ proc serveThread(udata: CustomData) {.async.} =
pending.add(item.write(msg))
if len(pending) > 0:
var results = await all(pending)
except:
echo getCurrentException().msg
except CatchableError as err:
echo err.msg
proc main() {.async.} =
var data = new CustomData


@@ -3,9 +3,7 @@
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Script](#script)
- [Usage](#usage)
- [Example](#example)
- [Getting Started](#getting-started)
- [Examples](#examples)
# Introduction
This is a libp2p-backed daemon wrapping the functionalities of go-libp2p for use in Nim. <br>
@@ -13,20 +11,25 @@ For more information about the go daemon, check out [this repository](https://gi
> **Required only** for running the tests.
# Prerequisites
Go with version `1.15.15`.
Go with version `1.16.0`
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.
# Installation
Follow one of the methods below:
## Script
Run the build script while having the `go` command pointing to the correct Go version.
We recommend using `1.15.15`, as previously stated.
```sh
./scripts/build_p2pd.sh
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the correct directory.
If you find any issues, please head into our discord and ask for our assistance.
`build_p2pd.sh` will not rebuild unless needed. If you already have the newest binary and you want to force the rebuild, use:
```sh
./scripts/build_p2pd.sh -f
```
Or:
```sh
./scripts/build_p2pd.sh --force
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the `$GOPATH/bin` directory.
If you're having issues, head into [our discord](https://discord.com/channels/864066763682218004/1115526869769535629) and ask for assistance.
After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
```sh
@@ -34,28 +37,7 @@ export PATH="$PATH:$HOME/go/bin"
```
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.
# Usage
## Example
# Examples
Examples can be found in the [examples folder](https://github.com/status-im/nim-libp2p/tree/readme/examples/go-daemon)
## Getting Started
Try out the chat example. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
```bash
nim c -r --threads:on examples/directchat.nim
```
This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.
```bash
./examples/directchat
/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu
```
You can now chat between the instances!
![Chat example](https://imgur.com/caYRu8K.gif)

View File

@@ -11,12 +11,17 @@ type TestProto = ref object of LPProtocol # declare a custom protocol
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async.} =
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
await conn.writeLp("Roger p2p!")
# We must close the connections ourselves when we're done with it
await conn.close()
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
await conn.writeLp("Roger p2p!")
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
# We must close the connections ourselves when we're done with it
await conn.close()
return T.new(codecs = @[TestCodec], handler = handle)

View File

@@ -25,12 +25,17 @@ type TestProto = ref object of LPProtocol
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
# Read up to 1024 bytes from this connection, and transform them into
# a string
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
# We must close the connections ourselves when we're done with it
await conn.close()
try:
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await conn.close()
return T.new(codecs = @[TestCodec], handler = handle)

View File

@@ -108,12 +108,18 @@ type
proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
var res: MetricProto
proc handle(conn: Connection, proto: string) {.async.} =
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
await conn.writeLp(asProtobuf.buffer)
await conn.close()
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
await conn.writeLp(asProtobuf.buffer)
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await conn.close()
res = MetricProto.new(@["/metric-getter/1.0.0"], handle)
res.metricGetter = cb

View File

@@ -79,8 +79,7 @@ proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
let decoded = MetricList.decode(message.data)
if decoded.isErr:
return ValidationResult.Reject
return ValidationResult.Accept
,
return ValidationResult.Accept,
)
# This "validator" will attach to the `metrics` topic and make sure
# that every message in this topic is valid. This allows us to stop
@@ -93,7 +92,8 @@ proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
node.gossip.subscribe(
"metrics",
proc(topic: string, data: seq[byte]) {.async.} =
echo MetricList.decode(data).tryGet()
let m = MetricList.decode(data).expect("metric can be decoded")
echo m
,
)
else:
@@ -158,8 +158,8 @@ waitFor(main())
## This is John receiving & logging everyone's metrics.
##
## ## Going further
## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
## and [topic params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
## and [topic params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
## you can achieve very different properties.
##
## Also see reports for [GossipSub v1.1](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4)
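Putting the validator and subscriber hunks above together, a hedged sketch of the pair (assuming the tutorial's `node.gossip` and `MetricList` are in scope):
```nim
node.gossip.addValidator(
  ["metrics"],
  proc(topic: string, message: Message): Future[ValidationResult] {.async.} =
    # reject anything that does not decode as a MetricList
    if MetricList.decode(message.data).isErr:
      return ValidationResult.Reject
    return ValidationResult.Accept,
)
node.gossip.subscribe(
  "metrics",
  proc(topic: string, data: seq[byte]) {.async.} =
    echo MetricList.decode(data).expect("metric can be decoded"),
)
```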

View File

@@ -34,9 +34,15 @@ proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
const DumbCodec = "/dumb/proto/1.0.0"
type DumbProto = ref object of LPProtocol
proc new(T: typedesc[DumbProto], nodeNumber: int): T =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
await conn.close()
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await conn.close()
return T.new(codecs = @[DumbCodec], handler = handle)

View File

@@ -152,21 +152,26 @@ proc draw(g: Game) =
## peer know that we are available, check that he is also available,
## and launch the game.
proc new(T: typedesc[GameProto], g: Game): T =
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
defer:
await conn.closeWithEof()
if g.peerFound.finished or g.hasCandidate:
await conn.close()
return
g.hasCandidate = true
await conn.writeLp("ok")
if "ok" != string.fromBytes(await conn.readLp(1024)):
g.hasCandidate = false
return
g.peerFound.complete(conn)
# The handler of a protocol must wait for the stream to
# be finished before returning
await conn.join()
try:
if g.peerFound.finished or g.hasCandidate:
await conn.close()
return
g.hasCandidate = true
await conn.writeLp("ok")
if "ok" != string.fromBytes(await conn.readLp(1024)):
g.hasCandidate = false
return
g.peerFound.complete(conn)
# The handler of a protocol must wait for the stream to
# be finished before returning
await conn.join()
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
return T.new(codecs = @["/tron/1.0.0"], handler = handle)
@@ -214,8 +219,7 @@ proc networking(g: Game) {.async.} =
# We are "player 2"
swap(g.localPlayer, g.remotePlayer)
except CatchableError as exc:
discard
,
discard,
)
await switch.start()
@@ -268,14 +272,11 @@ nico.init("Status", "Tron")
nico.createWindow("Tron", mapSize * 4, mapSize * 4, 4, false)
nico.run(
proc() =
discard
,
discard,
proc(dt: float32) =
game.update(dt)
,
game.update(dt),
proc() =
game.draw()
,
game.draw(),
)
waitFor(netFut.cancelAndWait())

View File

@@ -0,0 +1,19 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:1.6.16 as builder
WORKDIR /workspace
COPY .pinned libp2p.nimble nim-libp2p/
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev
RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240dbf11c544011dd0e033d3c3acca56af" -y
COPY . nim-libp2p/
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs --mm:refc -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./interop/hole-punching/hole_punching.nim
FROM --platform=linux/amd64 debian:bullseye-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2 libssl-dev
COPY --from=builder /workspace/nim-libp2p/hole-punching-tests /usr/bin/hole-punch-client
ENV RUST_BACKTRACE=1
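A hedged example of building this image locally (the tag is illustrative):
```sh
docker build -t nim-hole-punch .
```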

View File

@@ -0,0 +1,138 @@
import std/[os, options, strformat, sequtils]
import redis
import chronos, chronicles
import
../../libp2p/[
builders,
switch,
multicodec,
observedaddrmanager,
services/hpservice,
services/autorelayservice,
protocols/connectivity/autonat/client as aclient,
protocols/connectivity/relay/client as rclient,
protocols/connectivity/relay/relay,
protocols/connectivity/autonat/service,
protocols/ping,
]
import ../../tests/[stubs/autonatclientstub, errorhelpers]
logScope:
topics = "hp interop node"
proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
let rng = newRng()
var builder = SwitchBuilder
.new()
.withRng(rng)
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
.withTcpTransport({ServerFlags.TcpNoDelay})
.withYamux()
.withAutonat()
.withNoise()
if hpService != nil:
builder = builder.withServices(@[hpService])
if r != nil:
builder = builder.withCircuitRelay(r)
let s = builder.build()
s.mount(Ping.new(rng = rng))
return s
proc main() {.async.} =
let relayClient = RelayClient.new()
let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
autonatClientStub.answer = NotReachable
let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
let hpservice = HPService.new(autonatService, autoRelayService)
let
isListener = getEnv("MODE") == "listen"
switch = createSwitch(relayClient, hpservice)
auxSwitch = createSwitch()
redisClient = open("redis", 6379.Port)
debug "Connected to redis"
await switch.start()
await auxSwitch.start()
let relayAddr =
try:
redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
except Exception as e:
raise newException(CatchableError, e.msg)
debug "All relay addresses", relayAddr
# This is necessary to make the autonat service work. It will ask this peer about our
# reachability, which the autonat client stub will answer with NotReachable.
await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)
# Wait for autonat to be NotReachable
while autonatService.networkReachability != NetworkReachability.NotReachable:
await sleepAsync(100.milliseconds)
# This will trigger the autonat relay service to make a reservation.
let relayMA = MultiAddress.init(relayAddr[1]).tryGet()
try:
debug "Dialing relay...", relayMA
let relayId = await switch.connect(relayMA).wait(30.seconds)
debug "Connected to relay", relayId
except AsyncTimeoutError as e:
raise newException(CatchableError, "Connection to relay timed out: " & e.msg, e)
# Wait for our relay address to be published
while not switch.peerInfo.addrs.anyIt(it.contains(multiCodec("p2p-circuit")).tryGet()):
await sleepAsync(100.milliseconds)
if isListener:
let listenerPeerId = switch.peerInfo.peerId
discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
debug "Pushed listener client peer id to redis", listenerPeerId
# Nothing to do anymore, wait to be killed
await sleepAsync(2.minutes)
else:
let listenerId =
try:
PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
except Exception as e:
raise newException(CatchableError, "Exception init peer: " & e.msg, e)
debug "Got listener peer id", listenerId
let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
debug "Dialing listener relay address", listenerRelayAddr
await switch.connect(listenerId, @[listenerRelayAddr])
# wait for hole-punching to complete in the background
await sleepAsync(5000.milliseconds)
let conn = switch.connManager.selectMuxer(listenerId).connection
let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
let delay = await Ping.new().ping(channel)
await allFuturesThrowing(
channel.close(), conn.close(), switch.stop(), auxSwitch.stop()
)
echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
try:
proc mainAsync(): Future[string] {.async.} =
# mainAsync wraps main and returns some value, as otherwise
# 'waitFor(fut)' has no type (or is ambiguous)
await main()
return "done"
discard waitFor(mainAsync().wait(4.minutes))
except AsyncTimeoutError as e:
error "Program execution timed out", description = e.msg
quit(-1)
except CatchableError as e:
error "Unexpected error", description = e.msg
quit(-1)

View File

@@ -0,0 +1,18 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:1.6.16 as builder
WORKDIR /app
COPY .pinned libp2p.nimble nim-libp2p/
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev
RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240dbf11c544011dd0e033d3c3acca56af" -y
COPY . nim-libp2p/
RUN \
cd nim-libp2p && \
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs -p:nim-libp2p --mm:refc -d:libp2p_quic_support -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./interop/transport/main.nim
ENTRYPOINT ["/app/nim-libp2p/interop/transport/main"]

View File

@@ -42,29 +42,26 @@ proc main() {.async.} =
discard switchBuilder.withTcpTransport().withAddress(
MultiAddress.init("/ip4/" & ip & "/tcp/0").tryGet()
)
of "ws":
discard switchBuilder
.withTransport(
proc(upgr: Upgrade): Transport =
WsTransport.new(upgr)
of "quic-v1":
discard switchBuilder.withQuicTransport().withAddress(
MultiAddress.init("/ip4/" & ip & "/udp/0/quic-v1").tryGet()
)
of "ws":
discard switchBuilder.withWsTransport().withAddress(
MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet()
)
.withAddress(MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet())
else:
doAssert false
case secureChannel
of "noise":
discard switchBuilder.withNoise()
else:
doAssert false
case muxer
of "yamux":
discard switchBuilder.withYamux()
of "mplex":
discard switchBuilder.withMplex()
else:
doAssert false
let
rng = newRng()
@@ -83,7 +80,7 @@ proc main() {.async.} =
try:
redisClient.bLPop(@["listenerAddr"], testTimeout.seconds.int)[1]
except Exception as e:
raise newException(CatchableError, e.msg)
raise newException(CatchableError, "Exception calling bLPop: " & e.msg, e)
let
remoteAddr = MultiAddress.init(listenerAddr).tryGet()
dialingStart = Moment.now()
@@ -99,7 +96,18 @@ proc main() {.async.} =
pingRTTMilllis: float(pingDelay.milliseconds),
)
)
quit(0)
discard waitFor(main().withTimeout(testTimeout))
quit(1)
try:
proc mainAsync(): Future[string] {.async.} =
# mainAsync wraps main and returns some value, as otherwise
# 'waitFor(fut)' has no type (or is ambiguous)
await main()
return "done"
discard waitFor(mainAsync().wait(testTimeout))
except AsyncTimeoutError as e:
error "Program execution timed out", description = e.msg
quit(-1)
except CatchableError as e:
error "Unexpected error", description = e.msg
quit(-1)

View File

@@ -3,7 +3,8 @@
"containerImageID": "nim-libp2p-head",
"transports": [
"tcp",
"ws"
"ws",
"quic-v1"
],
"secureChannels": [
"noise"

View File

@@ -17,7 +17,7 @@ when defined(nimdoc):
## stay backward compatible during the Major version, whereas private ones can
## change at each new Minor version.
##
## If you're new to nim-libp2p, you can find a tutorial `here<https://status-im.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
## If you're new to nim-libp2p, you can find a tutorial `here<https://vacp2p.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
## that can help you get started.
# Import stuff for doc
@@ -52,7 +52,6 @@ else:
stream/connection,
transports/transport,
transports/tcptransport,
transports/quictransport,
protocols/secure/noise,
cid,
multihash,
@@ -71,3 +70,7 @@ else:
minprotobuf, switch, peerid, peerinfo, connection, multiaddress, crypto, lpstream,
bufferstream, muxer, mplex, transport, tcptransport, noise, errors, cid, multihash,
multicodec, builders, pubsub
when defined(libp2p_quic_support):
import libp2p/transports/quictransport
export quictransport
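QUIC support is thus opt-in at compile time; a hedged example of enabling it when building an application (the file name is illustrative), using the same `-d:libp2p_quic_support` flag as the interop Dockerfile and test tasks elsewhere in this changeset:
```sh
nim c -d:libp2p_quic_support myapp.nim
```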

View File

@@ -1,7 +1,7 @@
mode = ScriptMode.Verbose
packageName = "libp2p"
version = "1.6.0"
version = "1.11.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
@@ -9,9 +9,9 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
requires "nim >= 1.6.0",
"nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
"chronicles >= 0.10.2", "chronos >= 4.0.3", "metrics", "secp256k1", "stew#head",
"websock", "unittest2",
"https://github.com/status-im/nim-quic.git#ddcb31ffb74b5460ab37fd13547eca90594248bc"
"chronicles >= 0.10.3 & < 0.11.0", "chronos >= 4.0.4", "metrics", "secp256k1",
"stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.7", "bio",
"https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -25,16 +25,12 @@ let cfg =
import hashes, strutils
proc runTest(
filename: string, verify: bool = true, sign: bool = true, moreoptions: string = ""
) =
proc runTest(filename: string, moreoptions: string = "") =
var excstr = nimc & " " & lang & " -d:debug " & cfg & " " & flags
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
excstr.add(" " & moreoptions & " ")
if getEnv("CICOV").len > 0:
excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
exec excstr & " -r " & " tests/" & filename
exec excstr & " -r -d:libp2p_quic_support tests/" & filename
rmFile "tests/" & filename.toExe
proc buildSample(filename: string, run = false, extraFlags = "") =
@@ -60,51 +56,18 @@ task testinterop, "Runs interop tests":
runTest("testinterop")
task testpubsub, "Runs pubsub tests":
runTest(
"pubsub/testgossipinternal",
sign = false,
verify = false,
moreoptions = "-d:pubsub_internal_testing",
)
runTest("pubsub/testpubsub")
runTest("pubsub/testpubsub", sign = false, verify = false)
runTest(
"pubsub/testpubsub",
sign = false,
verify = false,
moreoptions = "-d:libp2p_pubsub_anonymize=true",
)
task testpubsub_slim, "Runs pubsub tests":
runTest(
"pubsub/testgossipinternal",
sign = false,
verify = false,
moreoptions = "-d:pubsub_internal_testing",
)
runTest("pubsub/testpubsub")
task testfilter, "Run PKI filter test":
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1\"")
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519\"")
runTest(
"testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519;ecnist\""
)
runTest("testpkifilter")
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=")
task test, "Runs the test suite":
exec "nimble testnative"
exec "nimble testpubsub"
exec "nimble testdaemon"
exec "nimble testinterop"
exec "nimble testfilter"
exec "nimble examples_build"
task testintegration, "Runs integration tests":
runTest("testintegration")
task test_slim, "Runs the (slimmed down) test suite":
exec "nimble testnative"
exec "nimble testpubsub_slim"
task test, "Runs the test suite":
runTest("testall")
exec "nimble testfilter"
exec "nimble examples_build"
task website, "Build the website":
tutorialToMd("examples/tutorial_1_connect.nim")
@@ -116,19 +79,12 @@ task website, "Build the website":
tutorialToMd("examples/circuitrelay.nim")
exec "mkdocs build"
task examples_build, "Build the samples":
buildSample("directchat")
buildSample("helloworld", true)
buildSample("circuitrelay", true)
buildSample("tutorial_1_connect", true)
buildSample("tutorial_2_customproto", true)
buildSample("tutorial_3_protobuf", true)
buildSample("tutorial_4_gossipsub", true)
buildSample("tutorial_5_discovery", true)
exec "nimble install -y nimpng@#HEAD"
# this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
exec "nimble install -y nico@#af99dd60bf2b395038ece815ea1012330a80d6e6"
buildSample("tutorial_6_game", false, "--styleCheck:off")
task examples, "Build and run examples":
exec "nimble install -y nimpng"
exec "nimble install -y nico --passNim=--skipParentCfg"
buildSample("examples_build", false, "--styleCheck:off") # build only
buildSample("examples_run", true)
# pin system
# while nimble lockfile

libp2p/autotls/acme/api.nim (new file, 512 lines)
View File

@@ -0,0 +1,512 @@
import options, base64, sequtils, strutils, json, uri
from times import DateTime, parse
import chronos/apps/http/httpclient, jwt, results, bearssl/pem
import ./utils
import ../../crypto/crypto
import ../../crypto/rsa
export ACMEError
const
LetsEncryptURL* = "https://acme-v02.api.letsencrypt.org"
LetsEncryptURLStaging* = "https://acme-staging-v02.api.letsencrypt.org"
Alg = "RS256"
DefaultChalCompletedRetries = 10
DefaultChalCompletedRetryTime = 1.seconds
DefaultFinalizeRetries = 10
DefaultFinalizeRetryTime = 1.seconds
DefaultRandStringSize = 256
ACMEHttpHeaders = [("Content-Type", "application/jose+json")]
type Authorization* = string
type Domain* = string
type Kid* = string
type Nonce* = string
type ACMEDirectory* = object
newNonce*: string
newOrder*: string
newAccount*: string
type ACMEApi* = ref object of RootObj
directory: Opt[ACMEDirectory]
session: HttpSessionRef
acmeServerURL*: Uri
type HTTPResponse* = object
body*: JsonNode
headers*: HttpTable
type JWK = object
kty: string
n: string
e: string
# whether the request authenticates with an embedded JWK or an account Kid
type ACMERequestType = enum
ACMEJwkRequest
ACMEKidRequest
type ACMERequestHeader = object
alg: string
typ: string
nonce: Nonce
url: string
case kind: ACMERequestType
of ACMEJwkRequest:
jwk: JWK
of ACMEKidRequest:
kid: Kid
type Email = string
type ACMERegisterRequest* = object
termsOfServiceAgreed: bool
contact: seq[Email]
type ACMEAccountStatus = enum
valid = "valid"
deactivated = "deactivated"
revoked = "revoked"
type ACMERegisterResponseBody = object
status*: ACMEAccountStatus
type ACMERegisterResponse* = object
kid*: Kid
status*: ACMEAccountStatus
type ACMEChallengeStatus* {.pure.} = enum
pending = "pending"
processing = "processing"
valid = "valid"
invalid = "invalid"
type ACMEChallengeType* {.pure.} = enum
dns01 = "dns-01"
http01 = "http-01"
tlsalpn01 = "tls-alpn-01"
type ACMEChallengeToken* = string
type ACMEChallenge* = object
url*: string
`type`*: ACMEChallengeType
status*: ACMEChallengeStatus
token*: ACMEChallengeToken
type ACMEChallengeIdentifier = object
`type`: string
value: string
type ACMEChallengeRequest = object
identifiers: seq[ACMEChallengeIdentifier]
type ACMEChallengeResponseBody = object
status: ACMEChallengeStatus
authorizations: seq[Authorization]
finalize: string
type ACMEChallengeResponse* = object
status*: ACMEChallengeStatus
authorizations*: seq[Authorization]
finalize*: string
order*: string
type ACMEChallengeResponseWrapper* = object
finalize*: string
order*: string
dns01*: ACMEChallenge
type ACMEAuthorizationsResponse* = object
challenges*: seq[ACMEChallenge]
type ACMECompletedResponse* = object
checkURL: string
type ACMEOrderStatus* {.pure.} = enum
pending = "pending"
ready = "ready"
processing = "processing"
valid = "valid"
invalid = "invalid"
type ACMECheckKind* = enum
ACMEOrderCheck
ACMEChallengeCheck
type ACMECheckResponse* = object
case kind: ACMECheckKind
of ACMEOrderCheck:
orderStatus: ACMEOrderStatus
of ACMEChallengeCheck:
chalStatus: ACMEChallengeStatus
retryAfter: Duration
type ACMEFinalizeResponse* = object
status: ACMEOrderStatus
type ACMEOrderResponse* = object
certificate: string
expires: string
type ACMECertificateResponse* = object
rawCertificate: string
certificateExpiry: DateTime
template handleError*(msg: string, body: untyped): untyped =
try:
body
except ACMEError as exc:
raise exc
except CancelledError as exc:
raise exc
except JsonKindError as exc:
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
except ValueError as exc:
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
except HttpError as exc:
raise newException(ACMEError, msg & ": Failed to connect to ACME server", exc)
except CatchableError as exc:
raise newException(ACMEError, msg & ": Unexpected error", exc)
method post*(
self: ACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.}
method get*(
self: ACMEApi, uri: Uri
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.}
proc new*(
T: typedesc[ACMEApi], acmeServerURL: Uri = parseUri(LetsEncryptURL)
): ACMEApi =
let session = HttpSessionRef.new()
ACMEApi(
session: session, directory: Opt.none(ACMEDirectory), acmeServerURL: acmeServerURL
)
proc getDirectory(
self: ACMEApi
): Future[ACMEDirectory] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("getDirectory"):
self.directory.valueOr:
let acmeResponse = await self.get(self.acmeServerURL / "directory")
let directory = acmeResponse.body.to(ACMEDirectory)
self.directory = Opt.some(directory)
directory
method requestNonce*(
self: ACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]), base.} =
handleError("requestNonce"):
let acmeResponse = await self.get(parseUri((await self.getDirectory()).newNonce))
Nonce(acmeResponse.headers.keyOrError("Replay-Nonce"))
# TODO: save n and e in account so we don't have to recalculate every time
proc acmeHeader(
self: ACMEApi, uri: Uri, key: KeyPair, needsJwk: bool, kid: Opt[Kid]
): Future[ACMERequestHeader] {.async: (raises: [ACMEError, CancelledError]).} =
if not needsJwk and kid.isNone:
raise newException(ACMEError, "kid not set")
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
raise newException(ACMEError, "Unsupported signing key type")
let newNonce = await self.requestNonce()
if needsJwk:
let pubkey = key.pubkey.rsakey
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
ACMERequestHeader(
kind: ACMEJwkRequest,
alg: Alg,
typ: "JWT",
nonce: newNonce,
url: $uri,
jwk: JWK(kty: "RSA", n: base64UrlEncode(nArray), e: base64UrlEncode(eArray)),
)
else:
ACMERequestHeader(
kind: ACMEKidRequest,
alg: Alg,
typ: "JWT",
nonce: newNonce,
url: $uri,
kid: kid.get(),
)
method post*(
self: ACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.} =
let rawResponse = await HttpClientRequestRef
.post(self.session, $uri, body = payload, headers = ACMEHttpHeaders)
.get()
.send()
let body = await rawResponse.getResponseBody()
HTTPResponse(body: body, headers: rawResponse.headers)
method get*(
self: ACMEApi, uri: Uri
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.} =
let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
let body = await rawResponse.getResponseBody()
HTTPResponse(body: body, headers: rawResponse.headers)
proc createSignedAcmeRequest(
self: ACMEApi,
uri: Uri,
payload: auto,
key: KeyPair,
needsJwk: bool = false,
kid: Opt[Kid] = Opt.none(Kid),
): Future[string] {.async: (raises: [ACMEError, CancelledError]).} =
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
raise newException(ACMEError, "Unsupported signing key type")
let acmeHeader = await self.acmeHeader(uri, key, needsJwk, kid)
handleError("createSignedAcmeRequest"):
var token = toJWT(%*{"header": acmeHeader, "claims": payload})
let derPrivKey = key.seckey.rsakey.getBytes.get
let pemPrivKey: string = pemEncode(derPrivKey, "PRIVATE KEY")
token.sign(pemPrivKey)
$token.toFlattenedJson()
proc requestRegister*(
self: ACMEApi, key: KeyPair
): Future[ACMERegisterResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let registerRequest = ACMERegisterRequest(termsOfServiceAgreed: true)
handleError("acmeRegister"):
let payload = await self.createSignedAcmeRequest(
parseUri((await self.getDirectory()).newAccount),
registerRequest,
key,
needsJwk = true,
)
let acmeResponse =
await self.post(parseUri((await self.getDirectory()).newAccount), payload)
let acmeResponseBody = acmeResponse.body.to(ACMERegisterResponseBody)
ACMERegisterResponse(
status: acmeResponseBody.status, kid: acmeResponse.headers.keyOrError("location")
)
proc requestNewOrder*(
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
# request challenge from ACME server
let orderRequest = ACMEChallengeRequest(
identifiers: domains.mapIt(ACMEChallengeIdentifier(`type`: "dns", value: it))
)
handleError("requestNewOrder"):
let payload = await self.createSignedAcmeRequest(
parseUri((await self.getDirectory()).newOrder),
orderRequest,
key,
kid = Opt.some(kid),
)
let acmeResponse =
await self.post(parseUri((await self.getDirectory()).newOrder), payload)
let challengeResponseBody = acmeResponse.body.to(ACMEChallengeResponseBody)
if challengeResponseBody.authorizations.len() == 0:
raise newException(ACMEError, "Authorizations field is empty")
ACMEChallengeResponse(
status: challengeResponseBody.status,
authorizations: challengeResponseBody.authorizations,
finalize: challengeResponseBody.finalize,
order: acmeResponse.headers.keyOrError("location"),
)
proc requestAuthorizations*(
self: ACMEApi, authorizations: seq[Authorization], key: KeyPair, kid: Kid
): Future[ACMEAuthorizationsResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestAuthorizations"):
doAssert authorizations.len > 0
let acmeResponse = await self.get(parseUri(authorizations[0]))
acmeResponse.body.to(ACMEAuthorizationsResponse)
proc requestChallenge*(
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponseWrapper] {.async: (raises: [ACMEError, CancelledError]).} =
let challengeResponse = await self.requestNewOrder(domains, key, kid)
if challengeResponse.status != ACMEChallengeStatus.pending:
raise newException(
ACMEError, "Invalid new challenge status: " & $challengeResponse.status
)
let authorizationsResponse =
await self.requestAuthorizations(challengeResponse.authorizations, key, kid)
if authorizationsResponse.challenges.len == 0:
raise newException(ACMEError, "No challenges received")
return ACMEChallengeResponseWrapper(
finalize: challengeResponse.finalize,
order: challengeResponse.order,
dns01: authorizationsResponse.challenges.filterIt(
it.`type` == ACMEChallengeType.dns01
)[0],
# getting the first element is safe since we checked that authorizationsResponse.challenges.len != 0
)
proc requestCheck*(
self: ACMEApi, checkURL: Uri, checkKind: ACMECheckKind, key: KeyPair, kid: Kid
): Future[ACMECheckResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestCheck"):
let acmeResponse = await self.get(checkURL)
let retryAfter =
try:
parseInt(acmeResponse.headers.keyOrError("Retry-After")).seconds
except ValueError:
DefaultChalCompletedRetryTime
case checkKind
of ACMEOrderCheck:
try:
ACMECheckResponse(
kind: checkKind,
orderStatus: parseEnum[ACMEOrderStatus](acmeResponse.body["status"].getStr),
retryAfter: retryAfter,
)
except ValueError:
raise newException(
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
)
of ACMEChallengeCheck:
try:
ACMECheckResponse(
kind: checkKind,
chalStatus: parseEnum[ACMEChallengeStatus](acmeResponse.body["status"].getStr),
retryAfter: retryAfter,
)
except ValueError:
raise newException(
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
)
proc sendChallengeCompleted*(
self: ACMEApi, chalURL: Uri, key: KeyPair, kid: Kid
): Future[ACMECompletedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("sendChallengeCompleted (send notify)"):
let payload =
await self.createSignedAcmeRequest(chalURL, %*{}, key, kid = Opt.some(kid))
let acmeResponse = await self.post(chalURL, payload)
acmeResponse.body.to(ACMECompletedResponse)
proc checkChallengeCompleted*(
self: ACMEApi,
checkURL: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
for i in 0 .. retries:
let checkResponse = await self.requestCheck(checkURL, ACMEChallengeCheck, key, kid)
case checkResponse.chalStatus
of ACMEChallengeStatus.pending:
await sleepAsync(checkResponse.retryAfter) # try again after some delay
of ACMEChallengeStatus.valid:
return true
else:
raise newException(
ACMEError,
"Failed challenge completion: expected 'valid', got '" &
$checkResponse.chalStatus & "'",
)
return false
proc completeChallenge*(
self: ACMEApi,
chalURL: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
let completedResponse = await self.sendChallengeCompleted(chalURL, key, kid)
# check until acme server is done (poll validation)
return await self.checkChallengeCompleted(chalURL, key, kid, retries = retries)
proc requestFinalize*(
self: ACMEApi, domain: Domain, finalize: Uri, key: KeyPair, kid: Kid
): Future[ACMEFinalizeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let derCSR = createCSR(domain)
let b64CSR = base64.encode(derCSR.toSeq, safe = true)
handleError("requestFinalize"):
let payload = await self.createSignedAcmeRequest(
finalize, %*{"csr": b64CSR}, key, kid = Opt.some(kid)
)
let acmeResponse = await self.post(finalize, payload)
# server responds with updated order response
acmeResponse.body.to(ACMEFinalizeResponse)
proc checkCertFinalized*(
self: ACMEApi,
order: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
for i in 0 .. retries:
let checkResponse = await self.requestCheck(order, ACMEOrderCheck, key, kid)
case checkResponse.orderStatus
of ACMEOrderStatus.valid:
return true
of ACMEOrderStatus.processing:
await sleepAsync(checkResponse.retryAfter) # try again after some delay
else:
raise newException(
ACMEError,
"Failed certificate finalization: expected 'valid', got '" &
$checkResponse.orderStatus & "'",
)
return false
proc certificateFinalized*(
self: ACMEApi,
domain: Domain,
finalize: Uri,
order: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultFinalizeRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
let finalizeResponse = await self.requestFinalize(domain, finalize, key, kid)
# keep checking order until cert is valid (done)
return await self.checkCertFinalized(order, key, kid, retries = retries)
proc requestGetOrder*(
self: ACMEApi, order: Uri
): Future[ACMEOrderResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestGetOrder"):
let acmeResponse = await self.get(order)
acmeResponse.body.to(ACMEOrderResponse)
proc downloadCertificate*(
self: ACMEApi, order: Uri
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let orderResponse = await self.requestGetOrder(order)
handleError("downloadCertificate"):
let rawResponse = await HttpClientRequestRef
.get(self.session, orderResponse.certificate)
.get()
.send()
ACMECertificateResponse(
rawCertificate: bytesToString(await rawResponse.getBodyBytes()),
certificateExpiry: parse(orderResponse.expires, "yyyy-MM-dd'T'HH:mm:ss'Z'"),
)
proc close*(self: ACMEApi): Future[void] {.async: (raises: [CancelledError]).} =
await self.session.closeWait()

View File

@@ -0,0 +1,70 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import uri
import chronos, results, bio
import ./api, ./utils
import ../../crypto/crypto
import ../../crypto/rsa
type KeyAuthorization* = string
type ACMEClient* = object
api: ACMEApi
key*: KeyPair
kid*: Kid
proc new*(
T: typedesc[ACMEClient],
api: Opt[ACMEApi] = Opt.none(ACMEApi),
key: Opt[KeyPair] = Opt.none(KeyPair),
rng: ref HmacDrbgContext = newRng(),
acmeServerURL: Uri = parseUri(LetsEncryptURL),
): Future[T] {.async: (raises: [ACMEError, CancelledError]).} =
let api = api.valueOr:
ACMEApi.new()
let key = key.valueOr:
KeyPair.random(PKScheme.RSA, rng[]).get()
let registerResponse = await api.requestRegister(key)
T(api: api, key: key, kid: registerResponse.kid)
proc genKeyAuthorization(self: ACMEClient, token: string): KeyAuthorization =
base64UrlEncode(@(sha256.digest((token & "." & thumbprint(self.key)).toByteSeq).data))
proc getChallenge*(
self: ACMEClient, domains: seq[api.Domain]
): Future[ACMEChallengeResponseWrapper] {.raises: [ACMEError, CancelledError].} =
self.api.requestChallenge(domains, self.key, self.kid)
proc getCertificate*(
self: ACMEClient, domain: api.Domain, challenge: ACMEChallengeResponseWrapper
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let chalURL = parseUri(challenge.dns01.url)
let orderURL = parseUri(challenge.order)
let finalizeURL = parseUri(challenge.finalize)
discard await self.api.sendChallengeCompleted(chalURL, self.key, self.kid)
let completed = await self.api.checkChallengeCompleted(chalURL, self.key, self.kid)
if not completed:
raise
newException(ACMEError, "Failed to signal ACME server about challenge completion")
let finalized = await self.api.certificateFinalized(
domain, finalizeURL, orderURL, self.key, self.kid
)
if not finalized:
raise newException(ACMEError, "Failed to finalize certificate for domain " & domain)
await self.api.downloadCertificate(orderURL)
proc close*(self: ACMEClient): Future[void] {.async: (raises: [CancelledError]).} =
await self.api.close()
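A hedged end-to-end sketch of the client above (the domain is illustrative, and actually publishing the dns-01 record is assumed to happen out of band):
```nim
proc issueCert() {.async: (raises: [ACMEError, CancelledError]).} =
  let client = await ACMEClient.new()  # registers a fresh RSA account key
  let challenge = await client.getChallenge(@["example.com"])
  # publish the dns-01 TXT record for challenge.dns01.token here,
  # then ask the ACME server to validate and finalize:
  let cert = await client.getCertificate("example.com", challenge)
  discard cert  # carries the raw certificate and its expiry
  await client.close()
```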

View File

@@ -0,0 +1,39 @@
import uri
import chronos, chronos/apps/http/httpclient, json
import ./api, ./utils
export api
type MockACMEApi* = ref object of ACMEApi
mockedResponses*: seq[HTTPResponse]
proc new*(
T: typedesc[MockACMEApi]
): Future[T] {.async: (raises: [ACMEError, CancelledError]).} =
let directory = ACMEDirectory(
newNonce: LetsEncryptURL & "/new-nonce",
newOrder: LetsEncryptURL & "/new-order",
newAccount: LetsEncryptURL & "/new-account",
)
MockACMEApi(
session: HttpSessionRef.new(),
directory: Opt.some(directory),
acmeServerURL: parseUri(LetsEncryptURL),
)
method requestNonce*(
self: MockACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
return $self.acmeServerURL & "/acme/1234"
method post*(
self: MockACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
result = self.mockedResponses[0]
self.mockedResponses.delete(0)
method get*(
self: MockACMEApi, uri: Uri
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
result = self.mockedResponses[0]
self.mockedResponses.delete(0)
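A hedged sketch of priming the mock in a test (the queued body is illustrative):
```nim
let mockApi = waitFor MockACMEApi.new()
# responses are consumed in FIFO order by the overridden post/get
mockApi.mockedResponses.add(
  HTTPResponse(body: %*{"status": "valid"}, headers: HttpTable.init())
)
```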

View File

@@ -0,0 +1,64 @@
import base64, strutils, chronos/apps/http/httpclient, json
import ../../errors
import ../../transports/tls/certificate_ffi
import ../../crypto/crypto
import ../../crypto/rsa
type ACMEError* = object of LPError
proc keyOrError*(table: HttpTable, key: string): string {.raises: [ValueError].} =
if not table.contains(key):
raise newException(ValueError, "key " & key & " not present in headers")
table.getString(key)
proc base64UrlEncode*(data: seq[byte]): string =
## Encodes data using base64url (RFC 4648 §5): no padding, URL-safe
var encoded = base64.encode(data, safe = true)
# base64 emits up to two '=' padding characters; strip them
encoded.removeSuffix("=")
encoded.removeSuffix("=")
return encoded
proc thumbprint*(key: KeyPair): string =
## RFC 7638 JWK thumbprint: the members are serialized in lexicographic
## order ("e", "kty", "n") before hashing.
doAssert key.seckey.scheme == PKScheme.RSA, "unsupported keytype"
let pubkey = key.pubkey.rsakey
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
let n = base64UrlEncode(nArray)
let e = base64UrlEncode(eArray)
let keyJson = %*{"e": e, "kty": "RSA", "n": n}
let digest = sha256.digest($keyJson)
return base64UrlEncode(@(digest.data))
proc getResponseBody*(
response: HttpClientResponseRef
): Future[JsonNode] {.async: (raises: [ACMEError, CancelledError]).} =
try:
let bodyBytes = await response.getBodyBytes()
if bodyBytes.len > 0:
return bytesToString(bodyBytes).parseJson()
return %*{} # empty body
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise
newException(ACMEError, "Unexpected error occurred while getting body bytes", exc)
except Exception as exc: # this is required for nim 1.6
raise
newException(ACMEError, "Unexpected error occurred while getting body bytes", exc)
proc createCSR*(domain: string): string {.raises: [ACMEError].} =
var certKey: cert_key_t
var certCtx: cert_context_t
var derCSR: ptr cert_buffer = nil
let personalizationStr = "libp2p_autotls"
if cert_init_drbg(
personalizationStr.cstring, personalizationStr.len.csize_t, certCtx.addr
) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to initialize certCtx")
if cert_generate_key(certCtx, certKey.addr) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to generate cert key")
if cert_signing_req(domain.cstring, certKey, derCSR.addr) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to create CSR")

View File

@@ -23,7 +23,7 @@ import
stream/connection,
multiaddress,
crypto/crypto,
transports/[transport, tcptransport],
transports/[transport, tcptransport, wstransport, memorytransport],
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
@@ -35,10 +35,15 @@ import
utility
import services/wildcardresolverservice
export switch, peerid, peerinfo, connection, multiaddress, crypto, errors
export
switch, peerid, peerinfo, connection, multiaddress, crypto, errors, TLSPrivateKey,
TLSCertificate, TLSFlags, ServerFlags
const MemoryAutoAddress* = memorytransport.MemoryAutoAddress
type
TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [].}
TransportProvider* {.public.} =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport {.gcsafe, raises: [].}
SecureProtocol* {.pure.} = enum
Noise
@@ -151,7 +156,7 @@ proc withTransport*(
let switch = SwitchBuilder
.new()
.withTransport(
proc(upgr: Upgrade): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TcpTransport.new(flags, upgr)
)
.build()
@@ -162,10 +167,37 @@ proc withTcpTransport*(
b: SwitchBuilder, flags: set[ServerFlags] = {}
): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TcpTransport.new(flags, upgr)
)
proc withWsTransport*(
b: SwitchBuilder,
tlsPrivateKey: TLSPrivateKey = nil,
tlsCertificate: TLSCertificate = nil,
tlsFlags: set[TLSFlags] = {},
flags: set[ServerFlags] = {},
): SwitchBuilder =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
WsTransport.new(upgr, tlsPrivateKey, tlsCertificate, tlsFlags, flags)
)
when defined(libp2p_quic_support):
import transports/quictransport
proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
QuicTransport.new(upgr, privateKey)
)
proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
MemoryTransport.new(upgr)
)
proc withRng*(b: SwitchBuilder, rng: ref HmacDrbgContext): SwitchBuilder {.public.} =
b.rng = rng
b
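A hedged sketch of composing the new transport helpers (address and protocol choices are illustrative; QUIC additionally requires building with `-d:libp2p_quic_support`):
```nim
let switch = SwitchBuilder
  .new()
  .withRng(newRng())
  .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0/ws").tryGet()])
  .withWsTransport()  # pass a TLSPrivateKey/TLSCertificate here for wss
  .withMplex()
  .withNoise()
  .build()
```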
@@ -247,6 +279,10 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
let pkRes = PrivateKey.random(b.rng[])
let seckey = b.privKey.get(otherwise = pkRes.expect("Expected default Private Key"))
if b.secureManagers.len == 0:
debug "no secure managers defined. Adding noise by default"
b.secureManagers.add(SecureProtocol.Noise)
var secureManagerInstances: seq[Secure]
if SecureProtocol.Noise in b.secureManagers:
secureManagerInstances.add(Noise.new(b.rng, seckey).Secure)
@@ -270,7 +306,7 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
let transports = block:
var transports: seq[Transport]
for tProvider in b.transports:
transports.add(tProvider(muxedUpgrade))
transports.add(tProvider(muxedUpgrade, seckey))
transports
if b.secureManagers.len == 0:

View File

@@ -10,10 +10,11 @@
## This module implements CID (Content IDentifier).
{.push raises: [].}
{.used.}
import tables, hashes
import multibase, multicodec, multihash, vbuffer, varint
import stew/[base58, results]
import multibase, multicodec, multihash, vbuffer, varint, results
import stew/base58
export results
@@ -41,6 +42,7 @@ const ContentIdsList = [
multiCodec("dag-pb"),
multiCodec("dag-cbor"),
multiCodec("dag-json"),
multiCodec("libp2p-key"),
multiCodec("git-raw"),
multiCodec("eth-block"),
multiCodec("eth-block-list"),

View File

@@ -42,8 +42,9 @@ type
else:
discard
ConnEventHandler* =
proc(peerId: PeerId, event: ConnEvent): Future[void] {.gcsafe, raises: [].}
ConnEventHandler* = proc(peerId: PeerId, event: ConnEvent): Future[void] {.
gcsafe, async: (raises: [CancelledError])
.}
PeerEventKind* {.pure.} = enum
Left
@@ -57,8 +58,9 @@ type
else:
discard
PeerEventHandler* =
proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [].}
PeerEventHandler* = proc(peerId: PeerId, event: PeerEvent): Future[void] {.
gcsafe, async: (raises: [CancelledError])
.}
ConnManager* = ref object of RootObj
maxConnsPerPeer: int
@@ -123,7 +125,9 @@ proc removeConnEventHandler*(
) =
c.connEvents[kind].excl(handler)
proc triggerConnEvent*(c: ConnManager, peerId: PeerId, event: ConnEvent) {.async.} =
proc triggerConnEvent*(
c: ConnManager, peerId: PeerId, event: ConnEvent
) {.async: (raises: [CancelledError]).} =
try:
trace "About to trigger connection events", peer = peerId
if c.connEvents[event.kind].len() > 0:
@@ -136,7 +140,7 @@ proc triggerConnEvent*(c: ConnManager, peerId: PeerId, event: ConnEvent) {.async
except CancelledError as exc:
raise exc
except CatchableError as exc:
warn "Exception in triggerConnEvents",
warn "Exception in triggerConnEvent",
description = exc.msg, peer = peerId, event = $event
proc addPeerEventHandler*(
@@ -154,7 +158,9 @@ proc removePeerEventHandler*(
) =
c.peerEvents[kind].excl(handler)
proc triggerPeerEvents*(c: ConnManager, peerId: PeerId, event: PeerEvent) {.async.} =
proc triggerPeerEvents*(
c: ConnManager, peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
trace "About to trigger peer events", peer = peerId
if c.peerEvents[event.kind].len == 0:
return
@@ -174,16 +180,16 @@ proc triggerPeerEvents*(c: ConnManager, peerId: PeerId, event: PeerEvent) {.asyn
proc expectConnection*(
c: ConnManager, p: PeerId, dir: Direction
): Future[Muxer] {.async.} =
): Future[Muxer] {.async: (raises: [AlreadyExpectingConnectionError, CancelledError]).} =
## Wait for a peer to connect to us. This will bypass the `MaxConnectionsPerPeer`
let key = (p, dir)
if key in c.expectedConnectionsOverLimit:
raise newException(
AlreadyExpectingConnectionError,
"Already expecting an incoming connection from that peer",
"Already expecting an incoming connection from that peer: " & shortLog(p),
)
let future = newFuture[Muxer]()
let future = Future[Muxer].Raising([CancelledError]).init()
c.expectedConnectionsOverLimit[key] = future
try:
@@ -205,18 +211,18 @@ proc contains*(c: ConnManager, muxer: Muxer): bool =
let conn = muxer.connection
return muxer in c.muxed.getOrDefault(conn.peerId)
proc closeMuxer(muxer: Muxer) {.async.} =
proc closeMuxer(muxer: Muxer) {.async: (raises: [CancelledError]).} =
trace "Cleaning up muxer", m = muxer
await muxer.close()
if not (isNil(muxer.handler)):
try:
await muxer.handler # TODO noraises?
await muxer.handler
except CatchableError as exc:
trace "Exception in close muxer handler", description = exc.msg
trace "Cleaned up muxer", m = muxer
proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
proc muxCleanup(c: ConnManager, mux: Muxer) {.async: (raises: []).} =
try:
trace "Triggering disconnect events", mux
let peerId = mux.connection.peerId
@@ -238,7 +244,7 @@ proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
# do not need to propagate CancelledError and should handle other errors
warn "Unexpected exception peer cleanup handler", mux, description = exc.msg
proc onClose(c: ConnManager, mux: Muxer) {.async.} =
proc onClose(c: ConnManager, mux: Muxer) {.async: (raises: []).} =
## connection close event handler
##
## triggers the connections resource cleanup
@@ -272,7 +278,7 @@ proc selectMuxer*(c: ConnManager, peerId: PeerId): Muxer =
trace "connection not found", peerId
return mux
proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [CatchableError].} =
proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [LPError].} =
## store the connection and muxer
##
@@ -324,7 +330,9 @@ proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [CatchableError].} =
trace "Stored muxer", muxer, direction = $muxer.connection.dir, peers = c.muxed.len
proc getIncomingSlot*(c: ConnManager): Future[ConnectionSlot] {.async.} =
proc getIncomingSlot*(
c: ConnManager
): Future[ConnectionSlot] {.async: (raises: [CancelledError]).} =
await c.inSema.acquire()
return ConnectionSlot(connManager: c, direction: In)
@@ -339,25 +347,21 @@ proc getOutgoingSlot*(
raise newTooManyConnectionsError()
return ConnectionSlot(connManager: c, direction: Out)
func semaphore(c: ConnManager, dir: Direction): AsyncSemaphore {.inline.} =
return if dir == In: c.inSema else: c.outSema
proc slotsAvailable*(c: ConnManager, dir: Direction): int =
case dir
of Direction.In:
return c.inSema.count
of Direction.Out:
return c.outSema.count
return semaphore(c, dir).count
proc release*(cs: ConnectionSlot) =
if cs.direction == In:
cs.connManager.inSema.release()
else:
cs.connManager.outSema.release()
semaphore(cs.connManager, cs.direction).release()
proc trackConnection*(cs: ConnectionSlot, conn: Connection) =
if isNil(conn):
cs.release()
return
proc semaphoreMonitor() {.async.} =
proc semaphoreMonitor() {.async: (raises: [CancelledError]).} =
try:
await conn.join()
except CatchableError as exc:
@@ -373,14 +377,18 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
return
cs.trackConnection(mux.connection)
proc getStream*(c: ConnManager, muxer: Muxer): Future[Connection] {.async.} =
proc getStream*(
c: ConnManager, muxer: Muxer
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
## get a muxed stream for the passed muxer
##
if not (isNil(muxer)):
return await muxer.newStream()
proc getStream*(c: ConnManager, peerId: PeerId): Future[Connection] {.async.} =
proc getStream*(
c: ConnManager, peerId: PeerId
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
## get a muxed stream for the passed peer from any connection
##
@@ -388,13 +396,13 @@ proc getStream*(c: ConnManager, peerId: PeerId): Future[Connection] {.async.} =
proc getStream*(
c: ConnManager, peerId: PeerId, dir: Direction
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
## get a muxed stream for the passed peer from a connection with `dir`
##
return await c.getStream(c.selectMuxer(peerId, dir))
proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
proc dropPeer*(c: ConnManager, peerId: PeerId) {.async: (raises: [CancelledError]).} =
## drop connections and cleanup resources for peer
##
trace "Dropping peer", peerId
@@ -405,7 +413,7 @@ proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
trace "Peer dropped", peerId
proc close*(c: ConnManager) {.async.} =
proc close*(c: ConnManager) {.async: (raises: [CancelledError]).} =
## cleanup resources for the connection
## manager
##
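A hedged sketch of a handler matching the new `ConnEventHandler` shape (the body and registration call are illustrative):
```nim
proc onConn(peerId: PeerId, event: ConnEvent): Future[void] {.
    gcsafe, async: (raises: [CancelledError])
.} =
  echo "conn event for ", peerId, ": ", $event.kind

connManager.addConnEventHandler(onConn, ConnEventKind.Connected)
```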

View File

@@ -76,7 +76,7 @@ import nimcrypto/[rijndael, twofish, sha2, hash, hmac]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import ../utility
import stew/results
import results
export results, utility
# This is workaround for Nim's `import` bug

View File

@@ -18,7 +18,7 @@
{.push raises: [].}
import bearssl/[ec, rand]
import stew/results
import results
from stew/assign2 import assign
export results

View File

@@ -21,7 +21,8 @@ import bearssl/[ec, rand, hash]
import nimcrypto/utils as ncrutils
import minasn1
export minasn1.Asn1Error
import stew/[results, ctops]
import stew/ctops
import results
import ../utility

View File

@@ -18,7 +18,8 @@ import constants
import nimcrypto/[hash, sha2]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import stew/[results, ctops]
import results
import stew/ctops
import ../../utility

View File

@@ -28,8 +28,7 @@ proc hkdf*[T: sha256, len: static int](
if salt.len > 0:
unsafeAddr salt[0]
else:
nil
,
nil,
csize_t(salt.len),
)
hkdfInject(
@@ -37,8 +36,7 @@ proc hkdf*[T: sha256, len: static int](
if ikm.len > 0:
unsafeAddr ikm[0]
else:
nil
,
nil,
csize_t(ikm.len),
)
hkdfFlip(ctx)
@@ -48,8 +46,7 @@ proc hkdf*[T: sha256, len: static int](
if info.len > 0:
unsafeAddr info[0]
else:
nil
,
nil,
csize_t(info.len),
addr outputs[i][0],
csize_t(outputs[i].len),

View File

@@ -11,7 +11,8 @@
{.push raises: [].}
import stew/[endians2, results, ctops]
import stew/[endians2, ctops]
import results
export results
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
@@ -291,28 +292,6 @@ proc asn1EncodeBitString*(
dest[2 + lenlen + bytelen - 1] = lastbyte and mask
res
proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte], value: T): int =
var v = value
if value <= cast[T](0x7F):
if len(dest) >= 1:
dest[0] = cast[byte](value)
1
else:
var s = 0
var res = 0
while v != 0:
v = v shr 7
s += 7
inc(res)
if len(dest) >= res:
var k = 0
while s != 0:
s -= 7
dest[k] = cast[byte](((value shr s) and cast[T](0x7F)) or cast[T](0x80))
inc(k)
dest[k - 1] = dest[k - 1] and 0x7F'u8
res
proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
## Encode array of bytes ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and return
## number of bytes (octets) used.
@@ -665,9 +644,6 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
return ok(field)
else:
return err(Asn1Error.NoSupport)
inclass = false
ttag = 0
else:
return err(Asn1Error.NoSupport)

View File

@@ -17,7 +17,8 @@
import bearssl/[rsa, rand, hash]
import minasn1
import stew/[results, ctops]
import results
import stew/ctops
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import bearssl/rand
import secp256k1, stew/[byteutils, results], nimcrypto/[hash, sha2]
import secp256k1, results, stew/byteutils, nimcrypto/[hash, sha2]
export sha2, results, rand
@@ -85,8 +85,9 @@ proc init*(sig: var SkSignature, data: string): SkResult[void] =
var buffer: seq[byte]
try:
buffer = hexToSeqByte(data)
except ValueError:
return err("secp: Hex to bytes failed")
except ValueError as e:
let errMsg = "secp: Hex to bytes failed: " & e.msg
return err(errMsg.cstring)
init(sig, buffer)
proc init*(t: typedesc[SkPrivateKey], data: openArray[byte]): SkResult[SkPrivateKey] =

View File

@@ -146,7 +146,7 @@ type
PubsubTicket* = ref object
topic*: string
handler*: P2PPubSubCallback
handler*: P2PPubSubCallback2
transp*: StreamTransport
PubSubMessage* = object
@@ -158,12 +158,14 @@ type
key*: PublicKey
P2PStreamCallback* = proc(api: DaemonAPI, stream: P2PStream): Future[void] {.
gcsafe, raises: [CatchableError]
gcsafe, async: (raises: [CatchableError])
.}
P2PPubSubCallback* = proc(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.gcsafe, raises: [CatchableError].}
P2PPubSubCallback2* = proc(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async: (raises: [CatchableError]).}
DaemonError* = object of LPError
DaemonRemoteError* = object of DaemonError
DaemonLocalError* = object of DaemonError
@@ -485,7 +487,11 @@ proc getErrorMessage(pb: ProtoBuffer): string {.inline, raises: [DaemonLocalErro
if initProtoBuffer(error).getRequiredField(1, result).isErr():
raise newException(DaemonLocalError, "Error message is missing!")
proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
proc recvMessage(
conn: StreamTransport
): Future[seq[byte]] {.
async: (raises: [TransportIncompleteError, TransportError, CancelledError])
.} =
var
size: uint
length: int
@@ -508,13 +514,19 @@ proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
result = buffer
proc newConnection*(api: DaemonAPI): Future[StreamTransport] {.raises: [LPError].} =
result = connect(api.address)
proc newConnection*(
api: DaemonAPI
): Future[StreamTransport] {.
async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError])
.} =
await connect(api.address)
proc closeConnection*(api: DaemonAPI, transp: StreamTransport): Future[void] =
result = transp.closeWait()
proc closeConnection*(
api: DaemonAPI, transp: StreamTransport
): Future[void] {.async: (raises: [CancelledError]).} =
await transp.closeWait()
proc socketExists(address: MultiAddress): Future[bool] {.async.} =
proc socketExists(address: MultiAddress): Future[bool] {.async: (raises: []).} =
try:
var transp = await connect(address)
await transp.closeWait()
@@ -534,7 +546,9 @@ else:
proc getProcessId(): int =
result = int(posix.getpid())
proc getSocket(pattern: string, count: ptr int): Future[MultiAddress] {.async.} =
proc getSocket(
pattern: string, count: ptr int
): Future[MultiAddress] {.async: (raises: [ValueError, LPError]).} =
var sockname = ""
var pid = $getProcessId()
sockname = pattern % [pid, $(count[])]
@@ -562,7 +576,35 @@ proc getSocket(pattern: string, count: ptr int): Future[MultiAddress] {.async.}
closeSocket(sock)
# This is a forward declaration needed for newDaemonApi()
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.}
proc listPeers*(
api: DaemonAPI
): Future[seq[PeerInfo]] {.
async: (
raises: [
ValueError, DaemonLocalError, OSError, MaInvalidAddress, TransportError,
CancelledError, LPError,
]
)
.}
template exceptionToAssert(body: untyped): untyped =
block:
var res: type(body)
when defined(nimHasWarnBareExcept):
{.push warning[BareExcept]: off.}
try:
res = body
except OSError as exc:
raise newException(OSError, "failure in exceptionToAssert: " & exc.msg, exc)
except IOError as exc:
raise newException(IOError, "failure in exceptionToAssert: " & exc.msg, exc)
except Defect as exc:
raise newException(Defect, "failure in exceptionToAssert: " & exc.msg, exc)
except Exception as exc:
raiseAssert "Exception captured in exceptionToAssert: " & exc.msg
when defined(nimHasWarnBareExcept):
{.pop.}
res
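A minimal usage sketch of the template above (`parseLegacy` and `rawBytes` are hypothetical; OSError, IOError and Defect pass through wrapped with context, while a bare Exception becomes an assertion failure):
let parsed = exceptionToAssert:
  parseLegacy(rawBytes) # illustrative call with an overly wide inferred raises list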
proc copyEnv(): StringTableRef =
## This procedure copies all environment variables into a StringTable.
@@ -586,7 +628,14 @@ proc newDaemonApi*(
peersRequired = 2,
logFile = "",
logLevel = IpfsLogLevel.Debug,
): Future[DaemonAPI] {.async.} =
): Future[DaemonAPI] {.
async: (
raises: [
ValueError, DaemonLocalError, CancelledError, LPError, OSError, IOError,
AsyncError,
]
)
.} =
## Initialize connection to `go-libp2p-daemon` control socket.
##
## ``flags`` - set of P2PDaemonFlags.
@@ -780,7 +829,7 @@ proc newDaemonApi*(
result = api
proc close*(stream: P2PStream) {.async.} =
proc close*(stream: P2PStream) {.async: (raises: [DaemonLocalError]).} =
## Close ``stream``.
if P2PStreamFlags.Closed notin stream.flags:
await stream.transp.closeWait()
@@ -789,7 +838,9 @@ proc close*(stream: P2PStream) {.async.} =
else:
raise newException(DaemonLocalError, "Stream is already closed!")
proc close*(api: DaemonAPI) {.async.} =
proc close*(
api: DaemonAPI
) {.async: (raises: [TransportOsError, LPError, ValueError, OSError, CancelledError]).} =
## Shutdown connections to `go-libp2p-daemon` control socket.
# await api.pool.close()
# Closing all pending servers.
@@ -827,7 +878,9 @@ template withMessage(m, body: untyped): untyped =
proc transactMessage(
transp: StreamTransport, pb: ProtoBuffer
): Future[ProtoBuffer] {.async.} =
): Future[ProtoBuffer] {.
async: (raises: [DaemonLocalError, TransportError, CancelledError])
.} =
let length = pb.getLen()
let res = await transp.write(pb.getPtr(), length)
if res != length:
@@ -845,7 +898,11 @@ proc getPeerInfo(pb: ProtoBuffer): PeerInfo {.raises: [DaemonLocalError].} =
discard pb.getRepeatedField(2, result.addresses)
proc identity*(api: DaemonAPI): Future[PeerInfo] {.async.} =
proc identity*(
api: DaemonAPI
): Future[PeerInfo] {.
async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError])
.} =
## Get Node identity information
var transp = await api.newConnection()
try:
@@ -860,7 +917,7 @@ proc identity*(api: DaemonAPI): Future[PeerInfo] {.async.} =
proc connect*(
api: DaemonAPI, peer: PeerId, addresses: seq[MultiAddress], timeout = 0
) {.async.} =
) {.async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError]).} =
## Connect to remote peer with id ``peer`` and addresses ``addresses``.
var transp = await api.newConnection()
try:
@@ -870,7 +927,9 @@ proc connect*(
except CatchableError:
await api.closeConnection(transp)
proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
proc disconnect*(
api: DaemonAPI, peer: PeerId
) {.async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError]).} =
## Disconnect from remote peer with id ``peer``.
var transp = await api.newConnection()
try:
@@ -882,7 +941,12 @@ proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
proc openStream*(
api: DaemonAPI, peer: PeerId, protocols: seq[string], timeout = 0
): Future[P2PStream] {.async.} =
): Future[P2PStream] {.
async: (
raises:
[MaInvalidAddress, TransportError, CancelledError, LPError, DaemonLocalError]
)
.} =
## Open new stream to peer ``peer`` using one of the protocols in
## ``protocols``. Returns ``StreamTransport`` for the stream.
var transp = await api.newConnection()
@@ -903,11 +967,12 @@ proc openStream*(
stream.flags.incl(Outbound)
stream.transp = transp
result = stream
except CatchableError as exc:
except ResultError[ProtoError] as e:
await api.closeConnection(transp)
raise exc
raise newException(DaemonLocalError, "Wrong message type: " & e.msg, e)
proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
# must not specify raised exceptions as this is StreamCallback from chronos
var api = getUserData[DaemonAPI](server)
var message = await transp.recvMessage()
var pb = initProtoBuffer(message)
@@ -927,11 +992,28 @@ proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
proc addHandler*(
api: DaemonAPI, protocols: seq[string], handler: P2PStreamCallback
) {.async, raises: [LPError].} =
) {.
async: (
raises: [
MaInvalidAddress, DaemonLocalError, TransportError, CancelledError, LPError,
ValueError,
]
)
.} =
## Add stream handler ``handler`` for set of protocols ``protocols``.
var transp = await api.newConnection()
let maddress = await getSocket(api.pattern, addr api.ucounter)
var server = createStreamServer(maddress, streamHandler, udata = api)
var removeHandler = proc(): Future[void] {.
async: (raises: [CancelledError, TransportError])
.} =
for item in protocols:
api.handlers.del(item)
server.stop()
server.close()
await server.join()
try:
for item in protocols:
api.handlers[item] = handler
@@ -939,17 +1021,28 @@ proc addHandler*(
var pb = await transp.transactMessage(requestStreamHandler(maddress, protocols))
pb.withMessage:
api.servers.add(P2PServer(server: server, address: maddress))
except CatchableError as exc:
for item in protocols:
api.handlers.del(item)
server.stop()
server.close()
await server.join()
raise exc
except DaemonLocalError as e:
await removeHandler()
raise newException(DaemonLocalError, "Could not add stream handler: " & e.msg, e)
except TransportError as e:
await removeHandler()
raise newException(TransportError, "Could not add stream handler: " & e.msg, e)
except CancelledError as e:
await removeHandler()
raise e
finally:
await api.closeConnection(transp)
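A registration sketch against the new signature (inside an async proc, with a running `api: DaemonAPI`; the protocol string and handler body are illustrative):
proc echoHandler(
    api: DaemonAPI, stream: P2PStream
): Future[void] {.gcsafe, async: (raises: [CatchableError]).} =
  discard # e.g. read from stream.transp and echo it back
await api.addHandler(@["/echo/1.0.0"], echoHandler)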
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.} =
proc listPeers*(
api: DaemonAPI
): Future[seq[PeerInfo]] {.
async: (
raises: [
ValueError, DaemonLocalError, OSError, MaInvalidAddress, TransportError,
CancelledError, LPError,
]
)
.} =
## Get list of remote peers to which we are currently connected.
var transp = await api.newConnection()
try:
@@ -964,7 +1057,14 @@ proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.} =
finally:
await api.closeConnection(transp)
proc cmTagPeer*(api: DaemonAPI, peer: PeerId, tag: string, weight: int) {.async.} =
proc cmTagPeer*(
api: DaemonAPI, peer: PeerId, tag: string, weight: int
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Tag peer with id ``peer`` using ``tag`` and ``weight``.
var transp = await api.newConnection()
try:
@@ -974,7 +1074,14 @@ proc cmTagPeer*(api: DaemonAPI, peer: PeerId, tag: string, weight: int) {.async.
finally:
await api.closeConnection(transp)
proc cmUntagPeer*(api: DaemonAPI, peer: PeerId, tag: string) {.async.} =
proc cmUntagPeer*(
api: DaemonAPI, peer: PeerId, tag: string
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Remove tag ``tag`` from peer with id ``peer``.
var transp = await api.newConnection()
try:
@@ -984,7 +1091,14 @@ proc cmUntagPeer*(api: DaemonAPI, peer: PeerId, tag: string) {.async.} =
finally:
await api.closeConnection(transp)
proc cmTrimPeers*(api: DaemonAPI) {.async.} =
proc cmTrimPeers*(
api: DaemonAPI
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Trim all connections.
var transp = await api.newConnection()
try:
@@ -1058,7 +1172,12 @@ proc getDhtMessageType(
proc dhtFindPeer*(
api: DaemonAPI, peer: PeerId, timeout = 0
): Future[PeerInfo] {.async.} =
): Future[PeerInfo] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Find peer with id ``peer`` and return peer information ``PeerInfo``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1073,7 +1192,12 @@ proc dhtFindPeer*(
proc dhtGetPublicKey*(
api: DaemonAPI, peer: PeerId, timeout = 0
): Future[PublicKey] {.async.} =
): Future[PublicKey] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get peer's public key from peer with id ``peer``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1088,7 +1212,12 @@ proc dhtGetPublicKey*(
proc dhtGetValue*(
api: DaemonAPI, key: string, timeout = 0
): Future[seq[byte]] {.async.} =
): Future[seq[byte]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get value associated with ``key``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1103,7 +1232,12 @@ proc dhtGetValue*(
proc dhtPutValue*(
api: DaemonAPI, key: string, value: seq[byte], timeout = 0
) {.async.} =
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Associate ``value`` with ``key``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1116,7 +1250,14 @@ proc dhtPutValue*(
finally:
await api.closeConnection(transp)
proc dhtProvide*(api: DaemonAPI, cid: Cid, timeout = 0) {.async.} =
proc dhtProvide*(
api: DaemonAPI, cid: Cid, timeout = 0
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Provide content with id ``cid``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1131,7 +1272,12 @@ proc dhtProvide*(api: DaemonAPI, cid: Cid, timeout = 0) {.async.} =
proc dhtFindPeersConnectedToPeer*(
api: DaemonAPI, peer: PeerId, timeout = 0
): Future[seq[PeerInfo]] {.async.} =
): Future[seq[PeerInfo]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Find peers which are connected to peer with id ``peer``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1157,7 +1303,12 @@ proc dhtFindPeersConnectedToPeer*(
proc dhtGetClosestPeers*(
api: DaemonAPI, key: string, timeout = 0
): Future[seq[PeerId]] {.async.} =
): Future[seq[PeerId]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get closest peers for ``key``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1183,7 +1334,12 @@ proc dhtGetClosestPeers*(
proc dhtFindProviders*(
api: DaemonAPI, cid: Cid, count: uint32, timeout = 0
): Future[seq[PeerInfo]] {.async.} =
): Future[seq[PeerInfo]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get ``count`` providers for content with id ``cid``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1209,7 +1365,12 @@ proc dhtFindProviders*(
proc dhtSearchValue*(
api: DaemonAPI, key: string, timeout = 0
): Future[seq[seq[byte]]] {.async.} =
): Future[seq[seq[byte]]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Search for value with ``key``, return list of values found.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1232,7 +1393,14 @@ proc dhtSearchValue*(
finally:
await api.closeConnection(transp)
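Caller-side, the DHT surface is unchanged apart from the typed raises lists; a sketch (inside an async proc, with a running `api`; `somePeerId` and the key are illustrative):
let info = await api.dhtFindPeer(somePeerId, timeout = 10)
let value = await api.dhtGetValue("/some/key", timeout = 10)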
proc pubsubGetTopics*(api: DaemonAPI): Future[seq[string]] {.async.} =
proc pubsubGetTopics*(
api: DaemonAPI
): Future[seq[string]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get list of topics this node is subscribed to.
var transp = await api.newConnection()
try:
@@ -1245,7 +1413,14 @@ proc pubsubGetTopics*(api: DaemonAPI): Future[seq[string]] {.async.} =
finally:
await api.closeConnection(transp)
proc pubsubListPeers*(api: DaemonAPI, topic: string): Future[seq[PeerId]] {.async.} =
proc pubsubListPeers*(
api: DaemonAPI, topic: string
): Future[seq[PeerId]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get list of peers we are connected to which are also subscribed to topic
## ``topic``.
var transp = await api.newConnection()
@@ -1260,7 +1435,14 @@ proc pubsubListPeers*(api: DaemonAPI, topic: string): Future[seq[PeerId]] {.asyn
finally:
await api.closeConnection(transp)
proc pubsubPublish*(api: DaemonAPI, topic: string, value: seq[byte]) {.async.} =
proc pubsubPublish*(
api: DaemonAPI, topic: string, value: seq[byte]
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Publish message ``value`` to topic ``topic``.
var transp = await api.newConnection()
try:
@@ -1280,7 +1462,13 @@ proc getPubsubMessage*(pb: ProtoBuffer): PubSubMessage =
discard pb.getField(5, result.signature)
discard pb.getField(6, result.key)
proc pubsubLoop(api: DaemonAPI, ticket: PubsubTicket) {.async.} =
proc pubsubLoop(
api: DaemonAPI, ticket: PubsubTicket
) {.
async: (
raises: [TransportIncompleteError, TransportError, CancelledError, CatchableError]
)
.} =
while true:
var pbmessage = await ticket.transp.recvMessage()
if len(pbmessage) == 0:
@@ -1295,8 +1483,13 @@ proc pubsubLoop(api: DaemonAPI, ticket: PubsubTicket) {.async.} =
break
proc pubsubSubscribe*(
api: DaemonAPI, topic: string, handler: P2PPubSubCallback
): Future[PubsubTicket] {.async.} =
api: DaemonAPI, topic: string, handler: P2PPubSubCallback2
): Future[PubsubTicket] {.
async: (
raises:
[MaInvalidAddress, TransportError, LPError, CancelledError, DaemonLocalError]
)
.} =
## Subscribe to topic ``topic``.
var transp = await api.newConnection()
try:
@@ -1308,10 +1501,32 @@ proc pubsubSubscribe*(
ticket.transp = transp
asyncSpawn pubsubLoop(api, ticket)
result = ticket
except CatchableError as exc:
except DaemonLocalError as exc:
await api.closeConnection(transp)
raise newException(
DaemonLocalError, "Could not subscribe to topic '" & topic & "': " & exc.msg, exc
)
except TransportError as exc:
await api.closeConnection(transp)
raise newException(
TransportError, "Could not subscribe to topic '" & topic & "': " & exc.msg, exc
)
except CancelledError as exc:
await api.closeConnection(transp)
raise exc
proc pubsubSubscribe*(
api: DaemonAPI, topic: string, handler: P2PPubSubCallback
): Future[PubsubTicket] {.
async: (raises: [CatchableError]), deprecated: "Use P2PPubSubCallback2 instead"
.} =
proc wrap(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async: (raises: [CatchableError]).} =
await handler(api, ticket, message)
await pubsubSubscribe(api, topic, wrap)
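A subscription sketch against the new callback type (inside an async proc, with a running `api: DaemonAPI`; the topic is illustrative, and per pubsubLoop above returning false ends the subscription):
proc onMessage(
    api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async: (raises: [CatchableError]).} =
  echo "received ", message.data.len, " bytes on ", ticket.topic
  return true # false would make pubsubLoop unsubscribe
let ticket = await api.pubsubSubscribe("demo-topic", onMessage)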
proc shortLog*(pinfo: PeerInfo): string =
## Get string representation of ``PeerInfo`` object.
result = newStringOfCap(128)

View File

@@ -55,7 +55,7 @@ proc newPool*(
address: TransportAddress,
poolsize: int = DefaultPoolSize,
bufferSize = DefaultStreamBufferSize,
): Future[TransportPool] {.async.} =
): Future[TransportPool] {.async: (raises: [CancelledError]).} =
## Establish pool of connections to address ``address`` with size
## ``poolsize``.
var pool = new TransportPool
@@ -80,7 +80,9 @@ proc newPool*(
pool.state = Connected
result = pool
proc acquire*(pool: TransportPool): Future[StreamTransport] {.async.} =
proc acquire*(
pool: TransportPool
): Future[StreamTransport] {.async: (raises: [CancelledError, TransportPoolError]).} =
## Acquire non-busy connection from pool ``pool``.
var transp: StreamTransport
if pool.state in {Connected}:
@@ -102,7 +104,9 @@ proc acquire*(pool: TransportPool): Future[StreamTransport] {.async.} =
raise newException(TransportPoolError, "Pool is not ready!")
result = transp
proc release*(pool: TransportPool, transp: StreamTransport) =
proc release*(
pool: TransportPool, transp: StreamTransport
) {.async: (raises: [TransportPoolError]).} =
## Release connection ``transp`` back to pool ``pool``.
if pool.state in {Connected, Closing}:
var found = false
@@ -118,7 +122,9 @@ proc release*(pool: TransportPool, transp: StreamTransport) =
else:
raise newException(TransportPoolError, "Pool is not ready!")
proc join*(pool: TransportPool) {.async.} =
proc join*(
pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
## Wait for all connections to become available.
if pool.state in {Connected, Closing}:
while true:
@@ -130,7 +136,9 @@ proc join*(pool: TransportPool) {.async.} =
elif pool.state == Connecting:
raise newException(TransportPoolError, "Pool is not ready!")
proc close*(pool: TransportPool) {.async.} =
proc close*(
pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
## Closes transports pool ``pool`` and releases all resources.
if pool.state == Connected:
pool.state = Closing
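A lifecycle sketch under the new signatures (inside an async proc; the target address is illustrative, and `release` is now async and, like `acquire`, may raise TransportPoolError):
let pool = await newPool(initTAddress("127.0.0.1:5000"))
let transp = await pool.acquire()
try:
  discard await transp.write("ping")
finally:
  await pool.release(transp)
await pool.close()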

View File

@@ -10,12 +10,14 @@
{.push raises: [].}
import chronos
import stew/results
import results
import peerid, stream/connection, transports/transport
export results
type Dial* = ref object of RootObj
type
Dial* = ref object of RootObj
DialFailedError* = object of LPError
method connect*(
self: Dial,
@@ -24,28 +26,28 @@ method connect*(
forceDial = false,
reuseConnection = true,
dir = Direction.Out,
) {.async, base.} =
) {.base, async: (raises: [DialFailedError, CancelledError]).} =
## connect remote peer without negotiating
## a protocol
##
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.connect] abstract method not implemented!")
method connect*(
self: Dial, address: MultiAddress, allowUnknownPeerId = false
): Future[PeerId] {.async, base.} =
): Future[PeerId] {.base, async: (raises: [DialFailedError, CancelledError]).} =
## Connects to a peer and retrieves its PeerId
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.connect] abstract method not implemented!")
method dial*(
self: Dial, peerId: PeerId, protos: seq[string]
): Future[Connection] {.async, base.} =
): Future[Connection] {.base, async: (raises: [DialFailedError, CancelledError]).} =
## create a protocol stream over an
## existing connection
##
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.dial] abstract method not implemented!")
method dial*(
self: Dial,
@@ -53,17 +55,19 @@ method dial*(
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false,
): Future[Connection] {.async, base.} =
): Future[Connection] {.base, async: (raises: [DialFailedError, CancelledError]).} =
## create a protocol stream and establish
## a connection if one doesn't exist already
##
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.dial] abstract method not implemented!")
method addTransport*(self: Dial, transport: Transport) {.base.} =
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.addTransport] abstract method not implemented!")
method tryDial*(
self: Dial, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.async, base.} =
doAssert(false, "Not implemented!")
): Future[Opt[MultiAddress]] {.
base, async: (raises: [DialFailedError, CancelledError])
.} =
doAssert(false, "[Dial.tryDial] abstract method not implemented!")

View File

@@ -9,8 +9,7 @@
import std/tables
import stew/results
import pkg/[chronos, chronicles, metrics]
import pkg/[chronos, chronicles, metrics, results]
import
dial,
@@ -36,16 +35,13 @@ declareCounter(libp2p_total_dial_attempts, "total attempted dials")
declareCounter(libp2p_successful_dials, "dialed successful peers")
declareCounter(libp2p_failed_dials, "failed dials")
type
DialFailedError* = object of LPError
Dialer* = ref object of Dial
localPeerId*: PeerId
connManager: ConnManager
dialLock: Table[PeerId, AsyncLock]
transports: seq[Transport]
peerStore: PeerStore
nameResolver: NameResolver
type Dialer* = ref object of Dial
localPeerId*: PeerId
connManager: ConnManager
dialLock: Table[PeerId, AsyncLock]
transports: seq[Transport]
peerStore: PeerStore
nameResolver: NameResolver
proc dialAndUpgrade(
self: Dialer,
@@ -53,7 +49,7 @@ proc dialAndUpgrade(
hostname: string,
address: MultiAddress,
dir = Direction.Out,
): Future[Muxer] {.async.} =
): Future[Muxer] {.async: (raises: [CancelledError]).} =
for transport in self.transports: # for each transport
if transport.handles(address): # check if it can dial it
trace "Dialing address", address, peerId = peerId.get(default(PeerId)), hostname
@@ -105,7 +101,9 @@ proc dialAndUpgrade(
proc expandDnsAddr(
self: Dialer, peerId: Opt[PeerId], address: MultiAddress
): Future[seq[(MultiAddress, Opt[PeerId])]] {.async.} =
): Future[seq[(MultiAddress, Opt[PeerId])]] {.
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
.} =
if not DNSADDR.matchPartial(address):
return @[(address, peerId)]
if isNil(self.nameResolver):
@@ -115,7 +113,10 @@ proc expandDnsAddr(
let
toResolve =
if peerId.isSome:
address & MultiAddress.init(multiCodec("p2p"), peerId.tryGet()).tryGet()
try:
address & MultiAddress.init(multiCodec("p2p"), peerId.tryGet()).tryGet()
except ResultError[void]:
raiseAssert "checked with if"
else:
address
resolved = await self.nameResolver.resolveDnsAddr(toResolve)
@@ -123,16 +124,22 @@ proc expandDnsAddr(
for resolvedAddress in resolved:
let lastPart = resolvedAddress[^1].tryGet()
if lastPart.protoCode == Result[MultiCodec, string].ok(multiCodec("p2p")):
let
var peerIdBytes: seq[byte]
try:
peerIdBytes = lastPart.protoArgument().tryGet()
addrPeerId = PeerId.init(peerIdBytes).tryGet()
except ResultError[string] as e:
raiseAssert "expandDnsAddr failed in expandDnsAddr protoArgument: " & e.msg
let addrPeerId = PeerId.init(peerIdBytes).tryGet()
result.add((resolvedAddress[0 ..^ 2].tryGet(), Opt.some(addrPeerId)))
else:
result.add((resolvedAddress, peerId))
proc dialAndUpgrade(
self: Dialer, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
): Future[Muxer] {.async.} =
): Future[Muxer] {.
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
.} =
debug "Dialing peer", peerId = peerId.get(default(PeerId)), addrs
for rawAddress in addrs:
@@ -169,47 +176,69 @@ proc internalConnect(
forceDial: bool,
reuseConnection = true,
dir = Direction.Out,
): Future[Muxer] {.async.} =
): Future[Muxer] {.async: (raises: [DialFailedError, CancelledError]).} =
if Opt.some(self.localPeerId) == peerId:
raise newException(CatchableError, "can't dial self!")
raise newException(DialFailedError, "internalConnect can't dial self!")
# Ensure there's only one in-flight attempt per peer
let lock = self.dialLock.mgetOrPut(peerId.get(default(PeerId)), newAsyncLock())
try:
await lock.acquire()
if reuseConnection:
peerId.withValue(peerId):
self.tryReusingConnection(peerId).withValue(mux):
return mux
let slot = self.connManager.getOutgoingSlot(forceDial)
let muxed =
try:
await self.dialAndUpgrade(peerId, addrs, dir)
except CatchableError as exc:
slot.release()
raise exc
slot.trackMuxer(muxed)
if isNil(muxed): # None of the addresses connected
raise newException(DialFailedError, "Unable to establish outgoing link")
await lock.acquire()
defer:
try:
self.connManager.storeMuxer(muxed)
await self.peerStore.identify(muxed)
await self.connManager.triggerPeerEvents(
muxed.connection.peerId,
PeerEvent(kind: PeerEventKind.Identified, initiator: true),
)
except CatchableError as exc:
trace "Failed to finish outgoung upgrade", description = exc.msg
await muxed.close()
raise exc
return muxed
finally:
if lock.locked():
lock.release()
except AsyncLockError as e:
raiseAssert "lock must have been acquired in line above: " & e.msg
if reuseConnection:
peerId.withValue(peerId):
self.tryReusingConnection(peerId).withValue(mux):
return mux
let slot =
try:
self.connManager.getOutgoingSlot(forceDial)
except TooManyConnectionsError as exc:
raise newException(
DialFailedError, "failed getOutgoingSlot in internalConnect: " & exc.msg, exc
)
let muxed =
try:
await self.dialAndUpgrade(peerId, addrs, dir)
except CancelledError as exc:
slot.release()
raise exc
except CatchableError as exc:
slot.release()
raise newException(
DialFailedError, "failed dialAndUpgrade in internalConnect: " & exc.msg, exc
)
slot.trackMuxer(muxed)
if isNil(muxed): # None of the addresses connected
raise newException(
DialFailedError, "Unable to establish outgoing link in internalConnect"
)
try:
self.connManager.storeMuxer(muxed)
await self.peerStore.identify(muxed)
await self.connManager.triggerPeerEvents(
muxed.connection.peerId,
PeerEvent(kind: PeerEventKind.Identified, initiator: true),
)
return muxed
except CancelledError as exc:
await muxed.close()
raise exc
except CatchableError as exc:
trace "Failed to finish outgoing upgrade", description = exc.msg
await muxed.close()
raise newException(
DialFailedError,
"Failed to finish outgoing upgrade in internalConnect: " & exc.msg,
exc,
)
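The per-peer locking discipline above, in isolation (sketch, inside an async proc; chronos' release raises AsyncLockError only when the lock is not held, which the acquire on the previous line rules out):
let lock = newAsyncLock()
await lock.acquire()
defer:
  try:
    lock.release()
  except AsyncLockError as e:
    raiseAssert "lock was acquired above: " & e.msg
# at most one dial attempt per peer runs while the lock is held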
method connect*(
self: Dialer,
@@ -218,7 +247,7 @@ method connect*(
forceDial = false,
reuseConnection = true,
dir = Direction.Out,
) {.async.} =
) {.async: (raises: [DialFailedError, CancelledError]).} =
## connect remote peer without negotiating
## a protocol
##
@@ -231,7 +260,7 @@ method connect*(
method connect*(
self: Dialer, address: MultiAddress, allowUnknownPeerId = false
): Future[PeerId] {.async.} =
): Future[PeerId] {.async: (raises: [DialFailedError, CancelledError]).} =
## Connects to a peer and retrieves its PeerId
parseFullAddress(address).toOpt().withValue(fullAddress):
@@ -241,7 +270,7 @@ method connect*(
if allowUnknownPeerId == false:
raise newException(
DialFailedError, "Address without PeerID and unknown peer id disabled!"
DialFailedError, "Address without PeerID and unknown peer id disabled in connect"
)
return
@@ -249,18 +278,18 @@ method connect*(
proc negotiateStream(
self: Dialer, conn: Connection, protos: seq[string]
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [CatchableError]).} =
trace "Negotiating stream", conn, protos
let selected = await MultistreamSelect.select(conn, protos)
if not protos.contains(selected):
await conn.closeWithEOF()
raise newException(DialFailedError, "Unable to select sub-protocol " & $protos)
raise newException(DialFailedError, "Unable to select sub-protocol: " & $protos)
return conn
method tryDial*(
self: Dialer, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.async.} =
): Future[Opt[MultiAddress]] {.async: (raises: [DialFailedError, CancelledError]).} =
## Create a protocol stream in order to check
## if a connection is possible.
## Doesn't use the Connection Manager to save it.
@@ -270,27 +299,37 @@ method tryDial*(
try:
let mux = await self.dialAndUpgrade(Opt.some(peerId), addrs)
if mux.isNil():
raise newException(DialFailedError, "No valid multiaddress")
raise newException(DialFailedError, "No valid multiaddress in tryDial")
await mux.close()
return mux.connection.observedAddr
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise newException(DialFailedError, exc.msg)
raise newException(DialFailedError, "tryDial failed: " & exc.msg, exc)
method dial*(
self: Dialer, peerId: PeerId, protos: seq[string]
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [DialFailedError, CancelledError]).} =
## create a protocol stream over an
## existing connection
##
trace "Dialing (existing)", peerId, protos
let stream = await self.connManager.getStream(peerId)
if stream.isNil:
raise newException(DialFailedError, "Couldn't get muxed stream")
return await self.negotiateStream(stream, protos)
try:
let stream = await self.connManager.getStream(peerId)
if stream.isNil:
raise newException(
DialFailedError,
"Couldn't get muxed stream in dial for peer_id: " & shortLog(peerId),
)
return await self.negotiateStream(stream, protos)
except CancelledError as exc:
trace "Dial canceled", description = exc.msg
raise exc
except CatchableError as exc:
trace "Error dialing", description = exc.msg
raise newException(DialFailedError, "failed dial existing: " & exc.msg)
method dial*(
self: Dialer,
@@ -298,7 +337,7 @@ method dial*(
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false,
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [DialFailedError, CancelledError]).} =
## create a protocol stream and establish
## a connection if one doesn't exist already
##
@@ -307,7 +346,7 @@ method dial*(
conn: Muxer
stream: Connection
proc cleanup() {.async.} =
proc cleanup() {.async: (raises: []).} =
if not (isNil(stream)):
await stream.closeWithEOF()
@@ -321,17 +360,20 @@ method dial*(
stream = await self.connManager.getStream(conn)
if isNil(stream):
raise newException(DialFailedError, "Couldn't get muxed stream")
raise newException(
DialFailedError,
"Couldn't get muxed stream in new dial for remote_peer_id: " & shortLog(peerId),
)
return await self.negotiateStream(stream, protos)
except CancelledError as exc:
trace "Dial canceled", conn
trace "Dial canceled", conn, description = exc.msg
await cleanup()
raise exc
except CatchableError as exc:
debug "Error dialing", conn, description = exc.msg
await cleanup()
raise exc
raise newException(DialFailedError, "failed new dial: " & exc.msg, exc)
method addTransport*(self: Dialer, t: Transport) =
self.transports &= t
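From a caller's perspective only DialFailedError and CancelledError can now escape (sketch, inside an async proc; `dialer`, `peerId`, `addrs` and the protocol string are illustrative):
try:
  let conn = await dialer.dial(peerId, addrs, @["/my/proto/1.0.0"])
  # ... use the negotiated stream ...
  await conn.close()
except DialFailedError as e:
  echo "dial failed: ", e.msg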

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import std/sequtils
import chronos, chronicles, stew/results
import chronos, chronicles, results
import ../errors
type
@@ -38,8 +38,7 @@ proc add*[T](pa: var PeerAttributes, value: T) =
Attribute[T](
value: value,
comparator: proc(f: BaseAttr, c: BaseAttr): bool =
f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T)
,
f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T),
)
)
@@ -60,7 +59,7 @@ proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [KeyError].} =
pa{T}.valueOr:
raise newException(KeyError, "Attritute not found")
raise newException(KeyError, "Attribute not found")
proc match*(pa, candidate: PeerAttributes): bool =
for f in pa.attributes:
@@ -80,16 +79,21 @@ type
advertisementUpdated*: AsyncEvent
advertiseLoop*: Future[void]
method request*(self: DiscoveryInterface, pa: PeerAttributes) {.async, base.} =
doAssert(false, "Not implemented!")
method advertise*(self: DiscoveryInterface) {.async, base.} =
doAssert(false, "Not implemented!")
type
DiscoveryError* = object of LPError
DiscoveryFinished* = object of LPError
AdvertiseError* = object of DiscoveryError
method request*(
self: DiscoveryInterface, pa: PeerAttributes
) {.base, async: (raises: [DiscoveryError, CancelledError]).} =
doAssert(false, "[DiscoveryInterface.request] abstract method not implemented!")
method advertise*(
self: DiscoveryInterface
) {.base, async: (raises: [CancelledError, AdvertiseError]).} =
doAssert(false, "[DiscoveryInterface.advertise] abstract method not implemented!")
type
DiscoveryQuery* = ref object
attr: PeerAttributes
peers: AsyncQueue[PeerAttributes]
@@ -109,7 +113,7 @@ proc add*(dm: DiscoveryManager, di: DiscoveryInterface) =
try:
query.peers.putNoWait(pa)
except AsyncQueueFullError as exc:
debug "Cannot push discovered peer to queue"
debug "Cannot push discovered peer to queue", description = exc.msg
proc request*(dm: DiscoveryManager, pa: PeerAttributes): DiscoveryQuery =
var query = DiscoveryQuery(attr: pa, peers: newAsyncQueue[PeerAttributes]())
@@ -138,7 +142,9 @@ template forEach*(query: DiscoveryQuery, code: untyped) =
## peer attributes are available through the variable
## `peer`
proc forEachInternal(q: DiscoveryQuery) {.async.} =
proc forEachInternal(
q: DiscoveryQuery
) {.async: (raises: [CancelledError, DiscoveryError]).} =
while true:
let peer {.inject.} =
try:
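Put together, a request/consume cycle looks roughly like this (sketch; assumes a DiscoveryManager `dm` with an interface already added, and uses DiscoveryService as the lookup attribute):
var attrs: PeerAttributes
attrs.add(DiscoveryService("my-service"))
let query = dm.request(attrs)
query.forEach:
  echo "discovered peer: ", peer{PeerId}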
@@ -163,7 +169,11 @@ proc stop*(dm: DiscoveryManager) =
continue
i.advertiseLoop.cancel()
proc getPeer*(query: DiscoveryQuery): Future[PeerAttributes] {.async.} =
proc getPeer*(
query: DiscoveryQuery
): Future[PeerAttributes] {.
async: (raises: [CancelledError, DiscoveryError, DiscoveryFinished])
.} =
let getter = query.peers.popFirst()
try:

View File

@@ -23,15 +23,17 @@ type
proc `==`*(a, b: RdvNamespace): bool {.borrow.}
method request*(self: RendezVousInterface, pa: PeerAttributes) {.async.} =
var namespace = ""
method request*(
self: RendezVousInterface, pa: PeerAttributes
) {.async: (raises: [DiscoveryError, CancelledError]).} =
var namespace = Opt.none(string)
for attr in pa:
if attr.ofType(RdvNamespace):
namespace = string attr.to(RdvNamespace)
namespace = Opt.some(string attr.to(RdvNamespace))
elif attr.ofType(DiscoveryService):
namespace = string attr.to(DiscoveryService)
namespace = Opt.some(string attr.to(DiscoveryService))
elif attr.ofType(PeerId):
namespace = $attr.to(PeerId)
namespace = Opt.some($attr.to(PeerId))
else:
# unhandled type
return
@@ -42,13 +44,15 @@ method request*(self: RendezVousInterface, pa: PeerAttributes) {.async.} =
for address in pr.addresses:
peer.add(address.address)
peer.add(DiscoveryService(namespace))
peer.add(RdvNamespace(namespace))
peer.add(DiscoveryService(namespace.get()))
peer.add(RdvNamespace(namespace.get()))
self.onPeerFound(peer)
await sleepAsync(self.timeToRequest)
method advertise*(self: RendezVousInterface) {.async.} =
method advertise*(
self: RendezVousInterface
) {.async: (raises: [CancelledError, AdvertiseError]).} =
while true:
var toAdvertise: seq[string]
for attr in self.toAdvertise:

View File

@@ -26,7 +26,7 @@ import
errors,
utility
import stew/[base58, base32, endians2]
export results, minprotobuf, vbuffer, errors, utility
export results, vbuffer, errors, utility
logScope:
topics = "libp2p multiaddress"
@@ -171,6 +171,18 @@ proc ip6zoneVB(vb: var VBuffer): bool =
## IPv6 zone validateBuffer() implementation.
pathValidateBufferNoSlash(vb)
proc memoryStB(s: string, vb: var VBuffer): bool =
## Memory stringToBuffer() implementation.
pathStringToBuffer(s, vb)
proc memoryBtS(vb: var VBuffer, s: var string): bool =
## Memory bufferToString() implementation.
pathBufferToString(vb, s)
proc memoryVB(vb: var VBuffer): bool =
## Memory validateBuffer() implementation.
pathValidateBuffer(vb)
proc portStB(s: string, vb: var VBuffer): bool =
## Port number stringToBuffer() implementation.
var port: array[2, byte]
@@ -355,6 +367,10 @@ const
)
TranscoderDNS* =
Transcoder(stringToBuffer: dnsStB, bufferToString: dnsBtS, validateBuffer: dnsVB)
TranscoderMemory* = Transcoder(
stringToBuffer: memoryStB, bufferToString: memoryBtS, validateBuffer: memoryVB
)
ProtocolsList = [
MAProtocol(mcodec: multiCodec("ip4"), kind: Fixed, size: 4, coder: TranscoderIP4),
MAProtocol(mcodec: multiCodec("tcp"), kind: Fixed, size: 2, coder: TranscoderPort),
@@ -393,6 +409,9 @@ const
MAProtocol(mcodec: multiCodec("p2p-websocket-star"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("p2p-webrtc-star"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("p2p-webrtc-direct"), kind: Marker, size: 0),
MAProtocol(
mcodec: multiCodec("memory"), kind: Path, size: 0, coder: TranscoderMemory
),
]
DNSANY* = mapEq("dns")
@@ -453,6 +472,8 @@ const
CircuitRelay* = mapEq("p2p-circuit")
Memory* = mapEq("memory")
proc initMultiAddressCodeTable(): Table[MultiCodec, MAProtocol] {.compileTime.} =
for item in ProtocolsList:
result[item.mcodec] = item
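The new protocol in action (sketch; the address string is illustrative — like other Path-coded protocols, /memory carries a single free-form segment):
let ma = MultiAddress.init("/memory/conn-1").tryGet()
doAssert Memory.match(ma)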

View File

@@ -16,7 +16,8 @@
{.push raises: [].}
import tables
import stew/[base32, base58, base64, results]
import results
import stew/[base32, base58, base64]
type
MultiBaseStatus* {.pure.} = enum

View File

@@ -10,10 +10,11 @@
## This module implements MultiCodec.
{.push raises: [].}
{.used.}
import tables, hashes
import vbuffer
import stew/results
import results
export results
## List of officially supported codecs can be found here
@@ -396,6 +397,7 @@ const MultiCodecList = [
("onion3", 0x01BD),
("p2p-circuit", 0x0122),
("libp2p-peer-record", 0x0301),
("memory", 0x0309),
("dns", 0x35),
("dns4", 0x36),
("dns6", 0x37),
@@ -403,6 +405,7 @@ const MultiCodecList = [
# IPLD formats
("dag-pb", 0x70),
("dag-cbor", 0x71),
("libp2p-key", 0x72),
("dag-json", 0x129),
("git-raw", 0x78),
("eth-block", 0x90),

View File

@@ -22,12 +22,13 @@
## 2. MURMUR
{.push raises: [].}
{.used.}
import tables
import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
import varint, vbuffer, multicodec, multibase
import stew/base58
import stew/results
import results
export results
# This is a workaround for a Nim `import` bug.
export sha, sha2, keccak, blake2, hash, utils
@@ -566,7 +567,7 @@ proc init*(mhtype: typedesc[MultiHash], data: string): MhResult[MultiHash] {.inl
proc init58*(mhtype: typedesc[MultiHash], data: string): MultiHash {.inline.} =
## Create MultiHash from BASE58 encoded string representation ``data``.
if MultiHash.decode(Base58.decode(data), result) == -1:
raise newException(MultihashError, "Incorrect MultiHash binary format")
raise newException(MultihashError, "Incorrect MultiHash binary format in init58")
proc cmp(a: openArray[byte], b: openArray[byte]): bool {.inline.} =
if len(a) != len(b):

View File

@@ -87,7 +87,7 @@ proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
raise exc
except LPStreamError as exc:
await s.conn.close()
raise exc
raise newException(LPStreamError, "Opening LPChannel failed: " & exc.msg, exc)
method closed*(s: LPChannel): bool =
s.closedLocal
@@ -169,6 +169,7 @@ method readOnce*(
## channel must not be done from within a callback / read handler of another
## or the reads will lock each other.
if s.remoteReset:
trace "reset stream in readOnce", s
raise newLPStreamResetError()
if s.localReset:
raise newLPStreamClosedError()
@@ -201,6 +202,7 @@ proc prepareWrite(
# prepareWrite is the slow path of writing a message - see conditions in
# write
if s.remoteReset:
trace "stream is reset when prepareWrite", s
raise newLPStreamResetError()
if s.closedLocal:
raise newLPStreamClosedError()

View File

@@ -247,7 +247,7 @@ method close*(m: Mplex) {.async: (raises: []).} =
trace "Closed mplex", m
method getStreams*(m: Mplex): seq[Connection] =
method getStreams*(m: Mplex): seq[Connection] {.gcsafe.} =
for c in m.channels[false].values:
result.add(c)
for c in m.channels[true].values:

View File

@@ -52,7 +52,7 @@ method newStream*(
): Future[Connection] {.
base, async: (raises: [CancelledError, LPStreamError, MuxerError], raw: true)
.} =
raiseAssert("Not implemented!")
raiseAssert("[Muxer.newStream] abstract method not implemented!")
method close*(m: Muxer) {.base, async: (raises: []).} =
if m.connection != nil:
@@ -67,5 +67,5 @@ proc new*(
let muxerProvider = T(newMuxer: creator, codec: codec)
muxerProvider
method getStreams*(m: Muxer): seq[Connection] {.base.} =
raiseAssert("Not implemented!")
method getStreams*(m: Muxer): seq[Connection] {.base, gcsafe.} =
raiseAssert("[Muxer.getStreams] abstract method not implemented!")

View File

@@ -82,8 +82,7 @@ proc `$`(header: YamuxHeader): string =
if a != "":
a & ", " & $b
else:
$b
,
$b,
"",
) & "}, " & "streamId: " & $header.streamId & ", " & "length: " & $header.length &
"}"
@@ -176,8 +175,7 @@ proc `$`(channel: YamuxChannel): string =
if a != "":
a & ", " & b
else:
b
,
b,
"",
) & "}"
@@ -205,6 +203,7 @@ proc remoteClosed(channel: YamuxChannel) {.async: (raises: []).} =
if not channel.closedRemotely.isSet():
channel.closedRemotely.fire()
await channel.actuallyClose()
channel.isClosedRemotely = true
method closeImpl*(channel: YamuxChannel) {.async: (raises: []).} =
if not channel.closedLocally:
@@ -270,10 +269,13 @@ method readOnce*(
if channel.isReset:
raise
if channel.remoteReset:
trace "stream is remote reset when readOnce", channel = $channel
newLPStreamResetError()
elif channel.closedLocally:
trace "stream is closed locally when readOnce", channel = $channel
newLPStreamClosedError()
else:
trace "stream is down when readOnce", channel = $channel
newLPStreamConnDownError()
if channel.isEof:
raise newLPStreamRemoteClosedError()
@@ -397,6 +399,7 @@ method write*(
##
result = newFuture[void]("Yamux Send")
if channel.remoteReset:
trace "stream is reset when write", channel = $channel
result.fail(newLPStreamResetError())
return result
if channel.closedLocally or channel.isReset:
@@ -509,7 +512,14 @@ method close*(m: Yamux) {.async: (raises: []).} =
trace "Closing yamux"
let channels = toSeq(m.channels.values())
for channel in channels:
await channel.reset(isLocal = true)
for (d, s, fut) in channel.sendQueue:
fut.fail(newLPStreamEOFError())
channel.sendQueue = @[]
channel.sendWindow = 0
channel.closedLocally = true
channel.opened = false
await channel.remoteClosed()
channel.receivedData.fire()
try:
await m.connection.write(YamuxHeader.goAway(NormalTermination))
except CancelledError as exc:
@@ -584,10 +594,12 @@ method handle*(m: Yamux) {.async: (raises: []).} =
let channel =
try:
m.channels[header.streamId]
except KeyError:
except KeyError as e:
raise newException(
YamuxError,
"Stream was cleaned up before handling data: " & $header.streamId,
"Stream was cleaned up before handling data: " & $header.streamId & " : " &
e.msg,
e,
)
if header.msgType == WindowUpdate:
@@ -632,7 +644,7 @@ method handle*(m: Yamux) {.async: (raises: []).} =
await m.close()
trace "Stopped yamux handler"
method getStreams*(m: Yamux): seq[Connection] =
method getStreams*(m: Yamux): seq[Connection] {.gcsafe.} =
for c in m.channels.values:
result.add(c)

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -10,7 +10,7 @@
{.push raises: [].}
import
std/[streams, strutils, sets, sequtils],
std/[streams, sets, sequtils],
chronos,
chronicles,
stew/byteutils,
@@ -39,24 +39,32 @@ proc questionToBuf(address: string, kind: QKind): seq[byte] =
var buf = newSeq[byte](dataLen)
discard requestStream.readData(addr buf[0], dataLen)
return buf
except CatchableError as exc:
buf
except IOError as exc:
info "Failed to created DNS buffer", description = exc.msg
return newSeq[byte](0)
newSeq[byte](0)
except OSError as exc:
info "Failed to created DNS buffer", description = exc.msg
newSeq[byte](0)
except ValueError as exc:
info "Failed to created DNS buffer", description = exc.msg
newSeq[byte](0)
proc getDnsResponse(
dnsServer: TransportAddress, address: string, kind: QKind
): Future[Response] {.async.} =
): Future[Response] {.
async: (raises: [CancelledError, IOError, OSError, TransportError, ValueError])
.} =
var sendBuf = questionToBuf(address, kind)
if sendBuf.len == 0:
raise newException(ValueError, "Incorrect DNS query")
let receivedDataFuture = newFuture[void]()
let receivedDataFuture = Future[void].Raising([CancelledError]).init()
proc datagramDataReceived(
transp: DatagramTransport, raddr: TransportAddress
): Future[void] {.async, closure.} =
): Future[void] {.async: (raises: []).} =
receivedDataFuture.complete()
let sock =
@@ -68,27 +76,41 @@ proc getDnsResponse(
try:
await sock.sendTo(dnsServer, addr sendBuf[0], sendBuf.len)
await receivedDataFuture or sleepAsync(5.seconds) #unix default
if not receivedDataFuture.finished:
raise newException(IOError, "DNS server timeout")
try:
await receivedDataFuture.wait(5.seconds) #unix default
except AsyncTimeoutError as e:
raise newException(IOError, "DNS server timeout: " & e.msg, e)
let rawResponse = sock.getMessage()
# parseResponse can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
return exceptionToAssert:
try:
parseResponse(string.fromBytes(rawResponse))
except IOError as exc:
raise newException(IOError, "Failed to parse DNS response: " & exc.msg, exc)
except OSError as exc:
raise newException(OSError, "Failed to parse DNS response: " & exc.msg, exc)
except ValueError as exc:
raise newException(ValueError, "Failed to parse DNS response: " & exc.msg, exc)
except Exception as exc:
# Nim 1.6: parseResponse can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert "Exception parsing DN response: " & exc.msg
finally:
await sock.closeWait()
method resolveIp*(
self: DnsResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.async.} =
): Future[seq[TransportAddress]] {.
async: (raises: [CancelledError, TransportAddressError])
.} =
trace "Resolving IP using DNS", address, servers = self.nameServers.mapIt($it), domain
for _ in 0 ..< self.nameServers.len:
let server = self.nameServers[0]
var responseFutures: seq[Future[Response]]
var responseFutures: seq[
Future[Response].Raising(
[CancelledError, IOError, OSError, TransportError, ValueError]
)
]
if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
responseFutures.add(getDnsResponse(server, address, A))
@@ -103,23 +125,32 @@ method resolveIp*(
var
resolvedAddresses: OrderedSet[string]
resolveFailed = false
template handleFail(e): untyped =
info "Failed to query DNS", address, error = e.msg
resolveFailed = true
break
for fut in responseFutures:
try:
let resp = await fut
for answer in resp.answers:
# toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
resolvedAddresses.incl(exceptionToAssert(answer.toString()))
resolvedAddresses.incl(answer.toString())
except CancelledError as e:
raise e
except ValueError as e:
info "Invalid DNS query", address, error = e.msg
return @[]
except CatchableError as e:
info "Failed to query DNS", address, error = e.msg
resolveFailed = true
break
except IOError as e:
handleFail(e)
except OSError as e:
handleFail(e)
except TransportError as e:
handleFail(e)
except Exception as e:
# Nim 1.6: answer.toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert e.msg
if resolveFailed:
self.nameServers.add(self.nameServers[0])
@@ -132,27 +163,39 @@ method resolveIp*(
debug "Failed to resolve address, returning empty set"
return @[]
method resolveTxt*(self: DnsResolver, address: string): Future[seq[string]] {.async.} =
method resolveTxt*(
self: DnsResolver, address: string
): Future[seq[string]] {.async: (raises: [CancelledError]).} =
trace "Resolving TXT using DNS", address, servers = self.nameServers.mapIt($it)
for _ in 0 ..< self.nameServers.len:
let server = self.nameServers[0]
try:
# toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
let response = await getDnsResponse(server, address, TXT)
return exceptionToAssert:
trace "Got TXT response",
server = $server, answer = response.answers.mapIt(it.toString())
response.answers.mapIt(it.toString())
except CancelledError as e:
raise e
except CatchableError as e:
template handleFail(e): untyped =
info "Failed to query DNS", address, error = e.msg
self.nameServers.add(self.nameServers[0])
self.nameServers.delete(0)
continue
try:
let response = await getDnsResponse(server, address, TXT)
trace "Got TXT response",
server = $server, answer = response.answers.mapIt(it.toString())
return response.answers.mapIt(it.toString())
except CancelledError as e:
raise e
except IOError as e:
handleFail(e)
except OSError as e:
handleFail(e)
except TransportError as e:
handleFail(e)
except ValueError as e:
handleFail(e)
except Exception as e:
# Nim 1.6: toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert e.msg
debug "Failed to resolve TXT, returning empty set"
return @[]
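Caller-side sketch of the narrowed resolver surface (inside an async proc; the nameserver and hostnames are illustrative, and a DnsResolver.new taking the nameserver list is assumed from this module):
let resolver = DnsResolver.new(@[initTAddress("1.1.1.1:53")])
let addrs = await resolver.resolveIp("example.com", Port(443))
let txts = await resolver.resolveTxt("_dnsaddr.example.com")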

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -25,17 +25,25 @@ type MockResolver* = ref object of NameResolver
method resolveIp*(
self: MockResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.async.} =
): Future[seq[TransportAddress]] {.
async: (raises: [CancelledError, TransportAddressError])
.} =
var res: seq[TransportAddress]
if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
for resp in self.ipResponses.getOrDefault((address, false)):
result.add(initTAddress(resp, port))
res.add(initTAddress(resp, port))
if domain == Domain.AF_INET6 or domain == Domain.AF_UNSPEC:
for resp in self.ipResponses.getOrDefault((address, true)):
result.add(initTAddress(resp, port))
res.add(initTAddress(resp, port))
method resolveTxt*(self: MockResolver, address: string): Future[seq[string]] {.async.} =
return self.txtResponses.getOrDefault(address)
res
method resolveTxt*(
self: MockResolver, address: string
): Future[seq[string]] {.async: (raises: [CancelledError]).} =
self.txtResponses.getOrDefault(address)
proc new*(T: typedesc[MockResolver]): T =
T()
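A test-side sketch (inside an async proc; assumes the ipResponses table is exported and keyed as (hostname, isIpv6), matching the lookups above):
let resolver = MockResolver.new()
resolver.ipResponses[("example.com", false)] = @["127.0.0.1"]
let addrs = await resolver.resolveIp("example.com", Port(443))
doAssert addrs == @[initTAddress("127.0.0.1:443")]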

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -9,7 +9,7 @@
{.push raises: [].}
import std/[sugar, sets, sequtils, strutils]
import std/[sets, sequtils, strutils]
import chronos, chronicles, stew/endians2
import ".."/[multiaddress, multicodec]
@@ -20,19 +20,17 @@ type NameResolver* = ref object of RootObj
method resolveTxt*(
self: NameResolver, address: string
): Future[seq[string]] {.async, base.} =
): Future[seq[string]] {.async: (raises: [CancelledError]), base.} =
## Get TXT record
##
doAssert(false, "Not implemented!")
raiseAssert "[NameResolver.resolveTxt] abstract method not implemented!"
method resolveIp*(
self: NameResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.async, base.} =
): Future[seq[TransportAddress]] {.
async: (raises: [CancelledError, TransportAddressError]), base
.} =
## Resolve the specified address
##
doAssert(false, "Not implemented!")
raiseAssert "[NameResolver.resolveIp] abstract method not implemented!"
proc getHostname*(ma: MultiAddress): string =
let
@@ -46,30 +44,40 @@ proc getHostname*(ma: MultiAddress): string =
proc resolveOneAddress(
self: NameResolver, ma: MultiAddress, domain: Domain = Domain.AF_UNSPEC, prefix = ""
): Future[seq[MultiAddress]] {.async.} =
#Resolve a single address
): Future[seq[MultiAddress]] {.
async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
# Resolve a single address
let portPart = ma[1].valueOr:
raise maErr error
var pbuf: array[2, byte]
var dnsval = getHostname(ma)
if ma[1].tryGet().protoArgument(pbuf).tryGet() == 0:
raise newException(MaError, "Incorrect port number")
let plen = portPart.protoArgument(pbuf).valueOr:
raise maErr error
if plen == 0:
raise maErr "Incorrect port number"
let
port = Port(fromBytesBE(uint16, pbuf))
dnsval = getHostname(ma)
resolvedAddresses = await self.resolveIp(prefix & dnsval, port, domain)
return collect(newSeqOfCap(4)):
for address in resolvedAddresses:
var createdAddress = MultiAddress.init(address).tryGet()[0].tryGet()
for part in ma:
if DNS.match(part.tryGet()):
continue
createdAddress &= part.tryGet()
createdAddress
resolvedAddresses.mapIt:
let address = MultiAddress.init(it).valueOr:
raise maErr error
var createdAddress = address[0].valueOr:
raise maErr error
for part in ma:
let part = part.valueOr:
raise maErr error
if DNS.match(part):
continue
createdAddress &= part
createdAddress
proc resolveDnsAddr*(
self: NameResolver, ma: MultiAddress, depth: int = 0
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.
async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
if not DNSADDR.matchPartial(ma):
return @[ma]
@@ -78,54 +86,67 @@ proc resolveDnsAddr*(
info "Stopping DNSADDR recursion, probably malicious", ma
return @[]
var dnsval = getHostname(ma)
let txt = await self.resolveTxt("_dnsaddr." & dnsval)
let
dnsval = getHostname(ma)
txt = await self.resolveTxt("_dnsaddr." & dnsval)
trace "txt entries", txt
var result: seq[MultiAddress]
const codec = multiCodec("p2p")
let maCodec = block:
let hasCodec = ma.contains(codec).valueOr:
raise maErr error
if hasCodec:
ma[codec]
else:
(static(default(MaResult[MultiAddress])))
var res: seq[MultiAddress]
for entry in txt:
if not entry.startsWith("dnsaddr="):
continue
let entryValue = MultiAddress.init(entry[8 ..^ 1]).tryGet()
if entryValue.contains(multiCodec("p2p")).tryGet() and
ma.contains(multiCodec("p2p")).tryGet():
if entryValue[multiCodec("p2p")] != ma[multiCodec("p2p")]:
continue
let
entryValue = MultiAddress.init(entry[8 ..^ 1]).valueOr:
raise maErr error
entryHasCodec = entryValue.contains(multiCodec("p2p")).valueOr:
raise maErr error
if entryHasCodec and maCodec.isOk and entryValue[codec] != maCodec:
continue
let resolved = await self.resolveDnsAddr(entryValue, depth + 1)
for r in resolved:
result.add(r)
res.add(r)
if result.len == 0:
if res.len == 0:
debug "Failed to resolve a DNSADDR", ma
return @[]
return result
res
proc resolveMAddress*(
self: NameResolver, address: MultiAddress
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.
async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
var res = initOrderedSet[MultiAddress]()
if not DNS.matchPartial(address):
res.incl(address)
else:
let code = address[0].tryGet().protoCode().tryGet()
let seq =
case code
of multiCodec("dns"):
await self.resolveOneAddress(address)
of multiCodec("dns4"):
await self.resolveOneAddress(address, Domain.AF_INET)
of multiCodec("dns6"):
await self.resolveOneAddress(address, Domain.AF_INET6)
of multiCodec("dnsaddr"):
await self.resolveDnsAddr(address)
else:
assert false
@[address]
for ad in seq:
let
firstPart = address[0].valueOr:
raise maErr error
code = firstPart.protoCode().valueOr:
raise maErr error
ads =
case code
of multiCodec("dns"):
await self.resolveOneAddress(address)
of multiCodec("dns4"):
await self.resolveOneAddress(address, Domain.AF_INET)
of multiCodec("dns6"):
await self.resolveOneAddress(address, Domain.AF_INET6)
of multiCodec("dnsaddr"):
await self.resolveDnsAddr(address)
else:
raise maErr("Unsupported codec " & $code)
for ad in ads:
res.incl(ad)
return res.toSeq
res.toSeq
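The entry point above in use (sketch, inside an async proc with a `resolver: NameResolver`; the address is illustrative):
let dnsMa = MultiAddress.init("/dns4/example.com/tcp/443").tryGet()
let resolved = await resolver.resolveMAddress(dnsMa)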

View File

@@ -11,10 +11,12 @@
{.push raises: [].}
{.push public.}
{.used.}
import
std/[hashes, strutils],
stew/[base58, results],
stew/base58,
results,
chronicles,
nimcrypto/utils,
utility,

View File

@@ -0,0 +1,335 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import base64, json, strutils, uri, times
import chronos, chronos/apps/http/httpclient, results, chronicles, bio
import ../peerinfo, ../crypto/crypto, ../varint.nim
logScope:
topics = "libp2p peeridauth"
const
NimLibp2pUserAgent = "nim-libp2p"
PeerIDAuthPrefix* = "libp2p-PeerID"
ChallengeCharset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
ChallengeDefaultLen = 48
type PeerIDAuthClient* = ref object of RootObj
session: HttpSessionRef
rng: ref HmacDrbgContext
type PeerIDAuthError* = object of LPError
type PeerIDAuthResponse* = object
status*: int
headers*: HttpTable
body*: seq[byte]
type BearerToken* = object
token*: string
expires*: Opt[DateTime]
type PeerIDAuthOpaque* = string
type PeerIDAuthSignature* = string
type PeerIDAuthChallenge* = string
type PeerIDAuthAuthenticationResponse* = object
challengeClient*: PeerIDAuthChallenge
opaque*: PeerIDAuthOpaque
serverPubkey*: PublicKey
type PeerIDAuthAuthorizationResponse* = object
sig*: PeerIDAuthSignature
bearer*: BearerToken
response*: PeerIDAuthResponse
type SigParam = object
k: string
v: seq[byte]
proc new*(T: typedesc[PeerIDAuthClient], rng: ref HmacDrbgContext): PeerIDAuthClient =
PeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)
proc sampleChar(
ctx: var HmacDrbgContext, choices: string
): char {.raises: [ValueError].} =
## Samples a random character from the input string using the DRBG context
if choices.len == 0:
raise newException(ValueError, "Cannot sample from an empty string")
var idx: uint32
ctx.generate(idx)
return choices[uint32(idx mod uint32(choices.len))]
proc randomChallenge(
rng: ref HmacDrbgContext, challengeLen: int = ChallengeDefaultLen
): PeerIDAuthChallenge {.raises: [PeerIDAuthError].} =
var rng = rng[]
var challenge = ""
try:
for _ in 0 ..< challengeLen:
challenge.add(rng.sampleChar(ChallengeCharset))
except ValueError as exc:
raise newException(PeerIDAuthError, "Failed to generate challenge", exc)
PeerIDAuthChallenge(challenge)
proc extractField(data, key: string): string {.raises: [PeerIDAuthError].} =
# Helper to extract the quoted value for a given key
for segment in data.split(","):
if key in segment:
return segment.split("=", 1)[1].strip(chars = {' ', '"'})
raise newException(PeerIDAuthError, "Failed to find " & key & " in " & data)
proc genDataToSign(
parts: seq[SigParam], prefix: string = PeerIDAuthPrefix
): seq[byte] {.raises: [PeerIDAuthError].} =
var buf: seq[byte] = prefix.toByteSeq()
for p in parts:
let varintLen = PB.encodeVarint(hint(p.k.len + p.v.len + 1)).valueOr:
raise newException(PeerIDAuthError, "could not encode fields length to varint")
buf.add varintLen
buf.add (p.k & "=").toByteSeq()
buf.add p.v
return buf
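# For example, SigParam(k: "hostname", v: "example.com".toByteSeq()) contributes
# varint(20) & "hostname=example.com" (8 + 1 + 11 = 20 bytes) to the payload,
# appended after the "libp2p-PeerID" prefix.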
proc getSigParams(
clientSender: bool, hostname: string, challenge: string, publicKey: PublicKey
): seq[SigParam] =
if clientSender:
@[
SigParam(k: "challenge-client", v: challenge.toByteSeq()),
SigParam(k: "hostname", v: hostname.toByteSeq()),
SigParam(k: "server-public-key", v: publicKey.getBytes().get()),
]
else:
@[
SigParam(k: "challenge-server", v: challenge.toByteSeq()),
SigParam(k: "client-public-key", v: publicKey.getBytes().get()),
SigParam(k: "hostname", v: hostname.toByteSeq()),
]
proc sign(
privateKey: PrivateKey,
challenge: PeerIDAuthChallenge,
publicKey: PublicKey,
hostname: string,
clientSender: bool = true,
): PeerIDAuthSignature {.raises: [PeerIDAuthError].} =
let bytesToSign =
getSigParams(clientSender, hostname, challenge, publicKey).genDataToSign()
PeerIDAuthSignature(
base64.encode(privateKey.sign(bytesToSign).get().getBytes(), safe = true)
)
proc checkSignature*(
serverSig: PeerIDAuthSignature,
serverPublicKey: PublicKey,
challengeServer: PeerIDAuthChallenge,
clientPublicKey: PublicKey,
hostname: string,
): bool {.raises: [PeerIDAuthError].} =
let bytesToSign =
getSigParams(false, hostname, challengeServer, clientPublicKey).genDataToSign()
var serverSignature: Signature
try:
if not serverSignature.init(base64.decode(serverSig).toByteSeq()):
raise newException(
PeerIDAuthError, "Failed to initialize Signature from base64 encoded sig"
)
except ValueError as exc:
raise newException(PeerIDAuthError, "Failed to decode server's signature", exc)
serverSignature.verify(
bytesToSign.toOpenArray(0, bytesToSign.len - 1), serverPublicKey
)
method post*(
self: PeerIDAuthClient, uri: string, payload: string, authHeader: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
let rawResponse = await HttpClientRequestRef
.post(
self.session,
uri,
body = payload,
headers = [
("Content-Type", "application/json"),
("User-Agent", NimLibp2pUserAgent),
("Authorization", authHeader),
],
)
.get()
.send()
PeerIDAuthResponse(
status: rawResponse.status,
headers: rawResponse.headers,
body: await rawResponse.getBodyBytes(),
)
method get*(
self: PeerIDAuthClient, uri: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
PeerIDAuthResponse(
status: rawResponse.status,
headers: rawResponse.headers,
body: await rawResponse.getBodyBytes(),
)
proc requestAuthentication*(
self: PeerIDAuthClient, uri: Uri
): Future[PeerIDAuthAuthenticationResponse] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
let response =
try:
await self.get($uri)
except HttpError as exc:
raise newException(PeerIDAuthError, "Failed to start PeerID Auth", exc)
let wwwAuthenticate = response.headers.getString("WWW-Authenticate")
if wwwAuthenticate == "":
raise newException(PeerIDAuthError, "WWW-authenticate not present in response")
let serverPubkey: PublicKey =
try:
PublicKey.init(decode(extractField(wwwAuthenticate, "public-key")).toByteSeq()).valueOr:
raise newException(PeerIDAuthError, "Failed to initialize server public-key")
except ValueError as exc:
raise newException(PeerIDAuthError, "Failed to decode server public-key", exc)
PeerIDAuthAuthenticationResponse(
challengeClient: extractField(wwwAuthenticate, "challenge-client"),
opaque: extractField(wwwAuthenticate, "opaque"),
serverPubkey: serverPubkey,
)
proc pubkeyBytes*(pubkey: PublicKey): seq[byte] {.raises: [PeerIDAuthError].} =
try:
pubkey.getBytes().valueOr:
raise
newException(PeerIDAuthError, "Failed to get bytes from PeerInfo's publicKey")
except ValueError as exc:
raise newException(
PeerIDAuthError, "Failed to get bytes from PeerInfo's publicKey", exc
)
proc parse3339DateTime(
timeStr: string
): DateTime {.raises: [ValueError, TimeParseError].} =
let parts = timeStr.split('.')
let base = parse(parts[0], "yyyy-MM-dd'T'HH:mm:ss")
let millis = parseInt(parts[1].strip(chars = {'Z'}))
result = base + initDuration(milliseconds = millis)
proc requestAuthorization*(
self: PeerIDAuthClient,
peerInfo: PeerInfo,
uri: Uri,
challengeClient: PeerIDAuthChallenge,
challengeServer: PeerIDAuthChallenge,
serverPubkey: PublicKey,
opaque: PeerIDAuthOpaque,
payload: auto,
): Future[PeerIDAuthAuthorizationResponse] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
let clientPubkeyB64 = peerInfo.publicKey.pubkeyBytes().encode(safe = true)
let sig = peerInfo.privateKey.sign(challengeClient, serverPubkey, uri.hostname)
let authHeader =
PeerIDAuthPrefix & " public-key=\"" & clientPubkeyB64 & "\"" & ", opaque=\"" & opaque &
"\"" & ", challenge-server=\"" & challengeServer & "\"" & ", sig=\"" & sig & "\""
let response =
try:
await self.post($uri, $payload, authHeader)
except HttpError as exc:
raise newException(
PeerIDAuthError, "Failed to send Authorization for PeerID Auth", exc
)
let authenticationInfo = response.headers.getString("authentication-info")
let bearerExpires =
try:
Opt.some(parse3339DateTime(extractField(authenticationInfo, "expires")))
except ValueError, PeerIDAuthError, TimeParseError:
Opt.none(DateTime)
PeerIDAuthAuthorizationResponse(
sig: PeerIDAuthSignature(extractField(authenticationInfo, "sig")),
bearer: BearerToken(
token: extractField(authenticationInfo, "bearer"), expires: bearerExpires
),
response: response,
)
proc sendWithoutBearer(
self: PeerIDAuthClient, uri: Uri, peerInfo: PeerInfo, payload: auto
): Future[(BearerToken, PeerIDAuthResponse)] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
# Perform the three-message authentication handshake per the PeerID Auth spec
# https://github.com/libp2p/specs/blob/master/http/peer-id-auth.md
let authenticationResponse = await self.requestAuthentication(uri)
let challengeServer = self.rng.randomChallenge()
let authorizationResponse = await self.requestAuthorization(
peerInfo, uri, authenticationResponse.challengeClient, challengeServer,
authenticationResponse.serverPubkey, authenticationResponse.opaque, payload,
)
if not checkSignature(
authorizationResponse.sig, authenticationResponse.serverPubkey, challengeServer,
peerInfo.publicKey, uri.hostname,
):
raise newException(PeerIDAuthError, "Failed to validate server's signature")
return (authorizationResponse.bearer, authorizationResponse.response)
proc sendWithBearer(
self: PeerIDAuthClient,
uri: Uri,
peerInfo: PeerInfo,
payload: auto,
bearer: BearerToken,
): Future[(BearerToken, PeerIDAuthResponse)] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
if bearer.expires.isSome and DateTime(bearer.expires.get) <= now():
raise newException(PeerIDAuthError, "Bearer expired")
let authHeader = PeerIDAuthPrefix & " bearer=\"" & bearer.token & "\""
let response =
try:
await self.post($uri, $payload, authHeader)
except HttpError as exc:
raise newException(
PeerIDAuthError, "Failed to send request with bearer token for PeerID Auth", exc
)
return (bearer, response)
proc send*(
self: PeerIDAuthClient,
uri: Uri,
peerInfo: PeerInfo,
payload: auto,
bearer: BearerToken = BearerToken(),
): Future[(BearerToken, PeerIDAuthResponse)] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
if bearer.token == "":
await self.sendWithoutBearer(uri, peerInfo, payload)
else:
await self.sendWithBearer(uri, peerInfo, payload, bearer)
proc close*(
self: PeerIDAuthClient
): Future[void] {.async: (raises: [CancelledError]).} =
await self.session.closeWait()
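A minimal end-to-end sketch of the client above, assuming the module lives at libp2p/peeridauth/client and a PeerID-Auth-capable server at a hypothetical URL:
import chronos, json, uri
import libp2p/crypto/crypto
import libp2p/peerinfo
import libp2p/peeridauth/client # assumed module path

proc demo() {.async.} =
  let rng = newRng()
  let key = PrivateKey.random(Ed25519, rng[]).tryGet()
  let info = PeerInfo.new(key)
  let client = PeerIDAuthClient.new(rng)
  # An empty BearerToken (the default) triggers the full handshake;
  # the returned token can be reused on later calls until it expires.
  let (bearer, resp) =
    await client.send(parseUri("https://auth.example.com"), info, %*{"n": 1})
  echo resp.status
  discard await client.send(parseUri("https://auth.example.com"), info, %*{"n": 2}, bearer)
  await client.close()

waitFor demo()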


@@ -0,0 +1,41 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import chronos, chronos/apps/http/httpclient
import ../crypto/crypto
import ./client
export client
type MockPeerIDAuthClient* = ref object of PeerIDAuthClient
mockedStatus*: int
mockedHeaders*: HttpTable
mockedBody*: seq[byte]
proc new*(
T: typedesc[MockPeerIDAuthClient], rng: ref HmacDrbgContext
): MockPeerIDAuthClient {.raises: [PeerIDAuthError].} =
MockPeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)
method post*(
self: MockPeerIDAuthClient, uri: string, payload: string, authHeader: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
PeerIDAuthResponse(
status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
)
method get*(
self: MockPeerIDAuthClient, uri: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
PeerIDAuthResponse(
status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
)
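A hypothetical test sketch (header values are placeholders): the mock returns canned responses, so response-parsing code paths can be exercised without a network:
import chronos, chronos/apps/http/httpclient
import libp2p/crypto/crypto
import libp2p/peeridauth/mockclient # assumed module path

let rng = newRng()
let mock = MockPeerIDAuthClient.new(rng)
mock.mockedStatus = 401
var headers = HttpTable.init()
headers.add("WWW-Authenticate", "libp2p-PeerID challenge-client=\"abc\", opaque=\"xyz\"")
mock.mockedHeaders = headers
let resp = waitFor mock.get("https://example.com")
doAssert resp.status == 401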


@@ -11,7 +11,7 @@
{.push public.}
import std/sequtils
import pkg/[chronos, chronicles, stew/results]
import pkg/[chronos, chronicles, results]
import peerid, multiaddress, multicodec, crypto/crypto, routing_record, errors, utility
export peerid, multiaddress, crypto, routing_record, errors, results
@@ -22,7 +22,7 @@ type
PeerInfoError* = object of LPError
AddressMapper* = proc(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.
gcsafe, raises: []
gcsafe, async: (raises: [CancelledError])
.} ## A proc that is expected to resolve the listen addresses into dialable addresses
PeerInfo* {.public.} = ref object
@@ -52,7 +52,7 @@ func shortLog*(p: PeerInfo): auto =
chronicles.formatIt(PeerInfo):
shortLog(it)
proc update*(p: PeerInfo) {.async.} =
proc update*(p: PeerInfo) {.async: (raises: [CancelledError]).} =
# The p.addrs.len == 0 check means addrs is overridden only on the first update call or when the field is empty.
# p.addressMappers.len == 0 is for when all addressMappers have been removed,
# and we wish to have addrs in its initial state, i.e., a copy of listenAddrs.
@@ -101,8 +101,10 @@ proc new*(
let pubkey =
try:
key.getPublicKey().tryGet()
except CatchableError:
raise newException(PeerInfoError, "invalid private key")
except CatchableError as e:
raise newException(
PeerInfoError, "invalid private key creating PeerInfo: " & e.msg, e
)
let peerId = PeerId.init(key).tryGet()
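Under the new signature an AddressMapper must be async and may only raise CancelledError; a minimal sketch, assuming an existing peerInfo and an async context:
let mapper: AddressMapper = proc(
    listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
  # e.g. substitute observed or NAT-mapped addresses here
  return listenAddrs

peerInfo.addressMappers.add(mapper)
await peerInfo.update()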


@@ -63,6 +63,7 @@ type
KeyBook* {.public.} = ref object of PeerBook[PublicKey]
AgentBook* {.public.} = ref object of PeerBook[string]
LastSeenBook* {.public.} = ref object of PeerBook[Opt[MultiAddress]]
ProtoVersionBook* {.public.} = ref object of PeerBook[string]
SPRBook* {.public.} = ref object of PeerBook[Envelope]
@@ -145,18 +146,24 @@ proc del*(peerStore: PeerStore, peerId: PeerId) {.public.} =
for _, book in peerStore.books:
book.deletor(peerId)
proc updatePeerInfo*(peerStore: PeerStore, info: IdentifyInfo) =
if info.addrs.len > 0:
proc updatePeerInfo*(
peerStore: PeerStore,
info: IdentifyInfo,
observedAddr: Opt[MultiAddress] = Opt.none(MultiAddress),
) =
if len(info.addrs) > 0:
peerStore[AddressBook][info.peerId] = info.addrs
peerStore[LastSeenBook][info.peerId] = observedAddr
info.pubkey.withValue(pubkey):
peerStore[KeyBook][info.peerId] = pubkey
info.agentVersion.withValue(agentVersion):
peerStore[AgentBook][info.peerId] = agentVersion.string
peerStore[AgentBook][info.peerId] = agentVersion
info.protoVersion.withValue(protoVersion):
peerStore[ProtoVersionBook][info.peerId] = protoVersion.string
peerStore[ProtoVersionBook][info.peerId] = protoVersion
if info.protos.len > 0:
peerStore[ProtoBook][info.peerId] = info.protos
@@ -181,7 +188,16 @@ proc cleanup*(peerStore: PeerStore, peerId: PeerId) =
peerStore.del(peerStore.toClean[0])
peerStore.toClean.delete(0)
proc identify*(peerStore: PeerStore, muxer: Muxer) {.async.} =
proc identify*(
peerStore: PeerStore, muxer: Muxer
) {.
async: (
raises: [
CancelledError, IdentityNoMatchError, IdentityInvalidMsgError, MultiStreamError,
LPStreamError, MuxerError,
]
)
.} =
# new stream for identify
var stream = await muxer.newStream()
if stream == nil:
@@ -200,7 +216,7 @@ proc identify*(peerStore: PeerStore, muxer: Muxer) {.async.} =
knownAgent = shortAgent
muxer.connection.setShortAgent(knownAgent)
peerStore.updatePeerInfo(info)
peerStore.updatePeerInfo(info, stream.observedAddr)
finally:
await stream.closeWithEOF()
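With the new observedAddr parameter, identify also records the address the peer was last seen at; a hedged read sketch, assuming a peerStore and an already-identified peerId:
let lastSeen: Opt[MultiAddress] = peerStore[LastSeenBook][peerId]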


@@ -11,13 +11,11 @@
{.push raises: [].}
import ../varint, ../utility, stew/[endians2, results]
import ../varint, ../utility, stew/endians2, results
export results, utility
{.push public.}
const MaxMessageSize = 1'u shl 22
type
ProtoFieldKind* = enum
## Protobuf's field types enum
@@ -39,7 +37,6 @@ type
buffer*: seq[byte]
offset*: int
length*: int
maxSize*: uint
ProtoHeader* = object
wire*: ProtoFieldKind
@@ -63,7 +60,6 @@ type
VarintDecode
MessageIncomplete
BufferOverflow
MessageTooBig
BadWireType
IncorrectBlob
RequiredFieldMissing
@@ -99,11 +95,14 @@ template getProtoHeader*(field: ProtoField): uint64 =
template toOpenArray*(pb: ProtoBuffer): untyped =
toOpenArray(pb.buffer, pb.offset, len(pb.buffer) - 1)
template lenu64*(x: untyped): untyped =
uint64(len(x))
template isEmpty*(pb: ProtoBuffer): bool =
len(pb.buffer) - pb.offset <= 0
template isEnough*(pb: ProtoBuffer, length: int): bool =
len(pb.buffer) - pb.offset - length >= 0
template isEnough*(pb: ProtoBuffer, length: uint64): bool =
pb.offset <= len(pb.buffer) and length <= uint64(len(pb.buffer) - pb.offset)
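# A minimal sketch of the new overflow-safe check: lengths decoded from varints
# are compared as uint64, so a huge length can no longer wrap a signed int
# before the bounds test, e.g.:
#   var pb = initProtoBuffer(@[byte 1, 2, 3])
#   doAssert pb.isEnough(3'u64)
#   doAssert not pb.isEnough(high(uint64))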
template getPtr*(pb: ProtoBuffer): pointer =
cast[pointer](unsafeAddr pb.buffer[pb.offset])
@@ -127,33 +126,25 @@ proc vsizeof*(field: ProtoField): int {.inline.} =
0
proc initProtoBuffer*(
data: seq[byte], offset = 0, options: set[ProtoFlags] = {}, maxSize = MaxMessageSize
data: seq[byte], offset = 0, options: set[ProtoFlags] = {}
): ProtoBuffer =
## Initialize ProtoBuffer with shallow copy of ``data``.
result.buffer = data
result.offset = offset
result.options = options
result.maxSize = maxSize
proc initProtoBuffer*(
data: openArray[byte],
offset = 0,
options: set[ProtoFlags] = {},
maxSize = MaxMessageSize,
data: openArray[byte], offset = 0, options: set[ProtoFlags] = {}
): ProtoBuffer =
## Initialize ProtoBuffer with copy of ``data``.
result.buffer = @data
result.offset = offset
result.options = options
result.maxSize = maxSize
proc initProtoBuffer*(
options: set[ProtoFlags] = {}, maxSize = MaxMessageSize
): ProtoBuffer =
proc initProtoBuffer*(options: set[ProtoFlags] = {}): ProtoBuffer =
## Initialize ProtoBuffer with an empty buffer.
result.buffer = newSeq[byte]()
result.options = options
result.maxSize = maxSize
if WithVarintLength in options:
# Our buffer will start from position 10, so we can store length of buffer
# in [0, 9].
@@ -194,12 +185,12 @@ proc write*[T: ProtoScalar](pb: var ProtoBuffer, field: int, value: T) =
doAssert(vres.isOk())
pb.offset += length
elif T is float32:
doAssert(pb.isEnough(sizeof(T)))
doAssert(pb.isEnough(uint64(sizeof(T))))
let u32 = cast[uint32](value)
pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u32.toBytesLE()
pb.offset += sizeof(T)
elif T is float64:
doAssert(pb.isEnough(sizeof(T)))
doAssert(pb.isEnough(uint64(sizeof(T))))
let u64 = cast[uint64](value)
pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u64.toBytesLE()
pb.offset += sizeof(T)
@@ -242,12 +233,12 @@ proc writePacked*[T: ProtoScalar](
doAssert(vres.isOk())
pb.offset += length
elif T is float32:
doAssert(pb.isEnough(sizeof(T)))
doAssert(pb.isEnough(uint64(sizeof(T))))
let u32 = cast[uint32](item)
pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u32.toBytesLE()
pb.offset += sizeof(T)
elif T is float64:
doAssert(pb.isEnough(sizeof(T)))
doAssert(pb.isEnough(uint64(sizeof(T))))
let u64 = cast[uint64](item)
pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u64.toBytesLE()
pb.offset += sizeof(T)
@@ -268,7 +259,7 @@ proc write*[T: byte | char](pb: var ProtoBuffer, field: int, value: openArray[T]
doAssert(lres.isOk())
pb.offset += length
if len(value) > 0:
doAssert(pb.isEnough(len(value)))
doAssert(pb.isEnough(value.lenu64))
copyMem(addr pb.buffer[pb.offset], unsafeAddr value[0], len(value))
pb.offset += len(value)
@@ -327,13 +318,13 @@ proc skipValue(data: var ProtoBuffer, header: ProtoHeader): ProtoResult[void] =
else:
err(ProtoError.VarintDecode)
of ProtoFieldKind.Fixed32:
if data.isEnough(sizeof(uint32)):
if data.isEnough(uint64(sizeof(uint32))):
data.offset += sizeof(uint32)
ok()
else:
err(ProtoError.VarintDecode)
of ProtoFieldKind.Fixed64:
if data.isEnough(sizeof(uint64)):
if data.isEnough(uint64(sizeof(uint64))):
data.offset += sizeof(uint64)
ok()
else:
@@ -343,14 +334,11 @@ proc skipValue(data: var ProtoBuffer, header: ProtoHeader): ProtoResult[void] =
var bsize = 0'u64
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
data.offset += length
if bsize <= uint64(data.maxSize):
if data.isEnough(int(bsize)):
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageIncomplete)
if data.isEnough(bsize):
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageTooBig)
err(ProtoError.MessageIncomplete)
else:
err(ProtoError.VarintDecode)
of ProtoFieldKind.StartGroup, ProtoFieldKind.EndGroup:
@@ -382,7 +370,7 @@ proc getValue[T: ProtoScalar](
err(ProtoError.VarintDecode)
elif T is float32:
doAssert(header.wire == ProtoFieldKind.Fixed32)
if data.isEnough(sizeof(float32)):
if data.isEnough(uint64(sizeof(float32))):
outval = cast[float32](fromBytesLE(uint32, data.toOpenArray()))
data.offset += sizeof(float32)
ok()
@@ -390,7 +378,7 @@ proc getValue[T: ProtoScalar](
err(ProtoError.MessageIncomplete)
elif T is float64:
doAssert(header.wire == ProtoFieldKind.Fixed64)
if data.isEnough(sizeof(float64)):
if data.isEnough(uint64(sizeof(float64))):
outval = cast[float64](fromBytesLE(uint64, data.toOpenArray()))
data.offset += sizeof(float64)
ok()
@@ -410,22 +398,19 @@ proc getValue[T: byte | char](
outLength = 0
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
data.offset += length
if bsize <= uint64(data.maxSize):
if data.isEnough(int(bsize)):
outLength = int(bsize)
if len(outBytes) >= int(bsize):
if bsize > 0'u64:
copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
data.offset += int(bsize)
ok()
else:
# Buffer overflow should not be critical failure
data.offset += int(bsize)
err(ProtoError.BufferOverflow)
if data.isEnough(bsize):
outLength = int(bsize)
if len(outBytes) >= int(bsize):
if bsize > 0'u64:
copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageIncomplete)
# Buffer overflow should not be critical failure
data.offset += int(bsize)
err(ProtoError.BufferOverflow)
else:
err(ProtoError.MessageTooBig)
err(ProtoError.MessageIncomplete)
else:
err(ProtoError.VarintDecode)
@@ -439,17 +424,14 @@ proc getValue[T: seq[byte] | string](
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
data.offset += length
if bsize <= uint64(data.maxSize):
if data.isEnough(int(bsize)):
outBytes.setLen(bsize)
if bsize > 0'u64:
copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageIncomplete)
if data.isEnough(bsize):
outBytes.setLen(bsize)
if bsize > 0'u64:
copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageTooBig)
err(ProtoError.MessageIncomplete)
else:
err(ProtoError.VarintDecode)


@@ -9,7 +9,7 @@
{.push raises: [].}
import stew/results
import results
import chronos, chronicles
import ../../../switch, ../../../multiaddress, ../../../peerid
import core
@@ -19,7 +19,9 @@ logScope:
type AutonatClient* = ref object of RootObj
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc sendDial(
conn: Connection, pid: PeerId, addrs: seq[MultiAddress]
) {.async: (raises: [CancelledError, LPStreamError]).} =
let pb = AutonatDial(
peerInfo: Opt.some(AutonatPeerInfo(id: Opt.some(pid), addrs: addrs))
).encode()
@@ -30,7 +32,9 @@ method dialMe*(
switch: Switch,
pid: PeerId,
addrs: seq[MultiAddress] = newSeq[MultiAddress](),
): Future[MultiAddress] {.base, async.} =
): Future[MultiAddress] {.
base, async: (raises: [AutonatError, AutonatUnreachableError, CancelledError])
.} =
proc getResponseOrRaise(
autonatMsg: Opt[AutonatMsg]
): AutonatDialResponse {.raises: [AutonatError].} =
@@ -47,7 +51,9 @@ method dialMe*(
await switch.dial(pid, @[AutonatCodec])
else:
await switch.dial(pid, addrs, AutonatCodec)
except CatchableError as err:
except CancelledError as err:
raise err
except DialFailedError as err:
raise
newException(AutonatError, "Unexpected error when dialling: " & err.msg, err)
@@ -61,14 +67,37 @@ method dialMe*(
incomingConnection.cancel()
# Safer to always try to cancel because we aren't sure whether the peer dialled us
if incomingConnection.completed():
await (await incomingConnection).connection.close()
trace "sending Dial", addrs = switch.peerInfo.addrs
await conn.sendDial(switch.peerInfo.peerId, switch.peerInfo.addrs)
let response = getResponseOrRaise(AutonatMsg.decode(await conn.readLp(1024)))
try:
await (await incomingConnection).connection.close()
except AlreadyExpectingConnectionError as e:
# this error is already handled above and cannot happen at this point
error "Unexpected error", description = e.msg
try:
trace "sending Dial", addrs = switch.peerInfo.addrs
await conn.sendDial(switch.peerInfo.peerId, switch.peerInfo.addrs)
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(AutonatError, "Sending dial failed", e)
var respBytes: seq[byte]
try:
respBytes = await conn.readLp(1024)
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(AutonatError, "read Dial response failed: " & e.msg, e)
let response = getResponseOrRaise(AutonatMsg.decode(respBytes))
return
case response.status
of ResponseStatus.Ok:
response.ma.tryGet()
try:
response.ma.tryGet()
except ResultError[void]:
raiseAssert("checked with if")
of ResponseStatus.DialError:
raise newException(
AutonatUnreachableError, "Peer could not dial us back: " & response.text.get("")


@@ -9,9 +9,10 @@
{.push raises: [].}
import stew/[results, objects]
import chronos, chronicles
import stew/objects
import results, chronos, chronicles
import ../../../multiaddress, ../../../peerid, ../../../errors
import ../../../protobuf/minprotobuf
logScope:
topics = "libp2p autonat"


@@ -10,7 +10,7 @@
{.push raises: [].}
import std/[sets, sequtils]
import stew/results
import results
import chronos, chronicles
import
../../protocol,
@@ -32,7 +32,9 @@ type Autonat* = ref object of LPProtocol
switch*: Switch
dialTimeout: Duration
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc sendDial(
conn: Connection, pid: PeerId, addrs: seq[MultiAddress]
) {.async: (raises: [LPStreamError, CancelledError]).} =
let pb = AutonatDial(
peerInfo: Opt.some(AutonatPeerInfo(id: Opt.some(pid), addrs: addrs))
).encode()
@@ -40,28 +42,37 @@ proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.}
proc sendResponseError(
conn: Connection, status: ResponseStatus, text: string = ""
) {.async.} =
) {.async: (raises: [CancelledError]).} =
let pb = AutonatDialResponse(
status: status,
text:
if text == "":
Opt.none(string)
else:
Opt.some(text)
,
Opt.some(text),
ma: Opt.none(MultiAddress),
).encode()
await conn.writeLp(pb.buffer)
try:
await conn.writeLp(pb.buffer)
except LPStreamError as exc:
trace "autonat failed to send response error", description = exc.msg, conn
proc sendResponseOk(conn: Connection, ma: MultiAddress) {.async.} =
proc sendResponseOk(
conn: Connection, ma: MultiAddress
) {.async: (raises: [CancelledError]).} =
let pb = AutonatDialResponse(
status: ResponseStatus.Ok, text: Opt.some("Ok"), ma: Opt.some(ma)
).encode()
await conn.writeLp(pb.buffer)
try:
await conn.writeLp(pb.buffer)
except LPStreamError as exc:
trace "autonat failed to send response ok", description = exc.msg, conn
proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.async.} =
proc tryDial(
autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]
) {.async: (raises: [DialFailedError, CancelledError]).} =
await autonat.sem.acquire()
var futs: seq[Future[Opt[MultiAddress]]]
var futs: seq[Future[Opt[MultiAddress]].Raising([DialFailedError, CancelledError])]
try:
# This is to bypass the per peer max connections limit
let outgoingConnection =
@@ -72,7 +83,8 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
return
# Safer to always try to cancel because we aren't sure whether the connection was established
defer:
outgoingConnection.cancel()
outgoingConnection.cancelSoon()
# tryDial is to bypass the global max connections limit
futs = addrs.mapIt(autonat.switch.dialer.tryDial(conn.peerId, @[it]))
let fut = await anyCompleted(futs).wait(autonat.dialTimeout)
@@ -89,9 +101,6 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
except AsyncTimeoutError as exc:
debug "Dial timeout", addrs, description = exc.msg
await conn.sendResponseError(DialError, "Dial timeout")
except CatchableError as exc:
debug "Unexpected error", addrs, description = exc.msg
await conn.sendResponseError(DialError, "Unexpected error")
finally:
autonat.sem.release()
for f in futs:
@@ -155,7 +164,9 @@ proc new*(
): T =
let autonat =
T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc handleStream(conn: Connection, proto: string) {.async.} =
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
raise newException(AutonatError, "Received malformed message")
@@ -163,6 +174,7 @@ proc new*(
raise newException(AutonatError, "Message type should be dial")
await autonat.handleDial(conn, msg)
except CancelledError as exc:
trace "cancelled autonat handler"
raise exc
except CatchableError as exc:
debug "exception in autonat handler", description = exc.msg, conn

View File

@@ -50,7 +50,7 @@ type
StatusAndConfidenceHandler* = proc(
networkReachability: NetworkReachability, confidence: Opt[float]
): Future[void] {.gcsafe, raises: [].}
): Future[void] {.gcsafe, async: (raises: [CancelledError]).}
proc new*(
T: typedesc[AutonatService],
@@ -79,7 +79,7 @@ proc new*(
enableAddressMapper: enableAddressMapper,
)
proc callHandler(self: AutonatService) {.async.} =
proc callHandler(self: AutonatService) {.async: (raises: [CancelledError]).} =
if not isNil(self.statusAndConfidenceHandler):
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
@@ -92,7 +92,7 @@ proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =
proc handleAnswer(
self: AutonatService, ans: NetworkReachability
): Future[bool] {.async.} =
): Future[bool] {.async: (raises: [CancelledError]).} =
if ans == Unknown:
return
@@ -127,7 +127,7 @@ proc handleAnswer(
proc askPeer(
self: AutonatService, switch: Switch, peerId: PeerId
): Future[NetworkReachability] {.async.} =
): Future[NetworkReachability] {.async: (raises: [CancelledError]).} =
logScope:
peerId = $peerId
@@ -160,7 +160,9 @@ proc askPeer(
await switch.peerInfo.update()
return ans
proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
proc askConnectedPeers(
self: AutonatService, switch: Switch
) {.async: (raises: [CancelledError]).} =
trace "Asking peers for reachability"
var peers = switch.connectedPeers(Direction.Out)
self.rng.shuffle(peers)
@@ -175,13 +177,15 @@ proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
if (await askPeer(self, switch, peer)) != Unknown:
answersFromPeers.inc()
proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.async.} =
proc schedule(
service: AutonatService, switch: Switch, interval: Duration
) {.async: (raises: [CancelledError]).} =
heartbeat "Scheduling AutonatService run", interval:
await service.run(switch)
proc addressMapper(
self: AutonatService, peerStore: PeerStore, listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
if self.networkReachability != NetworkReachability.Reachable:
return listenAddrs
@@ -198,10 +202,12 @@ proc addressMapper(
addrs.add(processedMA)
return addrs
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
method setup*(
self: AutonatService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
self.addressMapper = proc(
listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
return await addressMapper(self, switch.peerStore, listenAddrs)
info "Setting up AutonatService"
@@ -210,22 +216,30 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
if self.askNewConnectedPeers:
self.newConnectedPeerHandler = proc(
peerId: PeerId, event: PeerEvent
): Future[void] {.async.} =
): Future[void] {.async: (raises: [CancelledError]).} =
discard askPeer(self, switch, peerId)
switch.connManager.addPeerEventHandler(
self.newConnectedPeerHandler, PeerEventKind.Joined
)
self.scheduleInterval.withValue(interval):
self.scheduleHandle = schedule(self, switch, interval)
if self.enableAddressMapper:
switch.peerInfo.addressMappers.add(self.addressMapper)
return hasBeenSetup
method run*(self: AutonatService, switch: Switch) {.async, public.} =
method run*(
self: AutonatService, switch: Switch
) {.public, async: (raises: [CancelledError]).} =
trace "Running AutonatService"
await askConnectedPeers(self, switch)
method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public.} =
method stop*(
self: AutonatService, switch: Switch
): Future[bool] {.public, async: (raises: [CancelledError]).} =
info "Stopping AutonatService"
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:

View File

@@ -11,7 +11,7 @@
import std/sequtils
import stew/results
import results
import chronos, chronicles
import core
@@ -34,7 +34,7 @@ proc new*(
proc startSync*(
self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: seq[MultiAddress]
) {.async.} =
) {.async: (raises: [DcutrError, CancelledError]).} =
logScope:
peerId = switch.peerInfo.peerId
@@ -107,7 +107,9 @@ proc startSync*(
description = err.msg
raise newException(
DcutrError,
"Unexpected error when Dcutr initiator tried to connect to the remote peer", err,
"Unexpected error when Dcutr initiator tried to connect to the remote peer: " &
err.msg,
err,
)
finally:
if stream != nil:


@@ -15,6 +15,7 @@ import chronos
import stew/objects
import ../../../multiaddress, ../../../errors, ../../../stream/connection
import ../../../protobuf/minprotobuf
export multiaddress
@@ -49,7 +50,9 @@ proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [DcutrEr
raise newException(DcutrError, "Received malformed message")
return dcutrMsg
proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async.} =
proc send*(
conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]
) {.async: (raises: [CancelledError, LPStreamError]).} =
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
await conn.writeLp(pb.buffer)


@@ -10,8 +10,8 @@
{.push raises: [].}
import std/[sets, sequtils]
import stew/[results, objects]
import chronos, chronicles
import stew/objects
import results, chronos, chronicles
import core
import
@@ -30,7 +30,9 @@ proc new*(
connectTimeout = 15.seconds,
maxDialableAddrs = 8,
): T =
proc handleStream(stream: Connection, proto: string) {.async.} =
proc handleStream(
stream: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
var peerDialableAddrs: seq[MultiAddress]
try:
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
@@ -77,6 +79,7 @@ proc new*(
for fut in futs:
fut.cancel()
except CancelledError as err:
trace "cancelled Dcutr receiver"
raise err
except AllFuturesFailedError as err:
debug "Dcutr receiver could not connect to the remote peer, " &


@@ -29,11 +29,12 @@ const RelayClientMsgSize = 4096
type
RelayClientError* = object of LPError
ReservationError* = object of RelayClientError
RelayV1DialError* = object of RelayClientError
RelayV2DialError* = object of RelayClientError
RelayDialError* = object of DialFailedError
RelayV1DialError* = object of RelayDialError
RelayV2DialError* = object of RelayDialError
RelayClientAddConn* = proc(
conn: Connection, duration: uint32, data: uint64
): Future[void] {.gcsafe, raises: [].}
): Future[void] {.gcsafe, async: (raises: [CancelledError]).}
RelayClient* = ref object of Relay
onNewConnection*: RelayClientAddConn
canHop: bool
@@ -45,14 +46,21 @@ type
limitDuration*: uint32 # seconds
limitData*: uint64 # bytes
proc sendStopError(conn: Connection, code: StatusV2) {.async.} =
proc sendStopError(
conn: Connection, code: StatusV2
) {.async: (raises: [CancelledError]).} =
trace "send stop status", status = $code & " (" & $ord(code) & ")"
let msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
await conn.writeLp(encode(msg).buffer)
try:
let msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
await conn.writeLp(encode(msg).buffer)
except CancelledError as e:
raise e
except LPStreamError as e:
trace "failed to send stop status", description = e.msg
proc handleRelayedConnect(
cl: RelayClient, conn: Connection, msg: StopMessage
) {.async.} =
) {.async: (raises: [CancelledError, LPStreamError]).} =
let
# TODO: check the go version to see in which way this could fail
# it's unclear in the spec
@@ -80,7 +88,7 @@ proc handleRelayedConnect(
proc reserve*(
cl: RelayClient, peerId: PeerId, addrs: seq[MultiAddress] = @[]
): Future[Rsvp] {.async.} =
): Future[Rsvp] {.async: (raises: [ReservationError, DialFailedError, CancelledError]).} =
let conn = await cl.switch.dial(peerId, addrs, RelayV2HopCodec)
defer:
await conn.close()
@@ -121,7 +129,7 @@ proc reserve*(
proc dialPeerV1*(
cl: RelayClient, conn: Connection, dstPeerId: PeerId, dstAddrs: seq[MultiAddress]
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [CancelledError, RelayV1DialError]).} =
var
msg = RelayMessage(
msgType: Opt.some(RelayType.Hop),
@@ -138,19 +146,20 @@ proc dialPeerV1*(
await conn.writeLp(pb.buffer)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
trace "error writing hop request", description = exc.msg
raise exc
raise newException(RelayV1DialError, "error writing hop request: " & exc.msg, exc)
let msgRcvFromRelayOpt =
try:
RelayMessage.decode(await conn.readLp(RelayClientMsgSize))
except CancelledError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
trace "error reading stop response", description = exc.msg
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise exc
raise
newException(RelayV1DialError, "error reading stop response: " & exc.msg, exc)
try:
let msgRcvFromRelay = msgRcvFromRelayOpt.valueOr:
@@ -165,10 +174,16 @@ proc dialPeerV1*(
)
except RelayV1DialError as exc:
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise exc
raise newException(
RelayV1DialError,
"Hop can't open destination stream after sendStatus: " & exc.msg,
exc,
)
except ValueError as exc:
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise newException(RelayV1DialError, exc.msg)
raise newException(
RelayV1DialError, "Exception reading msg in dialPeerV1: " & exc.msg, exc
)
result = conn
proc dialPeerV2*(
@@ -176,7 +191,7 @@ proc dialPeerV2*(
conn: RelayConnection,
dstPeerId: PeerId,
dstAddrs: seq[MultiAddress],
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [RelayV2DialError, CancelledError]).} =
let
p = Peer(peerId: dstPeerId, addrs: dstAddrs)
pb = encode(HopMessage(msgType: HopMessageType.Connect, peer: Opt.some(p)))
@@ -191,7 +206,8 @@ proc dialPeerV2*(
raise exc
except CatchableError as exc:
trace "error reading stop response", description = exc.msg
raise newException(RelayV2DialError, exc.msg)
raise
newException(RelayV2DialError, "Exception decoding HopMessage: " & exc.msg, exc)
if msgRcvFromRelay.msgType != HopMessageType.Status:
raise newException(RelayV2DialError, "Unexpected stop response")
@@ -202,7 +218,9 @@ proc dialPeerV2*(
conn.limitData = msgRcvFromRelay.limit.data
return conn
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
proc handleStopStreamV2(
cl: RelayClient, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@@ -214,7 +232,9 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
trace "Unexpected client / relayv2 handshake", msgType = msg.msgType
await sendStopError(conn, MalformedMessage)
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
proc handleStop(
cl: RelayClient, conn: Connection, msg: RelayMessage
) {.async: (raises: [CancelledError]).} =
let src = msg.srcPeer.valueOr:
await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
return
@@ -241,7 +261,9 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.}
else:
await conn.close()
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
proc handleStreamV1(
cl: RelayClient, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@@ -290,7 +312,9 @@ proc new*(
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1,
)
proc handleStream(conn: Connection, proto: string) {.async.} =
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
case proto
of RelayV1Codec:
@@ -300,6 +324,7 @@ proc new*(
of RelayV2HopCodec:
await cl.handleHopStreamV2(conn)
except CancelledError as exc:
trace "cancelled client handler"
raise exc
except CatchableError as exc:
trace "exception in client handler", description = exc.msg, conn


@@ -10,8 +10,10 @@
{.push raises: [].}
import macros
import stew/[objects, results]
import stew/objects
import results
import ../../../peerinfo, ../../../signed_envelope
import ../../../protobuf/minprotobuf
# Circuit Relay V1 Message


@@ -112,7 +112,9 @@ proc isRelayed*(conn: Connection): bool =
wrappedConn = wrappedConn.getWrapped()
return false
proc handleReserve(r: Relay, conn: Connection) {.async.} =
proc handleReserve(
r: Relay, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
if conn.isRelayed():
trace "reservation attempt over relay connection", pid = conn.peerId
await sendHopStatus(conn, PermissionDenied)
@@ -133,7 +135,9 @@ proc handleReserve(r: Relay, conn: Connection) {.async.} =
r.rsvp[pid] = expire
await conn.writeLp(encode(msg).buffer)
proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
proc handleConnect(
r: Relay, connSrc: Connection, msg: HopMessage
) {.async: (raises: [CancelledError, LPStreamError]).} =
if connSrc.isRelayed():
trace "connection attempt over relay connection"
await sendHopStatus(connSrc, PermissionDenied)
@@ -166,14 +170,14 @@ proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
await r.switch.dial(dst, RelayV2StopCodec)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except DialFailedError as exc:
trace "error opening relay stream", dst, description = exc.msg
await sendHopStatus(connSrc, ConnectionFailed)
return
defer:
await connDst.close()
proc sendStopMsg() {.async.} =
proc sendStopMsg() {.async: (raises: [SendStopError, CancelledError, LPStreamError]).} =
let stopMsg = StopMessage(
msgType: StopMessageType.Connect,
peer: Opt.some(Peer(peerId: src, addrs: @[])),
@@ -209,7 +213,9 @@ proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
await rconnDst.close()
await bridge(rconnSrc, rconnDst)
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
proc handleHopStreamV2*(
r: Relay, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@@ -225,7 +231,9 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
# Relay V1
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
proc handleHop*(
r: Relay, connSrc: Connection, msg: RelayMessage
) {.async: (raises: [CancelledError]).} =
r.streamCount.inc()
defer:
r.streamCount.dec()
@@ -271,7 +279,7 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
await r.switch.dial(dst.peerId, RelayV1Codec)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except DialFailedError as exc:
trace "error opening relay stream", dst, description = exc.msg
await sendStatus(connSrc, StatusV1.HopCantDialDst)
return
@@ -309,7 +317,9 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
trace "relaying connection", src, dst
await bridge(connSrc, connDst)
proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
proc handleStreamV1(
r: Relay, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@@ -333,9 +343,8 @@ proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
proc setup*(r: Relay, switch: Switch) =
r.switch = switch
r.switch.addPeerEventHandler(
proc(peerId: PeerId, event: PeerEvent) {.async.} =
r.rsvp.del(peerId)
,
proc(peerId: PeerId, event: PeerEvent) {.async: (raises: [CancelledError]).} =
r.rsvp.del(peerId),
Left,
)
@@ -360,7 +369,9 @@ proc new*(
isCircuitRelayV1: circuitRelayV1,
)
proc handleStream(conn: Connection, proto: string) {.async.} =
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
case proto
of RelayV2HopCodec:
@@ -368,6 +379,7 @@ proc new*(
of RelayV1Codec:
await r.handleStreamV1(conn)
except CancelledError as exc:
trace "cancelled relayv2 handler"
raise exc
except CatchableError as exc:
debug "exception in relayv2 handler", description = exc.msg, conn
@@ -383,12 +395,15 @@ proc new*(
r.handler = handleStream
r
proc deletesReservation(r: Relay) {.async.} =
proc deletesReservation(r: Relay) {.async: (raises: [CancelledError]).} =
heartbeat "Reservation timeout", r.heartbeatSleepTime.seconds():
let n = now().utc
for k in toSeq(r.rsvp.keys):
if n > r.rsvp[k]:
r.rsvp.del(k)
try:
let n = now().utc
for k in toSeq(r.rsvp.keys):
if n > r.rsvp[k]:
r.rsvp.del(k)
except KeyError:
raiseAssert "checked with in"
method start*(r: Relay): Future[void] {.async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()


@@ -29,71 +29,100 @@ type RelayTransport* = ref object of Transport
queue: AsyncQueue[Connection]
selfRunning: bool
method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
method start*(
self: RelayTransport, ma: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError]).} =
if self.selfRunning:
trace "Relay transport already running"
return
self.client.onNewConnection = proc(
conn: Connection, duration: uint32 = 0, data: uint64 = 0
) {.async.} =
) {.async: (raises: [CancelledError]).} =
await self.queue.addLast(RelayConnection.new(conn, duration, data))
await conn.join()
self.selfRunning = true
await procCall Transport(self).start(ma)
trace "Starting Relay transport"
method stop*(self: RelayTransport) {.async.} =
method stop*(self: RelayTransport) {.async: (raises: []).} =
self.running = false
self.selfRunning = false
self.client.onNewConnection = nil
while not self.queue.empty():
await self.queue.popFirstNoWait().close()
try:
await self.queue.popFirstNoWait().close()
except AsyncQueueEmptyError:
continue # checked with self.queue.empty()
method accept*(self: RelayTransport): Future[Connection] {.async.} =
method accept*(
self: RelayTransport
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
result = await self.queue.popFirst()
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
let
sma = toSeq(ma.items())
relayAddrs = sma[0 .. sma.len - 4].mapIt(it.tryGet()).foldl(a & b)
proc dial*(
self: RelayTransport, ma: MultiAddress
): Future[Connection] {.async: (raises: [RelayDialError, CancelledError]).} =
var
relayAddrs: MultiAddress
relayPeerId: PeerId
dstPeerId: PeerId
if not relayPeerId.init(($(sma[^3].tryGet())).split('/')[2]):
raise newException(RelayV2DialError, "Relay doesn't exist")
if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
raise newException(RelayV2DialError, "Destination doesn't exist")
try:
let sma = toSeq(ma.items())
relayAddrs = sma[0 .. sma.len - 4].mapIt(it.tryGet()).foldl(a & b)
if not relayPeerId.init(($(sma[^3].tryGet())).split('/')[2]):
raise newException(RelayDialError, "Relay doesn't exist")
if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
raise newException(RelayDialError, "Destination doesn't exist")
except RelayDialError as e:
raise newException(RelayDialError, "dial address not valid: " & e.msg, e)
except CatchableError:
raise newException(RelayDialError, "dial address not valid")
trace "Dial", relayPeerId, dstPeerId
let conn = await self.client.switch.dial(
relayPeerId, @[relayAddrs], @[RelayV2HopCodec, RelayV1Codec]
)
conn.dir = Direction.Out
var rc: RelayConnection
try:
let conn = await self.client.switch.dial(
relayPeerId, @[relayAddrs], @[RelayV2HopCodec, RelayV1Codec]
)
conn.dir = Direction.Out
case conn.protocol
of RelayV1Codec:
return await self.client.dialPeerV1(conn, dstPeerId, @[])
of RelayV2HopCodec:
rc = RelayConnection.new(conn, 0, 0)
return await self.client.dialPeerV2(rc, dstPeerId, @[])
except CancelledError as exc:
raise exc
except CatchableError as exc:
if not rc.isNil:
await rc.close()
raise exc
except CancelledError as e:
safeClose(rc)
raise e
except DialFailedError as e:
safeClose(rc)
raise newException(RelayDialError, "dial relay peer failed: " & e.msg, e)
except RelayV1DialError as e:
safeClose(rc)
raise newException(RelayV1DialError, "dial relay v1 failed: " & e.msg, e)
except RelayV2DialError as e:
safeClose(rc)
raise newException(RelayV2DialError, "dial relay v2 failed: " & e.msg, e)
method dial*(
self: RelayTransport,
hostname: string,
ma: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId),
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
peerId.withValue(pid):
let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
result = await self.dial(address)
try:
let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
result = await self.dial(address)
except CancelledError as e:
raise e
except CatchableError as e:
raise
newException(transport.TransportDialError, "Caught error in dial: " & e.msg, e)
method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe.} =
try:


@@ -22,12 +22,17 @@ const
proc sendStatus*(
conn: Connection, code: StatusV1
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
) {.async: (raises: [CancelledError]).} =
trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
let
msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
conn.writeLp(pb.buffer)
try:
let
msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
except CancelledError as e:
raise e
except LPStreamError as e:
trace "error sending relay status", description = e.msg
proc sendHopStatus*(
conn: Connection, code: StatusV2
@@ -64,8 +69,8 @@ proc bridge*(
while not connSrc.closed() and not connDst.closed():
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(futSrc, futDst)
except ValueError:
raiseAssert("Futures list is not empty")
except ValueError as e:
raiseAssert("Futures list is not empty: " & e.msg)
if futSrc.finished():
bufRead = await futSrc
if bufRead > 0:


@@ -13,8 +13,7 @@
{.push raises: [].}
import std/[sequtils, options, strutils, sugar]
import stew/results
import chronos, chronicles
import results, chronos, chronicles
import
../protobuf/minprotobuf,
../peerinfo,
@@ -148,12 +147,13 @@ proc new*(
identify
method init*(p: Identify) =
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
trace "handling identify request", conn
var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
await conn.writeLp(pb.buffer)
except CancelledError as exc:
trace "cancelled identify handler"
raise exc
except CatchableError as exc:
trace "exception in identify handler", description = exc.msg, conn
@@ -166,7 +166,12 @@ method init*(p: Identify) =
proc identify*(
self: Identify, conn: Connection, remotePeerId: PeerId
): Future[IdentifyInfo] {.async.} =
): Future[IdentifyInfo] {.
async: (
raises:
[IdentityInvalidMsgError, IdentityNoMatchError, LPStreamError, CancelledError]
)
.} =
trace "initiating identify", conn
var message = await conn.readLp(64 * 1024)
if len(message) == 0:
@@ -205,7 +210,7 @@ proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.pu
identifypush
proc init*(p: IdentifyPush) =
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
trace "handling identify push", conn
try:
var message = await conn.readLp(64 * 1024)
@@ -224,6 +229,7 @@ proc init*(p: IdentifyPush) =
if not isNil(p.identifyHandler):
await p.identifyHandler(conn.peerId, identInfo)
except CancelledError as exc:
trace "cancelled identify push handler"
raise exc
except CatchableError as exc:
info "exception in identify push handler", description = exc.msg, conn
@@ -234,7 +240,9 @@ proc init*(p: IdentifyPush) =
p.handler = handle
p.codec = IdentifyPushCodec
proc push*(p: IdentifyPush, peerInfo: PeerInfo, conn: Connection) {.async, public.} =
proc push*(
p: IdentifyPush, peerInfo: PeerInfo, conn: Connection
) {.public, async: (raises: [CancelledError, LPStreamError]).} =
## Send new `peerInfo`s to a connection
var pb = encodeMsg(peerInfo, conn.observedAddr, true)
await conn.writeLp(pb.buffer)


@@ -0,0 +1,3 @@
import ./kademlia/kademlia
export kademlia


@@ -0,0 +1,6 @@
const
IdLength* = 32 # 256-bit IDs
k* = 20 # replication parameter
maxBuckets* = 256
const KadCodec* = "/ipfs/kad/1.0.0"


@@ -0,0 +1,74 @@
import chronos
import chronicles
import ../../peerid
import ./consts
import ./routingtable
import ../protocol
import ../../switch
import ./protobuf
import ../../utils/heartbeat
logScope:
topics = "kad-dht"
type KadDHT* = ref object of LPProtocol
switch: Switch
rng: ref HmacDrbgContext
rtable*: RoutingTable
maintenanceLoop: Future[void]
proc maintainBuckets(kad: KadDHT) {.async: (raises: [CancelledError]).} =
heartbeat "refresh buckets", 10.minutes:
debug "TODO: implement bucket maintenance"
proc new*(
T: typedesc[KadDHT], switch: Switch, rng: ref HmacDrbgContext = newRng()
): T {.raises: [].} =
var rtable = RoutingTable.init(switch.peerInfo.peerId)
let kad = T(rng: rng, switch: switch, rtable: rtable)
kad.codec = KadCodec
kad.handler = proc(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
while not conn.atEof:
let
buf = await conn.readLp(4096)
msg = Message.decode(buf).tryGet()
# TODO: handle msg.msgType
except CancelledError as exc:
raise exc
except CatchableError:
error "could not handle request",
peerId = conn.peerId, err = getCurrentExceptionMsg()
finally:
await conn.close()
return kad
method start*(
kad: KadDHT
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if kad.started:
warn "Starting kad-dht twice"
return fut
kad.maintenanceLoop = kad.maintainBuckets()
kad.started = true
info "kad-dht started"
fut
method stop*(kad: KadDHT): Future[void] {.async: (raises: [], raw: true).} =
if not kad.started:
return
kad.started = false
kad.maintenanceLoop.cancelSoon()
kad.maintenanceLoop = nil
return
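A hypothetical wiring sketch (the kademlia module path is assumed), mounting the DHT on a freshly built switch:
import chronos
import libp2p
import libp2p/protocols/kademlia # assumed module path

proc wire() {.async.} =
  let switch = SwitchBuilder.new()
    .withRng(newRng())
    .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
    .withTcpTransport()
    .withMplex()
    .withNoise()
    .build()
  let kad = KadDHT.new(switch)
  switch.mount(kad)
  await switch.start()
  await kad.start()

waitFor wire()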


@@ -0,0 +1,48 @@
import ../../peerid
import ./consts
import chronicles
import stew/byteutils
type
KeyType* {.pure.} = enum
Unhashed
Raw
PeerId
Key* = object
case kind*: KeyType
of KeyType.PeerId:
peerId*: PeerId
of KeyType.Raw, KeyType.Unhashed:
data*: array[IdLength, byte]
proc toKey*(s: seq[byte]): Key =
doAssert s.len == IdLength
var data: array[IdLength, byte]
for i in 0 ..< IdLength:
data[i] = s[i]
return Key(kind: KeyType.Raw, data: data)
proc toKey*(p: PeerId): Key =
return Key(kind: KeyType.PeerId, peerId: p)
proc getBytes*(k: Key): seq[byte] =
return
case k.kind
of KeyType.PeerId:
k.peerId.getBytes()
of KeyType.Raw, KeyType.Unhashed:
@(k.data)
template `==`*(a, b: Key): bool =
a.getBytes() == b.getBytes() and a.kind == b.kind
proc shortLog*(k: Key): string =
case k.kind
of KeyType.PeerId:
"PeerId:" & $k.peerId
of KeyType.Raw, KeyType.Unhashed:
$k.kind & ":" & toHex(k.data)
chronicles.formatIt(Key):
shortLog(it)
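A minimal sketch of the two constructors (PeerId.random is assumed available from the peerid module):
let raw = toKey(newSeq[byte](IdLength)) # Raw key from 32 zero bytes
doAssert raw.getBytes().len == IdLength

let pid = PeerId.random().get()
doAssert pid.toKey().kind == KeyType.PeerId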


@@ -0,0 +1,159 @@
import ../../protobuf/minprotobuf
import ../../varint
import ../../utility
import results
import ../../multiaddress
import stew/objects
import stew/assign2
import options
type
Record* {.public.} = object
key*: Option[seq[byte]]
value*: Option[seq[byte]]
timeReceived*: Option[string]
MessageType* = enum
putValue = 0
getValue = 1
addProvider = 2
getProviders = 3
findNode = 4
ping = 5 # Deprecated
ConnectionType* = enum
notConnected = 0
connected = 1
canConnect = 2 # Unused
cannotConnect = 3 # Unused
Peer* {.public.} = object
id*: seq[byte]
addrs*: seq[MultiAddress]
connection*: ConnectionType
Message* {.public.} = object
msgType*: MessageType
key*: Option[seq[byte]]
record*: Option[Record]
closerPeers*: seq[Peer]
providerPeers*: seq[Peer]
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].}
proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].}
proc encode*(record: Record): ProtoBuffer {.raises: [].} =
var pb = initProtoBuffer()
pb.writeOpt(1, record.key)
pb.writeOpt(2, record.value)
pb.writeOpt(5, record.timeReceived)
pb.finish()
return pb
proc encode*(peer: Peer): ProtoBuffer {.raises: [].} =
var pb = initProtoBuffer()
pb.write(1, peer.id)
for address in peer.addrs:
pb.write(2, address.data.buffer)
pb.write(3, uint32(ord(peer.connection)))
pb.finish()
return pb
proc encode*(msg: Message): ProtoBuffer {.raises: [].} =
var pb = initProtoBuffer()
pb.write(1, uint32(ord(msg.msgType)))
pb.writeOpt(2, msg.key)
msg.record.withValue(record):
pb.writeOpt(3, msg.record)
for peer in msg.closerPeers:
pb.write(8, peer.encode())
for peer in msg.providerPeers:
pb.write(9, peer.encode())
pb.finish()
return pb
proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].} =
opt.withValue(v):
pb.write(field, v)
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].} =
pb.write(field, value.encode())
proc getOptionField[T: ProtoScalar | string | seq[byte]](
    pb: ProtoBuffer, field: int, output: var Option[T]
): ProtoResult[void] =
  var f: T
  if ?pb.getField(field, f):
    assign(output, some(f))
  ok()

proc decode*(T: type Record, pb: ProtoBuffer): ProtoResult[Option[T]] =
  var r: Record
  ?pb.getOptionField(1, r.key)
  ?pb.getOptionField(2, r.value)
  ?pb.getOptionField(5, r.timeReceived)
  return ok(some(r))

proc decode*(T: type Peer, pb: ProtoBuffer): ProtoResult[Option[T]] =
  var p: Peer
  ?pb.getRequiredField(1, p.id)
  discard ?pb.getRepeatedField(2, p.addrs)
  var connVal: uint32
  if ?pb.getField(3, connVal):
    var connType: ConnectionType
    if not checkedEnumAssign(connType, connVal):
      return err(ProtoError.BadWireType)
    p.connection = connType
  return ok(some(p))

proc decode*(T: type Message, buf: seq[byte]): ProtoResult[Option[T]] =
  var
    m: Message
    recPb: seq[byte]
    closerPbs: seq[seq[byte]]
    providerPbs: seq[seq[byte]]
  var pb = initProtoBuffer(buf)
  var msgTypeVal: uint32
  ?pb.getRequiredField(1, msgTypeVal)
  var msgType: MessageType
  if not checkedEnumAssign(msgType, msgTypeVal):
    return err(ProtoError.BadWireType)
  m.msgType = msgType
  ?pb.getOptionField(2, m.key)
  if ?pb.getField(3, recPb):
    assign(m.record, ?Record.decode(initProtoBuffer(recPb)))
  discard ?pb.getRepeatedField(8, closerPbs)
  for ppb in closerPbs:
    let peerOpt = ?Peer.decode(initProtoBuffer(ppb))
    peerOpt.withValue(peer):
      m.closerPeers.add(peer)
  discard ?pb.getRepeatedField(9, providerPbs)
  for ppb in providerPbs:
    let peerOpt = ?Peer.decode(initProtoBuffer(ppb))
    peerOpt.withValue(peer):
      m.providerPeers.add(peer)
  return ok(some(m))
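
As a sanity check, a hedged round-trip sketch; it assumes minprotobuf's ProtoBuffer exposes its raw bytes as `buffer`:

# Hypothetical round-trip, not part of this changeset:
let msg = Message(msgType: MessageType.findNode, key: some(@[1'u8, 2, 3]))
let decoded = Message.decode(msg.encode().buffer).expect("valid wire format")
doAssert decoded.get().msgType == MessageType.findNode
doAssert decoded.get().key == some(@[1'u8, 2, 3])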


@@ -0,0 +1,129 @@
import algorithm
import bearssl/rand
import chronos
import chronicles
import ./consts
import ./keys
import ./xordistance
import ../../peerid
import sequtils

logScope:
  topics = "kad-dht rtable"

type
  NodeEntry* = object
    nodeId*: Key
    lastSeen*: Moment

  Bucket* = object
    peers*: seq[NodeEntry]

  RoutingTable* = ref object
    selfId*: Key
    buckets*: seq[Bucket]

proc init*(T: typedesc[RoutingTable], selfId: Key): T =
  return RoutingTable(selfId: selfId, buckets: @[])
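
# The bucket index of a key is the number of leading zero bits in its XOR
# distance from selfId: the longer the shared prefix, the closer the node and
# the higher the bucket it lands in (at most maxBuckets - 1).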
proc bucketIndex*(selfId, key: Key): int =
  return xorDistance(selfId, key).leadingZeros

proc peerIndexInBucket(bucket: var Bucket, nodeId: Key): Opt[int] =
  for i, p in bucket.peers:
    if p.nodeId == nodeId:
      return Opt.some(i)
  return Opt.none(int)

proc insert*(rtable: var RoutingTable, nodeId: Key): bool =
  if nodeId == rtable.selfId:
    return false # No self insertion
  let idx = bucketIndex(rtable.selfId, nodeId)
  if idx >= maxBuckets:
    trace "cannot insert node. max buckets have been reached",
      nodeId, bucketIdx = idx, maxBuckets
    return false
  if idx >= rtable.buckets.len:
    # expand buckets lazily if needed
    rtable.buckets.setLen(idx + 1)
  var bucket = rtable.buckets[idx]
  let keyx = peerIndexInBucket(bucket, nodeId)
  if keyx.isSome:
    bucket.peers[keyx.unsafeValue].lastSeen = Moment.now()
  elif bucket.peers.len < k:
    bucket.peers.add(NodeEntry(nodeId: nodeId, lastSeen: Moment.now()))
  else:
    # TODO: eviction policy goes here, for now we drop the node
    trace "cannot insert node in bucket, dropping node",
      nodeId, bucket = k, bucketIdx = idx
    return false
  rtable.buckets[idx] = bucket
  return true

proc insert*(rtable: var RoutingTable, peerId: PeerId): bool =
  insert(rtable, peerId.toKey())

proc findClosest*(rtable: RoutingTable, targetId: Key, count: int): seq[Key] =
  var allNodes: seq[Key] = @[]
  for bucket in rtable.buckets:
    for p in bucket.peers:
      allNodes.add(p.nodeId)
  allNodes.sort(
    proc(a, b: Key): int =
      cmp(xorDistance(a, targetId), xorDistance(b, targetId))
  )
  return allNodes[0 ..< min(count, allNodes.len)]

proc findClosestPeers*(rtable: RoutingTable, targetId: Key, count: int): seq[PeerId] =
  findClosest(rtable, targetId, count).mapIt(it.peerId)

proc isStale*(bucket: Bucket): bool =
  if bucket.peers.len == 0:
    return true
  for p in bucket.peers:
    if Moment.now() - p.lastSeen > 30.minutes:
      return true
  return false

proc randomKeyInBucketRange*(
    selfId: Key, bucketIndex: int, rng: ref HmacDrbgContext
): Key =
  var raw = selfId.getBytes()
  # zero out higher bits
  for i in 0 ..< bucketIndex:
    let byteIdx = i div 8
    let bitInByte = 7 - (i mod 8)
    raw[byteIdx] = raw[byteIdx] and not (1'u8 shl bitInByte)
  # flip the target bit
  let tgtByte = bucketIndex div 8
  let tgtBitInByte = 7 - (bucketIndex mod 8)
  raw[tgtByte] = raw[tgtByte] xor (1'u8 shl tgtBitInByte)
  # randomize all less significant bits
  let totalBits = raw.len * 8
  let lsbStart = bucketIndex + 1
  let lsbBytes = (totalBits - lsbStart + 7) div 8
  var randomBuf = newSeq[byte](lsbBytes)
  hmacDrbgGenerate(rng[], randomBuf)
  for i in lsbStart ..< totalBits:
    let byteIdx = i div 8
    let bitInByte = 7 - (i mod 8)
    let lsbByte = (i - lsbStart) div 8
    let lsbBit = 7 - ((i - lsbStart) mod 8)
    let randBit = (randomBuf[lsbByte] shr lsbBit) and 1
    if randBit == 1:
      raw[byteIdx] = raw[byteIdx] or (1'u8 shl bitInByte)
    else:
      raw[byteIdx] = raw[byteIdx] and not (1'u8 shl bitInByte)
  return raw.toKey()
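
A small usage sketch, assuming placeholder values (`selfPeerId`, `discoveredPeers`, `target`); note that findClosestPeers reads `peerId` from each Key, so it expects entries that were inserted as PeerIds:

# Hypothetical usage, not part of this changeset:
var rtable = RoutingTable.init(selfPeerId.toKey())
for peer in discoveredPeers:    # e.g. peers learned from FIND_NODE responses
  discard rtable.insert(peer)   # returns false when the bucket is already full
let nearest = rtable.findClosestPeers(target.toKey(), k)  # k from ./consts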


@@ -0,0 +1,55 @@
import ./consts
import ./keys
import nimcrypto/sha2
import ../../peerid

type XorDistance* = array[IdLength, byte]

proc countLeadingZeroBits*(b: byte): int =
  for i in 0 .. 7:
    if (b and (0x80'u8 shr i)) != 0:
      return i
  return 8

proc leadingZeros*(dist: XorDistance): int =
  for i in 0 ..< dist.len:
    if dist[i] != 0:
      return i * 8 + countLeadingZeroBits(dist[i])
  return dist.len * 8

proc cmp*(a, b: XorDistance): int =
  for i in 0 ..< IdLength:
    if a[i] < b[i]:
      return -1
    elif a[i] > b[i]:
      return 1
  return 0

proc `<`*(a, b: XorDistance): bool =
  cmp(a, b) < 0

proc `<=`*(a, b: XorDistance): bool =
  cmp(a, b) <= 0
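
# hashFor maps a Key onto the 256-bit keyspace: PeerId and Raw keys are
# SHA-256 hashed first, while Unhashed keys (already IdLength bytes) are
# used verbatim.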
proc hashFor(k: Key): seq[byte] =
  return
    @(
      case k.kind
      of KeyType.PeerId:
        sha256.digest(k.peerId.getBytes()).data
      of KeyType.Raw:
        sha256.digest(k.data).data
      of KeyType.Unhashed:
        k.data
    )

proc xorDistance*(a, b: Key): XorDistance =
  let hashA = a.hashFor()
  let hashB = b.hashFor()
  var response: XorDistance
  for i in 0 ..< hashA.len:
    response[i] = hashA[i] xor hashB[i]
  return response

proc xorDistance*(a: PeerId, b: Key): XorDistance =
  xorDistance(a.toKey(), b)
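
The metric properties Kademlia relies on can be spot-checked with a short sketch; `peerA` and `peerB` are placeholder PeerIds:

# Hypothetical illustration, not part of this changeset:
let d1 = xorDistance(peerA, peerB.toKey())  # PeerId overload hashes both sides
let d2 = xorDistance(peerB, peerA.toKey())
doAssert d1 == d2                           # symmetry: d(a, b) == d(b, a)
# leadingZeros(d1) is the shared-prefix length of the hashed keys, which the
# routing table uses directly as the bucket index.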


@@ -16,35 +16,68 @@ import ./core, ../../stream/connection
logScope:
  topics = "libp2p perf"

type Stats* = object
  isFinal*: bool
  uploadBytes*: uint
  downloadBytes*: uint
  duration*: Duration

type PerfClient* = ref object
  stats: Stats

proc new*(T: typedesc[PerfClient]): T =
  return T()

proc currentStats*(p: PerfClient): Stats =
  return p.stats

proc perf*(
    p: PerfClient, conn: Connection, sizeToWrite: uint64 = 0, sizeToRead: uint64 = 0
): Future[Duration] {.public, async: (raises: [CancelledError, LPStreamError]).} =
  trace "starting performance benchmark", conn, sizeToWrite, sizeToRead
  p.stats = Stats()
  try:
    var
      size = sizeToWrite
      buf: array[PerfSize, byte]
    let start = Moment.now()
    await conn.write(toSeq(toBytesBE(sizeToRead)))
    while size > 0:
      let toWrite = min(size, PerfSize)
      await conn.write(buf[0 ..< toWrite])
      size -= toWrite.uint
      # set stats using copy value to avoid race condition
      var statsCopy = p.stats
      statsCopy.duration = Moment.now() - start
      statsCopy.uploadBytes += toWrite.uint
      p.stats = statsCopy
    await conn.close()
    size = sizeToRead
    while size > 0:
      let toRead = min(size, PerfSize)
      await conn.readExactly(addr buf[0], toRead.int)
      size = size - toRead.uint
      # set stats using copy value to avoid race condition
      var statsCopy = p.stats
      statsCopy.duration = Moment.now() - start
      statsCopy.downloadBytes += toRead.uint
      p.stats = statsCopy
  except CancelledError as e:
    raise e
  except LPStreamError as e:
    raise e
  finally:
    p.stats.isFinal = true
  trace "finishing performance benchmark", duration = p.stats.duration
  return p.stats.duration
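
A hedged polling sketch, assuming an open stream `conn` inside an async proc; it relies only on `perf`, `currentStats`, and chronos primitives already imported above:

# Hypothetical usage, not part of this changeset:
let client = PerfClient.new()
let fut = client.perf(conn, sizeToWrite = 10 * 1024 * 1024)
while not fut.finished:
  let stats = client.currentStats() # perf() publishes whole-struct copies, so polling is safe
  trace "perf progress", upload = stats.uploadBytes, download = stats.downloadBytes
  await sleepAsync(250.milliseconds)
discard await fut # total duration; also available as currentStats().duration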


@@ -24,7 +24,7 @@ type Perf* = ref object of LPProtocol
proc new*(T: typedesc[Perf]): T {.public.} =
  var p = T()
  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    var bytesRead = 0
    try:
      trace "Received benchmark performance check", conn
@@ -47,10 +47,12 @@ proc new*(T: typedesc[Perf]): T {.public.} =
        await conn.write(buf[0 ..< toWrite])
        size -= toWrite
    except CancelledError as exc:
      trace "cancelled perf handler"
      raise exc
    except CatchableError as exc:
      trace "exception in perf handler", description = exc.msg, conn
    finally:
      await conn.close()

  p.handler = handle
  p.codec = PerfCodec


@@ -51,7 +51,7 @@ proc new*(
  ping

method init*(p: Ping) =
  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    try:
      trace "handling ping", conn
      var buf: array[PingSize, byte]
@@ -61,6 +61,7 @@ method init*(p: Ping) =
      if not isNil(p.pingHandler):
        await p.pingHandler(conn.peerId)
    except CancelledError as exc:
      trace "cancelled ping handler"
      raise exc
    except CatchableError as exc:
      trace "exception in ping handler", description = exc.msg, conn
@@ -68,7 +69,11 @@ method init*(p: Ping) =
  p.handler = handle
  p.codec = PingCodec

proc ping*(
    p: Ping, conn: Connection
): Future[Duration] {.
    public, async: (raises: [CancelledError, LPStreamError, WrongPingAckError])
.} =
  ## Sends ping to `conn`, returns the delay
  trace "initiating ping", conn

Some files were not shown because too many files have changed in this diff.