Mirror of https://github.com/vacp2p/nim-libp2p.git (synced 2026-01-10 10:37:55 -05:00)

Compare commits: nimbus ... no-splitti
427 Commits
8b0565725f, f345026900, 4780af2036, 5d6578a06f, 871a5d047f, 061195195b, 8add5aaaab, dbf60b74c7, d2eaf07960, 6e5274487e,
7ed62461d7, 6059ee8332, 4f7e232a9e, 5eaa43b860, 17ed2d88df, c7f29ed5db, 9865cc39b5, 601f56b786, 25a8ed4d07, 955e28ff70,
f952e6d436, bed83880bf, 9bd4b7393f, 12d1fae404, 17073dc9e0, b1649b3566, ef20f46b47, 9161529c84, 8b70384b6a, f25814a890,
3d5ea1fa3c, 2114008704, 04796b210b, 59faa023aa, fdebea4e14, 0c188df806, abee5326dc, 71f04d1bb3, 41ae43ae80, 5dbf077d9e,
b5fc7582ff, 7f83ebb198, ceb89986c1, f4ff27ca6b, b517b692df, 7cfd26035a, cd5fea53e3, d9aa393761, a4a0d9e375, c8b406d6ed,
f0125a62df, 9bf2636186, 01a33ebe5c, c1cd31079b, 9f9f38e314, f83638eb82, 882cb5dfe3, 81310df2a2, 34110a37d7, 1035e4f314,
d08bad5893, 7bdba4909f, e71c7caf82, 45476bdd6b, c7ee7b950d, 87b3d2c864, 19b4c20e2f, 514bd4b5f5, 46d936b80c, 80bf27c6bb,
6576c5c3bf, 2e6b1d2738, 9e6c4cb4d2, 5f256049ab, e29ca73386, 577809750a, 46a5430cc2, d8b9f59c5e, 2951356c9d, 7ae21d0cbd,
eee8341ad2, e83bd2d582, 998bb58aef, c1f6dec7d3, 13c613c26c, 45f0f9f47a, b1dd0a2ec6, beecfdfadb, e4faec5570, 41c9bf8e8c,
7ae366d979, 9b33cea225, f8077f7432, 773fc67865, 7e07ffc5a8, aa1c33ffe9, f1e220fba4, 5ad656bf26, cfd631457a, 4f8597609b,
4ed72a753c, 2a9abbe925, ee61e234ac, 8f54367e3a, 61826a20e4, 32951e1a68, 1d13e405e4, 729e879c1c, 64c9cf1b9e, 4d94892eb0,
3ecb1744ce, 2f9c3fb3e2, 2609c270b8, 48b3e34cd3, abb2c43667, d1cfbb35d3, 38a630eee0, be1a2023ce, 021d0c1700, f49cd377ce,
fc80840784, 7742d06a58, e0ea1d48a4, f028ad8c12, 9c153c822b, d803352bd6, 2eafac47e8, 848fdde0a8, 31e7dc68e2, 08299a2059,
2f3156eafb, 72e85101b0, d205260a3e, 97e576d146, 888cb78331, 1d4c261d2a, 83de0c0abd, c501adc9ab, f9fc24cc08, cd26244ccc,
cabab6aafe, fb42a9b4aa, 141f4d9116, cb31152b53, 3a7745f920, a89916fb1a, c6cf46c904, b28a71ab13, 95b9859bcd, 9e599753af,
2e924906bb, e811c1ad32, 86695b55bb, 8c3a4d882a, 4bad343ddc, 47b8a05c32, 4e6f4af601, 7275f6f9c3, c3dae6a7d4, bb404eda4a,
584710bd80, ad5eae9adf, 26fae7cd2d, 87d6655368, cd60b254a0, b88cdcdd4b, 4a5e06cb45, fff3a7ad1f, 05c894d487, 8850e9ccd9,
2746531851, 2856db5490, b29e78ccae, c9761c3588, e4ef21e07c, 61429aa0d6, c1ef011556, cd1424c09f, 878d627f93, 1d6385ddc5,
873f730b4e, 1c1547b137, 9997f3e3d3, 4d0b4ecc22, ccb24b5f1f, 5cb493439d, 24b284240a, b0f77d24f9, e32ac492d3, 470a7f8cc5,
b269fce289, bc4febe92c, b5f9bfe0f4, 4ce1e8119b, 65136b38e2, ffc114e8d9, f2be2d6ed5, ab690a06a6, 10cdaf14c5, ebbfb63c17,
ac25da6cea, fb41972ba3, 504d1618af, 0f91b23f12, 5ddd62a8b9, e7f13a7e73, 89e825fb0d, 1b706e84fa, 5cafcb70dc, 8c71266058,
9c986c5c13, 3d0451d7f2, b1f65c97ae, 5584809fca, 7586f17b15, 0e16d873c8, b11acd2118, 1376f5b077, 340ea05ae5, 024ec51f66,
efe453df87, c0f4d903ba, 28f2b268ae, 5abb6916b6, e6aec94c0c, 9eddc7c662, 028c730a4f, 3c93bdaf80, 037b99997e, e67744bf2a,
5843e6fb4f, f0ff7e4c69, 24808ad534, c4bccef138, adf2345adb, f7daad91e6, 65052d7b59, b07ec5c0c6, f4c94ddba1, a7ec485ca9,
86b6469e35, 3e16ca724d, 93dd5a6768, ec43d0cb9f, 8469a750e7, fc6ac07ce8, 79cdc31b37, be33ad6ac7, a6e45d6157, 37e0f61679,
5d382b6423, 78a4344054, a4f0a638e7, c5aa3736f9, b0f83fd48c, d6e5094095, 483e1d91ba, d215bb21e0, 61ac0c5b95, 1fa30f07e8,
39d0451a10, 4dc7a89f45, fd26f93b80, dd2c74d413, b7e0df127f, f591e692fc, 8855bce085, ed5670408b, 97192a3c80, 294d06323c,
a3b8729cbe, 6c970911f2, 5d48776b02, d389d96789, 09fe199b6b, 68306cf1f1, b37133ca43, 3e3df07269, 1771534030, 21a444197c,
966996542e, 8070b21825, d98152f266, 47a51983b5, 70754cd575, a1811e7395, c6e8fadbda, 48846d69cb, 18a2e79ce2, 55cc5434fe,
cde5ed7e8c, 6ec038d29a, fdae9e4b42, a60f0c5532, 62f2d85f11, e5e319c1a9, f8d4da6421, b5fb7b3a97, fa19bbbbb7, 86563cbddd,
be801602f6, 94d93cbf25, 78f0855419, 2195313dba, 100f3188ed, d1d53ff369, 0f27f896ab, 0be7144e34, fba6dc31b0, 02f6e6127c,
1d826ee26f, 7498258f7c, 4618f4c68f, 3bf8a2907f, 96bfefc928, dc83a1e9b6, d0af3fbe85, 120549e313, bccb305cf5, f9a6ef06cf,
8cb7dbb425, 368c9765f7, d6feb1bbc2, 3f5b5cee75, 8a4e8a00a2, 77d40c34f4, 2fa2c4425f, 0911cb20f4, 3ca49a2f40, 1b91b97499,
21cbe3a91a, 88e233db81, 84659af45b, aef44ed1ce, 02c96fc003, c4da9be32c, 2b5319622c, 5cbb473d1b, b30b2656d5, 89cad5a3ba,
09b3e11956, 03f67d3db5, bb97a9de79, 1a707e1264, 458b0885dd, a2027003cd, c5db35d9b0, d1e51beb7f, 275d649287, 467b5b4f0c,
fdf53d18cd, 48a3ac06ff, 49a92e5641, 08a48faf41, 61b299e411, ca01ee06a8, 6c43ab3fce, ae13a0d583, 28609597d1, 8294d5b9df,
78e83889ee, 7603b8de5e, 8cccd54125, 18e00a741b, ee264fdf11, 9059a8aced, 0b753e7cf2, d43c5feab0, 1609fd7197, 42cd78e95b,
44cada9c55, 6c873481ac, d08ce17144, bd6ead95ef, 53e3825e07, e9b456162a, 250024f6cc, fec632d28d, 349496e40f, 7faa0fac23,
c5e4f8e12d, fe4ff79885, aa4ebb0b3c, e0f70b7177, c1dfd58772, 04af0c4323, eb0890cd6f, 9bc5ec1566, 5594bcb33e, d46bcdb6ac,
9468bb6b4d, 2725be64ba, e3c967ad19, d2c98bd87d, 3011ba4326, c6566707fa, 3be681ec4d, 2ede0fa40c, 7c195ab927, 3230407ffe,
deb72c8580, ce0685c272, 1f4b090227, fb05f5ae22, e12f65f193, 4b3bc4f819, 6791f5e7bb, 08d9c84aca, 4e7eaba67a, 5f7a3ab829,
ebef85c9d7, 3fc1236659, fc4e9a8bb8, 60f953629d, 18b0f726df, 459f6851e7, 575344e2e9, 75871817ee, 61929aed6c, 56599f5b9d,
b2eac7ecbd, 20b0e40f7d, ff77d52851, 545a31d4f0, b76bac752f, c6aa085e98, e03547ea3e
@@ -1,52 +0,0 @@
version: '{build}'

image: Visual Studio 2015

cache:
  - NimBinaries
  - p2pdCache

matrix:
  # We always want 32 and 64-bit compilation
  fast_finish: false

platform:
  - x86
  - x64

# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
clone_depth: 10

install:
  - git submodule update --init --recursive

  # use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
  - IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
  - IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%

  # build nim from our own branch - this to avoid the day-to-day churn and
  # regressions of the fast-paced Nim development while maintaining the
  # flexibility to apply patches
  - curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
  - env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries
  - SET PATH=%CD%\Nim\bin;%PATH%

  # set path for produced Go binaries
  - MKDIR goblin
  - CD goblin
  - SET GOPATH=%CD%
  - SET PATH=%GOPATH%\bin;%PATH%
  - CD ..

  # install and build go-libp2p-daemon
  - bash scripts/build_p2pd.sh p2pdCache v0.3.0

build_script:
  - nimble install -y --depsOnly

test_script:
  - nimble test
  - nimble examples_build

deploy: off
2 .git-blame-ignore-revs Normal file
@@ -0,0 +1,2 @@
# Formatted with nph 0.5.1
dc83a1e9b68f00b3be7e09febdb1a3f877321b9a
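This file follows the standard git convention for blame-ignored revisions: listing the nph reformatting commit lets `git blame` skip it. A clone has to opt in explicitly; an illustrative one-liner, assuming a checkout of this repository:

    git config blame.ignoreRevsFile .git-blame-ignore-revs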
1 .github/CODEOWNERS vendored Normal file
@@ -0,0 +1 @@
* @vacp2p/p2p
34 .github/actions/add_comment/action.yml vendored Normal file
@@ -0,0 +1,34 @@
name: Add Comment
description: "Add or update comment in the PR"

runs:
  using: "composite"
  steps:
    - name: Add/Update Comment
      uses: actions/github-script@v7
      with:
        script: |
          const fs = require('fs');
          const marker = "${{ env.MARKER }}";
          const body = fs.readFileSync("${{ env.COMMENT_SUMMARY_PATH }}", 'utf8');
          const { data: comments } = await github.rest.issues.listComments({
            owner: context.repo.owner,
            repo: context.repo.repo,
            issue_number: context.issue.number,
          });
          const existing = comments.find(c => c.body && c.body.startsWith(marker));
          if (existing) {
            await github.rest.issues.updateComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              comment_id: existing.id,
              body,
            });
          } else {
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body,
            });
          }
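The action reads `MARKER` and `COMMENT_SUMMARY_PATH` from the environment rather than declaring inputs, so a caller must define both at the job or workflow level. A minimal caller sketch (hypothetical standalone job; the env values shown are the ones the Performance workflow further down uses):

    jobs:
      comment:
        runs-on: ubuntu-latest
        env:
          # assumed values, mirroring performance.yml
          MARKER: "<!-- perf-summary-marker -->"
          COMMENT_SUMMARY_PATH: "/tmp/perf-summary.md"
        steps:
          - uses: actions/checkout@v4
          - uses: ./.github/actions/add_comment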
49 .github/actions/discord_notify/action.yml vendored Normal file
@@ -0,0 +1,49 @@
name: Discord Failure Notification
description: "Send Discord notification when CI jobs fail"
inputs:
  webhook_url:
    description: "Discord webhook URL"
    required: true
  workflow_name:
    description: "Name of the workflow that failed"
    required: false
    default: ${{ github.workflow }}
  branch:
    description: "Branch name"
    required: false
    default: ${{ github.ref_name }}
  repository:
    description: "Repository name"
    required: false
    default: ${{ github.repository }}
  run_id:
    description: "GitHub run ID"
    required: false
    default: ${{ github.run_id }}
  server_url:
    description: "GitHub server URL"
    required: false
    default: ${{ github.server_url }}

runs:
  using: "composite"
  steps:
    - name: Send Discord notification
      shell: bash
      run: |
        curl -H "Content-Type: application/json" \
          -X POST \
          -d "{
            \"embeds\": [{
              \"title\": \"${{ inputs.workflow_name }} Job Failed\",
              \"url\": \"${{ inputs.server_url }}/${{ inputs.repository }}/actions/runs/${{ inputs.run_id }}\",
              \"description\": \"The workflow has failed on branch \`${{ inputs.branch }}\`\",
              \"color\": 15158332,
              \"fields\": [
                {\"name\": \"Repository\", \"value\": \"${{ inputs.repository }}\", \"inline\": true},
                {\"name\": \"Branch\", \"value\": \"${{ inputs.branch }}\", \"inline\": true}
              ],
              \"timestamp\": \"$(date -u +%Y-%m-%dT%H:%M:%S.000Z)\"
            }]
          }" \
          "${{ inputs.webhook_url }}"
24 .github/actions/generate_plots/action.yml vendored Normal file
@@ -0,0 +1,24 @@
name: Generate Plots
description: "Set up Python and run script to generate plots with Docker Stats"

runs:
  using: "composite"
  steps:
    - name: Set up Python
      uses: actions/setup-python@v5
      with:
        python-version: "3.12"

    - name: Install Python dependencies
      shell: bash
      run: |
        python -m pip install --upgrade pip
        pip install matplotlib

    - name: Plot Docker Stats
      shell: bash
      run: python performance/scripts/plot_docker_stats.py

    - name: Plot Latency History
      shell: bash
      run: python performance/scripts/plot_latency_history.py
14 .github/actions/install_nim/action.yml vendored
@@ -6,9 +6,9 @@ inputs:
  cpu:
    description: "CPU to build for"
    default: "amd64"
  nim_branch:
  nim_ref:
    description: "Nim version"
    default: "version-1-6"
    default: "version-2-0"
  shell:
    description: "Shell to run commands in"
    default: "bash --noprofile --norc -e -o pipefail"
@@ -61,7 +61,7 @@ runs:
  - name: Restore Nim DLLs dependencies (Windows) from cache
    if: inputs.os == 'Windows'
    id: windows-dlls-cache
    uses: actions/cache@v3
    uses: actions/cache@v4
    with:
      path: external/dlls
      key: 'dlls'
@@ -88,6 +88,8 @@ runs:
    run: |
      if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
        PLATFORM=x64
      elif [[ '${{ inputs.cpu }}' == 'arm64' ]]; then
        PLATFORM=arm64
      else
        PLATFORM=x86
      fi
@@ -114,10 +116,10 @@ runs:

  - name: Restore Nim from cache
    id: nim-cache
    uses: actions/cache@v3
    uses: actions/cache@v4
    with:
      path: '${{ github.workspace }}/nim'
      key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}
      key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_ref }}-cache-${{ env.cache_nonce }}

  - name: Build Nim and Nimble
    shell: ${{ inputs.shell }}
@@ -126,6 +128,6 @@ runs:
      # We don't want partial matches of the cache restored
      rm -rf nim
      curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
      env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_branch }} \
      env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_ref }} \
        QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
        bash build_nim.sh nim csources dist/nimble NimBinaries
21 .github/actions/process_stats/action.yml vendored Normal file
@@ -0,0 +1,21 @@
name: Process Stats
description: "Set up Nim and run scripts to aggregate latency and process raw docker stats"

runs:
  using: "composite"
  steps:
    - name: Set up Nim
      uses: jiro4989/setup-nim-action@v2
      with:
        nim-version: "2.x"
        repo-token: ${{ env.GITHUB_TOKEN }}

    - name: Aggregate latency stats and prepare markdown for comment and summary
      shell: bash
      run: |
        nim c -r -d:release -o:/tmp/process_latency_stats ./performance/scripts/process_latency_stats.nim

    - name: Process raw docker stats to csv files
      shell: bash
      run: |
        nim c -r -d:release -o:/tmp/process_docker_stats ./performance/scripts/process_docker_stats.nim
36 .github/actions/publish_history/action.yml vendored Normal file
@@ -0,0 +1,36 @@
name: Publish Latency History
description: "Publish latency history CSVs in a configurable branch and folder"

runs:
  using: "composite"
  steps:
    - name: Clone the branch
      uses: actions/checkout@v4
      with:
        repository: ${{ github.repository }}
        ref: ${{ env.PUBLISH_BRANCH_NAME }}
        path: ${{ env.CHECKOUT_SUBFOLDER_HISTORY }}
        fetch-depth: 0

    - name: Commit & push latency history CSVs
      shell: bash
      run: |
        cd "$CHECKOUT_SUBFOLDER_HISTORY"
        git fetch origin "$PUBLISH_BRANCH_NAME"
        git reset --hard "origin/$PUBLISH_BRANCH_NAME"

        mkdir -p "$PUBLISH_DIR_LATENCY_HISTORY"

        cp ../$SHARED_VOLUME_PATH/$LATENCY_HISTORY_PREFIX*.csv "$PUBLISH_DIR_LATENCY_HISTORY/"
        git add "$PUBLISH_DIR_LATENCY_HISTORY"

        if git diff-index --quiet HEAD --; then
          echo "No changes to commit"
        else
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git config user.name "github-actions[bot]"
          git commit -m "Update latency history CSVs"
          git push origin "$PUBLISH_BRANCH_NAME"
        fi

        cd ..
56 .github/actions/publish_plots/action.yml vendored Normal file
@@ -0,0 +1,56 @@
name: Publish Plots
description: "Publish plots in performance_plots branch and add to the workflow summary"

runs:
  using: "composite"
  steps:
    - name: Clone the performance_plots branch
      uses: actions/checkout@v4
      with:
        repository: ${{ github.repository }}
        ref: ${{ env.PUBLISH_BRANCH_NAME }}
        path: ${{ env.CHECKOUT_SUBFOLDER_SUBPLOTS }}
        fetch-depth: 0

    - name: Commit & push plots
      shell: bash
      run: |
        cd $CHECKOUT_SUBFOLDER_SUBPLOTS
        git fetch origin "$PUBLISH_BRANCH_NAME"
        git reset --hard "origin/$PUBLISH_BRANCH_NAME"

        # Remove any branch folder older than 7 days
        DAYS=7
        cutoff=$(( $(date +%s) - DAYS*24*3600 ))
        scan_dir="${PUBLISH_DIR_PLOTS%/}"
        find "$scan_dir" -mindepth 1 -maxdepth 1 -type d -print0 \
          | while IFS= read -r -d $'\0' d; do \
              ts=$(git log -1 --format=%ct -- "$d" 2>/dev/null || true); \
              if [ -n "$ts" ] && [ "$ts" -le "$cutoff" ]; then \
                echo "[cleanup] Deleting: $d"; \
                rm -rf -- "$d"; \
              fi; \
            done

        rm -rf $PUBLISH_DIR_PLOTS/$BRANCH_NAME
        mkdir -p $PUBLISH_DIR_PLOTS/$BRANCH_NAME

        cp ../$SHARED_VOLUME_PATH/*.png $PUBLISH_DIR_PLOTS/$BRANCH_NAME/ 2>/dev/null || true
        cp ../$LATENCY_HISTORY_PATH/*.png $PUBLISH_DIR_PLOTS/ 2>/dev/null || true
        git add -A "$PUBLISH_DIR_PLOTS/"

        git status

        if git diff-index --quiet HEAD --; then
          echo "No changes to commit"
        else
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git config user.name "github-actions[bot]"
          git commit -m "Update performance plots for $BRANCH_NAME"
          git push origin $PUBLISH_BRANCH_NAME
        fi

    - name: Add plots to GitHub Actions summary
      shell: bash
      run: |
        nim c -r -d:release -o:/tmp/add_plots_to_summary ./performance/scripts/add_plots_to_summary.nim
12 .github/workflows/auto_assign_pr.yml vendored Normal file
@@ -0,0 +1,12 @@
name: Auto Assign PR to Creator

on:
  pull_request:
    types:
      - opened

jobs:
  assign_creator:
    runs-on: ubuntu-latest
    steps:
      - uses: toshimaru/auto-author-assign@v1.6.2
45 .github/workflows/bumper.yml vendored
@@ -1,45 +0,0 @@
name: Bumper
on:
  push:
    branches:
      - unstable
      - bumper
  workflow_dispatch:

jobs:
  bumpProjects:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        target: [
          { repo: status-im/nimbus-eth2, branch: unstable },
          { repo: waku-org/nwaku, branch: master },
          { repo: codex-storage/nim-codex, branch: master }
        ]
    steps:
      - name: Clone repo
        uses: actions/checkout@v2
        with:
          repository: ${{ matrix.target.repo }}
          ref: ${{ matrix.target.branch }}
          path: nbc
          fetch-depth: 0
          token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}

      - name: Checkout this ref
        run: |
          cd nbc
          git submodule update --init vendor/nim-libp2p
          cd vendor/nim-libp2p
          git checkout $GITHUB_SHA

      - name: Commit this bump
        run: |
          cd nbc
          git config --global user.email "${{ github.actor }}@users.noreply.github.com"
          git config --global user.name = "${{ github.actor }}"
          git commit --allow-empty -a -m "auto-bump nim-libp2p"
          git branch -D nim-libp2p-auto-bump-${GITHUB_REF##*/} || true
          git switch -c nim-libp2p-auto-bump-${GITHUB_REF##*/}
          git push -f origin nim-libp2p-auto-bump-${GITHUB_REF##*/}
72 .github/workflows/ci.yml vendored
@@ -1,10 +1,11 @@
name: CI
name: Continuous Integration

on:
  push:
    branches:
      - master
      - unstable
  pull_request:
  merge_group:
  workflow_dispatch:

concurrency:
@@ -12,62 +13,69 @@ concurrency:
  cancel-in-progress: true

jobs:
  build:
    timeout-minutes: 90
  test:
    timeout-minutes: 40
    strategy:
      fail-fast: false
      matrix:
        target:
        platform:
          - os: linux
            cpu: amd64
          - os: linux
            cpu: i386
          - os: macos
          - os: linux-gcc-14
            cpu: amd64
          - os: macos-14
            cpu: arm64
          - os: windows
            cpu: amd64
          #- os: windows
          #cpu: i386
        branch: [version-1-6]
        nim:
          - ref: version-2-0
            memory_management: refc
          - ref: version-2-2
            memory_management: refc
        include:
          - target:
          - platform:
              os: linux
            builder: ubuntu-20.04
            builder: ubuntu-22.04
            shell: bash
          - target:
              os: macos
            builder: macos-12
          - platform:
              os: linux-gcc-14
            builder: ubuntu-24.04
            shell: bash
          - target:
          - platform:
              os: macos-14
            builder: macos-14
            shell: bash
          - platform:
              os: windows
            builder: windows-2019
            builder: windows-2022
            shell: msys2 {0}

    defaults:
      run:
        shell: ${{ matrix.shell }}

    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
    name: '${{ matrix.platform.os }}-${{ matrix.platform.cpu }} (Nim ${{ matrix.nim.ref }})'
    runs-on: ${{ matrix.builder }}
    continue-on-error: ${{ matrix.branch == 'devel' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        uses: actions/checkout@v4
        with:
          submodules: true

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          os: ${{ matrix.target.os }}
          cpu: ${{ matrix.target.cpu }}
          os: ${{ matrix.platform.os }}
          cpu: ${{ matrix.platform.cpu }}
          shell: ${{ matrix.shell }}
          nim_branch: ${{ matrix.branch }}
          nim_ref: ${{ matrix.nim.ref }}

      - name: Setup Go
        uses: actions/setup-go@v2
        uses: actions/setup-go@v5
        with:
          go-version: '~1.15.5'
          go-version: '~1.16.0' # That's the minimum Go version that works with arm.

      - name: Install p2pd
        run: |
@@ -78,15 +86,29 @@ jobs:
        uses: actions/cache@v3
        with:
          path: nimbledeps
          key: nimbledeps-${{ hashFiles('.pinned') }}
          # Using nim.ref as a simple way to differentiate between nimble using the "pkgs" or "pkgs2" directories.
          # The change happened on Nimble v0.14.0. Also forcing the deps to be reinstalled on each os and cpu.
          key: nimbledeps-${{ matrix.nim.ref }}-${{ matrix.builder }}-${{ matrix.platform.cpu }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows

      - name: Install deps
        if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
        run: |
          nimble install_pinned

      - name: Use gcc 14
        if : ${{ matrix.platform.os == 'linux-gcc-14'}}
        run: |
          # Add GCC-14 to alternatives
          sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14

          # Set GCC-14 as the default
          sudo update-alternatives --set gcc /usr/bin/gcc-14

      - name: Run tests
        run: |
          nim --version
          nimble --version
          gcc --version

          export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
          nimble test
@@ -1,12 +1,12 @@
name: nim-libp2p codecov builds
name: Coverage

on:
  #On push to common branches, this computes the "bases stats" for PRs
  # On push to common branches, this computes the coverage that PRs will use for diff
  push:
    branches:
      - master
      - unstable
  pull_request:
  merge_group:
  workflow_dispatch:

concurrency:
@@ -14,12 +14,13 @@ concurrency:
  cancel-in-progress: true

jobs:
  Coverage:
    runs-on: ubuntu-20.04
  codecov:
    name: Run coverage and upload to codecov
    runs-on: ubuntu-22.04
    env:
      CICOV: YES
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

@@ -32,7 +33,7 @@ jobs:

      - name: Restore deps from cache
        id: deps-cache
        uses: actions/cache@v3
        uses: actions/cache@v4
        with:
          path: nimbledeps
          key: nimbledeps-${{ hashFiles('.pinned') }}
@@ -42,24 +43,28 @@ jobs:
        run: |
          nimble install_pinned

      - name: Run
      - name: Setup coverage
        run: |
          sudo apt-get update
          sudo apt-get install -y lcov build-essential git curl
          mkdir coverage

      - name: Run test suite with coverage flags
        run: |
          export NIMFLAGS="--lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage"
          nimble testnative
          nimble testpubsub
          nimble testfilter

      - name: Run coverage
        run: |
          find nimcache -name *.c -delete
          lcov --capture --directory nimcache --output-file coverage/coverage.info
          shopt -s globstar
          ls `pwd`/libp2p/{*,**/*}.nim
          lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
          genhtml coverage/coverage.f.info --output-directory coverage/output
          bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"

      #- uses: actions/upload-artifact@master
      #  with:
      #    name: coverage
      #    path: coverage
      - name: Upload coverage to codecov
        run: |
          bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
42 .github/workflows/daily_amd64.yml vendored Normal file
@@ -0,0 +1,42 @@
name: Daily amd64

on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  test_amd64_latest:
    name: Daily test amd64 (latest dependencies)
    uses: ./.github/workflows/daily_common.yml
    with:
      nim: "[
        {'ref': 'version-2-0', 'memory_management': 'refc'},
        {'ref': 'version-2-2', 'memory_management': 'refc'},
        {'ref': 'devel', 'memory_management': 'refc'},
      ]"
      cpu: "['amd64']"
  test_amd64_pinned:
    name: Daily test amd64 (pinned dependencies)
    uses: ./.github/workflows/daily_common.yml
    with:
      pinned_deps: true
      nim: "[
        {'ref': 'version-2-0', 'memory_management': 'refc'},
        {'ref': 'version-2-2', 'memory_management': 'refc'},
        {'ref': 'devel', 'memory_management': 'refc'},
      ]"
      cpu: "['amd64']"
  notify-on-failure:
    name: Notify Discord on Failure
    needs: [test_amd64_latest, test_amd64_pinned]
    if: failure()
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Discord notification
        uses: ./.github/actions/discord_notify
        with:
          webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}
107 .github/workflows/daily_common.yml vendored Normal file
@@ -0,0 +1,107 @@
name: Daily Common
# Serves as base workflow for daily tasks, it's not run by itself.

on:
  workflow_call:
    inputs:
      pinned_deps:
        description: 'Should dependencies be installed from pinned file or use latest versions'
        required: false
        type: boolean
        default: false
      nim:
        description: 'Nim Configuration'
        required: true
        type: string # Following this format: [{"ref": ..., "memory_management": ...}, ...]
      cpu:
        description: 'CPU'
        required: true
        type: string
      exclude:
        description: 'Exclude matrix configurations'
        required: false
        type: string
        default: "[]"

jobs:
  delete_cache:
    name: Delete github action's branch cache
    runs-on: ubuntu-latest
    continue-on-error: true
    steps:
      - uses: snnaplab/delete-branch-cache-action@v1

  test:
    needs: delete_cache
    timeout-minutes: 40
    strategy:
      fail-fast: false
      matrix:
        platform:
          - os: linux
            builder: ubuntu-22.04
            shell: bash
          - os: macos
            builder: macos-13
            shell: bash
          - os: windows
            builder: windows-2022
            shell: msys2 {0}
        nim: ${{ fromJSON(inputs.nim) }}
        cpu: ${{ fromJSON(inputs.cpu) }}
        exclude: ${{ fromJSON(inputs.exclude) }}

    defaults:
      run:
        shell: ${{ matrix.platform.shell }}

    name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.nim.ref }})'
    runs-on: ${{ matrix.platform.builder }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          os: ${{ matrix.platform.os }}
          shell: ${{ matrix.platform.shell }}
          nim_ref: ${{ matrix.nim.ref }}
          cpu: ${{ matrix.cpu }}

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: '~1.16.0'
          cache: false

      - name: Install p2pd
        run: |
          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3

      - name: Install dependencies (pinned)
        if: ${{ inputs.pinned_deps }}
        run: |
          nimble install_pinned

      - name: Install dependencies (latest)
        if: ${{ inputs.pinned_deps == false }}
        run: |
          nimble install -y --depsOnly

      - name: Run tests
        run: |
          nim --version
          nimble --version

          export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
          nimble test

      - name: Run integration tests
        if: ${{ matrix.platform.os == 'linux' && matrix.cpu == 'amd64' }}
        run: |
          nim --version
          nimble --version

          export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
          nimble testintegration
50 .github/workflows/daily_i386.yml vendored Normal file
@@ -0,0 +1,50 @@
name: Daily i386

on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  test_i386_latest:
    name: Daily i386 (latest dependencies)
    uses: ./.github/workflows/daily_common.yml
    with:
      nim: "[
        {'ref': 'version-2-0', 'memory_management': 'refc'},
        {'ref': 'version-2-2', 'memory_management': 'refc'},
        {'ref': 'devel', 'memory_management': 'refc'},
      ]"
      cpu: "['i386']"
      exclude: "[
        {'platform': {'os':'macos'}},
        {'platform': {'os':'windows'}},
      ]"
  test_i386_pinned:
    name: Daily i386 (pinned dependencies)
    uses: ./.github/workflows/daily_common.yml
    with:
      pinned_deps: true
      nim: "[
        {'ref': 'version-2-0', 'memory_management': 'refc'},
        {'ref': 'version-2-2', 'memory_management': 'refc'},
        {'ref': 'devel', 'memory_management': 'refc'},
      ]"
      cpu: "['i386']"
      exclude: "[
        {'platform': {'os':'macos'}},
        {'platform': {'os':'windows'}},
      ]"
  notify-on-failure:
    name: Notify Discord on Failure
    needs: [test_i386_latest, test_i386_pinned]
    if: failure()
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Discord notification
        uses: ./.github/actions/discord_notify
        with:
          webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}
39 .github/workflows/daily_nimbus.yml vendored Normal file
@@ -0,0 +1,39 @@
name: Daily Nimbus

on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  compile_nimbus:
    timeout-minutes: 80
    name: 'Compile Nimbus (linux-amd64)'
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Compile nimbus using nim-libp2p
        run: |
          git clone --branch unstable --single-branch https://github.com/status-im/nimbus-eth2.git
          cd nimbus-eth2
          git submodule set-branch --branch ${{ github.sha }} vendor/nim-libp2p

          make -j"$(nproc)"
          make -j"$(nproc)" nimbus_beacon_node

  notify-on-failure:
    name: Notify Discord on Failure
    needs: compile_nimbus
    if: failure()
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Discord notification
        uses: ./.github/actions/discord_notify
        with:
          webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}
65 .github/workflows/dependencies.yml vendored Normal file
@@ -0,0 +1,65 @@
name: Dependencies

on:
  push:
    branches:
      - master
  workflow_dispatch:

jobs:
  bumper:
    # Pushes new refs to interested external repositories, so they can do early testing against libp2p's newer versions
    runs-on: ubuntu-latest
    name: Bump libp2p's version for ${{ matrix.target.repository }}:${{ matrix.target.ref }}
    strategy:
      fail-fast: false
      matrix:
        target:
          - repository: status-im/nimbus-eth2
            ref: unstable
            secret: ACTIONS_GITHUB_TOKEN_NIMBUS_ETH2
          - repository: waku-org/nwaku
            ref: master
            secret: ACTIONS_GITHUB_TOKEN_NWAKU
          - repository: codex-storage/nim-codex
            ref: master
            secret: ACTIONS_GITHUB_TOKEN_NIM_CODEX
    steps:
      - name: Clone target repository
        uses: actions/checkout@v4
        with:
          repository: ${{ matrix.target.repository }}
          ref: ${{ matrix.target.ref}}
          path: nbc
          fetch-depth: 0
          token: ${{ secrets[matrix.target.secret] }}

      - name: Checkout this ref in target repository
        run: |
          cd nbc
          git submodule update --init vendor/nim-libp2p
          cd vendor/nim-libp2p
          git checkout $GITHUB_SHA

      - name: Push this ref to target repository
        run: |
          cd nbc
          git config --global user.email "${{ github.actor }}@users.noreply.github.com"
          git config --global user.name = "${{ github.actor }}"
          git commit --allow-empty -a -m "auto-bump nim-libp2p"
          git branch -D nim-libp2p-auto-bump-${{ matrix.target.ref }} || true
          git switch -c nim-libp2p-auto-bump-${{ matrix.target.ref }}
          git push -f origin nim-libp2p-auto-bump-${{ matrix.target.ref }}
  notify-on-failure:
    name: Notify Discord on Failure
    needs: [bumper]
    if: failure()
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Discord notification
        uses: ./.github/actions/discord_notify
        with:
          webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}
@@ -1,25 +1,27 @@
name: Docgen
name: Documentation Generation And Publishing

on:
  push:
    branches:
      - master
  workflow_dispatch:


jobs:
  build:
    timeout-minutes: 20

    name: 'Generate & upload documentation'
    runs-on: 'ubuntu-20.04'
    runs-on: ubuntu-latest
    continue-on-error: true
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        uses: actions/checkout@v4
        with:
          submodules: true

      - uses: jiro4989/setup-nim-action@v1
        with:
          nim-version: 'stable'
          nim-version: '2.2.x'

      - name: Generate doc
        run: |
@@ -27,15 +29,15 @@ jobs:
          nimble --version
          nimble install_pinned
          # nim doc can "fail", but the doc is still generated
          nim doc --git.url:https://github.com/status-im/nim-libp2p --git.commit:${GITHUB_REF##*/} --outdir:${GITHUB_REF##*/} --project libp2p || true
          nim doc --git.url:https://github.com/vacp2p/nim-libp2p --git.commit:${GITHUB_REF##*/} --outdir:${GITHUB_REF##*/} --project libp2p || true

          # check that the folder exists
          ls ${GITHUB_REF##*/}

      - name: Clone the gh-pages branch
        uses: actions/checkout@v2
        uses: actions/checkout@v4
        with:
          repository: status-im/nim-libp2p
          repository: vacp2p/nim-libp2p
          ref: gh-pages
          path: subdoc
          submodules: true
@@ -45,9 +47,6 @@ jobs:
        run: |
          cd subdoc

          # Delete merged branches doc's
          for branch in $(git branch -vv | grep ': gone]' | awk '{print $1}'); do rm -rf $branch; done

          # Update / create this branch doc
          rm -rf ${GITHUB_REF##*/}
          mv ../${GITHUB_REF##*/} .
@@ -63,12 +62,11 @@ jobs:
          git push origin gh-pages

  update_site:
    if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/docs'
    name: 'Rebuild website'
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        uses: actions/checkout@v4

      - uses: actions/setup-python@v2
        with:
@@ -79,12 +77,12 @@ jobs:
          nim-version: 'stable'

      - name: Generate website
        run: pip install mkdocs-material && nimble website
        run: pip install mkdocs-material && nimble -y website

      - name: Clone the gh-pages branch
        uses: actions/checkout@v2
        uses: actions/checkout@v4
        with:
          repository: status-im/nim-libp2p
          repository: vacp2p/nim-libp2p
          ref: gh-pages
          path: subdoc
          fetch-depth: 0
@@ -93,11 +91,21 @@ jobs:
        run: |
          cd subdoc

          # Ensure the latest changes are fetched and reset to the remote branch
          git fetch origin gh-pages
          git reset --hard origin/gh-pages

          rm -rf docs
          mv ../site docs

          git add .
          git config --global user.email "${{ github.actor }}@users.noreply.github.com"
          git config --global user.name = "${{ github.actor }}"
          git commit -a -m "update website"
          git push origin gh-pages

          if git diff-index --quiet HEAD --; then
            echo "No changes to commit"
          else
            git config --global user.email "${{ github.actor }}@users.noreply.github.com"
            git config --global user.name "${{ github.actor }}"

            git commit -m "update website"
            git push origin gh-pages
          fi
60 .github/workflows/examples.yml vendored Normal file
@@ -0,0 +1,60 @@
name: Examples

on:
  push:
    branches:
      - master
  pull_request:
  merge_group:
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  examples:
    timeout-minutes: 30
    strategy:
      fail-fast: false

    defaults:
      run:
        shell: bash

    name: "Build Examples"
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: true

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          shell: bash
          os: linux
          cpu: amd64
          nim_ref: version-2-2

      - name: Restore deps from cache
        id: deps-cache
        uses: actions/cache@v3
        with:
          path: nimbledeps
          key: nimbledeps-${{ hashFiles('.pinned') }}

      - name: Install deps
        if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
        run: |
          nimble install_pinned

      - name: Build and run examples
        run: |
          nim --version
          nimble --version
          gcc --version

          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
          nimble examples
80 .github/workflows/interop.yml vendored
@@ -1,9 +1,11 @@
name: Interoperability Testing
name: Interoperability Tests

on:
  pull_request:
  merge_group:
  push:
    branches:
      - unstable
      - master
  workflow_dispatch:

concurrency:
@@ -11,44 +13,50 @@ concurrency:
  cancel-in-progress: true

jobs:
  run-multidim-interop:
    name: Run multidimensional interoperability tests
  run-transport-interop:
    name: Run transport interoperability tests
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Free Disk Space
        # For some reason we have space issues while running this action. Likely while building the image.
        # This action will free up some space to avoid the issue.
        uses: jlumbroso/free-disk-space@v1.3.1
        with:
          repository: libp2p/test-plans
          submodules: true
          fetch-depth: 0
          tool-cache: true

      - uses: actions/checkout@v4
      - uses: docker/setup-buildx-action@v3
      - name: Build image
        run: >
          cd multidim-interop/impl/nim/v1.0 &&
          make commitSha=$GITHUB_SHA image_name=nim-libp2p-head

      - name: Create ping-version.json
        run: >
          (cat << EOF
          {
            "id": "nim-libp2p-head",
            "containerImageID": "nim-libp2p-head",
            "transports": [
              "tcp",
              "ws"
            ],
            "secureChannels": [
              "noise"
            ],
            "muxers": [
              "mplex",
              "yamux"
            ]
          }
          EOF

          ) > ${{ github.workspace }}/test_head.json

      - uses: libp2p/test-plans/.github/actions/run-interop-ping-test@master
        run: docker buildx build --load -t nim-libp2p-head -f interop/transport/Dockerfile .
      - name: Run tests
        uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
        with:
          test-filter: nim-libp2p-head
          extra-versions: ${{ github.workspace }}/test_head.json
          # without suffix action fails because "hole-punching-interop" artifacts have
          # the same name as "transport-interop" artifacts
          test-results-suffix: transport-interop
          extra-versions: ${{ github.workspace }}/interop/transport/version.json
          s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
          s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
          s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}

  # nim-libp2p#1367: hole punching tests are temporary disabled as they keep failing
  # and issue does not seem to be on nim-libp2p side
  # run-hole-punching-interop:
  #   name: Run hole-punching interoperability tests
  #   runs-on: ubuntu-22.04
  #   steps:
  #     - uses: actions/checkout@v4
  #     - uses: docker/setup-buildx-action@v3
  #     - name: Build image
  #       run: docker buildx build --load -t nim-libp2p-head -f interop/hole-punching/Dockerfile .
  #     - name: Run tests
  #       uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
  #       with:
  #         test-filter: nim-libp2p-head
  #         extra-versions: ${{ github.workspace }}/interop/hole-punching/version.json
  #         s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
  #         s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
  #         s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
  #         aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
27 .github/workflows/linters.yml vendored Normal file
@@ -0,0 +1,27 @@
name: Linters

on:
  pull_request:
  merge_group:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  nph:
    name: NPH
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 2 # In PR, has extra merge commit: ^1 = PR, ^2 = base

      - name: Check `nph` formatting
        uses: arnetheduck/nph-action@ef5e9fae6dbaf88ec4308cbede780a0ba45f845d
        with:
          version: 0.6.1
          options: "examples libp2p tests interop tools *.nim*"
          fail: true
          suggest: true
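The same formatting check can be run locally before pushing. A sketch, assuming `nph` is installed (for instance via `nimble install nph`) and mirroring the `options` string passed to the action above:

    nph examples libp2p tests interop tools *.nim*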
82 .github/workflows/multi_nim.yml vendored
@@ -1,82 +0,0 @@
name: Daily
on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  delete-cache:
    runs-on: ubuntu-latest
    steps:
      - uses: snnaplab/delete-branch-cache-action@v1

  build:
    needs: delete-cache
    timeout-minutes: 120
    strategy:
      fail-fast: false
      matrix:
        target:
          - os: linux
            cpu: amd64
          - os: linux
            cpu: i386
          - os: macos
            cpu: amd64
          - os: windows
            cpu: amd64
          #- os: windows
          #cpu: i386
        branch: [version-1-6, version-2-0, devel]
        include:
          - target:
              os: linux
            builder: ubuntu-20.04
            shell: bash
          - target:
              os: macos
            builder: macos-12
            shell: bash
          - target:
              os: windows
            builder: windows-2019
            shell: msys2 {0}

    defaults:
      run:
        shell: ${{ matrix.shell }}

    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
    runs-on: ${{ matrix.builder }}
    continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          os: ${{ matrix.target.os }}
          shell: ${{ matrix.shell }}
          nim_branch: ${{ matrix.branch }}
          cpu: ${{ matrix.target.cpu }}

      - name: Setup Go
        uses: actions/setup-go@v2
        with:
          go-version: '~1.15.5'

      - name: Install p2pd
        run: |
          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3

      - name: Run tests
        run: |
          nim --version
          nimble --version
          nimble install -y --depsOnly
          NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
          if [[ "${{ matrix.branch }}" == "devel" ]]; then
            echo -e "\nTesting with '--gc:orc':\n"
            NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
          fi
94 .github/workflows/performance.yml vendored Normal file
@@ -0,0 +1,94 @@
name: Performance

on:
  push:
    branches:
      - master
  pull_request:
  merge_group:
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  performance:
    timeout-minutes: 20
    strategy:
      fail-fast: false

    defaults:
      run:
        shell: bash

    env:
      VACP2P: "vacp2p"
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
      PR_NUMBER: ${{ github.event.number }}
      BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
      MARKER: "<!-- perf-summary-marker -->"
      COMMENT_SUMMARY_PATH: "/tmp/perf-summary.md"
      SHARED_VOLUME_PATH: "performance/output"
      DOCKER_STATS_PREFIX: "docker_stats_"
      PUBLISH_BRANCH_NAME: "performance_plots"
      CHECKOUT_SUBFOLDER_SUBPLOTS: "subplots"
      PUBLISH_DIR_PLOTS: "plots"
      CHECKOUT_SUBFOLDER_HISTORY: "history"
      PUBLISH_DIR_LATENCY_HISTORY: "latency_history"
      LATENCY_HISTORY_PATH: "history/latency_history"
      LATENCY_HISTORY_PREFIX: "pr"
      LATENCY_HISTORY_PLOT_FILENAME: "latency_history_all_scenarios.png"

    name: "Performance"
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: true

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build Docker Image with cache
        uses: docker/build-push-action@v6
        with:
          context: .
          file: performance/Dockerfile
          tags: test-node:latest
          load: true
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Run
        run: |
          ./performance/runner.sh

      - name: Process latency and docker stats
        uses: ./.github/actions/process_stats

      - name: Publish history
        if: github.repository_owner == env.VACP2P
        uses: ./.github/actions/publish_history

      - name: Generate plots
        if: github.repository_owner == env.VACP2P
        uses: ./.github/actions/generate_plots

      - name: Post/Update PR comment
        if: github.event_name == 'pull_request'
        uses: ./.github/actions/add_comment

      - name: Upload performance artifacts
        if: success() || failure()
        uses: actions/upload-artifact@v4
        with:
          name: performance-artifacts
          path: |
            performance/output/pr*_latency.csv
            performance/output/*.png
            history/latency_history/*.png
          if-no-files-found: ignore
          retention-days: 7
35 .github/workflows/pr_lint.yml vendored Normal file
@@ -0,0 +1,35 @@
name: "Conventional Commits"

on:
  pull_request:
    types:
      - opened
      - edited
      - reopened
      - synchronize
jobs:
  main:
    name: Validate PR title
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
    steps:
      - uses: amannn/action-semantic-pull-request@v5
        id: lint_pr_title
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - uses: marocchino/sticky-pull-request-comment@v2
        # When the previous steps fails, the workflow would stop. By adding this
        # condition you can continue the execution with the populated error message.
        if: always() && (steps.lint_pr_title.outputs.error_message != null)
        with:
          header: pr-title-lint-error
          message: |
            Pull requests titles must follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/)

      # Delete a previous comment when the issue has been resolved
      - if: ${{ steps.lint_pr_title.outputs.error_message == null }}
        uses: marocchino/sticky-pull-request-comment@v2
        with:
          header: pr-title-lint-error
          delete: true
9 .gitignore vendored
@@ -16,3 +16,12 @@ tests/pubsub/testgossipsub
examples/*.md
nimble.develop
nimble.paths
go-libp2p-daemon/

# Ignore all test build files in tests folder (auto generated when running tests).
# First rule (`tests/**/test*[^.]*`) will ignore all binaries: has prefix test + does not have dot in name.
# Second and third rules are here to un-ignores all files with extension and Docker file,
# because it appears that vs code is skipping text search is some tests files without these rules.
tests/**/test*[^.]*
!tests/**/*.*
!tests/**/Dockerfile
37 .pinned
@@ -1,16 +1,21 @@
bearssl;https://github.com/status-im/nim-bearssl@#9372f27a25d0718d3527afad6cc936f6a853f86e
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#ba143e029f35fd9b4cd3d89d007cc834d0d5ba3c
dnsclient;https://github.com/ba0f3/dnsclient.nim@#2b3d4b4e35b5e698fbbeafe16a4fa757926a4673
faststreams;https://github.com/status-im/nim-faststreams@#2a771bb91f8aae8520a5553955a2acce5fdd0c87
httputils;https://github.com/status-im/nim-http-utils@#aad684d3758a74c1b327df93da2e956458410b48
json_serialization;https://github.com/status-im/nim-json-serialization@#aa44ee61dd323022d4abe7cbf4e44668aad88454
metrics;https://github.com/status-im/nim-metrics@#abf3acc7f06cee9ee2c287d2f31413dc3df4c04e
nimcrypto;https://github.com/cheatfate/nimcrypto@#4014ef939b51e02053c2e16dd3481d47bc9267dd
secp256k1;https://github.com/status-im/nim-secp256k1@#5fd81357839d57ef38fb17647bd5e31dfa9f55b8
serialization;https://github.com/status-im/nim-serialization@#f0860e1c25acf26ef5e6ea231c7c0537c793b555
stew;https://github.com/status-im/nim-stew@#000eeb14a34832e6c95303e6508e2925db56be7c
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
unittest2;https://github.com/status-im/nim-unittest2@#b178f47527074964f76c395ad0dfc81cf118f379
websock;https://github.com/status-im/nim-websock@#3696e3f3a5b938e478e473a6089bf8de386d2f04
zlib;https://github.com/status-im/nim-zlib@#d65ee2a7611eb9f0ef0e7350caed6e93ccfa9651
bearssl;https://github.com/status-im/nim-bearssl@#34d712933a4e0f91f5e66bc848594a581504a215
chronicles;https://github.com/status-im/nim-chronicles@#61759a5e8df8f4d68bcd1b4b8c1adab3e72bbd8d
chronos;https://github.com/status-im/nim-chronos@#b55e2816eb45f698ddaca8d8473e401502562db2
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
faststreams;https://github.com/status-im/nim-faststreams@#c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d
httputils;https://github.com/status-im/nim-http-utils@#79cbab1460f4c0cdde2084589d017c43a3d7b4f1
json_serialization;https://github.com/status-im/nim-json-serialization@#2b1c5eb11df3647a2cee107cd4cce3593cbb8bcf
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
nimcrypto;https://github.com/cheatfate/nimcrypto@#19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
quic;https://github.com/vacp2p/nim-quic@#cae13c2d22ba2730c979486cf89b88927045c3ae
results;https://github.com/arnetheduck/nim-results@#df8113dda4c2d74d460a8fa98252b0b771bf1f27
secp256k1;https://github.com/status-im/nim-secp256k1@#f808ed5e7a7bfc42204ec7830f14b7a42b63c284
serialization;https://github.com/status-im/nim-serialization@#548d0adc9797a10b2db7f788b804330306293088
stew;https://github.com/status-im/nim-stew@#0db179256cf98eb9ce9ee7b9bc939f219e621f77
testutils;https://github.com/status-im/nim-testutils@#9e842bd58420d23044bc55e16088e8abbe93ce51
unittest2;https://github.com/status-im/nim-unittest2@#8b51e99b4a57fcfb31689230e75595f024543024
websock;https://github.com/status-im/nim-websock@#d5cd89062cd2d168ef35193c7d29d2102921d97e
zlib;https://github.com/status-im/nim-zlib@#daa8723fd32299d4ca621c837430c29a5a11e19a
jwt;https://github.com/vacp2p/nim-jwt@#18f8378de52b241f321c1f9ea905456e89b95c6f
bearssl_pkey_decoder;https://github.com/vacp2p/bearssl_pkey_decoder@#21dd3710df9345ed2ad8bf8f882761e07863b8e0
||||
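Each `.pinned` entry has the form `name;repository-url@#commit-hash`, pinning one dependency to an exact commit. As the task name suggests, `nimble install_pinned` (mentioned in the README's Development section below) consumes this file so that local builds can resolve the same dependency commits as CI.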
210  README.md
@@ -5,8 +5,8 @@
<h3 align="center">The <a href="https://nim-lang.org/">Nim</a> implementation of the <a href="https://libp2p.io/">libp2p</a> Networking Stack.</h3>

<p align="center">
<a href="https://github.com/status-im/nim-libp2p/actions"><img src="https://github.com/status-im/nim-libp2p/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://codecov.io/gh/status-im/nim-libp2p"><img src="https://codecov.io/gh/status-im/nim-libp2p/branch/master/graph/badge.svg?token=UR5JRQ249W"/></a>
<a href="https://github.com/vacp2p/nim-libp2p/actions"><img src="https://github.com/vacp2p/nim-libp2p/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://codecov.io/gh/vacp2p/nim-libp2p"><img src="https://codecov.io/gh/vacp2p/nim-libp2p/branch/master/graph/badge.svg?token=UR5JRQ249W"/></a>

</p>

@@ -20,111 +20,78 @@
- [Background](#background)
- [Install](#install)
- [Getting Started](#getting-started)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
- [Development](#development)
- [Contribute](#contribute)
- [Contributors](#contributors)
- [Core Maintainers](#core-maintainers)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
- [License](#license)

## Background
libp2p is a [Peer-to-Peer](https://en.wikipedia.org/wiki/Peer-to-peer) networking stack, with [implementations](https://github.com/libp2p/libp2p#implementations) in multiple languages derived from the same [specifications.](https://github.com/libp2p/specs)

Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It's striving to be a modular stack, with sane and secure defaults, useful protocols, while remain open and extensible.
This implementation in native Nim, relying on [chronos](https://github.com/status-im/nim-chronos) for async. It's used in production by a few [projects](#users)
Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It strives to be a modular stack with secure defaults and useful protocols, while remaining open and extensible.
This is a native Nim implementation, using [chronos](https://github.com/status-im/nim-chronos) for asynchronous execution. It's used in production by a few [projects](#users)

Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p's documentation [**docs.libp2p.io**](https://docs.libp2p.io).

## Install
**Prerequisite**
- [Nim](https://nim-lang.org/install.html)

> The currently supported Nim versions are 2.0 and 2.2.

```
nimble install libp2p
```
You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/). See [examples](./examples) for simple usage patterns.

## Getting Started
You'll find the nim-libp2p documentation [here](https://status-im.github.io/nim-libp2p/docs/).
Try out the chat example. For this you'll need to have [`go-libp2p-daemon`](examples/go-daemon/daemonapi.md) running. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):

**Go Daemon:**
Please find the installation and usage instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).

## Modules

List of packages/modules implemented in nim-libp2p:

| Name | Description |
| ---------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
| **Libp2p** | |
| [libp2p](libp2p/switch.nim) | The core of the project |
| [connmanager](libp2p/connmanager.nim) | Connection manager |
| [identify / push identify](libp2p/protocols/identify.nim) | [Identify](https://docs.libp2p.io/concepts/fundamentals/protocols/#identify) protocol |
| [ping](libp2p/protocols/ping.nim) | [Ping](https://docs.libp2p.io/concepts/fundamentals/protocols/#ping) protocol |
| [libp2p-daemon-client](libp2p/daemon/daemonapi.nim) | [go-daemon](https://github.com/libp2p/go-libp2p-daemon) nim wrapper |
| [interop-libp2p](tests/testinterop.nim) | Interop tests |
| **Transports** | |
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
| [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
| **Secure Channels** | |
| [libp2p-secio](libp2p/protocols/secure/secio.nim) | Secio secure channel |
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
| **Stream Multiplexers** | |
| [libp2p-mplex](libp2p/muxers/mplex/mplex.nim) | [MPlex](https://github.com/libp2p/specs/tree/master/mplex) multiplexer |
| [libp2p-yamux](libp2p/muxers/yamux/yamux.nim) | [Yamux](https://docs.libp2p.io/concepts/multiplex/yamux/) multiplexer |
| **Data Types** | |
| [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id) |
| [peer-store](libp2p/peerstore.nim) | ["Address book" of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
| [multiaddress](libp2p/multiaddress.nim) | [Composable network addresses](https://github.com/multiformats/multiaddr) |
| [signed envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
| [routing record](libp2p/routing_record.nim) | [Signed peer dialing information](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
| [discovery manager](libp2p/discovery/discoverymngr.nim) | Discovery Manager |
| **Utilities** | |
| [libp2p-crypto](libp2p/crypto) | Cryptographic backend |
| [libp2p-crypto-secp256k1](libp2p/crypto/secp.nim) | |
| **Pubsub** | |
| [libp2p-pubsub](libp2p/protocols/pubsub/pubsub.nim) | Pub-Sub generic interface |
| [libp2p-floodsub](libp2p/protocols/pubsub/floodsub.nim) | FloodSub implementation |
| [libp2p-gossipsub](libp2p/protocols/pubsub/gossipsub.nim) | [GossipSub](https://docs.libp2p.io/concepts/publish-subscribe/) implementation |

## Users

nim-libp2p is used by:
- [Nimbus](https://github.com/status-im/nimbus-eth2), an Ethereum client
- [nwaku](https://github.com/status-im/nwaku), a decentralized messaging application
- [nim-codex](https://github.com/status-im/nim-codex), a decentralized storage application
- (open a pull request if you want to be included here)

## Stability
nim-libp2p has been used in production for over a year in high-stake scenarios, so its core is considered stable.
Some modules are more recent and less stable.

The versioning follows [semver](https://semver.org/), with some additions:
- Some libp2p procedures are marked `.public.`; they will remain compatible within each `MAJOR` version
- The rest of the procedures are considered internal, and can change at any `MINOR` version (but remain compatible for each new `PATCH`)

We aim to be compatible at all times with at least 2 Nim `MINOR` versions, currently `1.6 & 2.0`

## Development
Clone and install dependencies:
```sh
git clone https://github.com/status-im/nim-libp2p
cd nim-libp2p
# to use dependencies computed by nimble
nimble install -dy
# OR to install the dependency versions used in CI
nimble install_pinned
```

```bash
nim c -r --threads:on examples/directchat.nim
```

This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.

```bash
./examples/directchat
/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu # replace this hash with the one you were given
```

You can now chat between the instances!



## Development
Clone the repository and install the dependencies:
```sh
git clone https://github.com/vacp2p/nim-libp2p
cd nim-libp2p
nimble install -dy
```
You can use `nix develop` to start a shell with Nim and Nimble.

nimble 0.20.1 is required for running `testnative`. At the time of writing, it is not available in nixpkgs: if using `nix develop`, follow up with `nimble install nimble`, and use that binary (typically `~/.nimble/bin/nimble`).

### Testing
Run unit tests:
```sh
# run all the unit tests
nimble test
```
This requires the go daemon to be available. To only run native tests, use `nimble testnative`.
Or use `nimble tasks` to show all available tasks.
**Obs:** Running all tests requires the [`go-libp2p-daemon` to be installed and running](examples/go-daemon/daemonapi.md).

If you only want to run tests that don't require `go-libp2p-daemon`, use:
```
nimble testnative
```

For a list of all available test suites, use:
```
nimble tasks
```

### Contribute

@@ -132,25 +99,36 @@ The libp2p implementation in Nim is a work in progress. We welcome contributors
- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).

The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
- **Code format**. Code should be formatted with [nph](https://github.com/arnetheduck/nph) and follow the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
- **Join the Conversation**. Connect with other contributors in our [community channel](https://discord.com/channels/1204447718093750272/1351621032263417946). Ask questions, share ideas, get support, and stay informed about the latest updates from the maintainers.

### Contributors
<a href="https://github.com/status-im/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=status-im/nim-libp2p" alt="nim-libp2p contributors"></a>
<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>

### Core Maintainers
<table>
  <tbody>
    <tr>
      <td align="center"><a href="https://github.com/Menduist"><img src="https://avatars.githubusercontent.com/u/13471753?v=4?s=100" width="100px;" alt="Tanguy"/><br /><sub><b>Tanguy (Menduist)</b></sub></a></td>
      <td align="center"><a href="https://github.com/lchenut"><img src="https://avatars.githubusercontent.com/u/11214565?v=4?s=100" width="100px;" alt="Ludovic"/><br /><sub><b>Ludovic</b></sub></a></td>
      <td align="center"><a href="https://github.com/diegomrsantos"><img src="https://avatars.githubusercontent.com/u/7316595?v=4?s=100" width="100px;" alt="Diego"/><br /><sub><b>Diego</b></sub></a></td>
      <td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
      <td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
      <td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
    </tr>
  </tbody>
</table>

### Compile time flags

Enable QUIC transport support:
```bash
nim c -d:libp2p_quic_support some_file.nim
```

Enable autotls support:
```bash
nim c -d:libp2p_autotls_support some_file.nim
```

Enable expensive metrics (i.e., metrics with per-peer cardinality):
```bash
nim c -d:libp2p_expensive_metrics some_file.nim
@@ -166,6 +144,64 @@ Specify gossipsub specific topics to measure in the metrics:
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
```

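For readers new to Nim: these `-d:` switches are ordinary compile-time defines, and library code gates optional features behind `when defined(...)` blocks. A minimal, self-contained sketch of the pattern (illustrative only, not nim-libp2p's actual wiring):

```nim
# flag_demo.nim -- minimal sketch of consuming a -d: define at compile time.
# Build and run with:  nim c -r -d:libp2p_quic_support flag_demo.nim
when defined(libp2p_quic_support):
  # This branch only exists in the binary when the define is passed.
  echo "built with QUIC support"
else:
  echo "built without QUIC support"
```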
## Modules
List of packages/modules implemented in nim-libp2p:

| Name | Description |
| ---------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
| **Libp2p** | |
| [libp2p](libp2p/switch.nim) | The core of the project |
| [connmanager](libp2p/connmanager.nim) | Connection manager |
| [identify / push identify](libp2p/protocols/identify.nim) | [Identify](https://docs.libp2p.io/concepts/fundamentals/protocols/#identify) protocol |
| [ping](libp2p/protocols/ping.nim) | [Ping](https://docs.libp2p.io/concepts/fundamentals/protocols/#ping) protocol |
| [libp2p-daemon-client](libp2p/daemon/daemonapi.nim) | [go-daemon](https://github.com/libp2p/go-libp2p-daemon) nim wrapper |
| [interop-libp2p](tests/testinterop.nim) | Interop tests |
| **Transports** | |
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
| [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
| [libp2p-quic](libp2p/transports/quictransport.nim) | Quic Transport |
| [libp2p-memory](libp2p/transports/memorytransport.nim) | Memory Transport |
| **Secure Channels** | |
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
| **Stream Multiplexers** | |
| [libp2p-mplex](libp2p/muxers/mplex/mplex.nim) | [MPlex](https://github.com/libp2p/specs/tree/master/mplex) multiplexer |
| [libp2p-yamux](libp2p/muxers/yamux/yamux.nim) | [Yamux](https://docs.libp2p.io/concepts/multiplex/yamux/) multiplexer |
| **Data Types** | |
| [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id) |
| [peer-store](libp2p/peerstore.nim) | [Address book of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
| [multiaddress](libp2p/multiaddress.nim) | [Composable network addresses](https://github.com/multiformats/multiaddr) |
| [signed-envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
| [routing-record](libp2p/routing_record.nim) | [Signed peer dialing information](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
| [discovery manager](libp2p/discovery/discoverymngr.nim) | Discovery Manager |
| **Utilities** | |
| [libp2p-crypto](libp2p/crypto) | Cryptographic backend |
| [libp2p-crypto-secp256k1](libp2p/crypto/secp.nim) | |
| **Pubsub** | |
| [libp2p-pubsub](libp2p/protocols/pubsub/pubsub.nim) | Pub-Sub generic interface |
| [libp2p-floodsub](libp2p/protocols/pubsub/floodsub.nim) | FloodSub implementation |
| [libp2p-gossipsub](libp2p/protocols/pubsub/gossipsub.nim) | [GossipSub](https://docs.libp2p.io/concepts/publish-subscribe/) implementation |

## Users

nim-libp2p is used by:
- [Nimbus](https://github.com/status-im/nimbus-eth2), an Ethereum client
- [nwaku](https://github.com/waku-org/nwaku), a decentralized messaging application
- [nim-codex](https://github.com/codex-storage/nim-codex), a decentralized storage application
- (open a pull request if you want to be included here)

## Stability
nim-libp2p has been used in production for over a year in high-stake scenarios, so its core is considered stable.
Some modules are more recent and less stable.

The versioning follows [semver](https://semver.org/), with some additions:
- Some libp2p procedures are marked `.public.`; they will remain compatible within each `MAJOR` version
- The rest of the procedures are considered internal, and can change at any `MINOR` version (but remain compatible for each new `PATCH`)

We aim to be compatible at all times with at least 2 Nim `MINOR` versions, currently `2.0 & 2.2`

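The `.public.` marker mentioned above is a custom Nim pragma used to tag the stable API surface. A hedged sketch of the idea; the pragma declaration below is illustrative, not nim-libp2p's actual definition:

```nim
# Sketch: a no-op custom pragma for tagging a stable API surface.
template public() {.pragma.} # assumed declaration, for illustration only

proc dialAndPing*() {.public.} =
  # tagged `.public.`: stays compatible within a MAJOR version
  discard

proc internalHelper() =
  # untagged: internal, may change in any MINOR version
  discard
```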
## License

Licensed and distributed under either of

14  config.nims
@@ -4,18 +4,24 @@ if dirExists("nimbledeps/pkgs"):
if dirExists("nimbledeps/pkgs2"):
  switch("NimblePath", "nimbledeps/pkgs2")

switch("warningAsError", "UnusedImport:on")
switch("warning", "CaseTransition:off")
switch("warning", "ObservableStores:off")
switch("warning", "LockLevel:off")
--define:chronosStrictException
--styleCheck:usages
--styleCheck:
  usages
switch("warningAsError", "UseBase:on")
--styleCheck:error
--styleCheck:
  error
--mm:
  refc
# reconsider when there's a version-2-2 branch worth testing with as we might switch to orc

# Avoid some rare stack corruption while using exceptions with a SEH-enabled
# toolchain: https://github.com/status-im/nimbus-eth2/issues/3121
if defined(windows) and not defined(vcc):
  --define:nimRawSetjmp
  --define:
    nimRawSetjmp

# begin Nimble config (version 1)
when fileExists("nimble.paths"):
@@ -1,5 +1,5 @@
# nim-libp2p examples

In this folder, you'll find the sources of the [nim-libp2p website](https://status-im.github.io/nim-libp2p/docs/)
In this folder, you'll find the sources of the [nim-libp2p website](https://vacp2p.github.io/nim-libp2p/docs/)

We recommend following the tutorials on the website, but feel free to grok the sources here!
@@ -1,40 +1,48 @@
{.used.}
## # Circuit Relay example
##
## Circuit Relay can be used when a node cannot reach another node
## directly, but can reach it through a another node (the Relay).
## directly, but can reach it through another node (the Relay).
##
## That may happen because of NAT, Firewalls, or incompatible transports.
##
## More information [here](https://docs.libp2p.io/concepts/circuit-relay/).
import chronos, stew/byteutils
import libp2p,
  libp2p/protocols/connectivity/relay/[relay, client]
import libp2p, libp2p/protocols/connectivity/relay/[relay, client]

# Helper to create a circuit relay node
proc createCircuitRelaySwitch(r: Relay): Switch =
  SwitchBuilder.new()
    .withRng(newRng())
    .withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
    .withTcpTransport()
    .withMplex()
    .withNoise()
    .withCircuitRelay(r)
    .build()
  SwitchBuilder
    .new()
    .withRng(newRng())
    .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
    .withTcpTransport()
    .withMplex()
    .withNoise()
    .withCircuitRelay(r)
    .build()

proc main() {.async.} =
  # Create a custom protocol
  let customProtoCodec = "/test"
  var proto = new LPProtocol
  proto.codec = customProtoCodec
  proto.handler = proc(conn: Connection, proto: string) {.async.} =
    var msg = string.fromBytes(await conn.readLp(1024))
    echo "1 - Dst Received: ", msg
    assert "test1" == msg
    await conn.writeLp("test2")
    msg = string.fromBytes(await conn.readLp(1024))
    echo "2 - Dst Received: ", msg
    assert "test3" == msg
    await conn.writeLp("test4")
  proto.handler = proc(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    try:
      var msg = string.fromBytes(await conn.readLp(1024))
      echo "1 - Dst Received: ", msg
      assert "test1" == msg
      await conn.writeLp("test2")
      msg = string.fromBytes(await conn.readLp(1024))
      echo "2 - Dst Received: ", msg
      assert "test3" == msg
      await conn.writeLp("test4")
    except CancelledError as e:
      raise e
    except CatchableError as e:
      echo "exception in handler", e.msg

  let
    relay = Relay.new()
@@ -56,8 +64,11 @@ proc main() {.async.} =

  let
    # Create a relay address to swDst using swRel as the relay
    addrs = MultiAddress.init($swRel.peerInfo.addrs[0] & "/p2p/" &
      $swRel.peerInfo.peerId & "/p2p-circuit").get()
    addrs = MultiAddress
      .init(
        $swRel.peerInfo.addrs[0] & "/p2p/" & $swRel.peerInfo.peerId & "/p2p-circuit"
      )
      .get()

  # Connect Dst to the relay
  await swDst.connect(swRel.peerInfo.peerId, swRel.peerInfo.addrs)
@@ -66,7 +77,7 @@ proc main() {.async.} =
  let rsvp = await clDst.reserve(swRel.peerInfo.peerId, swRel.peerInfo.addrs)

  # Src dial Dst using the relay
  let conn = await swSrc.dial(swDst.peerInfo.peerId, @[ addrs ], customProtoCodec)
  let conn = await swSrc.dial(swDst.peerInfo.peerId, @[addrs], customProtoCodec)

  await conn.writeLp("test1")
  var msg = string.fromBytes(await conn.readLp(1024))

@@ -1,15 +1,13 @@
when not(compileOption("threads")):
{.used.}
when not (compileOption("threads")):
  {.fatal: "Please, compile this program with the --threads:on option!".}

import
  strformat, strutils,
  stew/byteutils,
  chronos,
  libp2p
import strformat, strutils, stew/byteutils, chronos, libp2p

const DefaultAddr = "/ip4/127.0.0.1/tcp/0"

const Help = """
const Help =
  """
Commands: /[?|help|connect|disconnect|exit]
help: Prints this help
connect: dials a remote peer
@@ -17,12 +15,11 @@ const Help = """
exit: closes the chat
"""

type
  Chat = ref object
    switch: Switch # a single entry point for dialing and listening to peer
    stdinReader: StreamTransport # transport streams between read & write file descriptor
    conn: Connection # connection to the other peer
    connected: bool # if the node is connected to another peer
type Chat = ref object
  switch: Switch # a single entry point for dialing and listening to peer
  stdinReader: StreamTransport # transport streams between read & write file descriptor
  conn: Connection # connection to the other peer
  connected: bool # if the node is connected to another peer

##
# Stdout helpers, to write the prompt
@@ -41,19 +38,23 @@ proc writeStdout(c: Chat, str: string) =
##
const ChatCodec = "/nim-libp2p/chat/1.0.0"

type
  ChatProto = ref object of LPProtocol
type ChatProto = ref object of LPProtocol

proc new(T: typedesc[ChatProto], c: Chat): T =
  let chatproto = T()

  # create handler for incoming connection
  proc handle(stream: Connection, proto: string) {.async.} =
    if c.connected and not c.conn.closed:
      c.writeStdout "a chat session is already in progress - refusing incoming peer!"
      await stream.close()
    else:
      await c.handlePeer(stream)
  proc handle(stream: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    try:
      if c.connected and not c.conn.closed:
        c.writeStdout "a chat session is already in progress - refusing incoming peer!"
      else:
        await c.handlePeer(stream)
    except CancelledError as e:
      raise e
    except CatchableError as e:
      echo "exception in handler", e.msg
    finally:
      await stream.close()

  # assign the new handler
@@ -77,9 +78,9 @@ proc handlePeer(c: Chat, conn: Connection) {.async.} =
        strData = await conn.readLp(1024)
        str = string.fromBytes(strData)
      c.writeStdout $conn.peerId & ": " & $str
  except LPStreamEOFError:
    defer: c.writeStdout $conn.peerId & " disconnected"
    defer:
      c.writeStdout $conn.peerId & " disconnected"
    await c.conn.close()
    c.connected = false

@@ -88,10 +89,7 @@ proc dialPeer(c: Chat, address: string) {.async.} =
  let
    multiAddr = MultiAddress.init(address).tryGet()
    # split the peerId part /p2p/...
    peerIdBytes = multiAddr[multiCodec("p2p")]
      .tryGet()
      .protoAddress()
      .tryGet()
    peerIdBytes = multiAddr[multiCodec("p2p")].tryGet().protoAddress().tryGet()
    remotePeer = PeerId.init(peerIdBytes).tryGet()
    # split the wire address
    ip4Addr = multiAddr[multiCodec("ip4")].tryGet()
@@ -124,7 +122,6 @@ proc readLoop(c: Chat) {.async.} =
      let address = await c.stdinReader.readLine()
      if address.len > 0:
        await c.dialPeer(address)

    elif line.startsWith("/exit"):
      if c.connected and c.conn.closed.not:
        await c.conn.close()
@@ -171,16 +168,18 @@ proc main() {.async.} =

  var switch = SwitchBuilder
    .new()
    .withRng(rng) # Give the application RNG
    .withRng(rng)
    # Give the application RNG
    .withAddress(localAddress)
    .withTcpTransport() # Use TCP as transport
    .withMplex() # Use Mplex as muxer
    .withNoise() # Use Noise as secure manager
    .withTcpTransport()
    # Use TCP as transport
    .withMplex()
    # Use Mplex as muxer
    .withNoise()
    # Use Noise as secure manager
    .build()

  let chat = Chat(
    switch: switch,
    stdinReader: stdinReader)
  let chat = Chat(switch: switch, stdinReader: stdinReader)

  switch.mount(ChatProto.new(chat))


3  examples/examples_build.nim  (new file)
@@ -0,0 +1,3 @@
{.used.}

import directchat, tutorial_6_game
5  examples/examples_run.nim  (new file)
@@ -0,0 +1,5 @@
{.used.}

import
  helloworld, circuitrelay, tutorial_1_connect, tutorial_2_customproto,
  tutorial_3_protobuf, tutorial_4_gossipsub, tutorial_5_discovery
@@ -2,8 +2,7 @@ import chronos, nimcrypto, strutils
import ../../libp2p/daemon/daemonapi
import ../hexdump

const
  PubSubTopic = "test-net"
const PubSubTopic = "test-net"

proc dumpSubscribedPeers(api: DaemonAPI) {.async.} =
  var peers = await api.pubsubListPeers(PubSubTopic)
@@ -37,12 +36,12 @@ proc main() {.async.} =

  asyncSpawn monitor(api)

  proc pubsubLogger(api: DaemonAPI,
                    ticket: PubsubTicket,
                    message: PubSubMessage): Future[bool] {.async.} =
  proc pubsubLogger(
      api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
  ): Future[bool] {.async.} =
    let msglen = len(message.data)
    echo "= Received pubsub message with length ", msglen,
      " bytes from peer ", message.peer.pretty()
    echo "= Received pubsub message with length ",
      msglen, " bytes from peer ", message.peer.pretty()
    echo dumpHex(message.data)
    await api.dumpSubscribedPeers()
    result = true

@@ -2,18 +2,16 @@ import chronos, nimcrypto, strutils
import ../../libp2p/daemon/daemonapi

## nim c -r --threads:on chat.nim
when not(compileOption("threads")):
when not (compileOption("threads")):
  {.fatal: "Please, compile this program with the --threads:on option!".}

const
  ServerProtocols = @["/test-chat-stream"]
const ServerProtocols = @["/test-chat-stream"]

type
  CustomData = ref object
    api: DaemonAPI
    remotes: seq[StreamTransport]
    consoleFd: AsyncFD
    serveFut: Future[void]
type CustomData = ref object
  api: DaemonAPI
  remotes: seq[StreamTransport]
  consoleFd: AsyncFD
  serveFut: Future[void]

proc threadMain(wfd: AsyncFD) {.thread.} =
  ## This procedure performs reading from `stdin` and sends data over
@@ -82,7 +80,7 @@ proc serveThread(udata: CustomData) {.async.} =
          relay = true
          break
      if relay:
        echo peer.pretty(), " * ", " [", addresses.join(", "), "]"
        echo peer.pretty(), " * ", " [", addresses.join(", "), "]"
      else:
        echo peer.pretty(), " [", addresses.join(", "), "]"
    elif line.startsWith("/exit"):
@@ -95,8 +93,8 @@ proc serveThread(udata: CustomData) {.async.} =
          pending.add(item.write(msg))
        if len(pending) > 0:
          var results = await all(pending)
  except:
    echo getCurrentException().msg
  except CatchableError as err:
    echo err.msg

proc main() {.async.} =
  var data = new CustomData

@@ -1,56 +1,43 @@
# Table of Contents
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Usage](#usage)
- [Example](#example)
- [Getting Started](#getting-started)
- [Script](#script)
- [Examples](#examples)

# Introduction
This is a libp2p-backed daemon wrapping the functionalities of go-libp2p for use in Nim. <br>
For more information about the go daemon, check out [this repository](https://github.com/libp2p/go-libp2p-daemon).
> **Required only** for running the tests.

# Prerequisites
Go with version `1.16.0`
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.

# Installation
Run the build script while having the `go` command pointing to the correct Go version.
```sh
# clone and install dependencies
git clone https://github.com/status-im/nim-libp2p
cd nim-libp2p
nimble install

# perform unit tests
nimble test

# update the git submodule to install the go daemon
git submodule update --init --recursive
go version
git clone https://github.com/libp2p/go-libp2p-daemon
cd go-libp2p-daemon
git checkout v0.0.1
go install ./...
cd ..
./scripts/build_p2pd.sh
```
`build_p2pd.sh` will not rebuild unless needed. If you already have the newest binary and you want to force the rebuild, use:
```sh
./scripts/build_p2pd.sh -f
```
Or:
```sh
./scripts/build_p2pd.sh --force
```

# Usage
If everything goes correctly, the binary (`p2pd`) should be built and placed in the `$GOPATH/bin` directory.
If you're having issues, head into [our discord](https://discord.com/channels/864066763682218004/1115526869769535629) and ask for assistance.

## Example
After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
```sh
export PATH="$PATH:$HOME/go/bin"
```
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.

# Examples
Examples can be found in the [examples folder](https://github.com/status-im/nim-libp2p/tree/readme/examples/go-daemon)

## Getting Started
Try out the chat example. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):

```bash
nim c -r --threads:on examples/directchat.nim
```

This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.

```bash
./examples/directchat
/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu
```

You can now chat between the instances!



@@ -1,26 +1,25 @@
import chronos, nimcrypto, strutils, os
import ../../libp2p/daemon/daemonapi

const
  PubSubTopic = "test-net"
const PubSubTopic = "test-net"

proc main(bn: string) {.async.} =
  echo "= Starting P2P node"
  var bootnodes = bn.split(",")
  var api = await newDaemonApi({DHTFull, PSGossipSub, WaitBootstrap},
    bootstrapNodes = bootnodes,
    peersRequired = 1)
  var api = await newDaemonApi(
    {DHTFull, PSGossipSub, WaitBootstrap}, bootstrapNodes = bootnodes, peersRequired = 1
  )
  var id = await api.identity()
  echo "= P2P node ", id.peer.pretty(), " started:"
  for item in id.addresses:
    echo item

  proc pubsubLogger(api: DaemonAPI,
                    ticket: PubsubTicket,
                    message: PubSubMessage): Future[bool] {.async.} =
  proc pubsubLogger(
      api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
  ): Future[bool] {.async.} =
    let msglen = len(message.data)
    echo "= Received pubsub message with length ", msglen,
      " bytes from peer ", message.peer.pretty(), ": "
    echo "= Received pubsub message with length ",
      msglen, " bytes from peer ", message.peer.pretty(), ": "
    var strdata = cast[string](message.data)
    echo strdata
    result = true

@@ -1,5 +1,7 @@
import chronos # an efficient library for async
import stew/byteutils # various utils
{.used.}

import chronos # an efficient library for async
import stew/byteutils # various utils
import libp2p

##
@@ -7,20 +9,23 @@ import libp2p
##
const TestCodec = "/test/proto/1.0.0" # custom protocol string identifier

type
  TestProto = ref object of LPProtocol # declare a custom protocol
type TestProto = ref object of LPProtocol # declare a custom protocol

proc new(T: typedesc[TestProto]): T =

  # every incoming connection will be handled in this closure
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
    echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
    await conn.writeLp("Roger p2p!")
  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    try:
      echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
      await conn.writeLp("Roger p2p!")
    except CancelledError as e:
      raise e
    except CatchableError as e:
      echo "exception in handler", e.msg
    finally:
      # We must close the connections ourselves when we're done with it
      await conn.close()

  # We must close the connections ourselves when we're done with it
  await conn.close()

  return T(codecs: @[TestCodec], handler: handle)
  return T.new(codecs = @[TestCodec], handler = handle)

##
# Helper to create a switch/node
@@ -28,11 +33,16 @@ proc new(T: typedesc[TestProto]): T =
proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
  var switch = SwitchBuilder
    .new()
    .withRng(rng) # Give the application RNG
    .withAddress(ma) # Our local address(es)
    .withTcpTransport() # Use TCP as transport
    .withMplex() # Use Mplex as muxer
    .withNoise() # Use Noise as secure manager
    .withRng(rng)
    # Give the application RNG
    .withAddress(ma)
    # Our local address(es)
    .withTcpTransport()
    # Use TCP as transport
    .withMplex()
    # Use Mplex as muxer
    .withNoise()
    # Use Noise as secure manager
    .build()

  result = switch
@@ -40,7 +50,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
# The actual application
##
proc main() {.async, gcsafe.} =
proc main() {.async.} =
  let
    rng = newRng() # Single random number source for the whole application
    # port 0 will take a random available port
@@ -73,7 +83,8 @@ proc main() {.async, gcsafe.} =
  # use the second node to dial the first node
  # using the first node peerid and address
  # and specify our custom protocol codec
  let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
  let conn =
    await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)

  # conn is now a fully setup connection, we talk directly to the node1 custom protocol handler
  await conn.writeLp("Hello p2p!") # writeLp sends a length-prefixed buffer over the wire
@@ -84,6 +95,7 @@ proc main() {.async, gcsafe.} =
  # We must close the connection ourselves when we're done with it
  await conn.close()

  await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
  await allFutures(switch1.stop(), switch2.stop())
  # close connections and shutdown all transports

waitFor(main())

@@ -35,26 +35,24 @@ proc dumpHex*(pbytes: pointer, nbytes: int, items = 1, ascii = true): string =
  var asciiText = ""
  while i < nbytes:
    if i %% 16 == 0:
      result = result & toHex(cast[BiggestInt](slider),
        sizeof(BiggestInt) * 2) & ": "
      result = result & toHex(cast[BiggestInt](slider), sizeof(BiggestInt) * 2) & ": "
    var k = 0
    while k < items:
      var ch = cast[ptr char](cast[uint](slider) + k.uint)[]
      if ord(ch) > 31 and ord(ch) < 127: asciiText &= ch else: asciiText &= "."
      if ord(ch) > 31 and ord(ch) < 127:
        asciiText &= ch
      else:
        asciiText &= "."
      inc(k)
    case items:
    case items
    of 1:
      result = result & toHex(cast[BiggestInt](cast[ptr uint8](slider)[]),
        hexSize)
      result = result & toHex(cast[BiggestInt](cast[ptr uint8](slider)[]), hexSize)
    of 2:
      result = result & toHex(cast[BiggestInt](cast[ptr uint16](slider)[]),
        hexSize)
      result = result & toHex(cast[BiggestInt](cast[ptr uint16](slider)[]), hexSize)
    of 4:
      result = result & toHex(cast[BiggestInt](cast[ptr uint32](slider)[]),
        hexSize)
      result = result & toHex(cast[BiggestInt](cast[ptr uint32](slider)[]), hexSize)
    of 8:
      result = result & toHex(cast[BiggestInt](cast[ptr uint64](slider)[]),
        hexSize)
      result = result & toHex(cast[BiggestInt](cast[ptr uint64](slider)[]), hexSize)
    else:
      raise newException(ValueError, "Wrong items size!")
    result = result & " "

@@ -3,4 +3,4 @@
Welcome to the nim-libp2p documentation!

Here, you'll find [tutorials](tutorial_1_connect.md) to help you get started, as well as
the [full reference](https://status-im.github.io/nim-libp2p/master/libp2p.html).
the [full reference](https://vacp2p.github.io/nim-libp2p/master/libp2p.html).

@@ -1,3 +1,4 @@
{.used.}
## # Simple ping tutorial
##
## Hi all, welcome to the first nim-libp2p tutorial!
@@ -34,15 +35,20 @@ import libp2p/protocols/ping

## [chronos](https://github.com/status-im/nim-chronos) the asynchronous framework used by `nim-libp2p`
##
## Next, we'll create an helper procedure to create our switches. A switch needs a bit of configuration, and it will be easier to do this configuration only once:
## Next, we'll create a helper procedure to create our switches. A switch needs a bit of configuration, and it will be easier to do this configuration only once:
proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
  var switch = SwitchBuilder
    .new()
    .withRng(rng) # Give the application RNG
    .withAddress(ma) # Our local address(es)
    .withTcpTransport() # Use TCP as transport
    .withMplex() # Use Mplex as muxer
    .withNoise() # Use Noise as secure manager
    .withRng(rng)
    # Give the application RNG
    .withAddress(ma)
    # Our local address(es)
    .withTcpTransport()
    # Use TCP as transport
    .withMplex()
    # Use Mplex as muxer
    .withNoise()
    # Use Noise as secure manager
    .build()

  return switch
@@ -53,11 +59,11 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
##
## Let's now start to create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
  let
    rng = newRng()
    localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
    pingProtocol = Ping.new(rng=rng)
    pingProtocol = Ping.new(rng = rng)
  ## We created some variables that we'll need for the rest of the application: the global `rng` instance, our `localAddress`, and an instance of the `Ping` protocol.
  ## The address is in the [MultiAddress](https://github.com/multiformats/multiaddr) format. The port `0` means "take any port available".
  ##
@@ -77,8 +83,9 @@ proc main() {.async.} =
  ## Now that we've started the nodes, they are listening for incoming peers.
  ## We can find out which port was attributed, and the resulting local addresses, by using `switch1.peerInfo.addrs`.
  ##
  ## We'll **dial** the first switch from the second one, by specifying it's **Peer ID**, it's **MultiAddress** and the **`Ping` protocol codec**:
  let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, PingCodec)
  ## We'll **dial** the first switch from the second one, by specifying its **Peer ID**, its **MultiAddress** and the **`Ping` protocol codec**:
  let conn =
    await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, PingCodec)
  ## We now have a `Ping` connection setup between the second and the first switch, we can use it to actually ping the node:
  # ping the other node and echo the ping duration
  echo "ping: ", await pingProtocol.ping(conn)
@@ -86,7 +93,8 @@ proc main() {.async.} =
  # We must close the connection ourselves when we're done with it
  await conn.close()
  ## And that's it! Just a little bit of cleanup: shutting down the switches, waiting for them to stop, and we'll call our `main` procedure:
  await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
  await allFutures(switch1.stop(), switch2.stop())
  # close connections and shutdown all transports

waitFor(main())


@@ -1,3 +1,4 @@
{.used.}
## # Custom protocol in libp2p
##
## In the [previous tutorial](tutorial_1_connect.md), we've looked at how to create a simple ping program using the `nim-libp2p`.
@@ -18,19 +19,24 @@ type TestProto = ref object of LPProtocol

## We've set a [protocol ID](https://docs.libp2p.io/concepts/protocols/#protocol-ids), and created a custom `LPProtocol`. In a more complex protocol, we could use this structure to store interesting variables.
##
## A protocol generally has two part: and handling/server part, and a dialing/client part.
## Theses two parts can be identical, but in our trivial protocol, the server will wait for a message from the client, and the client will send a message, so we have to handle the two cases separately.
## A protocol generally has two parts: a handling/server part, and a dialing/client part.
## These two parts can be identical, but in our trivial protocol, the server will wait for a message from the client, and the client will send a message, so we have to handle the two cases separately.
##
## Let's start with the server part:

proc new(T: typedesc[TestProto]): T =
  # every incoming connection will be handled in this closure
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    # Read up to 1024 bytes from this connection, and transform them into
    # a string
    echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
    # We must close the connections ourselves when we're done with it
    await conn.close()
    try:
      echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
    except CancelledError as e:
      raise e
    except CatchableError as e:
      echo "exception in handler", e.msg
    finally:
      await conn.close()

  return T.new(codecs = @[TestCodec], handler = handle)

@@ -41,29 +47,31 @@ proc new(T: typedesc[TestProto]): T =
proc hello(p: TestProto, conn: Connection) {.async.} =
  await conn.writeLp("Hello p2p!")

## Again, pretty straight-forward, we just send a message on the connection.
## Again, pretty straightforward, we just send a message on the connection.
##
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
  let
    rng = newRng()
    testProto = TestProto.new()
    switch1 = newStandardSwitch(rng=rng)
    switch2 = newStandardSwitch(rng=rng)
    switch1 = newStandardSwitch(rng = rng)
    switch2 = newStandardSwitch(rng = rng)

  switch1.mount(testProto)

  await switch1.start()
  await switch2.start()

  let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
  let conn =
    await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)

  await testProto.hello(conn)

  # We must close the connection ourselves when we're done with it
  await conn.close()

  await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
  await allFutures(switch1.stop(), switch2.stop())
  # close connections and shutdown all transports

## This is very similar to the first tutorial's `main`, the only noteworthy difference is that we use `newStandardSwitch`, which is similar to the `createSwitch` of the first tutorial, but is bundled directly in libp2p
##

@@ -1,3 +1,4 @@
{.used.}
## # Protobuf usage
##
## In the [previous tutorial](tutorial_2_customproto.md), we created a simple "ping" protocol.
@@ -57,8 +58,8 @@ proc decode(_: type Metric, buf: seq[byte]): Result[Metric, ProtoError] =
  #
  # We are just checking the error, and ignoring whether the value
  # is present or not (default values are valid).
  discard ? pb.getField(1, res.name)
  discard ? pb.getField(2, res.value)
  discard ?pb.getField(1, res.name)
  discard ?pb.getField(2, res.value)
  ok(res)

proc encode(m: MetricList): ProtoBuffer =
@@ -72,10 +73,10 @@ proc decode(_: type MetricList, buf: seq[byte]): Result[MetricList, ProtoError]
    res: MetricList
    metrics: seq[seq[byte]]
  let pb = initProtoBuffer(buf)
  discard ? pb.getRepeatedField(1, metrics)
  discard ?pb.getRepeatedField(1, metrics)

  for metric in metrics:
    res.metrics &= ? Metric.decode(metric)
    res.metrics &= ?Metric.decode(metric)
  ok(res)

## ## Results instead of exceptions
@@ -102,18 +103,24 @@ proc decode(_: type MetricList, buf: seq[byte]): Result[MetricList, ProtoError]
## ## Creating the protocol
## We'll next create a protocol, like in the last tutorial, to request these metrics from our host
type
  MetricCallback = proc: Future[MetricList] {.raises: [], gcsafe.}
  MetricCallback = proc(): Future[MetricList] {.raises: [], gcsafe.}
  MetricProto = ref object of LPProtocol
    metricGetter: MetricCallback

proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
  var res: MetricProto
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
    let
      metrics = await res.metricGetter()
      asProtobuf = metrics.encode()
    await conn.writeLp(asProtobuf.buffer)
    await conn.close()
  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    try:
      let
        metrics = await res.metricGetter()
        asProtobuf = metrics.encode()
      await conn.writeLp(asProtobuf.buffer)
    except CancelledError as e:
      raise e
    except CatchableError as e:
      echo "exception in handler", e.msg
    finally:
      await conn.close()

  res = MetricProto.new(@["/metric-getter/1.0.0"], handle)
  res.metricGetter = cb
@@ -126,21 +133,21 @@ proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
  return MetricList.decode(protobuf).tryGet()

## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
  let rng = newRng()
  proc randomMetricGenerator: Future[MetricList] {.async.} =
  proc randomMetricGenerator(): Future[MetricList] {.async.} =
    let metricCount = rng[].generate(uint32) mod 16
    for i in 0 ..< metricCount + 1:
      result.metrics.add(Metric(
        name: "metric_" & $i,
        value: float(rng[].generate(uint16)) / 1000.0
      ))
      result.metrics.add(
        Metric(name: "metric_" & $i, value: float(rng[].generate(uint16)) / 1000.0)
      )
    return result

  let
    metricProto1 = MetricProto.new(randomMetricGenerator)
    metricProto2 = MetricProto.new(randomMetricGenerator)
    switch1 = newStandardSwitch(rng=rng)
    switch2 = newStandardSwitch(rng=rng)
    switch1 = newStandardSwitch(rng = rng)
    switch2 = newStandardSwitch(rng = rng)

  switch1.mount(metricProto1)

@@ -148,14 +155,17 @@ proc main() {.async.} =
  await switch2.start()

  let
    conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, metricProto2.codecs)
    conn = await switch2.dial(
      switch1.peerInfo.peerId, switch1.peerInfo.addrs, metricProto2.codecs
    )
    metrics = await metricProto2.fetch(conn)
  await conn.close()

  for metric in metrics.metrics:
    echo metric.name, " = ", metric.value

  await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
  await allFutures(switch1.stop(), switch2.stop())
  # close connections and shutdown all transports

waitFor(main())


@@ -1,3 +1,4 @@
+{.used.}
 ## # GossipSub
 ##
 ## In this tutorial, we'll build a simple GossipSub network
@@ -7,7 +8,7 @@
 ## and allows to balance between latency, bandwidth usage,
 ## privacy and attack resistance.
 ##
-## You'll find a good explanation on how GossipSub works
+## You'll find a good explanation of how GossipSub works
 ## [here.](https://docs.libp2p.io/concepts/publish-subscribe/) There are a lot
 ## of parameters you can tweak to adjust how GossipSub behaves but here we'll
 ## use the sane defaults shipped with libp2p.
@@ -40,8 +41,8 @@ proc encode(m: Metric): ProtoBuffer =
 proc decode(_: type Metric, buf: seq[byte]): Result[Metric, ProtoError] =
   var res: Metric
   let pb = initProtoBuffer(buf)
-  discard ? pb.getField(1, res.name)
-  discard ? pb.getField(2, res.value)
+  discard ?pb.getField(1, res.name)
+  discard ?pb.getField(2, res.value)
   ok(res)

 proc encode(m: MetricList): ProtoBuffer =
@@ -56,11 +57,11 @@ proc decode(_: type MetricList, buf: seq[byte]): Result[MetricList, ProtoError]
     res: MetricList
     metrics: seq[seq[byte]]
   let pb = initProtoBuffer(buf)
-  discard ? pb.getRepeatedField(1, metrics)
+  discard ?pb.getRepeatedField(1, metrics)

   for metric in metrics:
-    res.metrics &= ? Metric.decode(metric)
-  ? pb.getRequiredField(2, res.hostname)
+    res.metrics &= ?Metric.decode(metric)
+  ?pb.getRequiredField(2, res.hostname)
   ok(res)

 ## This is exactly like the previous structure, except that we added
@@ -73,11 +74,13 @@ type Node = tuple[switch: Switch, gossip: GossipSub, hostname: string]

 proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
   # This procedure will handle one of the node of the network
-  node.gossip.addValidator(["metrics"],
+  node.gossip.addValidator(
+    ["metrics"],
     proc(topic: string, message: Message): Future[ValidationResult] {.async.} =
       let decoded = MetricList.decode(message.data)
-      if decoded.isErr: return ValidationResult.Reject
-      return ValidationResult.Accept
+      if decoded.isErr:
+        return ValidationResult.Reject
+      return ValidationResult.Accept,
+  )
   # This "validator" will attach to the `metrics` topic and make sure
   # that every message in this topic is valid. This allows us to stop
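Validators are the hook for dropping bad data before it spreads: every registered proc runs on each incoming message for the topic, and a Reject vote stops propagation. As a minimal sketch using the addValidator API shown above, and assuming an initialized GossipSub instance named gossip, a validator that enforces a size cap could look like this (the 1024-byte limit is an arbitrary example):

  # Hypothetical example: reject any `metrics` message larger than 1 KiB.
  gossip.addValidator(
    ["metrics"],
    proc(topic: string, message: Message): Future[ValidationResult] {.async.} =
      if message.data.len > 1024:
        return ValidationResult.Reject
      return ValidationResult.Accept,
  )
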
@@ -87,23 +90,25 @@ proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
   # `John` will be responsible to log the metrics, the rest of the nodes
   # will just forward them in the network
   if node.hostname == "John":
-    node.gossip.subscribe("metrics",
-      proc (topic: string, data: seq[byte]) {.async.} =
-        echo MetricList.decode(data).tryGet()
+    node.gossip.subscribe(
+      "metrics",
+      proc(topic: string, data: seq[byte]) {.async.} =
+        let m = MetricList.decode(data).expect("metric can be decoded")
+        echo m
+      ,
     )
   else:
     node.gossip.subscribe("metrics", nil)

   # Create random metrics 10 times and broadcast them
-  for _ in 0..<10:
+  for _ in 0 ..< 10:
     await sleepAsync(500.milliseconds)
     var metricList = MetricList(hostname: node.hostname)
     let metricCount = rng[].generate(uint32) mod 4
     for i in 0 ..< metricCount + 1:
-      metricList.metrics.add(Metric(
-        name: "metric_" & $i,
-        value: float(rng[].generate(uint16)) / 1000.0
-      ))
+      metricList.metrics.add(
+        Metric(name: "metric_" & $i, value: float(rng[].generate(uint16)) / 1000.0)
+      )

     discard await node.gossip.publish("metrics", encode(metricList).buffer)
   await node.switch.stop()
@@ -111,13 +116,13 @@ proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
 ## For our main procedure, we'll create a few nodes, and connect them together.
 ## Note that they are not all interconnected, but GossipSub will take care of
 ## broadcasting to the full network nonetheless.
-proc main {.async.} =
+proc main() {.async.} =
   let rng = newRng()
   var nodes: seq[Node]

   for hostname in ["John", "Walter", "David", "Thuy", "Amy"]:
     let
-      switch = newStandardSwitch(rng=rng)
+      switch = newStandardSwitch(rng = rng)
       gossip = GossipSub.init(switch = switch, triggerSelf = true)
     switch.mount(gossip)
     await switch.start()
@@ -127,11 +132,12 @@ proc main {.async.} =
   for index, node in nodes:
     # Connect to a few neighbors
     for otherNodeIdx in index - 1 .. index + 2:
-      if otherNodeIdx notin 0 ..< nodes.len or otherNodeIdx == index: continue
+      if otherNodeIdx notin 0 ..< nodes.len or otherNodeIdx == index:
+        continue
       let otherNode = nodes[otherNodeIdx]
       await node.switch.connect(
-        otherNode.switch.peerInfo.peerId,
-        otherNode.switch.peerInfo.addrs)
+        otherNode.switch.peerInfo.peerId, otherNode.switch.peerInfo.addrs
+      )

   var allFuts: seq[Future[void]]
   for node in nodes:
@@ -153,8 +159,8 @@ waitFor(main())
 ## This is John receiving & logging everyone's metrics.
 ##
 ## ## Going further
-## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
-## and [topic params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
+## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
+## and [topic params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
 ## you can achieve very different properties.
 ##
 ## Also see reports for [GossipSub v1.1](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4)

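As a rough sketch of what such tuning can look like, the snippet below tightens the mesh degree and the heartbeat cadence. The field names are taken from the GossipSubParams docs linked above, but treat the exact init signature and the `parameters` argument as assumptions to verify against the current API; the numbers are illustrative, not recommendations:

  # Sketch only: assumes GossipSubParams.init() and a `parameters` argument,
  # and that `switch` is an existing Switch; check the linked docs first.
  var params = GossipSubParams.init()
  params.d = 6         # target mesh degree
  params.dLow = 4      # graft more peers below this
  params.dHigh = 12    # prune peers above this
  params.heartbeatInterval = 700.milliseconds
  let gossip = GossipSub.init(switch = switch, parameters = params, triggerSelf = true)
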
@@ -1,8 +1,9 @@
+{.used.}
 ## # Discovery Manager
 ##
 ## In the [previous tutorial](tutorial_4_gossipsub.md), we built a custom protocol using [protobuf](https://developers.google.com/protocol-buffers) and
 ## spread informations (some metrics) on the network using gossipsub.
-## For this tutorial, on the other hand, we'll go back on a simple example
+## For this tutorial, on the other hand, we'll go back to a simple example
 ## we'll try to discover a specific peers to greet on the network.
 ##
 ## First, as usual, we import the dependencies:
@@ -20,22 +21,30 @@ import libp2p/discovery/discoverymngr
 ##
 ## Note that other discovery methods such as [Kademlia](https://github.com/libp2p/specs/blob/master/kad-dht/README.md) or [discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md) exist.
 proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
-  SwitchBuilder.new()
-  .withRng(newRng())
-  .withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
-  .withTcpTransport()
-  .withYamux()
-  .withNoise()
-  .withRendezVous(rdv)
-  .build()
+  SwitchBuilder
+  .new()
+  .withRng(newRng())
+  .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
+  .withTcpTransport()
+  .withYamux()
+  .withNoise()
+  .withRendezVous(rdv)
+  .build()

 # Create a really simple protocol to log one message received then close the stream
 const DumbCodec = "/dumb/proto/1.0.0"
 type DumbProto = ref object of LPProtocol
 proc new(T: typedesc[DumbProto], nodeNumber: int): T =
-  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
-    echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
-    await conn.close()
+  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
+    try:
+      echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
+    except CancelledError as e:
+      raise e
+    except CatchableError as e:
+      echo "exception in handler", e.msg
+    finally:
+      await conn.close()

   return T.new(codecs = @[DumbCodec], handler = handle)

 ## ## Bootnodes
@@ -49,7 +58,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
 ## (rendezvous in this case) as a bootnode. For this example, we'll
 ## create a bootnode, and then every peer will advertise itself on the
 ## bootnode, and use it to find other peers
-proc main() {.async, gcsafe.} =
+proc main() {.async.} =
   let bootNode = createSwitch()
   await bootNode.start()

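The pattern the rest of this tutorial follows is: each peer advertises a namespace through its DiscoveryManager, then queries that same manager to find others. A condensed sketch of the advertise/find cycle, inside an async proc and assuming a switch built with withRendezVous as above (the "MyTopic" namespace is illustrative):

  # Condensed advertise/find cycle using the DiscoveryManager API
  # that appears later in this tutorial.
  let
    rdv = RendezVous.new()
    switch = createSwitch(rdv)
    dm = DiscoveryManager()
  dm.add(RendezVousInterface.new(rdv))
  await switch.start()

  dm.advertise(RdvNamespace("MyTopic"))          # tell the bootnode we exist
  let query = dm.request(RdvNamespace("MyTopic"))
  let peer = await query.getPeer()               # first peer found on the topic
  echo "found ", peer[PeerId]
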
@@ -58,7 +67,7 @@ proc main() {.async, gcsafe.} =
     switches: seq[Switch] = @[]
     discManagers: seq[DiscoveryManager] = @[]

-  for i in 0..5:
+  for i in 0 .. 5:
     let rdv = RendezVous.new()
     # Create a remote future to await at the end of the program
     let switch = createSwitch(rdv)
@@ -93,7 +102,7 @@ proc main() {.async, gcsafe.} =

   # Use the discovery manager to find peers on the OddClub topic to greet them
   let queryOddClub = dm.request(RdvNamespace("OddClub"))
-  for _ in 0..2:
+  for _ in 0 .. 2:
     let
       # getPeer give you a PeerAttribute containing informations about the peer.
       res = await queryOddClub.getPeer()
@@ -109,7 +118,7 @@ proc main() {.async, gcsafe.} =

   # Maybe it was because he wanted to join the EvenGang
   let queryEvenGang = dm.request(RdvNamespace("EvenGang"))
-  for _ in 0..2:
+  for _ in 0 .. 2:
     let
       res = await queryEvenGang.getPeer()
       conn = await newcomer.dial(res[PeerId], res.getAll(MultiAddress), DumbCodec)

@@ -1,3 +1,4 @@
+{.used.}
 ## # Tron example
 ##
 ## In this tutorial, we will create a video game based on libp2p, using
@@ -51,7 +52,7 @@ proc new(_: type[Game]): Game =
     tickTime: -3.0, # 3 seconds of "warm-up" time
     localPlayer: Player(x: 4, y: 16, currentDir: 3, nextDir: 3, color: 8),
     remotePlayer: Player(x: 27, y: 16, currentDir: 1, nextDir: 1, color: 12),
-    peerFound: newFuture[Connection]()
+    peerFound: newFuture[Connection](),
   )
   for pos in 0 .. result.gameMap.high:
     if pos mod mapSize in [0, mapSize - 1] or pos div mapSize in [0, mapSize - 1]:
@@ -81,7 +82,8 @@ proc update(g: Game, dt: float32) =
   # This is a hacky way to make this happen
   waitFor(sleepAsync(1.milliseconds))
   # Don't do anything if we are still waiting for an opponent
-  if not(g.peerFound.finished()) or isNil(g.tickFinished): return
+  if not (g.peerFound.finished()) or isNil(g.tickFinished):
+    return
   g.tickTime += dt

   # Update the wanted direction, making sure we can't go backward
@@ -98,7 +100,8 @@ proc tick(g: Game, p: Player) =
   # Move player and check if he lost
   p.x += directions[p.currentDir][1]
   p.y += directions[p.currentDir][2]
-  if g.gameMap[p.y * mapSize + p.x] != 0: p.lost = true
+  if g.gameMap[p.y * mapSize + p.x] != 0:
+    p.lost = true
   g.gameMap[p.y * mapSize + p.x] = p.color

 proc mainLoop(g: Game, peer: Connection) {.async.} =
@@ -123,16 +126,23 @@ proc draw(g: Game) =
   for pos, color in g.gameMap:
     setColor(color)
     boxFill(pos mod 32 * 4, pos div 32 * 4, 4, 4)
-  let text = if not(g.peerFound.finished()): "Matchmaking.."
-    elif g.tickTime < -1.5: "Welcome to Etron"
-    elif g.tickTime < 0.0: "- " & $(int(abs(g.tickTime) / 0.5) + 1) & " -"
-    elif g.remotePlayer.lost and g.localPlayer.lost: "DEUCE"
-    elif g.localPlayer.lost: "YOU LOOSE"
-    elif g.remotePlayer.lost: "YOU WON"
-    else: ""
+  let text =
+    if not (g.peerFound.finished()):
+      "Matchmaking.."
+    elif g.tickTime < -1.5:
+      "Welcome to Etron"
+    elif g.tickTime < 0.0:
+      "- " & $(int(abs(g.tickTime) / 0.5) + 1) & " -"
+    elif g.remotePlayer.lost and g.localPlayer.lost:
+      "DEUCE"
+    elif g.localPlayer.lost:
+      "YOU LOOSE"
+    elif g.remotePlayer.lost:
+      "YOU WON"
+    else:
+      ""
   printc(text, screenWidth div 2, screenHeight div 2)

 ## ## Matchmaking
 ## To find an opponent, we will broadcast our address on a
 ## GossipSub topic, and wait for someone to connect to us.
@@ -143,20 +153,27 @@ proc draw(g: Game) =
 ## peer know that we are available, check that he is also available,
 ## and launch the game.
 proc new(T: typedesc[GameProto], g: Game): T =
-  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
-    defer: await conn.closeWithEof()
-    if g.peerFound.finished or g.hasCandidate:
-      await conn.close()
-      return
-    g.hasCandidate = true
-    await conn.writeLp("ok")
-    if "ok" != string.fromBytes(await conn.readLp(1024)):
-      g.hasCandidate = false
-      return
-    g.peerFound.complete(conn)
-    # The handler of a protocol must wait for the stream to
-    # be finished before returning
-    await conn.join()
+  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
+    defer:
+      await conn.closeWithEof()
+    try:
+      if g.peerFound.finished or g.hasCandidate:
+        await conn.close()
+        return
+      g.hasCandidate = true
+      await conn.writeLp("ok")
+      if "ok" != string.fromBytes(await conn.readLp(1024)):
+        g.hasCandidate = false
+        return
+      g.peerFound.complete(conn)
+      # The handler of a protocol must wait for the stream to
+      # be finished before returning
+      await conn.join()
+    except CancelledError as e:
+      raise e
+    except CatchableError as e:
+      echo "exception in handler", e.msg

   return T.new(codecs = @["/tron/1.0.0"], handler = handle)

 proc networking(g: Game) {.async.} =
@@ -164,9 +181,10 @@ proc networking(g: Game) {.async.} =
   # the Discovery examples combined
   let
     rdv = RendezVous.new()
-    switch = SwitchBuilder.new()
+    switch = SwitchBuilder
+      .new()
       .withRng(newRng())
-      .withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
+      .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
       .withTcpTransport()
      .withYamux()
       .withNoise()
@@ -174,9 +192,7 @@ proc networking(g: Game) {.async.} =
       .build()
     dm = DiscoveryManager()
     gameProto = GameProto.new(g)
-    gossip = GossipSub.init(
-      switch = switch,
-      triggerSelf = false)
+    gossip = GossipSub.init(switch = switch, triggerSelf = false)
   dm.add(RendezVousInterface.new(rdv))

   switch.mount(gossip)
@@ -184,10 +200,11 @@ proc networking(g: Game) {.async.} =

   gossip.subscribe(
     "/tron/matchmaking",
-    proc (topic: string, data: seq[byte]) {.async.} =
+    proc(topic: string, data: seq[byte]) {.async.} =
       # If we are still looking for an opponent,
-      # try to match anyone broadcasting it's address
-      if g.peerFound.finished or g.hasCandidate: return
+      # try to match anyone broadcasting its address
+      if g.peerFound.finished or g.hasCandidate:
+        return
       g.hasCandidate = true

       try:
@@ -203,11 +220,12 @@ proc networking(g: Game) {.async.} =
         # We are "player 2"
         swap(g.localPlayer, g.remotePlayer)
       except CatchableError as exc:
-        discard
+        discard,
+  )

   await switch.start()
-  defer: await switch.stop()
+  defer:
+    await switch.stop()

   # As explained in the last tutorial, we need a bootnode to be able
   # to find peers. We could use any libp2p running rendezvous (or any
@@ -243,7 +261,8 @@ proc networking(g: Game) {.async.} =

   # We now wait for someone to connect to us (or for us to connect to someone)
   let peerConn = await g.peerFound
-  defer: await peerConn.closeWithEof()
+  defer:
+    await peerConn.closeWithEof()

   await g.mainLoop(peerConn)

@@ -252,7 +271,14 @@ let
   netFut = networking(game)
 nico.init("Status", "Tron")
 nico.createWindow("Tron", mapSize * 4, mapSize * 4, 4, false)
-nico.run(proc = discard, proc(dt: float32) = game.update(dt), proc = game.draw())
+nico.run(
+  proc() =
+    discard,
+  proc(dt: float32) =
+    game.update(dt),
+  proc() =
+    game.draw(),
+)
 waitFor(netFut.cancelAndWait())

 ## And that's it! If you want to run this code locally, the simplest way is to use the

flake.lock (new file, generated, 27 lines)
@@ -0,0 +1,27 @@
{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1752620740,
        "narHash": "sha256-f3pO+9lg66mV7IMmmIqG4PL3223TYMlnlw+pnpelbss=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "32a4e87942101f1c9f9865e04dc3ddb175f5f32e",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-25.05",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}

flake.nix (new file, 34 lines)
@@ -0,0 +1,34 @@
{
  description = "nim-libp2p dev shell flake";

  nixConfig = {
    extra-substituters = [ "https://nix-cache.status.im/" ];
    extra-trusted-public-keys = [ "nix-cache.status.im-1:x/93lOfLU+duPplwMSBR+OlY4+mo+dCN7n0mr4oPwgY=" ];
  };

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
  };

  outputs = { self, nixpkgs }:
    let
      stableSystems = [
        "x86_64-linux" "aarch64-linux" "armv7a-linux"
        "x86_64-darwin" "aarch64-darwin"
        "x86_64-windows"
      ];
      forEach = nixpkgs.lib.genAttrs;
      forAllSystems = forEach stableSystems;
      pkgsFor = forEach stableSystems (
        system: import nixpkgs { inherit system; }
      );
    in rec {
      devShells = forAllSystems (system: {
        default = pkgsFor.${system}.mkShell {
          nativeBuildInputs = with pkgsFor.${system}; [
            nim-2_2 nimble openssl.dev
          ];
        };
      });
    };
}

funding.json (new file, 5 lines)
@@ -0,0 +1,5 @@
{
  "opRetro": {
    "projectId": "0xc9561ba3e4eca5483b40f8b1a254a73c91fefe4f8aee32dc20c0d96dcf33fe80"
  }
}

interop/hole-punching/Dockerfile (new file, 19 lines)
@@ -0,0 +1,19 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:latest as builder

WORKDIR /workspace

COPY .pinned libp2p.nimble nim-libp2p/

RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev

RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y

COPY . nim-libp2p/

RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs2 --mm:refc -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./interop/hole-punching/hole_punching.nim

FROM --platform=linux/amd64 debian:bullseye-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2 libssl-dev
COPY --from=builder /workspace/nim-libp2p/hole-punching-tests /usr/bin/hole-punch-client
ENV RUST_BACKTRACE=1

interop/hole-punching/hole_punching.nim (new file, 138 lines)
@@ -0,0 +1,138 @@
import std/[os, options, strformat, sequtils]
import redis
import chronos, chronicles
import
  ../../libp2p/[
    builders,
    switch,
    multicodec,
    observedaddrmanager,
    services/hpservice,
    services/autorelayservice,
    protocols/connectivity/autonat/client as aclient,
    protocols/connectivity/relay/client as rclient,
    protocols/connectivity/relay/relay,
    protocols/connectivity/autonat/service,
    protocols/ping,
  ]
import ../../tests/[stubs/autonatclientstub, errorhelpers]

logScope:
  topics = "hp interop node"

proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
  let rng = newRng()
  var builder = SwitchBuilder
    .new()
    .withRng(rng)
    .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
    .withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
    .withTcpTransport({ServerFlags.TcpNoDelay})
    .withYamux()
    .withAutonat()
    .withNoise()

  if hpService != nil:
    builder = builder.withServices(@[hpService])

  if r != nil:
    builder = builder.withCircuitRelay(r)

  let s = builder.build()
  s.mount(Ping.new(rng = rng))
  return s

proc main() {.async.} =
  let relayClient = RelayClient.new()
  let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
  let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
  autonatClientStub.answer = NotReachable
  let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
  let hpservice = HPService.new(autonatService, autoRelayService)

  let
    isListener = getEnv("MODE") == "listen"
    switch = createSwitch(relayClient, hpservice)
    auxSwitch = createSwitch()
    redisClient = open("redis", 6379.Port)

  debug "Connected to redis"

  await switch.start()
  await auxSwitch.start()

  let relayAddr =
    try:
      redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
    except Exception as e:
      raise newException(CatchableError, e.msg)

  debug "All relay addresses", relayAddr

  # This is necessary to make the autonat service work. It will ask this peer for our reachability which the autonat
  # client stub will answer NotReachable.
  await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)

  # Wait for autonat to be NotReachable
  while autonatService.networkReachability != NetworkReachability.NotReachable:
    await sleepAsync(100.milliseconds)

  # This will trigger the autonat relay service to make a reservation.
  let relayMA = MultiAddress.init(relayAddr[1]).tryGet()

  try:
    debug "Dialing relay...", relayMA
    let relayId = await switch.connect(relayMA).wait(30.seconds)
    debug "Connected to relay", relayId
  except AsyncTimeoutError as e:
    raise newException(CatchableError, "Connection to relay timed out: " & e.msg, e)

  # Wait for our relay address to be published
  while not switch.peerInfo.addrs.anyIt(it.contains(multiCodec("p2p-circuit")).tryGet()):
    await sleepAsync(100.milliseconds)

  if isListener:
    let listenerPeerId = switch.peerInfo.peerId
    discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
    debug "Pushed listener client peer id to redis", listenerPeerId

    # Nothing to do anymore, wait to be killed
    await sleepAsync(2.minutes)
  else:
    let listenerId =
      try:
        PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
      except Exception as e:
        raise newException(CatchableError, "Exception init peer: " & e.msg, e)

    debug "Got listener peer id", listenerId
    let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()

    debug "Dialing listener relay address", listenerRelayAddr
    await switch.connect(listenerId, @[listenerRelayAddr])

    # wait for hole-punching to complete in the background
    await sleepAsync(5000.milliseconds)

    let conn = switch.connManager.selectMuxer(listenerId).connection
    let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
    let delay = await Ping.new().ping(channel)
    await allFuturesThrowing(
      channel.close(), conn.close(), switch.stop(), auxSwitch.stop()
    )
    echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""

try:
  proc mainAsync(): Future[string] {.async.} =
    # mainAsync wraps main and returns some value, as otherwise
    # 'waitFor(fut)' has no type (or is ambiguous)
    await main()
    return "done"

  discard waitFor(mainAsync().wait(4.minutes))
except AsyncTimeoutError as e:
  error "Program execution timed out", description = e.msg
  quit(-1)
except CatchableError as e:
  error "Unexpected error", description = e.msg
  quit(-1)

interop/hole-punching/version.json (new file, 7 lines)
@@ -0,0 +1,7 @@
{
  "id": "nim-libp2p-head",
  "containerImageID": "nim-libp2p-head",
  "transports": [
    "tcp"
  ]
}

interop/transport/Dockerfile (new file, 18 lines)
@@ -0,0 +1,18 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:latest as builder

WORKDIR /app

COPY .pinned libp2p.nimble nim-libp2p/

RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev

RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y

COPY . nim-libp2p/

RUN \
  cd nim-libp2p && \
  nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs2 -p:nim-libp2p --mm:refc -d:libp2p_quic_support -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./interop/transport/main.nim

ENTRYPOINT ["/app/nim-libp2p/interop/transport/main"]

interop/transport/main.nim (new file, 113 lines)
@@ -0,0 +1,113 @@
import std/[os, strutils, sequtils], chronos, redis, serialization, json_serialization
import ../../libp2p/[builders, protocols/ping, transports/wstransport]

type ResultJson = object
  handshakePlusOneRTTMillis: float
  pingRTTMilllis: float

let testTimeout =
  try:
    seconds(parseInt(getEnv("test_timeout_seconds")))
  except CatchableError:
    3.minutes

proc main() {.async.} =
  let
    transport = getEnv("transport")
    muxer = getEnv("muxer")
    secureChannel = getEnv("security")
    isDialer = getEnv("is_dialer") == "true"
    envIp = getEnv("ip", "0.0.0.0")
    ip =
      # nim-libp2p doesn't do snazzy ip expansion
      if envIp == "0.0.0.0":
        block:
          let addresses =
            getInterfaces().filterIt(it.name == "eth0").mapIt(it.addresses)
          if addresses.len < 1 or addresses[0].len < 1:
            quit "Can't find local ip!"
          ($addresses[0][0].host).split(":")[0]
      else:
        envIp
    redisAddr = getEnv("redis_addr", "redis:6379").split(":")

    # using synchronous redis because async redis is based on
    # asyncdispatch instead of chronos
    redisClient = open(redisAddr[0], Port(parseInt(redisAddr[1])))

    switchBuilder = SwitchBuilder.new()

  case transport
  of "tcp":
    discard switchBuilder.withTcpTransport().withAddress(
      MultiAddress.init("/ip4/" & ip & "/tcp/0").tryGet()
    )
  of "quic-v1":
    discard switchBuilder.withQuicTransport().withAddress(
      MultiAddress.init("/ip4/" & ip & "/udp/0/quic-v1").tryGet()
    )
  of "ws":
    discard switchBuilder.withWsTransport().withAddress(
      MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet()
    )
  else:
    doAssert false

  case secureChannel
  of "noise":
    discard switchBuilder.withNoise()

  case muxer
  of "yamux":
    discard switchBuilder.withYamux()
  of "mplex":
    discard switchBuilder.withMplex()

  let
    rng = newRng()
    switch = switchBuilder.withRng(rng).build()
    pingProtocol = Ping.new(rng = rng)
  switch.mount(pingProtocol)
  await switch.start()
  defer:
    await switch.stop()

  if not isDialer:
    discard redisClient.rPush("listenerAddr", $switch.peerInfo.fullAddrs.tryGet()[0])
    await sleepAsync(100.hours) # will get cancelled
  else:
    let listenerAddr =
      try:
        redisClient.bLPop(@["listenerAddr"], testTimeout.seconds.int)[1]
      except Exception as e:
        raise newException(CatchableError, "Exception calling bLPop: " & e.msg, e)
    let
      remoteAddr = MultiAddress.init(listenerAddr).tryGet()
      dialingStart = Moment.now()
      remotePeerId = await switch.connect(remoteAddr)
      stream = await switch.dial(remotePeerId, PingCodec)
      pingDelay = await pingProtocol.ping(stream)
      totalDelay = Moment.now() - dialingStart
    await stream.close()

    echo Json.encode(
      ResultJson(
        handshakePlusOneRTTMillis: float(totalDelay.milliseconds),
        pingRTTMilllis: float(pingDelay.milliseconds),
      )
    )

try:
  proc mainAsync(): Future[string] {.async.} =
    # mainAsync wraps main and returns some value, as otherwise
    # 'waitFor(fut)' has no type (or is ambiguous)
    await main()
    return "done"

  discard waitFor(mainAsync().wait(testTimeout))
except AsyncTimeoutError as e:
  error "Program execution timed out", description = e.msg
  quit(-1)
except CatchableError as e:
  error "Unexpected error", description = e.msg
  quit(-1)

interop/transport/version.json (new file, 16 lines)
@@ -0,0 +1,16 @@
{
  "id": "nim-libp2p-head",
  "containerImageID": "nim-libp2p-head",
  "transports": [
    "tcp",
    "ws",
    "quic-v1"
  ],
  "secureChannels": [
    "noise"
  ],
  "muxers": [
    "mplex",
    "yamux"
  ]
}

libp2p.nim (61 changed lines)
@@ -17,11 +17,12 @@ when defined(nimdoc):
   ## stay backward compatible during the Major version, whereas private ones can
   ## change at each new Minor version.
   ##
-  ## If you're new to nim-libp2p, you can find a tutorial `here<https://status-im.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
+  ## If you're new to nim-libp2p, you can find a tutorial `here<https://vacp2p.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
   ## that can help you get started.

   # Import stuff for doc
-  import libp2p/[
+  import
+    libp2p/[
       protobuf/minprotobuf,
       switch,
       stream/lpstream,
@@ -33,37 +34,43 @@ when defined(nimdoc):
       peerid,
       peerinfo,
       peerstore,
-      multiaddress]
+      multiaddress,
+    ]

   proc dummyPrivateProc*() =
     ## A private proc example
     discard

 else:
   import
-    libp2p/[protobuf/minprotobuf,
-            muxers/muxer,
-            muxers/mplex/mplex,
-            stream/lpstream,
-            stream/bufferstream,
-            stream/connection,
-            transports/transport,
-            transports/tcptransport,
-            protocols/secure/noise,
-            cid,
-            multihash,
-            multicodec,
-            errors,
-            switch,
-            peerid,
-            peerinfo,
-            multiaddress,
-            builders,
-            crypto/crypto,
-            protocols/pubsub]
+    libp2p/[
+      protobuf/minprotobuf,
+      muxers/muxer,
+      muxers/mplex/mplex,
+      stream/lpstream,
+      stream/bufferstream,
+      stream/connection,
+      transports/transport,
+      transports/tcptransport,
+      protocols/secure/noise,
+      cid,
+      multihash,
+      multicodec,
+      errors,
+      switch,
+      peerid,
+      peerinfo,
+      multiaddress,
+      builders,
+      crypto/crypto,
+      protocols/pubsub,
+    ]

   export
-    minprotobuf, switch, peerid, peerinfo,
-    connection, multiaddress, crypto, lpstream,
-    bufferstream, muxer, mplex, transport,
-    tcptransport, noise, errors, cid, multihash,
+    minprotobuf, switch, peerid, peerinfo, connection, multiaddress, crypto, lpstream,
+    bufferstream, muxer, mplex, transport, tcptransport, noise, errors, cid, multihash,
     multicodec, builders, pubsub

 when defined(libp2p_quic_support):
   import libp2p/transports/quictransport
   export quictransport

libp2p.nimble (114 changed lines)
@@ -1,23 +1,17 @@
 mode = ScriptMode.Verbose

 packageName = "libp2p"
-version = "1.1.0"
+version = "1.12.0"
 author = "Status Research & Development GmbH"
 description = "LibP2P implementation"
 license = "MIT"
 skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]

-requires "nim >= 1.6.0",
-  "nimcrypto >= 0.4.1",
-  "dnsclient >= 0.3.0 & < 0.4.0",
-  "bearssl >= 0.1.4",
-  "chronicles >= 0.10.2",
-  "chronos >= 3.0.6",
-  "metrics",
-  "secp256k1",
-  "stew#head",
-  "websock",
-  "unittest2 >= 0.0.5 & < 0.1.0"
+requires "nim >= 2.0.0",
+  "nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
+  "chronicles >= 0.11.0 & < 0.12.0", "chronos >= 4.0.4", "metrics", "secp256k1",
+  "stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.15",
+  "https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"

 let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
 let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -26,21 +20,17 @@ let verbose = getEnv("V", "") notin ["", "0"]

 let cfg =
   " --styleCheck:usages --styleCheck:error" &
-  (if verbose: "" else: " --verbosity:0 --hints:off") &
-  " --skipParentCfg --skipUserCfg -f" &
+  (if verbose: "" else: " --verbosity:0 --hints:off") & " --skipUserCfg -f" &
   " --threads:on --opt:speed"

 import hashes, strutils

-proc runTest(filename: string, verify: bool = true, sign: bool = true,
-             moreoptions: string = "") =
+proc runTest(filename: string, moreoptions: string = "") =
   var excstr = nimc & " " & lang & " -d:debug " & cfg & " " & flags
-  excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
-  excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
   excstr.add(" " & moreoptions & " ")
   if getEnv("CICOV").len > 0:
     excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
-  exec excstr & " -r " & " tests/" & filename
+  exec excstr & " -r -d:libp2p_quic_support -d:libp2p_autotls_support tests/" & filename
   rmFile "tests/" & filename.toExe

 proc buildSample(filename: string, run = false, extraFlags = "") =
@@ -52,7 +42,8 @@ proc buildSample(filename: string, run = false, extraFlags = "") =
   rmFile "examples/" & filename.toExe

 proc tutorialToMd(filename: string) =
-  let markdown = gorge "cat " & filename & " | " & nimc & " " & lang & " -r --verbosity:0 --hints:off tools/markdown_builder.nim "
+  let markdown = gorge "cat " & filename & " | " & nimc & " " & lang &
+    " -r --verbosity:0 --hints:off tools/markdown_builder.nim "
   writeFile(filename.replace(".nim", ".md"), markdown)

 task testnative, "Runs libp2p native tests":
@@ -65,38 +56,18 @@ task testinterop, "Runs interop tests":
   runTest("testinterop")

 task testpubsub, "Runs pubsub tests":
-  runTest("pubsub/testgossipinternal", sign = false, verify = false, moreoptions = "-d:pubsub_internal_testing")
-  runTest("pubsub/testpubsub")
-  runTest("pubsub/testpubsub", sign = false, verify = false)
-  runTest("pubsub/testpubsub", sign = false, verify = false, moreoptions = "-d:libp2p_pubsub_anonymize=true")
-
-task testpubsub_slim, "Runs pubsub tests":
-  runTest("pubsub/testgossipinternal", sign = false, verify = false, moreoptions = "-d:pubsub_internal_testing")
   runTest("pubsub/testpubsub")
+  runTest("pubsub/testpubsub", "-d:libp2p_gossipsub_1_4")

 task testfilter, "Run PKI filter test":
-  runTest("testpkifilter",
-    moreoptions = "-d:libp2p_pki_schemes=\"secp256k1\"")
-  runTest("testpkifilter",
-    moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519\"")
-  runTest("testpkifilter",
-    moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519;ecnist\"")
-  runTest("testpkifilter",
-    moreoptions = "-d:libp2p_pki_schemes=")
+  runTest("testpkifilter")
+  runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=")

 task testintegration, "Runs integraion tests":
   runTest("testintegration")

 task test, "Runs the test suite":
-  exec "nimble testnative"
-  exec "nimble testpubsub"
-  exec "nimble testdaemon"
-  exec "nimble testinterop"
+  runTest("testall")
   exec "nimble testfilter"
   exec "nimble examples_build"

-task test_slim, "Runs the (slimmed down) test suite":
-  exec "nimble testnative"
-  exec "nimble testpubsub_slim"
-  exec "nimble testfilter"
-  exec "nimble examples_build"
-
 task website, "Build the website":
   tutorialToMd("examples/tutorial_1_connect.nim")
@@ -108,18 +79,12 @@ task website, "Build the website":
   tutorialToMd("examples/circuitrelay.nim")
   exec "mkdocs build"

-task examples_build, "Build the samples":
-  buildSample("directchat")
-  buildSample("helloworld", true)
-  buildSample("circuitrelay", true)
-  buildSample("tutorial_1_connect", true)
-  buildSample("tutorial_2_customproto", true)
-  buildSample("tutorial_3_protobuf", true)
-  buildSample("tutorial_4_gossipsub", true)
-  buildSample("tutorial_5_discovery", true)
-  exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
-  exec "nimble install -y nico"
-  buildSample("tutorial_6_game", false, "--styleCheck:off")
+task examples, "Build and run examples":
+  exec "nimble install -y nimpng"
+  exec "nimble install -y nico --passNim=--skipParentCfg"
+  buildSample("examples_build", false, "--styleCheck:off") # build only
+
+  buildSample("examples_run", true)

 # pin system
 # while nimble lockfile
@@ -135,7 +100,9 @@ task pin, "Create a lockfile":
 import sequtils
 import os
 task install_pinned, "Reads the lockfile":
-  let toInstall = readFile(PinFile).splitWhitespace().mapIt((it.split(";", 1)[0], it.split(";", 1)[1]))
+  let toInstall = readFile(PinFile).splitWhitespace().mapIt(
+    (it.split(";", 1)[0], it.split(";", 1)[1])
+  )
   # [('packageName', 'packageFullUri')]

   rmDir("nimbledeps")
@@ -145,8 +112,7 @@ task install_pinned, "Reads the lockfile":
   # Remove the automatically installed deps
   # (inefficient you say?)
   let nimblePkgs =
-    if system.dirExists("nimbledeps/pkgs"): "nimbledeps/pkgs"
-    else: "nimbledeps/pkgs2"
+    if system.dirExists("nimbledeps/pkgs"): "nimbledeps/pkgs" else: "nimbledeps/pkgs2"
   for dependency in listDirs(nimblePkgs):
     let
       fileName = dependency.extractFilename
@@ -154,14 +120,12 @@ task install_pinned, "Reads the lockfile":
       packageName = fileName.split('-')[0]

     if toInstall.anyIt(
-      it[0] == packageName and
-      (
-        it[1].split('#')[^1] in fileContent or # nimble for nim 2.X
-        fileName.endsWith(it[1].split('#')[^1]) # nimble for nim 1.X
-      )
-    ) == false or
-      fileName.split('-')[^1].len < 20: # safegard for nimble for nim 1.X
-      rmDir(dependency)
+      it[0] == packageName and (
+        it[1].split('#')[^1] in fileContent or # nimble for nim 2.X
+        fileName.endsWith(it[1].split('#')[^1]) # nimble for nim 1.X
+      )
+    ) == false or fileName.split('-')[^1].len < 20: # safegard for nimble for nim 1.X
+      rmDir(dependency)

 task unpin, "Restore global package use":
   rmDir("nimbledeps")

533
libp2p/autotls/acme/api.nim
Normal file
533
libp2p/autotls/acme/api.nim
Normal file
@@ -0,0 +1,533 @@
|
||||
import json, uri
|
||||
from times import DateTime, parse
|
||||
import chronos/apps/http/httpclient, results, chronicles
|
||||
|
||||
import ./utils
|
||||
import ../../crypto/crypto
|
||||
import ../../crypto/rsa
|
||||
|
||||
export ACMEError
|
||||
|
||||
logScope:
|
||||
topics = "libp2p acme api"
|
||||
|
||||
const
|
||||
LetsEncryptURL* = "https://acme-v02.api.letsencrypt.org"
|
||||
LetsEncryptURLStaging* = "https://acme-staging-v02.api.letsencrypt.org"
|
||||
Alg = "RS256"
|
||||
DefaultChalCompletedRetries = 10
|
||||
DefaultChalCompletedRetryTime = 1.seconds
|
||||
DefaultFinalizeRetries = 10
|
||||
DefaultFinalizeRetryTime = 1.seconds
|
||||
DefaultRandStringSize = 256
|
||||
ACMEHttpHeaders = [("Content-Type", "application/jose+json")]
|
||||
|
||||
type Authorization* = string
|
||||
type Domain* = string
|
||||
type Kid* = string
|
||||
type Nonce* = string
|
||||
|
||||
type ACMEDirectory* = object
|
||||
newNonce*: string
|
||||
newOrder*: string
|
||||
newAccount*: string
|
||||
|
||||
type ACMEApi* = ref object of RootObj
|
||||
directory: Opt[ACMEDirectory]
|
||||
session: HttpSessionRef
|
||||
acmeServerURL*: Uri
|
||||
|
||||
type HTTPResponse* = object
|
||||
body*: JsonNode
|
||||
headers*: HttpTable
|
||||
|
||||
type JWK = object
|
||||
kty: string
|
||||
n: string
|
||||
e: string
|
||||
|
||||
# whether the request uses Kid or not
|
||||
type ACMERequestType = enum
|
||||
ACMEJwkRequest
|
||||
ACMEKidRequest
|
||||
|
||||
type ACMERequestHeader = object
|
||||
alg: string
|
||||
typ: string
|
||||
nonce: Nonce
|
||||
url: string
|
||||
case kind: ACMERequestType
|
||||
of ACMEJwkRequest:
|
||||
jwk: JWK
|
||||
of ACMEKidRequest:
|
||||
kid: Kid
|
||||
|
||||
type Email = string
|
||||
|
||||
type ACMERegisterRequest* = object
|
||||
termsOfServiceAgreed: bool
|
||||
contact: seq[Email]
|
||||
|
||||
type ACMEAccountStatus = enum
|
||||
valid = "valid"
|
||||
deactivated = "deactivated"
|
||||
revoked = "revoked"
|
||||
|
||||
type ACMERegisterResponseBody = object
|
||||
status*: ACMEAccountStatus
|
||||
|
||||
type ACMERegisterResponse* = object
|
||||
kid*: Kid
|
||||
status*: ACMEAccountStatus
|
||||
|
||||
type ACMEChallengeStatus* {.pure.} = enum
|
||||
PENDING = "pending"
|
||||
PROCESSING = "processing"
|
||||
VALID = "valid"
|
||||
INVALID = "invalid"
|
||||
|
||||
type ACMEOrderStatus* {.pure.} = enum
|
||||
PENDING = "pending"
|
||||
READY = "ready"
|
||||
PROCESSING = "processing"
|
||||
VALID = "valid"
|
||||
INVALID = "invalid"
|
||||
|
||||
type ACMEChallengeType* {.pure.} = enum
|
||||
DNS01 = "dns-01"
|
||||
HTTP01 = "http-01"
|
||||
TLSALPN01 = "tls-alpn-01"
|
||||
|
||||
type ACMEChallengeToken* = string
|
||||
|
||||
type ACMEChallenge* = object
|
||||
url*: string
|
||||
`type`*: ACMEChallengeType
|
||||
status*: ACMEChallengeStatus
|
||||
token*: ACMEChallengeToken
|
||||
|
||||
type ACMEChallengeIdentifier = object
|
||||
`type`: string
|
||||
value: string
|
||||
|
||||
type ACMEChallengeRequest = object
|
||||
identifiers: seq[ACMEChallengeIdentifier]
|
||||
|
||||
type ACMEChallengeResponseBody = object
|
||||
status: ACMEOrderStatus
|
||||
authorizations: seq[Authorization]
|
||||
finalize: string
|
||||
|
||||
type ACMEChallengeResponse* = object
|
||||
status*: ACMEOrderStatus
|
||||
authorizations*: seq[Authorization]
|
||||
finalize*: string
|
||||
order*: string
|
||||
|
||||
type ACMEChallengeResponseWrapper* = object
|
||||
finalize*: string
|
||||
order*: string
|
||||
dns01*: ACMEChallenge
|
||||
|
||||
type ACMEAuthorizationsResponse* = object
|
||||
challenges*: seq[ACMEChallenge]
|
||||
|
||||
type ACMECompletedResponse* = object
|
||||
url: string
|
||||
|
||||
type ACMECheckKind* = enum
|
||||
ACMEOrderCheck
|
||||
ACMEChallengeCheck
|
||||
|
||||
type ACMECheckResponse* = object
|
||||
case kind: ACMECheckKind
|
||||
of ACMEOrderCheck:
|
||||
orderStatus: ACMEOrderStatus
|
||||
of ACMEChallengeCheck:
|
||||
chalStatus: ACMEChallengeStatus
|
||||
retryAfter: Duration
|
||||
|
||||
type ACMEFinalizeResponse* = object
|
||||
status: ACMEOrderStatus
|
||||
|
||||
type ACMEOrderResponse* = object
|
||||
certificate: string
|
||||
expires: string
|
||||
|
||||
type ACMECertificateResponse* = object
|
||||
rawCertificate*: string
|
||||
certificateExpiry*: DateTime
|
||||
|
||||
type ACMECertificate* = object
|
||||
rawCertificate*: string
|
||||
certificateExpiry*: DateTime
|
||||
certKeyPair*: KeyPair
|
||||
|
||||
when defined(libp2p_autotls_support):
|
||||
import options, sequtils, strutils, jwt, bearssl/pem
|
||||
|
||||
template handleError*(msg: string, body: untyped): untyped =
|
||||
try:
|
||||
body
|
||||
except ACMEError as exc:
|
||||
raise exc
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except JsonKindError as exc:
|
||||
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
|
||||
except ValueError as exc:
|
||||
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
|
||||
except HttpError as exc:
|
||||
raise newException(ACMEError, msg & ": Failed to connect to ACME server", exc)
|
||||
except CatchableError as exc:
|
||||
raise newException(ACMEError, msg & ": Unexpected error", exc)
|
||||
|
||||
method post*(
|
||||
self: ACMEApi, uri: Uri, payload: string
|
||||
): Future[HTTPResponse] {.
|
||||
async: (raises: [ACMEError, HttpError, CancelledError]), base
|
||||
.}
|
||||
|
||||
method get*(
|
||||
self: ACMEApi, uri: Uri
|
||||
): Future[HTTPResponse] {.
|
||||
async: (raises: [ACMEError, HttpError, CancelledError]), base
|
||||
.}
|
||||
|
||||
proc new*(
|
||||
T: typedesc[ACMEApi], acmeServerURL: Uri = parseUri(LetsEncryptURL)
|
||||
): ACMEApi =
|
||||
let session = HttpSessionRef.new()
|
||||
|
||||
ACMEApi(
|
||||
session: session, directory: Opt.none(ACMEDirectory), acmeServerURL: acmeServerURL
|
||||
)
|
||||
|
||||
proc getDirectory(
|
||||
self: ACMEApi
|
||||
): Future[ACMEDirectory] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
handleError("getDirectory"):
|
||||
self.directory.valueOr:
|
||||
let acmeResponse = await self.get(self.acmeServerURL / "directory")
|
||||
let directory = acmeResponse.body.to(ACMEDirectory)
|
||||
self.directory = Opt.some(directory)
|
||||
directory
|
||||
|
||||
method requestNonce*(
|
||||
self: ACMEApi
|
||||
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]), base.} =
|
||||
handleError("requestNonce"):
|
||||
let acmeResponse = await self.get(parseUri((await self.getDirectory()).newNonce))
|
||||
Nonce(acmeResponse.headers.keyOrError("Replay-Nonce"))
|
||||
|
||||
# TODO: save n and e in account so we don't have to recalculate every time
|
||||
proc acmeHeader(
|
||||
self: ACMEApi, uri: Uri, key: KeyPair, needsJwk: bool, kid: Opt[Kid]
|
||||
): Future[ACMERequestHeader] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
if not needsJwk and kid.isNone():
|
||||
raise newException(ACMEError, "kid not set")
|
||||
|
||||
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
|
||||
raise newException(ACMEError, "Unsupported signing key type")
|
||||
|
||||
let newNonce = await self.requestNonce()
|
||||
if needsJwk:
|
||||
let pubkey = key.pubkey.rsakey
|
||||
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
|
||||
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
|
||||
ACMERequestHeader(
|
||||
kind: ACMEJwkRequest,
|
||||
alg: Alg,
|
||||
typ: "JWT",
|
||||
nonce: newNonce,
|
||||
url: $uri,
|
||||
jwk: JWK(kty: "RSA", n: base64UrlEncode(nArray), e: base64UrlEncode(eArray)),
|
||||
)
|
||||
else:
|
||||
ACMERequestHeader(
|
||||
kind: ACMEKidRequest,
|
||||
alg: Alg,
|
||||
typ: "JWT",
|
||||
nonce: newNonce,
|
||||
url: $uri,
|
||||
kid: kid.get(),
|
||||
)
|
||||
|
||||
method post*(
|
||||
self: ACMEApi, uri: Uri, payload: string
|
||||
): Future[HTTPResponse] {.
|
||||
async: (raises: [ACMEError, HttpError, CancelledError]), base
|
||||
.} =
|
||||
let rawResponse = await HttpClientRequestRef
|
||||
.post(self.session, $uri, body = payload, headers = ACMEHttpHeaders)
|
||||
.get()
|
||||
.send()
|
||||
let body = await rawResponse.getResponseBody()
|
||||
HTTPResponse(body: body, headers: rawResponse.headers)
|
||||
|
||||
method get*(
|
||||
self: ACMEApi, uri: Uri
|
||||
): Future[HTTPResponse] {.
|
||||
async: (raises: [ACMEError, HttpError, CancelledError]), base
|
||||
.} =
|
||||
let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
|
||||
let body = await rawResponse.getResponseBody()
|
||||
HTTPResponse(body: body, headers: rawResponse.headers)
|
||||
|
||||
proc createSignedAcmeRequest(
|
||||
self: ACMEApi,
|
||||
uri: Uri,
|
||||
payload: auto,
|
||||
key: KeyPair,
|
||||
needsJwk: bool = false,
|
||||
kid: Opt[Kid] = Opt.none(Kid),
|
||||
): Future[string] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
|
||||
raise newException(ACMEError, "Unsupported signing key type")
|
||||
|
||||
let acmeHeader = await self.acmeHeader(uri, key, needsJwk, kid)
|
||||
handleError("createSignedAcmeRequest"):
|
||||
var token = toJWT(%*{"header": acmeHeader, "claims": payload})
|
||||
let derPrivKey = key.seckey.rsakey.getBytes.get
|
||||
let pemPrivKey: string = pemEncode(derPrivKey, "PRIVATE KEY")
|
||||
token.sign(pemPrivKey)
|
||||
$token.toFlattenedJson()
|
||||
|
||||
proc requestRegister*(
|
||||
self: ACMEApi, key: KeyPair
|
||||
): Future[ACMERegisterResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
let registerRequest = ACMERegisterRequest(termsOfServiceAgreed: true)
|
||||
handleError("acmeRegister"):
|
||||
let payload = await self.createSignedAcmeRequest(
|
||||
parseUri((await self.getDirectory()).newAccount),
|
||||
registerRequest,
|
||||
key,
|
||||
needsJwk = true,
|
||||
)
|
||||
let acmeResponse =
|
||||
await self.post(parseUri((await self.getDirectory()).newAccount), payload)
|
||||
let acmeResponseBody = acmeResponse.body.to(ACMERegisterResponseBody)
|
||||
|
||||
ACMERegisterResponse(
|
||||
status: acmeResponseBody.status,
|
||||
kid: acmeResponse.headers.keyOrError("location"),
|
||||
)
|
||||
|
||||
proc requestNewOrder*(
|
||||
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
|
||||
): Future[ACMEChallengeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
# request challenge from ACME server
|
||||
let orderRequest = ACMEChallengeRequest(
|
||||
identifiers: domains.mapIt(ACMEChallengeIdentifier(`type`: "dns", value: it))
|
||||
)
|
||||
handleError("requestNewOrder"):
|
||||
let payload = await self.createSignedAcmeRequest(
|
||||
parseUri((await self.getDirectory()).newOrder),
|
||||
orderRequest,
|
||||
key,
|
||||
kid = Opt.some(kid),
|
||||
)
|
||||
let acmeResponse =
|
||||
await self.post(parseUri((await self.getDirectory()).newOrder), payload)
|
||||
let challengeResponseBody = acmeResponse.body.to(ACMEChallengeResponseBody)
|
||||
if challengeResponseBody.authorizations.len == 0:
|
||||
raise newException(ACMEError, "Authorizations field is empty")
|
||||
ACMEChallengeResponse(
|
||||
status: challengeResponseBody.status,
|
||||
authorizations: challengeResponseBody.authorizations,
|
||||
finalize: challengeResponseBody.finalize,
|
||||
order: acmeResponse.headers.keyOrError("location"),
|
||||
)
|
||||
|
||||
proc requestAuthorizations*(
|
||||
self: ACMEApi, authorizations: seq[Authorization], key: KeyPair, kid: Kid
|
||||
): Future[ACMEAuthorizationsResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
handleError("requestAuthorizations"):
|
||||
doAssert authorizations.len > 0
|
||||
let acmeResponse = await self.get(parseUri(authorizations[0]))
|
||||
acmeResponse.body.to(ACMEAuthorizationsResponse)
|
||||
|
||||
proc requestChallenge*(
|
||||
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
|
||||
): Future[ACMEChallengeResponseWrapper] {.
|
||||
async: (raises: [ACMEError, CancelledError])
|
||||
.} =
|
||||
let orderResponse = await self.requestNewOrder(domains, key, kid)
|
||||
if orderResponse.status != ACMEOrderStatus.PENDING and
|
||||
orderResponse.status != ACMEOrderStatus.READY:
|
||||
# ready is a valid status when renewing certs before expiry
|
||||
raise
|
||||
newException(ACMEError, "Invalid new order status: " & $orderResponse.status)
|
||||
|
||||
let authorizationsResponse =
|
||||
await self.requestAuthorizations(orderResponse.authorizations, key, kid)
|
||||
if authorizationsResponse.challenges.len == 0:
|
||||
raise newException(ACMEError, "No challenges received")
|
||||
|
||||
return ACMEChallengeResponseWrapper(
|
||||
finalize: orderResponse.finalize,
|
||||
order: orderResponse.order,
|
||||
dns01: authorizationsResponse.challenges.filterIt(
|
||||
it.`type` == ACMEChallengeType.DNS01
|
||||
)[0],
|
||||
# getting the first element is safe since we checked that authorizationsResponse.challenges.len != 0
|
||||
)
|
||||
|
||||
proc requestCheck*(
|
||||
self: ACMEApi, checkURL: Uri, checkKind: ACMECheckKind, key: KeyPair, kid: Kid
|
||||
): Future[ACMECheckResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
handleError("requestCheck"):
|
||||
let acmeResponse = await self.get(checkURL)
|
||||
let retryAfter =
|
||||
try:
|
||||
parseInt(acmeResponse.headers.keyOrError("Retry-After")).seconds
|
||||
except ValueError:
|
||||
DefaultChalCompletedRetryTime
|
||||
|
||||
case checkKind
|
||||
of ACMEOrderCheck:
|
||||
try:
|
||||
ACMECheckResponse(
|
||||
kind: checkKind,
|
||||
orderStatus: parseEnum[ACMEOrderStatus](acmeResponse.body["status"].getStr),
|
||||
retryAfter: retryAfter,
|
||||
)
|
||||
except ValueError:
|
||||
raise newException(
|
||||
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
|
||||
)
|
||||
of ACMEChallengeCheck:
|
||||
try:
|
||||
ACMECheckResponse(
|
||||
kind: checkKind,
|
||||
chalStatus:
|
||||
parseEnum[ACMEChallengeStatus](acmeResponse.body["status"].getStr),
|
||||
retryAfter: retryAfter,
|
||||
)
|
||||
except ValueError:
|
||||
raise newException(
|
||||
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
|
||||
)
|
||||
  proc sendChallengeCompleted*(
      self: ACMEApi, chalURL: Uri, key: KeyPair, kid: Kid
  ): Future[ACMECompletedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
    handleError("sendChallengeCompleted"):
      let payload =
        await self.createSignedAcmeRequest(chalURL, %*{}, key, kid = Opt.some(kid))
      let acmeResponse = await self.post(chalURL, payload)
      acmeResponse.body.to(ACMECompletedResponse)

  proc checkChallengeCompleted*(
      self: ACMEApi,
      checkURL: Uri,
      key: KeyPair,
      kid: Kid,
      retries: int = DefaultChalCompletedRetries,
  ): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
    for i in 0 .. retries:
      let checkResponse =
        await self.requestCheck(checkURL, ACMEChallengeCheck, key, kid)
      case checkResponse.chalStatus
      of ACMEChallengeStatus.PENDING:
        await sleepAsync(checkResponse.retryAfter) # try again after some delay
      of ACMEChallengeStatus.VALID:
        return true
      else:
        raise newException(
          ACMEError,
          "Failed challenge completion: expected 'valid', got '" &
            $checkResponse.chalStatus & "'",
        )
    return false

  proc completeChallenge*(
      self: ACMEApi,
      chalURL: Uri,
      key: KeyPair,
      kid: Kid,
      retries: int = DefaultChalCompletedRetries,
  ): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
    let completedResponse = await self.sendChallengeCompleted(chalURL, key, kid)
    # check until acme server is done (poll validation)
    return await self.checkChallengeCompleted(chalURL, key, kid, retries = retries)

  proc requestFinalize*(
      self: ACMEApi,
      domain: Domain,
      finalize: Uri,
      certKeyPair: KeyPair,
      key: KeyPair,
      kid: Kid,
  ): Future[ACMEFinalizeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
    handleError("requestFinalize"):
      let payload = await self.createSignedAcmeRequest(
        finalize, %*{"csr": createCSR(domain, certKeyPair)}, key, kid = Opt.some(kid)
      )
      let acmeResponse = await self.post(finalize, payload)
      # server responds with updated order response
      acmeResponse.body.to(ACMEFinalizeResponse)

  proc checkCertFinalized*(
      self: ACMEApi,
      order: Uri,
      key: KeyPair,
      kid: Kid,
      retries: int = DefaultChalCompletedRetries,
  ): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
    for i in 0 .. retries:
      let checkResponse = await self.requestCheck(order, ACMEOrderCheck, key, kid)
      case checkResponse.orderStatus
      of ACMEOrderStatus.VALID:
        return true
      of ACMEOrderStatus.PROCESSING:
        await sleepAsync(checkResponse.retryAfter) # try again after some delay
      else:
        error "Failed certificate finalization",
          description = "expected 'valid', got '" & $checkResponse.orderStatus & "'"
        return false # do not try again

    return false

  proc certificateFinalized*(
      self: ACMEApi,
      domain: Domain,
      finalize: Uri,
      order: Uri,
      certKeyPair: KeyPair,
      key: KeyPair,
      kid: Kid,
      retries: int = DefaultFinalizeRetries,
  ): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
    let finalizeResponse =
      await self.requestFinalize(domain, finalize, certKeyPair, key, kid)
    # keep checking order until cert is valid (done)
    return await self.checkCertFinalized(order, key, kid, retries = retries)

  proc requestGetOrder*(
      self: ACMEApi, order: Uri
  ): Future[ACMEOrderResponse] {.async: (raises: [ACMEError, CancelledError]).} =
    handleError("requestGetOrder"):
      let acmeResponse = await self.get(order)
      acmeResponse.body.to(ACMEOrderResponse)

  proc downloadCertificate*(
      self: ACMEApi, order: Uri
  ): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
    let orderResponse = await self.requestGetOrder(order)

    handleError("downloadCertificate"):
      let rawResponse = await HttpClientRequestRef
        .get(self.session, orderResponse.certificate)
        .get()
        .send()
      ACMECertificateResponse(
        rawCertificate: bytesToString(await rawResponse.getBodyBytes()),
        certificateExpiry: parse(orderResponse.expires, "yyyy-MM-dd'T'HH:mm:ss'Z'"),
      )

  proc close*(self: ACMEApi) {.async: (raises: [CancelledError]).} =
    await self.session.closeWait()

else:
  {.hint: "autotls disabled. Use -d:libp2p_autotls_support".}
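Taken together, these procs implement the poll-until-done half of the ACME exchange: notify, poll the challenge, finalize, poll the order, then download. A minimal sketch of how a caller might drive them, assuming `api`, `key`, `kid` and the three URLs came from an earlier challenge request (the proc name `obtainCert` is illustrative, not part of this diff):

proc obtainCert(
    api: ACMEApi,
    domain: Domain,
    key, certKey: KeyPair,
    kid: Kid,
    chalURL, finalizeURL, orderURL: Uri,
): Future[ACMECertificateResponse] {.async.} =
  # notify the server and poll until the challenge turns 'valid'
  if not await api.completeChallenge(chalURL, key, kid):
    raise newException(ACMEError, "challenge never became valid")
  # submit the CSR and poll the order until it turns 'valid'
  if not await api.certificateFinalized(domain, finalizeURL, orderURL, certKey, key, kid):
    raise newException(ACMEError, "order never became valid")
  # fetch the issued certificate chain
  await api.downloadCertificate(orderURL)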
98  libp2p/autotls/acme/client.nim  Normal file
@@ -0,0 +1,98 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import chronicles
import ../../crypto/crypto
import ./api

export api

type KeyAuthorization* = string

type ACMEClient* = ref object
  api: ACMEApi
  key*: KeyPair
  kid*: Kid

logScope:
  topics = "libp2p acme client"

when defined(libp2p_autotls_support):
  import uri
  import chronos, results, stew/byteutils
  import ../../crypto/rsa
  import ./utils

  proc new*(
      T: typedesc[ACMEClient],
      rng: ref HmacDrbgContext = newRng(),
      api: ACMEApi = ACMEApi.new(acmeServerURL = parseUri(LetsEncryptURL)),
      key: Opt[KeyPair] = Opt.none(KeyPair),
      kid: Kid = Kid(""),
  ): T {.raises: [].} =
    let key = key.valueOr:
      KeyPair.random(PKScheme.RSA, rng[]).get()
    T(api: api, key: key, kid: kid)

  proc getOrInitKid*(
      self: ACMEClient
  ): Future[Kid] {.async: (raises: [ACMEError, CancelledError]).} =
    if self.kid.len == 0:
      let registerResponse = await self.api.requestRegister(self.key)
      self.kid = registerResponse.kid
    return self.kid

  proc genKeyAuthorization*(self: ACMEClient, token: string): KeyAuthorization =
    base64UrlEncode(@(sha256.digest((token & "." & thumbprint(self.key)).toBytes).data))

  proc getChallenge*(
      self: ACMEClient, domains: seq[api.Domain]
  ): Future[ACMEChallengeResponseWrapper] {.
      async: (raises: [ACMEError, CancelledError])
  .} =
    await self.api.requestChallenge(domains, self.key, await self.getOrInitKid())

  proc getCertificate*(
      self: ACMEClient,
      domain: api.Domain,
      certKeyPair: KeyPair,
      challenge: ACMEChallengeResponseWrapper,
  ): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
    let chalURL = parseUri(challenge.dns01.url)
    let orderURL = parseUri(challenge.order)
    let finalizeURL = parseUri(challenge.finalize)
    trace "Sending challenge completed notification"
    discard await self.api.sendChallengeCompleted(
      chalURL, self.key, await self.getOrInitKid()
    )

    trace "Checking for completed challenge"
    let completed = await self.api.checkChallengeCompleted(
      chalURL, self.key, await self.getOrInitKid()
    )
    if not completed:
      raise newException(
        ACMEError, "Failed to signal ACME server about challenge completion"
      )

    trace "Waiting for certificate to be finalized"
    let finalized = await self.api.certificateFinalized(
      domain, finalizeURL, orderURL, certKeyPair, self.key, await self.getOrInitKid()
    )
    if not finalized:
      raise
        newException(ACMEError, "Failed to finalize certificate for domain " & domain)

    trace "Downloading certificate"
    await self.api.downloadCertificate(orderURL)

  proc close*(self: ACMEClient) {.async: (raises: [CancelledError]).} =
    await self.api.close()
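ACMEClient bundles the account key and kid so callers only deal with challenge and certificate objects. A rough usage sketch, assuming autotls support is compiled in and the DNS-01 TXT record for the token has already been published (the domain string is illustrative):

proc example() {.async.} =
  let client = ACMEClient.new()
  let challenge = await client.getChallenge(@[api.Domain("example.libp2p.direct")])
  # publish client.genKeyAuthorization(challenge.dns01.token) as the
  # _acme-challenge TXT record before continuing
  let certKey = KeyPair.random(PKScheme.RSA, newRng()[]).get()
  let cert = await client.getCertificate(
    api.Domain("example.libp2p.direct"), certKey, challenge
  )
  echo cert.certificateExpiry
  await client.close()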
40  libp2p/autotls/acme/mockapi.nim  Normal file
@@ -0,0 +1,40 @@
import uri
import chronos, chronos/apps/http/httpclient, json
import ./api, ./utils

export api

type MockACMEApi* = ref object of ACMEApi
  mockedResponses*: seq[HTTPResponse]

proc new*(
    T: typedesc[MockACMEApi]
): Future[T] {.async: (raises: [ACMEError, CancelledError]).} =
  let directory = ACMEDirectory(
    newNonce: LetsEncryptURL & "/new-nonce",
    newOrder: LetsEncryptURL & "/new-order",
    newAccount: LetsEncryptURL & "/new-account",
  )
  MockACMEApi(
    session: HttpSessionRef.new(),
    directory: Opt.some(directory),
    acmeServerURL: parseUri(LetsEncryptURL),
  )

when defined(libp2p_autotls_support):
  method requestNonce*(
      self: MockACMEApi
  ): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
    return $self.acmeServerURL & "/acme/1234"

  method post*(
      self: MockACMEApi, uri: Uri, payload: string
  ): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
    result = self.mockedResponses[0]
    self.mockedResponses.delete(0)

  method get*(
      self: MockACMEApi, uri: Uri
  ): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
    result = self.mockedResponses[0]
    self.mockedResponses.delete(0)
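The mock simply replays queued responses in FIFO order, so a test seeds `mockedResponses` before exercising the code under test. A sketch of the pattern, assuming HTTPResponse carries the JSON `body` field that the real API reads elsewhere in this diff (the constructed response and URL are illustrative):

proc testOrderCheck() {.async.} =
  let mockApi = await MockACMEApi.new()
  # every post()/get() pops the first queued response
  mockApi.mockedResponses.add(HTTPResponse(body: %*{"status": "valid"}))
  let response = await mockApi.get(parseUri("https://example/order"))
  assert response.body["status"].getStr == "valid"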
73  libp2p/autotls/acme/utils.nim  Normal file
@@ -0,0 +1,73 @@
import ../../errors

type ACMEError* = object of LPError

when defined(libp2p_autotls_support):
  import base64, strutils, chronos/apps/http/httpclient, json
  import ../../transports/tls/certificate_ffi
  import ../../transports/tls/certificate
  import ../../crypto/crypto
  import ../../crypto/rsa

  proc keyOrError*(table: HttpTable, key: string): string {.raises: [ValueError].} =
    if not table.contains(key):
      raise newException(ValueError, "key " & key & " not present in headers")
    table.getString(key)

  proc base64UrlEncode*(data: seq[byte]): string =
    ## Encodes data using base64url (RFC 4648, section 5): URL-safe, no padding
    var encoded = base64.encode(data, safe = true)
    # base64 output carries at most two '=' padding chars; strip both
    encoded.removeSuffix("=")
    encoded.removeSuffix("=")
    return encoded

  proc thumbprint*(key: KeyPair): string =
    doAssert key.seckey.scheme == PKScheme.RSA, "unsupported keytype"
    let pubkey = key.pubkey.rsakey
    let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
    let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))

    let n = base64UrlEncode(nArray)
    let e = base64UrlEncode(eArray)
    # JWK members are in lexicographic order, as RFC 7638 requires for a
    # stable thumbprint
    let keyJson = %*{"e": e, "kty": "RSA", "n": n}
    let digest = sha256.digest($keyJson)
    return base64UrlEncode(@(digest.data))

  proc getResponseBody*(
      response: HttpClientResponseRef
  ): Future[JsonNode] {.async: (raises: [ACMEError, CancelledError]).} =
    try:
      let bodyBytes = await response.getBodyBytes()
      if bodyBytes.len > 0:
        return bytesToString(bodyBytes).parseJson()
      return %*{} # empty body
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      raise newException(
        ACMEError, "Unexpected error occurred while getting body bytes", exc
      )
    except Exception as exc: # this is required for nim 1.6
      raise newException(
        ACMEError, "Unexpected error occurred while getting body bytes", exc
      )

  proc createCSR*(
      domain: string, certKeyPair: KeyPair
  ): string {.raises: [ACMEError].} =
    var certKey: cert_key_t
    var certCtx: cert_context_t
    var derCSR: ptr cert_buffer = nil

    # convert KeyPair to cert_key_t
    let rawSeckey: seq[byte] = certKeyPair.seckey.getRawBytes.valueOr:
      raise newException(ACMEError, "Failed to get seckey raw bytes (DER)")
    let seckeyBuffer = rawSeckey.toCertBuffer()
    if cert_new_key_t(seckeyBuffer.unsafeAddr, certKey.addr) != CERT_SUCCESS:
      raise newException(ACMEError, "Failed to convert key pair to cert_key_t")

    # create CSR
    if cert_signing_req(domain.cstring, certKey, derCSR.addr) != CERT_SUCCESS:
      raise newException(ACMEError, "Failed to create CSR")

    base64.encode(derCSR.toSeq, safe = true)
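base64UrlEncode and thumbprint together produce the account-key fingerprint that DNS-01 key authorizations are built from (token & "." & thumbprint, hashed and re-encoded). A small worked sketch of the encoding step alone:

# base64url emits no '+', '/' or '=', so values can go straight into URLs
# and DNS TXT records
let raw = @[byte 0xfb, 0xef, 0xff]
assert base64UrlEncode(raw) == "--__" # standard base64 would be "++//"
# padding is stripped: one byte encodes to two chars, not "-w=="
assert base64UrlEncode(@[byte 0xfb]) == "-w"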
33  libp2p/autotls/mockservice.nim  Normal file
@@ -0,0 +1,33 @@
when defined(libp2p_autotls_support):
  import ./service, ./acme/client, ../peeridauth/client

  import ../crypto/crypto, ../crypto/rsa, websock/websock

  type MockAutotlsService* = ref object of AutotlsService
    mockedCert*: TLSCertificate
    mockedKey*: TLSPrivateKey

  proc new*(
      T: typedesc[MockAutotlsService],
      rng: ref HmacDrbgContext = newRng(),
      config: AutotlsConfig = AutotlsConfig.new(),
  ): T =
    T(
      acmeClient:
        ACMEClient.new(api = ACMEApi.new(acmeServerURL = config.acmeServerURL)),
      brokerClient: PeerIDAuthClient.new(),
      bearer: Opt.none(BearerToken),
      cert: Opt.none(AutotlsCert),
      certReady: newAsyncEvent(),
      running: newAsyncEvent(),
      config: config,
      rng: rng,
    )

  method getCertWhenReady*(
      self: MockAutotlsService
  ): Future[AutotlsCert] {.async: (raises: [AutoTLSError, CancelledError]).} =
    AutotlsCert.new(self.mockedCert, self.mockedKey, Moment.now)

  method setup*(self: MockAutotlsService) {.base, async.} =
    self.running.fire()
295  libp2p/autotls/service.nim  Normal file
@@ -0,0 +1,295 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}
{.push public.}

import chronos, chronicles, net, results
import chronos/apps/http/httpclient, bearssl/rand

import
  ./acme/client,
  ./utils,
  ../crypto/crypto,
  ../nameresolving/nameresolver,
  ../peeridauth/client,
  ../switch,
  ../peerinfo,
  ../wire

logScope:
  topics = "libp2p autotls"

export LetsEncryptURL, AutoTLSError

const
  DefaultDnsServers* =
    @[
      initTAddress("1.1.1.1:53"),
      initTAddress("1.0.0.1:53"),
      initTAddress("[2606:4700:4700::1111]:53"),
    ]
  DefaultRenewCheckTime* = 1.hours
  DefaultRenewBufferTime = 1.hours

  DefaultIssueRetries = 3
  DefaultIssueRetryTime = 1.seconds

  AutoTLSBroker* = "registration.libp2p.direct"
  AutoTLSDNSServer* = "libp2p.direct"
  HttpOk* = 200
  HttpCreated* = 201
  # NoneIp is needed because nim 1.6.16 can't do proper generic inference
  NoneIp = Opt.none(IpAddress)

type SigParam = object
  k: string
  v: seq[byte]

type AutotlsCert* = ref object
  cert*: TLSCertificate
  privkey*: TLSPrivateKey
  expiry*: Moment

type AutotlsConfig* = ref object
  acmeServerURL*: Uri
  nameResolver*: NameResolver
  ipAddress: Opt[IpAddress]
  renewCheckTime*: Duration
  renewBufferTime*: Duration
  issueRetries*: int
  issueRetryTime*: Duration

type AutotlsService* = ref object of Service
  acmeClient*: ACMEClient
  brokerClient*: PeerIDAuthClient
  bearer*: Opt[BearerToken]
  cert*: Opt[AutotlsCert]
  certReady*: AsyncEvent
  running*: AsyncEvent
  config*: AutotlsConfig
  managerFut: Future[void]
  peerInfo: PeerInfo
  rng*: ref HmacDrbgContext
when defined(libp2p_autotls_support):
  import json, sequtils, bearssl/pem

  import
    ../crypto/rsa,
    ../utils/heartbeat,
    ../transports/transport,
    ../utils/ipaddr,
    ../transports/tcptransport,
    ../nameresolving/dnsresolver

  proc new*(
      T: typedesc[AutotlsCert],
      cert: TLSCertificate,
      privkey: TLSPrivateKey,
      expiry: Moment,
  ): T =
    T(cert: cert, privkey: privkey, expiry: expiry)

  method getCertWhenReady*(
      self: AutotlsService
  ): Future[AutotlsCert] {.base, async: (raises: [AutoTLSError, CancelledError]).} =
    await self.certReady.wait()
    return self.cert.get

  proc new*(
      T: typedesc[AutotlsConfig],
      ipAddress: Opt[IpAddress] = NoneIp,
      nameServers: seq[TransportAddress] = DefaultDnsServers,
      acmeServerURL: Uri = parseUri(LetsEncryptURL),
      renewCheckTime: Duration = DefaultRenewCheckTime,
      renewBufferTime: Duration = DefaultRenewBufferTime,
      issueRetries: int = DefaultIssueRetries,
      issueRetryTime: Duration = DefaultIssueRetryTime,
  ): T =
    T(
      nameResolver: DnsResolver.new(nameServers),
      acmeServerURL: acmeServerURL,
      ipAddress: ipAddress,
      renewCheckTime: renewCheckTime,
      renewBufferTime: renewBufferTime,
      issueRetries: issueRetries,
      issueRetryTime: issueRetryTime,
    )

  proc new*(
      T: typedesc[AutotlsService],
      rng: ref HmacDrbgContext = newRng(),
      config: AutotlsConfig = AutotlsConfig.new(),
  ): T =
    T(
      acmeClient:
        ACMEClient.new(api = ACMEApi.new(acmeServerURL = config.acmeServerURL)),
      brokerClient: PeerIDAuthClient.new(),
      bearer: Opt.none(BearerToken),
      cert: Opt.none(AutotlsCert),
      certReady: newAsyncEvent(),
      running: newAsyncEvent(),
      config: config,
      managerFut: nil,
      peerInfo: nil,
      rng: rng,
    )

  method setup*(
      self: AutotlsService, switch: Switch
  ): Future[bool] {.async: (raises: [CancelledError]).} =
    trace "Setting up AutotlsService"
    let hasBeenSetup = await procCall Service(self).setup(switch)
    if hasBeenSetup:
      if self.config.ipAddress.isNone():
        try:
          self.config.ipAddress = Opt.some(getPublicIPAddress())
        except ValueError as exc:
          error "Failed to get public IP address", err = exc.msg
          return false
        except OSError as exc:
          error "Failed to get public IP address", err = exc.msg
          return false
      self.managerFut = self.run(switch)
    return hasBeenSetup
  method issueCertificate(
      self: AutotlsService
  ): Future[bool] {.
      base, async: (raises: [AutoTLSError, ACMEError, PeerIDAuthError, CancelledError])
  .} =
    trace "Issuing certificate"

    if self.peerInfo.isNil():
      error "Cannot issue new certificate: peerInfo not set"
      return false

    # generate autotls domain string: "*.{peerID}.libp2p.direct"
    let baseDomain =
      api.Domain(encodePeerId(self.peerInfo.peerId) & "." & AutoTLSDNSServer)
    let domain = api.Domain("*." & baseDomain)

    let acmeClient = self.acmeClient

    trace "Requesting ACME challenge"
    let dns01Challenge = await acmeClient.getChallenge(@[domain])
    trace "Generating key authorization"
    let keyAuth = acmeClient.genKeyAuthorization(dns01Challenge.dns01.token)

    let addrs = await self.peerInfo.expandAddrs()
    if addrs.len == 0:
      error "Unable to authenticate with broker: no addresses"
      return false

    let strMultiaddresses: seq[string] = addrs.mapIt($it)
    let payload = %*{"value": keyAuth, "addresses": strMultiaddresses}
    let registrationURL = parseUri("https://" & AutoTLSBroker & "/v1/_acme-challenge")

    trace "Sending challenge to AutoTLS broker"
    let (bearer, response) =
      await self.brokerClient.send(registrationURL, self.peerInfo, payload, self.bearer)
    if self.bearer.isNone():
      # save bearer token for future use
      self.bearer = Opt.some(bearer)
    if response.status != HttpOk:
      error "Failed to authenticate with AutoTLS Broker at " & AutoTLSBroker
      debug "Broker message",
        body = bytesToString(response.body), peerinfo = self.peerInfo
      return false

    let dashedIpAddr = ($self.config.ipAddress.get()).replace(".", "-")
    let acmeChalDomain = api.Domain("_acme-challenge." & baseDomain)
    let ip4Domain = api.Domain(dashedIpAddr & "." & baseDomain)
    debug "Waiting for DNS record to be set", ip = ip4Domain, acme = acmeChalDomain
    let dnsSet = await checkDNSRecords(
      self.config.nameResolver, self.config.ipAddress.get(), baseDomain, keyAuth
    )
    if not dnsSet:
      error "DNS records not set"
      return false

    trace "Notifying challenge completion to ACME and downloading cert"
    let certKeyPair = KeyPair.random(PKScheme.RSA, self.rng[]).get()

    let certificate =
      await acmeClient.getCertificate(domain, certKeyPair, dns01Challenge)

    let derPrivKey = certKeyPair.seckey.rsakey.getBytes.valueOr:
      raise newException(AutoTLSError, "Unable to get TLS private key")
    let pemPrivKey: string = derPrivKey.pemEncode("PRIVATE KEY")
    debug "autotls cert", pemPrivKey = pemPrivKey, cert = certificate.rawCertificate

    trace "Installing certificate"
    let newCert =
      try:
        AutotlsCert.new(
          TLSCertificate.init(certificate.rawCertificate),
          TLSPrivateKey.init(pemPrivKey),
          asMoment(certificate.certificateExpiry),
        )
      except TLSStreamProtocolError:
        error "Could not parse downloaded certificates"
        return false
    self.cert = Opt.some(newCert)
    self.certReady.fire()
    trace "Certificate installed"
    true
  proc hasTcpStarted(switch: Switch): bool =
    switch.transports.filterIt(it of TcpTransport and it.running).len > 0

  proc tryIssueCertificate(self: AutotlsService) {.async: (raises: [CancelledError]).} =
    for _ in 0 ..< self.config.issueRetries:
      try:
        if await self.issueCertificate():
          return
      except CancelledError as exc:
        raise exc
      except CatchableError as exc:
        error "Failed to issue certificate", err = exc.msg
      await sleepAsync(self.config.issueRetryTime)
    error "Failed to issue certificate"

  method run*(
      self: AutotlsService, switch: Switch
  ) {.async: (raises: [CancelledError]).} =
    trace "Starting Autotls management"
    self.running.fire()
    self.peerInfo = switch.peerInfo

    # ensure that there's at least one TcpTransport running
    # for communicating with the autotls broker
    if not switch.hasTcpStarted():
      error "Could not find a running TcpTransport in switch"
      return

    heartbeat "Certificate Management", self.config.renewCheckTime:
      if self.cert.isNone():
        await self.tryIssueCertificate()

      # AutotlsService will renew the cert 1h before it expires;
      # if issuance failed above, cert is still none and is retried next beat
      if self.cert.isSome():
        let cert = self.cert.get
        let waitTime = cert.expiry - Moment.now - self.config.renewBufferTime
        if waitTime <= self.config.renewBufferTime:
          await self.tryIssueCertificate()

  method stop*(
      self: AutotlsService, switch: Switch
  ): Future[bool] {.async: (raises: [CancelledError]).} =
    let hasBeenStopped = await procCall Service(self).stop(switch)
    if hasBeenStopped:
      if not self.acmeClient.isNil():
        await self.acmeClient.close()
      if not self.brokerClient.isNil():
        await self.brokerClient.close()
      if not self.managerFut.isNil():
        await self.managerFut.cancelAndWait()
        self.managerFut = nil
    return hasBeenStopped
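The renewal check above is plain chronos Moment arithmetic. A standalone sketch of the same decision with illustrative numbers (the 90-day lifetime is an assumption, not something this diff fixes):

import chronos

let expiry = Moment.now() + 90.days # illustrative cert lifetime
let renewBufferTime = 1.hours
# renew once we are within the buffer window of the expiry
let waitTime = expiry - Moment.now() - renewBufferTime
if waitTime <= renewBufferTime:
  echo "renew now"
else:
  echo "check again on the next heartbeat"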
82  libp2p/autotls/utils.nim  Normal file
@@ -0,0 +1,82 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
{.push public.}

import chronos, chronicles
import ../errors

logScope:
  topics = "libp2p utils"

const
  DefaultDnsRetries = 3
  DefaultDnsRetryTime = 1.seconds

type AutoTLSError* = object of LPError

when defined(libp2p_autotls_support):
  import strutils
  from times import DateTime, toTime, toUnix
  import stew/base36
  import
    ../peerid,
    ../multihash,
    ../cid,
    ../multicodec,
    ../nameresolving/nameresolver,
    ./acme/client

  proc asMoment*(dt: DateTime): Moment =
    let unixTime: int64 = dt.toTime.toUnix
    return Moment.init(unixTime, Second)

  proc encodePeerId*(peerId: PeerId): string {.raises: [AutoTLSError].} =
    var mh: MultiHash
    let decodeResult = MultiHash.decode(peerId.data, mh)
    if decodeResult.isErr() or decodeResult.get() == -1:
      raise
        newException(AutoTLSError, "Failed to decode PeerId: invalid multihash format")

    let cidResult = Cid.init(CIDv1, multiCodec("libp2p-key"), mh)
    if cidResult.isErr():
      raise newException(AutoTLSError, "Failed to initialize CID from multihash")

    return Base36.encode(cidResult.get().data.buffer)

  proc checkDNSRecords*(
      nameResolver: NameResolver,
      ipAddress: IpAddress,
      baseDomain: api.Domain,
      keyAuth: KeyAuthorization,
      retries: int = DefaultDnsRetries,
  ): Future[bool] {.async: (raises: [AutoTLSError, CancelledError]).} =
    # if my ip address is 100.10.10.3 then the ip4Domain will be:
    # 100-10-10-3.{peerIdBase36}.libp2p.direct
    # and the acme challenge TXT domain will be:
    # _acme-challenge.{peerIdBase36}.libp2p.direct
    let dashedIpAddr = ($ipAddress).replace(".", "-")
    let acmeChalDomain = api.Domain("_acme-challenge." & baseDomain)
    let ip4Domain = api.Domain(dashedIpAddr & "." & baseDomain)

    var txt: seq[string]
    var ip4: seq[TransportAddress]
    for _ in 0 .. retries:
      txt = await nameResolver.resolveTxt(acmeChalDomain)
      try:
        ip4 = await nameResolver.resolveIp(ip4Domain, 0.Port)
      except CancelledError as exc:
        raise exc
      except CatchableError as exc:
        error "Failed to resolve IP", description = exc.msg # retry
      if txt.len > 0 and txt[0] == keyAuth and ip4.len > 0:
        return true
      await sleepAsync(DefaultDnsRetryTime)

    return false
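The comment above fixes the naming scheme; a concrete worked example with illustrative inputs (the base36 string is shortened and made up, a real value comes from encodePeerId):

import strutils

let peerIdB36 = "k51qzi5uqu5dkwkd" # illustrative
let baseDomain = peerIdB36 & ".libp2p.direct"
echo "100.10.10.3".replace(".", "-") & "." & baseDomain
  # -> 100-10-10-3.k51qzi5uqu5dkwkd.libp2p.direct
echo "_acme-challenge." & baseDomain
  # -> _acme-challenge.k51qzi5uqu5dkwkd.libp2p.direct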
@@ -9,42 +9,61 @@

## This module contains a Switch Building helper.
runnableExamples:
  let switch =
   SwitchBuilder.new()
   .withRng(rng)
   .withAddresses(multiaddress)
   # etc
   .build()
  let switch = SwitchBuilder.new().withRng(rng).withAddresses(multiaddress)
  # etc
  .build()

{.push raises: [].}

import options, tables, chronos, chronicles, sequtils
import
  options, tables, chronos, chronicles, sequtils,
  switch, peerid, peerinfo, stream/connection, multiaddress,
  crypto/crypto, transports/[transport, tcptransport],
  switch,
  peerid,
  peerinfo,
  stream/connection,
  multiaddress,
  crypto/crypto,
  transports/[transport, tcptransport, wstransport, memorytransport],
  muxers/[muxer, mplex/mplex, yamux/yamux],
  protocols/[identify, secure/secure, secure/noise, rendezvous],
  protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
  connmanager, upgrademngrs/muxedupgrade,
  protocols/connectivity/
    [autonat/server, autonatv2/server, relay/relay, relay/client, relay/rtransport],
  connmanager,
  upgrademngrs/muxedupgrade,
  observedaddrmanager,
  autotls/service,
  nameresolving/nameresolver,
  errors, utility
  errors,
  utility
import services/wildcardresolverservice

export
  switch, peerid, peerinfo, connection, multiaddress, crypto, errors
  switch, peerid, peerinfo, connection, multiaddress, crypto, errors, TLSPrivateKey,
  TLSCertificate, TLSFlags, ServerFlags

const MemoryAutoAddress* = memorytransport.MemoryAutoAddress
type
  TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [].}
  TransportProvider* {.deprecated: "Use TransportBuilder instead".} =
    proc(upgr: Upgrade, privateKey: PrivateKey): Transport {.gcsafe, raises: [].}

  TransportBuilder* {.public.} =
    proc(config: TransportConfig): Transport {.gcsafe, raises: [].}

  TransportConfig* = ref object
    upgr*: Upgrade
    privateKey*: PrivateKey
    autotls*: AutotlsService

  SecureProtocol* {.pure.} = enum
    Noise,
    Secio {.deprecated.}
    Noise

  SwitchBuilder* = ref object
    privKey: Option[PrivateKey]
    addresses: seq[MultiAddress]
    secureManagers: seq[SecureProtocol]
    muxers: seq[MuxerProvider]
    transports: seq[TransportProvider]
    transports: seq[TransportBuilder]
    rng: ref HmacDrbgContext
    maxConnections: int
    maxIn: int
@@ -56,16 +75,19 @@ type
    nameResolver: NameResolver
    peerStoreCapacity: Opt[int]
    autonat: bool
    autonatV2: bool
    autotls: AutotlsService
    circuitRelay: Relay
    rdv: RendezVous
    services: seq[Service]
    observedAddrManager: ObservedAddrManager
    enableWildcardResolver: bool

proc new*(T: type[SwitchBuilder]): T {.public.} =
  ## Creates a SwitchBuilder

  let address = MultiAddress
    .init("/ip4/127.0.0.1/tcp/0")
    .expect("Should initialize to default")
  let address =
    MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("Should initialize to default")

  SwitchBuilder(
    privKey: none(PrivateKey),
@@ -76,53 +98,59 @@ proc new*(T: type[SwitchBuilder]): T {.public.} =
    maxOut: -1,
    maxConnsPerPeer: MaxConnectionsPerPeer,
    protoVersion: ProtoVersion,
    agentVersion: AgentVersion)
    agentVersion: AgentVersion,
    enableWildcardResolver: true,
  )

proc withPrivateKey*(b: SwitchBuilder, privateKey: PrivateKey): SwitchBuilder {.public.} =
proc withPrivateKey*(
    b: SwitchBuilder, privateKey: PrivateKey
): SwitchBuilder {.public.} =
  ## Set the private key of the switch. Will be used to
  ## generate a PeerId

  b.privKey = some(privateKey)
  b

proc withAddress*(b: SwitchBuilder, address: MultiAddress): SwitchBuilder {.public.} =
  ## | Set the listening address of the switch
  ## | Calling it multiple times will override the value

  b.addresses = @[address]
  b

proc withAddresses*(b: SwitchBuilder, addresses: seq[MultiAddress]): SwitchBuilder {.public.} =
proc withAddresses*(
    b: SwitchBuilder, addresses: seq[MultiAddress], enableWildcardResolver: bool = true
): SwitchBuilder {.public.} =
  ## | Set the listening addresses of the switch
  ## | Calling it multiple times will override the value

  b.addresses = addresses
  b.enableWildcardResolver = enableWildcardResolver
  b

proc withAddress*(
    b: SwitchBuilder, address: MultiAddress, enableWildcardResolver: bool = true
): SwitchBuilder {.public.} =
  ## | Set the listening address of the switch
  ## | Calling it multiple times will override the value
  b.withAddresses(@[address], enableWildcardResolver)

proc withSignedPeerRecord*(b: SwitchBuilder, sendIt = true): SwitchBuilder {.public.} =
  b.sendSignedPeerRecord = sendIt
  b

proc withMplex*(
    b: SwitchBuilder,
    inTimeout = 5.minutes,
    outTimeout = 5.minutes,
    maxChannCount = 200): SwitchBuilder {.public.} =
    b: SwitchBuilder, inTimeout = 5.minutes, outTimeout = 5.minutes, maxChannCount = 200
): SwitchBuilder {.public.} =
  ## | Uses `Mplex <https://docs.libp2p.io/concepts/stream-multiplexing/#mplex>`_ as a multiplexer
  ## | `Timeout` is the duration after which an inactive connection will be closed
  proc newMuxer(conn: Connection): Muxer =
    Mplex.new(
      conn,
      inTimeout,
      outTimeout,
      maxChannCount)
    Mplex.new(conn, inTimeout, outTimeout, maxChannCount)

  assert b.muxers.countIt(it.codec == MplexCodec) == 0, "Mplex built multiple times"
  b.muxers.add(MuxerProvider.new(newMuxer, MplexCodec))
  b

proc withYamux*(b: SwitchBuilder): SwitchBuilder =
  proc newMuxer(conn: Connection): Muxer = Yamux.new(conn)
proc withYamux*(
    b: SwitchBuilder,
    windowSize: int = YamuxDefaultWindowSize,
    inTimeout: Duration = 5.minutes,
    outTimeout: Duration = 5.minutes,
): SwitchBuilder =
  proc newMuxer(conn: Connection): Muxer =
    Yamux.new(conn, windowSize, inTimeout = inTimeout, outTimeout = outTimeout)

  assert b.muxers.countIt(it.codec == YamuxCodec) == 0, "Yamux built multiple times"
  b.muxers.add(MuxerProvider.new(newMuxer, YamuxCodec))
@@ -132,24 +160,81 @@ proc withNoise*(b: SwitchBuilder): SwitchBuilder {.public.} =
  b.secureManagers.add(SecureProtocol.Noise)
  b

proc withTransport*(b: SwitchBuilder, prov: TransportProvider): SwitchBuilder {.public.} =
proc withTransport*(
    b: SwitchBuilder, prov: TransportBuilder
): SwitchBuilder {.public.} =
  ## Use a custom transport
  runnableExamples:
    let switch =
      SwitchBuilder.new()
      .withTransport(proc(upgr: Upgrade): Transport = TcpTransport.new(flags, upgr))
    let switch = SwitchBuilder
      .new()
      .withTransport(
        proc(config: TransportConfig): Transport =
          TcpTransport.new(flags, config.upgr)
      )
      .build()
  b.transports.add(prov)
  b

proc withTcpTransport*(b: SwitchBuilder, flags: set[ServerFlags] = {}): SwitchBuilder {.public.} =
  b.withTransport(proc(upgr: Upgrade): Transport = TcpTransport.new(flags, upgr))
proc withTransport*(
    b: SwitchBuilder, prov: TransportProvider
): SwitchBuilder {.deprecated: "Use TransportBuilder instead".} =
  ## Use a custom transport
  runnableExamples:
    let switch = SwitchBuilder
      .new()
      .withTransport(
        proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
          TcpTransport.new(flags, upgr)
      )
      .build()
  let tBuilder: TransportBuilder = proc(config: TransportConfig): Transport =
    prov(config.upgr, config.privateKey)
  b.withTransport(tBuilder)

proc withTcpTransport*(
    b: SwitchBuilder, flags: set[ServerFlags] = {}
): SwitchBuilder {.public.} =
  b.withTransport(
    proc(config: TransportConfig): Transport =
      TcpTransport.new(flags, config.upgr)
  )

proc withWsTransport*(
    b: SwitchBuilder,
    tlsPrivateKey: TLSPrivateKey = nil,
    tlsCertificate: TLSCertificate = nil,
    tlsFlags: set[TLSFlags] = {},
    flags: set[ServerFlags] = {},
): SwitchBuilder =
  b.withTransport(
    proc(config: TransportConfig): Transport =
      WsTransport.new(
        config.upgr, tlsPrivateKey, tlsCertificate, config.autotls, tlsFlags, flags
      )
  )

when defined(libp2p_quic_support):
  import transports/quictransport

  proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
    b.withTransport(
      proc(config: TransportConfig): Transport =
        QuicTransport.new(config.upgr, config.privateKey)
    )

proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
  b.withTransport(
    proc(config: TransportConfig): Transport =
      MemoryTransport.new(config.upgr)
  )

proc withRng*(b: SwitchBuilder, rng: ref HmacDrbgContext): SwitchBuilder {.public.} =
  b.rng = rng
  b

proc withMaxConnections*(b: SwitchBuilder, maxConnections: int): SwitchBuilder {.public.} =
proc withMaxConnections*(
    b: SwitchBuilder, maxConnections: int
): SwitchBuilder {.public.} =
  ## Maximum concurrent connections of the switch. You should either use this, or
  ## `withMaxIn <#withMaxIn,SwitchBuilder,int>`_ & `withMaxOut<#withMaxOut,SwitchBuilder,int>`_
  b.maxConnections = maxConnections
@@ -165,7 +250,9 @@ proc withMaxOut*(b: SwitchBuilder, maxOut: int): SwitchBuilder {.public.} =
  b.maxOut = maxOut
  b

proc withMaxConnsPerPeer*(b: SwitchBuilder, maxConnsPerPeer: int): SwitchBuilder {.public.} =
proc withMaxConnsPerPeer*(
    b: SwitchBuilder, maxConnsPerPeer: int
): SwitchBuilder {.public.} =
  b.maxConnsPerPeer = maxConnsPerPeer
  b

@@ -173,15 +260,21 @@ proc withPeerStore*(b: SwitchBuilder, capacity: int): SwitchBuilder {.public.} =
  b.peerStoreCapacity = Opt.some(capacity)
  b

proc withProtoVersion*(b: SwitchBuilder, protoVersion: string): SwitchBuilder {.public.} =
proc withProtoVersion*(
    b: SwitchBuilder, protoVersion: string
): SwitchBuilder {.public.} =
  b.protoVersion = protoVersion
  b

proc withAgentVersion*(b: SwitchBuilder, agentVersion: string): SwitchBuilder {.public.} =
proc withAgentVersion*(
    b: SwitchBuilder, agentVersion: string
): SwitchBuilder {.public.} =
  b.agentVersion = agentVersion
  b

proc withNameResolver*(b: SwitchBuilder, nameResolver: NameResolver): SwitchBuilder {.public.} =
proc withNameResolver*(
    b: SwitchBuilder, nameResolver: NameResolver
): SwitchBuilder {.public.} =
  b.nameResolver = nameResolver
  b

@@ -189,11 +282,24 @@ proc withAutonat*(b: SwitchBuilder): SwitchBuilder =
  b.autonat = true
  b

proc withAutonatV2*(b: SwitchBuilder): SwitchBuilder =
  b.autonatV2 = true
  b

when defined(libp2p_autotls_support):
  proc withAutotls*(
      b: SwitchBuilder, config: AutotlsConfig = AutotlsConfig.new()
  ): SwitchBuilder {.public.} =
    b.autotls = AutotlsService.new(config = config)
    b

proc withCircuitRelay*(b: SwitchBuilder, r: Relay = Relay.new()): SwitchBuilder =
  b.circuitRelay = r
  b

proc withRendezVous*(b: SwitchBuilder, rdv: RendezVous = RendezVous.new()): SwitchBuilder =
proc withRendezVous*(
    b: SwitchBuilder, rdv: RendezVous = RendezVous.new()
): SwitchBuilder =
  b.rdv = rdv
  b
@@ -201,40 +307,55 @@ proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
  b.services = services
  b

proc build*(b: SwitchBuilder): Switch
  {.raises: [LPError], public.} =
proc withObservedAddrManager*(
    b: SwitchBuilder, observedAddrManager: ObservedAddrManager
): SwitchBuilder =
  b.observedAddrManager = observedAddrManager
  b

proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
  if b.rng == nil: # newRng could fail
    raise newException(Defect, "Cannot initialize RNG")

  let pkRes = PrivateKey.random(b.rng[])
  let
    seckey = b.privKey.get(otherwise = pkRes.expect("Expected default Private Key"))
  let seckey = b.privKey.get(otherwise = pkRes.expect("Expected default Private Key"))

  var
    secureManagerInstances: seq[Secure]
  if b.secureManagers.len == 0:
    debug "no secure managers defined. Adding noise by default"
    b.secureManagers.add(SecureProtocol.Noise)

  var secureManagerInstances: seq[Secure]
  if SecureProtocol.Noise in b.secureManagers:
    secureManagerInstances.add(Noise.new(b.rng, seckey).Secure)

  let
    peerInfo = PeerInfo.new(
      seckey,
      b.addresses,
      protoVersion = b.protoVersion,
      agentVersion = b.agentVersion)
  let peerInfo = PeerInfo.new(
    seckey, b.addresses, protoVersion = b.protoVersion, agentVersion = b.agentVersion
  )

  let identify =
    if b.observedAddrManager != nil:
      Identify.new(peerInfo, b.sendSignedPeerRecord, b.observedAddrManager)
    else:
      Identify.new(peerInfo, b.sendSignedPeerRecord)

  let
    identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
    connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
    connManager =
      ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
    ms = MultistreamSelect.new()
    muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, connManager, ms)
    muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)

  let
    transports = block:
      var transports: seq[Transport]
      for tProvider in b.transports:
        transports.add(tProvider(muxedUpgrade))
      transports
  if not b.autotls.isNil():
    b.services.insert(b.autotls, 0)

  let transports = block:
    var transports: seq[Transport]
    for tProvider in b.transports:
      transports.add(
        tProvider(
          TransportConfig(upgr: muxedUpgrade, privateKey: seckey, autotls: b.autotls)
        )
      )
    transports

  if b.secureManagers.len == 0:
    b.secureManagers &= SecureProtocol.Noise
@@ -248,6 +369,9 @@ proc build*(b: SwitchBuilder): Switch
  else:
    PeerStore.new(identify)

  if b.enableWildcardResolver:
    b.services.insert(WildcardAddressResolverService.new(), 0)

  let switch = newSwitch(
    peerInfo = peerInfo,
    transports = transports,
@@ -256,11 +380,15 @@ proc build*(b: SwitchBuilder): Switch
    ms = ms,
    nameResolver = b.nameResolver,
    peerStore = peerStore,
    services = b.services)
    services = b.services,
  )

  switch.mount(identify)

  if b.autonat:
  if b.autonatV2:
    let autonatV2 = AutonatV2.new(switch)
    switch.mount(autonatV2)
  elif b.autonat:
    let autonat = Autonat.new(switch)
    switch.mount(autonat)

@@ -276,31 +404,29 @@ proc build*(b: SwitchBuilder): Switch

  return switch

proc newStandardSwitch*(
  privKey = none(PrivateKey),
  addrs: MultiAddress | seq[MultiAddress] = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
  secureManagers: openArray[SecureProtocol] = [
    SecureProtocol.Noise,
  ],
  transportFlags: set[ServerFlags] = {},
  rng = newRng(),
  inTimeout: Duration = 5.minutes,
  outTimeout: Duration = 5.minutes,
  maxConnections = MaxConnections,
  maxIn = -1,
  maxOut = -1,
  maxConnsPerPeer = MaxConnectionsPerPeer,
  nameResolver: NameResolver = nil,
  sendSignedPeerRecord = false,
  peerStoreCapacity = 1000): Switch
  {.raises: [LPError], public.} =
proc newStandardSwitchBuilder*(
    privKey = none(PrivateKey),
    addrs: MultiAddress | seq[MultiAddress] =
      MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("valid address"),
    secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
    transportFlags: set[ServerFlags] = {},
    rng = newRng(),
    inTimeout: Duration = 5.minutes,
    outTimeout: Duration = 5.minutes,
    maxConnections = MaxConnections,
    maxIn = -1,
    maxOut = -1,
    maxConnsPerPeer = MaxConnectionsPerPeer,
    nameResolver: NameResolver = nil,
    sendSignedPeerRecord = false,
    peerStoreCapacity = 1000,
): SwitchBuilder {.raises: [LPError], public.} =
  ## Helper for common switch configurations.
  {.push warning[Deprecated]: off.}
  if SecureProtocol.Secio in secureManagers:
    quit("Secio is deprecated!") # use of secio is unsafe
  {.pop.}

  let addrs = when addrs is MultiAddress: @[addrs] else: addrs
  let addrs =
    when addrs is MultiAddress:
      @[addrs]
    else:
      addrs
  var b = SwitchBuilder
    .new()
    .withAddresses(addrs)
@@ -310,7 +436,7 @@ proc newStandardSwitch*(
    .withMaxIn(maxIn)
    .withMaxOut(maxOut)
    .withMaxConnsPerPeer(maxConnsPerPeer)
    .withPeerStore(capacity=peerStoreCapacity)
    .withPeerStore(capacity = peerStoreCapacity)
    .withMplex(inTimeout, outTimeout)
    .withTcpTransport(transportFlags)
    .withNameResolver(nameResolver)
@@ -319,4 +445,39 @@ proc newStandardSwitch*(
  privKey.withValue(pkey):
    b = b.withPrivateKey(pkey)

  b.build()
  b

proc newStandardSwitch*(
    privKey = none(PrivateKey),
    addrs: MultiAddress | seq[MultiAddress] =
      MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("valid address"),
    secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
    transportFlags: set[ServerFlags] = {},
    rng = newRng(),
    inTimeout: Duration = 5.minutes,
    outTimeout: Duration = 5.minutes,
    maxConnections = MaxConnections,
    maxIn = -1,
    maxOut = -1,
    maxConnsPerPeer = MaxConnectionsPerPeer,
    nameResolver: NameResolver = nil,
    sendSignedPeerRecord = false,
    peerStoreCapacity = 1000,
): Switch {.raises: [LPError], public.} =
  newStandardSwitchBuilder(
    privKey = privKey,
    addrs = addrs,
    secureManagers = secureManagers,
    transportFlags = transportFlags,
    rng = rng,
    inTimeout = inTimeout,
    outTimeout = outTimeout,
    maxConnections = maxConnections,
    maxIn = maxIn,
    maxOut = maxOut,
    maxConnsPerPeer = maxConnsPerPeer,
    nameResolver = nameResolver,
    sendSignedPeerRecord = sendSignedPeerRecord,
    peerStoreCapacity = peerStoreCapacity,
  )
  .build()
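With the builder additions above, wiring AutoTLS into a switch is one extra call. A hedged sketch, assuming -d:libp2p_autotls_support is defined and the node has a reachable public IP (the listen address is illustrative):

let switch = SwitchBuilder
  .new()
  .withRng(newRng())
  .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/4001/wss").expect("valid address")])
  .withTcpTransport()
  .withWsTransport() # picks up the certificate via config.autotls
  .withAutotls()     # only available under -d:libp2p_autotls_support
  .withYamux()
  .withNoise()
  .build()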
139  libp2p/cid.nim
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -10,19 +10,27 @@
## This module implements CID (Content IDentifier).

{.push raises: [].}
{.used.}

import tables, hashes
import multibase, multicodec, multihash, vbuffer, varint
import stew/[base58, results]
import multibase, multicodec, multihash, vbuffer, varint, results
import stew/base58
import ./utils/sequninit

export results

type
  CidError* {.pure.} = enum
    Error, Incorrect, Unsupported, Overrun
    Error
    Incorrect
    Unsupported
    Overrun

  CidVersion* = enum
    CIDvIncorrect, CIDv0, CIDv1, CIDvReserved
    CIDvIncorrect
    CIDv0
    CIDv1
    CIDvReserved

  Cid* = object
    cidver*: CidVersion
@@ -30,54 +38,52 @@ type
    hpos*: int
    data*: VBuffer

const
  ContentIdsList = [
    multiCodec("raw"),
    multiCodec("dag-pb"),
    multiCodec("dag-cbor"),
    multiCodec("dag-json"),
    multiCodec("git-raw"),
    multiCodec("eth-block"),
    multiCodec("eth-block-list"),
    multiCodec("eth-tx-trie"),
    multiCodec("eth-tx"),
    multiCodec("eth-tx-receipt-trie"),
    multiCodec("eth-tx-receipt"),
    multiCodec("eth-state-trie"),
    multiCodec("eth-account-snapshot"),
    multiCodec("eth-storage-trie"),
    multiCodec("bitcoin-block"),
    multiCodec("bitcoin-tx"),
    multiCodec("zcash-block"),
    multiCodec("zcash-tx"),
    multiCodec("stellar-block"),
    multiCodec("stellar-tx"),
    multiCodec("decred-block"),
    multiCodec("decred-tx"),
    multiCodec("dash-block"),
    multiCodec("dash-tx"),
    multiCodec("torrent-info"),
    multiCodec("torrent-file"),
    multiCodec("ed25519-pub")
  ]
const ContentIdsList = [
  multiCodec("raw"),
  multiCodec("dag-pb"),
  multiCodec("dag-cbor"),
  multiCodec("dag-json"),
  multiCodec("libp2p-key"),
  multiCodec("git-raw"),
  multiCodec("eth-block"),
  multiCodec("eth-block-list"),
  multiCodec("eth-tx-trie"),
  multiCodec("eth-tx"),
  multiCodec("eth-tx-receipt-trie"),
  multiCodec("eth-tx-receipt"),
  multiCodec("eth-state-trie"),
  multiCodec("eth-account-snapshot"),
  multiCodec("eth-storage-trie"),
  multiCodec("bitcoin-block"),
  multiCodec("bitcoin-tx"),
  multiCodec("zcash-block"),
  multiCodec("zcash-tx"),
  multiCodec("stellar-block"),
  multiCodec("stellar-tx"),
  multiCodec("decred-block"),
  multiCodec("decred-tx"),
  multiCodec("dash-block"),
  multiCodec("dash-tx"),
  multiCodec("torrent-info"),
  multiCodec("torrent-file"),
  multiCodec("ed25519-pub"),
]

proc initCidCodeTable(): Table[int, MultiCodec] {.compileTime.} =
  for item in ContentIdsList:
    result[int(item)] = item

const
  CodeContentIds = initCidCodeTable()
const CodeContentIds = initCidCodeTable()

template orError*(exp: untyped, err: untyped): untyped =
  (exp.mapErr do (_: auto) -> auto: err)
  exp.mapErr do(_: auto) -> auto:
    err

proc decode(data: openArray[byte]): Result[Cid, CidError] =
  if len(data) == 34 and data[0] == 0x12'u8 and data[1] == 0x20'u8:
    ok(Cid(
      cidver: CIDv0,
      mcodec: multiCodec("dag-pb"),
      hpos: 0,
      data: initVBuffer(data)))
    ok(
      Cid(cidver: CIDv0, mcodec: multiCodec("dag-pb"), hpos: 0, data: initVBuffer(data))
    )
  else:
    var version, codec: uint64
    var res, offset: int
@@ -98,21 +104,18 @@ proc decode(data: openArray[byte]): Result[Cid, CidError] =
        err(CidError.Incorrect)
      else:
        offset += res
        var mcodec = CodeContentIds.getOrDefault(cast[int](codec),
          InvalidMultiCodec)
        var mcodec =
          CodeContentIds.getOrDefault(cast[int](codec), InvalidMultiCodec)
        if mcodec == InvalidMultiCodec:
          err(CidError.Incorrect)
        else:
          if not MultiHash.validate(vb.buffer.toOpenArray(vb.offset,
            vb.buffer.high)):
          if not MultiHash.validate(
            vb.buffer.toOpenArray(vb.offset, vb.buffer.high)
          ):
            err(CidError.Incorrect)
          else:
            vb.finish()
            ok(Cid(
              cidver: CIDv1,
              mcodec: mcodec,
              hpos: offset,
              data: vb))
            ok(Cid(cidver: CIDv1, mcodec: mcodec, hpos: offset, data: vb))

proc decode(data: openArray[char]): Result[Cid, CidError] =
  var buffer: seq[byte]
@@ -121,7 +124,7 @@ proc decode(data: openArray[char]): Result[Cid, CidError] =
    return err(CidError.Incorrect)
  if len(data) == 46:
    if data[0] == 'Q' and data[1] == 'm':
      buffer = newSeq[byte](BTCBase58.decodedLength(len(data)))
      buffer = newSeqUninit[byte](BTCBase58.decodedLength(len(data)))
      if BTCBase58.decode(data, buffer, plen) != Base58Status.Success:
        return err(CidError.Incorrect)
      buffer.setLen(plen)
@@ -129,7 +132,7 @@ proc decode(data: openArray[char]): Result[Cid, CidError] =
    let length = MultiBase.decodedLength(data[0], len(data))
    if length == -1:
      return err(CidError.Incorrect)
    buffer = newSeq[byte](length)
    buffer = newSeqUninit[byte](length)
    if MultiBase.decode(data, buffer, plen) != MultiBaseStatus.Success:
      return err(CidError.Incorrect)
    buffer.setLen(plen)
@@ -172,7 +175,9 @@ proc mhash*(cid: Cid): Result[MultiHash, CidError] =
  if cid.cidver notin {CIDv0, CIDv1}:
    err(CidError.Incorrect)
  else:
    MultiHash.init(cid.data.buffer.toOpenArray(cid.hpos, cid.data.high)).orError(CidError.Incorrect)
    MultiHash.init(cid.data.buffer.toOpenArray(cid.hpos, cid.data.high)).orError(
      CidError.Incorrect
    )

proc contentType*(cid: Cid): Result[MultiCodec, CidError] =
  ## Returns content type part of CID
@@ -185,12 +190,15 @@ proc version*(cid: Cid): CidVersion =
  ## Returns CID version
  result = cid.cidver

proc init*[T: char|byte](ctype: typedesc[Cid], data: openArray[T]): Result[Cid, CidError] =
proc init*[T: char | byte](
    ctype: typedesc[Cid], data: openArray[T]
): Result[Cid, CidError] =
  ## Create new content identifier using array of bytes or string ``data``.
  decode(data)

proc init*(ctype: typedesc[Cid], version: CidVersion, content: MultiCodec,
  hash: MultiHash): Result[Cid, CidError] =
proc init*(
    ctype: typedesc[Cid], version: CidVersion, content: MultiCodec, hash: MultiHash
): Result[Cid, CidError] =
  ## Create new content identifier using content type ``content`` and
  ## MultiHash ``hash`` using version ``version``.
  ##
@@ -213,8 +221,7 @@ proc init*(ctype: typedesc[Cid], version: CidVersion, content: MultiCodec,
    res.data.finish()
    return ok(res)
  elif version == CIDv1:
    let mcodec = CodeContentIds.getOrDefault(cast[int](content),
      InvalidMultiCodec)
    let mcodec = CodeContentIds.getOrDefault(cast[int](content), InvalidMultiCodec)
    if mcodec == InvalidMultiCodec:
      return err(CidError.Incorrect)
    res.mcodec = mcodec
@@ -233,11 +240,9 @@ proc `==`*(a: Cid, b: Cid): bool =
  ## are equal, ``false`` otherwise.
  if a.mcodec == b.mcodec:
    var ah, bh: MultiHash
    if MultiHash.decode(
      a.data.buffer.toOpenArray(a.hpos, a.data.high), ah).isErr:
    if MultiHash.decode(a.data.buffer.toOpenArray(a.hpos, a.data.high), ah).isErr:
      return false
    if MultiHash.decode(
      b.data.buffer.toOpenArray(b.hpos, b.data.high), bh).isErr:
    if MultiHash.decode(b.data.buffer.toOpenArray(b.hpos, b.data.high), bh).isErr:
      return false
    result = (ah == bh)

@@ -261,12 +266,6 @@ proc write*(vb: var VBuffer, cid: Cid) {.inline.} =
  ## Write CID value ``cid`` to buffer ``vb``.
  vb.writeArray(cid.data.buffer)

proc encode*(mbtype: typedesc[MultiBase], encoding: string,
  cid: Cid): string {.inline.} =
  ## Get MultiBase encoded representation of ``cid`` using encoding
  ## ``encoding``.
  result = MultiBase.encode(encoding, cid.data.buffer).tryGet()

proc hash*(cid: Cid): Hash {.inline.} =
  hash(cid.data.buffer)
@@ -11,12 +11,7 @@

import std/[tables, sequtils, sets]
import pkg/[chronos, chronicles, metrics]
import peerinfo,
  peerstore,
  stream/connection,
  muxers/muxer,
  utils/semaphore,
  errors
import peerinfo, peerstore, stream/connection, muxers/muxer, utils/semaphore, errors

logScope:
  topics = "libp2p connmanager"
@@ -32,12 +27,13 @@ type
  AlreadyExpectingConnectionError* = object of LPError

  ConnEventKind* {.pure.} = enum
    Connected, # A connection was made and securely upgraded - there may be
    # more than one concurrent connection thus more than one upgrade
    # event per peer.

    Disconnected # Peer disconnected - this event is fired once per upgrade
    # when the associated connection is terminated.
    Connected
      # A connection was made and securely upgraded - there may be
      # more than one concurrent connection thus more than one upgrade
      # event per peer.
    Disconnected
      # Peer disconnected - this event is fired once per upgrade
      # when the associated connection is terminated.

  ConnEvent* = object
    case kind*: ConnEventKind
@@ -46,23 +42,25 @@ type
    else:
      discard

  ConnEventHandler* =
    proc(peerId: PeerId, event: ConnEvent): Future[void]
    {.gcsafe, raises: [].}
  ConnEventHandler* = proc(peerId: PeerId, event: ConnEvent): Future[void] {.
    gcsafe, async: (raises: [CancelledError])
  .}

  PeerEventKind* {.pure.} = enum
    Left,
    Left
    Joined
    Identified

  PeerEvent* = object
    case kind*: PeerEventKind
    of PeerEventKind.Joined:
      initiator*: bool
    else:
      discard
    of PeerEventKind.Joined, PeerEventKind.Identified:
      initiator*: bool
    else:
      discard

  PeerEventHandler* =
    proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [].}
  PeerEventHandler* = proc(peerId: PeerId, event: PeerEvent): Future[void] {.
    gcsafe, async: (raises: [CancelledError])
  .}

  ConnManager* = ref object of RootObj
    maxConnsPerPeer: int
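The handler typedefs now carry `async: (raises: [CancelledError])`, so handlers are declared as async procs rather than raw closures returning futures. A minimal sketch of registering one under the new signature, assuming a `connManager` instance is in scope (the handler body is illustrative):

proc onConnected(
    peerId: PeerId, event: ConnEvent
) {.async: (raises: [CancelledError]).} =
  trace "peer connected", peer = peerId # handlers must not raise other errors

connManager.addConnEventHandler(onConnected, ConnEventKind.Connected)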
@@ -81,11 +79,13 @@ type
proc newTooManyConnectionsError(): ref TooManyConnectionsError {.inline.} =
  result = newException(TooManyConnectionsError, "Too many connections")

proc new*(C: type ConnManager,
  maxConnsPerPeer = MaxConnectionsPerPeer,
  maxConnections = MaxConnections,
  maxIn = -1,
  maxOut = -1): ConnManager =
proc new*(
    C: type ConnManager,
    maxConnsPerPeer = MaxConnectionsPerPeer,
    maxConnections = MaxConnections,
    maxIn = -1,
    maxOut = -1,
): ConnManager =
  var inSema, outSema: AsyncSemaphore
  if maxIn > 0 or maxOut > 0:
    inSema = newAsyncSemaphore(maxIn)
@@ -96,9 +96,7 @@ proc new*(C: type ConnManager,
  else:
    raiseAssert "Invalid connection counts!"

  C(maxConnsPerPeer: maxConnsPerPeer,
    inSema: inSema,
    outSema: outSema)
  C(maxConnsPerPeer: maxConnsPerPeer, inSema: inSema, outSema: outSema)

proc connCount*(c: ConnManager, peerId: PeerId): int =
  c.muxed.getOrDefault(peerId).len
@@ -113,22 +111,23 @@ proc connectedPeers*(c: ConnManager, dir: Direction): seq[PeerId] =
|
||||
proc getConnections*(c: ConnManager): Table[PeerId, seq[Muxer]] =
|
||||
return c.muxed
|
||||
|
||||
proc addConnEventHandler*(c: ConnManager,
|
||||
handler: ConnEventHandler,
|
||||
kind: ConnEventKind) =
|
||||
proc addConnEventHandler*(
|
||||
c: ConnManager, handler: ConnEventHandler, kind: ConnEventKind
|
||||
) =
|
||||
## Add peer event handler - handlers must not raise exceptions!
|
||||
##
|
||||
if isNil(handler): return
|
||||
if isNil(handler):
|
||||
return
|
||||
c.connEvents[kind].incl(handler)
|
||||
|
||||
proc removeConnEventHandler*(c: ConnManager,
|
||||
handler: ConnEventHandler,
|
||||
kind: ConnEventKind) =
|
||||
c.connEvents[kind].excl(handler)
|
||||
proc removeConnEventHandler*(
|
||||
c: ConnManager, handler: ConnEventHandler, kind: ConnEventKind
|
||||
) =
|
||||
c.connEvents[kind].excl(handler)
|
||||
|
||||
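A sketch of a handler matching the reworked `ConnEventHandler` shape and its registration; `onConnected` is a hypothetical name and chronos is assumed to be imported:

proc onConnected(
    peerId: PeerId, event: ConnEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
  # Registered for Connected only, so `incoming` is safe to read here.
  echo "connected: ", peerId, " incoming: ", event.incoming

let connMngr = ConnManager.new()
connMngr.addConnEventHandler(onConnected, ConnEventKind.Connected)
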
proc triggerConnEvent*(c: ConnManager,
peerId: PeerId,
event: ConnEvent) {.async, gcsafe.} =
proc triggerConnEvent*(
c: ConnManager, peerId: PeerId, event: ConnEvent
) {.async: (raises: [CancelledError]).} =
try:
trace "About to trigger connection events", peer = peerId
if c.connEvents[event.kind].len() > 0:
@@ -141,27 +140,27 @@ proc triggerConnEvent*(c: ConnManager,
except CancelledError as exc:
raise exc
except CatchableError as exc:
warn "Exception in triggerConnEvents",
msg = exc.msg, peer = peerId, event = $event
warn "Exception in triggerConnEvent",
description = exc.msg, peer = peerId, event = $event

proc addPeerEventHandler*(c: ConnManager,
handler: PeerEventHandler,
kind: PeerEventKind) =
proc addPeerEventHandler*(
c: ConnManager, handler: PeerEventHandler, kind: PeerEventKind
) =
## Add peer event handler - handlers must not raise exceptions!
##

if isNil(handler): return
if isNil(handler):
return
c.peerEvents[kind].incl(handler)

proc removePeerEventHandler*(c: ConnManager,
handler: PeerEventHandler,
kind: PeerEventKind) =
proc removePeerEventHandler*(
c: ConnManager, handler: PeerEventHandler, kind: PeerEventKind
) =
c.peerEvents[kind].excl(handler)

proc triggerPeerEvents*(c: ConnManager,
peerId: PeerId,
event: PeerEvent) {.async, gcsafe.} =

proc triggerPeerEvents*(
c: ConnManager, peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
trace "About to trigger peer events", peer = peerId
if c.peerEvents[event.kind].len == 0:
return
@@ -177,15 +176,20 @@ proc triggerPeerEvents*(c: ConnManager,
except CancelledError as exc:
raise exc
except CatchableError as exc: # handlers should not raise!
warn "Exception in triggerPeerEvents", exc = exc.msg, peer = peerId
warn "Exception in triggerPeerEvents", description = exc.msg, peer = peerId

proc expectConnection*(c: ConnManager, p: PeerId, dir: Direction): Future[Muxer] {.async.} =
proc expectConnection*(
c: ConnManager, p: PeerId, dir: Direction
): Future[Muxer] {.async: (raises: [AlreadyExpectingConnectionError, CancelledError]).} =
## Wait for a peer to connect to us. This will bypass the `MaxConnectionsPerPeer`
let key = (p, dir)
if key in c.expectedConnectionsOverLimit:
raise newException(AlreadyExpectingConnectionError, "Already expecting an incoming connection from that peer")
raise newException(
AlreadyExpectingConnectionError,
"Already expecting an incoming connection from that peer: " & shortLog(p),
)

let future = newFuture[Muxer]()
let future = Future[Muxer].Raising([CancelledError]).init()
c.expectedConnectionsOverLimit[key] = future

try:
@@ -207,18 +211,18 @@ proc contains*(c: ConnManager, muxer: Muxer): bool =
let conn = muxer.connection
return muxer in c.muxed.getOrDefault(conn.peerId)

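A hedged sketch of the `expectConnection` contract above; `awaitOverLimitPeer` is illustrative only:

proc awaitOverLimitPeer(
    c: ConnManager, p: PeerId
) {.async: (raises: [CancelledError]).} =
  try:
    # Bypasses MaxConnectionsPerPeer for one expected inbound connection.
    discard await c.expectConnection(p, Direction.In)
  except AlreadyExpectingConnectionError as exc:
    echo "duplicate expectation: ", exc.msg
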
proc closeMuxer(muxer: Muxer) {.async.} =
proc closeMuxer(muxer: Muxer) {.async: (raises: [CancelledError]).} =
trace "Cleaning up muxer", m = muxer

await muxer.close()
if not(isNil(muxer.handler)):
if not (isNil(muxer.handler)):
try:
await muxer.handler # TODO noraises?
await muxer.handler
except CatchableError as exc:
trace "Exception in close muxer handler", exc = exc.msg
trace "Exception in close muxer handler", description = exc.msg
trace "Cleaned up muxer", m = muxer

proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
proc muxCleanup(c: ConnManager, mux: Muxer) {.async: (raises: []).} =
try:
trace "Triggering disconnect events", mux
let peerId = mux.connection.peerId
@@ -231,18 +235,16 @@ proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
libp2p_peers.set(c.muxed.len.int64)
await c.triggerPeerEvents(peerId, PeerEvent(kind: PeerEventKind.Left))

if not(c.peerStore.isNil):
if not (c.peerStore.isNil):
c.peerStore.cleanup(peerId)

await c.triggerConnEvent(
peerId, ConnEvent(kind: ConnEventKind.Disconnected))
await c.triggerConnEvent(peerId, ConnEvent(kind: ConnEventKind.Disconnected))
except CatchableError as exc:
# This is top-level procedure which will work as separate task, so it
# do not need to propagate CancelledError and should handle other errors
warn "Unexpected exception peer cleanup handler",
mux, msg = exc.msg
warn "Unexpected exception peer cleanup handler", mux, description = exc.msg

proc onClose(c: ConnManager, mux: Muxer) {.async.} =
proc onClose(c: ConnManager, mux: Muxer) {.async: (raises: []).} =
## connection close even handler
##
## triggers the connections resource cleanup
@@ -252,18 +254,14 @@ proc onClose(c: ConnManager, mux: Muxer) {.async.} =
trace "Connection closed, cleaning up", mux
except CatchableError as exc:
debug "Unexpected exception in connection manager's cleanup",
errMsg = exc.msg, mux
description = exc.msg, mux
finally:
await c.muxCleanup(mux)

proc selectMuxer*(c: ConnManager,
peerId: PeerId,
dir: Direction): Muxer =
proc selectMuxer*(c: ConnManager, peerId: PeerId, dir: Direction): Muxer =
## Select a connection for the provided peer and direction
##
let conns = toSeq(
c.muxed.getOrDefault(peerId))
.filterIt( it.connection.dir == dir )
let conns = toSeq(c.muxed.getOrDefault(peerId)).filterIt(it.connection.dir == dir)

if conns.len > 0:
return conns[0]
@@ -280,9 +278,7 @@ proc selectMuxer*(c: ConnManager, peerId: PeerId): Muxer =
trace "connection not found", peerId
return mux

proc storeMuxer*(c: ConnManager,
muxer: Muxer)
{.raises: [CatchableError].} =
proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [LPError].} =
## store the connection and muxer
##

@@ -304,69 +300,72 @@ proc storeMuxer*(c: ConnManager,
let key = (peerId, dir)
let expectedConn = c.expectedConnectionsOverLimit.getOrDefault(key)
if expectedConn != nil and not expectedConn.finished:
expectedConn.complete(muxer)
expectedConn.complete(muxer)
else:
debug "Too many connections for peer",
conns = c.muxed.getOrDefault(peerId).len
conns = c.muxed.getOrDefault(peerId).len, peerId, dir

raise newTooManyConnectionsError()

assert muxer notin c.muxed.getOrDefault(peerId)

let
newPeer = peerId notin c.muxed
assert newPeer or c.muxed[peerId].len > 0
c.muxed.mgetOrPut(peerId, newSeq[Muxer]()).add(muxer)
var newPeer = false
c.muxed.withValue(peerId, muxers):
doAssert muxers[].len > 0
doAssert muxer notin muxers[]
muxers[].add(muxer)
do:
c.muxed[peerId] = @[muxer]
newPeer = true
libp2p_peers.set(c.muxed.len.int64)

asyncSpawn c.triggerConnEvent(
peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: dir == Direction.In))
peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: dir == Direction.In)
)

if newPeer:
asyncSpawn c.triggerPeerEvents(
peerId, PeerEvent(kind: PeerEventKind.Joined, initiator: dir == Direction.Out))
peerId, PeerEvent(kind: PeerEventKind.Joined, initiator: dir == Direction.Out)
)

asyncSpawn c.onClose(muxer)

trace "Stored muxer",
muxer, direction = $muxer.connection.dir, peers = c.muxed.len
trace "Stored muxer", muxer, direction = $muxer.connection.dir, peers = c.muxed.len

proc getIncomingSlot*(c: ConnManager): Future[ConnectionSlot] {.async.} =
proc getIncomingSlot*(
c: ConnManager
): Future[ConnectionSlot] {.async: (raises: [CancelledError]).} =
await c.inSema.acquire()
return ConnectionSlot(connManager: c, direction: In)

proc getOutgoingSlot*(c: ConnManager, forceDial = false): ConnectionSlot {.raises: [TooManyConnectionsError].} =
proc getOutgoingSlot*(
c: ConnManager, forceDial = false
): ConnectionSlot {.raises: [TooManyConnectionsError].} =
if forceDial:
c.outSema.forceAcquire()
elif not c.outSema.tryAcquire():
trace "Too many outgoing connections!", count = c.outSema.count,
max = c.outSema.size
trace "Too many outgoing connections!",
available = c.outSema.count, max = c.outSema.size
raise newTooManyConnectionsError()
return ConnectionSlot(connManager: c, direction: Out)

func semaphore(c: ConnManager, dir: Direction): AsyncSemaphore {.inline.} =
return if dir == In: c.inSema else: c.outSema

proc slotsAvailable*(c: ConnManager, dir: Direction): int =
case dir:
of Direction.In:
return c.inSema.count
of Direction.Out:
return c.outSema.count
return semaphore(c, dir).count

proc release*(cs: ConnectionSlot) =
if cs.direction == In:
cs.connManager.inSema.release()
else:
cs.connManager.outSema.release()
semaphore(cs.connManager, cs.direction).release()

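A short sketch of the slot discipline these helpers centralize (`trackOutgoing` is a hypothetical wrapper):

proc trackOutgoing(c: ConnManager, conn: Connection) =
  let slot = c.getOutgoingSlot() # raises TooManyConnectionsError when full
  if isNil(conn):
    slot.release() # hand the slot back immediately
  else:
    slot.trackConnection(conn) # released automatically when conn closes
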
proc trackConnection*(cs: ConnectionSlot, conn: Connection) =
if isNil(conn):
cs.release()
return

proc semaphoreMonitor() {.async.} =
proc semaphoreMonitor() {.async: (raises: [CancelledError]).} =
try:
await conn.join()
except CatchableError as exc:
trace "Exception in semaphore monitor, ignoring", exc = exc.msg
trace "Exception in semaphore monitor, ignoring", description = exc.msg

cs.release()

@@ -378,31 +377,32 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
return
cs.trackConnection(mux.connection)

proc getStream*(c: ConnManager,
muxer: Muxer): Future[Connection] {.async, gcsafe.} =
proc getStream*(
c: ConnManager, muxer: Muxer
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
## get a muxed stream for the passed muxer
##

if not(isNil(muxer)):
if not (isNil(muxer)):
return await muxer.newStream()

proc getStream*(c: ConnManager,
peerId: PeerId): Future[Connection] {.async, gcsafe.} =
proc getStream*(
c: ConnManager, peerId: PeerId
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
## get a muxed stream for the passed peer from any connection
##

return await c.getStream(c.selectMuxer(peerId))

proc getStream*(c: ConnManager,
peerId: PeerId,
dir: Direction): Future[Connection] {.async, gcsafe.} =
proc getStream*(
c: ConnManager, peerId: PeerId, dir: Direction
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
## get a muxed stream for the passed peer from a connection with `dir`
##

return await c.getStream(c.selectMuxer(peerId, dir))


proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
proc dropPeer*(c: ConnManager, peerId: PeerId) {.async: (raises: [CancelledError]).} =
## drop connections and cleanup resources for peer
##
trace "Dropping peer", peerId
@@ -413,7 +413,7 @@ proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =

trace "Peer dropped", peerId

proc close*(c: ConnManager) {.async.} =
proc close*(c: ConnManager) {.async: (raises: [CancelledError]).} =
## cleanup resources for the connection
## manager
##
@@ -433,4 +433,3 @@ proc close*(c: ConnManager) {.async.} =
await closeMuxer(mux)

trace "Closed ConnManager"


@@ -48,17 +48,19 @@ proc intoChaChaPolyTag*(s: openArray[byte]): ChaChaPolyTag =
# this is reconciled at runtime
# we do this in the global scope / module init

proc encrypt*(_: type[ChaChaPoly],
key: ChaChaPolyKey,
nonce: ChaChaPolyNonce,
tag: var ChaChaPolyTag,
data: var openArray[byte],
aad: openArray[byte]) =
let
ad = if aad.len > 0:
unsafeAddr aad[0]
else:
nil
proc encrypt*(
_: type[ChaChaPoly],
key: ChaChaPolyKey,
nonce: ChaChaPolyNonce,
tag: var ChaChaPolyTag,
data: var openArray[byte],
aad: openArray[byte],
) =
let ad =
if aad.len > 0:
unsafeAddr aad[0]
else:
nil

poly1305CtmulRun(
unsafeAddr key[0],
@@ -69,20 +71,23 @@ proc encrypt*(_: type[ChaChaPoly],
uint(aad.len),
baseAddr(tag),
# cast is required to workaround https://github.com/nim-lang/Nim/issues/13905
cast[Chacha20Run](chacha20CtRun),
#[encrypt]# 1.cint)
cast[Chacha20Run](chacha20CtRun), #[encrypt]#
1.cint,
)

proc decrypt*(_: type[ChaChaPoly],
key: ChaChaPolyKey,
nonce: ChaChaPolyNonce,
tag: var ChaChaPolyTag,
data: var openArray[byte],
aad: openArray[byte]) =
let
ad = if aad.len > 0:
unsafeAddr aad[0]
else:
nil
proc decrypt*(
_: type[ChaChaPoly],
key: ChaChaPolyKey,
nonce: ChaChaPolyNonce,
tag: var ChaChaPolyTag,
data: var openArray[byte],
aad: openArray[byte],
) =
let ad =
if aad.len > 0:
unsafeAddr aad[0]
else:
nil

poly1305CtmulRun(
unsafeAddr key[0],
@@ -93,5 +98,6 @@ proc decrypt*(_: type[ChaChaPoly],
uint(aad.len),
baseAddr(tag),
# cast is required to workaround https://github.com/nim-lang/Nim/issues/13905
cast[Chacha20Run](chacha20CtRun),
#[decrypt]# 0.cint)
cast[Chacha20Run](chacha20CtRun), #[decrypt]#
0.cint,
)

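A usage sketch for the in-place AEAD procs above, under the convention visible here: `encrypt` transforms `data` in place and writes the computed tag into `tag`; `decrypt` reverses the stream cipher, and callers compare tags themselves. Key and nonce are zeroed placeholders:

var
  key: ChaChaPolyKey     # 32-byte array, all zero in this sketch
  nonce: ChaChaPolyNonce # 12-byte array
  tag: ChaChaPolyTag     # 16-byte array, filled by encrypt
  data = @[byte 1, 2, 3, 4]
let aad = @[byte 42]

ChaChaPoly.encrypt(key, nonce, tag, data, aad) # data is now ciphertext
ChaChaPoly.decrypt(key, nonce, tag, data, aad) # back to plaintext
doAssert data == @[byte 1, 2, 3, 4]
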
@@ -11,15 +11,15 @@
{.push raises: [].}

from strutils import split, strip, cmpIgnoreCase
import ../utils/sequninit

const libp2p_pki_schemes* {.strdefine.} = "rsa,ed25519,secp256k1,ecnist"

type
PKScheme* = enum
RSA = 0,
Ed25519,
Secp256k1,
ECDSA
type PKScheme* = enum
RSA = 0
Ed25519
Secp256k1
ECDSA

proc initSupportedSchemes(list: static string): set[PKScheme] =
var res: set[PKScheme]
@@ -65,17 +65,19 @@ when supported(PKScheme.Ed25519):
import ed25519/ed25519
when supported(PKScheme.Secp256k1):
import secp
when supported(PKScheme.ECDSA):
import ecnist

# We are still importing `ecnist` because, it is used for SECIO handshake,
# but it will be impossible to create ECNIST keys or import ECNIST keys.
# These used to be declared in `crypto` itself
export ecnist.ephemeral, ecnist.ECDHEScheme

import ecnist, bearssl/rand, bearssl/hash as bhash
import bearssl/rand, bearssl/hash as bhash
import ../protobuf/minprotobuf, ../vbuffer, ../multihash, ../multicodec
import nimcrypto/[rijndael, twofish, sha2, hash, hmac]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import ../utility
import stew/results
import results
export results, utility

# This is workaround for Nim's `import` bug
@@ -83,11 +85,9 @@ export rijndael, twofish, sha2, hash, hmac, ncrutils, rand

type
DigestSheme* = enum
Sha256,
Sha256
Sha512

ECDHEScheme* = EcCurveKind

PublicKey* = object
case scheme*: PKScheme
of PKScheme.RSA:
@@ -148,15 +148,16 @@ type
data*: seq[byte]

CryptoError* = enum
KeyError,
SigError,
HashError,
KeyError
SigError
HashError
SchemeError

CryptoResult*[T] = Result[T, CryptoError]

template orError*(exp: untyped, err: untyped): untyped =
(exp.mapErr do (_: auto) -> auto: err)
exp.mapErr do(_: auto) -> auto:
err

proc newRng*(): ref HmacDrbgContext =
# You should only create one instance of the RNG per application / library
@@ -172,13 +173,11 @@ proc newRng*(): ref HmacDrbgContext =
return nil
rng

proc shuffle*[T](
rng: ref HmacDrbgContext,
x: var openArray[T]) =
proc shuffle*[T](rng: ref HmacDrbgContext, x: var openArray[T]) =
if x.len == 0:
return

if x.len == 0: return

var randValues = newSeqUninitialized[byte](len(x) * 2)
var randValues = newSeqUninit[byte](len(x) * 2)
hmacDrbgGenerate(rng[], randValues)

for i in countdown(x.high, 1):
@@ -187,9 +186,12 @@ proc shuffle*[T](
y = rand mod i
swap(x[i], x[y])

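A tiny sketch of the DRBG-backed Fisher-Yates shuffle above:

let rng = newRng() # one DRBG instance per application, as noted above
var order = @[1, 2, 3, 4, 5]
rng.shuffle(order) # in-place permutation of `order`
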
proc random*(T: typedesc[PrivateKey], scheme: PKScheme,
rng: var HmacDrbgContext,
bits = RsaDefaultKeySize): CryptoResult[PrivateKey] =
proc random*(
T: typedesc[PrivateKey],
scheme: PKScheme,
rng: var HmacDrbgContext,
bits = RsaDefaultKeySize,
): CryptoResult[PrivateKey] =
## Generate random private key for scheme ``scheme``.
##
## ``bits`` is number of bits for RSA key, ``bits`` value must be in
@@ -197,7 +199,7 @@ proc random*(T: typedesc[PrivateKey], scheme: PKScheme,
case scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
let rsakey = ? RsaPrivateKey.random(rng, bits).orError(KeyError)
let rsakey = ?RsaPrivateKey.random(rng, bits).orError(CryptoError.KeyError)
ok(PrivateKey(scheme: scheme, rsakey: rsakey))
else:
err(SchemeError)
@@ -209,7 +211,8 @@ proc random*(T: typedesc[PrivateKey], scheme: PKScheme,
err(SchemeError)
of PKScheme.ECDSA:
when supported(PKScheme.ECDSA):
let eckey = ? ecnist.EcPrivateKey.random(Secp256r1, rng).orError(KeyError)
let eckey =
?ecnist.EcPrivateKey.random(Secp256r1, rng).orError(CryptoError.KeyError)
ok(PrivateKey(scheme: scheme, eckey: eckey))
else:
err(SchemeError)
@@ -220,8 +223,9 @@ proc random*(T: typedesc[PrivateKey], scheme: PKScheme,
else:
err(SchemeError)

proc random*(T: typedesc[PrivateKey], rng: var HmacDrbgContext,
bits = RsaDefaultKeySize): CryptoResult[PrivateKey] =
proc random*(
T: typedesc[PrivateKey], rng: var HmacDrbgContext, bits = RsaDefaultKeySize
): CryptoResult[PrivateKey] =
## Generate random private key using default public-key cryptography scheme.
##
## Default public-key cryptography schemes are following order:
@@ -235,17 +239,21 @@ proc random*(T: typedesc[PrivateKey], rng: var HmacDrbgContext,
let skkey = SkPrivateKey.random(rng)
ok(PrivateKey(scheme: PKScheme.Secp256k1, skkey: skkey))
elif supported(PKScheme.RSA):
let rsakey = ? RsaPrivateKey.random(rng, bits).orError(KeyError)
let rsakey = ?RsaPrivateKey.random(rng, bits).orError(CryptoError.KeyError)
ok(PrivateKey(scheme: PKScheme.RSA, rsakey: rsakey))
elif supported(PKScheme.ECDSA):
let eckey = ? ecnist.EcPrivateKey.random(Secp256r1, rng).orError(KeyError)
let eckey =
?ecnist.EcPrivateKey.random(Secp256r1, rng).orError(CryptoError.KeyError)
ok(PrivateKey(scheme: PKScheme.ECDSA, eckey: eckey))
else:
err(SchemeError)

proc random*(T: typedesc[KeyPair], scheme: PKScheme,
rng: var HmacDrbgContext,
bits = RsaDefaultKeySize): CryptoResult[KeyPair] =
proc random*(
T: typedesc[KeyPair],
scheme: PKScheme,
rng: var HmacDrbgContext,
bits = RsaDefaultKeySize,
): CryptoResult[KeyPair] =
## Generate random key pair for scheme ``scheme``.
##
## ``bits`` is number of bits for RSA key, ``bits`` value must be in
@@ -253,39 +261,52 @@ proc random*(T: typedesc[KeyPair], scheme: PKScheme,
case scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
let pair = ? RsaKeyPair.random(rng, bits).orError(KeyError)
ok(KeyPair(
seckey: PrivateKey(scheme: scheme, rsakey: pair.seckey),
pubkey: PublicKey(scheme: scheme, rsakey: pair.pubkey)))
let pair = ?RsaKeyPair.random(rng, bits).orError(CryptoError.KeyError)
ok(
KeyPair(
seckey: PrivateKey(scheme: scheme, rsakey: pair.seckey),
pubkey: PublicKey(scheme: scheme, rsakey: pair.pubkey),
)
)
else:
err(SchemeError)
of PKScheme.Ed25519:
when supported(PKScheme.Ed25519):
let pair = EdKeyPair.random(rng)
ok(KeyPair(
seckey: PrivateKey(scheme: scheme, edkey: pair.seckey),
pubkey: PublicKey(scheme: scheme, edkey: pair.pubkey)))
ok(
KeyPair(
seckey: PrivateKey(scheme: scheme, edkey: pair.seckey),
pubkey: PublicKey(scheme: scheme, edkey: pair.pubkey),
)
)
else:
err(SchemeError)
of PKScheme.ECDSA:
when supported(PKScheme.ECDSA):
let pair = ? EcKeyPair.random(Secp256r1, rng).orError(KeyError)
ok(KeyPair(
seckey: PrivateKey(scheme: scheme, eckey: pair.seckey),
pubkey: PublicKey(scheme: scheme, eckey: pair.pubkey)))
let pair = ?EcKeyPair.random(Secp256r1, rng).orError(CryptoError.KeyError)
ok(
KeyPair(
seckey: PrivateKey(scheme: scheme, eckey: pair.seckey),
pubkey: PublicKey(scheme: scheme, eckey: pair.pubkey),
)
)
else:
err(SchemeError)
of PKScheme.Secp256k1:
when supported(PKScheme.Secp256k1):
let pair = SkKeyPair.random(rng)
ok(KeyPair(
seckey: PrivateKey(scheme: scheme, skkey: pair.seckey),
pubkey: PublicKey(scheme: scheme, skkey: pair.pubkey)))
ok(
KeyPair(
seckey: PrivateKey(scheme: scheme, skkey: pair.seckey),
pubkey: PublicKey(scheme: scheme, skkey: pair.pubkey),
)
)
else:
err(SchemeError)

proc random*(T: typedesc[KeyPair], rng: var HmacDrbgContext,
bits = RsaDefaultKeySize): CryptoResult[KeyPair] =
proc random*(
T: typedesc[KeyPair], rng: var HmacDrbgContext, bits = RsaDefaultKeySize
): CryptoResult[KeyPair] =
## Generate random private pair of keys using default public-key cryptography
## scheme.
##
@@ -295,24 +316,36 @@ proc random*(T: typedesc[KeyPair], rng: var HmacDrbgContext,
## So will be used first available (supported) method.
when supported(PKScheme.Ed25519):
let pair = EdKeyPair.random(rng)
ok(KeyPair(
seckey: PrivateKey(scheme: PKScheme.Ed25519, edkey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.Ed25519, edkey: pair.pubkey)))
ok(
KeyPair(
seckey: PrivateKey(scheme: PKScheme.Ed25519, edkey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.Ed25519, edkey: pair.pubkey),
)
)
elif supported(PKScheme.Secp256k1):
let pair = SkKeyPair.random(rng)
ok(KeyPair(
seckey: PrivateKey(scheme: PKScheme.Secp256k1, skkey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.Secp256k1, skkey: pair.pubkey)))
ok(
KeyPair(
seckey: PrivateKey(scheme: PKScheme.Secp256k1, skkey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.Secp256k1, skkey: pair.pubkey),
)
)
elif supported(PKScheme.RSA):
let pair = ? RsaKeyPair.random(rng, bits).orError(KeyError)
ok(KeyPair(
seckey: PrivateKey(scheme: PKScheme.RSA, rsakey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.RSA, rsakey: pair.pubkey)))
let pair = ?RsaKeyPair.random(rng, bits).orError(KeyError)
ok(
KeyPair(
seckey: PrivateKey(scheme: PKScheme.RSA, rsakey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.RSA, rsakey: pair.pubkey),
)
)
elif supported(PKScheme.ECDSA):
let pair = ? EcKeyPair.random(Secp256r1, rng).orError(KeyError)
ok(KeyPair(
seckey: PrivateKey(scheme: PKScheme.ECDSA, eckey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.ECDSA, eckey: pair.pubkey)))
let pair = ?EcKeyPair.random(Secp256r1, rng).orError(KeyError)
ok(
KeyPair(
seckey: PrivateKey(scheme: PKScheme.ECDSA, eckey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.ECDSA, eckey: pair.pubkey),
)
)
else:
err(SchemeError)

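A sketch of both `random` overloads above (`tryGet` comes from `results`):

let rng = newRng()
let edPair = KeyPair.random(PKScheme.Ed25519, rng[]).tryGet()
let defPair = KeyPair.random(rng[]).tryGet() # first supported scheme wins
doAssert edPair.seckey.scheme == PKScheme.Ed25519
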
@@ -333,7 +366,7 @@ proc getPublicKey*(key: PrivateKey): CryptoResult[PublicKey] =
err(SchemeError)
of PKScheme.ECDSA:
when supported(PKScheme.ECDSA):
let eckey = ? key.eckey.getPublicKey().orError(KeyError)
let eckey = ?key.eckey.getPublicKey().orError(KeyError)
ok(PublicKey(scheme: ECDSA, eckey: eckey))
else:
err(SchemeError)
@@ -344,8 +377,9 @@ proc getPublicKey*(key: PrivateKey): CryptoResult[PublicKey] =
else:
err(SchemeError)

proc toRawBytes*(key: PrivateKey | PublicKey,
data: var openArray[byte]): CryptoResult[int] =
proc toRawBytes*(
key: PrivateKey | PublicKey, data: var openArray[byte]
): CryptoResult[int] =
## Serialize private key ``key`` (using scheme's own serialization) and store
## it to ``data``.
##
@@ -404,7 +438,7 @@ proc toBytes*(key: PrivateKey, data: var openArray[byte]): CryptoResult[int] =
## Returns number of bytes (octets) needed to store private key ``key``.
var msg = initProtoBuffer()
msg.write(1, uint64(key.scheme))
msg.write(2, ? key.getRawBytes())
msg.write(2, ?key.getRawBytes())
msg.finish()
var blen = len(msg.buffer)
if len(data) >= blen:
@@ -418,7 +452,7 @@ proc toBytes*(key: PublicKey, data: var openArray[byte]): CryptoResult[int] =
## Returns number of bytes (octets) needed to store public key ``key``.
var msg = initProtoBuffer()
msg.write(1, uint64(key.scheme))
msg.write(2, ? key.getRawBytes())
msg.write(2, ?key.getRawBytes())
msg.finish()
var blen = len(msg.buffer)
if len(data) >= blen and blen > 0:
@@ -438,7 +472,7 @@ proc getBytes*(key: PrivateKey): CryptoResult[seq[byte]] =
## serialization).
var msg = initProtoBuffer()
msg.write(1, uint64(key.scheme))
msg.write(2, ? key.getRawBytes())
msg.write(2, ?key.getRawBytes())
msg.finish()
ok(msg.buffer)

@@ -447,7 +481,7 @@ proc getBytes*(key: PublicKey): CryptoResult[seq[byte]] =
## serialization).
var msg = initProtoBuffer()
msg.write(1, uint64(key.scheme))
msg.write(2, ? key.getRawBytes())
msg.write(2, ?key.getRawBytes())
msg.finish()
ok(msg.buffer)

@@ -455,8 +489,7 @@ proc getBytes*(sig: Signature): seq[byte] =
## Return signature ``sig`` in binary form.
result = sig.data

template initImpl[T: PrivateKey|PublicKey](
key: var T, data: openArray[byte]): bool =
template initImpl[T: PrivateKey | PublicKey](key: var T, data: openArray[byte]): bool =
## Initialize private key ``key`` from libp2p's protobuf serialized raw
## binary form.
##
@@ -469,7 +502,7 @@ template initImpl[T: PrivateKey|PublicKey](
var pb = initProtoBuffer(@data)
let r1 = pb.getField(1, id)
let r2 = pb.getField(2, buffer)
if not(r1.get(false) and r2.get(false)):
if not (r1.get(false) and r2.get(false)):
false
else:
if cast[int8](id) notin SupportedSchemesInt or len(buffer) <= 0:
@@ -480,7 +513,7 @@ template initImpl[T: PrivateKey|PublicKey](
var nkey = PrivateKey(scheme: scheme)
else:
var nkey = PublicKey(scheme: scheme)
case scheme:
case scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
if init(nkey.rsakey, buffer).isOk:
@@ -518,12 +551,13 @@ template initImpl[T: PrivateKey|PublicKey](
else:
false

{.push warning[ProveField]:off.} # https://github.com/nim-lang/Nim/issues/22060
{.push warning[ProveField]: off.} # https://github.com/nim-lang/Nim/issues/22060
proc init*(key: var PrivateKey, data: openArray[byte]): bool =
initImpl(key, data)

proc init*(key: var PublicKey, data: openArray[byte]): bool =
initImpl(key, data)

{.pop.}

proc init*(sig: var Signature, data: openArray[byte]): bool =
@@ -534,7 +568,7 @@ proc init*(sig: var Signature, data: openArray[byte]): bool =
sig.data = @data
result = true

proc init*[T: PrivateKey|PublicKey](key: var T, data: string): bool =
proc init*[T: PrivateKey | PublicKey](key: var T, data: string): bool =
## Initialize private/public key ``key`` from libp2p's protobuf serialized
## hexadecimal string representation.
##
@@ -548,26 +582,23 @@ proc init*(sig: var Signature, data: string): bool =
## Returns ``true`` on success.
sig.init(ncrutils.fromHex(data))

proc init*(t: typedesc[PrivateKey],
data: openArray[byte]): CryptoResult[PrivateKey] =
proc init*(t: typedesc[PrivateKey], data: openArray[byte]): CryptoResult[PrivateKey] =
## Create new private key from libp2p's protobuf serialized binary form.
var res: t
if not res.init(data):
err(KeyError)
err(CryptoError.KeyError)
else:
ok(res)

proc init*(t: typedesc[PublicKey],
data: openArray[byte]): CryptoResult[PublicKey] =
proc init*(t: typedesc[PublicKey], data: openArray[byte]): CryptoResult[PublicKey] =
## Create new public key from libp2p's protobuf serialized binary form.
var res: t
if not res.init(data):
err(KeyError)
err(CryptoError.KeyError)
else:
ok(res)

proc init*(t: typedesc[Signature],
data: openArray[byte]): CryptoResult[Signature] =
proc init*(t: typedesc[Signature], data: openArray[byte]): CryptoResult[Signature] =
## Create new public key from libp2p's protobuf serialized binary form.
var res: t
if not res.init(data):
@@ -583,24 +614,28 @@ proc init*(t: typedesc[PrivateKey], data: string): CryptoResult[PrivateKey] =
when supported(PKScheme.RSA):
proc init*(t: typedesc[PrivateKey], key: rsa.RsaPrivateKey): PrivateKey =
PrivateKey(scheme: RSA, rsakey: key)

proc init*(t: typedesc[PublicKey], key: rsa.RsaPublicKey): PublicKey =
PublicKey(scheme: RSA, rsakey: key)

when supported(PKScheme.Ed25519):
proc init*(t: typedesc[PrivateKey], key: EdPrivateKey): PrivateKey =
PrivateKey(scheme: Ed25519, edkey: key)

proc init*(t: typedesc[PublicKey], key: EdPublicKey): PublicKey =
PublicKey(scheme: Ed25519, edkey: key)

when supported(PKScheme.Secp256k1):
proc init*(t: typedesc[PrivateKey], key: SkPrivateKey): PrivateKey =
PrivateKey(scheme: Secp256k1, skkey: key)

proc init*(t: typedesc[PublicKey], key: SkPublicKey): PublicKey =
PublicKey(scheme: Secp256k1, skkey: key)

when supported(PKScheme.ECDSA):
proc init*(t: typedesc[PrivateKey], key: ecnist.EcPrivateKey): PrivateKey =
PrivateKey(scheme: ECDSA, eckey: key)

proc init*(t: typedesc[PublicKey], key: ecnist.EcPublicKey): PublicKey =
PublicKey(scheme: ECDSA, eckey: key)

@@ -669,9 +704,9 @@ proc `==`*(key1, key2: PrivateKey): bool =
else:
false

proc `$`*(key: PrivateKey|PublicKey): string =
proc `$`*(key: PrivateKey | PublicKey): string =
## Get string representation of private/public key ``key``.
case key.scheme:
case key.scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
$(key.rsakey)
@@ -693,9 +728,9 @@ proc `$`*(key: PrivateKey|PublicKey): string =
else:
"unsupported secp256k1 key"

func shortLog*(key: PrivateKey|PublicKey): string =
func shortLog*(key: PrivateKey | PublicKey): string =
## Get short string representation of private/public key ``key``.
case key.scheme:
case key.scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
($key.rsakey).shortLog
@@ -721,16 +756,15 @@ proc `$`*(sig: Signature): string =
## Get string representation of signature ``sig``.
result = ncrutils.toHex(sig.data)

proc sign*(key: PrivateKey,
data: openArray[byte]): CryptoResult[Signature] {.gcsafe.} =
proc sign*(key: PrivateKey, data: openArray[byte]): CryptoResult[Signature] {.gcsafe.} =
## Sign message ``data`` using private key ``key`` and return generated
## signature in raw binary form.
var res: Signature
case key.scheme:
case key.scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
let sig = ? key.rsakey.sign(data).orError(SigError)
res.data = ? sig.getBytes().orError(SigError)
let sig = ?key.rsakey.sign(data).orError(SigError)
res.data = ?sig.getBytes().orError(SigError)
ok(res)
else:
err(SchemeError)
@@ -743,8 +777,8 @@ proc sign*(key: PrivateKey,
err(SchemeError)
of PKScheme.ECDSA:
when supported(PKScheme.ECDSA):
let sig = ? key.eckey.sign(data).orError(SigError)
res.data = ? sig.getBytes().orError(SigError)
let sig = ?key.eckey.sign(data).orError(SigError)
res.data = ?sig.getBytes().orError(SigError)
ok(res)
else:
err(SchemeError)
@@ -759,7 +793,7 @@ proc sign*(key: PrivateKey,
proc verify*(sig: Signature, message: openArray[byte], key: PublicKey): bool =
## Verify signature ``sig`` using message ``message`` and public key ``key``.
## Return ``true`` if message signature is valid.
case key.scheme:
case key.scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
var signature: RsaSignature
@@ -797,12 +831,12 @@ proc verify*(sig: Signature, message: openArray[byte], key: PublicKey): bool =
else:
false

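A sign/verify round trip over the procs above (message bytes are arbitrary):

let rng = newRng()
let pair = KeyPair.random(PKScheme.Ed25519, rng[]).tryGet()
let msg = @[byte 1, 2, 3]
let sig = pair.seckey.sign(msg).tryGet()
doAssert sig.verify(msg, pair.pubkey)
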
template makeSecret(buffer, hmactype, secret, seed: untyped) {.dirty.}=
template makeSecret(buffer, hmactype, secret, seed: untyped) {.dirty.} =
var ctx: hmactype
var j = 0
# We need to strip leading zeros, because Go bigint serialization do it.
var offset = 0
for i in 0..<len(secret):
for i in 0 ..< len(secret):
if secret[i] != 0x00'u8:
break
inc(offset)
@@ -823,8 +857,9 @@ template makeSecret(buffer, hmactype, secret, seed: untyped) {.dirty.}=
ctx.update(a.data)
a = ctx.finish()

proc stretchKeys*(cipherType: string, hashType: string,
sharedSecret: seq[byte]): Secret =
proc stretchKeys*(
cipherType: string, hashType: string, sharedSecret: seq[byte]
): Secret =
## Expand shared secret to cryptographic keys.
if cipherType == "AES-128":
result.ivsize = aes128.sizeBlock
@@ -839,7 +874,7 @@ proc stretchKeys*(cipherType: string, hashType: string,
var seed = "key expansion"
result.macsize = 20
let length = result.ivsize + result.keysize + result.macsize
result.data = newSeq[byte](2 * length)
result.data = newSeqUninit[byte](2 * length)

if hashType == "SHA256":
makeSecret(result.data, HMAC[sha256], sharedSecret, seed)
@@ -850,65 +885,57 @@ template goffset*(secret, id, o: untyped): untyped =
id * (len(secret.data) shr 1) + o

template ivOpenArray*(secret: Secret, id: int): untyped =
toOpenArray(secret.data, goffset(secret, id, 0),
goffset(secret, id, secret.ivsize - 1))
toOpenArray(
secret.data, goffset(secret, id, 0), goffset(secret, id, secret.ivsize - 1)
)

template keyOpenArray*(secret: Secret, id: int): untyped =
toOpenArray(secret.data, goffset(secret, id, secret.ivsize),
goffset(secret, id, secret.ivsize + secret.keysize - 1))
toOpenArray(
secret.data,
goffset(secret, id, secret.ivsize),
goffset(secret, id, secret.ivsize + secret.keysize - 1),
)

template macOpenArray*(secret: Secret, id: int): untyped =
toOpenArray(secret.data, goffset(secret, id, secret.ivsize + secret.keysize),
goffset(secret, id, secret.ivsize + secret.keysize + secret.macsize - 1))
toOpenArray(
secret.data,
goffset(secret, id, secret.ivsize + secret.keysize),
goffset(secret, id, secret.ivsize + secret.keysize + secret.macsize - 1),
)

proc iv*(secret: Secret, id: int): seq[byte] {.inline.} =
## Get array of bytes with with initial vector.
result = newSeq[byte](secret.ivsize)
var offset = if id == 0: 0 else: (len(secret.data) div 2)
result = newSeqUninit[byte](secret.ivsize)
var offset =
if id == 0:
0
else:
(len(secret.data) div 2)
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.ivsize)

proc key*(secret: Secret, id: int): seq[byte] {.inline.} =
result = newSeq[byte](secret.keysize)
var offset = if id == 0: 0 else: (len(secret.data) div 2)
result = newSeqUninit[byte](secret.keysize)
var offset =
if id == 0:
0
else:
(len(secret.data) div 2)
offset += secret.ivsize
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.keysize)

proc mac*(secret: Secret, id: int): seq[byte] {.inline.} =
result = newSeq[byte](secret.macsize)
var offset = if id == 0: 0 else: (len(secret.data) div 2)
result = newSeqUninit[byte](secret.macsize)
var offset =
if id == 0:
0
else:
(len(secret.data) div 2)
offset += secret.ivsize + secret.keysize
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.macsize)

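A sketch of expanding a placeholder shared secret; ids 0 and 1 select the two key directions via the offset helpers above:

let sharedSecret = newSeq[byte](32) # placeholder secret bytes
let secret = stretchKeys("AES-128", "SHA256", sharedSecret)
let
  ivLocal = secret.iv(0)   # initialization vector, direction 0
  keyLocal = secret.key(0) # cipher key, direction 0
  macLocal = secret.mac(0) # MAC key, direction 0
echo "key material: ", ivLocal.len, "/", keyLocal.len, "/", macLocal.len
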
proc ephemeral*(
scheme: ECDHEScheme,
rng: var HmacDrbgContext): CryptoResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE.
var keypair: EcKeyPair
if scheme == Secp256r1:
keypair = ? EcKeyPair.random(Secp256r1, rng).orError(KeyError)
elif scheme == Secp384r1:
keypair = ? EcKeyPair.random(Secp384r1, rng).orError(KeyError)
elif scheme == Secp521r1:
keypair = ? EcKeyPair.random(Secp521r1, rng).orError(KeyError)
ok(keypair)

proc ephemeral*(
scheme: string, rng: var HmacDrbgContext): CryptoResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE using string encoding.
##
## Currently supported encoding strings are P-256, P-384, P-521, if encoding
## string is not supported P-521 key will be generated.
if scheme == "P-256":
ephemeral(Secp256r1, rng)
elif scheme == "P-384":
ephemeral(Secp384r1, rng)
elif scheme == "P-521":
ephemeral(Secp521r1, rng)
else:
ephemeral(Secp521r1, rng)

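A minimal sketch of the ECDHE helpers above; per the doc comment, an unknown encoding string falls back to P-521:

let rng = newRng()
let pairA = ephemeral("P-256", rng[]).tryGet()   # string overload
let pairB = ephemeral(Secp256r1, rng[]).tryGet() # enum overload
# pairA.pubkey / pairB.pubkey would be exchanged between the two peers.
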
proc getOrder*(remotePubkey, localNonce: openArray[byte],
localPubkey, remoteNonce: openArray[byte]): CryptoResult[int] =
proc getOrder*(
remotePubkey, localNonce: openArray[byte], localPubkey, remoteNonce: openArray[byte]
): CryptoResult[int] =
## Compare values and calculate `order` parameter.
var ctx: sha256
ctx.init()
@@ -919,9 +946,9 @@ proc getOrder*(remotePubkey, localNonce: openArray[byte],
ctx.update(localPubkey)
ctx.update(remoteNonce)
var digest2 = ctx.finish()
var mh1 = ? MultiHash.init(multiCodec("sha2-256"), digest1).orError(HashError)
var mh2 = ? MultiHash.init(multiCodec("sha2-256"), digest2).orError(HashError)
var res = 0;
var mh1 = ?MultiHash.init(multiCodec("sha2-256"), digest1).orError(HashError)
var mh2 = ?MultiHash.init(multiCodec("sha2-256"), digest2).orError(HashError)
var res = 0
for i in 0 ..< len(mh1.data.buffer):
res = int(mh1.data.buffer[i]) - int(mh2.data.buffer[i])
if res != 0:
@@ -952,95 +979,45 @@ proc selectBest*(order: int, p1, p2: string): string =
if felement == selement:
return felement

proc createProposal*(nonce, pubkey: openArray[byte],
exchanges, ciphers, hashes: string): seq[byte] =
## Create SecIO proposal message using random ``nonce``, local public key
## ``pubkey``, comma-delimieted list of supported exchange schemes
## ``exchanges``, comma-delimeted list of supported ciphers ``ciphers`` and
## comma-delimeted list of supported hashes ``hashes``.
var msg = initProtoBuffer({WithUint32BeLength})
msg.write(1, nonce)
msg.write(2, pubkey)
msg.write(3, exchanges)
msg.write(4, ciphers)
msg.write(5, hashes)
msg.finish()
msg.buffer

proc decodeProposal*(message: seq[byte], nonce, pubkey: var seq[byte],
exchanges, ciphers, hashes: var string): bool =
## Parse incoming proposal message and decode remote random nonce ``nonce``,
## remote public key ``pubkey``, comma-delimieted list of supported exchange
## schemes ``exchanges``, comma-delimeted list of supported ciphers
## ``ciphers`` and comma-delimeted list of supported hashes ``hashes``.
##
## Procedure returns ``true`` on success and ``false`` on error.
var pb = initProtoBuffer(message)
let r1 = pb.getField(1, nonce)
let r2 = pb.getField(2, pubkey)
let r3 = pb.getField(3, exchanges)
let r4 = pb.getField(4, ciphers)
let r5 = pb.getField(5, hashes)

r1.get(false) and r2.get(false) and r3.get(false) and
r4.get(false) and r5.get(false)

proc createExchange*(epubkey, signature: openArray[byte]): seq[byte] =
## Create SecIO exchange message using ephemeral public key ``epubkey`` and
## signature of proposal blocks ``signature``.
var msg = initProtoBuffer({WithUint32BeLength})
msg.write(1, epubkey)
msg.write(2, signature)
msg.finish()
msg.buffer

proc decodeExchange*(message: seq[byte],
pubkey, signature: var seq[byte]): bool =
## Parse incoming exchange message and decode remote ephemeral public key
## ``pubkey`` and signature ``signature``.
##
## Procedure returns ``true`` on success and ``false`` on error.
var pb = initProtoBuffer(message)
let r1 = pb.getField(1, pubkey)
let r2 = pb.getField(2, signature)
r1.get(false) and r2.get(false)

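A sketch of building a SecIO proposal with the procs above (all values are placeholders). Note the `{WithUint32BeLength}` framing: a receiver strips the 4-byte big-endian length prefix before calling `decodeProposal`:

let rng = newRng()
let pair = KeyPair.random(rng[]).tryGet()
let nonce = @[byte 0, 1, 2, 3]
let proposal = createProposal(
  nonce, pair.pubkey.getBytes().tryGet(), "P-256", "AES-128", "SHA256"
)
echo "framed proposal size: ", proposal.len
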
## Serialization/Deserialization helpers

proc write*(vb: var VBuffer, pubkey: PublicKey) {.
inline, raises: [ResultError[CryptoError]].} =
proc write*(
vb: var VBuffer, pubkey: PublicKey
) {.inline, raises: [ResultError[CryptoError]].} =
## Write PublicKey value ``pubkey`` to buffer ``vb``.
vb.writeSeq(pubkey.getBytes().tryGet())

proc write*(vb: var VBuffer, seckey: PrivateKey) {.
inline, raises: [ResultError[CryptoError]].} =
proc write*(
vb: var VBuffer, seckey: PrivateKey
) {.inline, raises: [ResultError[CryptoError]].} =
## Write PrivateKey value ``seckey`` to buffer ``vb``.
vb.writeSeq(seckey.getBytes().tryGet())

proc write*(vb: var VBuffer, sig: PrivateKey) {.
inline, raises: [ResultError[CryptoError]].} =
proc write*(
vb: var VBuffer, sig: PrivateKey
) {.inline, raises: [ResultError[CryptoError]].} =
## Write Signature value ``sig`` to buffer ``vb``.
vb.writeSeq(sig.getBytes().tryGet())

proc write*[T: PublicKey|PrivateKey](pb: var ProtoBuffer, field: int,
key: T) {.
inline, raises: [ResultError[CryptoError]].} =
proc write*[T: PublicKey | PrivateKey](
pb: var ProtoBuffer, field: int, key: T
) {.inline, raises: [ResultError[CryptoError]].} =
write(pb, field, key.getBytes().tryGet())

proc write*(pb: var ProtoBuffer, field: int, sig: Signature) {.
inline, raises: [].} =
proc write*(pb: var ProtoBuffer, field: int, sig: Signature) {.inline, raises: [].} =
write(pb, field, sig.getBytes())

proc getField*[T: PublicKey|PrivateKey](pb: ProtoBuffer, field: int,
value: var T): ProtoResult[bool] =
proc getField*[T: PublicKey | PrivateKey](
pb: ProtoBuffer, field: int, value: var T
): ProtoResult[bool] =
## Deserialize public/private key from protobuf's message ``pb`` using field
## index ``field``.
##
## On success deserialized key will be stored in ``value``.
var buffer: seq[byte]
var key: T
let res = ? pb.getField(field, buffer)
if not(res):
let res = ?pb.getField(field, buffer)
if not (res):
ok(false)
else:
if key.init(buffer):
@@ -1049,16 +1026,15 @@ proc getField*[T: PublicKey|PrivateKey](pb: ProtoBuffer, field: int,
else:
err(ProtoError.IncorrectBlob)

proc getField*(pb: ProtoBuffer, field: int,
value: var Signature): ProtoResult[bool] =
proc getField*(pb: ProtoBuffer, field: int, value: var Signature): ProtoResult[bool] =
## Deserialize signature from protobuf's message ``pb`` using field index
## ``field``.
##
## On success deserialized signature will be stored in ``value``.
var buffer: seq[byte]
var sig: Signature
let res = ? pb.getField(field, buffer)
if not(res):
let res = ?pb.getField(field, buffer)
if not (res):
ok(false)
else:
if sig.init(buffer):

@@ -18,12 +18,11 @@
{.push raises: [].}

import bearssl/[ec, rand]
import stew/results
import results
from stew/assign2 import assign
export results

const
Curve25519KeySize* = 32
const Curve25519KeySize* = 32

type
Curve25519* = object
@@ -35,12 +34,12 @@ proc intoCurve25519Key*(s: openArray[byte]): Curve25519Key =
assert s.len == Curve25519KeySize
assign(result, s)

proc getBytes*(key: Curve25519Key): seq[byte] = @key
proc getBytes*(key: Curve25519Key): seq[byte] =
@key

proc byteswap(buf: var Curve25519Key) {.inline.} =
for i in 0..<16:
let
x = buf[i]
for i in 0 ..< 16:
let x = buf[i]
buf[i] = buf[31 - i]
buf[31 - i] = x

@@ -48,31 +47,25 @@ proc mul*(_: type[Curve25519], point: var Curve25519Key, multiplier: Curve25519K
let defaultBrEc = ecGetDefault()

# multiplier needs to be big-endian
var
multiplierBs = multiplier
var multiplierBs = multiplier
multiplierBs.byteswap()
let
res = defaultBrEc.mul(
addr point[0],
Curve25519KeySize,
addr multiplierBs[0],
Curve25519KeySize,
EC_curve25519)
let res = defaultBrEc.mul(
addr point[0],
Curve25519KeySize,
addr multiplierBs[0],
Curve25519KeySize,
EC_curve25519,
)
assert res == 1

proc mulgen(_: type[Curve25519], dst: var Curve25519Key, point: Curve25519Key) =
let defaultBrEc = ecGetDefault()

var
rpoint = point
var rpoint = point
rpoint.byteswap()

let
size = defaultBrEc.mulgen(
addr dst[0],
addr rpoint[0],
Curve25519KeySize,
EC_curve25519)
let size =
defaultBrEc.mulgen(addr dst[0], addr rpoint[0], Curve25519KeySize, EC_curve25519)

assert size == Curve25519KeySize

@@ -83,7 +76,8 @@ proc random*(_: type[Curve25519Key], rng: var HmacDrbgContext): Curve25519Key =
var res: Curve25519Key
let defaultBrEc = ecGetDefault()
let len = ecKeygen(
addr rng.vtable, defaultBrEc, nil, addr res[0], EC_curve25519)
PrngClassPointerConst(addr rng.vtable), defaultBrEc, nil, addr res[0], EC_curve25519
)
# Per bearssl documentation, the keygen only fails if the curve is
# unrecognised -
doAssert len == Curve25519KeySize, "Could not generate curve"

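An X25519-style agreement sketch; `public` is assumed here to be this module's exported wrapper around the internal `mulgen` shown above - adjust if the actual export differs:

let rng = newRng()
var
  secretA = Curve25519Key.random(rng[])
  secretB = Curve25519Key.random(rng[])
let
  publicA = public(secretA) # assumed wrapper over mulgen
  publicB = public(secretB)
var sharedA = publicB
Curve25519.mul(sharedA, secretA) # side A: B_pub * a
var sharedB = publicA
Curve25519.mul(sharedB, secretB) # side B: A_pub * b
doAssert sharedA == sharedB
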
@@ -21,7 +21,9 @@ import bearssl/[ec, rand, hash]
|
||||
import nimcrypto/utils as ncrutils
|
||||
import minasn1
|
||||
export minasn1.Asn1Error
|
||||
import stew/[results, ctops]
|
||||
import stew/ctops
|
||||
import results
|
||||
import ../utils/sequninit
|
||||
|
||||
import ../utility
|
||||
|
||||
@@ -58,23 +60,22 @@ type
|
||||
buffer*: seq[byte]
|
||||
|
||||
EcCurveKind* = enum
|
||||
Secp256r1 = EC_secp256r1,
|
||||
Secp384r1 = EC_secp384r1,
|
||||
Secp256r1 = EC_secp256r1
|
||||
Secp384r1 = EC_secp384r1
|
||||
Secp521r1 = EC_secp521r1
|
||||
|
||||
EcPKI* = EcPrivateKey | EcPublicKey | EcSignature
|
||||
|
||||
EcError* = enum
|
||||
EcRngError,
|
||||
EcKeyGenError,
|
||||
EcPublicKeyError,
|
||||
EcKeyIncorrectError,
|
||||
EcRngError
|
||||
EcKeyGenError
|
||||
EcPublicKeyError
|
||||
EcKeyIncorrectError
|
||||
EcSignatureError
|
||||
|
||||
EcResult*[T] = Result[T, EcError]
|
||||
|
||||
const
|
||||
EcSupportedCurvesCint* = @[cint(Secp256r1), cint(Secp384r1), cint(Secp521r1)]
|
||||
const EcSupportedCurvesCint* = @[cint(Secp256r1), cint(Secp384r1), cint(Secp521r1)]
|
||||
|
||||
proc `-`(x: uint32): uint32 {.inline.} =
|
||||
result = (0xFFFF_FFFF'u32 - x) + 1'u32
|
||||
@@ -88,7 +89,7 @@ proc CMP(x, y: uint32): int32 {.inline.} =
|
||||
|
||||
proc EQ0(x: int32): uint32 {.inline.} =
|
||||
var q = cast[uint32](x)
|
||||
result = not(q or -q) shr 31
|
||||
result = not (q or -q) shr 31
|
||||
|
||||
proc NEQ(x, y: uint32): uint32 {.inline.} =
|
||||
var q = cast[uint32](x xor y)
|
||||
@@ -113,7 +114,7 @@ proc checkScalar(scalar: openArray[byte], curve: cint): uint32 =
|
||||
for u in scalar:
|
||||
z = z or u
|
||||
if len(scalar) == int(orderlen):
|
||||
for i in 0..<len(scalar):
|
||||
for i in 0 ..< len(scalar):
|
||||
c = c or (-(cast[int32](EQ0(c))) and CMP(scalar[i], order[i]))
|
||||
else:
|
||||
c = -1
|
||||
@@ -126,8 +127,7 @@ proc checkPublic(key: openArray[byte], curve: cint): uint32 =
|
||||
var impl = ecGetDefault()
|
||||
var orderlen: uint = 0
|
||||
discard impl.order(curve, orderlen)
|
||||
result = impl.mul(unsafeAddr ckey[0], uint(len(ckey)),
|
||||
addr x[0], uint(len(x)), curve)
|
||||
result = impl.mul(unsafeAddr ckey[0], uint(len(ckey)), addr x[0], uint(len(x)), curve)

proc getOffset(pubkey: EcPublicKey): int {.inline.} =
  let o = cast[uint](pubkey.key.q) - cast[uint](unsafeAddr pubkey.buffer[0])
@@ -145,21 +145,15 @@ proc getOffset(seckey: EcPrivateKey): int {.inline.} =

template getPublicKeyLength*(curve: EcCurveKind): int =
  case curve
  of Secp256r1:
    PubKey256Length
  of Secp384r1:
    PubKey384Length
  of Secp521r1:
    PubKey521Length
  of Secp256r1: PubKey256Length
  of Secp384r1: PubKey384Length
  of Secp521r1: PubKey521Length

template getPrivateKeyLength*(curve: EcCurveKind): int =
  case curve
  of Secp256r1:
    SecKey256Length
  of Secp384r1:
    SecKey384Length
  of Secp521r1:
    SecKey521Length
  of Secp256r1: SecKey256Length
  of Secp384r1: SecKey384Length
  of Secp521r1: SecKey521Length

proc copy*[T: EcPKI](dst: var T, src: T): bool =
  ## Copy EC `private key`, `public key` or `signature` ``src`` to ``dst``.
@@ -201,7 +195,7 @@ proc copy*[T: EcPKI](src: T): T {.inline.} =
  if not copy(result, src):
    raise newException(EcKeyIncorrectError, "Incorrect key or signature")

proc clear*[T: EcPKI|EcKeyPair](pki: var T) =
proc clear*[T: EcPKI | EcKeyPair](pki: var T) =
  ## Wipe and clear EC `private key`, `public key` or `signature` object.
  doAssert(not isNil(pki))
  when T is EcPrivateKey:
@@ -232,8 +226,8 @@ proc clear*[T: EcPKI|EcKeyPair](pki: var T) =
      pki.pubkey.key.curve = 0

proc random*(
    T: typedesc[EcPrivateKey], kind: EcCurveKind,
    rng: var HmacDrbgContext): EcResult[EcPrivateKey] =
    T: typedesc[EcPrivateKey], kind: EcCurveKind, rng: var HmacDrbgContext
): EcResult[EcPrivateKey] =
  ## Generate new random EC private key using BearSSL's HMAC-SHA256-DRBG
  ## algorithm.
  ##
@@ -241,9 +235,13 @@ proc random*(
  ## secp521r1).
  var ecimp = ecGetDefault()
  var res = new EcPrivateKey
  if ecKeygen(addr rng.vtable, ecimp,
              addr res.key, addr res.buffer[0],
              safeConvert[cint](kind)) == 0:
  if ecKeygen(
    PrngClassPointerConst(addr rng.vtable),
    ecimp,
    addr res.key,
    addr res.buffer[0],
    safeConvert[cint](kind),
  ) == 0:
    err(EcKeyGenError)
  else:
    ok(res)
@@ -257,8 +255,7 @@ proc getPublicKey*(seckey: EcPrivateKey): EcResult[EcPublicKey] =
  if seckey.key.curve in EcSupportedCurvesCint:
    var res = new EcPublicKey
    assert res.buffer.len > getPublicKeyLength(cast[EcCurveKind](seckey.key.curve))
    if ecComputePub(ecimp, addr res.key,
                    addr res.buffer[0], unsafeAddr seckey.key) == 0:
    if ecComputePub(ecimp, addr res.key, addr res.buffer[0], unsafeAddr seckey.key) == 0:
      err(EcKeyIncorrectError)
    else:
      ok(res)
@@ -266,23 +263,23 @@ proc getPublicKey*(seckey: EcPrivateKey): EcResult[EcPublicKey] =
    err(EcKeyIncorrectError)

proc random*(
    T: typedesc[EcKeyPair], kind: EcCurveKind,
    rng: var HmacDrbgContext): EcResult[T] =
    T: typedesc[EcKeyPair], kind: EcCurveKind, rng: var HmacDrbgContext
): EcResult[T] =
  ## Generate new random EC private and public keypair using BearSSL's
  ## HMAC-SHA256-DRBG algorithm.
  ##
  ## ``kind`` elliptic curve kind of your choice (secp256r1, secp384r1 or
  ## secp521r1).
  let
    seckey = ? EcPrivateKey.random(kind, rng)
    pubkey = ? seckey.getPublicKey()
    seckey = ?EcPrivateKey.random(kind, rng)
    pubkey = ?seckey.getPublicKey()
    key = EcKeyPair(seckey: seckey, pubkey: pubkey)
  ok(key)
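
A brief usage sketch of these key-generation entry points, assuming this module's exports, `results`' `expect` and a seeded BearSSL `HmacDrbgContext` (the proc name is illustrative):

# Hedged sketch: generate a P-256 key, derive its public half, or build a pair.
proc keygenDemo(rng: var HmacDrbgContext) =
  let seckey = EcPrivateKey.random(Secp256r1, rng).expect("keygen")
  let pubkey = seckey.getPublicKey().expect("pubkey")
  # Generating a pair at once must agree with deriving the public key.
  let pair = EcKeyPair.random(Secp256r1, rng).expect("keygen")
  doAssert pair.pubkey == pair.seckey.getPublicKey().expect("pubkey")
  doAssert pubkey == seckey.getPublicKey().expect("pubkey")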

proc `$`*(seckey: EcPrivateKey): string =
  ## Return string representation of EC private key.
  if isNil(seckey) or seckey.key.curve == 0 or seckey.key.xlen == 0 or
     len(seckey.buffer) == 0:
      len(seckey.buffer) == 0:
    result = "Empty or uninitialized ECNIST key"
  else:
    if seckey.key.curve notin EcSupportedCurvesCint:
@@ -298,7 +295,7 @@ proc `$`*(seckey: EcPrivateKey): string =
proc `$`*(pubkey: EcPublicKey): string =
  ## Return string representation of EC public key.
  if isNil(pubkey) or pubkey.key.curve == 0 or pubkey.key.qlen == 0 or
     len(pubkey.buffer) == 0:
      len(pubkey.buffer) == 0:
    result = "Empty or uninitialized ECNIST key"
  else:
    if pubkey.key.curve notin EcSupportedCurvesCint:
@@ -371,7 +368,7 @@ proc toBytes*(seckey: EcPrivateKey, data: var openArray[byte]): EcResult[int] =
    return err(EcKeyIncorrectError)
  if seckey.key.curve in EcSupportedCurvesCint:
    var offset, length: int
    var pubkey = ? seckey.getPublicKey()
    var pubkey = ?seckey.getPublicKey()
    var b = Asn1Buffer.init()
    var p = Asn1Composite.init(Asn1Tag.Sequence)
    var c0 = Asn1Composite.init(0)
@@ -387,16 +384,14 @@ proc toBytes*(seckey: EcPrivateKey, data: var openArray[byte]): EcResult[int] =
    if offset < 0:
      return err(EcKeyIncorrectError)
    length = int(pubkey.key.qlen)
    c1.write(Asn1Tag.BitString,
             pubkey.buffer.toOpenArray(offset, offset + length - 1))
    c1.write(Asn1Tag.BitString, pubkey.buffer.toOpenArray(offset, offset + length - 1))
    c1.finish()
    offset = seckey.getOffset()
    if offset < 0:
      return err(EcKeyIncorrectError)
    length = int(seckey.key.xlen)
    p.write(1'u64)
    p.write(Asn1Tag.OctetString,
            seckey.buffer.toOpenArray(offset, offset + length - 1))
    p.write(Asn1Tag.OctetString, seckey.buffer.toOpenArray(offset, offset + length - 1))
    p.write(c0)
    p.write(c1)
    p.finish()
@@ -410,7 +405,6 @@ proc toBytes*(seckey: EcPrivateKey, data: var openArray[byte]): EcResult[int] =
  else:
    err(EcKeyIncorrectError)


proc toBytes*(pubkey: EcPublicKey, data: var openArray[byte]): EcResult[int] =
  ## Serialize EC public key ``pubkey`` to ASN.1 DER binary form and store it
  ## to ``data``.
@@ -436,8 +430,7 @@ proc toBytes*(pubkey: EcPublicKey, data: var openArray[byte]): EcResult[int] =
    if offset < 0:
      return err(EcKeyIncorrectError)
    let length = int(pubkey.key.qlen)
    p.write(Asn1Tag.BitString,
            pubkey.buffer.toOpenArray(offset, offset + length - 1))
    p.write(Asn1Tag.BitString, pubkey.buffer.toOpenArray(offset, offset + length - 1))
    p.finish()
    b.write(p)
    b.finish()
@@ -466,10 +459,10 @@ proc getBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
  if isNil(seckey):
    return err(EcKeyIncorrectError)
  if seckey.key.curve in EcSupportedCurvesCint:
    var res = newSeq[byte]()
    let length = ? seckey.toBytes(res)
    var res = newSeqUninit[byte](0)
    let length = ?seckey.toBytes(res)
    res.setLen(length)
    discard ? seckey.toBytes(res)
    discard ?seckey.toBytes(res)
    ok(res)
  else:
    err(EcKeyIncorrectError)
@@ -479,10 +472,10 @@ proc getBytes*(pubkey: EcPublicKey): EcResult[seq[byte]] =
  if isNil(pubkey):
    return err(EcKeyIncorrectError)
  if pubkey.key.curve in EcSupportedCurvesCint:
    var res = newSeq[byte]()
    let length = ? pubkey.toBytes(res)
    var res = newSeqUninit[byte](0)
    let length = ?pubkey.toBytes(res)
    res.setLen(length)
    discard ? pubkey.toBytes(res)
    discard ?pubkey.toBytes(res)
    ok(res)
  else:
    err(EcKeyIncorrectError)
@@ -491,10 +484,10 @@ proc getBytes*(sig: EcSignature): EcResult[seq[byte]] =
  ## Serialize EC signature ``sig`` to ASN.1 DER binary form and return it.
  if isNil(sig):
    return err(EcSignatureError)
  var res = newSeq[byte]()
  let length = ? sig.toBytes(res)
  var res = newSeqUninit[byte](0)
  let length = ?sig.toBytes(res)
  res.setLen(length)
  discard ? sig.toBytes(res)
  discard ?sig.toBytes(res)
  ok(res)

proc getRawBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
@@ -502,10 +495,10 @@ proc getRawBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
  if isNil(seckey):
    return err(EcKeyIncorrectError)
  if seckey.key.curve in EcSupportedCurvesCint:
    var res = newSeq[byte]()
    let length = ? seckey.toRawBytes(res)
    var res = newSeqUninit[byte](0)
    let length = ?seckey.toRawBytes(res)
    res.setLen(length)
    discard ? seckey.toRawBytes(res)
    discard ?seckey.toRawBytes(res)
    ok(res)
  else:
    err(EcKeyIncorrectError)
@@ -515,10 +508,10 @@ proc getRawBytes*(pubkey: EcPublicKey): EcResult[seq[byte]] =
  if isNil(pubkey):
    return err(EcKeyIncorrectError)
  if pubkey.key.curve in EcSupportedCurvesCint:
    var res = newSeq[byte]()
    let length = ? pubkey.toRawBytes(res)
    var res = newSeqUninit[byte](0)
    let length = ?pubkey.toRawBytes(res)
    res.setLen(length)
    discard ? pubkey.toRawBytes(res)
    discard ?pubkey.toRawBytes(res)
    return ok(res)
  else:
    return err(EcKeyIncorrectError)
@@ -527,10 +520,10 @@ proc getRawBytes*(sig: EcSignature): EcResult[seq[byte]] =
  ## Serialize EC signature ``sig`` to raw binary form and return it.
  if isNil(sig):
    return err(EcSignatureError)
  var res = newSeq[byte]()
  let length = ? sig.toBytes(res)
  var res = newSeqUninit[byte](0)
  let length = ?sig.toBytes(res)
  res.setLen(length)
  discard ? sig.toBytes(res)
  discard ?sig.toBytes(res)
  ok(res)

proc `==`*(pubkey1, pubkey2: EcPublicKey): bool =
@@ -550,8 +543,10 @@ proc `==`*(pubkey1, pubkey2: EcPublicKey): bool =
    let op2 = pubkey2.getOffset()
    if op1 == -1 or op2 == -1:
      return false
    return CT.isEqual(pubkey1.buffer.toOpenArray(op1, pubkey1.key.qlen - 1),
                      pubkey2.buffer.toOpenArray(op2, pubkey2.key.qlen - 1))
    return CT.isEqual(
      pubkey1.buffer.toOpenArray(op1, pubkey1.key.qlen - 1),
      pubkey2.buffer.toOpenArray(op2, pubkey2.key.qlen - 1),
    )

proc `==`*(seckey1, seckey2: EcPrivateKey): bool =
  ## Returns ``true`` if both keys ``seckey1`` and ``seckey2`` are equal.
@@ -570,8 +565,10 @@ proc `==`*(seckey1, seckey2: EcPrivateKey): bool =
    let op2 = seckey2.getOffset()
    if op1 == -1 or op2 == -1:
      return false
    return CT.isEqual(seckey1.buffer.toOpenArray(op1, seckey1.key.xlen - 1),
                      seckey2.buffer.toOpenArray(op2, seckey2.key.xlen - 1))
    return CT.isEqual(
      seckey1.buffer.toOpenArray(op1, seckey1.key.xlen - 1),
      seckey2.buffer.toOpenArray(op2, seckey2.key.xlen - 1),
    )

proc `==`*(a, b: EcSignature): bool =
  ## Return ``true`` if both signatures ``a`` and ``b`` are equal.
@@ -605,26 +602,26 @@ proc init*(key: var EcPrivateKey, data: openArray[byte]): Result[void, Asn1Error

  var ab = Asn1Buffer.init(data)

  field = ? ab.read()
  field = ?ab.read()

  if field.kind != Asn1Tag.Sequence:
    return err(Asn1Error.Incorrect)

  var ib = field.getBuffer()

  field = ? ib.read()
  field = ?ib.read()

  if field.kind != Asn1Tag.Integer:
    return err(Asn1Error.Incorrect)
  if field.vint != 1'u64:
    return err(Asn1Error.Incorrect)

  raw = ? ib.read()
  raw = ?ib.read()

  if raw.kind != Asn1Tag.OctetString:
    return err(Asn1Error.Incorrect)

  oid = ? ib.read()
  oid = ?ib.read()

  if oid.kind != Asn1Tag.Oid:
    return err(Asn1Error.Incorrect)
@@ -658,19 +655,19 @@ proc init*(pubkey: var EcPublicKey, data: openArray[byte]): Result[void, Asn1Err

  var ab = Asn1Buffer.init(data)

  field = ? ab.read()
  field = ?ab.read()

  if field.kind != Asn1Tag.Sequence:
    return err(Asn1Error.Incorrect)

  var ib = field.getBuffer()
  field = ? ib.read()
  field = ?ib.read()

  if field.kind != Asn1Tag.Sequence:
    return err(Asn1Error.Incorrect)

  var ob = field.getBuffer()
  oid = ? ob.read()
  oid = ?ob.read()

  if oid.kind != Asn1Tag.Oid:
    return err(Asn1Error.Incorrect)
@@ -678,7 +675,7 @@ proc init*(pubkey: var EcPublicKey, data: openArray[byte]): Result[void, Asn1Err
  if oid != Asn1OidEcPublicKey:
    return err(Asn1Error.Incorrect)

  oid = ? ob.read()
  oid = ?ob.read()

  if oid.kind != Asn1Tag.Oid:
    return err(Asn1Error.Incorrect)
@@ -692,7 +689,7 @@ proc init*(pubkey: var EcPublicKey, data: openArray[byte]): Result[void, Asn1Err
  else:
    return err(Asn1Error.Incorrect)

  raw = ? ib.read()
  raw = ?ib.read()

  if raw.kind != Asn1Tag.BitString:
    return err(Asn1Error.Incorrect)
@@ -718,16 +715,14 @@ proc init*(sig: var EcSignature, data: openArray[byte]): Result[void, Asn1Error]
  else:
    err(Asn1Error.Incorrect)

proc init*[T: EcPKI](sospk: var T,
                     data: string): Result[void, Asn1Error] {.inline.} =
proc init*[T: EcPKI](sospk: var T, data: string): Result[void, Asn1Error] {.inline.} =
  ## Initialize EC `private key`, `public key` or `signature` ``sospk`` from
  ## ASN.1 DER hexadecimal string representation ``data``.
  ##
  ## Procedure returns ``Asn1Status``.
  sospk.init(ncrutils.fromHex(data))

proc init*(t: typedesc[EcPrivateKey],
           data: openArray[byte]): EcResult[EcPrivateKey] =
proc init*(t: typedesc[EcPrivateKey], data: openArray[byte]): EcResult[EcPrivateKey] =
  ## Initialize EC private key from ASN.1 DER binary representation ``data`` and
  ## return constructed object.
  var key: EcPrivateKey
@@ -737,8 +732,7 @@ proc init*(t: typedesc[EcPrivateKey],
  else:
    ok(key)

proc init*(t: typedesc[EcPublicKey],
           data: openArray[byte]): EcResult[EcPublicKey] =
proc init*(t: typedesc[EcPublicKey], data: openArray[byte]): EcResult[EcPublicKey] =
  ## Initialize EC public key from ASN.1 DER binary representation ``data`` and
  ## return constructed object.
  var key: EcPublicKey
@@ -748,8 +742,7 @@ proc init*(t: typedesc[EcPublicKey],
  else:
    ok(key)

proc init*(t: typedesc[EcSignature],
           data: openArray[byte]): EcResult[EcSignature] =
proc init*(t: typedesc[EcSignature], data: openArray[byte]): EcResult[EcSignature] =
  ## Initialize EC signature from raw binary representation ``data`` and
  ## return constructed object.
  var sig: EcSignature
@@ -832,8 +825,7 @@ proc initRaw*(sig: var EcSignature, data: openArray[byte]): bool =
  ##
  ## Procedure returns ``true`` on success, ``false`` otherwise.
  let length = len(data)
  if (length == Sig256Length) or (length == Sig384Length) or
     (length == Sig521Length):
  if (length == Sig256Length) or (length == Sig384Length) or (length == Sig521Length):
    result = true
  if result:
    sig = new EcSignature
@@ -846,8 +838,9 @@ proc initRaw*[T: EcPKI](sospk: var T, data: string): bool {.inline.} =
  ## Procedure returns ``true`` on success, ``false`` otherwise.
  result = sospk.initRaw(ncrutils.fromHex(data))

proc initRaw*(t: typedesc[EcPrivateKey],
              data: openArray[byte]): EcResult[EcPrivateKey] =
proc initRaw*(
    t: typedesc[EcPrivateKey], data: openArray[byte]
): EcResult[EcPrivateKey] =
  ## Initialize EC private key from raw binary representation ``data`` and
  ## return constructed object.
  var res: EcPrivateKey
@@ -856,8 +849,7 @@ proc initRaw*(t: typedesc[EcPrivateKey],
  else:
    ok(res)

proc initRaw*(t: typedesc[EcPublicKey],
              data: openArray[byte]): EcResult[EcPublicKey] =
proc initRaw*(t: typedesc[EcPublicKey], data: openArray[byte]): EcResult[EcPublicKey] =
  ## Initialize EC public key from raw binary representation ``data`` and
  ## return constructed object.
  var res: EcPublicKey
@@ -866,8 +858,7 @@ proc initRaw*(t: typedesc[EcPublicKey],
  else:
    ok(res)

proc initRaw*(t: typedesc[EcSignature],
              data: openArray[byte]): EcResult[EcSignature] =
proc initRaw*(t: typedesc[EcSignature], data: openArray[byte]): EcResult[EcSignature] =
  ## Initialize EC signature from raw binary representation ``data`` and
  ## return constructed object.
  var res: EcSignature
@@ -894,16 +885,19 @@ proc scalarMul*(pub: EcPublicKey, sec: EcPrivateKey): EcPublicKey =
    let poffset = key.getOffset()
    let soffset = sec.getOffset()
    if poffset >= 0 and soffset >= 0:
      let res = impl.mul(addr key.buffer[poffset],
                         key.key.qlen,
                         unsafeAddr sec.buffer[soffset],
                         sec.key.xlen,
                         key.key.curve)
      let res = impl.mul(
        addr key.buffer[poffset],
        key.key.qlen,
        unsafeAddr sec.buffer[soffset],
        sec.key.xlen,
        key.key.curve,
      )
      if res != 0:
        result = key

proc toSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey,
               data: var openArray[byte]): int =
proc toSecret*(
    pubkey: EcPublicKey, seckey: EcPrivateKey, data: var openArray[byte]
): int =
  ## Calculate ECDHE shared secret following Go's elliptic/curve approach, using
  ## remote public key ``pubkey`` and local private key ``seckey``, and store the
  ## shared secret to ``data``.
@@ -936,11 +930,12 @@ proc getSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey): seq[byte] =
  var data: array[Secret521Length, byte]
  let res = toSecret(pubkey, seckey, data)
  if res > 0:
    result = newSeq[byte](res)
    result = newSeqUninit[byte](res)
    copyMem(addr result[0], addr data[0], res)

proc sign*[T: byte|char](seckey: EcPrivateKey,
                         message: openArray[T]): EcResult[EcSignature] {.gcsafe.} =
proc sign*[T: byte | char](
    seckey: EcPrivateKey, message: openArray[T]
): EcResult[EcSignature] {.gcsafe.} =
  ## Get ECDSA signature of data ``message`` using private key ``seckey``.
  if isNil(seckey):
    return err(EcKeyIncorrectError)
@@ -949,7 +944,7 @@ proc sign*[T: byte|char](seckey: EcPrivateKey,
  var impl = ecGetDefault()
  if seckey.key.curve in EcSupportedCurvesCint:
    var sig = new EcSignature
    sig.buffer = newSeq[byte](256)
    sig.buffer = newSeqUninit[byte](256)
    var kv = addr sha256Vtable
    kv.init(addr hc.vtable)
    if len(message) > 0:
@@ -957,8 +952,8 @@ proc sign*[T: byte|char](seckey: EcPrivateKey,
    else:
      kv.update(addr hc.vtable, nil, 0)
    kv.out(addr hc.vtable, addr hash[0])
    let res = ecdsaI31SignAsn1(impl, kv, addr hash[0], addr seckey.key,
                               addr sig.buffer[0])
    let res =
      ecdsaI31SignAsn1(impl, kv, addr hash[0], addr seckey.key, addr sig.buffer[0])
    # Clear context with initial value
    kv.init(addr hc.vtable)
    if res != 0:
@@ -969,8 +964,9 @@ proc sign*[T: byte|char](seckey: EcPrivateKey,
  else:
    err(EcKeyIncorrectError)

proc verify*[T: byte|char](sig: EcSignature, message: openArray[T],
                           pubkey: EcPublicKey): bool {.inline.} =
proc verify*[T: byte | char](
    sig: EcSignature, message: openArray[T], pubkey: EcPublicKey
): bool {.inline.} =
  ## Verify ECDSA signature ``sig`` using public key ``pubkey`` and data
  ## ``message``.
  ##
@@ -988,9 +984,41 @@ proc verify*[T: byte|char](sig: EcSignature, message: openArray[T],
    else:
      kv.update(addr hc.vtable, nil, 0)
    kv.out(addr hc.vtable, addr hash[0])
    let res = ecdsaI31VrfyAsn1(impl, addr hash[0], uint(len(hash)),
                               unsafeAddr pubkey.key,
                               addr sig.buffer[0], uint(len(sig.buffer)))
    let res = ecdsaI31VrfyAsn1(
      impl,
      addr hash[0],
      uint(len(hash)),
      unsafeAddr pubkey.key,
      addr sig.buffer[0],
      uint(len(sig.buffer)),
    )
    # Clear context with initial value
    kv.init(addr hc.vtable)
    result = (res == 1)

type ECDHEScheme* = EcCurveKind

proc ephemeral*(scheme: ECDHEScheme, rng: var HmacDrbgContext): EcResult[EcKeyPair] =
  ## Generate ephemeral keys used to perform ECDHE.
  var keypair: EcKeyPair
  if scheme == Secp256r1:
    keypair = ?EcKeyPair.random(Secp256r1, rng)
  elif scheme == Secp384r1:
    keypair = ?EcKeyPair.random(Secp384r1, rng)
  elif scheme == Secp521r1:
    keypair = ?EcKeyPair.random(Secp521r1, rng)
  ok(keypair)

proc ephemeral*(scheme: string, rng: var HmacDrbgContext): EcResult[EcKeyPair] =
  ## Generate ephemeral keys used to perform ECDHE using string encoding.
  ##
  ## Currently supported encoding strings are P-256, P-384 and P-521; if the
  ## encoding string is not supported, a P-521 key will be generated.
  if scheme == "P-256":
    ephemeral(Secp256r1, rng)
  elif scheme == "P-384":
    ephemeral(Secp384r1, rng)
  elif scheme == "P-521":
    ephemeral(Secp521r1, rng)
  else:
    ephemeral(Secp521r1, rng)
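
Taken together, the procs above support a complete ECDHE exchange plus an ECDSA round-trip. A minimal sketch, assuming a seeded `HmacDrbgContext` and `results`' `expect` (proc name illustrative):

proc ecdheDemo(rng: var HmacDrbgContext) =
  let
    alice = ephemeral(Secp256r1, rng).expect("keypair")
    bob = ephemeral(Secp256r1, rng).expect("keypair")
    # Both sides must derive the same shared secret from the peer's public key.
    s1 = getSecret(bob.pubkey, alice.seckey)
    s2 = getSecret(alice.pubkey, bob.seckey)
  doAssert s1 == s2 and s1.len > 0
  # ECDSA sign/verify round-trip over the same curve.
  let msg = "nim-libp2p"
  let sig = alice.seckey.sign(msg).expect("signature")
  doAssert sig.verify(msg, alice.pubkey)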

File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -16,18 +16,38 @@ import bearssl/[kdf, hash]

type HkdfResult*[len: static int] = array[len, byte]

proc hkdf*[T: sha256; len: static int](_: type[T]; salt, ikm, info: openArray[byte]; outputs: var openArray[HkdfResult[len]]) =
  var
    ctx: HkdfContext
proc hkdf*[T: sha256, len: static int](
    _: type[T],
    salt, ikm, info: openArray[byte],
    outputs: var openArray[HkdfResult[len]],
) =
  var ctx: HkdfContext
  hkdfInit(
    ctx, addr sha256Vtable,
    if salt.len > 0: unsafeAddr salt[0] else: nil, csize_t(salt.len))
    ctx,
    addr sha256Vtable,
    if salt.len > 0:
      unsafeAddr salt[0]
    else:
      nil,
    csize_t(salt.len),
  )
  hkdfInject(
    ctx, if ikm.len > 0: unsafeAddr ikm[0] else: nil, csize_t(ikm.len))
    ctx,
    if ikm.len > 0:
      unsafeAddr ikm[0]
    else:
      nil,
    csize_t(ikm.len),
  )
  hkdfFlip(ctx)
  for i in 0..outputs.high:
  for i in 0 .. outputs.high:
    discard hkdfProduce(
      ctx,
      if info.len > 0: unsafeAddr info[0]
      else: nil, csize_t(info.len),
      addr outputs[i][0], csize_t(outputs[i].len))
      if info.len > 0:
        unsafeAddr info[0]
      else:
        nil,
      csize_t(info.len),
      addr outputs[i][0],
      csize_t(outputs[i].len),
    )
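
A minimal sketch of calling the reformatted `hkdf` above; the salt/ikm values and the proc name are illustrative only:

proc hkdfDemo() =
  let
    salt = @[0x01'u8, 0x02, 0x03]
    ikm = @[0xAA'u8, 0xBB]
    info = newSeq[byte]()
  # Derive two independent 32-byte output blocks from the same keying material.
  var outputs: array[2, HkdfResult[32]]
  sha256.hkdf(salt, ikm, info, outputs)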

@@ -11,43 +11,44 @@

{.push raises: [].}

import stew/[endians2, results, ctops]
import stew/[endians2, ctops]
import results
export results
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import ../utility
import ../utils/sequninit

type
  Asn1Error* {.pure.} = enum
    Overflow,
    Incomplete,
    Indefinite,
    Incorrect,
    NoSupport,
    Overflow
    Incomplete
    Indefinite
    Incorrect
    NoSupport
    Overrun

  Asn1Result*[T] = Result[T, Asn1Error]

  Asn1Class* {.pure.} = enum
    Universal = 0x00,
    Universal = 0x00
    Application = 0x01
    ContextSpecific = 0x02
    Private = 0x03

  Asn1Tag* {.pure.} = enum
    ## ASN.1 tag types enum
    NoSupport,
    Boolean,
    Integer,
    BitString,
    OctetString,
    Null,
    Oid,
    Sequence,
    NoSupport
    Boolean
    Integer
    BitString
    OctetString
    Null
    Oid
    Sequence
    Context

  Asn1Buffer* = object of RootObj
    ## ASN.1's message representation object
  Asn1Buffer* = object of RootObj ## ASN.1's message representation object
    buffer*: seq[byte]
    offset*: int
    length*: int
@@ -73,37 +74,23 @@ type
    idx*: int

const
  Asn1OidSecp256r1* = [
    0x2A'u8, 0x86'u8, 0x48'u8, 0xCE'u8, 0x3D'u8, 0x03'u8, 0x01'u8, 0x07'u8
  ]
  Asn1OidSecp256r1* =
    [0x2A'u8, 0x86'u8, 0x48'u8, 0xCE'u8, 0x3D'u8, 0x03'u8, 0x01'u8, 0x07'u8]
    ## Encoded OID for `secp256r1` curve (1.2.840.10045.3.1.7)
  Asn1OidSecp384r1* = [
    0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x22'u8
  ]
  Asn1OidSecp384r1* = [0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x22'u8]
    ## Encoded OID for `secp384r1` curve (1.3.132.0.34)
  Asn1OidSecp521r1* = [
    0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x23'u8
  ]
  Asn1OidSecp521r1* = [0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x23'u8]
    ## Encoded OID for `secp521r1` curve (1.3.132.0.35)
  Asn1OidSecp256k1* = [
    0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x0A'u8
  ]
  Asn1OidSecp256k1* = [0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x0A'u8]
    ## Encoded OID for `secp256k1` curve (1.3.132.0.10)
  Asn1OidEcPublicKey* = [
    0x2A'u8, 0x86'u8, 0x48'u8, 0xCE'u8, 0x3D'u8, 0x02'u8, 0x01'u8
  ]
  Asn1OidEcPublicKey* = [0x2A'u8, 0x86'u8, 0x48'u8, 0xCE'u8, 0x3D'u8, 0x02'u8, 0x01'u8]
    ## Encoded OID for Elliptic Curve Public Key (1.2.840.10045.2.1)
  Asn1OidRsaEncryption* = [
    0x2A'u8, 0x86'u8, 0x48'u8, 0x86'u8, 0xF7'u8, 0x0D'u8, 0x01'u8,
    0x01'u8, 0x01'u8
  ]
  Asn1OidRsaEncryption* =
    [0x2A'u8, 0x86'u8, 0x48'u8, 0x86'u8, 0xF7'u8, 0x0D'u8, 0x01'u8, 0x01'u8, 0x01'u8]
    ## Encoded OID for RSA Encryption (1.2.840.113549.1.1.1)
  Asn1True* = [0x01'u8, 0x01'u8, 0xFF'u8]
    ## Encoded boolean ``TRUE``.
  Asn1False* = [0x01'u8, 0x01'u8, 0x00'u8]
    ## Encoded boolean ``FALSE``.
  Asn1Null* = [0x05'u8, 0x00'u8]
    ## Encoded ``NULL`` value.
  Asn1True* = [0x01'u8, 0x01'u8, 0xFF'u8] ## Encoded boolean ``TRUE``.
  Asn1False* = [0x01'u8, 0x01'u8, 0x00'u8] ## Encoded boolean ``FALSE``.
  Asn1Null* = [0x05'u8, 0x00'u8] ## Encoded ``NULL`` value.

template toOpenArray*(ab: Asn1Buffer): untyped =
  toOpenArray(ab.buffer, ab.offset, ab.buffer.high)
@@ -120,7 +107,7 @@ template isEmpty*(ab: Asn1Buffer): bool =
template isEnough*(ab: Asn1Buffer, length: int64): bool =
  len(ab.buffer) >= ab.offset + length

proc len*[T: Asn1Buffer|Asn1Composite](abc: T): int {.inline.} =
proc len*[T: Asn1Buffer | Asn1Composite](abc: T): int {.inline.} =
  len(abc.buffer) - abc.offset

proc len*(field: Asn1Field): int {.inline.} =
@@ -129,31 +116,22 @@ proc len*(field: Asn1Field): int {.inline.} =
template getPtr*(field: untyped): pointer =
  cast[pointer](unsafeAddr field.buffer[field.offset])

proc extend*[T: Asn1Buffer|Asn1Composite](abc: var T, length: int) {.inline.} =
proc extend*[T: Asn1Buffer | Asn1Composite](abc: var T, length: int) {.inline.} =
  ## Extend buffer or composite's internal buffer by ``length`` octets.
  abc.buffer.setLen(len(abc.buffer) + length)

proc code*(tag: Asn1Tag): byte {.inline.} =
  ## Converts Nim ``tag`` enum to ASN.1 tag code.
  case tag:
  of Asn1Tag.NoSupport:
    0x00'u8
  of Asn1Tag.Boolean:
    0x01'u8
  of Asn1Tag.Integer:
    0x02'u8
  of Asn1Tag.BitString:
    0x03'u8
  of Asn1Tag.OctetString:
    0x04'u8
  of Asn1Tag.Null:
    0x05'u8
  of Asn1Tag.Oid:
    0x06'u8
  of Asn1Tag.Sequence:
    0x30'u8
  of Asn1Tag.Context:
    0xA0'u8
  case tag
  of Asn1Tag.NoSupport: 0x00'u8
  of Asn1Tag.Boolean: 0x01'u8
  of Asn1Tag.Integer: 0x02'u8
  of Asn1Tag.BitString: 0x03'u8
  of Asn1Tag.OctetString: 0x04'u8
  of Asn1Tag.Null: 0x05'u8
  of Asn1Tag.Oid: 0x06'u8
  of Asn1Tag.Sequence: 0x30'u8
  of Asn1Tag.Context: 0xA0'u8

proc asn1EncodeLength*(dest: var openArray[byte], length: uint64): int =
  ## Encode ASN.1 DER length part of TLV triple and return number of bytes
@@ -182,8 +160,7 @@ proc asn1EncodeLength*(dest: var openArray[byte], length: uint64): int =
  # than 9, so it is safe to convert it to `int`.
  int(res)
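
For reference, DER definite-length encoding as implemented by `asn1EncodeLength` uses a single octet for lengths below 0x80 and, otherwise, 0x80 or-ed with the octet count followed by the big-endian length bytes. A small sketch (buffer size illustrative):

var buf: array[8, byte]
doAssert asn1EncodeLength(buf, 0x2C'u64) == 1   # short form: [0x2C]
doAssert asn1EncodeLength(buf, 0x12C'u64) == 3  # long form: [0x82, 0x01, 0x2C]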

proc asn1EncodeInteger*(dest: var openArray[byte],
                        value: openArray[byte]): int =
proc asn1EncodeInteger*(dest: var openArray[byte], value: openArray[byte]): int =
  ## Encode big-endian binary representation of integer as ASN.1 DER `INTEGER`
  ## and return number of bytes (octets) used.
  ##
@@ -193,17 +170,16 @@ proc asn1EncodeInteger*(dest: var openArray[byte],
  var buffer: array[16, byte]
  var lenlen = 0

  let offset =
    block:
      var o = 0
      for i in 0 ..< len(value):
        if value[o] != 0x00:
          break
        inc(o)
      if o < len(value):
        o
      else:
        o - 1
  let offset = block:
    var o = 0
    for i in 0 ..< len(value):
      if value[o] != 0x00:
        break
      inc(o)
    if o < len(value):
      o
    else:
      o - 1

  let destlen =
    if len(value) > 0:
@@ -225,12 +201,10 @@ proc asn1EncodeInteger*(dest: var openArray[byte],
    if value[offset] >= 0x80'u8:
      dest[1 + lenlen] = 0x00'u8
      shift = 2
    copyMem(addr dest[shift + lenlen], unsafeAddr value[offset],
            len(value) - offset)
    copyMem(addr dest[shift + lenlen], unsafeAddr value[offset], len(value) - offset)
  destlen

proc asn1EncodeInteger*[T: SomeUnsignedInt](dest: var openArray[byte],
                                            value: T): int =
proc asn1EncodeInteger*[T: SomeUnsignedInt](dest: var openArray[byte], value: T): int =
  ## Encode Nim's unsigned integer as ASN.1 DER `INTEGER` and return number of
  ## bytes (octets) used.
  ##
@@ -265,8 +239,7 @@ proc asn1EncodeNull*(dest: var openArray[byte]): int =
    dest[1] = 0x00'u8
  res

proc asn1EncodeOctetString*(dest: var openArray[byte],
                            value: openArray[byte]): int =
proc asn1EncodeOctetString*(dest: var openArray[byte], value: openArray[byte]): int =
  ## Encode array of bytes as ASN.1 DER `OCTET STRING` and return number of
  ## bytes (octets) used.
  ##
@@ -283,8 +256,9 @@ proc asn1EncodeOctetString*(dest: var openArray[byte],
    copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
  res

proc asn1EncodeBitString*(dest: var openArray[byte],
                          value: openArray[byte], bits = 0): int =
proc asn1EncodeBitString*(
    dest: var openArray[byte], value: openArray[byte], bits = 0
): int =
  ## Encode array of bytes as ASN.1 DER `BIT STRING` and return number of bytes
  ## (octets) used.
  ##
@@ -305,7 +279,7 @@ proc asn1EncodeBitString*(dest: var openArray[byte],
  let bytelen = (bitlen + 7) shr 3
  # Number of unused bits
  let unused = (8 - (bitlen and 7)) and 7
  let mask = not((1'u8 shl unused) - 1'u8)
  let mask = not ((1'u8 shl unused) - 1'u8)
  var lenlen = asn1EncodeLength(buffer, uint64(bytelen + 1))
  let res = 1 + lenlen + 1 + len(value)
  if len(dest) >= res:
@@ -319,29 +293,6 @@ proc asn1EncodeBitString*(dest: var openArray[byte],
    dest[2 + lenlen + bytelen - 1] = lastbyte and mask
  res
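
A worked check of the unused-bits arithmetic above, for a 10-bit BIT STRING:

let bitlen = 10
doAssert (bitlen + 7) shr 3 == 2              # bytelen: two payload bytes
doAssert (8 - (bitlen and 7)) and 7 == 6      # six trailing bits are unused
doAssert not((1'u8 shl 6) - 1'u8) == 0xC0'u8  # mask keeps the top two bits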

proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte],
                                       value: T): int =
  var v = value
  if value <= cast[T](0x7F):
    if len(dest) >= 1:
      dest[0] = cast[byte](value)
    1
  else:
    var s = 0
    var res = 0
    while v != 0:
      v = v shr 7
      s += 7
      inc(res)
    if len(dest) >= res:
      var k = 0
      while s != 0:
        s -= 7
        dest[k] = cast[byte](((value shr s) and cast[T](0x7F)) or cast[T](0x80))
        inc(k)
      dest[k - 1] = dest[k - 1] and 0x7F'u8
    res

proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
  ## Encode array of bytes ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and return
  ## number of bytes (octets) used.
@@ -361,8 +312,7 @@ proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
    copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
  res

proc asn1EncodeSequence*(dest: var openArray[byte],
                         value: openArray[byte]): int =
proc asn1EncodeSequence*(dest: var openArray[byte], value: openArray[byte]): int =
  ## Encode ``value`` as ASN.1 DER `SEQUENCE` and return number of bytes
  ## (octets) used.
  ##
@@ -378,8 +328,7 @@ proc asn1EncodeSequence*(dest: var openArray[byte],
    copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
  res

proc asn1EncodeComposite*(dest: var openArray[byte],
                          value: Asn1Composite): int =
proc asn1EncodeComposite*(dest: var openArray[byte], value: Asn1Composite): int =
  ## Encode composite value and return number of bytes (octets) used.
  ##
  ## If length of ``dest`` is less than number of required bytes to encode
@@ -391,12 +340,12 @@ proc asn1EncodeComposite*(dest: var openArray[byte],
  if len(dest) >= res:
    dest[0] = value.tag.code()
    copyMem(addr dest[1], addr buffer[0], lenlen)
    copyMem(addr dest[1 + lenlen], unsafeAddr value.buffer[0],
            len(value.buffer))
    copyMem(addr dest[1 + lenlen], unsafeAddr value.buffer[0], len(value.buffer))
  res

proc asn1EncodeContextTag*(dest: var openArray[byte], value: openArray[byte],
                           tag: int): int =
proc asn1EncodeContextTag*(
    dest: var openArray[byte], value: openArray[byte], tag: int
): int =
  ## Encode ASN.1 DER `CONTEXT SPECIFIC TAG` ``tag`` for value ``value`` and
  ## return number of bytes (octets) used.
  ##
@@ -432,7 +381,7 @@ proc getLength(ab: var Asn1Buffer): Asn1Result[int] =
    return err(Asn1Error.Overflow)
  if ab.isEnough(octets):
    var lengthU: uint64 = 0
    for i in 0..<octets:
    for i in 0 ..< octets:
      lengthU = (lengthU shl 8) or safeConvert[uint64](ab.buffer[ab.offset + i + 1])
    if lengthU > uint64(int64.high):
      return err(Asn1Error.Overflow)
@@ -471,7 +420,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
  inclass = false
  while true:
    offset = ab.offset
    aclass = ? ab.getTag(tag)
    aclass = ?ab.getTag(tag)

    case aclass
    of Asn1Class.ContextSpecific:
@@ -480,9 +429,9 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
      else:
        inclass = true
        ttag = tag
        tlength = ? ab.getLength()
        tlength = ?ab.getLength()
    of Asn1Class.Universal:
      length = ? ab.getLength()
      length = ?ab.getLength()

      if inclass:
        if length >= tlength:
@@ -499,22 +448,26 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =

        let b = ab.buffer[ab.offset]
        if b != 0xFF'u8 and b != 0x00'u8:
            return err(Asn1Error.Incorrect)
          return err(Asn1Error.Incorrect)

        field = Asn1Field(kind: Asn1Tag.Boolean, klass: aclass,
                          index: ttag, offset: ab.offset,
                          length: 1, buffer: ab.buffer)
        field = Asn1Field(
          kind: Asn1Tag.Boolean,
          klass: aclass,
          index: ttag,
          offset: ab.offset,
          length: 1,
          buffer: ab.buffer,
        )
        field.vbool = (b == 0xFF'u8)
        ab.offset += 1
        return ok(field)

      of Asn1Tag.Integer.code():
        # INTEGER
        if length == 0:
          return err(Asn1Error.Incorrect)

        if not ab.isEnough(length):
            return err(Asn1Error.Incomplete)
          return err(Asn1Error.Incomplete)

        # Count number of leading zeroes
        var zc = 0
@@ -526,9 +479,14 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =

        if zc == 0:
          # Negative or Positive integer
          field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
                            index: ttag, offset: ab.offset,
                            length: length, buffer: ab.buffer)
          field = Asn1Field(
            kind: Asn1Tag.Integer,
            klass: aclass,
            index: ttag,
            offset: ab.offset,
            length: length,
            buffer: ab.buffer,
          )
          if (ab.buffer[ab.offset] and 0x80'u8) == 0x80'u8:
            # Negative integer
            if length <= 8:
@@ -538,54 +496,68 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
                  field.vint = (field.vint shl 8) or 0xFF'u64
                else:
                  let offset = ab.offset + i - (8 - length)
                  field.vint = (field.vint shl 8) or safeConvert[uint64](ab.buffer[offset])
                  field.vint =
                    (field.vint shl 8) or safeConvert[uint64](ab.buffer[offset])
          else:
            # Positive integer
            if length <= 8:
              for i in 0 ..< length:
                field.vint = (field.vint shl 8) or
                             safeConvert[uint64](ab.buffer[ab.offset + i])
                field.vint =
                  (field.vint shl 8) or safeConvert[uint64](ab.buffer[ab.offset + i])
          ab.offset += length
          return ok(field)
        else:
          if length == 1:
            # Zero value integer
            field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
                              index: ttag, offset: ab.offset,
                              length: length, vint: 0'u64,
                              buffer: ab.buffer)
            field = Asn1Field(
              kind: Asn1Tag.Integer,
              klass: aclass,
              index: ttag,
              offset: ab.offset,
              length: length,
              vint: 0'u64,
              buffer: ab.buffer,
            )
            ab.offset += length
            return ok(field)
          else:
            # Positive integer with leading zero
            field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
                              index: ttag, offset: ab.offset + 1,
                              length: length - 1, buffer: ab.buffer)
            field = Asn1Field(
              kind: Asn1Tag.Integer,
              klass: aclass,
              index: ttag,
              offset: ab.offset + 1,
              length: length - 1,
              buffer: ab.buffer,
            )
            if length <= 9:
              for i in 1 ..< length:
                field.vint = (field.vint shl 8) or
                             safeConvert[uint64](ab.buffer[ab.offset + i])
                field.vint =
                  (field.vint shl 8) or safeConvert[uint64](ab.buffer[ab.offset + i])
            ab.offset += length
            return ok(field)

      of Asn1Tag.BitString.code():
        # BIT STRING
        if length == 0:
          # BIT STRING should include `unused` bits field, so length should be
          # bigger than 1.
          return err(Asn1Error.Incorrect)

        elif length == 1:
          if ab.buffer[ab.offset] != 0x00'u8:
            return err(Asn1Error.Incorrect)
          else:
            # Zero-length BIT STRING.
            field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
                              index: ttag, offset: ab.offset + 1,
                              length: 0, ubits: 0, buffer: ab.buffer)
            field = Asn1Field(
              kind: Asn1Tag.BitString,
              klass: aclass,
              index: ttag,
              offset: ab.offset + 1,
              length: 0,
              ubits: 0,
              buffer: ab.buffer,
            )
            ab.offset += length
            return ok(field)

        else:
          if not ab.isEnough(length):
            return err(Asn1Error.Incomplete)
@@ -600,61 +572,79 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
            ## All unused bits should be set to `0`.
            return err(Asn1Error.Incorrect)

          field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
                            index: ttag, offset: ab.offset + 1,
                            length: length - 1, ubits: safeConvert[int](unused),
                            buffer: ab.buffer)
          field = Asn1Field(
            kind: Asn1Tag.BitString,
            klass: aclass,
            index: ttag,
            offset: ab.offset + 1,
            length: length - 1,
            ubits: safeConvert[int](unused),
            buffer: ab.buffer,
          )
          ab.offset += length
          return ok(field)

      of Asn1Tag.OctetString.code():
        # OCTET STRING
        if not ab.isEnough(length):
          return err(Asn1Error.Incomplete)

        field = Asn1Field(kind: Asn1Tag.OctetString, klass: aclass,
                          index: ttag, offset: ab.offset,
                          length: length, buffer: ab.buffer)
        field = Asn1Field(
          kind: Asn1Tag.OctetString,
          klass: aclass,
          index: ttag,
          offset: ab.offset,
          length: length,
          buffer: ab.buffer,
        )
        ab.offset += length
        return ok(field)

      of Asn1Tag.Null.code():
        # NULL
        if length != 0:
          return err(Asn1Error.Incorrect)

        field = Asn1Field(kind: Asn1Tag.Null, klass: aclass, index: ttag,
                          offset: ab.offset, length: 0, buffer: ab.buffer)
        field = Asn1Field(
          kind: Asn1Tag.Null,
          klass: aclass,
          index: ttag,
          offset: ab.offset,
          length: 0,
          buffer: ab.buffer,
        )
        ab.offset += length
        return ok(field)

      of Asn1Tag.Oid.code():
        # OID
        if not ab.isEnough(length):
          return err(Asn1Error.Incomplete)

        field = Asn1Field(kind: Asn1Tag.Oid, klass: aclass,
                          index: ttag, offset: ab.offset,
                          length: length, buffer: ab.buffer)
        field = Asn1Field(
          kind: Asn1Tag.Oid,
          klass: aclass,
          index: ttag,
          offset: ab.offset,
          length: length,
          buffer: ab.buffer,
        )
        ab.offset += length
        return ok(field)

      of Asn1Tag.Sequence.code():
        # SEQUENCE
        if not ab.isEnough(length):
          return err(Asn1Error.Incomplete)

        field = Asn1Field(kind: Asn1Tag.Sequence, klass: aclass,
                          index: ttag, offset: ab.offset,
                          length: length, buffer: ab.buffer)
        field = Asn1Field(
          kind: Asn1Tag.Sequence,
          klass: aclass,
          index: ttag,
          offset: ab.offset,
          length: length,
          buffer: ab.buffer,
        )
        ab.offset += length
        return ok(field)

      else:
        return err(Asn1Error.NoSupport)

      inclass = false
      ttag = 0
    else:
      return err(Asn1Error.NoSupport)

@@ -672,9 +662,9 @@ proc `==`*(field: Asn1Field, data: openArray[byte]): bool =
  if length > 0:
    if field.length == len(data):
      CT.isEqual(
        field.buffer.toOpenArray(field.offset,
                                 field.offset + field.length - 1),
        data.toOpenArray(0, field.length - 1))
        field.buffer.toOpenArray(field.offset, field.offset + field.length - 1),
        data.toOpenArray(0, field.length - 1),
      )
    else:
      false
  else:
@@ -690,15 +680,15 @@ proc init*(t: typedesc[Asn1Buffer], data: string): Asn1Buffer =

proc init*(t: typedesc[Asn1Buffer]): Asn1Buffer =
  ## Initialize empty ``Asn1Buffer``.
  Asn1Buffer(buffer: newSeq[byte]())
  Asn1Buffer(buffer: newSeqUninit[byte](0))

proc init*(t: typedesc[Asn1Composite], tag: Asn1Tag): Asn1Composite =
  ## Initialize ``Asn1Composite`` with tag ``tag``.
  Asn1Composite(tag: tag, buffer: newSeq[byte]())
  Asn1Composite(tag: tag, buffer: newSeqUninit[byte](0))

proc init*(t: typedesc[Asn1Composite], idx: int): Asn1Composite =
  ## Initialize ``Asn1Composite`` with context-specific tag id ``idx``.
  Asn1Composite(tag: Asn1Tag.Context, idx: idx, buffer: newSeq[byte]())
  Asn1Composite(tag: Asn1Tag.Context, idx: idx, buffer: newSeqUninit[byte](0))

proc `$`*(buffer: Asn1Buffer): string =
  ## Return string representation of ``buffer``.
@@ -752,13 +742,14 @@ proc `$`*(field: Asn1Field): string =
    res.add(ncrutils.toHex(field.toOpenArray()))
  res

proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag) =
proc write*[T: Asn1Buffer | Asn1Composite](abc: var T, tag: Asn1Tag) =
  ## Write empty value to buffer or composite with ``tag``.
  ##
  ## This procedure must be used to write `NULL`, `0` or empty `BIT STRING`,
  ## `OCTET STRING` types.
  doAssert(tag in {Asn1Tag.Null, Asn1Tag.Integer, Asn1Tag.BitString,
                   Asn1Tag.OctetString})
  doAssert(
    tag in {Asn1Tag.Null, Asn1Tag.Integer, Asn1Tag.BitString, Asn1Tag.OctetString}
  )
  var length: int
  if tag == Asn1Tag.Null:
    length = asn1EncodeNull(abc.toOpenArray())
@@ -780,22 +771,23 @@ proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag) =
    discard asn1EncodeOctetString(abc.toOpenArray(), tmp.toOpenArray(0, -1))
  abc.offset += length

proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, value: uint64) =
proc write*[T: Asn1Buffer | Asn1Composite](abc: var T, value: uint64) =
  ## Write uint64 ``value`` to buffer or composite as ASN.1 `INTEGER`.
  let length = asn1EncodeInteger(abc.toOpenArray(), value)
  abc.extend(length)
  discard asn1EncodeInteger(abc.toOpenArray(), value)
  abc.offset += length

proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, value: bool) =
proc write*[T: Asn1Buffer | Asn1Composite](abc: var T, value: bool) =
  ## Write bool ``value`` to buffer or composite as ASN.1 `BOOLEAN`.
  let length = asn1EncodeBoolean(abc.toOpenArray(), value)
  abc.extend(length)
  discard asn1EncodeBoolean(abc.toOpenArray(), value)
  abc.offset += length

proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag,
                                         value: openArray[byte], bits = 0) =
proc write*[T: Asn1Buffer | Asn1Composite](
    abc: var T, tag: Asn1Tag, value: openArray[byte], bits = 0
) =
  ## Write array ``value`` using ``tag``.
  ##
  ## This procedure is used to write ASN.1 `INTEGER`, `OCTET STRING`,
@@ -803,8 +795,9 @@ proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag,
  ##
  ## For `BIT STRING` you can use ``bits`` argument to specify number of used
  ## bits.
  doAssert(tag in {Asn1Tag.Integer, Asn1Tag.OctetString, Asn1Tag.BitString,
                   Asn1Tag.Oid})
  doAssert(
    tag in {Asn1Tag.Integer, Asn1Tag.OctetString, Asn1Tag.BitString, Asn1Tag.Oid}
  )
  var length: int
  if tag == Asn1Tag.Integer:
    length = asn1EncodeInteger(abc.toOpenArray(), value)
@@ -824,7 +817,7 @@ proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag,
    discard asn1EncodeOid(abc.toOpenArray(), value)
  abc.offset += length

proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, value: Asn1Composite) =
proc write*[T: Asn1Buffer | Asn1Composite](abc: var T, value: Asn1Composite) =
  doAssert(len(value) > 0, "Composite value not finished")
  var length: int
  if value.tag == Asn1Tag.Sequence:
@@ -841,6 +834,6 @@ proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, value: Asn1Composite) =
    discard asn1EncodeContextTag(abc.toOpenArray(), value.buffer, value.idx)
  abc.offset += length

proc finish*[T: Asn1Buffer|Asn1Composite](abc: var T) {.inline.} =
proc finish*[T: Asn1Buffer | Asn1Composite](abc: var T) {.inline.} =
  ## Finishes buffer or composite and prepares it for writing.
  abc.offset = 0
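
A short sketch of driving this writer API, mirroring how `toBytes` builds DER elsewhere in this patch (payload values and proc name illustrative):

proc derDemo(): seq[byte] =
  var b = Asn1Buffer.init()
  var p = Asn1Composite.init(Asn1Tag.Sequence)
  p.write(1'u64)                                 # INTEGER 1
  p.write(Asn1Tag.OctetString, [0xDE'u8, 0xAD])  # OCTET STRING payload
  p.finish()
  b.write(p)
  b.finish()
  b.buffer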

@@ -17,9 +17,11 @@

import bearssl/[rsa, rand, hash]
import minasn1
import stew/[results, ctops]
import results
import stew/ctops
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import ../utils/sequninit

export Asn1Error, results

@@ -30,32 +32,17 @@ const
  MinKeySize* = 2048
    ## Minimal allowed RSA key size in bits.
    ## https://github.com/libp2p/go-libp2p-core/blob/master/crypto/rsa_common.go#L13
  DefaultKeySize* = 3072
    ## Default RSA key size in bits.
  DefaultKeySize* = 3072 ## Default RSA key size in bits.

  RsaOidSha1* = [
    byte 0x05, 0x2B, 0x0E, 0x03, 0x02, 0x1A
  ]
  RsaOidSha1* = [byte 0x05, 0x2B, 0x0E, 0x03, 0x02, 0x1A]
    ## RSA PKCS#1.5 SHA-1 hash object identifier.
  RsaOidSha224* = [
    byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04,
    0x02, 0x04
  ]
  RsaOidSha224* = [byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04]
    ## RSA PKCS#1.5 SHA-224 hash object identifier.
  RsaOidSha256* = [
    byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04,
    0x02, 0x01
  ]
  RsaOidSha256* = [byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01]
    ## RSA PKCS#1.5 SHA-256 hash object identifier.
  RsaOidSha384* = [
    byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04,
    0x02, 0x02
  ]
  RsaOidSha384* = [byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02]
    ## RSA PKCS#1.5 SHA-384 hash object identifier.
  RsaOidSha512* = [
    byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04,
    0x02, 0x03
  ]
  RsaOidSha512* = [byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03]
    ## RSA PKCS#1.5 SHA-512 hash object identifier.

type
@@ -79,9 +66,9 @@ type
  RsaKP* = RsaPrivateKey | RsaKeyPair

  RsaError* = enum
    RsaGenError,
    RsaKeyIncorrectError,
    RsaSignatureError,
    RsaGenError
    RsaKeyIncorrectError
    RsaSignatureError
    RsaLowSecurityError

  RsaResult*[T] = Result[T, RsaError]

@@ -109,15 +96,18 @@ template getArray*(bs, os, ls: untyped): untyped =

template trimZeroes(b: seq[byte], pt, ptlen: untyped) =
  var length = ptlen
  for i in 0..<length:
  for i in 0 ..< length:
    if pt[] != byte(0x00):
      break
    pt = cast[ptr byte](cast[uint](pt) + 1)
    ptlen -= 1

proc random*[T: RsaKP](t: typedesc[T], rng: var HmacDrbgContext,
                       bits = DefaultKeySize,
                       pubexp = DefaultPublicExponent): RsaResult[T] =
proc random*[T: RsaKP](
    t: typedesc[T],
    rng: var HmacDrbgContext,
    bits = DefaultKeySize,
    pubexp = DefaultPublicExponent,
): RsaResult[T] =
  ## Generate new random RSA private key using BearSSL's HMAC-SHA256-DRBG
  ## algorithm.
  ##
@@ -135,14 +125,19 @@ proc random*[T: RsaKP](t: typedesc[T], rng: var HmacDrbgContext,
    length = eko + ((bits + 7) shr 3)

  let res = new T
  res.buffer = newSeq[byte](length)
  res.buffer = newSeqUninit[byte](length)

  var keygen = rsaKeygenGetDefault()

  if keygen(addr rng.vtable,
            addr res.seck, addr res.buffer[sko],
            addr res.pubk, addr res.buffer[pko],
            cuint(bits), pubexp) == 0:
  if keygen(
    addr rng.vtable,
    addr res.seck,
    addr res.buffer[sko],
    addr res.pubk,
    addr res.buffer[pko],
    cuint(bits),
    pubexp,
  ) == 0:
    return err(RsaGenError)

  let
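
A short sketch of this keypair generator; it assumes a seeded `HmacDrbgContext` and `results`' `expect` (proc name illustrative), and key sizes below `MinKeySize` are rejected elsewhere in this module:

proc rsaDemo(rng: var HmacDrbgContext) =
  # Default is a 3072-bit key (DefaultKeySize); 2048 is the allowed minimum.
  let pair = RsaKeyPair.random(rng).expect("keygen")
  let seckey = RsaPrivateKey.random(rng, bits = 2048).expect("keygen")
  doAssert not isNil(seckey) and not isNil(pair.pubkey)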
|
||||
@@ -170,11 +165,12 @@ proc copy*[T: RsaPKI](key: T): T =
|
||||
doAssert(not isNil(key))
|
||||
when T is RsaPrivateKey:
|
||||
if len(key.buffer) > 0:
|
||||
let length = key.seck.plen.uint + key.seck.qlen.uint + key.seck.dplen.uint +
|
||||
key.seck.dqlen.uint + key.seck.iqlen.uint + key.pubk.nlen.uint +
|
||||
key.pubk.elen.uint + key.pexplen.uint
|
||||
let length =
|
||||
key.seck.plen.uint + key.seck.qlen.uint + key.seck.dplen.uint +
|
||||
key.seck.dqlen.uint + key.seck.iqlen.uint + key.pubk.nlen.uint +
|
||||
key.pubk.elen.uint + key.pexplen.uint
|
||||
result = new RsaPrivateKey
|
||||
result.buffer = newSeq[byte](length)
|
||||
result.buffer = newSeqUninit[byte](length)
|
||||
let po: uint = 0
|
||||
let qo = po + key.seck.plen
|
||||
let dpo = qo + key.seck.qlen
|
||||
@@ -212,7 +208,7 @@ proc copy*[T: RsaPKI](key: T): T =
|
||||
if len(key.buffer) > 0:
|
||||
let length = key.key.nlen + key.key.elen
|
||||
result = new RsaPublicKey
|
||||
result.buffer = newSeq[byte](length)
|
||||
result.buffer = newSeqUninit[byte](length)
|
||||
let no = 0
|
||||
let eo = no + key.key.nlen
|
||||
copyMem(addr result.buffer[no], key.key.n, key.key.nlen)
|
||||
@@ -231,12 +227,11 @@ proc getPublicKey*(key: RsaPrivateKey): RsaPublicKey =
|
||||
doAssert(not isNil(key))
|
||||
let length = key.pubk.nlen + key.pubk.elen
|
||||
result = new RsaPublicKey
|
||||
result.buffer = newSeq[byte](length)
|
||||
result.buffer = newSeqUninit[byte](length)
|
||||
result.key.n = addr result.buffer[0]
|
||||
result.key.e = addr result.buffer[key.pubk.nlen]
|
||||
copyMem(addr result.buffer[0], cast[pointer](key.pubk.n), key.pubk.nlen)
|
||||
copyMem(addr result.buffer[key.pubk.nlen], cast[pointer](key.pubk.e),
|
||||
key.pubk.elen)
|
||||
copyMem(addr result.buffer[key.pubk.nlen], cast[pointer](key.pubk.e), key.pubk.elen)
|
||||
result.key.nlen = key.pubk.nlen
|
||||
result.key.elen = key.pubk.elen
|
||||
|
||||
@@ -248,7 +243,7 @@ proc pubkey*(pair: RsaKeyPair): RsaPublicKey {.inline.} =
|
||||
## Get RSA public key from pair ``pair``.
|
||||
result = cast[RsaPrivateKey](pair).getPublicKey()
|
||||
|
||||
proc clear*[T: RsaPKI|RsaKeyPair](pki: var T) =
|
||||
proc clear*[T: RsaPKI | RsaKeyPair](pki: var T) =
|
||||
## Wipe and clear EC private key, public key or scalar object.
|
||||
doAssert(not isNil(pki))
|
||||
when T is RsaPrivateKey:
|
||||
@@ -292,21 +287,14 @@ proc toBytes*(key: RsaPrivateKey, data: var openArray[byte]): RsaResult[int] =
|
||||
var b = Asn1Buffer.init()
|
||||
var p = Asn1Composite.init(Asn1Tag.Sequence)
|
||||
p.write(0'u64)
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pubk.n,
|
||||
key.pubk.nlen))
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pubk.e,
|
||||
key.pubk.elen))
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pubk.n, key.pubk.nlen))
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pubk.e, key.pubk.elen))
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pexp, key.pexplen))
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.p,
|
||||
key.seck.plen))
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.q,
|
||||
key.seck.qlen))
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.dp,
|
||||
key.seck.dplen))
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.dq,
|
||||
key.seck.dqlen))
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.iq,
|
||||
key.seck.iqlen))
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.p, key.seck.plen))
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.q, key.seck.qlen))
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.dp, key.seck.dplen))
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.dq, key.seck.dqlen))
|
||||
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.iq, key.seck.iqlen))
|
||||
p.finish()
|
||||
b.write(p)
|
||||
b.finish()
|
||||
@@ -370,8 +358,8 @@ proc getBytes*(key: RsaPrivateKey): RsaResult[seq[byte]] =
|
||||
## return it.
|
||||
if isNil(key):
|
||||
return err(RsaKeyIncorrectError)
|
||||
var res = newSeq[byte](4096)
|
||||
let length = ? key.toBytes(res)
|
||||
var res = newSeqUninit[byte](4096)
|
||||
let length = ?key.toBytes(res)
|
||||
if length > 0:
|
||||
res.setLen(length)
|
||||
ok(res)
|
||||
@@ -383,8 +371,8 @@ proc getBytes*(key: RsaPublicKey): RsaResult[seq[byte]] =
|
||||
## return it.
|
||||
if isNil(key):
|
||||
return err(RsaKeyIncorrectError)
|
||||
var res = newSeq[byte](4096)
|
||||
let length = ? key.toBytes(res)
|
||||
var res = newSeqUninit[byte](4096)
|
||||
let length = ?key.toBytes(res)
|
||||
if length > 0:
|
||||
res.setLen(length)
|
||||
ok(res)
|
||||
@@ -395,8 +383,8 @@ proc getBytes*(sig: RsaSignature): RsaResult[seq[byte]] =
|
||||
## Serialize RSA signature ``sig`` to raw binary form and return it.
|
||||
if isNil(sig):
|
||||
return err(RsaSignatureError)
|
||||
var res = newSeq[byte](4096)
|
||||
let length = ? sig.toBytes(res)
|
||||
var res = newSeqUninit[byte](4096)
|
||||
let length = ?sig.toBytes(res)
|
||||
if length > 0:
|
||||
res.setLen(length)
|
||||
ok(res)
|
||||
@@ -408,20 +396,19 @@ proc init*(key: var RsaPrivateKey, data: openArray[byte]): Result[void, Asn1Erro
|
||||
## ``data``.
|
||||
##
|
||||
## Procedure returns ``Asn1Status``.
|
||||
var
|
||||
field, rawn, rawpube, rawprie, rawp, rawq, rawdp, rawdq, rawiq: Asn1Field
|
||||
var field, rawn, rawpube, rawprie, rawp, rawq, rawdp, rawdq, rawiq: Asn1Field
|
||||
|
||||
# Asn1Field is not trivial so avoid too much Result
|
||||
|
||||
var ab = Asn1Buffer.init(data)
|
||||
field = ? ab.read()
|
||||
field = ?ab.read()
|
||||
|
||||
if field.kind != Asn1Tag.Sequence:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
var ib = field.getBuffer()
|
||||
|
||||
field = ? ib.read()
|
||||
field = ?ib.read()
|
||||
|
||||
if field.kind != Asn1Tag.Integer:
|
||||
return err(Asn1Error.Incorrect)
|
||||
@@ -429,48 +416,48 @@ proc init*(key: var RsaPrivateKey, data: openArray[byte]): Result[void, Asn1Erro
|
||||
if field.vint != 0'u64:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
rawn = ? ib.read()
|
||||
rawn = ?ib.read()
|
||||
|
||||
if rawn.kind != Asn1Tag.Integer:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
rawpube = ? ib.read()
|
||||
rawpube = ?ib.read()
|
||||
|
||||
if rawpube.kind != Asn1Tag.Integer:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
rawprie = ? ib.read()
|
||||
rawprie = ?ib.read()
|
||||
|
||||
if rawprie.kind != Asn1Tag.Integer:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
rawp = ? ib.read()
|
||||
rawp = ?ib.read()
|
||||
|
||||
if rawp.kind != Asn1Tag.Integer:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
rawq = ? ib.read()
|
||||
rawq = ?ib.read()
|
||||
|
||||
if rawq.kind != Asn1Tag.Integer:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
rawdp = ? ib.read()
|
||||
rawdp = ?ib.read()
|
||||
|
||||
if rawdp.kind != Asn1Tag.Integer:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
rawdq = ? ib.read()
|
||||
rawdq = ?ib.read()
|
||||
|
||||
if rawdq.kind != Asn1Tag.Integer:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
rawiq = ? ib.read()
|
||||
rawiq = ?ib.read()
|
||||
|
||||
if rawiq.kind != Asn1Tag.Integer:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
if len(rawn) >= (MinKeySize shr 3) and len(rawp) > 0 and len(rawq) > 0 and
|
||||
len(rawdp) > 0 and len(rawdq) > 0 and len(rawiq) > 0:
|
||||
len(rawdp) > 0 and len(rawdq) > 0 and len(rawiq) > 0:
|
||||
key = new RsaPrivateKey
|
||||
key.buffer = @data
|
||||
key.pubk.n = addr key.buffer[rawn.offset]
|
||||
@@ -502,52 +489,52 @@ proc init*(key: var RsaPublicKey, data: openArray[byte]): Result[void, Asn1Error
|
||||
var field, rawn, rawe: Asn1Field
|
||||
var ab = Asn1Buffer.init(data)
|
||||
|
||||
field = ? ab.read()
|
||||
field = ?ab.read()
|
||||
|
||||
if field.kind != Asn1Tag.Sequence:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
var ib = field.getBuffer()
|
||||
|
||||
field = ? ib.read()
|
||||
field = ?ib.read()
|
||||
|
||||
if field.kind != Asn1Tag.Sequence:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
var ob = field.getBuffer()
|
||||
|
||||
field = ? ob.read()
|
||||
field = ?ob.read()
|
||||
|
||||
if field.kind != Asn1Tag.Oid:
|
||||
return err(Asn1Error.Incorrect)
|
||||
elif field != Asn1OidRsaEncryption:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
field = ? ob.read()
|
||||
field = ?ob.read()
|
||||
|
||||
if field.kind != Asn1Tag.Null:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
field = ? ib.read()
|
||||
field = ?ib.read()
|
||||
|
||||
if field.kind != Asn1Tag.BitString:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
var vb = field.getBuffer()
|
||||
|
||||
field = ? vb.read()
|
||||
field = ?vb.read()
|
||||
|
||||
if field.kind != Asn1Tag.Sequence:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
var sb = field.getBuffer()
|
||||
|
||||
rawn = ? sb.read()
|
||||
rawn = ?sb.read()
|
||||
|
||||
if rawn.kind != Asn1Tag.Integer:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
rawe = ? sb.read()
|
||||
rawe = ?sb.read()
|
||||
|
||||
if rawe.kind != Asn1Tag.Integer:
|
||||
return err(Asn1Error.Incorrect)
|
||||
@@ -575,16 +562,16 @@ proc init*(sig: var RsaSignature, data: openArray[byte]): Result[void, Asn1Error
|
||||
else:
|
||||
err(Asn1Error.Incorrect)
|
||||
|
||||
proc init*[T: RsaPKI](sospk: var T,
|
||||
data: string): Result[void, Asn1Error] {.inline.} =
|
||||
proc init*[T: RsaPKI](sospk: var T, data: string): Result[void, Asn1Error] {.inline.} =
|
||||
## Initialize RSA `private key`, `public key` or `signature` ``sospk`` from
|
||||
## hexadecimal string representation ``data``.
|
||||
##
|
||||
## Procedure returns ``Result[void, Asn1Error]``.
|
||||
sospk.init(ncrutils.fromHex(data))
|
||||
|
||||
proc init*(t: typedesc[RsaPrivateKey],
|
||||
data: openArray[byte]): RsaResult[RsaPrivateKey] =
|
||||
proc init*(
|
||||
t: typedesc[RsaPrivateKey], data: openArray[byte]
|
||||
): RsaResult[RsaPrivateKey] =
|
||||
## Initialize RSA private key from ASN.1 DER binary representation ``data``
|
||||
## and return constructed object.
|
||||
var res: RsaPrivateKey
|
||||
@@ -593,8 +580,7 @@ proc init*(t: typedesc[RsaPrivateKey],
|
||||
else:
|
||||
ok(res)
|
||||
|
||||
proc init*(t: typedesc[RsaPublicKey],
|
||||
data: openArray[byte]): RsaResult[RsaPublicKey] =
|
||||
proc init*(t: typedesc[RsaPublicKey], data: openArray[byte]): RsaResult[RsaPublicKey] =
|
||||
## Initialize RSA public key from ASN.1 DER binary representation ``data``
|
||||
## and return constructed object.
|
||||
var res: RsaPublicKey
|
||||
@@ -603,8 +589,7 @@ proc init*(t: typedesc[RsaPublicKey],
|
||||
else:
|
||||
ok(res)
|
||||
|
||||
proc init*(t: typedesc[RsaSignature],
|
||||
data: openArray[byte]): RsaResult[RsaSignature] =
|
||||
proc init*(t: typedesc[RsaSignature], data: openArray[byte]): RsaResult[RsaSignature] =
|
||||
## Initialize RSA signature from raw binary representation ``data`` and
|
||||
## return constructed object.
|
||||
var res: RsaSignature
|
||||
@@ -631,14 +616,11 @@ proc `$`*(key: RsaPrivateKey): string =
|
||||
result.add("\nq = ")
|
||||
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.q, key.seck.qlen)))
|
||||
result.add("\ndp = ")
|
||||
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.dp,
|
||||
key.seck.dplen)))
|
||||
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.dp, key.seck.dplen)))
|
||||
result.add("\ndq = ")
|
||||
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.dq,
|
||||
key.seck.dqlen)))
|
||||
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.dq, key.seck.dqlen)))
|
||||
result.add("\niq = ")
|
||||
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.iq,
|
||||
key.seck.iqlen)))
|
||||
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.iq, key.seck.iqlen)))
|
||||
result.add("\npre = ")
|
||||
result.add(ncrutils.toHex(getArray(key.buffer, key.pexp, key.pexplen)))
|
||||
result.add("\nm = ")
|
||||
@@ -684,22 +666,37 @@ proc `==`*(a, b: RsaPrivateKey): bool =
|
||||
else:
|
||||
if a.seck.nBitlen == b.seck.nBitlen:
|
||||
if a.seck.nBitlen > 0'u:
|
||||
let r1 = CT.isEqual(getArray(a.buffer, a.seck.p, a.seck.plen),
|
||||
getArray(b.buffer, b.seck.p, b.seck.plen))
|
||||
let r2 = CT.isEqual(getArray(a.buffer, a.seck.q, a.seck.qlen),
|
||||
getArray(b.buffer, b.seck.q, b.seck.qlen))
|
||||
let r3 = CT.isEqual(getArray(a.buffer, a.seck.dp, a.seck.dplen),
|
||||
getArray(b.buffer, b.seck.dp, b.seck.dplen))
|
||||
let r4 = CT.isEqual(getArray(a.buffer, a.seck.dq, a.seck.dqlen),
|
||||
getArray(b.buffer, b.seck.dq, b.seck.dqlen))
|
||||
let r5 = CT.isEqual(getArray(a.buffer, a.seck.iq, a.seck.iqlen),
|
||||
getArray(b.buffer, b.seck.iq, b.seck.iqlen))
|
||||
let r6 = CT.isEqual(getArray(a.buffer, a.pexp, a.pexplen),
|
||||
getArray(b.buffer, b.pexp, b.pexplen))
|
||||
let r7 = CT.isEqual(getArray(a.buffer, a.pubk.n, a.pubk.nlen),
|
||||
getArray(b.buffer, b.pubk.n, b.pubk.nlen))
|
||||
let r8 = CT.isEqual(getArray(a.buffer, a.pubk.e, a.pubk.elen),
|
||||
getArray(b.buffer, b.pubk.e, b.pubk.elen))
|
||||
let r1 = CT.isEqual(
|
||||
getArray(a.buffer, a.seck.p, a.seck.plen),
|
||||
getArray(b.buffer, b.seck.p, b.seck.plen),
|
||||
)
|
||||
let r2 = CT.isEqual(
|
||||
getArray(a.buffer, a.seck.q, a.seck.qlen),
|
||||
getArray(b.buffer, b.seck.q, b.seck.qlen),
|
||||
)
|
||||
let r3 = CT.isEqual(
|
||||
getArray(a.buffer, a.seck.dp, a.seck.dplen),
|
||||
getArray(b.buffer, b.seck.dp, b.seck.dplen),
|
||||
)
|
||||
let r4 = CT.isEqual(
|
||||
getArray(a.buffer, a.seck.dq, a.seck.dqlen),
|
||||
getArray(b.buffer, b.seck.dq, b.seck.dqlen),
|
||||
)
|
||||
let r5 = CT.isEqual(
|
||||
getArray(a.buffer, a.seck.iq, a.seck.iqlen),
|
||||
getArray(b.buffer, b.seck.iq, b.seck.iqlen),
|
||||
)
|
||||
let r6 = CT.isEqual(
|
||||
getArray(a.buffer, a.pexp, a.pexplen), getArray(b.buffer, b.pexp, b.pexplen)
|
||||
)
|
||||
let r7 = CT.isEqual(
|
||||
getArray(a.buffer, a.pubk.n, a.pubk.nlen),
|
||||
getArray(b.buffer, b.pubk.n, b.pubk.nlen),
|
||||
)
|
||||
let r8 = CT.isEqual(
|
||||
getArray(a.buffer, a.pubk.e, a.pubk.elen),
|
||||
getArray(b.buffer, b.pubk.e, b.pubk.elen),
|
||||
)
|
||||
r1 and r2 and r3 and r4 and r5 and r6 and r7 and r8
|
||||
else:
|
||||
true
|
||||
@@ -737,14 +734,17 @@ proc `==`*(a, b: RsaPublicKey): bool =
|
||||
elif isNil(b) and (not isNil(a)):
|
||||
false
|
||||
else:
|
||||
let r1 = CT.isEqual(getArray(a.buffer, a.key.n, a.key.nlen),
|
||||
getArray(b.buffer, b.key.n, b.key.nlen))
|
||||
let r2 = CT.isEqual(getArray(a.buffer, a.key.e, a.key.elen),
|
||||
getArray(b.buffer, b.key.e, b.key.elen))
|
||||
let r1 = CT.isEqual(
|
||||
getArray(a.buffer, a.key.n, a.key.nlen), getArray(b.buffer, b.key.n, b.key.nlen)
|
||||
)
|
||||
let r2 = CT.isEqual(
|
||||
getArray(a.buffer, a.key.e, a.key.elen), getArray(b.buffer, b.key.e, b.key.elen)
|
||||
)
|
||||
(r1 and r2)
|
||||
|
||||
proc sign*[T: byte|char](key: RsaPrivateKey,
|
||||
message: openArray[T]): RsaResult[RsaSignature] {.gcsafe.} =
|
||||
proc sign*[T: byte | char](
|
||||
key: RsaPrivateKey, message: openArray[T]
|
||||
): RsaResult[RsaSignature] {.gcsafe.} =
|
||||
## Get RSA PKCS1.5 signature of data ``message`` using SHA256 and private
|
||||
## key ``key``.
|
||||
if isNil(key):
|
||||
@@ -754,7 +754,7 @@ proc sign*[T: byte|char](key: RsaPrivateKey,
|
||||
var hash: array[32, byte]
|
||||
let impl = rsaPkcs1SignGetDefault()
|
||||
var res = new RsaSignature
|
||||
res.buffer = newSeq[byte]((key.seck.nBitlen + 7) shr 3)
|
||||
res.buffer = newSeqUninit[byte]((key.seck.nBitlen + 7) shr 3)
|
||||
var kv = addr sha256Vtable
|
||||
kv.init(addr hc.vtable)
|
||||
if len(message) > 0:
|
||||
@@ -763,16 +763,16 @@ proc sign*[T: byte|char](key: RsaPrivateKey,
|
||||
kv.update(addr hc.vtable, nil, 0)
|
||||
kv.out(addr hc.vtable, addr hash[0])
|
||||
var oid = RsaOidSha256
|
||||
let implRes = impl(addr oid[0],
|
||||
addr hash[0], uint(len(hash)),
|
||||
addr key.seck, addr res.buffer[0])
|
||||
let implRes =
|
||||
impl(addr oid[0], addr hash[0], uint(len(hash)), addr key.seck, addr res.buffer[0])
|
||||
if implRes == 0:
|
||||
err(RsaSignatureError)
|
||||
else:
|
||||
ok(res)
|
||||
|
||||
proc verify*[T: byte|char](sig: RsaSignature, message: openArray[T],
|
||||
pubkey: RsaPublicKey): bool {.inline.} =
|
||||
proc verify*[T: byte | char](
|
||||
sig: RsaSignature, message: openArray[T], pubkey: RsaPublicKey
|
||||
): bool {.inline.} =
|
||||
## Verify RSA signature ``sig`` using public key ``pubkey`` and data
|
||||
## ``message``.
|
||||
##
|
||||
@@ -792,8 +792,13 @@ proc verify*[T: byte|char](sig: RsaSignature, message: openArray[T],
|
||||
kv.update(addr hc.vtable, nil, 0)
|
||||
kv.out(addr hc.vtable, addr hash[0])
|
||||
var oid = RsaOidSha256
|
||||
let res = impl(addr sig.buffer[0], uint(len(sig.buffer)),
|
||||
addr oid[0],
|
||||
uint(len(check)), addr pubkey.key, addr check[0])
|
||||
let res = impl(
|
||||
addr sig.buffer[0],
|
||||
uint(len(sig.buffer)),
|
||||
addr oid[0],
|
||||
uint(len(check)),
|
||||
addr pubkey.key,
|
||||
addr check[0],
|
||||
)
|
||||
if res == 1:
|
||||
result = equalMem(addr check[0], addr hash[0], len(hash))
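For orientation, a hedged usage sketch of the RSA API refactored above; the DER-encoded key material, the message, and the import path are assumptions, not part of the diff.

import libp2p/crypto/rsa

proc rsaRoundTrip(privDer, pubDer: seq[byte], message: string) =
  # init from ASN.1 DER, per the init procs above
  let key = RsaPrivateKey.init(privDer).expect("valid private key DER")
  let pubkey = RsaPublicKey.init(pubDer).expect("valid public key DER")
  # sign() hashes with SHA256 and produces a PKCS1.5 signature
  let sig = key.sign(message).expect("signing should succeed")
  # verify() recomputes the digest and compares it against the signature
  doAssert sig.verify(message, pubkey)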
|
||||
|
||||
@@ -10,20 +10,16 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import bearssl/rand
|
||||
import
|
||||
secp256k1,
|
||||
stew/[byteutils, results],
|
||||
nimcrypto/[hash, sha2]
|
||||
import secp256k1, results, stew/byteutils, nimcrypto/[hash, sha2]
|
||||
import ../utils/sequninit
|
||||
|
||||
export sha2, results, rand
|
||||
|
||||
const
SkRawPrivateKeySize* = 256 div 8
## Size of private key in octets (bytes)
SkRawPrivateKeySize* = 256 div 8 ## Size of private key in octets (bytes)
SkRawSignatureSize* = SkRawPrivateKeySize * 2 + 1
## Size of signature in octets (bytes)
SkRawPublicKeySize* = SkRawPrivateKeySize + 1
## Size of public key in octets (bytes)
SkRawPublicKeySize* = SkRawPrivateKeySize + 1 ## Size of public key in octets (bytes)
|
||||
|
||||
# This is extremely confusing, but it's to avoid confusion between the Eth standard and the Secp standard
|
||||
type
|
||||
@@ -56,31 +52,31 @@ template pubkey*(v: SkKeyPair): SkPublicKey =
|
||||
proc init*(key: var SkPrivateKey, data: openArray[byte]): SkResult[void] =
|
||||
## Initialize Secp256k1 `private key` ``key`` from raw binary
|
||||
## representation ``data``.
|
||||
key = SkPrivateKey(? secp256k1.SkSecretKey.fromRaw(data))
|
||||
key = SkPrivateKey(?secp256k1.SkSecretKey.fromRaw(data))
|
||||
ok()
|
||||
|
||||
proc init*(key: var SkPrivateKey, data: string): SkResult[void] =
|
||||
## Initialize Secp256k1 `private key` ``key`` from hexadecimal string
|
||||
## representation ``data``.
|
||||
key = SkPrivateKey(? secp256k1.SkSecretKey.fromHex(data))
|
||||
key = SkPrivateKey(?secp256k1.SkSecretKey.fromHex(data))
|
||||
ok()
|
||||
|
||||
proc init*(key: var SkPublicKey, data: openArray[byte]): SkResult[void] =
|
||||
## Initialize Secp256k1 `public key` ``key`` from raw binary
|
||||
## representation ``data``.
|
||||
key = SkPublicKey(? secp256k1.SkPublicKey.fromRaw(data))
|
||||
key = SkPublicKey(?secp256k1.SkPublicKey.fromRaw(data))
|
||||
ok()
|
||||
|
||||
proc init*(key: var SkPublicKey, data: string): SkResult[void] =
|
||||
## Initialize Secp256k1 `public key` ``key`` from hexadecimal string
|
||||
## representation ``data``.
|
||||
key = SkPublicKey(? secp256k1.SkPublicKey.fromHex(data))
|
||||
key = SkPublicKey(?secp256k1.SkPublicKey.fromHex(data))
|
||||
ok()
|
||||
|
||||
proc init*(sig: var SkSignature, data: openArray[byte]): SkResult[void] =
|
||||
## Initialize Secp256k1 `signature` ``sig`` from raw binary
|
||||
## representation ``data``.
|
||||
sig = SkSignature(? secp256k1.SkSignature.fromDer(data))
|
||||
sig = SkSignature(?secp256k1.SkSignature.fromDer(data))
|
||||
ok()
|
||||
|
||||
proc init*(sig: var SkSignature, data: string): SkResult[void] =
|
||||
@@ -90,8 +86,9 @@ proc init*(sig: var SkSignature, data: string): SkResult[void] =
|
||||
var buffer: seq[byte]
|
||||
try:
|
||||
buffer = hexToSeqByte(data)
|
||||
except ValueError:
|
||||
return err("secp: Hex to bytes failed")
|
||||
except ValueError as e:
|
||||
let errMsg = "secp: Hex to bytes failed: " & e.msg
|
||||
return err(errMsg.cstring)
|
||||
init(sig, buffer)
|
||||
|
||||
proc init*(t: typedesc[SkPrivateKey], data: openArray[byte]): SkResult[SkPrivateKey] =
|
||||
@@ -151,7 +148,7 @@ proc toBytes*(key: SkPrivateKey, data: var openArray[byte]): SkResult[int] =
|
||||
## Procedure returns number of bytes (octets) needed to store
|
||||
## Secp256k1 private key.
|
||||
if len(data) >= SkRawPrivateKeySize:
|
||||
data[0..<SkRawPrivateKeySize] = SkSecretKey(key).toRaw()
|
||||
data[0 ..< SkRawPrivateKeySize] = SkSecretKey(key).toRaw()
|
||||
ok(SkRawPrivateKeySize)
|
||||
else:
|
||||
err("secp: Not enough bytes")
|
||||
@@ -163,7 +160,7 @@ proc toBytes*(key: SkPublicKey, data: var openArray[byte]): SkResult[int] =
|
||||
## Procedure returns number of bytes (octets) needed to store
|
||||
## Secp256k1 public key.
|
||||
if len(data) >= SkRawPublicKeySize:
|
||||
data[0..<SkRawPublicKeySize] = secp256k1.SkPublicKey(key).toRawCompressed()
|
||||
data[0 ..< SkRawPublicKeySize] = secp256k1.SkPublicKey(key).toRawCompressed()
|
||||
ok(SkRawPublicKeySize)
|
||||
else:
|
||||
err("secp: Not enough bytes")
|
||||
@@ -186,26 +183,32 @@ proc getBytes*(key: SkPublicKey): seq[byte] {.inline.} =
|
||||
|
||||
proc getBytes*(sig: SkSignature): seq[byte] {.inline.} =
|
||||
## Serialize Secp256k1 `signature` and return it.
|
||||
result = newSeq[byte](72)
|
||||
result = newSeqUninit[byte](72)
|
||||
let length = toBytes(sig, result)
|
||||
result.setLen(length)
|
||||
|
||||
proc sign*[T: byte|char](key: SkPrivateKey, msg: openArray[T]): SkSignature =
|
||||
proc sign*[T: byte | char](key: SkPrivateKey, msg: openArray[T]): SkSignature =
|
||||
## Sign message `msg` using private key `key` and return signature object.
|
||||
let h = sha256.digest(msg)
|
||||
SkSignature(sign(SkSecretKey(key), SkMessage(h.data)))
|
||||
|
||||
proc verify*[T: byte|char](sig: SkSignature, msg: openArray[T],
|
||||
key: SkPublicKey): bool =
|
||||
proc verify*[T: byte | char](
|
||||
sig: SkSignature, msg: openArray[T], key: SkPublicKey
|
||||
): bool =
|
||||
let h = sha256.digest(msg)
|
||||
verify(secp256k1.SkSignature(sig), SkMessage(h.data), secp256k1.SkPublicKey(key))
|
||||
|
||||
func clear*(key: var SkPrivateKey) = clear(secp256k1.SkSecretKey(key))
|
||||
func clear*(key: var SkPrivateKey) =
|
||||
clear(secp256k1.SkSecretKey(key))
|
||||
|
||||
func `$`*(key: SkPrivateKey): string = $secp256k1.SkSecretKey(key)
|
||||
func `$`*(key: SkPublicKey): string = $secp256k1.SkPublicKey(key)
|
||||
func `$`*(key: SkSignature): string = $secp256k1.SkSignature(key)
|
||||
func `$`*(key: SkKeyPair): string = $secp256k1.SkKeyPair(key)
|
||||
func `$`*(key: SkPrivateKey): string =
|
||||
$secp256k1.SkSecretKey(key)
|
||||
func `$`*(key: SkPublicKey): string =
|
||||
$secp256k1.SkPublicKey(key)
|
||||
func `$`*(key: SkSignature): string =
|
||||
$secp256k1.SkSignature(key)
|
||||
func `$`*(key: SkKeyPair): string =
|
||||
$secp256k1.SkKeyPair(key)
|
||||
|
||||
func `==`*(a, b: SkPrivateKey): bool =
|
||||
secp256k1.SkSecretKey(a) == secp256k1.SkSecretKey(b)
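A minimal sketch of the Secp256k1 wrappers after this cleanup; rawPriv and rawPub stand in for 32-byte and 33-byte raw keys obtained elsewhere and are purely illustrative.

proc skRoundTrip(rawPriv, rawPub: seq[byte]) =
  var priv: SkPrivateKey
  priv.init(rawPriv).expect("valid raw private key")
  var pub: SkPublicKey
  pub.init(rawPub).expect("valid raw public key")
  # sign() digests the message with SHA256 internally
  let sig = priv.sign("hello")
  doAssert sig.verify("hello", pub) # true only for the matching key pair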
|
||||
|
||||
File diff suppressed because it is too large
@@ -12,23 +12,24 @@
|
||||
## This module implements a pool of StreamTransport.
|
||||
import chronos
|
||||
|
||||
const
DefaultPoolSize* = 8
## Default pool size
const DefaultPoolSize* = 8 ## Default pool size

type
ConnectionFlags = enum
None, Busy
None
Busy

PoolItem = object
transp*: StreamTransport
flags*: set[ConnectionFlags]

PoolState = enum
Connecting, Connected, Closing, Closed
Connecting
Connected
Closing
Closed

TransportPool* = ref object
## Transports pool object
TransportPool* = ref object ## Transports pool object
transports: seq[PoolItem]
busyCount: int
state: PoolState
|
||||
@@ -45,13 +46,16 @@ proc waitAll[T](futs: seq[Future[T]]): Future[void] =
|
||||
dec(counter)
|
||||
if counter == 0:
|
||||
retFuture.complete()
|
||||
|
||||
for fut in futs:
|
||||
fut.addCallback(cb)
|
||||
return retFuture
|
||||
|
||||
proc newPool*(address: TransportAddress, poolsize: int = DefaultPoolSize,
|
||||
bufferSize = DefaultStreamBufferSize,
|
||||
): Future[TransportPool] {.async.} =
|
||||
proc newPool*(
|
||||
address: TransportAddress,
|
||||
poolsize: int = DefaultPoolSize,
|
||||
bufferSize = DefaultStreamBufferSize,
|
||||
): Future[TransportPool] {.async: (raises: [CancelledError]).} =
|
||||
## Establish pool of connections to address ``address`` with size
|
||||
## ``poolsize``.
|
||||
var pool = new TransportPool
|
||||
@@ -59,12 +63,12 @@ proc newPool*(address: TransportAddress, poolsize: int = DefaultPoolSize,
|
||||
pool.transports = newSeq[PoolItem](poolsize)
|
||||
var conns = newSeq[Future[StreamTransport]](poolsize)
|
||||
pool.state = Connecting
|
||||
for i in 0..<poolsize:
|
||||
for i in 0 ..< poolsize:
|
||||
conns[i] = connect(address, bufferSize)
|
||||
# Waiting for all connections to be established.
|
||||
await waitAll(conns)
|
||||
# Checking connections and preparing pool.
|
||||
for i in 0..<poolsize:
|
||||
for i in 0 ..< poolsize:
|
||||
if conns[i].failed:
|
||||
raise conns[i].error
|
||||
else:
|
||||
@@ -76,7 +80,9 @@ proc newPool*(address: TransportAddress, poolsize: int = DefaultPoolSize,
|
||||
pool.state = Connected
|
||||
result = pool
|
||||
|
||||
proc acquire*(pool: TransportPool): Future[StreamTransport] {.async.} =
|
||||
proc acquire*(
|
||||
pool: TransportPool
|
||||
): Future[StreamTransport] {.async: (raises: [CancelledError, TransportPoolError]).} =
|
||||
## Acquire non-busy connection from pool ``pool``.
|
||||
var transp: StreamTransport
|
||||
if pool.state in {Connected}:
|
||||
@@ -98,7 +104,9 @@ proc acquire*(pool: TransportPool): Future[StreamTransport] {.async.} =
|
||||
raise newException(TransportPoolError, "Pool is not ready!")
|
||||
result = transp
|
||||
|
||||
proc release*(pool: TransportPool, transp: StreamTransport) =
|
||||
proc release*(
|
||||
pool: TransportPool, transp: StreamTransport
|
||||
) {.async: (raises: [TransportPoolError]).} =
|
||||
## Release connection ``transp`` back to pool ``pool``.
|
||||
if pool.state in {Connected, Closing}:
|
||||
var found = false
|
||||
@@ -114,7 +122,9 @@ proc release*(pool: TransportPool, transp: StreamTransport) =
|
||||
else:
|
||||
raise newException(TransportPoolError, "Pool is not ready!")
|
||||
|
||||
proc join*(pool: TransportPool) {.async.} =
|
||||
proc join*(
|
||||
pool: TransportPool
|
||||
) {.async: (raises: [TransportPoolError, CancelledError]).} =
|
||||
## Waits for all connections to become available.
|
||||
if pool.state in {Connected, Closing}:
|
||||
while true:
|
||||
@@ -126,7 +136,9 @@ proc join*(pool: TransportPool) {.async.} =
|
||||
elif pool.state == Connecting:
|
||||
raise newException(TransportPoolError, "Pool is not ready!")
|
||||
|
||||
proc close*(pool: TransportPool) {.async.} =
|
||||
proc close*(
|
||||
pool: TransportPool
|
||||
) {.async: (raises: [TransportPoolError, CancelledError]).} =
|
||||
## Closes transports pool ``pool`` and releases all resources.
|
||||
if pool.state == Connected:
|
||||
pool.state = Closing
|
||||
@@ -134,7 +146,7 @@ proc close*(pool: TransportPool) {.async.} =
|
||||
await pool.join()
|
||||
# Closing all transports
|
||||
var pending = newSeq[Future[void]](len(pool.transports))
|
||||
for i in 0..<len(pool.transports):
|
||||
for i in 0 ..< len(pool.transports):
|
||||
let transp = pool.transports[i].transp
|
||||
transp.close()
|
||||
pending[i] = transp.join()
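An end-to-end sketch of the pool API with its new async signatures; the address, payload, and pool size below are illustrative only.

proc demoPool() {.async.} =
  let pool = await newPool(initTAddress("127.0.0.1:30333"), poolsize = 4)
  let transp = await pool.acquire() # marks the slot Busy
  try:
    discard await transp.write("ping")
  finally:
    await pool.release(transp) # release is now async and clears Busy
  await pool.close() # joins all slots, then closes every transport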
|
||||
|
||||
@@ -27,17 +27,24 @@
|
||||
## 7. Message: required bytes
|
||||
import os
|
||||
import nimcrypto/utils, stew/endians2
|
||||
import protobuf/minprotobuf, stream/connection, protocols/secure/secure,
|
||||
multiaddress, peerid, varint, muxers/mplex/coder
|
||||
import
|
||||
protobuf/minprotobuf,
|
||||
stream/connection,
|
||||
protocols/secure/secure,
|
||||
multiaddress,
|
||||
peerid,
|
||||
varint,
|
||||
muxers/mplex/coder
|
||||
|
||||
from times import getTime, toUnix, fromUnix, nanosecond, format, Time,
|
||||
NanosecondRange, initTime
|
||||
from times import
|
||||
getTime, toUnix, fromUnix, nanosecond, format, Time, NanosecondRange, initTime
|
||||
from strutils import toHex, repeat
|
||||
export peerid, multiaddress
|
||||
|
||||
type
|
||||
FlowDirection* = enum
|
||||
Outgoing, Incoming
|
||||
Outgoing
|
||||
Incoming
|
||||
|
||||
ProtoMessage* = object
|
||||
timestamp*: uint64
|
||||
@@ -48,11 +55,10 @@ type
|
||||
local*: Opt[MultiAddress]
|
||||
remote*: Opt[MultiAddress]
|
||||
|
||||
const
|
||||
libp2p_dump_dir* {.strdefine.} = "nim-libp2p"
|
||||
## default directory where all the dumps will be stored, if the path
|
||||
## relative it will be created in home directory. You can overload this path
|
||||
## using ``-d:libp2p_dump_dir=<otherpath>``.
|
||||
const libp2p_dump_dir* {.strdefine.} = "nim-libp2p"
|
||||
## default directory where all the dumps will be stored; if the path is
## relative, it will be created in the home directory. You can override this
## path using ``-d:libp2p_dump_dir=<otherpath>``.
|
||||
|
||||
proc getTimestamp(): uint64 =
|
||||
## This procedure is present because `stdlib.times` missing it.
|
||||
@@ -65,8 +71,7 @@ proc getTimedate(value: uint64): string =
|
||||
let time = initTime(int64(value div 1_000_000_000), value mod 1_000_000_000)
|
||||
time.format("yyyy-MM-dd HH:mm:ss'.'fffzzz")
|
||||
|
||||
proc dumpMessage*(conn: SecureConn, direction: FlowDirection,
|
||||
data: openArray[byte]) =
|
||||
proc dumpMessage*(conn: SecureConn, direction: FlowDirection, data: openArray[byte]) =
|
||||
## Store unencrypted message ``data`` to dump file, all the metadata will be
|
||||
## extracted from ``conn`` instance.
|
||||
var pb = initProtoBuffer(options = {WithVarintLength})
|
||||
@@ -87,7 +92,7 @@ proc dumpMessage*(conn: SecureConn, direction: FlowDirection,
|
||||
|
||||
# This is a debugging procedure, so it should not generate any exceptions,
# and we return early on every possible OS error.
|
||||
if not(dirExists(dirName)):
|
||||
if not (dirExists(dirName)):
|
||||
try:
|
||||
createDir(dirName)
|
||||
except CatchableError:
|
||||
@@ -153,13 +158,11 @@ iterator messages*(data: seq[byte]): Opt[ProtoMessage] =
|
||||
while offset < len(data):
|
||||
value = 0
|
||||
size = 0
|
||||
let res = PB.getUVarint(data.toOpenArray(offset, len(data) - 1),
|
||||
size, value)
|
||||
let res = PB.getUVarint(data.toOpenArray(offset, len(data) - 1), size, value)
|
||||
if res.isOk():
|
||||
if (value > 0'u64) and (value < uint64(len(data) - offset)):
|
||||
offset += size
|
||||
yield decodeDumpMessage(data.toOpenArray(offset,
|
||||
offset + int(value) - 1))
|
||||
yield decodeDumpMessage(data.toOpenArray(offset, offset + int(value) - 1))
|
||||
# value is previously checked to be less than len(data), which is an `int`.
|
||||
offset += int(value)
|
||||
else:
|
||||
@@ -179,10 +182,15 @@ proc dumpHex*(pbytes: openArray[byte], groupBy = 1, ascii = true): string =
|
||||
|
||||
for k in 0 ..< groupBy:
|
||||
let ch = pbytes[offset + k]
|
||||
ascii.add(if ord(ch) > 31 and ord(ch) < 127: char(ch) else: '.')
|
||||
ascii.add(
|
||||
if ord(ch) > 31 and ord(ch) < 127:
|
||||
char(ch)
|
||||
else:
|
||||
'.'
|
||||
)
|
||||
|
||||
let item =
|
||||
case groupBy:
|
||||
case groupBy
|
||||
of 1:
|
||||
toHex(pbytes[offset])
|
||||
of 2:
|
||||
@@ -204,8 +212,7 @@ proc dumpHex*(pbytes: openArray[byte], groupBy = 1, ascii = true): string =
|
||||
res.add("\p")
|
||||
|
||||
if (offset mod 16) != 0:
|
||||
let spacesCount = ((16 - (offset mod 16)) div groupBy) *
|
||||
(groupBy * 2 + 1) + 1
|
||||
let spacesCount = ((16 - (offset mod 16)) div groupBy) * (groupBy * 2 + 1) + 1
|
||||
res = res & repeat(' ', spacesCount)
|
||||
res = res & ascii
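As a quick illustration of the output shape (16 bytes per row, hex groups plus an optional ASCII gutter), a hedged call:

echo dumpHex([0x68'u8, 0x65, 0x6C, 0x6C, 0x6F], groupBy = 1)
# one row of hex pairs, padded to the row width, followed by: hello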
|
||||
|
||||
@@ -233,25 +240,30 @@ proc toString*(msg: ProtoMessage, dump = true): string =
|
||||
var res = getTimedate(msg.timestamp)
|
||||
let direction =
|
||||
case msg.direction
|
||||
of Incoming:
|
||||
" << "
|
||||
of Outgoing:
|
||||
" >> "
|
||||
let address =
|
||||
block:
|
||||
let local = block:
|
||||
msg.local.withValue(loc): "[" & $loc & "]"
|
||||
else: "[LOCAL]"
|
||||
let remote = block:
|
||||
msg.remote.withValue(rem): "[" & $rem & "]"
|
||||
else: "[REMOTE]"
|
||||
local & direction & remote
|
||||
of Incoming: " << "
|
||||
of Outgoing: " >> "
|
||||
let address = block:
|
||||
let local = block:
|
||||
msg.local.withValue(loc):
|
||||
"[" & $loc & "]"
|
||||
else:
|
||||
"[LOCAL]"
|
||||
let remote = block:
|
||||
msg.remote.withValue(rem):
|
||||
"[" & $rem & "]"
|
||||
else:
|
||||
"[REMOTE]"
|
||||
local & direction & remote
|
||||
let seqid = block:
|
||||
msg.seqID.wihValue(seqid): "seqID = " & $seqid & " "
|
||||
else: ""
|
||||
msg.seqID.withValue(seqid):
|
||||
"seqID = " & $seqid & " "
|
||||
else:
|
||||
""
|
||||
let mtype = block:
|
||||
msg.mtype.withValue(typ): "type = " & $typ & " "
|
||||
else: ""
|
||||
msg.mtype.withValue(typ):
|
||||
"type = " & $typ & " "
|
||||
else:
|
||||
""
|
||||
res.add(" ")
|
||||
res.add(address)
|
||||
res.add(" ")
|
||||
|
||||
@@ -10,67 +10,81 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos
|
||||
import stew/results
|
||||
import peerid,
|
||||
stream/connection,
|
||||
transports/transport
|
||||
import results
|
||||
import peerid, stream/connection, transports/transport, muxers/muxer
|
||||
|
||||
export results
|
||||
|
||||
type
|
||||
Dial* = ref object of RootObj
|
||||
DialFailedError* = object of LPError
|
||||
|
||||
method connect*(
|
||||
self: Dial,
|
||||
peerId: PeerId,
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out) {.async, base.} =
|
||||
self: Dial,
|
||||
peerId: PeerId,
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
dir = Direction.Out,
|
||||
) {.base, async: (raises: [DialFailedError, CancelledError]).} =
|
||||
## connect remote peer without negotiating
|
||||
## a protocol
|
||||
##
|
||||
|
||||
doAssert(false, "Not implemented!")
|
||||
doAssert(false, "[Dial.connect] abstract method not implemented!")
|
||||
|
||||
method connect*(
|
||||
self: Dial,
|
||||
address: MultiAddress,
|
||||
allowUnknownPeerId = false): Future[PeerId] {.async, base.} =
|
||||
self: Dial, address: MultiAddress, allowUnknownPeerId = false
|
||||
): Future[PeerId] {.base, async: (raises: [DialFailedError, CancelledError]).} =
|
||||
## Connects to a peer and retrieve its PeerId
|
||||
|
||||
doAssert(false, "Not implemented!")
|
||||
doAssert(false, "[Dial.connect] abstract method not implemented!")
|
||||
|
||||
method dial*(
|
||||
self: Dial,
|
||||
peerId: PeerId,
|
||||
protos: seq[string],
|
||||
): Future[Connection] {.async, base.} =
|
||||
self: Dial, peerId: PeerId, protos: seq[string]
|
||||
): Future[Connection] {.base, async: (raises: [DialFailedError, CancelledError]).} =
|
||||
## create a protocol stream over an
|
||||
## existing connection
|
||||
##
|
||||
|
||||
doAssert(false, "Not implemented!")
|
||||
doAssert(false, "[Dial.dial] abstract method not implemented!")
|
||||
|
||||
method dial*(
|
||||
self: Dial,
|
||||
peerId: PeerId,
|
||||
addrs: seq[MultiAddress],
|
||||
protos: seq[string],
|
||||
forceDial = false): Future[Connection] {.async, base.} =
|
||||
self: Dial,
|
||||
peerId: PeerId,
|
||||
addrs: seq[MultiAddress],
|
||||
protos: seq[string],
|
||||
forceDial = false,
|
||||
): Future[Connection] {.base, async: (raises: [DialFailedError, CancelledError]).} =
|
||||
## create a protocol stream and establish
|
||||
## a connection if one doesn't exist already
|
||||
##
|
||||
|
||||
doAssert(false, "Not implemented!")
|
||||
doAssert(false, "[Dial.dial] abstract method not implemented!")
|
||||
|
||||
method addTransport*(
|
||||
self: Dial,
|
||||
transport: Transport) {.base.} =
|
||||
doAssert(false, "Not implemented!")
|
||||
method addTransport*(self: Dial, transport: Transport) {.base.} =
|
||||
doAssert(false, "[Dial.addTransport] abstract method not implemented!")
|
||||
|
||||
method dialAndUpgrade*(
|
||||
self: Dial, peerId: Opt[PeerId], address: MultiAddress, dir = Direction.Out
|
||||
): Future[Muxer] {.base, async: (raises: [CancelledError]).} =
|
||||
doAssert(false, "[Dial.dialAndUpgrade] abstract method not implemented!")
|
||||
|
||||
method dialAndUpgrade*(
|
||||
self: Dial, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
|
||||
): Future[Muxer] {.
|
||||
base, async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
|
||||
.} =
|
||||
doAssert(false, "[Dial.dialAndUpgrade] abstract method not implemented!")
|
||||
|
||||
method negotiateStream*(
|
||||
self: Dial, conn: Connection, protos: seq[string]
|
||||
): Future[Connection] {.base, async: (raises: [CatchableError]).} =
|
||||
doAssert(false, "[Dial.negotiateStream] abstract method not implemented!")
|
||||
|
||||
method tryDial*(
|
||||
self: Dial,
|
||||
peerId: PeerId,
|
||||
addrs: seq[MultiAddress]): Future[Opt[MultiAddress]] {.async, base.} =
|
||||
doAssert(false, "Not implemented!")
|
||||
self: Dial, peerId: PeerId, addrs: seq[MultiAddress]
|
||||
): Future[Opt[MultiAddress]] {.
|
||||
base, async: (raises: [DialFailedError, CancelledError])
|
||||
.} =
|
||||
doAssert(false, "[Dial.tryDial] abstract method not implemented!")
|
||||
|
||||
@@ -9,24 +9,22 @@
|
||||
|
||||
import std/tables
|
||||
|
||||
import stew/results
|
||||
import pkg/[chronos,
|
||||
chronicles,
|
||||
metrics]
|
||||
import pkg/[chronos, chronicles, metrics, results]
|
||||
|
||||
import dial,
|
||||
peerid,
|
||||
peerinfo,
|
||||
peerstore,
|
||||
multicodec,
|
||||
muxers/muxer,
|
||||
multistream,
|
||||
connmanager,
|
||||
stream/connection,
|
||||
transports/transport,
|
||||
nameresolving/nameresolver,
|
||||
upgrademngrs/upgrade,
|
||||
errors
|
||||
import
|
||||
dial,
|
||||
peerid,
|
||||
peerinfo,
|
||||
peerstore,
|
||||
multicodec,
|
||||
muxers/muxer,
|
||||
multistream,
|
||||
connmanager,
|
||||
stream/connection,
|
||||
transports/transport,
|
||||
nameresolving/nameresolver,
|
||||
upgrademngrs/upgrade,
|
||||
errors
|
||||
|
||||
export dial, errors, results
|
||||
|
||||
@@ -37,37 +35,35 @@ declareCounter(libp2p_total_dial_attempts, "total attempted dials")
|
||||
declareCounter(libp2p_successful_dials, "dialed successful peers")
|
||||
declareCounter(libp2p_failed_dials, "failed dials")
|
||||
|
||||
type
|
||||
DialFailedError* = object of LPError
|
||||
|
||||
Dialer* = ref object of Dial
|
||||
localPeerId*: PeerId
|
||||
connManager: ConnManager
|
||||
dialLock: Table[PeerId, AsyncLock]
|
||||
transports: seq[Transport]
|
||||
peerStore: PeerStore
|
||||
nameResolver: NameResolver
|
||||
|
||||
proc dialAndUpgrade(
|
||||
self: Dialer,
|
||||
peerId: Opt[PeerId],
|
||||
hostname: string,
|
||||
address: MultiAddress,
|
||||
upgradeDir = Direction.Out):
|
||||
Future[Muxer] {.async.} =
|
||||
type Dialer* = ref object of Dial
|
||||
localPeerId*: PeerId
|
||||
connManager: ConnManager
|
||||
dialLock: Table[PeerId, AsyncLock]
|
||||
transports: seq[Transport]
|
||||
peerStore: PeerStore
|
||||
nameResolver: NameResolver
|
||||
|
||||
proc dialAndUpgrade*(
|
||||
self: Dialer,
|
||||
peerId: Opt[PeerId],
|
||||
hostname: string,
|
||||
address: MultiAddress,
|
||||
dir = Direction.Out,
|
||||
): Future[Muxer] {.async: (raises: [CancelledError]).} =
|
||||
for transport in self.transports: # for each transport
|
||||
if transport.handles(address): # check if it can dial it
|
||||
if transport.handles(address): # check if it can dial it
|
||||
trace "Dialing address", address, peerId = peerId.get(default(PeerId)), hostname
|
||||
let dialed =
|
||||
try:
|
||||
libp2p_total_dial_attempts.inc()
|
||||
await transport.dial(hostname, address, peerId)
|
||||
except CancelledError as exc:
|
||||
debug "Dialing canceled", err = exc.msg, peerId = peerId.get(default(PeerId))
|
||||
trace "Dialing canceled",
|
||||
description = exc.msg, peerId = peerId.get(default(PeerId))
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
debug "Dialing failed", err = exc.msg, peerId = peerId.get(default(PeerId))
|
||||
debug "Dialing failed",
|
||||
description = exc.msg, peerId = peerId.get(default(PeerId))
|
||||
libp2p_failed_dials.inc()
|
||||
return nil # Try the next address
|
||||
|
||||
@@ -75,78 +71,101 @@ proc dialAndUpgrade(
|
||||
|
||||
let mux =
|
||||
try:
|
||||
dialed.transportDir = upgradeDir
|
||||
await transport.upgrade(dialed, upgradeDir, peerId)
|
||||
# This is for the very specific case of a simultaneous dial during DCUtR. In this case, both sides will have
|
||||
# an Outbound direction at the transport level. Therefore we update the DCUtR initiator transport direction to Inbound.
|
||||
# The if below is more general and might handle other use cases in the future.
|
||||
if dialed.dir != dir:
|
||||
dialed.dir = dir
|
||||
await transport.upgrade(dialed, peerId)
|
||||
except CancelledError as exc:
|
||||
await dialed.close()
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
# If we failed to establish the connection through one transport,
|
||||
# we won't succeed through another - no use in trying again
|
||||
await dialed.close()
|
||||
debug "Upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
|
||||
if exc isnot CancelledError:
|
||||
if upgradeDir == Direction.Out:
|
||||
libp2p_failed_upgrades_outgoing.inc()
|
||||
else:
|
||||
libp2p_failed_upgrades_incoming.inc()
|
||||
debug "Connection upgrade failed",
|
||||
description = exc.msg, peerId = peerId.get(default(PeerId))
|
||||
if dialed.dir == Direction.Out:
|
||||
libp2p_failed_upgrades_outgoing.inc()
|
||||
else:
|
||||
libp2p_failed_upgrades_incoming.inc()
|
||||
|
||||
# Try other address
|
||||
return nil
|
||||
|
||||
doAssert not isNil(mux), "connection died after upgrade " & $upgradeDir
|
||||
doAssert not isNil(mux), "connection died after upgrade " & $dialed.dir
|
||||
debug "Dial successful", peerId = mux.connection.peerId
|
||||
return mux
|
||||
return nil
|
||||
|
||||
proc expandDnsAddr(
|
||||
self: Dialer,
|
||||
peerId: Opt[PeerId],
|
||||
address: MultiAddress): Future[seq[(MultiAddress, Opt[PeerId])]] {.async.} =
|
||||
|
||||
if not DNSADDR.matchPartial(address): return @[(address, peerId)]
|
||||
self: Dialer, peerId: Opt[PeerId], address: MultiAddress
|
||||
): Future[seq[(MultiAddress, Opt[PeerId])]] {.
|
||||
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
|
||||
.} =
|
||||
if not DNS.matchPartial(address):
|
||||
return @[(address, peerId)]
|
||||
if isNil(self.nameResolver):
|
||||
info "Can't resolve DNSADDR without NameResolver", ma=address
|
||||
info "Can't resolve DNSADDR without NameResolver", ma = address
|
||||
return @[]
|
||||
|
||||
trace "Start trying to resolve addresses"
|
||||
let
|
||||
toResolve =
|
||||
if peerId.isSome:
|
||||
address & MultiAddress.init(multiCodec("p2p"), peerId.tryGet()).tryGet()
|
||||
try:
|
||||
address & MultiAddress.init(multiCodec("p2p"), peerId.tryGet()).tryGet()
|
||||
except ResultError[void]:
|
||||
raiseAssert "checked with if"
|
||||
else:
|
||||
address
|
||||
resolved = await self.nameResolver.resolveDnsAddr(toResolve)
|
||||
|
||||
debug "resolved addresses",
|
||||
originalAddresses = toResolve, resolvedAddresses = resolved
|
||||
|
||||
for resolvedAddress in resolved:
|
||||
let lastPart = resolvedAddress[^1].tryGet()
|
||||
if lastPart.protoCode == Result[MultiCodec, string].ok(multiCodec("p2p")):
|
||||
let
|
||||
var peerIdBytes: seq[byte]
|
||||
try:
|
||||
peerIdBytes = lastPart.protoArgument().tryGet()
|
||||
addrPeerId = PeerId.init(peerIdBytes).tryGet()
|
||||
result.add((resolvedAddress[0..^2].tryGet(), Opt.some(addrPeerId)))
|
||||
except ResultError[string] as e:
|
||||
raiseAssert "expandDnsAddr failed in expandDnsAddr protoArgument: " & e.msg
|
||||
|
||||
let addrPeerId = PeerId.init(peerIdBytes).tryGet()
|
||||
result.add((resolvedAddress[0 ..^ 2].tryGet(), Opt.some(addrPeerId)))
|
||||
else:
|
||||
result.add((resolvedAddress, peerId))
|
||||
|
||||
proc dialAndUpgrade(
|
||||
self: Dialer,
|
||||
peerId: Opt[PeerId],
|
||||
addrs: seq[MultiAddress],
|
||||
upgradeDir = Direction.Out):
|
||||
Future[Muxer] {.async.} =
|
||||
|
||||
debug "Dialing peer", peerId = peerId.get(default(PeerId))
|
||||
proc dialAndUpgrade*(
|
||||
self: Dialer, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
|
||||
): Future[Muxer] {.
|
||||
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
|
||||
.} =
|
||||
debug "Dialing peer", peerId = peerId.get(default(PeerId)), addrs
|
||||
|
||||
for rawAddress in addrs:
|
||||
# resolve potential dnsaddr
|
||||
let addresses = await self.expandDnsAddr(peerId, rawAddress)
|
||||
|
||||
for (expandedAddress, addrPeerId) in addresses:
|
||||
# DNS resolution
|
||||
let
|
||||
hostname = expandedAddress.getHostname()
|
||||
resolvedAddresses =
|
||||
if isNil(self.nameResolver): @[expandedAddress]
|
||||
else: await self.nameResolver.resolveMAddress(expandedAddress)
|
||||
if isNil(self.nameResolver):
|
||||
@[expandedAddress]
|
||||
else:
|
||||
await self.nameResolver.resolveMAddress(expandedAddress)
|
||||
|
||||
debug "Expanded address and hostname",
|
||||
expandedAddress = expandedAddress,
|
||||
hostname = hostname,
|
||||
resolvedAddresses = resolvedAddresses
|
||||
|
||||
for resolvedAddress in resolvedAddresses:
|
||||
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, upgradeDir)
|
||||
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
|
||||
if not isNil(result):
|
||||
return result
|
||||
|
||||
@@ -159,57 +178,84 @@ proc tryReusingConnection(self: Dialer, peerId: PeerId): Opt[Muxer] =
|
||||
return Opt.some(muxer)
|
||||
|
||||
proc internalConnect(
|
||||
self: Dialer,
|
||||
peerId: Opt[PeerId],
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial: bool,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out):
|
||||
Future[Muxer] {.async.} =
|
||||
self: Dialer,
|
||||
peerId: Opt[PeerId],
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial: bool,
|
||||
reuseConnection = true,
|
||||
dir = Direction.Out,
|
||||
): Future[Muxer] {.async: (raises: [DialFailedError, CancelledError]).} =
|
||||
if Opt.some(self.localPeerId) == peerId:
|
||||
raise newException(CatchableError, "can't dial self!")
|
||||
raise newException(DialFailedError, "internalConnect can't dial self!")
|
||||
|
||||
# Ensure there's only one in-flight attempt per peer
|
||||
let lock = self.dialLock.mgetOrPut(peerId.get(default(PeerId)), newAsyncLock())
|
||||
try:
|
||||
await lock.acquire()
|
||||
|
||||
if reuseConnection:
|
||||
peerId.withValue(peerId):
|
||||
self.tryReusingConnection(peerId).withValue(mux):
|
||||
return mux
|
||||
|
||||
let slot = self.connManager.getOutgoingSlot(forceDial)
|
||||
let muxed =
|
||||
try:
|
||||
await self.dialAndUpgrade(peerId, addrs, upgradeDir)
|
||||
except CatchableError as exc:
|
||||
slot.release()
|
||||
raise exc
|
||||
slot.trackMuxer(muxed)
|
||||
if isNil(muxed): # None of the addresses connected
|
||||
raise newException(DialFailedError, "Unable to establish outgoing link")
|
||||
|
||||
await lock.acquire()
|
||||
defer:
|
||||
try:
|
||||
self.connManager.storeMuxer(muxed)
|
||||
await self.peerStore.identify(muxed)
|
||||
except CatchableError as exc:
|
||||
trace "Failed to finish outgoung upgrade", err=exc.msg
|
||||
await muxed.close()
|
||||
raise exc
|
||||
|
||||
return muxed
|
||||
finally:
|
||||
if lock.locked():
|
||||
lock.release()
|
||||
except AsyncLockError as e:
|
||||
raiseAssert "lock must have been acquired in line above: " & e.msg
|
||||
|
||||
if reuseConnection:
|
||||
peerId.withValue(peerId):
|
||||
self.tryReusingConnection(peerId).withValue(mux):
|
||||
return mux
|
||||
|
||||
let slot =
|
||||
try:
|
||||
self.connManager.getOutgoingSlot(forceDial)
|
||||
except TooManyConnectionsError as exc:
|
||||
raise newException(
|
||||
DialFailedError, "failed getOutgoingSlot in internalConnect: " & exc.msg, exc
|
||||
)
|
||||
|
||||
let muxed =
|
||||
try:
|
||||
await self.dialAndUpgrade(peerId, addrs, dir)
|
||||
except CancelledError as exc:
|
||||
slot.release()
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
slot.release()
|
||||
raise newException(
|
||||
DialFailedError, "failed dialAndUpgrade in internalConnect: " & exc.msg, exc
|
||||
)
|
||||
|
||||
slot.trackMuxer(muxed)
|
||||
if isNil(muxed): # None of the addresses connected
|
||||
raise newException(
|
||||
DialFailedError, "Unable to establish outgoing link in internalConnect"
|
||||
)
|
||||
|
||||
try:
|
||||
self.connManager.storeMuxer(muxed)
|
||||
await self.peerStore.identify(muxed)
|
||||
await self.connManager.triggerPeerEvents(
|
||||
muxed.connection.peerId,
|
||||
PeerEvent(kind: PeerEventKind.Identified, initiator: true),
|
||||
)
|
||||
return muxed
|
||||
except CancelledError as exc:
|
||||
await muxed.close()
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
trace "Failed to finish outgoing upgrade", description = exc.msg
|
||||
await muxed.close()
|
||||
raise newException(
|
||||
DialFailedError,
|
||||
"Failed to finish outgoing upgrade in internalConnect: " & exc.msg,
|
||||
exc,
|
||||
)
|
||||
|
||||
method connect*(
|
||||
self: Dialer,
|
||||
peerId: PeerId,
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out) {.async.} =
|
||||
self: Dialer,
|
||||
peerId: PeerId,
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
dir = Direction.Out,
|
||||
) {.async: (raises: [DialFailedError, CancelledError]).} =
|
||||
## connect remote peer without negotiating
|
||||
## a protocol
|
||||
##
|
||||
@@ -217,44 +263,41 @@ method connect*(
|
||||
if self.connManager.connCount(peerId) > 0 and reuseConnection:
|
||||
return
|
||||
|
||||
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, upgradeDir)
|
||||
discard
|
||||
await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, dir)
|
||||
|
||||
method connect*(
|
||||
self: Dialer,
|
||||
address: MultiAddress,
|
||||
allowUnknownPeerId = false): Future[PeerId] {.async.} =
|
||||
self: Dialer, address: MultiAddress, allowUnknownPeerId = false
|
||||
): Future[PeerId] {.async: (raises: [DialFailedError, CancelledError]).} =
|
||||
## Connects to a peer and retrieve its PeerId
|
||||
|
||||
parseFullAddress(address).toOpt().withValue(fullAddress):
|
||||
return (await self.internalConnect(
|
||||
Opt.some(fullAddress[0]),
|
||||
@[fullAddress[1]],
|
||||
false)).connection.peerId
|
||||
return (
|
||||
await self.internalConnect(Opt.some(fullAddress[0]), @[fullAddress[1]], false)
|
||||
).connection.peerId
|
||||
|
||||
if allowUnknownPeerId == false:
|
||||
raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
|
||||
raise newException(
|
||||
DialFailedError, "Address without PeerID and unknown peer id disabled in connect"
|
||||
)
|
||||
|
||||
return (await self.internalConnect(
|
||||
Opt.none(PeerId),
|
||||
@[address],
|
||||
false)).connection.peerId
|
||||
return
|
||||
(await self.internalConnect(Opt.none(PeerId), @[address], false)).connection.peerId
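A hedged call-site sketch for the connect overloads above; the dialer instance and the address literal are illustrative.

proc demoConnect(dialer: Dialer) {.async.} =
  let address = MultiAddress.init("/ip4/127.0.0.1/tcp/4001").tryGet()
  # allowUnknownPeerId lets us dial an address that carries no /p2p part
  let peerId = await dialer.connect(address, allowUnknownPeerId = true)
  echo "connected to ", peerId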
|
||||
|
||||
proc negotiateStream(
|
||||
self: Dialer,
|
||||
conn: Connection,
|
||||
protos: seq[string]): Future[Connection] {.async.} =
|
||||
proc negotiateStream*(
|
||||
self: Dialer, conn: Connection, protos: seq[string]
|
||||
): Future[Connection] {.async: (raises: [CatchableError]).} =
|
||||
trace "Negotiating stream", conn, protos
|
||||
let selected = await MultistreamSelect.select(conn, protos)
|
||||
if not protos.contains(selected):
|
||||
await conn.closeWithEOF()
|
||||
raise newException(DialFailedError, "Unable to select sub-protocol " & $protos)
|
||||
raise newException(DialFailedError, "Unable to select sub-protocol: " & $protos)
|
||||
|
||||
return conn
|
||||
|
||||
method tryDial*(
|
||||
self: Dialer,
|
||||
peerId: PeerId,
|
||||
addrs: seq[MultiAddress]): Future[Opt[MultiAddress]] {.async.} =
|
||||
self: Dialer, peerId: PeerId, addrs: seq[MultiAddress]
|
||||
): Future[Opt[MultiAddress]] {.async: (raises: [DialFailedError, CancelledError]).} =
|
||||
## Create a protocol stream in order to check
|
||||
## if a connection is possible.
|
||||
## Doesn't use the Connection Manager to save it.
|
||||
@@ -264,35 +307,45 @@ method tryDial*(
|
||||
try:
|
||||
let mux = await self.dialAndUpgrade(Opt.some(peerId), addrs)
|
||||
if mux.isNil():
|
||||
raise newException(DialFailedError, "No valid multiaddress")
|
||||
raise newException(DialFailedError, "No valid multiaddress in tryDial")
|
||||
await mux.close()
|
||||
return mux.connection.observedAddr
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
raise newException(DialFailedError, exc.msg)
|
||||
raise newException(DialFailedError, "tryDial failed: " & exc.msg, exc)
|
||||
|
||||
method dial*(
|
||||
self: Dialer,
|
||||
peerId: PeerId,
|
||||
protos: seq[string]): Future[Connection] {.async.} =
|
||||
self: Dialer, peerId: PeerId, protos: seq[string]
|
||||
): Future[Connection] {.async: (raises: [DialFailedError, CancelledError]).} =
|
||||
## create a protocol stream over an
|
||||
## existing connection
|
||||
##
|
||||
|
||||
trace "Dialing (existing)", peerId, protos
|
||||
let stream = await self.connManager.getStream(peerId)
|
||||
if stream.isNil:
|
||||
raise newException(DialFailedError, "Couldn't get muxed stream")
|
||||
|
||||
return await self.negotiateStream(stream, protos)
|
||||
try:
|
||||
let stream = await self.connManager.getStream(peerId)
|
||||
if stream.isNil:
|
||||
raise newException(
|
||||
DialFailedError,
|
||||
"Couldn't get muxed stream in dial for peer_id: " & shortLog(peerId),
|
||||
)
|
||||
return await self.negotiateStream(stream, protos)
|
||||
except CancelledError as exc:
|
||||
trace "Dial canceled", description = exc.msg
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
trace "Error dialing", description = exc.msg
|
||||
raise newException(DialFailedError, "failed dial existing: " & exc.msg)
|
||||
|
||||
method dial*(
|
||||
self: Dialer,
|
||||
peerId: PeerId,
|
||||
addrs: seq[MultiAddress],
|
||||
protos: seq[string],
|
||||
forceDial = false): Future[Connection] {.async.} =
|
||||
self: Dialer,
|
||||
peerId: PeerId,
|
||||
addrs: seq[MultiAddress],
|
||||
protos: seq[string],
|
||||
forceDial = false,
|
||||
): Future[Connection] {.async: (raises: [DialFailedError, CancelledError]).} =
|
||||
## create a protocol stream and establish
|
||||
## a connection if one doesn't exist already
|
||||
##
|
||||
@@ -301,11 +354,11 @@ method dial*(
|
||||
conn: Muxer
|
||||
stream: Connection
|
||||
|
||||
proc cleanup() {.async.} =
|
||||
if not(isNil(stream)):
|
||||
proc cleanup() {.async: (raises: []).} =
|
||||
if not (isNil(stream)):
|
||||
await stream.closeWithEOF()
|
||||
|
||||
if not(isNil(conn)):
|
||||
if not (isNil(conn)):
|
||||
await conn.close()
|
||||
|
||||
try:
|
||||
@@ -315,32 +368,36 @@ method dial*(
|
||||
stream = await self.connManager.getStream(conn)
|
||||
|
||||
if isNil(stream):
|
||||
raise newException(DialFailedError,
|
||||
"Couldn't get muxed stream")
|
||||
raise newException(
|
||||
DialFailedError,
|
||||
"Couldn't get muxed stream in new dial for remote_peer_id: " & shortLog(peerId),
|
||||
)
|
||||
|
||||
return await self.negotiateStream(stream, protos)
|
||||
except CancelledError as exc:
|
||||
trace "Dial canceled", conn
|
||||
trace "Dial canceled", conn, description = exc.msg
|
||||
await cleanup()
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
debug "Error dialing", conn, err = exc.msg
|
||||
debug "Error dialing", conn, description = exc.msg
|
||||
await cleanup()
|
||||
raise exc
|
||||
raise newException(DialFailedError, "failed new dial: " & exc.msg, exc)
|
||||
|
||||
method addTransport*(self: Dialer, t: Transport) =
|
||||
self.transports &= t
|
||||
|
||||
proc new*(
|
||||
T: type Dialer,
|
||||
localPeerId: PeerId,
|
||||
connManager: ConnManager,
|
||||
peerStore: PeerStore,
|
||||
transports: seq[Transport],
|
||||
nameResolver: NameResolver = nil): Dialer =
|
||||
|
||||
T(localPeerId: localPeerId,
|
||||
T: type Dialer,
|
||||
localPeerId: PeerId,
|
||||
connManager: ConnManager,
|
||||
peerStore: PeerStore,
|
||||
transports: seq[Transport],
|
||||
nameResolver: NameResolver = nil,
|
||||
): Dialer =
|
||||
T(
|
||||
localPeerId: localPeerId,
|
||||
connManager: connManager,
|
||||
transports: transports,
|
||||
peerStore: peerStore,
|
||||
nameResolver: nameResolver)
|
||||
nameResolver: nameResolver,
|
||||
)
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import std/sequtils
|
||||
import chronos, chronicles, stew/results
|
||||
import chronos, chronicles, results
|
||||
import ../errors
|
||||
|
||||
type
|
||||
@@ -33,12 +33,12 @@ proc ofType*[T](f: BaseAttr, _: type[T]): bool =
|
||||
proc to*[T](f: BaseAttr, _: type[T]): T =
|
||||
Attribute[T](f).value
|
||||
|
||||
proc add*[T](pa: var PeerAttributes,
|
||||
value: T) =
|
||||
pa.attributes.add(Attribute[T](
|
||||
proc add*[T](pa: var PeerAttributes, value: T) =
|
||||
pa.attributes.add(
|
||||
Attribute[T](
|
||||
value: value,
|
||||
comparator: proc(f: BaseAttr, c: BaseAttr): bool =
|
||||
f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T)
|
||||
f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T),
|
||||
)
|
||||
)
|
||||
|
||||
@@ -58,7 +58,8 @@ proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =
|
||||
Opt.none(T)
|
||||
|
||||
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [KeyError].} =
|
||||
pa{T}.valueOr: raise newException(KeyError, "Attritute not found")
|
||||
pa{T}.valueOr:
|
||||
raise newException(KeyError, "Attribute not found")
|
||||
|
||||
proc match*(pa, candidate: PeerAttributes): bool =
|
||||
for f in pa.attributes:
|
||||
@@ -78,16 +79,21 @@ type
|
||||
advertisementUpdated*: AsyncEvent
|
||||
advertiseLoop*: Future[void]
|
||||
|
||||
method request*(self: DiscoveryInterface, pa: PeerAttributes) {.async, base.} =
|
||||
doAssert(false, "Not implemented!")
|
||||
|
||||
method advertise*(self: DiscoveryInterface) {.async, base.} =
|
||||
doAssert(false, "Not implemented!")
|
||||
|
||||
type
|
||||
DiscoveryError* = object of LPError
|
||||
DiscoveryFinished* = object of LPError
|
||||
AdvertiseError* = object of DiscoveryError
|
||||
|
||||
method request*(
|
||||
self: DiscoveryInterface, pa: PeerAttributes
|
||||
) {.base, async: (raises: [DiscoveryError, CancelledError]).} =
|
||||
doAssert(false, "[DiscoveryInterface.request] abstract method not implemented!")
|
||||
|
||||
method advertise*(
|
||||
self: DiscoveryInterface
|
||||
) {.base, async: (raises: [CancelledError, AdvertiseError]).} =
|
||||
doAssert(false, "[DiscoveryInterface.advertise] abstract method not implemented!")
|
||||
|
||||
type
|
||||
DiscoveryQuery* = ref object
|
||||
attr: PeerAttributes
|
||||
peers: AsyncQueue[PeerAttributes]
|
||||
@@ -101,13 +107,13 @@ type
|
||||
proc add*(dm: DiscoveryManager, di: DiscoveryInterface) =
|
||||
dm.interfaces &= di
|
||||
|
||||
di.onPeerFound = proc (pa: PeerAttributes) =
|
||||
di.onPeerFound = proc(pa: PeerAttributes) =
|
||||
for query in dm.queries:
|
||||
if query.attr.match(pa):
|
||||
try:
|
||||
query.peers.putNoWait(pa)
|
||||
except AsyncQueueFullError as exc:
|
||||
debug "Cannot push discovered peer to queue"
|
||||
debug "Cannot push discovered peer to queue", description = exc.msg
|
||||
|
||||
proc request*(dm: DiscoveryManager, pa: PeerAttributes): DiscoveryQuery =
|
||||
var query = DiscoveryQuery(attr: pa, peers: newAsyncQueue[PeerAttributes]())
|
||||
@@ -122,30 +128,29 @@ proc request*[T](dm: DiscoveryManager, value: T): DiscoveryQuery =
|
||||
pa.add(value)
|
||||
return dm.request(pa)
|
||||
|
||||
proc advertise*(dm: DiscoveryManager, pa: PeerAttributes) =
|
||||
proc advertise*[T](dm: DiscoveryManager, value: T) =
|
||||
for i in dm.interfaces:
|
||||
i.toAdvertise = pa
|
||||
i.toAdvertise.add(value)
|
||||
if i.advertiseLoop.isNil:
|
||||
i.advertisementUpdated = newAsyncEvent()
|
||||
i.advertiseLoop = i.advertise()
|
||||
else:
|
||||
i.advertisementUpdated.fire()
|
||||
|
||||
proc advertise*[T](dm: DiscoveryManager, value: T) =
|
||||
var pa: PeerAttributes
|
||||
pa.add(value)
|
||||
dm.advertise(pa)
|
||||
|
||||
template forEach*(query: DiscoveryQuery, code: untyped) =
|
||||
## Will execute `code` for each discovered peer. The
|
||||
## peer attributes are available through the variable
|
||||
## `peer`
|
||||
|
||||
proc forEachInternal(q: DiscoveryQuery) {.async.} =
|
||||
proc forEachInternal(
|
||||
q: DiscoveryQuery
|
||||
) {.async: (raises: [CancelledError, DiscoveryError]).} =
|
||||
while true:
|
||||
let peer {.inject.} =
|
||||
try: await q.getPeer()
|
||||
except DiscoveryFinished: return
|
||||
try:
|
||||
await q.getPeer()
|
||||
except DiscoveryFinished:
|
||||
return
|
||||
code
|
||||
|
||||
asyncSpawn forEachInternal(query)
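A usage sketch for the query API above; dm is an already-configured DiscoveryManager and the namespace value is illustrative (RdvNamespace is defined in the rendezvous interface further down).

let query = dm.request(RdvNamespace("my-app"))
query.forEach:
  # `peer` is injected by the template; {} performs an Opt lookup
  echo "discovered: ", peer{PeerId}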
|
||||
@@ -153,22 +158,28 @@ template forEach*(query: DiscoveryQuery, code: untyped) =
|
||||
proc stop*(query: DiscoveryQuery) =
|
||||
query.finished = true
|
||||
for r in query.futs:
|
||||
if not r.finished(): r.cancel()
|
||||
if not r.finished():
|
||||
r.cancelSoon()
|
||||
|
||||
proc stop*(dm: DiscoveryManager) =
|
||||
for q in dm.queries:
|
||||
q.stop()
|
||||
for i in dm.interfaces:
|
||||
if isNil(i.advertiseLoop): continue
|
||||
i.advertiseLoop.cancel()
|
||||
if isNil(i.advertiseLoop):
|
||||
continue
|
||||
i.advertiseLoop.cancelSoon()
|
||||
|
||||
proc getPeer*(query: DiscoveryQuery): Future[PeerAttributes] {.async.} =
|
||||
proc getPeer*(
|
||||
query: DiscoveryQuery
|
||||
): Future[PeerAttributes] {.
|
||||
async: (raises: [CancelledError, DiscoveryError, DiscoveryFinished])
|
||||
.} =
|
||||
let getter = query.peers.popFirst()
|
||||
|
||||
try:
|
||||
await getter or allFinished(query.futs)
|
||||
except CancelledError as exc:
|
||||
getter.cancel()
|
||||
getter.cancelSoon()
|
||||
raise exc
|
||||
|
||||
if not finished(getter):
|
||||
|
||||
@@ -10,29 +10,30 @@

{.push raises: [].}

import chronos
-import ./discoverymngr,
-       ../protocols/rendezvous,
-       ../peerid
+import ./discoverymngr, ../protocols/rendezvous, ../peerid

type
  RendezVousInterface* = ref object of DiscoveryInterface
    rdv*: RendezVous
    timeToRequest: Duration
    timeToAdvertise: Duration
+    ttl: Duration

  RdvNamespace* = distinct string

proc `==`*(a, b: RdvNamespace): bool {.borrow.}

-method request*(self: RendezVousInterface, pa: PeerAttributes) {.async.} =
-  var namespace = ""
+method request*(
+    self: RendezVousInterface, pa: PeerAttributes
+) {.async: (raises: [DiscoveryError, CancelledError]).} =
+  var namespace = Opt.none(string)
  for attr in pa:
    if attr.ofType(RdvNamespace):
-      namespace = string attr.to(RdvNamespace)
+      namespace = Opt.some(string attr.to(RdvNamespace))
    elif attr.ofType(DiscoveryService):
-      namespace = string attr.to(DiscoveryService)
+      namespace = Opt.some(string attr.to(DiscoveryService))
    elif attr.ofType(PeerId):
-      namespace = $attr.to(PeerId)
+      namespace = Opt.some($attr.to(PeerId))
    else:
      # unhandled type
      return

@@ -43,13 +44,15 @@ method request*(self: RendezVousInterface, pa: PeerAttributes) {.async.} =
      for address in pr.addresses:
        peer.add(address.address)

-      peer.add(DiscoveryService(namespace))
-      peer.add(RdvNamespace(namespace))
+      peer.add(DiscoveryService(namespace.get()))
+      peer.add(RdvNamespace(namespace.get()))
      self.onPeerFound(peer)

    await sleepAsync(self.timeToRequest)

-method advertise*(self: RendezVousInterface) {.async.} =
+method advertise*(
+    self: RendezVousInterface
+) {.async: (raises: [CancelledError, AdvertiseError]).} =
  while true:
    var toAdvertise: seq[string]
    for attr in self.toAdvertise:

@@ -62,12 +65,18 @@ method advertise*(self: RendezVousInterface) {.async.} =

    self.advertisementUpdated.clear()
    for toAdv in toAdvertise:
-      await self.rdv.advertise(toAdv, self.timeToAdvertise)
+      try:
+        await self.rdv.advertise(toAdv, self.ttl)
+      except CatchableError as error:
+        debug "RendezVous advertise error: ", description = error.msg

    await sleepAsync(self.timeToAdvertise) or self.advertisementUpdated.wait()

-proc new*(T: typedesc[RendezVousInterface],
-          rdv: RendezVous,
-          ttr: Duration = 1.minutes,
-          tta: Duration = MinimumDuration): RendezVousInterface =
-  T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta)
+proc new*(
+    T: typedesc[RendezVousInterface],
+    rdv: RendezVous,
+    ttr: Duration = 1.minutes,
+    tta: Duration = 1.minutes,
+    ttl: Duration = MinimumDuration,
+): RendezVousInterface =
+  T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta, ttl: ttl)
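With the added ttl field, the re-advertise cadence (tta) and the lifetime requested per registration (ttl) are now independent. A construction sketch, assuming an existing RendezVous instance rdv and a DiscoveryManager dm from the surrounding module:

let iface = RendezVousInterface.new(
  rdv = rdv,             # existing RendezVous instance (assumed)
  ttr = 30.seconds,      # how often request() polls for new peers
  tta = 1.minutes,       # how often advertise() re-registers
  ttl = MinimumDuration, # lifetime requested for each registration
)
dm.add(iface) # attach to the manager; exact registration call assumed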
@@ -19,58 +19,30 @@ func toException*(e: string): ref LPError =

# sadly nim needs more love for hygienic templates
# so here goes the macro, it's based on the proc/template version
# and uses quote do so it's quite readable
-macro checkFutures*[T](futs: seq[Future[T]], exclude: untyped = []): untyped =
+# TODO https://github.com/nim-lang/Nim/issues/22936
+macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
  let nexclude = exclude.len
  case nexclude
  of 0:
-    quote do:
+    quote:
      for res in `futs`:
        if res.failed:
-          let exc = res.readError()
+          let exc = res.error
          # We still don't abort but warn
-          debug "A future has failed, enable trace logging for details", error = exc.name
-          trace "Exception message", msg= exc.msg, stack = getStackTrace()
+          debug "A future has failed, enable trace logging for details",
+            error = exc.name
+          trace "Exception message", description = exc.msg, stack = getStackTrace()
  else:
-    quote do:
+    quote:
      for res in `futs`:
        block check:
          if res.failed:
-            let exc = res.readError()
-            for i in 0..<`nexclude`:
+            let exc = res.error
+            for i in 0 ..< `nexclude`:
              if exc of `exclude`[i]:
-                trace "A future has failed", error=exc.name, msg=exc.msg
+                trace "A future has failed", error = exc.name, description = exc.msg
                break check
            # We still don't abort but warn
            debug "A future has failed, enable trace logging for details", error=exc.name
            trace "Exception details", msg=exc.msg

-proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] =
-  var futs: seq[Future[T]]
-  for fut in args:
-    futs &= fut
-  proc call() {.async.} =
-    var first: ref CatchableError = nil
-    futs = await allFinished(futs)
-    for fut in futs:
-      if fut.failed:
-        let err = fut.readError()
-        if err of Defect:
-          raise err
-        else:
-          if err of CancelledError:
-            raise err
-          if isNil(first):
-            first = err
-    if not isNil(first):
-      raise first
-
-  return call()
-
template tryAndWarn*(message: static[string]; body: untyped): untyped =
  try:
    body
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
-    debug "An exception has occurred, enable trace logging for details", name = exc.name, msg = message
-    trace "Exception details", exc = exc.msg
+    debug "A future has failed, enable trace logging for details",
+      error = exc.name
+    trace "Exception details", description = exc.msg
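A usage sketch for checkFutures after this cleanup (failures are read via .error); the Connection writes are stand-ins for any batch of homogeneous futures:

proc broadcast(conns: seq[Connection], msg: seq[byte]) {.async.} =
  var futs: seq[Future[void]]
  for conn in conns:
    futs.add(conn.write(msg)) # hypothetical stand-in for the real send
  discard await allFinished(futs)
  # log failures without aborting; EOF on a single peer is expected churn
  checkFutures(futs, [LPStreamEOFError])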
@@ -12,20 +12,33 @@

{.push raises: [].}
{.push public.}

-import pkg/chronos, chronicles
-import std/[nativesockets, hashes]
-import tables, strutils, sets, stew/shims/net
-import multicodec, multihash, multibase, transcoder, vbuffer, peerid,
-       protobuf/minprotobuf, errors, utility
-import stew/[base58, base32, endians2, results]
-export results, minprotobuf, vbuffer, utility
+import pkg/[chronos, chronicles, results]
+import std/[nativesockets, net, hashes]
+import tables, strutils, sets
+import
+  multicodec,
+  multihash,
+  multibase,
+  transcoder,
+  vbuffer,
+  peerid,
+  protobuf/minprotobuf,
+  errors,
+  utility
+import stew/[base58, base32, endians2]
+export results, vbuffer, errors, utility
+import ./utils/sequninit

logScope:
  topics = "libp2p multiaddress"

type
  MAKind* = enum
-    None, Fixed, Length, Path, Marker
+    None
+    Fixed
+    Length
+    Path
+    Marker

  MAProtocol* = object
    mcodec*: MultiCodec

@@ -37,7 +50,9 @@ type
    data: VBuffer

  MaPatternOp* = enum
-    Eq, Or, And
+    Eq
+    Or
+    And

  MaPattern* = object
    operator*: MaPatternOp

@@ -57,6 +72,9 @@ type
    tcpProtocol
    udpProtocol

+func maErr*(msg: string): ref MaError =
+  (ref MaError)(msg: msg)
+
const
  # These are needed in order to avoid an ambiguity error stemming from
  # some cint constants with the same name defined in the posix modules
@@ -119,23 +137,52 @@ proc ip6VB(vb: var VBuffer): bool =
  if vb.readArray(a.address_v6) == 16:
    result = true

-proc ip6zoneStB(s: string, vb: var VBuffer): bool =
-  ## IPv6 stringToBuffer() implementation.
-  if len(s) > 0:
-    vb.writeSeq(s)
-    result = true
+template pathStringToBuffer(s: string, vb: var VBuffer): bool =
+  if len(s) > 0:
+    vb.writeSeq(s)
+    true
+  else:
+    false
+
+template pathBufferToString(vb: var VBuffer, s: var string): bool =
+  s = ""
+  if (vb.readSeq(s) > 0) and (len(s) > 0): true else: false
+
+template pathBufferToStringNoSlash(vb: var VBuffer, s: var string): bool =
+  s = ""
+  if (vb.readSeq(s) > 0) and (len(s) > 0) and (s.find('/') == -1): true else: false
+
+template pathValidateBuffer(vb: var VBuffer): bool =
+  var s = ""
+  pathBufferToString(vb, s)
+
+template pathValidateBufferNoSlash(vb: var VBuffer): bool =
+  var s = ""
+  pathBufferToStringNoSlash(vb, s)
+
+proc ip6zoneStB(s: string, vb: var VBuffer): bool =
+  ## IPv6 stringToBuffer() implementation.
+  pathStringToBuffer(s, vb)

proc ip6zoneBtS(vb: var VBuffer, s: var string): bool =
  ## IPv6 bufferToString() implementation.
-  if vb.readSeq(s) > 0:
-    result = true
+  pathBufferToStringNoSlash(vb, s)

proc ip6zoneVB(vb: var VBuffer): bool =
  ## IPv6 validateBuffer() implementation.
-  var s = ""
-  if vb.readSeq(s) > 0:
-    if s.find('/') == -1:
-      result = true
+  pathValidateBufferNoSlash(vb)
+
+proc memoryStB(s: string, vb: var VBuffer): bool =
+  ## Memory stringToBuffer() implementation.
+  pathStringToBuffer(s, vb)
+
+proc memoryBtS(vb: var VBuffer, s: var string): bool =
+  ## Memory bufferToString() implementation.
+  pathBufferToString(vb, s)
+
+proc memoryVB(vb: var VBuffer): bool =
+  ## Memory validateBuffer() implementation.
+  pathValidateBuffer(vb)

proc portStB(s: string, vb: var VBuffer): bool =
  ## Port number stringToBuffer() implementation.
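The path* templates above deduplicate the string-payload transcoders (ip6zone, unix, dns and memory all reduce to three one-line procs). A sketch of how a further path-style codec would be wired up in the same pattern; the "example" name is hypothetical:

proc exampleStB(s: string, vb: var VBuffer): bool =
  ## stringToBuffer() for a hypothetical path-style codec.
  pathStringToBuffer(s, vb)

proc exampleBtS(vb: var VBuffer, s: var string): bool =
  ## bufferToString(); rejects embedded '/' like dns and ip6zone do.
  pathBufferToStringNoSlash(vb, s)

proc exampleVB(vb: var VBuffer): bool =
  ## validateBuffer() built on the same helpers.
  pathValidateBufferNoSlash(vb)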
@@ -154,7 +201,7 @@ proc portBtS(vb: var VBuffer, s: var string): bool =
  ## Port number bufferToString() implementation.
  var port: array[2, byte]
  if vb.readArray(port) == 2:
-    var nport = (safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
+    let nport = (safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
    s = $nport
    result = true

@@ -177,7 +224,7 @@ proc p2pStB(s: string, vb: var VBuffer): bool =

proc p2pBtS(vb: var VBuffer, s: var string): bool =
  ## P2P address bufferToString() implementation.
-  var address = newSeq[byte]()
+  var address = newSeqUninit[byte](0)
  if vb.readSeq(address) > 0:
    var mh: MultiHash
    if MultiHash.decode(address, mh).isOk:

@@ -186,7 +233,7 @@ proc p2pBtS(vb: var VBuffer, s: var string): bool =

proc p2pVB(vb: var VBuffer): bool =
  ## P2P address validateBuffer() implementation.
-  var address = newSeq[byte]()
+  var address = newSeqUninit[byte](0)
  if vb.readSeq(address) > 0:
    var mh: MultiHash
    if MultiHash.decode(address, mh).isOk:

@@ -214,7 +261,7 @@ proc onionBtS(vb: var VBuffer, s: var string): bool =
  ## ONION address bufferToString() implementation.
  var buf: array[12, byte]
  if vb.readArray(buf) == 12:
-    var nport = (safeConvert[uint16](buf[10]) shl 8) or safeConvert[uint16](buf[11])
+    let nport = (safeConvert[uint16](buf[10]) shl 8) or safeConvert[uint16](buf[11])
    s = Base32Lower.encode(buf.toOpenArray(0, 9))
    s.add(":")
    s.add($nport)
@@ -262,40 +309,27 @@ proc onion3VB(vb: var VBuffer): bool =

proc unixStB(s: string, vb: var VBuffer): bool =
  ## Unix socket name stringToBuffer() implementation.
-  if len(s) > 0:
-    vb.writeSeq(s)
-    result = true
+  pathStringToBuffer(s, vb)

proc unixBtS(vb: var VBuffer, s: var string): bool =
  ## Unix socket name bufferToString() implementation.
-  s = ""
-  if vb.readSeq(s) > 0:
-    result = true
+  pathBufferToString(vb, s)

proc unixVB(vb: var VBuffer): bool =
  ## Unix socket name validateBuffer() implementation.
-  var s = ""
-  if vb.readSeq(s) > 0:
-    result = true
+  pathValidateBuffer(vb)

proc dnsStB(s: string, vb: var VBuffer): bool =
  ## DNS name stringToBuffer() implementation.
-  if len(s) > 0:
-    vb.writeSeq(s)
-    result = true
+  pathStringToBuffer(s, vb)

proc dnsBtS(vb: var VBuffer, s: var string): bool =
  ## DNS name bufferToString() implementation.
-  s = ""
-  if vb.readSeq(s) > 0:
-    result = true
+  pathBufferToStringNoSlash(vb, s)

proc dnsVB(vb: var VBuffer): bool =
  ## DNS name validateBuffer() implementation.
-  var s = ""
-  if vb.readSeq(s) > 0:
-    if s.find('/') == -1:
-      result = true
+  pathValidateBufferNoSlash(vb)

proc mapEq*(codec: string): MaPattern =
  ## ``Equal`` operator for pattern
@@ -313,152 +347,72 @@ proc mapAnd*(args: varargs[MaPattern]): MaPattern =
  result.args = @args

const
-  TranscoderIP4* = Transcoder(
-    stringToBuffer: ip4StB,
-    bufferToString: ip4BtS,
-    validateBuffer: ip4VB
-  )
-  TranscoderIP6* = Transcoder(
-    stringToBuffer: ip6StB,
-    bufferToString: ip6BtS,
-    validateBuffer: ip6VB
-  )
-  TranscoderIP6Zone* = Transcoder(
-    stringToBuffer: ip6zoneStB,
-    bufferToString: ip6zoneBtS,
-    validateBuffer: ip6zoneVB
-  )
-  TranscoderUnix* = Transcoder(
-    stringToBuffer: unixStB,
-    bufferToString: unixBtS,
-    validateBuffer: unixVB
-  )
-  TranscoderP2P* = Transcoder(
-    stringToBuffer: p2pStB,
-    bufferToString: p2pBtS,
-    validateBuffer: p2pVB
-  )
-  TranscoderPort* = Transcoder(
-    stringToBuffer: portStB,
-    bufferToString: portBtS,
-    validateBuffer: portVB
-  )
-  TranscoderOnion* = Transcoder(
-    stringToBuffer: onionStB,
-    bufferToString: onionBtS,
-    validateBuffer: onionVB
-  )
-  TranscoderOnion3* = Transcoder(
-    stringToBuffer: onion3StB,
-    bufferToString: onion3BtS,
-    validateBuffer: onion3VB
-  )
-  TranscoderDNS* = Transcoder(
-    stringToBuffer: dnsStB,
-    bufferToString: dnsBtS,
-    validateBuffer: dnsVB
-  )
+  TranscoderIP4* =
+    Transcoder(stringToBuffer: ip4StB, bufferToString: ip4BtS, validateBuffer: ip4VB)
+  TranscoderIP6* =
+    Transcoder(stringToBuffer: ip6StB, bufferToString: ip6BtS, validateBuffer: ip6VB)
+  TranscoderIP6Zone* = Transcoder(
+    stringToBuffer: ip6zoneStB, bufferToString: ip6zoneBtS, validateBuffer: ip6zoneVB
+  )
+  TranscoderUnix* =
+    Transcoder(stringToBuffer: unixStB, bufferToString: unixBtS, validateBuffer: unixVB)
+  TranscoderP2P* =
+    Transcoder(stringToBuffer: p2pStB, bufferToString: p2pBtS, validateBuffer: p2pVB)
+  TranscoderPort* =
+    Transcoder(stringToBuffer: portStB, bufferToString: portBtS, validateBuffer: portVB)
+  TranscoderOnion* = Transcoder(
+    stringToBuffer: onionStB, bufferToString: onionBtS, validateBuffer: onionVB
+  )
+  TranscoderOnion3* = Transcoder(
+    stringToBuffer: onion3StB, bufferToString: onion3BtS, validateBuffer: onion3VB
+  )
+  TranscoderDNS* =
+    Transcoder(stringToBuffer: dnsStB, bufferToString: dnsBtS, validateBuffer: dnsVB)
+  TranscoderMemory* = Transcoder(
+    stringToBuffer: memoryStB, bufferToString: memoryBtS, validateBuffer: memoryVB
+  )

  ProtocolsList = [
-    MAProtocol(
-      mcodec: multiCodec("ip4"), kind: Fixed, size: 4,
-      coder: TranscoderIP4
-    ),
-    MAProtocol(
-      mcodec: multiCodec("tcp"), kind: Fixed, size: 2,
-      coder: TranscoderPort
-    ),
-    MAProtocol(
-      mcodec: multiCodec("udp"), kind: Fixed, size: 2,
-      coder: TranscoderPort
-    ),
-    MAProtocol(
-      mcodec: multiCodec("ip6"), kind: Fixed, size: 16,
-      coder: TranscoderIP6
-    ),
-    MAProtocol(
-      mcodec: multiCodec("dccp"), kind: Fixed, size: 2,
-      coder: TranscoderPort
-    ),
-    MAProtocol(
-      mcodec: multiCodec("sctp"), kind: Fixed, size: 2,
-      coder: TranscoderPort
-    ),
-    MAProtocol(
-      mcodec: multiCodec("udt"), kind: Marker, size: 0
-    ),
-    MAProtocol(
-      mcodec: multiCodec("utp"), kind: Marker, size: 0
-    ),
-    MAProtocol(
-      mcodec: multiCodec("http"), kind: Marker, size: 0
-    ),
-    MAProtocol(
-      mcodec: multiCodec("https"), kind: Marker, size: 0
-    ),
-    MAProtocol(
-      mcodec: multiCodec("quic"), kind: Marker, size: 0
-    ),
-    MAProtocol(
-      mcodec: multiCodec("ip6zone"), kind: Length, size: 0,
-      coder: TranscoderIP6Zone
-    ),
-    MAProtocol(
-      mcodec: multiCodec("onion"), kind: Fixed, size: 10,
-      coder: TranscoderOnion
-    ),
-    MAProtocol(
-      mcodec: multiCodec("onion3"), kind: Fixed, size: 37,
-      coder: TranscoderOnion3
-    ),
-    MAProtocol(
-      mcodec: multiCodec("ws"), kind: Marker, size: 0
-    ),
-    MAProtocol(
-      mcodec: multiCodec("wss"), kind: Marker, size: 0
-    ),
-    MAProtocol(
-      mcodec: multiCodec("tls"), kind: Marker, size: 0
-    ),
-    MAProtocol(
-      mcodec: multiCodec("ipfs"), kind: Length, size: 0,
-      coder: TranscoderP2P
-    ),
-    MAProtocol(
-      mcodec: multiCodec("p2p"), kind: Length, size: 0,
-      coder: TranscoderP2P
-    ),
-    MAProtocol(
-      mcodec: multiCodec("unix"), kind: Path, size: 0,
-      coder: TranscoderUnix
-    ),
-    MAProtocol(
-      mcodec: multiCodec("dns"), kind: Length, size: 0,
-      coder: TranscoderDNS
-    ),
-    MAProtocol(
-      mcodec: multiCodec("dns4"), kind: Length, size: 0,
-      coder: TranscoderDNS
-    ),
-    MAProtocol(
-      mcodec: multiCodec("dns6"), kind: Length, size: 0,
-      coder: TranscoderDNS
-    ),
-    MAProtocol(
-      mcodec: multiCodec("dnsaddr"), kind: Length, size: 0,
-      coder: TranscoderDNS
-    ),
-    MAProtocol(
-      mcodec: multiCodec("p2p-circuit"), kind: Marker, size: 0
-    ),
-    MAProtocol(
-      mcodec: multiCodec("p2p-websocket-star"), kind: Marker, size: 0
-    ),
-    MAProtocol(
-      mcodec: multiCodec("p2p-webrtc-star"), kind: Marker, size: 0
-    ),
-    MAProtocol(
-      mcodec: multiCodec("p2p-webrtc-direct"), kind: Marker, size: 0
-    )
+    MAProtocol(mcodec: multiCodec("ip4"), kind: Fixed, size: 4, coder: TranscoderIP4),
+    MAProtocol(mcodec: multiCodec("tcp"), kind: Fixed, size: 2, coder: TranscoderPort),
+    MAProtocol(mcodec: multiCodec("udp"), kind: Fixed, size: 2, coder: TranscoderPort),
+    MAProtocol(mcodec: multiCodec("ip6"), kind: Fixed, size: 16, coder: TranscoderIP6),
+    MAProtocol(mcodec: multiCodec("dccp"), kind: Fixed, size: 2, coder: TranscoderPort),
+    MAProtocol(mcodec: multiCodec("sctp"), kind: Fixed, size: 2, coder: TranscoderPort),
+    MAProtocol(mcodec: multiCodec("udt"), kind: Marker, size: 0),
+    MAProtocol(mcodec: multiCodec("utp"), kind: Marker, size: 0),
+    MAProtocol(mcodec: multiCodec("http"), kind: Marker, size: 0),
+    MAProtocol(mcodec: multiCodec("https"), kind: Marker, size: 0),
+    MAProtocol(mcodec: multiCodec("quic"), kind: Marker, size: 0),
+    MAProtocol(mcodec: multiCodec("quic-v1"), kind: Marker, size: 0),
+    MAProtocol(
+      mcodec: multiCodec("ip6zone"), kind: Length, size: 0, coder: TranscoderIP6Zone
+    ),
+    MAProtocol(
+      mcodec: multiCodec("onion"), kind: Fixed, size: 10, coder: TranscoderOnion
+    ),
+    MAProtocol(
+      mcodec: multiCodec("onion3"), kind: Fixed, size: 37, coder: TranscoderOnion3
+    ),
+    MAProtocol(mcodec: multiCodec("ws"), kind: Marker, size: 0),
+    MAProtocol(mcodec: multiCodec("wss"), kind: Marker, size: 0),
+    MAProtocol(mcodec: multiCodec("tls"), kind: Marker, size: 0),
+    MAProtocol(mcodec: multiCodec("ipfs"), kind: Length, size: 0, coder: TranscoderP2P),
+    MAProtocol(mcodec: multiCodec("p2p"), kind: Length, size: 0, coder: TranscoderP2P),
+    MAProtocol(mcodec: multiCodec("unix"), kind: Path, size: 0, coder: TranscoderUnix),
+    MAProtocol(mcodec: multiCodec("dns"), kind: Length, size: 0, coder: TranscoderDNS),
+    MAProtocol(mcodec: multiCodec("dns4"), kind: Length, size: 0, coder: TranscoderDNS),
+    MAProtocol(mcodec: multiCodec("dns6"), kind: Length, size: 0, coder: TranscoderDNS),
+    MAProtocol(
+      mcodec: multiCodec("dnsaddr"), kind: Length, size: 0, coder: TranscoderDNS
+    ),
+    MAProtocol(mcodec: multiCodec("p2p-circuit"), kind: Marker, size: 0),
+    MAProtocol(mcodec: multiCodec("p2p-websocket-star"), kind: Marker, size: 0),
+    MAProtocol(mcodec: multiCodec("p2p-webrtc-star"), kind: Marker, size: 0),
+    MAProtocol(mcodec: multiCodec("p2p-webrtc-direct"), kind: Marker, size: 0),
+    MAProtocol(
+      mcodec: multiCodec("memory"), kind: Path, size: 0, coder: TranscoderMemory
+    ),
  ]

  DNSANY* = mapEq("dns")
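ProtocolsList is folded into the compile-time CodeAddresses table further down; every parse and print goes through that lookup. A small in-module sketch of what the reshuffled list encodes:

# Marker protocols carry no payload bytes; Path protocols consume the rest
# of the string form.
assert CodeAddresses.getOrDefault(multiCodec("quic-v1")).kind == Marker
assert CodeAddresses.getOrDefault(multiCodec("memory")).kind == Path
assert CodeAddresses.getOrDefault(multiCodec("onion3")).size == 37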
@@ -477,7 +431,12 @@ const
  UDP_IP* = mapAnd(IP, mapEq("udp"))
  UDP* = mapOr(UDP_DNS, UDP_IP)
  UTP* = mapAnd(UDP, mapEq("utp"))
-  QUIC* = mapAnd(UDP, mapEq("quic"))
+  QUIC_IP* = mapAnd(UDP_IP, mapEq("quic"))
+  QUIC_DNS* = mapAnd(UDP_DNS, mapEq("quic"))
+  QUIC* = mapOr(QUIC_DNS, QUIC_IP)
+  QUIC_V1_IP* = mapAnd(UDP_IP, mapEq("quic-v1"))
+  QUIC_V1_DNS* = mapAnd(UDP_DNS, mapEq("quic-v1"))
+  QUIC_V1* = mapOr(QUIC_V1_DNS, QUIC_V1_IP)
  UNIX* = mapEq("unix")
  WS_DNS* = mapAnd(TCP_DNS, mapEq("ws"))
  WS_IP* = mapAnd(TCP_IP, mapEq("ws"))
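Splitting QUIC and QUIC_V1 into IP and DNS variants mirrors the existing WS/WSS patterns. A matching sketch, assuming MultiAddress.init and match as defined in this file:

let quicV1 = MultiAddress.init("/ip4/127.0.0.1/udp/4242/quic-v1").tryGet()
assert QUIC_V1.match(quicV1)
assert not QUIC.match(quicV1) # quic and quic-v1 are distinct codecs

let quicDns = MultiAddress.init("/dns4/example.com/udp/4242/quic").tryGet()
assert QUIC.match(quicDns) # DNS-based QUIC now matches via QUIC_DNS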
@@ -501,31 +460,26 @@ const
  IPFS* = mapAnd(Reliable, P2PPattern)

  HTTP* = mapOr(
-    mapAnd(TCP, mapEq("http")),
-    mapAnd(IP, mapEq("http")),
-    mapAnd(DNS, mapEq("http"))
+    mapAnd(TCP, mapEq("http")), mapAnd(IP, mapEq("http")), mapAnd(DNS, mapEq("http"))
  )

  HTTPS* = mapOr(
-    mapAnd(TCP, mapEq("https")),
-    mapAnd(IP, mapEq("https")),
-    mapAnd(DNS, mapEq("https"))
+    mapAnd(TCP, mapEq("https")), mapAnd(IP, mapEq("https")), mapAnd(DNS, mapEq("https"))
  )

  WebRTCDirect* = mapOr(
-    mapAnd(HTTP, mapEq("p2p-webrtc-direct")),
-    mapAnd(HTTPS, mapEq("p2p-webrtc-direct"))
+    mapAnd(HTTP, mapEq("p2p-webrtc-direct")), mapAnd(HTTPS, mapEq("p2p-webrtc-direct"))
  )

  CircuitRelay* = mapEq("p2p-circuit")
+  Memory* = mapEq("memory")

-proc initMultiAddressCodeTable(): Table[MultiCodec,
-                                        MAProtocol] {.compileTime.} =
+proc initMultiAddressCodeTable(): Table[MultiCodec, MAProtocol] {.compileTime.} =
  for item in ProtocolsList:
    result[item.mcodec] = item

-const
-  CodeAddresses = initMultiAddressCodeTable()
+const CodeAddresses = initMultiAddressCodeTable()

@@ -535,7 +489,7 @@ proc trimRight(s: string, ch: char): string =
      inc(m)
    else:
      break
-  result = s[0..(s.high - m)]
+  result = s[0 .. (s.high - m)]

proc protoCode*(ma: MultiAddress): MaResult[MultiCodec] =
  ## Returns MultiAddress ``ma`` protocol code.
@@ -563,8 +517,7 @@ proc protoName*(ma: MultiAddress): MaResult[string] =
  else:
    ok($(proto.mcodec))

-proc protoArgument*(ma: MultiAddress,
-                    value: var openArray[byte]): MaResult[int] =
+proc protoArgument*(ma: MultiAddress, value: var openArray[byte]): MaResult[int] =
  ## Returns MultiAddress ``ma`` protocol argument value.
  ##
  ## If current MultiAddress does not have argument value, then result will be

@@ -583,7 +536,7 @@ proc protoArgument*(ma: MultiAddress,
    if proto.kind == Fixed:
      res = proto.size
      if len(value) >= res and
-         vb.data.readArray(value.toOpenArray(0, proto.size - 1)) != proto.size:
+          vb.data.readArray(value.toOpenArray(0, proto.size - 1)) != proto.size:
        err("multiaddress: Decoding protocol error")
      else:
        ok(res)

@@ -603,8 +556,8 @@ proc protoAddress*(ma: MultiAddress): MaResult[seq[byte]] =
  ##
  ## If current MultiAddress does not have argument value, then result array will
  ## be empty.
-  var buffer = newSeq[byte](len(ma.data.buffer))
-  let res = ? protoArgument(ma, buffer)
+  var buffer = newSeqUninit[byte](len(ma.data.buffer))
+  let res = ?protoArgument(ma, buffer)
  buffer.setLen(res)
  ok(buffer)

@@ -617,13 +570,14 @@ proc protoArgument*(ma: MultiAddress): MaResult[seq[byte]] =

proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
  var header: uint64
-  var data = newSeq[byte]()
+  var data = newSeqUninit[byte](0)
  var offset = 0
  var vb = ma
  var res: MultiAddress
  res.data = initVBuffer()

-  if index < 0: return err("multiaddress: negative index given to getPart")
+  if index < 0:
+    return err("multiaddress: negative index given to getPart")

  while offset <= index:
    if vb.data.readVarint(header) == -1:

@@ -632,7 +586,6 @@ proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
    let proto = CodeAddresses.getOrDefault(MultiCodec(header))
    if proto.kind == None:
      return err("multiaddress: Unsupported protocol '" & $header & "'")
-
    elif proto.kind == Fixed:
      data.setLen(proto.size)
      if vb.data.readArray(data) != proto.size:
@@ -659,22 +612,27 @@ proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =

proc getParts[U, V](ma: MultiAddress, slice: HSlice[U, V]): MaResult[MultiAddress] =
  when slice.a is BackwardsIndex or slice.b is BackwardsIndex:
-    let maLength = ? len(ma)
+    let maLength = ?len(ma)
  template normalizeIndex(index): int =
-    when index is BackwardsIndex: maLength - int(index)
-    else: int(index)
+    when index is BackwardsIndex:
+      maLength - int(index)
+    else:
+      int(index)

  let
    indexStart = normalizeIndex(slice.a)
    indexEnd = normalizeIndex(slice.b)
  var res: MultiAddress
-  for i in indexStart..indexEnd:
-    ? res.append(? ma[i])
+  for i in indexStart .. indexEnd:
+    ?res.append(?ma[i])
  ok(res)

-proc `[]`*(ma: MultiAddress, i: int | BackwardsIndex): MaResult[MultiAddress] {.inline.} =
+proc `[]`*(
+    ma: MultiAddress, i: int | BackwardsIndex
+): MaResult[MultiAddress] {.inline.} =
  ## Returns part with index ``i`` of MultiAddress ``ma``.
  when i is BackwardsIndex:
-    let maLength = ? len(ma)
+    let maLength = ?len(ma)
    ma.getPart(maLength - int(i))
  else:
    ma.getPart(i)
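Indexing and slicing both funnel through getPart/getParts; small wrappers in the ?-operator style this diff adopts:

proc transportPart(ma: MultiAddress): MaResult[MultiAddress] =
  ma[1] # e.g. "/tcp/80" out of "/ip4/192.0.2.1/tcp/80"

proc lastPart(ma: MultiAddress): MaResult[MultiAddress] =
  ma[^1] # the BackwardsIndex branch computes ?len(ma) first

proc tailParts(ma: MultiAddress): MaResult[MultiAddress] =
  ma[1 ..^ 1] # everything after the network component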
@@ -686,7 +644,7 @@ proc `[]`*(ma: MultiAddress, slice: HSlice): MaResult[MultiAddress] {.inline.} =

iterator items*(ma: MultiAddress): MaResult[MultiAddress] =
  ## Iterates over all addresses inside of MultiAddress ``ma``.
  var header: uint64
-  var data = newSeq[byte]()
+  var data = newSeqUninit[byte](0)
  var vb = ma
  while true:
    if vb.data.isEmpty():

@@ -698,9 +656,7 @@ iterator items*(ma: MultiAddress): MaResult[MultiAddress] =
    let proto = CodeAddresses.getOrDefault(MultiCodec(header))
    if proto.kind == None:
-      yield err(MaResult[MultiAddress], "Unsupported protocol '" &
-        $header & "'")
-
+      yield err(MaResult[MultiAddress], "Unsupported protocol '" & $header & "'")
    elif proto.kind == Fixed:
      data.setLen(proto.size)
      if vb.data.readArray(data) != proto.size:

@@ -722,7 +678,8 @@ iterator items*(ma: MultiAddress): MaResult[MultiAddress] =

proc len*(ma: MultiAddress): MaResult[int] =
  var counter: int
  for part in ma:
-    if part.isErr: return err(part.error)
+    if part.isErr:
+      return err(part.error)
    counter.inc()
  ok(counter)

@@ -735,8 +692,7 @@ proc contains*(ma: MultiAddress, codec: MultiCodec): MaResult[bool] {.inline.} =
      return ok(true)
  ok(false)

-proc `[]`*(ma: MultiAddress,
-           codec: MultiCodec): MaResult[MultiAddress] {.inline.} =
+proc `[]`*(ma: MultiAddress, codec: MultiCodec): MaResult[MultiAddress] {.inline.} =
  ## Returns partial MultiAddress with MultiCodec ``codec`` and present in
  ## MultiAddress ``ma``.
  for item in ma.items:

@@ -761,13 +717,12 @@ proc toString*(value: MultiAddress): MaResult[string] =
      return err("multiaddress: Unsupported protocol '" & $header & "'")
    if proto.kind in {Fixed, Length, Path}:
      if isNil(proto.coder.bufferToString):
-        return err("multiaddress: Missing protocol '" & $(proto.mcodec) &
-          "' coder")
+        return err("multiaddress: Missing protocol '" & $(proto.mcodec) & "' coder")
      if not proto.coder.bufferToString(vb.data, part):
        return err("multiaddress: Decoding protocol error")
      parts.add($(proto.mcodec))
-      if proto.kind == Path and part[0] == '/':
-        parts.add(part[1..^1])
+      if len(part) > 0 and (proto.kind == Path) and (part[0] == '/'):
+        parts.add(part[1 ..^ 1])
      else:
        parts.add(part)
    elif proto.kind == Marker:

@@ -779,8 +734,10 @@ proc toString*(value: MultiAddress): MaResult[string] =

proc `$`*(value: MultiAddress): string =
  ## Return string representation of MultiAddress ``value``.
  let s = value.toString()
-  if s.isErr: s.error
-  else: s[]
+  if s.isErr:
+    s.error
+  else:
+    s[]

proc protocols*(value: MultiAddress): MaResult[seq[MultiCodec]] =
  ## Returns list of protocol codecs inside of MultiAddress ``value``.

@@ -797,8 +754,9 @@ proc write*(vb: var VBuffer, ma: MultiAddress) {.inline.} =
  ## Write MultiAddress value ``ma`` to buffer ``vb``.
  vb.writeArray(ma.data.buffer)

-proc encode*(mbtype: typedesc[MultiBase], encoding: string,
-             ma: MultiAddress): string {.inline.} =
+proc encode*(
+    mbtype: typedesc[MultiBase], encoding: string, ma: MultiAddress
+): string {.inline.} =
  ## Get MultiBase encoded representation of ``ma`` using encoding
  ## ``encoding``.
  result = MultiBase.encode(encoding, ma.data.buffer)
@@ -825,8 +783,8 @@ proc validate*(ma: MultiAddress): bool =
  result = true

proc init*(
-    mtype: typedesc[MultiAddress], protocol: MultiCodec,
-    value: openArray[byte] = []): MaResult[MultiAddress] =
+    mtype: typedesc[MultiAddress], protocol: MultiCodec, value: openArray[byte] = []
+): MaResult[MultiAddress] =
  ## Initialize MultiAddress object from protocol id ``protocol`` and array
  ## of bytes ``value``.
  let proto = CodeAddresses.getOrDefault(protocol)

@@ -856,19 +814,21 @@ proc init*(
  of None:
    raiseAssert "None checked above"

-proc init*(mtype: typedesc[MultiAddress], protocol: MultiCodec,
-           value: PeerId): MaResult[MultiAddress] {.inline.} =
+proc init*(
+    mtype: typedesc[MultiAddress], protocol: MultiCodec, value: PeerId
+): MaResult[MultiAddress] {.inline.} =
  ## Initialize MultiAddress object from protocol id ``protocol`` and peer id
  ## ``value``.
  init(mtype, protocol, value.data)

-proc init*(mtype: typedesc[MultiAddress], protocol: MultiCodec,
-           value: int): MaResult[MultiAddress] =
+proc init*(
+    mtype: typedesc[MultiAddress], protocol: MultiCodec, value: int
+): MaResult[MultiAddress] =
  ## Initialize MultiAddress object from protocol id ``protocol`` and integer
  ## ``value``. This procedure can be used to instantiate ``tcp``, ``udp``,
  ## ``dccp`` and ``sctp`` MultiAddresses.
-  var allowed = [multiCodec("tcp"), multiCodec("udp"), multiCodec("dccp"),
-                 multiCodec("sctp")]
+  var allowed =
+    [multiCodec("tcp"), multiCodec("udp"), multiCodec("dccp"), multiCodec("sctp")]
  if protocol notin allowed:
    err("multiaddress: Incorrect protocol for integer value")
  else:

@@ -883,13 +843,20 @@ proc init*(mtype: typedesc[MultiAddress], protocol: MultiCodec,
    res.data.finish()
    ok(res)

+proc getPart*(ma: MultiAddress, codec: MultiCodec): MaResult[MultiAddress] =
+  ## Returns the first multiaddress in ``ma`` with codec ``codec``
+  for part in ma:
+    let part = ?part
+    if codec == ?part.protoCode:
+      return ok(part)
+  err("no such codec in multiaddress")
+
proc getProtocol(name: string): MAProtocol {.inline.} =
  let mc = MultiCodec.codec(name)
  if mc != InvalidMultiCodec:
    result = CodeAddresses.getOrDefault(mc)

-proc init*(mtype: typedesc[MultiAddress],
-           value: string): MaResult[MultiAddress] =
+proc init*(mtype: typedesc[MultiAddress], value: string): MaResult[MultiAddress] =
  ## Initialize MultiAddress object from string representation ``value``.
  if len(value) == 0 or value == "/":
    return err("multiaddress: Address must not be empty!")

@@ -908,8 +875,7 @@ proc init*(mtype: typedesc[MultiAddress],
    else:
      if proto.kind in {Fixed, Length, Path}:
        if isNil(proto.coder.stringToBuffer):
-          return err("multiaddress: Missing protocol '" &
-            part & "' transcoder")
+          return err("multiaddress: Missing protocol '" & part & "' transcoder")

        if offset + 1 >= len(parts):
          return err("multiaddress: Missing protocol '" & part & "' argument")

@@ -918,16 +884,15 @@ proc init*(mtype: typedesc[MultiAddress],
        res.data.write(proto.mcodec)
        let res = proto.coder.stringToBuffer(parts[offset + 1], res.data)
        if not res:
-          return err("multiaddress: Error encoding `" & part & "/" &
-            parts[offset + 1] & "`")
+          return err(
+            "multiaddress: Error encoding `" & part & "/" & parts[offset + 1] & "`"
+          )
        offset += 2
      elif proto.kind == Path:
-        var path = "/" & (parts[(offset + 1)..^1].join("/"))
+        var path = "/" & (parts[(offset + 1) ..^ 1].join("/"))
        res.data.write(proto.mcodec)
        if not proto.coder.stringToBuffer(path, res.data):
-          return err("multiaddress: Error encoding `" & part & "/" &
-            path & "`")
+          return err("multiaddress: Error encoding `" & part & "/" & path & "`")

        break
      elif proto.kind == Marker:
@@ -936,8 +901,9 @@ proc init*(mtype: typedesc[MultiAddress],
  res.data.finish()
  ok(res)

-proc init*(mtype: typedesc[MultiAddress],
-           data: openArray[byte]): MaResult[MultiAddress] =
+proc init*(
+    mtype: typedesc[MultiAddress], data: openArray[byte]
+): MaResult[MultiAddress] =
  ## Initialize MultiAddress with array of bytes ``data``.
  if len(data) == 0:
    err("multiaddress: Address must not be empty!")
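A round-trip sketch covering the string and byte initializers reflowed above (bytes() is the public accessor defined later in this file):

let ma = MultiAddress.init("/ip4/192.0.2.1/tcp/4001").tryGet()
assert $ma == "/ip4/192.0.2.1/tcp/4001"

# init(openArray[byte]) validates the raw buffer up front
let same = MultiAddress.init(ma.bytes()).tryGet()
assert $same == $ma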
@@ -955,38 +921,55 @@ proc init*(mtype: typedesc[MultiAddress]): MultiAddress =
  ## Initialize empty MultiAddress.
  result.data = initVBuffer()

-proc init*(mtype: typedesc[MultiAddress], address: ValidIpAddress,
-           protocol: IpTransportProtocol, port: Port): MultiAddress =
+proc init*(
+    mtype: typedesc[MultiAddress],
+    address: IpAddress,
+    protocol: IpTransportProtocol,
+    port: Port,
+): MultiAddress =
  var res: MultiAddress
  res.data = initVBuffer()
  let
-    networkProto = case address.family
-      of IpAddressFamily.IPv4: getProtocol("ip4")
-      of IpAddressFamily.IPv6: getProtocol("ip6")
+    networkProto =
+      case address.family
+      of IpAddressFamily.IPv4:
+        getProtocol("ip4")
+      of IpAddressFamily.IPv6:
+        getProtocol("ip6")

-    transportProto = case protocol
-      of tcpProtocol: getProtocol("tcp")
-      of udpProtocol: getProtocol("udp")
+    transportProto =
+      case protocol
+      of tcpProtocol:
+        getProtocol("tcp")
+      of udpProtocol:
+        getProtocol("udp")

  res.data.write(networkProto.mcodec)
  case address.family
-  of IpAddressFamily.IPv4: res.data.writeArray(address.address_v4)
-  of IpAddressFamily.IPv6: res.data.writeArray(address.address_v6)
+  of IpAddressFamily.IPv4:
+    res.data.writeArray(address.address_v4)
+  of IpAddressFamily.IPv6:
+    res.data.writeArray(address.address_v6)
  res.data.write(transportProto.mcodec)
  res.data.writeArray(toBytesBE(uint16(port)))
  res.data.finish()
  res

-proc init*(mtype: typedesc[MultiAddress], address: TransportAddress,
-           protocol = IPPROTO_TCP): MaResult[MultiAddress] =
+proc init*(
+    mtype: typedesc[MultiAddress], address: TransportAddress, protocol = IPPROTO_TCP
+): MaResult[MultiAddress] =
  ## Initialize MultiAddress using chronos.TransportAddress (IPv4/IPv6/Unix)
  ## and protocol information (UDP/TCP).
  var res: MultiAddress
  res.data = initVBuffer()
-  let protoProto = case protocol
-    of IPPROTO_TCP: getProtocol("tcp")
-    of IPPROTO_UDP: getProtocol("udp")
-    else: default(MAProtocol)
+  let protoProto =
+    case protocol
+    of IPPROTO_TCP:
+      getProtocol("tcp")
+    of IPPROTO_UDP:
+      getProtocol("udp")
+    else:
+      default(MAProtocol)
  if protoProto.size == 0:
    return err("multiaddress: protocol should be either TCP or UDP")
  if address.family == AddressFamily.IPv4:
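This is the ValidIpAddress to IpAddress migration point; std/net's parseIpAddress pairs with it (hence the import change at the top of the file). A construction sketch:

let ip = parseIpAddress("192.0.2.1") # std/net, raises ValueError on bad input
let ma = MultiAddress.init(ip, tcpProtocol, Port(4001))
assert $ma == "/ip4/192.0.2.1/tcp/4001"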
@@ -1025,25 +1008,21 @@ proc append*(m1: var MultiAddress, m2: MultiAddress): MaResult[void] =
  else:
    ok()

-proc `&`*(m1, m2: MultiAddress): MultiAddress {.
-     raises: [LPError].} =
+proc `&`*(m1, m2: MultiAddress): MultiAddress {.raises: [MaError].} =
  ## Concatenates two addresses ``m1`` and ``m2``, and returns result.
  ##
  ## This procedure performs validation of concatenated result and can raise
  ## exception on error.
  ##
-  concat(m1, m2).tryGet()
+  concat(m1, m2).valueOr:
+    raise maErr error

-proc `&=`*(m1: var MultiAddress, m2: MultiAddress) {.
-     raises: [LPError].} =
+proc `&=`*(m1: var MultiAddress, m2: MultiAddress) {.raises: [MaError].} =
  ## Concatenates two addresses ``m1`` and ``m2``.
  ##
  ## This procedure performs validation of concatenated result and can raise
  ## exception on error.
  ##
-  m1.append(m2).tryGet()
+  m1.append(m2).isOkOr:
+    raise maErr error

proc `==`*(m1: var MultiAddress, m2: MultiAddress): bool =
  ## Check if two MultiAddress are equal
@@ -1058,13 +1037,12 @@ proc matchPart(pat: MaPattern, protos: seq[MultiCodec]): MaPatResult =
      let res = a.matchPart(pcs)
      if res.flag:
        #Greedy Or
-        if result.flag == false or
-           result.rem.len > res.rem.len:
+        if result.flag == false or result.rem.len > res.rem.len:
          result = res
  elif pat.operator == And:
    if len(pcs) < len(pat.args):
      return MaPatResult(flag: false, rem: empty)
-    for i in 0..<len(pat.args):
+    for i in 0 ..< len(pat.args):
      let res = pat.args[i].matchPart(pcs)
      if not res.flag:
        return MaPatResult(flag: false, rem: res.rem)

@@ -1074,20 +1052,22 @@ proc matchPart(pat: MaPattern, protos: seq[MultiCodec]): MaPatResult =
    if len(pcs) == 0:
      return MaPatResult(flag: false, rem: empty)
    if pcs[0] == pat.value:
-      return MaPatResult(flag: true, rem: pcs[1..^1])
+      return MaPatResult(flag: true, rem: pcs[1 ..^ 1])
  result = MaPatResult(flag: false, rem: empty)

proc match*(pat: MaPattern, address: MultiAddress): bool =
  ## Match full ``address`` using pattern ``pat`` and return ``true`` if
  ## ``address`` satisfies pattern.
-  let protos = address.protocols().valueOr: return false
+  let protos = address.protocols().valueOr:
+    return false
  let res = matchPart(pat, protos)
  res.flag and (len(res.rem) == 0)

proc matchPartial*(pat: MaPattern, address: MultiAddress): bool =
  ## Match prefix part of ``address`` using pattern ``pat`` and return
  ## ``true`` if ``address`` starts with pattern.
-  let protos = address.protocols().valueOr: return false
+  let protos = address.protocols().valueOr:
+    return false
  let res = matchPart(pat, protos)
  res.flag
@@ -1109,28 +1089,32 @@ proc bytes*(value: MultiAddress): seq[byte] =

proc write*(pb: var ProtoBuffer, field: int, value: MultiAddress) {.inline.} =
  write(pb, field, value.data.buffer)

-proc getField*(pb: ProtoBuffer, field: int,
-               value: var MultiAddress): ProtoResult[bool] {.
-     inline.} =
+proc getField*(
+    pb: ProtoBuffer, field: int, value: var MultiAddress
+): ProtoResult[bool] {.inline.} =
  var buffer: seq[byte]
-  let res = ? pb.getField(field, buffer)
-  if not(res):
+  let res = ?pb.getField(field, buffer)
+  if not (res):
    ok(false)
  else:
-    value = MultiAddress.init(buffer).valueOr: return err(ProtoError.IncorrectBlob)
+    value = MultiAddress.init(buffer).valueOr:
+      return err(ProtoError.IncorrectBlob)
    ok(true)

-proc getRepeatedField*(pb: ProtoBuffer, field: int,
-                       value: var seq[MultiAddress]): ProtoResult[bool] {.
-     inline.} =
-  ## Read repeated field from protobuf message. ``field`` is field number. If the message is malformed, an error is returned.
-  ## If field is not present in message, then ``ok(false)`` is returned and value is empty. If field is present,
-  ## but no items could be parsed, then ``err(ProtoError.IncorrectBlob)`` is returned and value is empty.
-  ## If field is present and some item could be parsed, then ``true`` is returned and value contains the parsed values.
+proc getRepeatedField*(
+    pb: ProtoBuffer, field: int, value: var seq[MultiAddress]
+): ProtoResult[bool] {.inline.} =
+  ## Read repeated field from protobuf message. ``field`` is field number.
+  ## If the message is malformed, an error is returned. If field is not present
+  ## in message, then ``ok(false)`` is returned and value is empty. If field is
+  ## present, but no items could be parsed, then
+  ## ``err(ProtoError.IncorrectBlob)`` is returned and value is empty.
+  ## If field is present and some item could be parsed, then ``true`` is
+  ## returned and value contains the parsed values.
  var items: seq[seq[byte]]
  value.setLen(0)
-  let res = ? pb.getRepeatedField(field, items)
-  if not(res):
+  let res = ?pb.getRepeatedField(field, items)
+  if not (res):
    ok(false)
  else:
    for item in items:
@@ -1143,3 +1127,32 @@ proc getRepeatedField*(pb: ProtoBuffer, field: int,
      err(ProtoError.IncorrectBlob)
    else:
      ok(true)

+proc areAddrsConsistent*(a, b: MultiAddress): bool =
+  ## Checks if two multiaddresses have the same protocol stack.
+  let protosA = a.protocols().get()
+  let protosB = b.protocols().get()
+  if protosA.len != protosB.len:
+    return false
+
+  for idx in 0 ..< protosA.len:
+    let protoA = protosA[idx]
+    let protoB = protosB[idx]
+
+    if protoA != protoB:
+      if idx == 0:
+        # allow DNS ↔ IP at the first component
+        if protoB == multiCodec("dns") or protoB == multiCodec("dnsaddr"):
+          if not (protoA == multiCodec("ip4") or protoA == multiCodec("ip6")):
+            return false
+        elif protoB == multiCodec("dns4"):
+          if protoA != multiCodec("ip4"):
+            return false
+        elif protoB == multiCodec("dns6"):
+          if protoA != multiCodec("ip6"):
+            return false
+        else:
+          return false
+      else:
+        return false
+  true
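A behaviour sketch for the new helper: the DNS-versus-IP leniency applies only to the first component, everything else must match exactly:

let resolved = MultiAddress.init("/ip4/192.0.2.1/tcp/443/wss").tryGet()
let announced = MultiAddress.init("/dns4/example.com/tcp/443/wss").tryGet()
assert areAddrsConsistent(resolved, announced) # dns4 may stand for ip4

let shorter = MultiAddress.init("/ip4/192.0.2.1/udp/443").tryGet()
assert not areAddrsConsistent(shorter, announced) # stack length differs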
@@ -16,11 +16,18 @@

{.push raises: [].}

import tables
-import stew/[base32, base58, base64, results]
+import results
+import stew/[base32, base58, base64]
+import ./utils/sequninit

type
  MultiBaseStatus* {.pure.} = enum
-    Error, Success, Overrun, Incorrect, BadCodec, NotSupported
+    Error
+    Success
+    Overrun
+    Incorrect
+    BadCodec
+    NotSupported

  MultiBase* = object

@@ -29,17 +36,18 @@ type
  MBCodec = object
    code: char
    name: string
-    encr: proc(inbytes: openArray[byte],
-               outbytes: var openArray[char],
-               outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
-    decr: proc(inbytes: openArray[char],
-               outbytes: var openArray[byte],
-               outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
+    encr: proc(
+      inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
+    ): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
+    decr: proc(
+      inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
+    ): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
    encl: MBCodeSize
    decl: MBCodeSize

-proc idd(inbytes: openArray[char], outbytes: var openArray[byte],
-         outlen: var int): MultiBaseStatus =
+proc idd(
+    inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
+): MultiBaseStatus =
  let length = len(inbytes)
  if length > len(outbytes):
    outlen = length

@@ -49,9 +57,9 @@ proc idd(inbytes: openArray[char], outbytes: var openArray[byte],
  outlen = length
  result = MultiBaseStatus.Success

-proc ide(inbytes: openArray[byte],
-         outbytes: var openArray[char],
-         outlen: var int): MultiBaseStatus =
+proc ide(
+    inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
+): MultiBaseStatus =
  let length = len(inbytes)
  if length > len(outbytes):
    outlen = length

@@ -61,31 +69,37 @@ proc ide(inbytes: openArray[byte],
  outlen = length
  result = MultiBaseStatus.Success

-proc idel(length: int): int = length
-proc iddl(length: int): int = length
+proc idel(length: int): int =
+  length
+
+proc iddl(length: int): int =
+  length

-proc b16d(inbytes: openArray[char],
-          outbytes: var openArray[byte],
-          outlen: var int): MultiBaseStatus =
+proc b16d(
+    inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
+): MultiBaseStatus =
  discard

-proc b16e(inbytes: openArray[byte],
-          outbytes: var openArray[char],
-          outlen: var int): MultiBaseStatus =
+proc b16e(
+    inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
+): MultiBaseStatus =
  discard

-proc b16ud(inbytes: openArray[char],
-           outbytes: var openArray[byte],
-           outlen: var int): MultiBaseStatus =
+proc b16ud(
+    inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
+): MultiBaseStatus =
  discard

-proc b16ue(inbytes: openArray[byte],
-           outbytes: var openArray[char],
-           outlen: var int): MultiBaseStatus =
+proc b16ue(
+    inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
+): MultiBaseStatus =
  discard

-proc b16el(length: int): int = length shl 1
-proc b16dl(length: int): int = (length + 1) div 2
+proc b16el(length: int): int =
+  length shl 1
+
+proc b16dl(length: int): int =
+  (length + 1) div 2

proc b32ce(r: Base32Status): MultiBaseStatus {.inline.} =
  result = MultiBaseStatus.Error
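These wrappers (and the b32/b58/b64 ones below) feed the MultiBaseCodecs dispatch table at the end of the file. A usage sketch, assuming the module's public MultiBase.encode/decode API returns results:

let payload = @[byte 1, 2, 3]
let encoded = MultiBase.encode("base32", payload).tryGet()
assert encoded[0] == 'b' # multibase prefix for base32
let decoded = MultiBase.decode(encoded).tryGet()
assert decoded == payload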
@@ -114,218 +128,253 @@ proc b64ce(r: Base64Status): MultiBaseStatus {.inline.} =
|
||||
elif r == Base64Status.Success:
|
||||
result = MultiBaseStatus.Success
|
||||
|
||||
proc b32hd(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32hd(
|
||||
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(HexBase32Lower.decode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32he(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32he(
|
||||
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(HexBase32Lower.encode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32hud(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32hud(
|
||||
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(HexBase32Upper.decode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32hue(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32hue(
|
||||
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(HexBase32Upper.encode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32hpd(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32hpd(
|
||||
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(HexBase32LowerPad.decode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32hpe(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32hpe(
|
||||
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(HexBase32LowerPad.encode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32hpud(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32hpud(
|
||||
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(HexBase32UpperPad.decode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32hpue(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32hpue(
|
||||
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(HexBase32UpperPad.encode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32d(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32d(
|
||||
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(Base32Lower.decode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32e(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32e(
|
||||
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(Base32Lower.encode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32ud(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32ud(
|
||||
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(Base32Upper.decode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32ue(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32ue(
|
||||
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(Base32Upper.encode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32pd(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32pd(
|
||||
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(Base32LowerPad.decode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32pe(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32pe(
|
||||
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(Base32LowerPad.encode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32pud(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32pud(
|
||||
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(Base32UpperPad.decode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32pue(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32pue(
|
||||
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b32ce(Base32UpperPad.encode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b32el(length: int): int = Base32Lower.encodedLength(length)
|
||||
proc b32dl(length: int): int = Base32Lower.decodedLength(length)
|
||||
proc b32pel(length: int): int = Base32LowerPad.encodedLength(length)
|
||||
proc b32pdl(length: int): int = Base32LowerPad.decodedLength(length)
|
||||
proc b32el(length: int): int =
|
||||
Base32Lower.encodedLength(length)
|
||||
|
||||
proc b58fd(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b32dl(length: int): int =
|
||||
Base32Lower.decodedLength(length)
|
||||
|
||||
proc b32pel(length: int): int =
|
||||
Base32LowerPad.encodedLength(length)
|
||||
|
||||
proc b32pdl(length: int): int =
|
||||
Base32LowerPad.decodedLength(length)
|
||||
|
||||
proc b58fd(
|
||||
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b58ce(FLCBase58.decode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b58fe(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b58fe(
|
||||
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b58ce(FLCBase58.encode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b58bd(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b58bd(
|
||||
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b58ce(BTCBase58.decode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b58be(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b58be(
|
||||
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b58ce(BTCBase58.encode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b58el(length: int): int = Base58.encodedLength(length)
|
||||
proc b58dl(length: int): int = Base58.decodedLength(length)
|
||||
proc b58el(length: int): int =
|
||||
Base58.encodedLength(length)
|
||||
|
||||
proc b64el(length: int): int = Base64.encodedLength(length)
|
||||
proc b64dl(length: int): int = Base64.decodedLength(length)
|
||||
proc b64pel(length: int): int = Base64Pad.encodedLength(length)
|
||||
proc b64pdl(length: int): int = Base64Pad.decodedLength(length)
|
||||
proc b58dl(length: int): int =
|
||||
Base58.decodedLength(length)
|
||||
|
||||
proc b64e(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b64el(length: int): int =
|
||||
Base64.encodedLength(length)
|
||||
|
||||
proc b64dl(length: int): int =
|
||||
Base64.decodedLength(length)
|
||||
|
||||
proc b64pel(length: int): int =
|
||||
Base64Pad.encodedLength(length)
|
||||
|
||||
proc b64pdl(length: int): int =
|
||||
Base64Pad.decodedLength(length)
|
||||
|
||||
proc b64e(
|
||||
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b64ce(Base64.encode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b64d(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b64d(
|
||||
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b64ce(Base64.decode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b64pe(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b64pe(
|
||||
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b64ce(Base64Pad.encode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b64pd(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b64pd(
|
||||
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b64ce(Base64Pad.decode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b64ue(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b64ue(
|
||||
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b64ce(Base64Url.encode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b64ud(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b64ud(
|
||||
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b64ce(Base64Url.decode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b64upe(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b64upe(
|
||||
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b64ce(Base64UrlPad.encode(inbytes, outbytes, outlen))
|
||||
|
||||
proc b64upd(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus =
|
||||
proc b64upd(
|
||||
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
|
||||
): MultiBaseStatus =
|
||||
result = b64ce(Base64UrlPad.decode(inbytes, outbytes, outlen))
|
||||
|
||||
const
  MultiBaseCodecs = [
    MBCodec(name: "identity", code: chr(0x00),
      decr: idd, encr: ide, decl: iddl, encl: idel
    ),
    MBCodec(name: "base1", code: '1'),
    MBCodec(name: "base2", code: '0'),
    MBCodec(name: "base8", code: '7'),
    MBCodec(name: "base10", code: '9'),
    MBCodec(name: "base16", code: 'f',
      decr: b16d, encr: b16e, decl: b16dl, encl: b16el
    ),
    MBCodec(name: "base16upper", code: 'F',
      decr: b16ud, encr: b16ue, decl: b16dl, encl: b16el
    ),
    MBCodec(name: "base32hex", code: 'v',
      decr: b32hd, encr: b32he, decl: b32dl, encl: b32el
    ),
    MBCodec(name: "base32hexupper", code: 'V',
      decr: b32hud, encr: b32hue, decl: b32dl, encl: b32el
    ),
    MBCodec(name: "base32hexpad", code: 't',
      decr: b32hpd, encr: b32hpe, decl: b32pdl, encl: b32pel
    ),
    MBCodec(name: "base32hexpadupper", code: 'T',
      decr: b32hpud, encr: b32hpue, decl: b32pdl, encl: b32pel
    ),
    MBCodec(name: "base32", code: 'b',
      decr: b32d, encr: b32e, decl: b32dl, encl: b32el
    ),
    MBCodec(name: "base32upper", code: 'B',
      decr: b32ud, encr: b32ue, decl: b32dl, encl: b32el
    ),
    MBCodec(name: "base32pad", code: 'c',
      decr: b32pd, encr: b32pe, decl: b32pdl, encl: b32pel
    ),
    MBCodec(name: "base32padupper", code: 'C',
      decr: b32pud, encr: b32pue, decl: b32pdl, encl: b32pel
    ),
    MBCodec(name: "base32z", code: 'h'),
    MBCodec(name: "base58flickr", code: 'Z',
      decr: b58fd, encr: b58fe, decl: b58dl, encl: b58el
    ),
    MBCodec(name: "base58btc", code: 'z',
      decr: b58bd, encr: b58be, decl: b58dl, encl: b58el
    ),
    MBCodec(name: "base64", code: 'm',
      decr: b64d, encr: b64e, decl: b64dl, encl: b64el
    ),
    MBCodec(name: "base64pad", code: 'M',
      decr: b64pd, encr: b64pe, decl: b64pdl, encl: b64pel
    ),
    MBCodec(name: "base64url", code: 'u',
      decr: b64ud, encr: b64ue, decl: b64dl, encl: b64el
    ),
    MBCodec(name: "base64urlpad", code: 'U',
      decr: b64upd, encr: b64upe, decl: b64pdl, encl: b64pel
    )
  ]
const MultiBaseCodecs = [
  MBCodec(
    name: "identity", code: chr(0x00), decr: idd, encr: ide, decl: iddl, encl: idel
  ),
  MBCodec(name: "base1", code: '1'),
  MBCodec(name: "base2", code: '0'),
  MBCodec(name: "base8", code: '7'),
  MBCodec(name: "base10", code: '9'),
  MBCodec(name: "base16", code: 'f', decr: b16d, encr: b16e, decl: b16dl, encl: b16el),
  MBCodec(
    name: "base16upper", code: 'F', decr: b16ud, encr: b16ue, decl: b16dl, encl: b16el
  ),
  MBCodec(
    name: "base32hex", code: 'v', decr: b32hd, encr: b32he, decl: b32dl, encl: b32el
  ),
  MBCodec(
    name: "base32hexupper",
    code: 'V',
    decr: b32hud,
    encr: b32hue,
    decl: b32dl,
    encl: b32el,
  ),
  MBCodec(
    name: "base32hexpad",
    code: 't',
    decr: b32hpd,
    encr: b32hpe,
    decl: b32pdl,
    encl: b32pel,
  ),
  MBCodec(
    name: "base32hexpadupper",
    code: 'T',
    decr: b32hpud,
    encr: b32hpue,
    decl: b32pdl,
    encl: b32pel,
  ),
  MBCodec(name: "base32", code: 'b', decr: b32d, encr: b32e, decl: b32dl, encl: b32el),
  MBCodec(
    name: "base32upper", code: 'B', decr: b32ud, encr: b32ue, decl: b32dl, encl: b32el
  ),
  MBCodec(
    name: "base32pad", code: 'c', decr: b32pd, encr: b32pe, decl: b32pdl, encl: b32pel
  ),
  MBCodec(
    name: "base32padupper",
    code: 'C',
    decr: b32pud,
    encr: b32pue,
    decl: b32pdl,
    encl: b32pel,
  ),
  MBCodec(name: "base32z", code: 'h'),
  MBCodec(
    name: "base58flickr", code: 'Z', decr: b58fd, encr: b58fe, decl: b58dl, encl: b58el
  ),
  MBCodec(
    name: "base58btc", code: 'z', decr: b58bd, encr: b58be, decl: b58dl, encl: b58el
  ),
  MBCodec(name: "base64", code: 'm', decr: b64d, encr: b64e, decl: b64dl, encl: b64el),
  MBCodec(
    name: "base64pad", code: 'M', decr: b64pd, encr: b64pe, decl: b64pdl, encl: b64pel
  ),
  MBCodec(
    name: "base64url", code: 'u', decr: b64ud, encr: b64ue, decl: b64dl, encl: b64el
  ),
  MBCodec(
    name: "base64urlpad",
    code: 'U',
    decr: b64upd,
    encr: b64upe,
    decl: b64pdl,
    encl: b64pel,
  ),
]

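Each MBCodec entry above binds a multibase name to its one-character prefix and its
encode/decode helpers; entries declared without encr/decr (base1, base2, base8,
base10, base32z) are recognized but not supported. A minimal sketch of what the
table implies, using the MultiBase API defined later in this file (module path
assumed):

import libp2p/multibase

# "base16" is registered with code 'f', so 'f' becomes the first character:
assert MultiBase.encode("base16", @[0xAB'u8]).get() == "fab"
# codecs registered without coders report an error instead of encoding:
assert MultiBase.encode("base1", @[0xAB'u8]).isErr()
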
proc initMultiBaseCodeTable(): Table[char, MBCodec] {.compileTime.} =
  for item in MultiBaseCodecs:
@@ -339,8 +388,7 @@ const
  CodeMultiBases = initMultiBaseCodeTable()
  NameMultiBases = initMultiBaseNameTable()

proc encodedLength*(mbtype: typedesc[MultiBase], encoding: string,
                    length: int): int =
proc encodedLength*(mbtype: typedesc[MultiBase], encoding: string, length: int): int =
  ## Return estimated size of buffer to store MultiBase encoded value with
  ## encoding ``encoding`` of length ``length``.
  ##
@@ -355,8 +403,7 @@ proc encodedLength*(mbtype: typedesc[MultiBase], encoding: string,
  else:
    result = mb.encl(length) + 1

proc decodedLength*(mbtype: typedesc[MultiBase], encoding: char,
                    length: int): int =
proc decodedLength*(mbtype: typedesc[MultiBase], encoding: char, length: int): int =
  ## Return estimated size of buffer to store MultiBase decoded value with
  ## encoding character ``encoding`` of length ``length``.
  let mb = CodeMultiBases.getOrDefault(encoding)
@@ -368,9 +415,13 @@ proc decodedLength*(mbtype: typedesc[MultiBase], encoding: char,
  else:
    result = mb.decl(length - 1)

proc encode*(mbtype: typedesc[MultiBase], encoding: string,
             inbytes: openArray[byte], outbytes: var openArray[char],
             outlen: var int): MultiBaseStatus =
proc encode*(
    mbtype: typedesc[MultiBase],
    encoding: string,
    inbytes: openArray[byte],
    outbytes: var openArray[char],
    outlen: var int,
): MultiBaseStatus =
  ## Encode array ``inbytes`` using MultiBase encoding scheme ``encoding`` and
  ## store encoded value to ``outbytes``.
  ##
@@ -392,8 +443,7 @@ proc encode*(mbtype: typedesc[MultiBase], encoding: string,
  if isNil(mb.encr) or isNil(mb.encl):
    return MultiBaseStatus.NotSupported
  if len(outbytes) > 1:
    result = mb.encr(inbytes, outbytes.toOpenArray(1, outbytes.high),
                     outlen)
    result = mb.encr(inbytes, outbytes.toOpenArray(1, outbytes.high), outlen)
    if result == MultiBaseStatus.Overrun:
      outlen += 1
    elif result == MultiBaseStatus.Success:
@@ -408,8 +458,12 @@ proc encode*(mbtype: typedesc[MultiBase], encoding: string,
    result = MultiBaseStatus.Overrun
    outlen = mb.encl(len(inbytes)) + 1

proc decode*(mbtype: typedesc[MultiBase], inbytes: openArray[char],
             outbytes: var openArray[byte], outlen: var int): MultiBaseStatus =
proc decode*(
    mbtype: typedesc[MultiBase],
    inbytes: openArray[char],
    outbytes: var openArray[byte],
    outlen: var int,
): MultiBaseStatus =
  ## Decode array ``inbytes`` using MultiBase encoding and store decoded value
  ## to ``outbytes``.
  ##
@@ -438,8 +492,9 @@ proc decode*(mbtype: typedesc[MultiBase], inbytes: openArray[char],
  else:
    result = mb.decr(inbytes.toOpenArray(1, length - 1), outbytes, outlen)

proc encode*(mbtype: typedesc[MultiBase], encoding: string,
             inbytes: openArray[byte]): Result[string, string] =
proc encode*(
    mbtype: typedesc[MultiBase], encoding: string, inbytes: openArray[byte]
): Result[string, string] =
  ## Encode array ``inbytes`` using MultiBase encoding scheme ``encoding`` and
  ## return encoded string.
  let length = len(inbytes)
@@ -462,7 +517,9 @@ proc encode*(mbtype: typedesc[MultiBase], encoding: string,
    buffer[0] = mb.code
    ok(buffer)

proc decode*(mbtype: typedesc[MultiBase], inbytes: openArray[char]): Result[seq[byte], string] =
proc decode*(
    mbtype: typedesc[MultiBase], inbytes: openArray[char]
): Result[seq[byte], string] =
  ## Decode MultiBase encoded array ``inbytes`` and return decoded sequence of
  ## bytes.
  let length = len(inbytes)
@@ -477,10 +534,9 @@ proc decode*(mbtype: typedesc[MultiBase], inbytes: openArray[char]): Result[seq[
      let empty: seq[byte] = @[]
      ok(empty) # empty
    else:
      var buffer = newSeq[byte](mb.decl(length - 1))
      var buffer = newSeqUninit[byte](mb.decl(length - 1))
      var outlen = 0
      let res = mb.decr(inbytes.toOpenArray(1, length - 1),
                        buffer, outlen)
      let res = mb.decr(inbytes.toOpenArray(1, length - 1), buffer, outlen)
      if res != MultiBaseStatus.Success:
        err("multibase: Decoding error [" & $res & "]")
      else:

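Taken together, the Result-returning encode/decode pair above gives a string-level
round-trip: encode prepends the one-character codec prefix (buffer[0] = mb.code),
and decode dispatches on that prefix to pick the decoder. A short usage sketch
under the same assumptions as the earlier example:

import libp2p/multibase

let data = @[1'u8, 2, 3]
let encoded = MultiBase.encode("base58btc", data).get() # "zLdp": 'z' prefix + base58
let decoded = MultiBase.decode(encoded).get()
assert decoded == data
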
@@ -10,10 +10,11 @@
## This module implements MultiCodec.

{.push raises: [].}
{.used.}

import tables, hashes
import varint, vbuffer
import stew/results
import vbuffer
import results
export results

## List of officially supported codecs can BE found here
@@ -49,132 +50,325 @@ const MultiCodecList = [
  ("keccak-384", 0x1C),
  ("keccak-512", 0x1D),
  ("murmur3", 0x22),
  ("blake2b-8", 0xB201), ("blake2b-16", 0xB202), ("blake2b-24", 0xB203),
  ("blake2b-32", 0xB204), ("blake2b-40", 0xB205), ("blake2b-48", 0xB206),
  ("blake2b-56", 0xB207), ("blake2b-64", 0xB208), ("blake2b-72", 0xB209),
  ("blake2b-80", 0xB20A), ("blake2b-88", 0xB20B), ("blake2b-96", 0xB20C),
  ("blake2b-104", 0xB20D), ("blake2b-112", 0xB20E), ("blake2b-120", 0xB20F),
  ("blake2b-128", 0xB210), ("blake2b-136", 0xB211), ("blake2b-144", 0xB212),
  ("blake2b-152", 0xB213), ("blake2b-160", 0xB214), ("blake2b-168", 0xB215),
  ("blake2b-176", 0xB216), ("blake2b-184", 0xB217), ("blake2b-192", 0xB218),
  ("blake2b-200", 0xB219), ("blake2b-208", 0xB21A), ("blake2b-216", 0xB21B),
  ("blake2b-224", 0xB21C), ("blake2b-232", 0xB21D), ("blake2b-240", 0xB21E),
  ("blake2b-248", 0xB21F), ("blake2b-256", 0xB220), ("blake2b-264", 0xB221),
  ("blake2b-272", 0xB222), ("blake2b-280", 0xB223), ("blake2b-288", 0xB224),
  ("blake2b-296", 0xB225), ("blake2b-304", 0xB226), ("blake2b-312", 0xB227),
  ("blake2b-320", 0xB228), ("blake2b-328", 0xB229), ("blake2b-336", 0xB22A),
  ("blake2b-344", 0xB22B), ("blake2b-352", 0xB22C), ("blake2b-360", 0xB22D),
  ("blake2b-368", 0xB22E), ("blake2b-376", 0xB22F), ("blake2b-384", 0xB230),
  ("blake2b-392", 0xB231), ("blake2b-400", 0xB232), ("blake2b-408", 0xB233),
  ("blake2b-416", 0xB234), ("blake2b-424", 0xB235), ("blake2b-432", 0xB236),
  ("blake2b-440", 0xB237), ("blake2b-448", 0xB238), ("blake2b-456", 0xB239),
  ("blake2b-464", 0xB23A), ("blake2b-472", 0xB23B), ("blake2b-480", 0xB23C),
  ("blake2b-488", 0xB23D), ("blake2b-496", 0xB23E), ("blake2b-504", 0xB23F),
  ("blake2b-512", 0xB240), ("blake2s-8", 0xB241), ("blake2s-16", 0xB242),
  ("blake2s-24", 0xB243), ("blake2s-32", 0xB244), ("blake2s-40", 0xB245),
  ("blake2s-48", 0xB246), ("blake2s-56", 0xB247), ("blake2s-64", 0xB248),
  ("blake2s-72", 0xB249), ("blake2s-80", 0xB24A), ("blake2s-88", 0xB24B),
  ("blake2s-96", 0xB24C), ("blake2s-104", 0xB24D), ("blake2s-112", 0xB24E),
  ("blake2s-120", 0xB24F), ("blake2s-128", 0xB250), ("blake2s-136", 0xB251),
  ("blake2s-144", 0xB252), ("blake2s-152", 0xB253), ("blake2s-160", 0xB254),
  ("blake2s-168", 0xB255), ("blake2s-176", 0xB256), ("blake2s-184", 0xB257),
  ("blake2s-192", 0xB258), ("blake2s-200", 0xB259), ("blake2s-208", 0xB25A),
  ("blake2s-216", 0xB25B), ("blake2s-224", 0xB25C), ("blake2s-232", 0xB25D),
  ("blake2s-240", 0xB25E), ("blake2s-248", 0xB25F), ("blake2s-256", 0xB260),
  ("skein256-8", 0xB301), ("skein256-16", 0xB302), ("skein256-24", 0xB303),
  ("skein256-32", 0xB304), ("skein256-40", 0xB305), ("skein256-48", 0xB306),
  ("skein256-56", 0xB307), ("skein256-64", 0xB308), ("skein256-72", 0xB309),
  ("skein256-80", 0xB30A), ("skein256-88", 0xB30B), ("skein256-96", 0xB30C),
  ("skein256-104", 0xB30D), ("skein256-112", 0xB30E), ("skein256-120", 0xB30F),
  ("skein256-128", 0xB310), ("skein256-136", 0xB311), ("skein256-144", 0xB312),
  ("skein256-152", 0xB313), ("skein256-160", 0xB314), ("skein256-168", 0xB315),
  ("skein256-176", 0xB316), ("skein256-184", 0xB317), ("skein256-192", 0xB318),
  ("skein256-200", 0xB319), ("skein256-208", 0xB31A), ("skein256-216", 0xB31B),
  ("skein256-224", 0xB31C), ("skein256-232", 0xB31D), ("skein256-240", 0xB31E),
  ("skein256-248", 0xB31F), ("skein256-256", 0xB320),
  ("skein512-8", 0xB321), ("skein512-16", 0xB322), ("skein512-24", 0xB323),
  ("skein512-32", 0xB324), ("skein512-40", 0xB325), ("skein512-48", 0xB326),
  ("skein512-56", 0xB327), ("skein512-64", 0xB328), ("skein512-72", 0xB329),
  ("skein512-80", 0xB32A), ("skein512-88", 0xB32B), ("skein512-96", 0xB32C),
  ("skein512-104", 0xB32D), ("skein512-112", 0xB32E), ("skein512-120", 0xB32F),
  ("skein512-128", 0xB330), ("skein512-136", 0xB331), ("skein512-144", 0xB332),
  ("skein512-152", 0xB333), ("skein512-160", 0xB334), ("skein512-168", 0xB335),
  ("skein512-176", 0xB336), ("skein512-184", 0xB337), ("skein512-192", 0xB338),
  ("skein512-200", 0xB339), ("skein512-208", 0xB33A), ("skein512-216", 0xB33B),
  ("skein512-224", 0xB33C), ("skein512-232", 0xB33D), ("skein512-240", 0xB33E),
  ("skein512-248", 0xB33F), ("skein512-256", 0xB340), ("skein512-264", 0xB341),
  ("skein512-272", 0xB342), ("skein512-280", 0xB343), ("skein512-288", 0xB344),
  ("skein512-296", 0xB345), ("skein512-304", 0xB346), ("skein512-312", 0xB347),
  ("skein512-320", 0xB348), ("skein512-328", 0xB349), ("skein512-336", 0xB34A),
  ("skein512-344", 0xB34B), ("skein512-352", 0xB34C), ("skein512-360", 0xB34D),
  ("skein512-368", 0xB34E), ("skein512-376", 0xB34F), ("skein512-384", 0xB350),
  ("skein512-392", 0xB351), ("skein512-400", 0xB352), ("skein512-408", 0xB353),
  ("skein512-416", 0xB354), ("skein512-424", 0xB355), ("skein512-432", 0xB356),
  ("skein512-440", 0xB357), ("skein512-448", 0xB358), ("skein512-456", 0xB359),
  ("skein512-464", 0xB35A), ("skein512-472", 0xB35B), ("skein512-480", 0xB35C),
  ("skein512-488", 0xB35D), ("skein512-496", 0xB35E), ("skein512-504", 0xB35F),
  ("skein512-512", 0xB360), ("skein1024-8", 0xB361), ("skein1024-16", 0xB362),
  ("skein1024-24", 0xB363), ("skein1024-32", 0xB364), ("skein1024-40", 0xB365),
  ("skein1024-48", 0xB366), ("skein1024-56", 0xB367), ("skein1024-64", 0xB368),
  ("skein1024-72", 0xB369), ("skein1024-80", 0xB36A), ("skein1024-88", 0xB36B),
  ("skein1024-96", 0xB36C), ("skein1024-104", 0xB36D),
  ("skein1024-112", 0xB36E), ("skein1024-120", 0xB36F),
  ("skein1024-128", 0xB370), ("skein1024-136", 0xB371),
  ("skein1024-144", 0xB372), ("skein1024-152", 0xB373),
  ("skein1024-160", 0xB374), ("skein1024-168", 0xB375),
  ("skein1024-176", 0xB376), ("skein1024-184", 0xB377),
  ("skein1024-192", 0xB378), ("skein1024-200", 0xB379),
  ("skein1024-208", 0xB37A), ("skein1024-216", 0xB37B),
  ("skein1024-224", 0xB37C), ("skein1024-232", 0xB37D),
  ("skein1024-240", 0xB37E), ("skein1024-248", 0xB37F),
  ("skein1024-256", 0xB380), ("skein1024-264", 0xB381),
  ("skein1024-272", 0xB382), ("skein1024-280", 0xB383),
  ("skein1024-288", 0xB384), ("skein1024-296", 0xB385),
  ("skein1024-304", 0xB386), ("skein1024-312", 0xB387),
  ("skein1024-320", 0xB388), ("skein1024-328", 0xB389),
  ("skein1024-336", 0xB38A), ("skein1024-344", 0xB38B),
  ("skein1024-352", 0xB38C), ("skein1024-360", 0xB38D),
  ("skein1024-368", 0xB38E), ("skein1024-376", 0xB38F),
  ("skein1024-384", 0xB390), ("skein1024-392", 0xB391),
  ("skein1024-400", 0xB392), ("skein1024-408", 0xB393),
  ("skein1024-416", 0xB394), ("skein1024-424", 0xB395),
  ("skein1024-432", 0xB396), ("skein1024-440", 0xB397),
  ("skein1024-448", 0xB398), ("skein1024-456", 0xB399),
  ("skein1024-464", 0xB39A), ("skein1024-472", 0xB39B),
  ("skein1024-480", 0xB39C), ("skein1024-488", 0xB39D),
  ("skein1024-496", 0xB39E), ("skein1024-504", 0xB39F),
  ("skein1024-512", 0xB3A0), ("skein1024-520", 0xB3A1),
  ("skein1024-528", 0xB3A2), ("skein1024-536", 0xB3A3),
  ("skein1024-544", 0xB3A4), ("skein1024-552", 0xB3A5),
  ("skein1024-560", 0xB3A6), ("skein1024-568", 0xB3A7),
  ("skein1024-576", 0xB3A8), ("skein1024-584", 0xB3A9),
  ("skein1024-592", 0xB3AA), ("skein1024-600", 0xB3AB),
  ("skein1024-608", 0xB3AC), ("skein1024-616", 0xB3AD),
  ("skein1024-624", 0xB3AE), ("skein1024-632", 0xB3AF),
  ("skein1024-640", 0xB3B0), ("skein1024-648", 0xB3B1),
  ("skein1024-656", 0xB3B2), ("skein1024-664", 0xB3B3),
  ("skein1024-672", 0xB3B4), ("skein1024-680", 0xB3B5),
  ("skein1024-688", 0xB3B6), ("skein1024-696", 0xB3B7),
  ("skein1024-704", 0xB3B8), ("skein1024-712", 0xB3B9),
  ("skein1024-720", 0xB3BA), ("skein1024-728", 0xB3BB),
  ("skein1024-736", 0xB3BC), ("skein1024-744", 0xB3BD),
  ("skein1024-752", 0xB3BE), ("skein1024-760", 0xB3BF),
  ("skein1024-768", 0xB3C0), ("skein1024-776", 0xB3C1),
  ("skein1024-784", 0xB3C2), ("skein1024-792", 0xB3C3),
  ("skein1024-800", 0xB3C4), ("skein1024-808", 0xB3C5),
  ("skein1024-816", 0xB3C6), ("skein1024-824", 0xB3C7),
  ("skein1024-832", 0xB3C8), ("skein1024-840", 0xB3C9),
  ("skein1024-848", 0xB3CA), ("skein1024-856", 0xB3CB),
  ("skein1024-864", 0xB3CC), ("skein1024-872", 0xB3CD),
  ("skein1024-880", 0xB3CE), ("skein1024-888", 0xB3CF),
  ("skein1024-896", 0xB3D0), ("skein1024-904", 0xB3D1),
  ("skein1024-912", 0xB3D2), ("skein1024-920", 0xB3D3),
  ("skein1024-928", 0xB3D4), ("skein1024-936", 0xB3D5),
  ("skein1024-944", 0xB3D6), ("skein1024-952", 0xB3D7),
  ("skein1024-960", 0xB3D8), ("skein1024-968", 0xB3D9),
  ("skein1024-976", 0xB3DA), ("skein1024-984", 0xB3DB),
  ("skein1024-992", 0xB3DC), ("skein1024-1000", 0xB3DD),
  ("skein1024-1008", 0xB3DE), ("skein1024-1016", 0xB3DF),
  ("blake2b-8", 0xB201),
  ("blake2b-16", 0xB202),
  ("blake2b-24", 0xB203),
  ("blake2b-32", 0xB204),
  ("blake2b-40", 0xB205),
  ("blake2b-48", 0xB206),
  ("blake2b-56", 0xB207),
  ("blake2b-64", 0xB208),
  ("blake2b-72", 0xB209),
  ("blake2b-80", 0xB20A),
  ("blake2b-88", 0xB20B),
  ("blake2b-96", 0xB20C),
  ("blake2b-104", 0xB20D),
  ("blake2b-112", 0xB20E),
  ("blake2b-120", 0xB20F),
  ("blake2b-128", 0xB210),
  ("blake2b-136", 0xB211),
  ("blake2b-144", 0xB212),
  ("blake2b-152", 0xB213),
  ("blake2b-160", 0xB214),
  ("blake2b-168", 0xB215),
  ("blake2b-176", 0xB216),
  ("blake2b-184", 0xB217),
  ("blake2b-192", 0xB218),
  ("blake2b-200", 0xB219),
  ("blake2b-208", 0xB21A),
  ("blake2b-216", 0xB21B),
  ("blake2b-224", 0xB21C),
  ("blake2b-232", 0xB21D),
  ("blake2b-240", 0xB21E),
  ("blake2b-248", 0xB21F),
  ("blake2b-256", 0xB220),
  ("blake2b-264", 0xB221),
  ("blake2b-272", 0xB222),
  ("blake2b-280", 0xB223),
  ("blake2b-288", 0xB224),
  ("blake2b-296", 0xB225),
  ("blake2b-304", 0xB226),
  ("blake2b-312", 0xB227),
  ("blake2b-320", 0xB228),
  ("blake2b-328", 0xB229),
  ("blake2b-336", 0xB22A),
  ("blake2b-344", 0xB22B),
  ("blake2b-352", 0xB22C),
  ("blake2b-360", 0xB22D),
  ("blake2b-368", 0xB22E),
  ("blake2b-376", 0xB22F),
  ("blake2b-384", 0xB230),
  ("blake2b-392", 0xB231),
  ("blake2b-400", 0xB232),
  ("blake2b-408", 0xB233),
  ("blake2b-416", 0xB234),
  ("blake2b-424", 0xB235),
  ("blake2b-432", 0xB236),
  ("blake2b-440", 0xB237),
  ("blake2b-448", 0xB238),
  ("blake2b-456", 0xB239),
  ("blake2b-464", 0xB23A),
  ("blake2b-472", 0xB23B),
  ("blake2b-480", 0xB23C),
  ("blake2b-488", 0xB23D),
  ("blake2b-496", 0xB23E),
  ("blake2b-504", 0xB23F),
  ("blake2b-512", 0xB240),
  ("blake2s-8", 0xB241),
  ("blake2s-16", 0xB242),
  ("blake2s-24", 0xB243),
  ("blake2s-32", 0xB244),
  ("blake2s-40", 0xB245),
  ("blake2s-48", 0xB246),
  ("blake2s-56", 0xB247),
  ("blake2s-64", 0xB248),
  ("blake2s-72", 0xB249),
  ("blake2s-80", 0xB24A),
  ("blake2s-88", 0xB24B),
  ("blake2s-96", 0xB24C),
  ("blake2s-104", 0xB24D),
  ("blake2s-112", 0xB24E),
  ("blake2s-120", 0xB24F),
  ("blake2s-128", 0xB250),
  ("blake2s-136", 0xB251),
  ("blake2s-144", 0xB252),
  ("blake2s-152", 0xB253),
  ("blake2s-160", 0xB254),
  ("blake2s-168", 0xB255),
  ("blake2s-176", 0xB256),
  ("blake2s-184", 0xB257),
  ("blake2s-192", 0xB258),
  ("blake2s-200", 0xB259),
  ("blake2s-208", 0xB25A),
  ("blake2s-216", 0xB25B),
  ("blake2s-224", 0xB25C),
  ("blake2s-232", 0xB25D),
  ("blake2s-240", 0xB25E),
  ("blake2s-248", 0xB25F),
  ("blake2s-256", 0xB260),
  ("skein256-8", 0xB301),
  ("skein256-16", 0xB302),
  ("skein256-24", 0xB303),
  ("skein256-32", 0xB304),
  ("skein256-40", 0xB305),
  ("skein256-48", 0xB306),
  ("skein256-56", 0xB307),
  ("skein256-64", 0xB308),
  ("skein256-72", 0xB309),
  ("skein256-80", 0xB30A),
  ("skein256-88", 0xB30B),
  ("skein256-96", 0xB30C),
  ("skein256-104", 0xB30D),
  ("skein256-112", 0xB30E),
  ("skein256-120", 0xB30F),
  ("skein256-128", 0xB310),
  ("skein256-136", 0xB311),
  ("skein256-144", 0xB312),
  ("skein256-152", 0xB313),
  ("skein256-160", 0xB314),
  ("skein256-168", 0xB315),
  ("skein256-176", 0xB316),
  ("skein256-184", 0xB317),
  ("skein256-192", 0xB318),
  ("skein256-200", 0xB319),
  ("skein256-208", 0xB31A),
  ("skein256-216", 0xB31B),
  ("skein256-224", 0xB31C),
  ("skein256-232", 0xB31D),
  ("skein256-240", 0xB31E),
  ("skein256-248", 0xB31F),
  ("skein256-256", 0xB320),
  ("skein512-8", 0xB321),
  ("skein512-16", 0xB322),
  ("skein512-24", 0xB323),
  ("skein512-32", 0xB324),
  ("skein512-40", 0xB325),
  ("skein512-48", 0xB326),
  ("skein512-56", 0xB327),
  ("skein512-64", 0xB328),
  ("skein512-72", 0xB329),
  ("skein512-80", 0xB32A),
  ("skein512-88", 0xB32B),
  ("skein512-96", 0xB32C),
  ("skein512-104", 0xB32D),
  ("skein512-112", 0xB32E),
  ("skein512-120", 0xB32F),
  ("skein512-128", 0xB330),
  ("skein512-136", 0xB331),
  ("skein512-144", 0xB332),
  ("skein512-152", 0xB333),
  ("skein512-160", 0xB334),
  ("skein512-168", 0xB335),
  ("skein512-176", 0xB336),
  ("skein512-184", 0xB337),
  ("skein512-192", 0xB338),
  ("skein512-200", 0xB339),
  ("skein512-208", 0xB33A),
  ("skein512-216", 0xB33B),
  ("skein512-224", 0xB33C),
  ("skein512-232", 0xB33D),
  ("skein512-240", 0xB33E),
  ("skein512-248", 0xB33F),
  ("skein512-256", 0xB340),
  ("skein512-264", 0xB341),
  ("skein512-272", 0xB342),
  ("skein512-280", 0xB343),
  ("skein512-288", 0xB344),
  ("skein512-296", 0xB345),
  ("skein512-304", 0xB346),
  ("skein512-312", 0xB347),
  ("skein512-320", 0xB348),
  ("skein512-328", 0xB349),
  ("skein512-336", 0xB34A),
  ("skein512-344", 0xB34B),
  ("skein512-352", 0xB34C),
  ("skein512-360", 0xB34D),
  ("skein512-368", 0xB34E),
  ("skein512-376", 0xB34F),
  ("skein512-384", 0xB350),
  ("skein512-392", 0xB351),
  ("skein512-400", 0xB352),
  ("skein512-408", 0xB353),
  ("skein512-416", 0xB354),
  ("skein512-424", 0xB355),
  ("skein512-432", 0xB356),
  ("skein512-440", 0xB357),
  ("skein512-448", 0xB358),
  ("skein512-456", 0xB359),
  ("skein512-464", 0xB35A),
  ("skein512-472", 0xB35B),
  ("skein512-480", 0xB35C),
  ("skein512-488", 0xB35D),
  ("skein512-496", 0xB35E),
  ("skein512-504", 0xB35F),
  ("skein512-512", 0xB360),
  ("skein1024-8", 0xB361),
  ("skein1024-16", 0xB362),
  ("skein1024-24", 0xB363),
  ("skein1024-32", 0xB364),
  ("skein1024-40", 0xB365),
  ("skein1024-48", 0xB366),
  ("skein1024-56", 0xB367),
  ("skein1024-64", 0xB368),
  ("skein1024-72", 0xB369),
  ("skein1024-80", 0xB36A),
  ("skein1024-88", 0xB36B),
  ("skein1024-96", 0xB36C),
  ("skein1024-104", 0xB36D),
  ("skein1024-112", 0xB36E),
  ("skein1024-120", 0xB36F),
  ("skein1024-128", 0xB370),
  ("skein1024-136", 0xB371),
  ("skein1024-144", 0xB372),
  ("skein1024-152", 0xB373),
  ("skein1024-160", 0xB374),
  ("skein1024-168", 0xB375),
  ("skein1024-176", 0xB376),
  ("skein1024-184", 0xB377),
  ("skein1024-192", 0xB378),
  ("skein1024-200", 0xB379),
  ("skein1024-208", 0xB37A),
  ("skein1024-216", 0xB37B),
  ("skein1024-224", 0xB37C),
  ("skein1024-232", 0xB37D),
  ("skein1024-240", 0xB37E),
  ("skein1024-248", 0xB37F),
  ("skein1024-256", 0xB380),
  ("skein1024-264", 0xB381),
  ("skein1024-272", 0xB382),
  ("skein1024-280", 0xB383),
  ("skein1024-288", 0xB384),
  ("skein1024-296", 0xB385),
  ("skein1024-304", 0xB386),
  ("skein1024-312", 0xB387),
  ("skein1024-320", 0xB388),
  ("skein1024-328", 0xB389),
  ("skein1024-336", 0xB38A),
  ("skein1024-344", 0xB38B),
  ("skein1024-352", 0xB38C),
  ("skein1024-360", 0xB38D),
  ("skein1024-368", 0xB38E),
  ("skein1024-376", 0xB38F),
  ("skein1024-384", 0xB390),
  ("skein1024-392", 0xB391),
  ("skein1024-400", 0xB392),
  ("skein1024-408", 0xB393),
  ("skein1024-416", 0xB394),
  ("skein1024-424", 0xB395),
  ("skein1024-432", 0xB396),
  ("skein1024-440", 0xB397),
  ("skein1024-448", 0xB398),
  ("skein1024-456", 0xB399),
  ("skein1024-464", 0xB39A),
  ("skein1024-472", 0xB39B),
  ("skein1024-480", 0xB39C),
  ("skein1024-488", 0xB39D),
  ("skein1024-496", 0xB39E),
  ("skein1024-504", 0xB39F),
  ("skein1024-512", 0xB3A0),
  ("skein1024-520", 0xB3A1),
  ("skein1024-528", 0xB3A2),
  ("skein1024-536", 0xB3A3),
  ("skein1024-544", 0xB3A4),
  ("skein1024-552", 0xB3A5),
  ("skein1024-560", 0xB3A6),
  ("skein1024-568", 0xB3A7),
  ("skein1024-576", 0xB3A8),
  ("skein1024-584", 0xB3A9),
  ("skein1024-592", 0xB3AA),
  ("skein1024-600", 0xB3AB),
  ("skein1024-608", 0xB3AC),
  ("skein1024-616", 0xB3AD),
  ("skein1024-624", 0xB3AE),
  ("skein1024-632", 0xB3AF),
  ("skein1024-640", 0xB3B0),
  ("skein1024-648", 0xB3B1),
  ("skein1024-656", 0xB3B2),
  ("skein1024-664", 0xB3B3),
  ("skein1024-672", 0xB3B4),
  ("skein1024-680", 0xB3B5),
  ("skein1024-688", 0xB3B6),
  ("skein1024-696", 0xB3B7),
  ("skein1024-704", 0xB3B8),
  ("skein1024-712", 0xB3B9),
  ("skein1024-720", 0xB3BA),
  ("skein1024-728", 0xB3BB),
  ("skein1024-736", 0xB3BC),
  ("skein1024-744", 0xB3BD),
  ("skein1024-752", 0xB3BE),
  ("skein1024-760", 0xB3BF),
  ("skein1024-768", 0xB3C0),
  ("skein1024-776", 0xB3C1),
  ("skein1024-784", 0xB3C2),
  ("skein1024-792", 0xB3C3),
  ("skein1024-800", 0xB3C4),
  ("skein1024-808", 0xB3C5),
  ("skein1024-816", 0xB3C6),
  ("skein1024-824", 0xB3C7),
  ("skein1024-832", 0xB3C8),
  ("skein1024-840", 0xB3C9),
  ("skein1024-848", 0xB3CA),
  ("skein1024-856", 0xB3CB),
  ("skein1024-864", 0xB3CC),
  ("skein1024-872", 0xB3CD),
  ("skein1024-880", 0xB3CE),
  ("skein1024-888", 0xB3CF),
  ("skein1024-896", 0xB3D0),
  ("skein1024-904", 0xB3D1),
  ("skein1024-912", 0xB3D2),
  ("skein1024-920", 0xB3D3),
  ("skein1024-928", 0xB3D4),
  ("skein1024-936", 0xB3D5),
  ("skein1024-944", 0xB3D6),
  ("skein1024-952", 0xB3D7),
  ("skein1024-960", 0xB3D8),
  ("skein1024-968", 0xB3D9),
  ("skein1024-976", 0xB3DA),
  ("skein1024-984", 0xB3DB),
  ("skein1024-992", 0xB3DC),
  ("skein1024-1000", 0xB3DD),
  ("skein1024-1008", 0xB3DE),
  ("skein1024-1016", 0xB3DF),
  ("skein1024-1024", 0xB3E0),
  # multiaddrs
  ("ip4", 0x04),
@@ -193,6 +387,7 @@ const MultiCodecList = [
  ("https", 0x01BB),
  ("tls", 0x01C0),
  ("quic", 0x01CC),
  ("quic-v1", 0x01CD),
  ("ws", 0x01DD),
  ("wss", 0x01DE),
  ("p2p-websocket-star", 0x01DF), # not in multicodec list
@@ -202,6 +397,7 @@ const MultiCodecList = [
  ("onion3", 0x01BD),
  ("p2p-circuit", 0x0122),
  ("libp2p-peer-record", 0x0301),
  ("memory", 0x0309),
  ("dns", 0x35),
  ("dns4", 0x36),
  ("dns6", 0x37),
@@ -209,6 +405,7 @@ const MultiCodecList = [
  # IPLD formats
  ("dag-pb", 0x70),
  ("dag-cbor", 0x71),
  ("libp2p-key", 0x72),
  ("dag-json", 0x129),
  ("git-raw", 0x78),
  ("eth-block", 0x90),
@@ -232,7 +429,7 @@ const MultiCodecList = [
  ("dash-tx", 0xF1),
  ("torrent-info", 0x7B),
  ("torrent-file", 0x7C),
  ("ed25519-pub", 0xED)
  ("ed25519-pub", 0xED),
]

type
@@ -240,8 +437,7 @@ type
  MultiCodecError* = enum
    MultiCodecNotSupported

const
  InvalidMultiCodec* = MultiCodec(-1)
const InvalidMultiCodec* = MultiCodec(-1)

proc initMultiCodecNameTable(): Table[string, int] {.compileTime.} =
  for item in MultiCodecList:
@@ -288,10 +484,6 @@ proc `==`*(a, b: MultiCodec): bool =
  ## Returns ``true`` if MultiCodecs ``a`` and ``b`` are equal.
  int(a) == int(b)

proc `!=`*(a, b: MultiCodec): bool =
  ## Returns ``true`` if MultiCodecs ``a`` and ``b`` are not equal.
  int(a) != int(b)

proc hash*(m: MultiCodec): Hash {.inline.} =
  ## Hash procedure for tables.
  hash(int(m))

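The MultiCodecList table above is what both lookup directions compile down to: a
codec name resolves to its integer code and back. A small sketch (module path
assumed):

import libp2p/multicodec

const mc = multiCodec("sha2-256") # compile-time lookup, code 0x12
assert MultiCodec.codec("sha2-256") == mc # runtime lookup by name
assert $mc == "sha2-256" # and back to its canonical name
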
@@ -22,12 +22,13 @@
## 2. MURMUR

{.push raises: [].}
{.used.}

import tables
import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
import varint, vbuffer, multicodec, multibase
import stew/base58
import stew/results
import results
export results
# This is workaround for Nim `import` bug.
export sha, sha2, keccak, blake2, hash, utils
@@ -41,8 +42,9 @@ const
  ErrParseError = "Parse error fromHex"

type
  MHashCoderProc* = proc(data: openArray[byte],
    output: var openArray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [].}
  MHashCoderProc* = proc(data: openArray[byte], output: var openArray[byte]) {.
    nimcall, gcsafe, noSideEffect, raises: []
  .}
  MHash* = object
    mcodec*: MultiCodec
    size*: int
@@ -58,107 +60,152 @@ type

proc identhash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var length = if len(data) > len(output): len(output)
                 else: len(data)
    var length =
      if len(data) > len(output):
        len(output)
      else:
        len(data)
    copyMem(addr output[0], unsafeAddr data[0], length)

proc sha1hash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = sha1.digest(data)
    var length = if sha1.sizeDigest > len(output): len(output)
                 else: sha1.sizeDigest
    var length =
      if sha1.sizeDigest > len(output):
        len(output)
      else:
        sha1.sizeDigest
    copyMem(addr output[0], addr digest.data[0], length)

proc dblsha2_256hash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest1 = sha256.digest(data)
    var digest2 = sha256.digest(digest1.data)
    var length = if sha256.sizeDigest > len(output): len(output)
                 else: sha256.sizeDigest
    var length =
      if sha256.sizeDigest > len(output):
        len(output)
      else:
        sha256.sizeDigest
    copyMem(addr output[0], addr digest2.data[0], length)

proc blake2Bhash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = blake2_512.digest(data)
    var length = if blake2_512.sizeDigest > len(output): len(output)
                 else: blake2_512.sizeDigest
    var length =
      if blake2_512.sizeDigest > len(output):
        len(output)
      else:
        blake2_512.sizeDigest
    copyMem(addr output[0], addr digest.data[0], length)

proc blake2Shash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = blake2_256.digest(data)
    var length = if blake2_256.sizeDigest > len(output): len(output)
                 else: blake2_256.sizeDigest
    var length =
      if blake2_256.sizeDigest > len(output):
        len(output)
      else:
        blake2_256.sizeDigest
    copyMem(addr output[0], addr digest.data[0], length)

proc sha2_256hash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = sha256.digest(data)
    var length = if sha256.sizeDigest > len(output): len(output)
                 else: sha256.sizeDigest
    var length =
      if sha256.sizeDigest > len(output):
        len(output)
      else:
        sha256.sizeDigest
    copyMem(addr output[0], addr digest.data[0], length)

proc sha2_512hash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = sha512.digest(data)
    var length = if sha512.sizeDigest > len(output): len(output)
                 else: sha512.sizeDigest
    var length =
      if sha512.sizeDigest > len(output):
        len(output)
      else:
        sha512.sizeDigest
    copyMem(addr output[0], addr digest.data[0], length)

proc sha3_224hash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = sha3_224.digest(data)
    var length = if sha3_224.sizeDigest > len(output): len(output)
                 else: sha3_224.sizeDigest
    var length =
      if sha3_224.sizeDigest > len(output):
        len(output)
      else:
        sha3_224.sizeDigest
    copyMem(addr output[0], addr digest.data[0], length)

proc sha3_256hash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = sha3_256.digest(data)
    var length = if sha3_256.sizeDigest > len(output): len(output)
                 else: sha3_256.sizeDigest
    var length =
      if sha3_256.sizeDigest > len(output):
        len(output)
      else:
        sha3_256.sizeDigest
    copyMem(addr output[0], addr digest.data[0], length)

proc sha3_384hash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = sha3_384.digest(data)
    var length = if sha3_384.sizeDigest > len(output): len(output)
                 else: sha3_384.sizeDigest
    var length =
      if sha3_384.sizeDigest > len(output):
        len(output)
      else:
        sha3_384.sizeDigest
    copyMem(addr output[0], addr digest.data[0], length)

proc sha3_512hash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = sha3_512.digest(data)
    var length = if sha3_512.sizeDigest > len(output): len(output)
                 else: sha3_512.sizeDigest
    var length =
      if sha3_512.sizeDigest > len(output):
        len(output)
      else:
        sha3_512.sizeDigest
    copyMem(addr output[0], addr digest.data[0], length)

proc keccak_224hash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = keccak224.digest(data)
    var length = if keccak224.sizeDigest > len(output): len(output)
                 else: keccak224.sizeDigest
    var length =
      if keccak224.sizeDigest > len(output):
        len(output)
      else:
        keccak224.sizeDigest
    copyMem(addr output[0], addr digest.data[0], length)

proc keccak_256hash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = keccak256.digest(data)
    var length = if keccak256.sizeDigest > len(output): len(output)
                 else: keccak256.sizeDigest
    var length =
      if keccak256.sizeDigest > len(output):
        len(output)
      else:
        keccak256.sizeDigest
    copyMem(addr output[0], addr digest.data[0], length)

proc keccak_384hash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = keccak384.digest(data)
    var length = if keccak384.sizeDigest > len(output): len(output)
                 else: keccak384.sizeDigest
    var length =
      if keccak384.sizeDigest > len(output):
        len(output)
      else:
        keccak384.sizeDigest
    copyMem(addr output[0], addr digest.data[0], length)

proc keccak_512hash(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = keccak512.digest(data)
    var length = if keccak512.sizeDigest > len(output): len(output)
                 else: keccak512.sizeDigest
    var length =
      if keccak512.sizeDigest > len(output):
        len(output)
      else:
        keccak512.sizeDigest
    copyMem(addr output[0], addr digest.data[0], length)

proc shake_128hash(data: openArray[byte], output: var openArray[byte]) =
@@ -179,151 +226,135 @@ proc shake_256hash(data: openArray[byte], output: var openArray[byte]) =
    discard sctx.output(addr output[0], uint(len(output)))
    sctx.clear()

const
  HashesList = [
    MHash(mcodec: multiCodec("identity"), size: 0,
      coder: identhash),
    MHash(mcodec: multiCodec("sha1"), size: sha1.sizeDigest,
      coder: sha1hash),
    MHash(mcodec: multiCodec("dbl-sha2-256"), size: sha256.sizeDigest,
      coder: dblsha2_256hash
    ),
    MHash(mcodec: multiCodec("sha2-256"), size: sha256.sizeDigest,
      coder: sha2_256hash
    ),
    MHash(mcodec: multiCodec("sha2-512"), size: sha512.sizeDigest,
      coder: sha2_512hash
    ),
    MHash(mcodec: multiCodec("sha3-224"), size: sha3_224.sizeDigest,
      coder: sha3_224hash
    ),
    MHash(mcodec: multiCodec("sha3-256"), size: sha3_256.sizeDigest,
      coder: sha3_256hash
    ),
    MHash(mcodec: multiCodec("sha3-384"), size: sha3_384.sizeDigest,
      coder: sha3_384hash
    ),
    MHash(mcodec: multiCodec("sha3-512"), size: sha3_512.sizeDigest,
      coder: sha3_512hash
    ),
    MHash(mcodec: multiCodec("shake-128"), size: 32, coder: shake_128hash),
    MHash(mcodec: multiCodec("shake-256"), size: 64, coder: shake_256hash),
    MHash(mcodec: multiCodec("keccak-224"), size: keccak224.sizeDigest,
      coder: keccak_224hash
    ),
    MHash(mcodec: multiCodec("keccak-256"), size: keccak256.sizeDigest,
      coder: keccak_256hash
    ),
    MHash(mcodec: multiCodec("keccak-384"), size: keccak384.sizeDigest,
      coder: keccak_384hash
    ),
    MHash(mcodec: multiCodec("keccak-512"), size: keccak512.sizeDigest,
      coder: keccak_512hash
    ),
    MHash(mcodec: multiCodec("blake2b-8"), size: 1, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-16"), size: 2, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-24"), size: 3, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-32"), size: 4, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-40"), size: 5, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-48"), size: 6, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-56"), size: 7, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-64"), size: 8, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-72"), size: 9, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-80"), size: 10, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-88"), size: 11, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-96"), size: 12, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-104"), size: 13, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-112"), size: 14, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-120"), size: 15, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-128"), size: 16, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-136"), size: 17, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-144"), size: 18, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-152"), size: 19, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-160"), size: 20, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-168"), size: 21, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-176"), size: 22, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-184"), size: 23, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-192"), size: 24, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-200"), size: 25, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-208"), size: 26, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-216"), size: 27, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-224"), size: 28, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-232"), size: 29, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-240"), size: 30, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-248"), size: 31, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-256"), size: 32, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-264"), size: 33, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-272"), size: 34, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-280"), size: 35, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-288"), size: 36, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-296"), size: 37, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-304"), size: 38, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-312"), size: 39, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-320"), size: 40, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-328"), size: 41, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-336"), size: 42, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-344"), size: 43, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-352"), size: 44, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-360"), size: 45, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-368"), size: 46, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-376"), size: 47, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-384"), size: 48, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-392"), size: 49, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-400"), size: 50, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-408"), size: 51, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-416"), size: 52, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-424"), size: 53, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-432"), size: 54, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-440"), size: 55, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-448"), size: 56, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-456"), size: 57, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-464"), size: 58, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-472"), size: 59, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-480"), size: 60, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-488"), size: 61, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-496"), size: 62, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-504"), size: 63, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2b-512"), size: 64, coder: blake2Bhash),
    MHash(mcodec: multiCodec("blake2s-8"), size: 1, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-16"), size: 2, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-24"), size: 3, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-32"), size: 4, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-40"), size: 5, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-48"), size: 6, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-56"), size: 7, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-64"), size: 8, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-72"), size: 9, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-80"), size: 10, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-88"), size: 11, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-96"), size: 12, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-104"), size: 13, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-112"), size: 14, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-120"), size: 15, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-128"), size: 16, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-136"), size: 17, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-144"), size: 18, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-152"), size: 19, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-160"), size: 20, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-168"), size: 21, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-176"), size: 22, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-184"), size: 23, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-192"), size: 24, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-200"), size: 25, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-208"), size: 26, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-216"), size: 27, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-224"), size: 28, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-232"), size: 29, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-240"), size: 30, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-248"), size: 31, coder: blake2Shash),
    MHash(mcodec: multiCodec("blake2s-256"), size: 32, coder: blake2Shash)
  ]
const HashesList = [
  MHash(mcodec: multiCodec("identity"), size: 0, coder: identhash),
  MHash(mcodec: multiCodec("sha1"), size: sha1.sizeDigest, coder: sha1hash),
  MHash(
    mcodec: multiCodec("dbl-sha2-256"), size: sha256.sizeDigest, coder: dblsha2_256hash
  ),
  MHash(mcodec: multiCodec("sha2-256"), size: sha256.sizeDigest, coder: sha2_256hash),
  MHash(mcodec: multiCodec("sha2-512"), size: sha512.sizeDigest, coder: sha2_512hash),
  MHash(mcodec: multiCodec("sha3-224"), size: sha3_224.sizeDigest, coder: sha3_224hash),
  MHash(mcodec: multiCodec("sha3-256"), size: sha3_256.sizeDigest, coder: sha3_256hash),
  MHash(mcodec: multiCodec("sha3-384"), size: sha3_384.sizeDigest, coder: sha3_384hash),
  MHash(mcodec: multiCodec("sha3-512"), size: sha3_512.sizeDigest, coder: sha3_512hash),
  MHash(mcodec: multiCodec("shake-128"), size: 32, coder: shake_128hash),
  MHash(mcodec: multiCodec("shake-256"), size: 64, coder: shake_256hash),
  MHash(
    mcodec: multiCodec("keccak-224"), size: keccak224.sizeDigest, coder: keccak_224hash
  ),
  MHash(
    mcodec: multiCodec("keccak-256"), size: keccak256.sizeDigest, coder: keccak_256hash
  ),
  MHash(
    mcodec: multiCodec("keccak-384"), size: keccak384.sizeDigest, coder: keccak_384hash
  ),
  MHash(
    mcodec: multiCodec("keccak-512"), size: keccak512.sizeDigest, coder: keccak_512hash
  ),
  MHash(mcodec: multiCodec("blake2b-8"), size: 1, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-16"), size: 2, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-24"), size: 3, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-32"), size: 4, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-40"), size: 5, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-48"), size: 6, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-56"), size: 7, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-64"), size: 8, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-72"), size: 9, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-80"), size: 10, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-88"), size: 11, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-96"), size: 12, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-104"), size: 13, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-112"), size: 14, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-120"), size: 15, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-128"), size: 16, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-136"), size: 17, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-144"), size: 18, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-152"), size: 19, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-160"), size: 20, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-168"), size: 21, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-176"), size: 22, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-184"), size: 23, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-192"), size: 24, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-200"), size: 25, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-208"), size: 26, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-216"), size: 27, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-224"), size: 28, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-232"), size: 29, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-240"), size: 30, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-248"), size: 31, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-256"), size: 32, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-264"), size: 33, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-272"), size: 34, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-280"), size: 35, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-288"), size: 36, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-296"), size: 37, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-304"), size: 38, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-312"), size: 39, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-320"), size: 40, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-328"), size: 41, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-336"), size: 42, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-344"), size: 43, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-352"), size: 44, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-360"), size: 45, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-368"), size: 46, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-376"), size: 47, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-384"), size: 48, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-392"), size: 49, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-400"), size: 50, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-408"), size: 51, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-416"), size: 52, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-424"), size: 53, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-432"), size: 54, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-440"), size: 55, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-448"), size: 56, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-456"), size: 57, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-464"), size: 58, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-472"), size: 59, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-480"), size: 60, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-488"), size: 61, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-496"), size: 62, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-504"), size: 63, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2b-512"), size: 64, coder: blake2Bhash),
  MHash(mcodec: multiCodec("blake2s-8"), size: 1, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-16"), size: 2, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-24"), size: 3, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-32"), size: 4, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-40"), size: 5, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-48"), size: 6, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-56"), size: 7, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-64"), size: 8, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-72"), size: 9, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-80"), size: 10, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-88"), size: 11, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-96"), size: 12, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-104"), size: 13, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-112"), size: 14, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-120"), size: 15, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-128"), size: 16, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-136"), size: 17, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-144"), size: 18, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-152"), size: 19, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-160"), size: 20, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-168"), size: 21, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-176"), size: 22, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-184"), size: 23, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-192"), size: 24, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-200"), size: 25, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-208"), size: 26, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-216"), size: 27, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-224"), size: 28, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-232"), size: 29, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-240"), size: 30, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-248"), size: 31, coder: blake2Shash),
  MHash(mcodec: multiCodec("blake2s-256"), size: 32, coder: blake2Shash),
]

proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
  for item in HashesList:
    result[item.mcodec] = item

const
  CodeHashes = initMultiHashCodeTable()
const CodeHashes = initMultiHashCodeTable()

proc digestImplWithHash(hash: MHash, data: openArray[byte]): MultiHash =
  var buffer: array[MaxHashSize, byte]
@@ -353,8 +384,9 @@ proc digestImplWithoutHash(hash: MHash, data: openArray[byte]): MultiHash =
  result.data.writeArray(data)
  result.data.finish()

proc digest*(mhtype: typedesc[MultiHash], hashname: string,
             data: openArray[byte]): MhResult[MultiHash] {.inline.} =
proc digest*(
    mhtype: typedesc[MultiHash], hashname: string, data: openArray[byte]
): MhResult[MultiHash] {.inline.} =
  ## Perform digest calculation using hash algorithm with name ``hashname`` on
  ## data array ``data``.
  let mc = MultiCodec.codec(hashname)
@@ -367,8 +399,9 @@ proc digest*(mhtype: typedesc[MultiHash], hashname: string,
  else:
    ok(digestImplWithHash(hash, data))

proc digest*(mhtype: typedesc[MultiHash], hashcode: int,
             data: openArray[byte]): MhResult[MultiHash] {.inline.} =
proc digest*(
    mhtype: typedesc[MultiHash], hashcode: int, data: openArray[byte]
): MhResult[MultiHash] {.inline.} =
  ## Perform digest calculation using hash algorithm with code ``hashcode`` on
  ## data array ``data``.
  let hash = CodeHashes.getOrDefault(hashcode)
@@ -377,8 +410,9 @@ proc digest*(mhtype: typedesc[MultiHash], hashcode: int,
  else:
    ok(digestImplWithHash(hash, data))

proc init*[T](mhtype: typedesc[MultiHash], hashname: string,
              mdigest: MDigest[T]): MhResult[MultiHash] {.inline.} =
proc init*[T](
    mhtype: typedesc[MultiHash], hashname: string, mdigest: MDigest[T]
): MhResult[MultiHash] {.inline.} =
  ## Create MultiHash from nimcrypto's `MDigest` object and hash algorithm name
  ## ``hashname``.
  let mc = MultiCodec.codec(hashname)
@@ -393,8 +427,9 @@ proc init*[T](mhtype: typedesc[MultiHash], hashname: string,
  else:
    ok(digestImplWithoutHash(hash, mdigest.data))

proc init*[T](mhtype: typedesc[MultiHash], hashcode: MultiCodec,
              mdigest: MDigest[T]): MhResult[MultiHash] {.inline.} =
proc init*[T](
    mhtype: typedesc[MultiHash], hashcode: MultiCodec, mdigest: MDigest[T]
): MhResult[MultiHash] {.inline.} =
  ## Create MultiHash from nimcrypto's `MDigest` and hash algorithm code
  ## ``hashcode``.
  let hash = CodeHashes.getOrDefault(hashcode)
@@ -405,8 +440,9 @@ proc init*[T](mhtype: typedesc[MultiHash], hashcode: MultiCodec,
  else:
    ok(digestImplWithoutHash(hash, mdigest.data))

proc init*(mhtype: typedesc[MultiHash], hashname: string,
           bdigest: openArray[byte]): MhResult[MultiHash] {.inline.} =
proc init*(
    mhtype: typedesc[MultiHash], hashname: string, bdigest: openArray[byte]
): MhResult[MultiHash] {.inline.} =
  ## Create MultiHash from array of bytes ``bdigest`` and hash algorithm code
  ## ``hashcode``.
  let mc = MultiCodec.codec(hashname)
@@ -421,8 +457,9 @@ proc init*(mhtype: typedesc[MultiHash], hashname: string,
  else:
    ok(digestImplWithoutHash(hash, bdigest))

proc init*(mhtype: typedesc[MultiHash], hashcode: MultiCodec,
           bdigest: openArray[byte]): MhResult[MultiHash] {.inline.} =
proc init*(
    mhtype: typedesc[MultiHash], hashcode: MultiCodec, bdigest: openArray[byte]
): MhResult[MultiHash] {.inline.} =
  ## Create MultiHash from array of bytes ``bdigest`` and hash algorithm code
  ## ``hashcode``.
  let hash = CodeHashes.getOrDefault(hashcode)
@@ -433,8 +470,9 @@ proc init*(mhtype: typedesc[MultiHash], hashcode: MultiCodec,
|
||||
else:
|
||||
ok(digestImplWithoutHash(hash, bdigest))
|
||||
|
||||
proc decode*(mhtype: typedesc[MultiHash], data: openArray[byte],
|
||||
mhash: var MultiHash): MhResult[int] =
|
||||
proc decode*(
|
||||
mhtype: typedesc[MultiHash], data: openArray[byte], mhash: var MultiHash
|
||||
): MhResult[int] =
|
||||
## Decode MultiHash value from array of bytes ``data``.
|
||||
##
|
||||
## On success decoded MultiHash will be stored into ``mhash`` and number of
|
||||
@@ -473,9 +511,10 @@ proc decode*(mhtype: typedesc[MultiHash], data: openArray[byte],
|
||||
if not vb.isEnough(int(size)):
|
||||
return err(ErrDecodeError)
|
||||
|
||||
mhash = ? MultiHash.init(MultiCodec(code),
|
||||
vb.buffer.toOpenArray(vb.offset,
|
||||
vb.offset + int(size) - 1))
|
||||
mhash =
|
||||
?MultiHash.init(
|
||||
MultiCodec(code), vb.buffer.toOpenArray(vb.offset, vb.offset + int(size) - 1)
|
||||
)
|
||||
ok(vb.offset + int(size))
|
||||
|
||||
proc validate*(mhtype: typedesc[MultiHash], data: openArray[byte]): bool =
|
||||
@@ -508,27 +547,27 @@ proc validate*(mhtype: typedesc[MultiHash], data: openArray[byte]): bool =
|
||||
return false
|
||||
result = true
|
||||
|
||||
proc init*(mhtype: typedesc[MultiHash],
|
||||
data: openArray[byte]): MhResult[MultiHash] {.inline.} =
|
||||
proc init*(
|
||||
mhtype: typedesc[MultiHash], data: openArray[byte]
|
||||
): MhResult[MultiHash] {.inline.} =
|
||||
## Create MultiHash from bytes array ``data``.
|
||||
var hash: MultiHash
|
||||
discard ? MultiHash.decode(data, hash)
|
||||
discard ?MultiHash.decode(data, hash)
|
||||
ok(hash)
|
||||
|
||||
proc init*(mhtype: typedesc[MultiHash], data: string): MhResult[MultiHash] {.inline.} =
|
||||
## Create MultiHash from hexadecimal string representation ``data``.
|
||||
var hash: MultiHash
|
||||
try:
|
||||
discard ? MultiHash.decode(fromHex(data), hash)
|
||||
discard ?MultiHash.decode(fromHex(data), hash)
|
||||
ok(hash)
|
||||
except ValueError:
|
||||
err(ErrParseError)
|
||||
|
||||
proc init58*(mhtype: typedesc[MultiHash],
|
||||
data: string): MultiHash {.inline.} =
|
||||
proc init58*(mhtype: typedesc[MultiHash], data: string): MultiHash {.inline.} =
|
||||
## Create MultiHash from BASE58 encoded string representation ``data``.
|
||||
if MultiHash.decode(Base58.decode(data), result) == -1:
|
||||
raise newException(MultihashError, "Incorrect MultiHash binary format")
|
||||
raise newException(MultihashError, "Incorrect MultiHash binary format in init58")
|
||||
|
||||
proc cmp(a: openArray[byte], b: openArray[byte]): bool {.inline.} =
|
||||
if len(a) != len(b):
|
||||
@@ -538,7 +577,7 @@ proc cmp(a: openArray[byte], b: openArray[byte]): bool {.inline.} =
|
||||
while n > 0:
|
||||
dec(n)
|
||||
diff = int(a[n]) - int(b[n])
|
||||
res = (res and -not(diff)) or diff
|
||||
res = (res and - not (diff)) or diff
|
||||
result = (res == 0)
|
||||
|
||||
proc `==`*[T](mh: MultiHash, mdigest: MDigest[T]): bool =
|
||||
@@ -548,8 +587,10 @@ proc `==`*[T](mh: MultiHash, mdigest: MDigest[T]): bool =
|
||||
return false
|
||||
if len(mdigest.data) != mh.size:
|
||||
return false
|
||||
result = cmp(mh.data.buffer.toOpenArray(mh.dpos, mh.dpos + mh.size - 1),
|
||||
mdigest.data.toOpenArray(0, mdigest.data.high))
|
||||
result = cmp(
|
||||
mh.data.buffer.toOpenArray(mh.dpos, mh.dpos + mh.size - 1),
|
||||
mdigest.data.toOpenArray(0, mdigest.data.high),
|
||||
)
|
||||
|
||||
proc `==`*[T](mdigest: MDigest[T], mh: MultiHash): bool {.inline.} =
|
||||
## Compares MultiHash with nimcrypto's MDigest[T], returns ``true`` if
|
||||
@@ -565,8 +606,10 @@ proc `==`*(a: MultiHash, b: MultiHash): bool =
|
||||
return false
|
||||
if a.size != b.size:
|
||||
return false
|
||||
result = cmp(a.data.buffer.toOpenArray(a.dpos, a.dpos + a.size - 1),
|
||||
b.data.buffer.toOpenArray(b.dpos, b.dpos + b.size - 1))
|
||||
result = cmp(
|
||||
a.data.buffer.toOpenArray(a.dpos, a.dpos + a.size - 1),
|
||||
b.data.buffer.toOpenArray(b.dpos, b.dpos + b.size - 1),
|
||||
)
|
||||
|
||||
proc hex*(value: MultiHash): string =
|
||||
## Return hexadecimal string representation of MultiHash ``value``.
|
||||
@@ -578,16 +621,16 @@ proc base58*(value: MultiHash): string =
|
||||
|
||||
proc `$`*(mh: MultiHash): string =
|
||||
## Return string representation of MultiHash ``value``.
|
||||
let digest = toHex(mh.data.buffer.toOpenArray(mh.dpos,
|
||||
mh.dpos + mh.size - 1))
|
||||
let digest = toHex(mh.data.buffer.toOpenArray(mh.dpos, mh.dpos + mh.size - 1))
|
||||
result = $(mh.mcodec) & "/" & digest
|
||||
|
||||
proc write*(vb: var VBuffer, mh: MultiHash) {.inline.} =
|
||||
## Write MultiHash value ``mh`` to buffer ``vb``.
|
||||
vb.writeArray(mh.data.buffer)
|
||||
|
||||
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
|
||||
mh: MultiHash): string {.inline.} =
|
||||
proc encode*(
|
||||
mbtype: typedesc[MultiBase], encoding: string, mh: MultiHash
|
||||
): string {.inline.} =
|
||||
## Get MultiBase encoded representation of ``mh`` using encoding
|
||||
## ``encoding``.
|
||||
result = MultiBase.encode(encoding, mh.data.buffer)
|
||||
|
||||
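A minimal usage sketch of the reworked digest API (not part of the diff above), assuming the sha2-256 codec is registered in HashesList and stew's results/byteutils are in scope:

  import libp2p/multihash
  import stew/byteutils

  let mhRes = MultiHash.digest("sha2-256", "hello".toBytes())
  if mhRes.isOk:
    let mh = mhRes.get()
    echo $mh      # prints "sha2-256/<hex digest>", per the `$` proc above
    echo mh.hex() # the full multihash bytes, hex-encoded
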
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -11,8 +11,7 @@

import std/[strutils, sequtils, tables]
import chronos, chronicles, stew/byteutils
import stream/connection,
       protocols/protocol
import stream/connection, protocols/protocol

logScope:
  topics = "libp2p multistream"
@@ -25,7 +24,7 @@ const
  Ls = "ls\n"

type
  Matcher* = proc (proto: string): bool {.gcsafe, raises: [].}
  Matcher* = proc(proto: string): bool {.gcsafe, raises: [].}

  MultiStreamError* = object of LPError

@@ -40,20 +39,17 @@ type
    codec*: string

proc new*(T: typedesc[MultistreamSelect]): T =
  T(
    codec: Codec,
  )
  T(codec: Codec)

template validateSuffix(str: string): untyped =
  if str.endsWith("\n"):
    str.removeSuffix("\n")
  else:
    raise newException(MultiStreamError, "MultistreamSelect failed, malformed message")
  if str.endsWith("\n"):
    str.removeSuffix("\n")
  else:
    raise (ref MultiStreamError)(msg: "MultistreamSelect failed, malformed message")

proc select*(_: MultistreamSelect | type MultistreamSelect,
             conn: Connection,
             proto: seq[string]):
             Future[string] {.async.} =
proc select*(
    _: MultistreamSelect | type MultistreamSelect, conn: Connection, proto: seq[string]
): Future[string] {.async: (raises: [CancelledError, LPStreamError, MultiStreamError]).} =
  trace "initiating handshake", conn, codec = Codec
  ## select a remote protocol
  await conn.writeLp(Codec & "\n") # write handshake
@@ -66,7 +62,7 @@ proc select*(_: MultistreamSelect | type MultistreamSelect,

  if s != Codec:
    notice "handshake failed", conn, codec = s
    raise newException(MultiStreamError, "MultistreamSelect handshake failed")
    raise (ref MultiStreamError)(msg: "MultistreamSelect handshake failed")
  else:
    trace "multistream handshake success", conn

@@ -82,7 +78,7 @@ proc select*(_: MultistreamSelect | type MultistreamSelect,
    return proto[0]
  elif proto.len > 1:
    # Try to negotiate alternatives
    let protos = proto[1..<proto.len()]
    let protos = proto[1 ..< proto.len()]
    trace "selecting one of several protos", conn, protos = protos
    for p in protos:
      trace "selecting proto", conn, proto = p
@@ -98,19 +94,26 @@ proc select*(_: MultistreamSelect | type MultistreamSelect,
    # No alternatives, fail
    return ""

proc select*(_: MultistreamSelect | type MultistreamSelect,
             conn: Connection,
             proto: string): Future[bool] {.async.} =
proc select*(
    _: MultistreamSelect | type MultistreamSelect, conn: Connection, proto: string
): Future[bool] {.async: (raises: [CancelledError, LPStreamError, MultiStreamError]).} =
  if proto.len > 0:
    return (await MultistreamSelect.select(conn, @[proto])) == proto
    (await MultistreamSelect.select(conn, @[proto])) == proto
  else:
    return (await MultistreamSelect.select(conn, @[])) == Codec
    (await MultistreamSelect.select(conn, @[])) == Codec

proc select*(m: MultistreamSelect, conn: Connection): Future[bool] =
proc select*(
    m: MultistreamSelect, conn: Connection
): Future[bool] {.
    async: (raises: [CancelledError, LPStreamError, MultiStreamError], raw: true)
.} =
  m.select(conn, "")

proc list*(m: MultistreamSelect,
           conn: Connection): Future[seq[string]] {.async.} =
proc list*(
    m: MultistreamSelect, conn: Connection
): Future[seq[string]] {.
    async: (raises: [CancelledError, LPStreamError, MultiStreamError])
.} =
  ## list remote protos requests on connection
  if not await m.select(conn):
    return
@@ -126,12 +129,12 @@ proc list*(m: MultistreamSelect,
  result = list

proc handle*(
  _: type MultistreamSelect,
  conn: Connection,
  protos: seq[string],
  matchers = newSeq[Matcher](),
  active: bool = false,
): Future[string] {.async, gcsafe.} =
    _: type MultistreamSelect,
    conn: Connection,
    protos: seq[string],
    matchers = newSeq[Matcher](),
    active: bool = false,
): Future[string] {.async: (raises: [CancelledError, LPStreamError, MultiStreamError]).} =
  trace "Starting multistream negotiation", conn, handshaked = active
  var handshaked = active
  while not conn.atEof:
@@ -139,16 +142,17 @@ proc handle*(
    validateSuffix(ms)

    if not handshaked and ms != Codec:
      debug "expected handshake message", conn, instead=ms
      raise newException(CatchableError,
        "MultistreamSelect handling failed, invalid first message")
      debug "expected handshake message", conn, instead = ms
      raise (ref MultiStreamError)(
        msg: "MultistreamSelect handling failed, invalid first message"
      )

    trace "handle: got request", conn, ms
    if ms.len() <= 0:
      trace "handle: invalid proto", conn
      await conn.writeLp(Na)

    case ms:
    case ms
    of "ls":
      trace "handle: listing protos", conn
      #TODO this doens't seem to follow spec, each protocol
@@ -160,8 +164,7 @@ proc handle*(
        await conn.writeLp(Codec & "\n")
        handshaked = true
      else:
        trace "handle: sending `na` for duplicate handshake while handshaked",
          conn
        trace "handle: sending `na` for duplicate handshake while handshaked", conn
        await conn.writeLp(Na)
    elif ms in protos or matchers.anyIt(it(ms)):
      trace "found handler", conn, protocol = ms
@@ -172,14 +175,15 @@ proc handle*(
      trace "no handlers", conn, protocol = ms
      await conn.writeLp(Na)

proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
proc handle*(
    m: MultistreamSelect, conn: Connection, active: bool = false
) {.async: (raises: [CancelledError]).} =
  trace "Starting multistream handler", conn, handshaked = active
  var
    handshaked = active
    protos: seq[string]
    matchers: seq[Matcher]
  for h in m.handlers:
    if not isNil(h.match):
    if h.match != nil:
      matchers.add(h.match)
    for proto in h.protos:
      protos.add(proto)
@@ -187,7 +191,7 @@ proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.asy
  try:
    let ms = await MultistreamSelect.handle(conn, protos, matchers, active)
    for h in m.handlers:
      if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
      if (h.match != nil and h.match(ms)) or h.protos.contains(ms):
        trace "found handler", conn, protocol = ms

        var protocolHolder = h
@@ -208,43 +212,64 @@ proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.asy
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    trace "Exception in multistream", conn, msg = exc.msg
    trace "Exception in multistream", conn, description = exc.msg
  finally:
    await conn.close()

  trace "Stopped multistream handler", conn

proc addHandler*(m: MultistreamSelect,
                 codecs: seq[string],
                 protocol: LPProtocol,
                 matcher: Matcher = nil) =
proc addHandler*(
    m: MultistreamSelect,
    codecs: seq[string],
    protocol: LPProtocol,
    matcher: Matcher = nil,
) =
  trace "registering protocols", protos = codecs
  m.handlers.add(HandlerHolder(protos: codecs,
                               protocol: protocol,
                               match: matcher))
  m.handlers.add(HandlerHolder(protos: codecs, protocol: protocol, match: matcher))

proc addHandler*(m: MultistreamSelect,
                 codec: string,
                 protocol: LPProtocol,
                 matcher: Matcher = nil) =
proc addHandler*(
    m: MultistreamSelect, codec: string, protocol: LPProtocol, matcher: Matcher = nil
) =
  addHandler(m, @[codec], protocol, matcher)

proc addHandler*(m: MultistreamSelect,
                 codec: string,
                 handler: LPProtoHandler,
                 matcher: Matcher = nil) =
proc addHandler*[E](
    m: MultistreamSelect,
    codec: string,
    handler:
      LPProtoHandler |
      proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
    matcher: Matcher = nil,
) =
  ## helper to allow registering pure handlers
  trace "registering proto handler", proto = codec
  let protocol = new LPProtocol
  protocol.codec = codec
  protocol.handler = handler

  m.handlers.add(HandlerHolder(protos: @[codec],
                               protocol: protocol,
                               match: matcher))
  m.handlers.add(HandlerHolder(protos: @[codec], protocol: protocol, match: matcher))

proc start*(m: MultistreamSelect) {.async.} =
  await allFutures(m.handlers.mapIt(it.protocol.start()))
proc start*(m: MultistreamSelect) {.async: (raises: [CancelledError]).} =
  let futs = m.handlers.mapIt(it.protocol.start())
  try:
    await allFutures(futs)
    for fut in futs:
      await fut
  except CancelledError as exc:
    var pending: seq[Future[void].Raising([])]
    doAssert m.handlers.len == futs.len, "Handlers modified while starting"
    for i, fut in futs:
      if not fut.finished:
        pending.add fut.cancelAndWait()
      elif fut.completed:
        pending.add m.handlers[i].protocol.stop()
      else:
        static:
          doAssert typeof(fut).E is (CancelledError,)
    await noCancel allFutures(pending)
    raise exc

proc stop*(m: MultistreamSelect) {.async.} =
  await allFutures(m.handlers.mapIt(it.protocol.stop()))
proc stop*(m: MultistreamSelect) {.async: (raises: []).} =
  let futs = m.handlers.mapIt(it.protocol.stop())
  await noCancel allFutures(futs)
  for fut in futs:
    await fut

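A hedged dialer-side sketch (not part of the diff) of the select overload above, which negotiates one of several protocol ids over a connection; conn and the ids are illustrative:

  let negotiated = await MultistreamSelect.select(
    conn, @["/chat/1.1.0", "/chat/1.0.0"]
  )
  if negotiated.len == 0:
    echo "peer supports none of the offered protocols" # select returns "" on failure
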
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -10,29 +10,22 @@
{.push raises: [].}

import pkg/[chronos, chronicles, stew/byteutils]
import ../../stream/connection,
       ../../utility,
       ../../varint,
       ../../vbuffer,
       ../muxer
import ../../stream/connection, ../../utility, ../../varint, ../../vbuffer, ../muxer

logScope:
  topics = "libp2p mplexcoder"

type
  MessageType* {.pure.} = enum
    New,
    MsgIn,
    MsgOut,
    CloseIn,
    CloseOut,
    ResetIn,
    New
    MsgIn
    MsgOut
    CloseIn
    CloseOut
    ResetIn
    ResetOut

  Msg* = tuple
    id: uint64
    msgType: MessageType
    data: seq[byte]
  Msg* = tuple[id: uint64, msgType: MessageType, data: seq[byte]]

  InvalidMplexMsgType* = object of MuxerError

@@ -42,7 +35,9 @@ const MaxMsgSize* = 1 shl 20 # 1mb
proc newInvalidMplexMsgType*(): ref InvalidMplexMsgType =
  newException(InvalidMplexMsgType, "invalid message type")

proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
proc readMsg*(
    conn: Connection
): Future[Msg] {.async: (raises: [CancelledError, LPStreamError, MuxerError]).} =
  let header = await conn.readVarint()
  trace "read header varint", varint = header, conn

@@ -55,10 +50,9 @@ proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =

  return (header shr 3, MessageType(msgType), data)

proc writeMsg*(conn: Connection,
               id: uint64,
               msgType: MessageType,
               data: seq[byte] = @[]): Future[void] =
proc writeMsg*(
    conn: Connection, id: uint64, msgType: MessageType, data: seq[byte] = @[]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
  var
    left = data.len
    offset = 0
@@ -66,8 +60,11 @@ proc writeMsg*(conn: Connection,

  # Split message into length-prefixed chunks
  while left > 0 or data.len == 0:
    let
      chunkSize = if left > MaxMsgSize: MaxMsgSize - 64 else: left
    let chunkSize =
      if left > MaxMsgSize:
        MaxMsgSize - 64
      else:
        left

    buf.writePBVarint(id shl 3 or ord(msgType).uint64)
    buf.writeSeq(data.toOpenArray(offset, offset + chunkSize - 1))
@@ -84,8 +81,7 @@ proc writeMsg*(conn: Connection,
  # message gets written before some of the chunks
  conn.write(buf.buffer)

proc writeMsg*(conn: Connection,
               id: uint64,
               msgType: MessageType,
               data: string): Future[void] =
proc writeMsg*(
    conn: Connection, id: uint64, msgType: MessageType, data: string
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
  conn.writeMsg(id, msgType, data.toBytes())

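A worked sketch (not from the diff) of the frame-header arithmetic used by writeMsg and readMsg above: the header varint packs the channel id in the upper bits and the 3-bit message type in the lower bits.

  let
    id = 5'u64
    header = id shl 3 or ord(MessageType.MsgOut).uint64
  assert header shr 3 == 5                               # readMsg recovers the id
  assert MessageType(header and 7) == MessageType.MsgOut # and the message type
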
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -11,10 +11,7 @@

import std/[oids, strformat]
import pkg/[chronos, chronicles, metrics]
import ./coder,
       ../muxer,
       ../../stream/[bufferstream, connection, streamseq],
       ../../peerinfo
import ./coder, ../muxer, ../../stream/[bufferstream, connection], ../../peerinfo

export connection

@@ -22,13 +19,15 @@ logScope:
  topics = "libp2p mplexchannel"

when defined(libp2p_mplex_metrics):
  declareHistogram libp2p_mplex_qlen, "message queue length",
  declareHistogram libp2p_mplex_qlen,
    "message queue length",
    buckets = [0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0]
  declareCounter libp2p_mplex_qlenclose, "closed because of max queuelen"
  declareHistogram libp2p_mplex_qtime, "message queuing time"

when defined(libp2p_network_protocols_metrics):
  declareCounter libp2p_protocols_bytes, "total sent or received bytes", ["protocol", "direction"]
  declareCounter libp2p_protocols_bytes,
    "total sent or received bytes", ["protocol", "direction"]

## Channel half-closed states
##
@@ -42,38 +41,41 @@ when defined(libp2p_network_protocols_metrics):
## EOF marker

const
  MaxWrites = 1024 ##\
  MaxWrites = 1024
    ##\
    ## Maximum number of in-flight writes - after this, we disconnect the peer

  LPChannelTrackerName* = "LPChannel"

type
  LPChannel* = ref object of BufferStream
    id*: uint64 # channel id
    name*: string # name of the channel (for debugging)
    conn*: Connection # wrapped connection used to for writing
    initiator*: bool # initiated remotely or locally flag
    isOpen*: bool # has channel been opened
    closedLocal*: bool # has channel been closed locally
    remoteReset*: bool # has channel been remotely reset
    localReset*: bool # has channel been reset locally
    msgCode*: MessageType # cached in/out message code
    closeCode*: MessageType # cached in/out close code
    resetCode*: MessageType # cached in/out reset code
    writes*: int # In-flight writes
type LPChannel* = ref object of BufferStream
  id*: uint64 # channel id
  name*: string # name of the channel (for debugging)
  conn*: Connection # wrapped connection used to for writing
  initiator*: bool # initiated remotely or locally flag
  isOpen*: bool # has channel been opened
  closedLocal*: bool # has channel been closed locally
  remoteReset*: bool # has channel been remotely reset
  localReset*: bool # has channel been reset locally
  msgCode*: MessageType # cached in/out message code
  closeCode*: MessageType # cached in/out close code
  resetCode*: MessageType # cached in/out reset code
  writes*: int # In-flight writes

func shortLog*(s: LPChannel): auto =
  try:
    if s.isNil: "LPChannel(nil)"
    if s == nil:
      "LPChannel(nil)"
    elif s.name != $s.oid and s.name.len > 0:
      &"{shortLog(s.conn.peerId)}:{s.oid}:{s.name}"
    else: &"{shortLog(s.conn.peerId)}:{s.oid}"
    else:
      &"{shortLog(s.conn.peerId)}:{s.oid}"
  except ValueError as exc:
    raise newException(Defect, exc.msg)
    raiseAssert(exc.msg)

chronicles.formatIt(LPChannel): shortLog(it)
chronicles.formatIt(LPChannel):
  shortLog(it)

proc open*(s: LPChannel) {.async, gcsafe.} =
proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
  trace "Opening channel", s, conn = s.conn
  if s.conn.isClosed:
    return
@@ -82,20 +84,20 @@ proc open*(s: LPChannel) {.async, gcsafe.} =
    s.isOpen = true
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
  except LPStreamError as exc:
    await s.conn.close()
    raise exc
    raise newException(LPStreamError, "Opening LPChannel failed: " & exc.msg, exc)

method closed*(s: LPChannel): bool =
  s.closedLocal

proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
proc closeUnderlying(s: LPChannel): Future[void] {.async: (raises: []).} =
  ## Channels may be closed for reading and writing in any order - we'll close
  ## the underlying bufferstream when both directions are closed
  if s.closedLocal and s.atEof():
    await procCall BufferStream(s).close()

proc reset*(s: LPChannel) {.async, gcsafe.} =
proc reset*(s: LPChannel) {.async: (raises: []).} =
  if s.isClosed:
    trace "Already closed", s
    return
@@ -108,22 +110,21 @@ proc reset*(s: LPChannel) {.async, gcsafe.} =

  if s.isOpen and not s.conn.isClosed:
    # If the connection is still active, notify the other end
    proc resetMessage() {.async.} =
    proc resetMessage() {.async: (raises: []).} =
      try:
        trace "sending reset message", s, conn = s.conn
        await s.conn.writeMsg(s.id, s.resetCode) # write reset
      except CatchableError as exc:
        # No cancellations
        await noCancel s.conn.writeMsg(s.id, s.resetCode) # write reset
      except LPStreamError as exc:
        trace "Can't send reset message", s, conn = s.conn, description = exc.msg
        await s.conn.close()
        trace "Can't send reset message", s, conn = s.conn, msg = exc.msg

    asyncSpawn resetMessage()

  await s.closeImpl() # noraises, nocancels
  await s.closeImpl()

  trace "Channel reset", s

method close*(s: LPChannel) {.async, gcsafe.} =
method close*(s: LPChannel) {.async: (raises: []).} =
  ## Close channel for writing - a message will be sent to the other peer
  ## informing them that the channel is closed and that we're waiting for
  ## their acknowledgement.
@@ -137,38 +138,41 @@ method close*(s: LPChannel) {.async, gcsafe.} =
  if s.isOpen and not s.conn.isClosed:
    try:
      await s.conn.writeMsg(s.id, s.closeCode) # write close
    except CancelledError as exc:
    except CancelledError:
      await s.conn.close()
      raise exc
    except CatchableError as exc:
    except LPStreamError as exc:
      # It's harmless that close message cannot be sent - the connection is
      # likely down already
      await s.conn.close()
      trace "Cannot send close message", s, id = s.id, msg = exc.msg
      trace "Cannot send close message", s, id = s.id, description = exc.msg

  await s.closeUnderlying() # maybe already eofed

  trace "Closed channel", s, len = s.len

method closeWrite*(s: LPChannel) {.async: (raises: []).} =
  ## For mplex, closeWrite is the same as close - it implements half-close
  await s.close()

method initStream*(s: LPChannel) =
  if s.objName.len == 0:
    s.objName = LPChannelTrackerName

  s.timeoutHandler = proc(): Future[void] {.gcsafe.} =
  s.timeoutHandler = proc(): Future[void] {.async: (raises: [], raw: true).} =
    trace "Idle timeout expired, resetting LPChannel", s
    s.reset()

  procCall BufferStream(s).initStream()

method readOnce*(s: LPChannel,
                 pbytes: pointer,
                 nbytes: int):
                 Future[int] {.async.} =
method readOnce*(
    s: LPChannel, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Mplex relies on reading being done regularly from every channel, or all
  ## channels are blocked - in particular, this means that reading from one
  ## channel must not be done from within a callback / read handler of another
  ## or the reads will lock each other.
  if s.remoteReset:
    trace "reset stream in readOnce", s
    raise newLPStreamResetError()
  if s.localReset:
    raise newLPStreamClosedError()
@@ -180,24 +184,28 @@ method readOnce*(s: LPChannel,
    let bytes = await procCall BufferStream(s).readOnce(pbytes, nbytes)
    when defined(libp2p_network_protocols_metrics):
      if s.protocol.len > 0:
        libp2p_protocols_bytes.inc(bytes.int64, labelValues=[s.protocol, "in"])
        libp2p_protocols_bytes.inc(bytes.int64, labelValues = [s.protocol, "in"])

    trace "readOnce", s, bytes
    if bytes == 0:
      await s.closeUnderlying()
    return bytes
  except CatchableError as exc:
    # readOnce in BufferStream generally raises on EOF or cancellation - for
    # the former, resetting is harmless, for the latter it's necessary because
    # data has been lost in s.readBuf and there's no way to gracefully recover /
    # use the channel any more
  except CancelledError as exc:
    await s.reset()
    raise exc
  except LPStreamError as exc:
    # Resetting is necessary because data has been lost in s.readBuf and
    # there's no way to gracefully recover / use the channel any more
    await s.reset()
    raise newLPStreamConnDownError(exc)

proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
proc prepareWrite(
    s: LPChannel, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
  # prepareWrite is the slow path of writing a message - see conditions in
  # write
  if s.remoteReset:
    trace "stream is reset when prepareWrite", s
    raise newLPStreamResetError()
  if s.closedLocal:
    raise newLPStreamClosedError()
@@ -211,7 +219,7 @@ proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
    debug "Closing connection, too many in-flight writes on channel",
      s, conn = s.conn, writes = s.writes
    when defined(libp2p_mplex_metrics):
      libp2p_mplex_qlenclose.inc()
      libp2p_mplex_qlenclose.inc()
    await s.reset()
    await s.conn.close()
    return
@@ -222,7 +230,10 @@ proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
  await s.conn.writeMsg(s.id, s.msgCode, msg)

proc completeWrite(
    s: LPChannel, fut: Future[void], msgLen: int): Future[void] {.async.} =
    s: LPChannel,
    fut: Future[void].Raising([CancelledError, LPStreamError]),
    msgLen: int,
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
  try:
    s.writes += 1

@@ -235,7 +246,9 @@ proc completeWrite(

    when defined(libp2p_network_protocols_metrics):
      if s.protocol.len > 0:
        libp2p_protocols_bytes.inc(msgLen.int64, labelValues=[s.protocol, "out"])
        # This crashes on Nim 2.0.2 with `--mm:orc` during `nimble test`
        # https://github.com/status-im/nim-metrics/issues/79
        libp2p_protocols_bytes.inc(msgLen.int64, labelValues = [s.protocol, "out"])

    s.activity = true
  except CancelledError as exc:
@@ -247,20 +260,21 @@ proc completeWrite(
    raise exc
  except LPStreamEOFError as exc:
    raise exc
  except CatchableError as exc:
    trace "exception in lpchannel write handler", s, msg = exc.msg
  except LPStreamError as exc:
    trace "exception in lpchannel write handler", s, description = exc.msg
    await s.reset()
    await s.conn.close()
    raise newLPStreamConnDownError(exc)
  finally:
    s.writes -= 1

method write*(s: LPChannel, msg: seq[byte]): Future[void] =
method write*(
    s: LPChannel, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
  ## Write to mplex channel - there may be up to MaxWrite concurrent writes
  ## pending after which the peer is disconnected

  let
    closed = s.closedLocal or s.conn.closed
  let closed = s.closedLocal or s.conn.closed

  let fut =
    if (not closed) and msg.len > 0 and s.writes < MaxWrites and s.isOpen:
@@ -273,16 +287,17 @@ method write*(s: LPChannel, msg: seq[byte]): Future[void] =

  s.completeWrite(fut, msg.len)

method getWrapped*(s: LPChannel): Connection = s.conn
method getWrapped*(s: LPChannel): Connection =
  s.conn

proc init*(
  L: type LPChannel,
  id: uint64,
  conn: Connection,
  initiator: bool,
  name: string = "",
  timeout: Duration = DefaultChanTimeout): LPChannel =

    L: type LPChannel,
    id: uint64,
    conn: Connection,
    initiator: bool,
    name: string = "",
    timeout: Duration = DefaultChanTimeout,
): LPChannel =
  let chann = L(
    id: id,
    name: name,
@@ -293,12 +308,17 @@ proc init*(
    msgCode: if initiator: MessageType.MsgOut else: MessageType.MsgIn,
    closeCode: if initiator: MessageType.CloseOut else: MessageType.CloseIn,
    resetCode: if initiator: MessageType.ResetOut else: MessageType.ResetIn,
    dir: if initiator: Direction.Out else: Direction.In)
    dir: if initiator: Direction.Out else: Direction.In,
  )

  chann.initStream()

  when chronicles.enabledLogLevel == LogLevel.TRACE:
    chann.name = if chann.name.len > 0: chann.name else: $chann.oid
    chann.name =
      if chann.name.len > 0:
        chann.name
      else:
        $chann.oid

  trace "Created new lpchannel", s = chann, id, initiator

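A hedged half-close sketch (not from the diff) matching the close/closeWrite semantics above: after close(), our write side is done but reads remain valid until the remote side closes too. chan is assumed to be an open LPChannel:

  proc sendThenDrain(
      chan: LPChannel
  ) {.async: (raises: [CancelledError, LPStreamError]).} =
    await chan.write(@[1'u8, 2, 3]) # up to MaxWrites may be in flight
    await chan.close()              # half-close: sends closeCode to the peer
    var buf: array[16, byte]
    discard await chan.readOnce(addr buf[0], buf.len) # reading is still allowed
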
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -11,13 +11,14 @@

import tables, sequtils, oids
import chronos, chronicles, stew/byteutils, metrics
import ../muxer,
       ../../stream/connection,
       ../../stream/bufferstream,
       ../../utility,
       ../../peerinfo,
       ./coder,
       ./lpchannel
import
  ../muxer,
  ../../stream/connection,
  ../../stream/bufferstream,
  ../../utility,
  ../../peerinfo,
  ./coder,
  ./lpchannel

export muxer

@@ -26,12 +27,10 @@ logScope:

const MplexCodec* = "/mplex/6.7.0"

const
  MaxChannelCount = 200
const MaxChannelCount = 200

when defined(libp2p_expensive_metrics):
  declareGauge(libp2p_mplex_channels,
    "mplex channels", labels = ["initiator", "peer"])
  declareGauge(libp2p_mplex_channels, "mplex channels", labels = ["initiator", "peer"])

type
  InvalidChannelIdError* = object of MuxerError
@@ -48,7 +47,8 @@ type
func shortLog*(m: Mplex): auto =
  shortLog(m.connection)

chronicles.formatIt(Mplex): shortLog(it)
chronicles.formatIt(Mplex):
  shortLog(it)

proc newTooManyChannels(): ref TooManyChannels =
  newException(TooManyChannels, "max allowed channel count exceeded")
@@ -56,7 +56,7 @@ proc newTooManyChannels(): ref TooManyChannels =
proc newInvalidChannelIdError(): ref InvalidChannelIdError =
  newException(InvalidChannelIdError, "max allowed channel count exceeded")

proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
proc cleanupChann(m: Mplex, chann: LPChannel) {.async: (raises: []), inline.} =
  ## remove the local channel from the internal tables
  ##
  try:
@@ -67,34 +67,35 @@ proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
    when defined(libp2p_expensive_metrics):
      libp2p_mplex_channels.set(
        m.channels[chann.initiator].len.int64,
        labelValues = [$chann.initiator, $m.connection.peerId])
  except CatchableError as exc:
    warn "Error cleaning up mplex channel", m, chann, msg = exc.msg
        labelValues = [$chann.initiator, $m.connection.peerId],
      )
  except CancelledError as exc:
    warn "Error cleaning up mplex channel", m, chann, description = exc.msg

proc newStreamInternal*(m: Mplex,
                        initiator: bool = true,
                        chanId: uint64 = 0,
                        name: string = "",
                        timeout: Duration): LPChannel
                        {.gcsafe, raises: [InvalidChannelIdError].} =
proc newStreamInternal*(
    m: Mplex,
    initiator: bool = true,
    chanId: uint64 = 0,
    name: string = "",
    timeout: Duration,
): LPChannel {.gcsafe, raises: [InvalidChannelIdError].} =
  ## create new channel/stream
  ##
  let id = if initiator:
    m.currentId.inc(); m.currentId
  else: chanId
  let id =
    if initiator:
      m.currentId.inc()
      m.currentId
    else:
      chanId

  if id in m.channels[initiator]:
    raise newInvalidChannelIdError()

  result = LPChannel.init(
    id,
    m.connection,
    initiator,
    name,
    timeout = timeout)
  result = LPChannel.init(id, m.connection, initiator, name, timeout = timeout)

  result.peerId = m.connection.peerId
  result.observedAddr = m.connection.observedAddr
  result.localAddr = m.connection.localAddr
  result.transportDir = m.connection.transportDir
  when defined(libp2p_agents_metrics):
    result.shortAgent = m.connection.shortAgent
@@ -108,21 +109,17 @@ proc newStreamInternal*(m: Mplex,

  when defined(libp2p_expensive_metrics):
    libp2p_mplex_channels.set(
      m.channels[initiator].len.int64,
      labelValues = [$initiator, $m.connection.peerId])
      m.channels[initiator].len.int64, labelValues = [$initiator, $m.connection.peerId]
    )

proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
proc handleStream(m: Mplex, chann: LPChannel) {.async: (raises: []).} =
  ## call the muxer stream handler for this channel
  ##
  try:
    await m.streamHandler(chann)
    trace "finished handling stream", m, chann
    doAssert(chann.closed, "connection not closed by handler!")
  except CatchableError as exc:
    trace "Exception in mplex stream handler", m, chann, msg = exc.msg
    await chann.reset()
  await m.streamHandler(chann)
  trace "finished handling stream", m, chann
  doAssert(chann.closed, "connection not closed by handler!")

method handle*(m: Mplex) {.async, gcsafe.} =
method handle*(m: Mplex) {.async: (raises: []).} =
  trace "Starting mplex handler", m
  try:
    while not m.connection.atEof:
@@ -150,7 +147,7 @@ method handle*(m: Mplex) {.async, gcsafe.} =
      else:
        if m.channels[false].len > m.maxChannCount - 1:
          warn "too many channels created by remote peer",
            allowedMax = MaxChannelCount, m
            allowedMax = MaxChannelCount, m
          raise newTooManyChannels()

        let name = string.fromBytes(data)
@@ -158,60 +155,64 @@ method handle*(m: Mplex) {.async, gcsafe.} =

      trace "Processing channel message", m, channel, data = data.shortLog

      case msgType:
      of MessageType.New:
        trace "created channel", m, channel
      case msgType
      of MessageType.New:
        trace "created channel", m, channel

        if not isNil(m.streamHandler):
          # Launch handler task
          # All the errors are handled inside `handleStream()` procedure.
          asyncSpawn m.handleStream(channel)
        if m.streamHandler != nil:
          # Launch handler task
          # All the errors are handled inside `handleStream()` procedure.
          asyncSpawn m.handleStream(channel)
      of MessageType.MsgIn, MessageType.MsgOut:
        if data.len > MaxMsgSize:
          warn "attempting to send a packet larger than allowed",
            allowed = MaxMsgSize, channel
          raise newLPStreamLimitError()

      of MessageType.MsgIn, MessageType.MsgOut:
        if data.len > MaxMsgSize:
          warn "attempting to send a packet larger than allowed",
            allowed = MaxMsgSize, channel
          raise newLPStreamLimitError()

        trace "pushing data to channel", m, channel, len = data.len
        try:
          await channel.pushData(data)
          trace "pushed data to channel", m, channel, len = data.len
        except LPStreamClosedError as exc:
          # Channel is being closed, but `cleanupChann` was not yet triggered.
          trace "pushing data to channel failed", m, channel, len = data.len,
            msg = exc.msg
          discard # Ignore message, same as if `cleanupChann` had completed.

      of MessageType.CloseIn, MessageType.CloseOut:
        await channel.pushEof()
      of MessageType.ResetIn, MessageType.ResetOut:
        channel.remoteReset = true
        await channel.reset()
        trace "pushing data to channel", m, channel, len = data.len
        try:
          await channel.pushData(data)
          trace "pushed data to channel", m, channel, len = data.len
        except LPStreamClosedError as exc:
          # Channel is being closed, but `cleanupChann` was not yet triggered.
          trace "pushing data to channel failed",
            m, channel, len = data.len, description = exc.msg
          discard # Ignore message, same as if `cleanupChann` had completed.
      of MessageType.CloseIn, MessageType.CloseOut:
        await channel.pushEof()
      of MessageType.ResetIn, MessageType.ResetOut:
        channel.remoteReset = true
        await channel.reset()
  except CancelledError:
    debug "Unexpected cancellation in mplex handler", m
  except LPStreamEOFError as exc:
    trace "Stream EOF", m, msg = exc.msg
  except CatchableError as exc:
    debug "Unexpected exception in mplex read loop", m, msg = exc.msg
    trace "Stream EOF", m, description = exc.msg
  except LPStreamError as exc:
    debug "Unexpected stream exception in mplex read loop", m, description = exc.msg
  except MuxerError as exc:
    debug "Unexpected muxer exception in mplex read loop", m, description = exc.msg
  finally:
    await m.close()
  trace "Stopped mplex handler", m

proc new*(M: type Mplex,
          conn: Connection,
          inTimeout: Duration = DefaultChanTimeout,
          outTimeout: Duration = DefaultChanTimeout,
          maxChannCount: int = MaxChannelCount): Mplex =
  M(connection: conn,
proc new*(
    M: type Mplex,
    conn: Connection,
    inTimeout: Duration = DefaultChanTimeout,
    outTimeout: Duration = DefaultChanTimeout,
    maxChannCount: int = MaxChannelCount,
): Mplex =
  M(
    connection: conn,
    inChannTimeout: inTimeout,
    outChannTimeout: outTimeout,
    oid: genOid(),
    maxChannCount: maxChannCount)
    maxChannCount: maxChannCount,
  )

method newStream*(m: Mplex,
                  name: string = "",
                  lazy: bool = false): Future[Connection] {.async, gcsafe.} =
method newStream*(
    m: Mplex, name: string = "", lazy: bool = false
): Future[Connection] {.async: (raises: [CancelledError, LPStreamError, MuxerError]).} =
  let channel = m.newStreamInternal(timeout = m.inChannTimeout)

  if not lazy:
@@ -219,7 +220,7 @@ method newStream*(m: Mplex,

  return Connection(channel)

method close*(m: Mplex) {.async, gcsafe.} =
method close*(m: Mplex) {.async: (raises: []).} =
  if m.isClosed:
    trace "Already closed", m
    return
@@ -247,6 +248,8 @@ method close*(m: Mplex) {.async, gcsafe.} =

  trace "Closed mplex", m

method getStreams*(m: Mplex): seq[Connection] =
  for c in m.channels[false].values: result.add(c)
  for c in m.channels[true].values: result.add(c)
method getStreams*(m: Mplex): seq[Connection] {.gcsafe.} =
  for c in m.channels[false].values:
    result.add(c)
  for c in m.channels[true].values:
    result.add(c)

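A hedged sketch (not from the diff) of driving the reworked Mplex API over an existing connection; conn is assumed to be an already-secured Connection:

  let mplex = Mplex.new(conn, maxChannCount = 64)
  mplex.streamHandler = proc(chann: Connection) {.async: (raises: []).} =
    await chann.close() # a real handler would read/write before closing
  asyncSpawn mplex.handle()                  # read loop, returns when conn ends
  let stream = await mplex.newStream("ping") # open an outgoing channel
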
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -10,25 +10,23 @@
{.push raises: [].}

import chronos, chronicles
import ../stream/connection,
       ../errors
import ../stream/connection, ../errors

logScope:
  topics = "libp2p muxer"

const
  DefaultChanTimeout* = 5.minutes
const DefaultChanTimeout* = 5.minutes

type
  MuxerError* = object of LPError
  TooManyChannels* = object of MuxerError

  StreamHandler* = proc(conn: Connection): Future[void] {.gcsafe, raises: [].}
  MuxerHandler* = proc(muxer: Muxer): Future[void] {.gcsafe, raises: [].}
  StreamHandler* = proc(conn: Connection): Future[void] {.async: (raises: []).}
  MuxerHandler* = proc(muxer: Muxer): Future[void] {.async: (raises: []).}

  Muxer* = ref object of RootObj
    streamHandler*: StreamHandler
    handler*: Future[void]
    handler*: Future[void].Raising([])
    connection*: Connection

  # user provider proc that returns a constructed Muxer
@@ -40,24 +38,38 @@ type
    codec*: string

func shortLog*(m: Muxer): auto =
  if isNil(m): "nil"
  else: shortLog(m.connection)
chronicles.formatIt(Muxer): shortLog(it)
  if m == nil:
    "nil"
  else:
    shortLog(m.connection)

chronicles.formatIt(Muxer):
  shortLog(it)

# muxer interface
method newStream*(m: Muxer, name: string = "", lazy: bool = false):
  Future[Connection] {.base, async, gcsafe.} = discard
method close*(m: Muxer) {.base, async, gcsafe.} =
  if not isNil(m.connection):
method newStream*(
    m: Muxer, name: string = "", lazy: bool = false
): Future[Connection] {.
    base, async: (raises: [CancelledError, LPStreamError, MuxerError], raw: true)
.} =
  raiseAssert("[Muxer.newStream] abstract method not implemented!")

when defined(libp2p_agents_metrics):
  method setShortAgent*(m: Muxer, shortAgent: string) {.base, gcsafe.} =
    m.connection.shortAgent = shortAgent

method close*(m: Muxer) {.base, async: (raises: []).} =
  if m.connection != nil:
    await m.connection.close()
method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard

method handle*(m: Muxer): Future[void] {.base, async: (raises: []).} =
  discard

proc new*(
  T: typedesc[MuxerProvider],
  creator: MuxerConstructor,
  codec: string): T {.gcsafe.} =

    T: typedesc[MuxerProvider], creator: MuxerConstructor, codec: string
): T {.gcsafe.} =
  let muxerProvider = T(newMuxer: creator, codec: codec)
  muxerProvider

method getStreams*(m: Muxer): seq[Connection] {.base.} = doAssert false, "not implemented"
method getStreams*(m: Muxer): seq[Connection] {.base, gcsafe.} =
  raiseAssert("[Muxer.getStreams] abstract method not implemented!")

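A hedged sketch (not from the diff) of a callback matching the new StreamHandler type above; the async raises: [] annotation makes the compiler reject any exception escaping the body:

  let handler: StreamHandler = proc(conn: Connection) {.async: (raises: []).} =
    await conn.close() # close() is itself declared raises: [], so this compiles
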
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -11,8 +11,8 @@
|
||||
|
||||
import sequtils, std/[tables]
|
||||
import chronos, chronicles, metrics, stew/[endians2, byteutils, objects]
|
||||
import ../muxer,
|
||||
../../stream/connection
|
||||
import ../muxer, ../../stream/connection
|
||||
import ../../utils/[zeroqueue, sequninit]
|
||||
|
||||
export muxer
|
||||
|
||||
@@ -22,18 +22,21 @@ logScope:
|
||||
const
|
||||
YamuxCodec* = "/yamux/1.0.0"
|
||||
YamuxVersion = 0.uint8
|
||||
DefaultWindowSize = 256000
|
||||
YamuxDefaultWindowSize* = 256000
|
||||
MaxSendQueueSize = 256000
|
||||
MaxChannelCount = 200
|
||||
|
||||
when defined(libp2p_yamux_metrics):
|
||||
declareGauge(libp2p_yamux_channels, "yamux channels", labels = ["initiator", "peer"])
|
||||
declareHistogram libp2p_yamux_send_queue, "message send queue length (in byte)",
|
||||
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
|
||||
declareHistogram libp2p_yamux_recv_queue, "message recv queue length (in byte)",
|
||||
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
|
||||
declareGauge libp2p_yamux_channels, "yamux channels", labels = ["initiator", "peer"]
|
||||
declareHistogram libp2p_yamux_send_queue,
|
||||
"message send queue length (in byte)",
|
||||
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
|
||||
declareHistogram libp2p_yamux_recv_queue,
|
||||
"message recv queue length (in byte)",
|
||||
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
|
||||
|
||||
type
|
||||
YamuxError* = object of CatchableError
|
||||
YamuxError* = object of MuxerError
|
||||
|
||||
MsgType = enum
|
||||
Data = 0x0
|
||||
@@ -48,9 +51,9 @@ type
|
||||
Rst
|
||||
|
||||
GoAwayStatus = enum
|
||||
NormalTermination = 0x0,
|
||||
ProtocolError = 0x1,
|
||||
InternalError = 0x2,
|
||||
NormalTermination = 0x0
|
||||
ProtocolError = 0x1
|
||||
InternalError = 0x2
|
||||
|
||||
YamuxHeader = object
|
||||
version: uint8
|
||||
@@ -59,212 +62,264 @@ type
|
||||
streamId: uint32
|
||||
length: uint32
|
||||
|
||||
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
|
||||
proc readHeader(
|
||||
conn: LPStream
|
||||
): Future[YamuxHeader] {.async: (raises: [CancelledError, LPStreamError, MuxerError]).} =
|
||||
var buffer: array[12, byte]
|
||||
await conn.readExactly(addr buffer[0], 12)
|
||||
|
||||
result.version = buffer[0]
|
||||
let flags = fromBytesBE(uint16, buffer[2..3])
|
||||
if not result.msgType.checkedEnumAssign(buffer[1]) or flags notin 0'u16..15'u16:
|
||||
raise newException(YamuxError, "Wrong header")
|
||||
let flags = fromBytesBE(uint16, buffer[2 .. 3])
|
||||
if not result.msgType.checkedEnumAssign(buffer[1]) or flags notin 0'u16 .. 15'u16:
|
||||
raise newException(YamuxError, "Wrong header")
|
||||
result.flags = cast[set[MsgFlags]](flags)
|
||||
result.streamId = fromBytesBE(uint32, buffer[4..7])
|
||||
result.length = fromBytesBE(uint32, buffer[8..11])
|
||||
result.streamId = fromBytesBE(uint32, buffer[4 .. 7])
|
||||
result.length = fromBytesBE(uint32, buffer[8 .. 11])
|
||||
return result

proc `$`(header: YamuxHeader): string =
  result = "{" & $header.msgType & ", "
  result &= "{" & header.flags.foldl(if a != "": a & ", " & $b else: $b, "") & "}, "
  result &= "streamId: " & $header.streamId & ", "
  result &= "length: " & $header.length & "}"
  "{" & $header.msgType & ", " & "{" &
    header.flags.foldl(
      if a != "":
        a & ", " & $b
      else:
        $b,
      "",
    ) & "}, " & "streamId: " & $header.streamId & ", " & "length: " & $header.length &
    "}"

proc encode(header: YamuxHeader): array[12, byte] =
  result[0] = header.version
  result[1] = uint8(header.msgType)
  result[2..3] = toBytesBE(uint16(cast[uint8](header.flags))) # workaround https://github.com/nim-lang/Nim/issues/21789
  result[4..7] = toBytesBE(header.streamId)
  result[8..11] = toBytesBE(header.length)
  result[2 .. 3] = toBytesBE(uint16(cast[uint8](header.flags)))
    # workaround https://github.com/nim-lang/Nim/issues/21789
  result[4 .. 7] = toBytesBE(header.streamId)
  result[8 .. 11] = toBytesBE(header.length)

proc write(conn: LPStream, header: YamuxHeader): Future[void] {.gcsafe.} =
proc write(
    conn: LPStream, header: YamuxHeader
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
  trace "write directly on stream", h = $header
  var buffer = header.encode()
  return conn.write(@buffer)
  conn.write(@buffer)

proc ping(T: type[YamuxHeader], flag: MsgFlags, pingData: uint32): T =
  T(
    version: YamuxVersion,
    msgType: MsgType.Ping,
    flags: {flag},
    length: pingData
  )
  T(version: YamuxVersion, msgType: MsgType.Ping, flags: {flag}, length: pingData)

proc goAway(T: type[YamuxHeader], status: GoAwayStatus): T =
  T(
    version: YamuxVersion,
    msgType: MsgType.GoAway,
    length: uint32(status)
  )
  T(version: YamuxVersion, msgType: MsgType.GoAway, length: uint32(status))

proc data(
    T: type[YamuxHeader],
    streamId: uint32,
    length: uint32 = 0,
    flags: set[MsgFlags] = {},
): T =
    T: type[YamuxHeader],
    streamId: uint32,
    length: uint32 = 0,
    flags: set[MsgFlags] = {},
): T =
  T(
    version: YamuxVersion,
    msgType: MsgType.Data,
    length: length,
    flags: flags,
    streamId: streamId
    streamId: streamId,
  )

proc windowUpdate(
    T: type[YamuxHeader],
    streamId: uint32,
    delta: uint32,
    flags: set[MsgFlags] = {},
): T =
    T: type[YamuxHeader], streamId: uint32, delta: uint32, flags: set[MsgFlags] = {}
): T =
  T(
    version: YamuxVersion,
    msgType: MsgType.WindowUpdate,
    length: delta,
    flags: flags,
    streamId: streamId
    streamId: streamId,
  )
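
# Illustrative sketch (not part of the diff): round-tripping a header through
# encode() to show the 12-byte wire layout, assuming YamuxVersion == 0 as in
# the yamux spec:
#   version (1) | msgType (1) | flags (2, BE) | streamId (4, BE) | length (4, BE)
when isMainModule:
  let wire = YamuxHeader.windowUpdate(5, 262144).encode()
  assert wire[0] == 0'u8                                   # version byte
  assert wire[1] == uint8(MsgType.WindowUpdate)            # message type
  assert fromBytesBE(uint32, wire[4 .. 7]) == 5'u32        # stream id
  assert fromBytesBE(uint32, wire[8 .. 11]) == 262144'u32  # window delta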

type
  ToSend = tuple
  ToSend = ref object
    data: seq[byte]
    sent: int
    fut: Future[void]
    fut: Future[void].Raising([CancelledError, LPStreamError])

  YamuxChannel* = ref object of Connection
    id: uint32
    recvWindow: int
    sendWindow: int
    maxRecvWindow: int
    maxSendQueueSize: int
    conn: Connection
    isSrc: bool
    opened: bool
    isSending: bool
    sendQueue: seq[ToSend]
    recvQueue: seq[byte]
    recvQueue: ZeroQueue
    isReset: bool
    remoteReset: bool
    closedRemotely: Future[void]
    closedRemotely: AsyncEvent
    closedLocally: bool
    receivedData: AsyncEvent
    returnedEof: bool

proc `$`(channel: YamuxChannel): string =
  result = if channel.conn.dir == Out: "=> " else: "<= "
  result &= $channel.id
  var s: seq[string] = @[]
  if channel.closedRemotely.done():
  if channel.closedRemotely.isSet():
    s.add("ClosedRemotely")
  if channel.closedLocally:
    s.add("ClosedLocally")
  if channel.isReset:
    s.add("Reset")
  if s.len > 0:
    result &= " {" & s.foldl(if a != "": a & ", " & b else: b, "") & "}"
    result &=
      " {" &
      s.foldl(
        if a != "":
          a & ", " & b
        else:
          b,
        "",
      ) & "}"

proc sendQueueBytes(channel: YamuxChannel, limit: bool = false): int =
  for (elem, sent, _) in channel.sendQueue:
    result.inc(min(elem.len - sent, if limit: channel.maxRecvWindow div 3 else: elem.len - sent))
proc lengthSendQueue(channel: YamuxChannel): int =
  ## Returns the length of what remains to be sent
  ##
  channel.sendQueue.foldl(a + b.data.len - b.sent, 0)

proc actuallyClose(channel: YamuxChannel) {.async.} =
proc lengthSendQueueWithLimit(channel: YamuxChannel): int =
  ## Returns the length of what remains to be sent, but limits the size of big messages.
  ##
  # For leniency, limit big message size to a third of maxSendQueueSize.
  # This value is arbitrary, it's not in the specs, and it permits storing up to
  # 3 big messages if the peer is stalling.
  channel.sendQueue.foldl(
    a + min(b.data.len - b.sent, channel.maxSendQueueSize div 3), 0
  )
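
# Worked example (illustrative, following the ref-object ToSend definition
# above, with an assumed maxSendQueueSize of 3_000): one 10_000-byte queued
# message with 1_000 bytes already sent gives
#   lengthSendQueue          = 10_000 - 1_000 = 9_000
#   lengthSendQueueWithLimit = min(9_000, 3_000 div 3) = 1_000
# so up to three stalled big messages are counted before the queue is full.
when isMainModule:
  var ch = YamuxChannel(maxSendQueueSize: 3_000)
  ch.sendQueue.add(ToSend(data: newSeq[byte](10_000), sent: 1_000))
  assert ch.lengthSendQueue() == 9_000
  assert ch.lengthSendQueueWithLimit() == 1_000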

proc actuallyClose(channel: YamuxChannel) {.async: (raises: []).} =
  if channel.closedLocally and channel.sendQueue.len == 0 and
      channel.closedRemotely.done():
      channel.closedRemotely.isSet():
    await procCall Connection(channel).closeImpl()

proc remoteClosed(channel: YamuxChannel) {.async.} =
  if not channel.closedRemotely.done():
    channel.closedRemotely.complete()
proc remoteClosed(channel: YamuxChannel) {.async: (raises: []).} =
  if not channel.closedRemotely.isSet():
    channel.closedRemotely.fire()
    await channel.actuallyClose()
    channel.isClosedRemotely = true

method closeImpl*(channel: YamuxChannel) {.async, gcsafe.} =
method closeImpl*(channel: YamuxChannel) {.async: (raises: []).} =
  if not channel.closedLocally:
    trace "Closing yamux channel locally", streamId = channel.id, conn = channel.conn
    channel.closedLocally = true

    if channel.isReset == false and channel.sendQueue.len == 0:
      await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
    if not channel.isReset and channel.sendQueue.len == 0:
      try:
        await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
      except CancelledError, LPStreamError:
        discard
    await channel.actuallyClose()

proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
method closeWrite*(channel: YamuxChannel) {.async: (raises: []).} =
  ## For yamux, closeWrite is the same as close - it implements half-close
  await channel.close()

proc clearQueues(channel: YamuxChannel, error: ref LPStreamEOFError = nil) =
  for toSend in channel.sendQueue:
    if error.isNil():
      toSend.fut.complete()
    else:
      toSend.fut.fail(error)
  channel.sendQueue = @[]
  channel.recvQueue.clear()

proc reset(channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).} =
  # If we reset locally, we want to flush up to a maximum of recvWindow
  # bytes. It's because the peer we're connected to can send us data before
  # it receives the reset.
  if channel.isReset:
    return
  trace "Reset channel"
  channel.isReset = true
  channel.remoteReset = not isLocal
  for (d, s, fut) in channel.sendQueue:
    fut.fail(newLPStreamEOFError())
  channel.sendQueue = @[]
  channel.recvQueue = @[]
  channel.clearQueues(newLPStreamEOFError())

  channel.sendWindow = 0
  if not channel.closedLocally:
    if isLocal:
      try: await channel.conn.write(YamuxHeader.data(channel.id, 0, {Rst}))
      except LPStreamEOFError as exc: discard
      except LPStreamClosedError as exc: discard
    if isLocal and not channel.isSending:
      try:
        await channel.conn.write(YamuxHeader.data(channel.id, 0, {Rst}))
      except CancelledError, LPStreamError:
        discard
    await channel.close()
  if not channel.closedRemotely.done():
  if not channel.closedRemotely.isSet():
    await channel.remoteClosed()
  channel.receivedData.fire()
  if not isLocal:
    # If we reset locally, we want to flush up to a maximum of recvWindow
    # bytes. We use the recvWindow in the proc cleanupChann.
    # If the reset is remote, there's no reason to flush anything.
    channel.recvWindow = 0

proc updateRecvWindow(channel: YamuxChannel) {.async.} =
proc updateRecvWindow(
    channel: YamuxChannel
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Send the peer a window update when the recvWindow is empty enough
  ##
  # In order to avoid spamming a window update every time a byte is read,
  # we send one every time half of the maxRecvWindow has been read.
  let inWindow = channel.recvWindow + channel.recvQueue.len
  if inWindow > channel.maxRecvWindow div 2:
    return

  let delta = channel.maxRecvWindow - inWindow
  channel.recvWindow.inc(delta)
  await channel.conn.write(YamuxHeader.windowUpdate(
    channel.id,
    delta.uint32
  ))
  channel.recvWindow.inc(delta.int)
  await channel.conn.write(YamuxHeader.windowUpdate(channel.id, delta.uint32))
  trace "increasing the recvWindow", delta

method readOnce*(
    channel: YamuxChannel,
    pbytes: pointer,
    nbytes: int):
    Future[int] {.async.} =
    channel: YamuxChannel, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Read from a yamux channel

  if channel.isReset:
    raise if channel.remoteReset:
    raise
      if channel.remoteReset:
        trace "stream is remote reset when readOnce", channel = $channel
        newLPStreamResetError()
      elif channel.closedLocally:
        trace "stream is closed locally when readOnce", channel = $channel
        newLPStreamClosedError()
      else:
        trace "stream is down when readOnce", channel = $channel
        newLPStreamConnDownError()
  if channel.returnedEof:
  if channel.isEof:
    channel.clearQueues()
    raise newLPStreamRemoteClosedError()
  if channel.recvQueue.len == 0:
  if channel.recvQueue.isEmpty():
    channel.receivedData.clear()
    await channel.closedRemotely or channel.receivedData.wait()
    if channel.closedRemotely.done() and channel.recvQueue.len == 0:
      channel.returnedEof = true
      return 0
    let
      closedRemotelyFut = channel.closedRemotely.wait()
      receivedDataFut = channel.receivedData.wait()
    defer:
      if not closedRemotelyFut.finished():
        await closedRemotelyFut.cancelAndWait()
      if not receivedDataFut.finished():
        await receivedDataFut.cancelAndWait()
    await closedRemotelyFut or receivedDataFut
    if channel.closedRemotely.isSet() and channel.recvQueue.isEmpty():
      channel.isEof = true
      channel.clearQueues()
      return
        0 # we return 0 to indicate that the channel is closed for reading from now on

  let toRead = min(channel.recvQueue.len, nbytes)

  var p = cast[ptr UncheckedArray[byte]](pbytes)
  toOpenArray(p, 0, nbytes - 1)[0..<toRead] = channel.recvQueue.toOpenArray(0, toRead - 1)
  channel.recvQueue = channel.recvQueue[toRead..^1]
  let consumed = channel.recvQueue.consumeTo(pbytes, nbytes)

  # We made some room in the recv buffer, let the peer know
  await channel.updateRecvWindow()
  channel.activity = true
  return toRead
  return consumed

proc gotDataFromRemote(channel: YamuxChannel, b: seq[byte]) {.async.} =
proc gotDataFromRemote(
    channel: YamuxChannel, b: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
  channel.recvWindow -= b.len
  channel.recvQueue = channel.recvQueue.concat(b)
  channel.recvQueue.push(b)
  channel.receivedData.fire()
  when defined(libp2p_yamux_metrics):
    libp2p_yamux_recv_queue.observe(channel.recvQueue.len.int64)
@@ -273,221 +328,319 @@ proc gotDataFromRemote(channel: YamuxChannel, b: seq[byte]) {.async.} =
proc setMaxRecvWindow*(channel: YamuxChannel, maxRecvWindow: int) =
  channel.maxRecvWindow = maxRecvWindow

proc trySend(channel: YamuxChannel) {.async.} =
proc sendLoop(channel: YamuxChannel) {.async: (raises: []).} =
  if channel.isSending:
    return
  channel.isSending = true
  defer: channel.isSending = false
  while channel.sendQueue.len != 0:
    channel.sendQueue.keepItIf(not (it.fut.cancelled() and it.sent == 0))
  defer:
    channel.isSending = false

  const NumBytesHeader = 12

  while channel.sendQueue.len > 0:
    channel.sendQueue.keepItIf(not it.fut.finished())

    if channel.sendWindow == 0:
      trace "send window empty"
      if channel.sendQueueBytes(true) > channel.maxRecvWindow:
        debug "channel send queue too big, resetting", maxSendWindow=channel.maxRecvWindow,
          currentQueueSize = channel.sendQueueBytes(true)
        try:
          await channel.reset(true)
        except CatchableError as exc:
          debug "failed to reset", msg=exc.msg
      trace "trying to send while the sendWindow is empty"
      if channel.lengthSendQueueWithLimit() > channel.maxSendQueueSize:
        trace "channel send queue too big, resetting",
          maxSendQueueSize = channel.maxSendQueueSize,
          currentQueueSize = channel.lengthSendQueueWithLimit()
        await channel.reset(isLocal = true)
      break

    let
      bytesAvailable = channel.sendQueueBytes()
      toSend = min(channel.sendWindow, bytesAvailable)
      bytesAvailable = channel.lengthSendQueue()
      numBytesToSend = min(channel.sendWindow, bytesAvailable)
    var
      sendBuffer = newSeqUninitialized[byte](toSend + 12)
      header = YamuxHeader.data(channel.id, toSend.uint32)
      sendBuffer = newSeqUninit[byte](NumBytesHeader + numBytesToSend)
      header = YamuxHeader.data(channel.id, numBytesToSend.uint32)
      inBuffer = 0

    if toSend >= bytesAvailable and channel.closedLocally:
      trace "last buffer we'll send on this channel", toSend, bytesAvailable
    if numBytesToSend >= bytesAvailable and channel.closedLocally:
      trace "last buffer we will send on this channel", numBytesToSend, bytesAvailable
      header.flags.incl({Fin})

    sendBuffer[0..<12] = header.encode()
    sendBuffer[0 ..< NumBytesHeader] = header.encode()

    var futures: seq[Future[void].Raising([CancelledError, LPStreamError])]
    while inBuffer < numBytesToSend:
      var toSend = channel.sendQueue[0]
      # concatenate the different messages we try to send into one buffer
      let bufferToSend = min(toSend.data.len - toSend.sent, numBytesToSend - inBuffer)

      sendBuffer.toOpenArray(NumBytesHeader, NumBytesHeader + numBytesToSend - 1)[
        inBuffer ..< (inBuffer + bufferToSend)
      ] = toSend.data.toOpenArray(toSend.sent, toSend.sent + bufferToSend - 1)

    var futures: seq[Future[void]]
    while inBuffer < toSend:
      let (data, sent, fut) = channel.sendQueue[0]
      let bufferToSend = min(data.len - sent, toSend - inBuffer)
      sendBuffer.toOpenArray(12, 12 + toSend - 1)[inBuffer..<(inBuffer+bufferToSend)] =
        channel.sendQueue[0].data.toOpenArray(sent, sent + bufferToSend - 1)
      channel.sendQueue[0].sent.inc(bufferToSend)
      if channel.sendQueue[0].sent >= data.len:
        futures.add(fut)

      if toSend.sent >= toSend.data.len:
        # if every byte of the message is in the buffer, add the write future to the
        # sequence of futures to be completed (or failed) when the buffer is sent
        futures.add(toSend.fut)
        channel.sendQueue.delete(0)

      inBuffer.inc(bufferToSend)

    trace "build send buffer", h = $header, msg=string.fromBytes(sendBuffer[12..^1])
    channel.sendWindow.dec(toSend)
    try: await channel.conn.write(sendBuffer)
    except CatchableError as exc:
    trace "try to send the buffer", h = $header
    try:
      await channel.conn.write(sendBuffer)
      channel.sendWindow.dec(inBuffer)
    except CancelledError:
      ## Just for the compiler. This should never happen, as sendLoop is started by asyncSpawn.
      ## Therefore, no one owns that sendLoop's future and no one can cancel it.
      discard
    except LPStreamError as exc:
      error "failed to send the buffer", description = exc.msg
      let connDown = newLPStreamConnDownError(exc)
      for fut in futures.items():
      for fut in futures:
        fut.fail(connDown)
      await channel.reset()
      break
    for fut in futures.items():

    for fut in futures:
      fut.complete()

    channel.activity = true
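
# Illustrative walk through one sendLoop iteration (assumed numbers):
# sendWindow = 8 and queue = [6-byte msg A, 10-byte msg B] gives
# numBytesToSend = min(8, 16) = 8; the buffer becomes 12 header bytes,
# all 6 bytes of A (A's future joins `futures` and A leaves the queue)
# and the first 2 bytes of B (B stays queued with sent = 2). After a
# successful write, sendWindow drops by 8 and A's future completes.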

method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
  result = newFuture[void]("Yamux Send")
  if channel.remoteReset:
    result.fail(newLPStreamResetError())
    return result
  if channel.closedLocally or channel.isReset:
    result.fail(newLPStreamClosedError())
    return result
  if msg.len == 0:
    result.complete()
    return result
  channel.sendQueue.add((msg, 0, result))
  when defined(libp2p_yamux_metrics):
    libp2p_yamux_recv_queue.observe(channel.sendQueueBytes().int64)
  asyncSpawn channel.trySend()
method write*(
    channel: YamuxChannel, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
  ## Write to yamux channel
  ##
  var resFut = newFuture[void]("Yamux Send")

proc open*(channel: YamuxChannel) {.async, gcsafe.} =
  if channel.remoteReset:
    trace "stream is reset when write", channel = $channel
    resFut.fail(newLPStreamResetError())
    return resFut

  if channel.closedLocally or channel.isReset:
    resFut.fail(newLPStreamClosedError())
    return resFut

  if msg.len == 0:
    resFut.complete()
    return resFut

  channel.sendQueue.add(ToSend(data: msg, sent: 0, fut: resFut))

  when defined(libp2p_yamux_metrics):
    libp2p_yamux_send_queue.observe(channel.lengthSendQueue().int64)

  asyncSpawn channel.sendLoop()

  return resFut
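
# Minimal usage sketch (hypothetical `chan`, an open YamuxChannel): the
# returned future completes only once sendLoop has written the bytes to
# the underlying connection, so awaiting it provides backpressure.
proc demoWrite(chan: YamuxChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
  await chan.write(@[1'u8, 2, 3])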

proc open(channel: YamuxChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Open a yamux channel by sending a window update with Syn or Ack flag
  ##
  if channel.opened:
    trace "Try to open channel twice"
    return
  channel.opened = true
  await channel.conn.write(YamuxHeader.data(channel.id, 0, {if channel.isSrc: Syn else: Ack}))
  channel.isReset = false

method getWrapped*(channel: YamuxChannel): Connection = channel.conn
  await channel.conn.write(
    YamuxHeader.windowUpdate(
      channel.id,
      uint32(max(channel.maxRecvWindow - YamuxDefaultWindowSize, 0)),
      {if channel.isSrc: Syn else: Ack},
    )
  )
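
# Example (illustrative): with maxRecvWindow = 1_048_576 and the spec's
# initial window YamuxDefaultWindowSize = 262_144, the opener sends Syn with
# delta = 1_048_576 - 262_144 = 786_432, announcing its extra receive
# capacity in the same header that opens the stream.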

type
  Yamux* = ref object of Muxer
    channels: Table[uint32, YamuxChannel]
    flushed: Table[uint32, int]
    currentId: uint32
    isClosed: bool
    maxChannCount: int
method getWrapped*(channel: YamuxChannel): Connection =
  channel.conn

type Yamux* = ref object of Muxer
  channels: Table[uint32, YamuxChannel]
  flushed: Table[uint32, int]
  currentId: uint32
  isClosed: bool
  maxChannCount: int
  windowSize: int
  maxSendQueueSize: int
  inTimeout: Duration
  outTimeout: Duration

proc lenBySrc(m: Yamux, isSrc: bool): int =
  for v in m.channels.values():
    if v.isSrc == isSrc: result += 1
    if v.isSrc == isSrc:
      result += 1

proc cleanupChann(m: Yamux, channel: YamuxChannel) {.async.} =
  await channel.join()
proc cleanupChannel(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
  try:
    await channel.join()
  except CancelledError:
    discard
  m.channels.del(channel.id)
  when defined(libp2p_yamux_metrics):
    libp2p_yamux_channels.set(m.lenBySrc(channel.isSrc).int64, [$channel.isSrc, $channel.peerId])
    libp2p_yamux_channels.set(
      m.lenBySrc(channel.isSrc).int64, [$channel.isSrc, $channel.peerId]
    )
  if channel.isReset and channel.recvWindow > 0:
    m.flushed[channel.id] = channel.recvWindow
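
# Illustrative flush accounting: a channel torn down while recvWindow =
# 64_000 leaves m.flushed[id] = 64_000, so up to 64_000 in-flight Data
# bytes still arriving for that dead stream id are read and discarded by
# handle() instead of being treated as a protocol error.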

proc createStream(m: Yamux, id: uint32, isSrc: bool): YamuxChannel =
  result = YamuxChannel(
proc createStream(
    m: Yamux, id: uint32, isSrc: bool, recvWindow: int, maxSendQueueSize: int
): YamuxChannel =
  # During initialization, recvWindow can be larger than maxRecvWindow.
  # This is because the peer we're connected to will always assume
  # that the initial recvWindow is 256k.
  # To solve this contradiction, no updateWindow will be sent until
  # recvWindow is less than maxRecvWindow.
  var stream = YamuxChannel(
    id: id,
    maxRecvWindow: DefaultWindowSize,
    recvWindow: DefaultWindowSize,
    sendWindow: DefaultWindowSize,
    maxRecvWindow: recvWindow,
    recvWindow:
      if recvWindow > YamuxDefaultWindowSize: recvWindow else: YamuxDefaultWindowSize,
    sendWindow: YamuxDefaultWindowSize,
    maxSendQueueSize: maxSendQueueSize,
    isSrc: isSrc,
    conn: m.connection,
    receivedData: newAsyncEvent(),
    closedRemotely: newFuture[void]()
    closedRemotely: newAsyncEvent(),
  )
  result.objName = "YamuxStream"
  result.dir = if isSrc: Direction.Out else: Direction.In
  result.timeoutHandler = proc(): Future[void] {.gcsafe.} =
  stream.objName = "YamuxStream"
  if isSrc:
    stream.dir = Direction.Out
    stream.timeout = m.outTimeout
  else:
    stream.dir = Direction.In
    stream.timeout = m.inTimeout
  stream.timeoutHandler = proc(): Future[void] {.async: (raises: [], raw: true).} =
    trace "Idle timeout expired, resetting YamuxChannel"
    result.reset()
    result.initStream()
    result.peerId = m.connection.peerId
    result.observedAddr = m.connection.observedAddr
    result.transportDir = m.connection.transportDir
    stream.reset(isLocal = true)
  stream.initStream()
  stream.peerId = m.connection.peerId
  stream.observedAddr = m.connection.observedAddr
  stream.localAddr = m.connection.localAddr
  stream.transportDir = m.connection.transportDir
  when defined(libp2p_agents_metrics):
    result.shortAgent = m.connection.shortAgent
  m.channels[id] = result
  asyncSpawn m.cleanupChann(result)
  trace "created channel", id, pid=m.connection.peerId
    stream.shortAgent = m.connection.shortAgent
  m.channels[id] = stream
  asyncSpawn m.cleanupChannel(stream)
  trace "created channel", id, pid = m.connection.peerId
  when defined(libp2p_yamux_metrics):
    libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $result.peerId])
    libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $stream.peerId])
  return stream

method close*(m: Yamux) {.async.} =
method close*(m: Yamux) {.async: (raises: []).} =
  if m.isClosed == true:
    trace "Already closed"
    return
  m.isClosed = true

  trace "Closing yamux"
  let channels = toSeq(m.channels.values())
  for channel in channels:
    await channel.reset(true)
  try: await m.connection.write(YamuxHeader.goAway(NormalTermination))
  except CatchableError as exc: trace "failed to send goAway", msg=exc.msg
    channel.clearQueues(newLPStreamEOFError())
    channel.recvWindow = 0
    channel.sendWindow = 0
    channel.closedLocally = true
    channel.isReset = true
    channel.opened = false
    channel.isClosed = true
    await channel.remoteClosed()
    channel.receivedData.fire()
  try:
    await m.connection.write(YamuxHeader.goAway(NormalTermination))
  except CancelledError as exc:
    trace "cancelled sending goAway", description = exc.msg
  except LPStreamError as exc:
    trace "failed to send goAway", description = exc.msg
  await m.connection.close()

  m.isClosed = true
  trace "Closed yamux"

proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
  ## call the muxer stream handler for this channel
proc handleStream(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
  ## Call the muxer stream handler for this channel
  ##
  try:
    await m.streamHandler(channel)
    trace "finished handling stream"
    doAssert(channel.isClosed, "connection not closed by handler!")
  except CatchableError as exc:
    trace "Exception in yamux stream handler", msg = exc.msg
    await channel.reset()
  await m.streamHandler(channel)
  trace "finished handling stream"
  doAssert(channel.isClosed, "connection not closed by handler!")

method handle*(m: Yamux) {.async, gcsafe.} =
  trace "Starting yamux handler", pid=m.connection.peerId
method handle*(m: Yamux) {.async: (raises: []).} =
  trace "Starting yamux handler", pid = m.connection.peerId
  try:
    while not m.connection.atEof:
      trace "waiting for header"
      let header = await m.connection.readHeader()
      trace "got message", h = $header

      case header.msgType:
      case header.msgType
      of Ping:
        if MsgFlags.Syn in header.flags:
          await m.connection.write(YamuxHeader.ping(MsgFlags.Ack, header.length))
      of GoAway:
        var status: GoAwayStatus
        if status.checkedEnumAssign(header.length): trace "Received go away", status
        else: trace "Received unexpected error go away"
        if status.checkedEnumAssign(header.length):
          trace "Received go away", status
        else:
          trace "Received unexpected error go away"
        break
      of Data, WindowUpdate:
        if MsgFlags.Syn in header.flags:
          if header.streamId in m.channels:
            debug "Trying to create an existing channel, skipping", id=header.streamId
            debug "Trying to create an existing channel, skipping", id = header.streamId
          else:
            if header.streamId in m.flushed:
              m.flushed.del(header.streamId)

            if header.streamId mod 2 == m.currentId mod 2:
              debug "Peer used our reserved stream id, skipping",
                id = header.streamId,
                currentId = m.currentId,
                peerId = m.connection.peerId
              raise newException(YamuxError, "Peer used our reserved stream id")
            let newStream = m.createStream(header.streamId, false)
            let newStream =
              m.createStream(header.streamId, false, m.windowSize, m.maxSendQueueSize)
            if m.channels.len >= m.maxChannCount:
              await newStream.reset()
              continue
            await newStream.open()
            asyncSpawn m.handleStream(newStream)
        elif header.streamId notin m.channels:
          if header.streamId notin m.flushed:
            raise newException(YamuxError, "Unknown stream ID: " & $header.streamId)
          elif header.msgType == Data:
            # Flush the data
            m.flushed[header.streamId].dec(int(header.length))
            if m.flushed[header.streamId] < 0:
              raise newException(YamuxError, "Peer exhausted the recvWindow after reset")
            if header.length > 0:
              var buffer = newSeqUninitialized[byte](header.length)
              await m.connection.readExactly(addr buffer[0], int(header.length))
          # Flush the data
          m.flushed.withValue(header.streamId, flushed):
            if header.msgType == Data:
              flushed[].dec(int(header.length))
              if flushed[] < 0:
                raise
                  newException(YamuxError, "Peer exhausted the recvWindow after reset")
              if header.length > 0:
                var buffer = newSeqUninit[byte](header.length)
                await m.connection.readExactly(addr buffer[0], int(header.length))

          # If we do not have a stream, likely we sent a RST and/or closed the stream
          trace "unknown stream id", id = header.streamId

          continue

        let channel = m.channels[header.streamId]
        let channel =
          try:
            m.channels[header.streamId]
          except KeyError as e:
            raise newException(
              YamuxError,
              "Stream was cleaned up before handling data: " & $header.streamId & " : " &
                e.msg,
              e,
            )

        if header.msgType == WindowUpdate:
          channel.sendWindow += int(header.length)
          await channel.trySend()
          asyncSpawn channel.sendLoop()
        else:
          if header.length.int > channel.recvWindow.int:
            # check before allocating the buffer
            raise newException(YamuxError, "Peer exhausted the recvWindow")

          if header.length > 0:
            var buffer = newSeqUninitialized[byte](header.length)
            var buffer = newSeqUninit[byte](header.length)
            await m.connection.readExactly(addr buffer[0], int(header.length))
            trace "Msg Rcv", msg=string.fromBytes(buffer)
            trace "Msg Rcv", description = shortLog(buffer)
            await channel.gotDataFromRemote(buffer)

        if MsgFlags.Fin in header.flags:
@@ -496,34 +649,58 @@ method handle*(m: Yamux) {.async, gcsafe.} =
        if MsgFlags.Rst in header.flags:
          trace "remote reset channel"
          await channel.reset()
  except CancelledError as exc:
    debug "Unexpected cancellation in yamux handler", description = exc.msg
  except LPStreamEOFError as exc:
    trace "Stream EOF", msg = exc.msg
    trace "Stream EOF", description = exc.msg
  except LPStreamError as exc:
    debug "Unexpected stream exception in yamux read loop", description = exc.msg
  except YamuxError as exc:
    trace "Closing yamux connection", error=exc.msg
    await m.connection.write(YamuxHeader.goAway(ProtocolError))
    trace "Closing yamux connection", description = exc.msg
    try:
      await m.connection.write(YamuxHeader.goAway(ProtocolError))
    except CancelledError, LPStreamError:
      discard
  except MuxerError as exc:
    debug "Unexpected muxer exception in yamux read loop", description = exc.msg
    try:
      await m.connection.write(YamuxHeader.goAway(ProtocolError))
    except CancelledError, LPStreamError:
      discard
  finally:
    await m.close()
  trace "Stopped yamux handler"

method getStreams*(m: Yamux): seq[Connection] =
  for c in m.channels.values: result.add(c)
method getStreams*(m: Yamux): seq[Connection] {.gcsafe.} =
  for c in m.channels.values:
    result.add(c)

method newStream*(
    m: Yamux,
    name: string = "",
    lazy: bool = false): Future[Connection] {.async, gcsafe.} =

    m: Yamux, name: string = "", lazy: bool = false
): Future[Connection] {.async: (raises: [CancelledError, LPStreamError, MuxerError]).} =
  if m.channels.len > m.maxChannCount - 1:
    raise newException(TooManyChannels, "max allowed channel count exceeded")
  let stream = m.createStream(m.currentId, true)
  let stream = m.createStream(m.currentId, true, m.windowSize, m.maxSendQueueSize)
  m.currentId += 2
  if not lazy:
    await stream.open()
  return stream

proc new*(T: type[Yamux], conn: Connection, maxChannCount: int = MaxChannelCount): T =
proc new*(
    T: type[Yamux],
    conn: Connection,
    maxChannCount: int = MaxChannelCount,
    windowSize: int = YamuxDefaultWindowSize,
    maxSendQueueSize: int = MaxSendQueueSize,
    inTimeout: Duration = 5.minutes,
    outTimeout: Duration = 5.minutes,
): T =
  T(
    connection: conn,
    currentId: if conn.dir == Out: 1 else: 2,
    maxChannCount: maxChannCount
    maxChannCount: maxChannCount,
    windowSize: windowSize,
    maxSendQueueSize: maxSendQueueSize,
    inTimeout: inTimeout,
    outTimeout: outTimeout,
  )
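
# Hypothetical setup sketch: `conn` is an established, secured Connection;
# parameter names follow the constructor above, values are illustrative.
let muxer = Yamux.new(
  conn,
  maxChannCount = 200,
  windowSize = 512 * 1024,            # per-stream receive window
  maxSendQueueSize = 3 * 512 * 1024,  # bound on buffered outgoing bytes
)
asyncSpawn muxer.handle()             # start the demultiplexing read loop
# let stream = await muxer.newStream()  # then open streams as needed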

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -10,20 +10,21 @@
{.push raises: [].}

import
  std/[streams, strutils, sets, sequtils],
  chronos, chronicles, stew/byteutils,
  std/[streams, sets, sequtils],
  chronos,
  chronicles,
  stew/byteutils,
  dnsclientpkg/[protocol, types],
  ../utility
  ../utility,
  ../utils/sequninit

import
  nameresolver
import nameresolver

logScope:
  topics = "libp2p dnsresolver"

type
  DnsResolver* = ref object of NameResolver
    nameServers*: seq[TransportAddress]
type DnsResolver* = ref object of NameResolver
  nameServers*: seq[TransportAddress]

proc questionToBuf(address: string, kind: QKind): seq[byte] =
  try:
@@ -37,28 +38,35 @@ proc questionToBuf(address: string, kind: QKind): seq[byte] =
    let dataLen = requestStream.getPosition()
    requestStream.setPosition(0)

    var buf = newSeq[byte](dataLen)
    var buf = newSeqUninit[byte](dataLen)
    discard requestStream.readData(addr buf[0], dataLen)
    return buf
  except CatchableError as exc:
    info "Failed to create DNS buffer", msg = exc.msg
    return newSeq[byte](0)
    buf
  except IOError as exc:
    info "Failed to create DNS buffer", description = exc.msg
    newSeqUninit[byte](0)
  except OSError as exc:
    info "Failed to create DNS buffer", description = exc.msg
    newSeqUninit[byte](0)
  except ValueError as exc:
    info "Failed to create DNS buffer", description = exc.msg
    newSeqUninit[byte](0)

proc getDnsResponse(
    dnsServer: TransportAddress,
    address: string,
    kind: QKind): Future[Response] {.async.} =

    dnsServer: TransportAddress, address: string, kind: QKind
): Future[Response] {.
    async: (raises: [CancelledError, IOError, OSError, TransportError, ValueError])
.} =
  var sendBuf = questionToBuf(address, kind)

  if sendBuf.len == 0:
    raise newException(ValueError, "Incorrect DNS query")

  let receivedDataFuture = newFuture[void]()
  let receivedDataFuture = Future[void].Raising([CancelledError]).init()

  proc datagramDataReceived(transp: DatagramTransport,
      raddr: TransportAddress): Future[void] {.async, closure.} =
    receivedDataFuture.complete()
  proc datagramDataReceived(
      transp: DatagramTransport, raddr: TransportAddress
  ): Future[void] {.async: (raises: []).} =
    receivedDataFuture.complete()

  let sock =
    if dnsServer.family == AddressFamily.IPv6:
@@ -69,29 +77,41 @@ proc getDnsResponse(
  try:
    await sock.sendTo(dnsServer, addr sendBuf[0], sendBuf.len)

    await receivedDataFuture or sleepAsync(5.seconds) #unix default

    if not receivedDataFuture.finished:
      raise newException(IOError, "DNS server timeout")
    try:
      await receivedDataFuture.wait(5.seconds) #unix default
    except AsyncTimeoutError as e:
      raise newException(IOError, "DNS server timeout: " & e.msg, e)

    let rawResponse = sock.getMessage()
    # parseResponse can have a raises: [Exception, ..] because of
    # https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
    # it can't actually raise though
    return exceptionToAssert: parseResponse(string.fromBytes(rawResponse))
    try:
      parseResponse(string.fromBytes(rawResponse))
    except IOError as exc:
      raise newException(IOError, "Failed to parse DNS response: " & exc.msg, exc)
    except OSError as exc:
      raise newException(OSError, "Failed to parse DNS response: " & exc.msg, exc)
    except ValueError as exc:
      raise newException(ValueError, "Failed to parse DNS response: " & exc.msg, exc)
    except Exception as exc:
      # Nim 1.6: parseResponse can have a raises: [Exception, ..] because of
      # https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
      # it can't actually raise though
      raiseAssert "Exception parsing DNS response: " & exc.msg
  finally:
    await sock.closeWait()
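
# Usage sketch (hypothetical resolver address; `A` is the dnsclientpkg
# record kind used above):
proc demoQuery() {.async.} =
  let resp = await getDnsResponse(initTAddress("1.1.1.1:53"), "example.com", A)
  for answer in resp.answers:
    echo answer.toString()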

method resolveIp*(
    self: DnsResolver,
    address: string,
    port: Port,
    domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async.} =

    self: DnsResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.
    async: (raises: [CancelledError, TransportAddressError])
.} =
  trace "Resolving IP using DNS", address, servers = self.nameServers.mapIt($it), domain
  for _ in 0 ..< self.nameServers.len:
    let server = self.nameServers[0]
    var responseFutures: seq[Future[Response]]
    var responseFutures: seq[
      Future[Response].Raising(
        [CancelledError, IOError, OSError, TransportError, ValueError]
      )
    ]
    if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
      responseFutures.add(getDnsResponse(server, address, A))

@@ -106,25 +126,32 @@ method resolveIp*(
    var
      resolvedAddresses: OrderedSet[string]
      resolveFailed = false
    template handleFail(e): untyped =
      info "Failed to query DNS", address, error = e.msg
      resolveFailed = true
      break

    for fut in responseFutures:
      try:
        let resp = await fut
        for answer in resp.answers:
          # toString can have a raises: [Exception, ..] because of
          # https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
          # it can't actually raise though
          resolvedAddresses.incl(
            exceptionToAssert(answer.toString())
          )
          resolvedAddresses.incl(answer.toString())
      except CancelledError as e:
        raise e
      except ValueError as e:
        info "Invalid DNS query", address, error=e.msg
        info "Invalid DNS query", address, error = e.msg
        return @[]
      except CatchableError as e:
        info "Failed to query DNS", address, error=e.msg
        resolveFailed = true
        break
      except IOError as e:
        handleFail(e)
      except OSError as e:
        handleFail(e)
      except TransportError as e:
        handleFail(e)
      except Exception as e:
        # Nim 1.6: answer.toString can have a raises: [Exception, ..] because of
        # https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
        # it can't actually raise though
        raiseAssert e.msg

    if resolveFailed:
      self.nameServers.add(self.nameServers[0])
@@ -138,32 +165,40 @@ method resolveIp*(
  return @[]

method resolveTxt*(
    self: DnsResolver,
    address: string): Future[seq[string]] {.async.} =

    self: DnsResolver, address: string
): Future[seq[string]] {.async: (raises: [CancelledError]).} =
  trace "Resolving TXT using DNS", address, servers = self.nameServers.mapIt($it)
  for _ in 0 ..< self.nameServers.len:
    let server = self.nameServers[0]
    try:
      # toString can have a raises: [Exception, ..] because of
      # https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
      # it can't actually raise though
      let response = await getDnsResponse(server, address, TXT)
      return exceptionToAssert:
        trace "Got TXT response", server = $server, answer=response.answers.mapIt(it.toString())
        response.answers.mapIt(it.toString())
    except CancelledError as e:
      raise e
    except CatchableError as e:
      info "Failed to query DNS", address, error=e.msg
    template handleFail(e): untyped =
      info "Failed to query DNS", address, error = e.msg
      self.nameServers.add(self.nameServers[0])
      self.nameServers.delete(0)
      continue

    try:
      let response = await getDnsResponse(server, address, TXT)
      trace "Got TXT response",
        server = $server, answer = response.answers.mapIt(it.toString())
      return response.answers.mapIt(it.toString())
    except CancelledError as e:
      raise e
    except IOError as e:
      handleFail(e)
    except OSError as e:
      handleFail(e)
    except TransportError as e:
      handleFail(e)
    except ValueError as e:
      handleFail(e)
    except Exception as e:
      # Nim 1.6: toString can have a raises: [Exception, ..] because of
      # https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
      # it can't actually raise though
      raiseAssert e.msg

  debug "Failed to resolve TXT, returning empty set"
  return @[]

proc new*(
    T: typedesc[DnsResolver],
    nameServers: seq[TransportAddress]): T =
proc new*(T: typedesc[DnsResolver], nameServers: seq[TransportAddress]): T =
  T(nameServers: nameServers)

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -9,9 +9,7 @@

{.push raises: [].}

import
  std/tables,
  chronos, chronicles
import std/tables, chronos, chronicles

import nameresolver

@@ -26,21 +24,26 @@ type MockResolver* = ref object of NameResolver
  ipResponses*: Table[(string, bool), seq[string]]

method resolveIp*(
    self: MockResolver,
    address: string,
    port: Port,
    domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async.} =
    self: MockResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.
    async: (raises: [CancelledError, TransportAddressError])
.} =
  var res: seq[TransportAddress]

  if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
    for resp in self.ipResponses.getOrDefault((address, false)):
      result.add(initTAddress(resp, port))
      res.add(initTAddress(resp, port))

  if domain == Domain.AF_INET6 or domain == Domain.AF_UNSPEC:
    for resp in self.ipResponses.getOrDefault((address, true)):
      result.add(initTAddress(resp, port))
      res.add(initTAddress(resp, port))

  res

method resolveTxt*(
    self: MockResolver,
    address: string): Future[seq[string]] {.async.} =
  return self.txtResponses.getOrDefault(address)
    self: MockResolver, address: string
): Future[seq[string]] {.async: (raises: [CancelledError]).} =
  self.txtResponses.getOrDefault(address)

proc new*(T: typedesc[MockResolver]): T = T()
proc new*(T: typedesc[MockResolver]): T =
  T()

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -9,74 +9,75 @@

{.push raises: [].}

import std/[sugar, sets, sequtils, strutils]
import
  chronos,
  chronicles,
  stew/endians2
import std/[sets, sequtils, strutils]
import chronos, chronicles, stew/endians2
import ".."/[multiaddress, multicodec]

logScope:
  topics = "libp2p nameresolver"

type
  NameResolver* = ref object of RootObj
type NameResolver* = ref object of RootObj

method resolveTxt*(
    self: NameResolver,
    address: string): Future[seq[string]] {.async, base.} =
    self: NameResolver, address: string
): Future[seq[string]] {.async: (raises: [CancelledError]), base.} =
  ## Get TXT record
  ##

  doAssert(false, "Not implemented!")
  raiseAssert "[NameResolver.resolveTxt] abstract method not implemented!"

method resolveIp*(
    self: NameResolver,
    address: string,
    port: Port,
    domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async, base.} =
    self: NameResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.
    async: (raises: [CancelledError, TransportAddressError]), base
.} =
  ## Resolve the specified address
  ##

  doAssert(false, "Not implemented!")
  raiseAssert "[NameResolver.resolveIp] abstract method not implemented!"

proc getHostname*(ma: MultiAddress): string =
  let
    firstPart = ma[0].valueOr: return ""
    firstPart = ma[0].valueOr:
      return ""
    fpSplitted = ($firstPart).split('/', 2)
  if fpSplitted.len > 2: fpSplitted[2]
  else: ""
  if fpSplitted.len > 2:
    fpSplitted[2]
  else:
    ""

proc resolveOneAddress(
    self: NameResolver,
    ma: MultiAddress,
    domain: Domain = Domain.AF_UNSPEC,
    prefix = ""): Future[seq[MultiAddress]]
    {.async, raises: [MaError, TransportAddressError].} =
  # Resolve a single address
    self: NameResolver, ma: MultiAddress, domain: Domain = Domain.AF_UNSPEC, prefix = ""
): Future[seq[MultiAddress]] {.
    async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
  # Resolve a single address
  let portPart = ma[1].valueOr:
    raise maErr error
  var pbuf: array[2, byte]

  var dnsval = getHostname(ma)

  if ma[1].tryGet().protoArgument(pbuf).tryGet() == 0:
    raise newException(MaError, "Incorrect port number")
  let plen = portPart.protoArgument(pbuf).valueOr:
    raise maErr error
  if plen == 0:
    raise maErr "Incorrect port number"
  let
    port = Port(fromBytesBE(uint16, pbuf))
    dnsval = getHostname(ma)
    resolvedAddresses = await self.resolveIp(prefix & dnsval, port, domain)

  return collect(newSeqOfCap(4)):
    for address in resolvedAddresses:
      var createdAddress = MultiAddress.init(address).tryGet()[0].tryGet()
      for part in ma:
        if DNS.match(part.tryGet()): continue
        createdAddress &= part.tryGet()
      createdAddress
  resolvedAddresses.mapIt:
    let address = MultiAddress.init(it).valueOr:
      raise maErr error
    var createdAddress = address[0].valueOr:
      raise maErr error
    for part in ma:
      let part = part.valueOr:
        raise maErr error
      if DNS.match(part):
        continue
      createdAddress &= part
    createdAddress

proc resolveDnsAddr*(
    self: NameResolver,
    ma: MultiAddress,
    depth: int = 0): Future[seq[MultiAddress]] {.async.} =

    self: NameResolver, ma: MultiAddress, depth: int = 0
): Future[seq[MultiAddress]] {.
    async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
  if not DNSADDR.matchPartial(ma):
    return @[ma]

@@ -85,52 +86,67 @@ proc resolveDnsAddr*(
    info "Stopping DNSADDR recursion, probably malicious", ma
    return @[]

  var dnsval = getHostname(ma)

  let txt = await self.resolveTxt("_dnsaddr." & dnsval)
  let
    dnsval = getHostname(ma)
    txt = await self.resolveTxt("_dnsaddr." & dnsval)

  trace "txt entries", txt

  var result: seq[MultiAddress]
  for entry in txt:
    if not entry.startsWith("dnsaddr="): continue
    let entryValue = MultiAddress.init(entry[8..^1]).tryGet()
  const codec = multiCodec("p2p")
  let maCodec = block:
    let hasCodec = ma.contains(codec).valueOr:
      raise maErr error
    if hasCodec:
      ma[codec]
    else:
      (static(default(MaResult[MultiAddress])))

    if entryValue.contains(multiCodec("p2p")).tryGet() and ma.contains(multiCodec("p2p")).tryGet():
      if entryValue[multiCodec("p2p")] != ma[multiCodec("p2p")]:
        continue
  var res: seq[MultiAddress]
  for entry in txt:
    if not entry.startsWith("dnsaddr="):
      continue
    let
      entryValue = MultiAddress.init(entry[8 ..^ 1]).valueOr:
        raise maErr error
      entryHasCodec = entryValue.contains(multiCodec("p2p")).valueOr:
        raise maErr error
    if entryHasCodec and maCodec.isOk and entryValue[codec] != maCodec:
      continue

    let resolved = await self.resolveDnsAddr(entryValue, depth + 1)
    for r in resolved:
      result.add(r)
      res.add(r)

  if result.len == 0:
  if res.len == 0:
    debug "Failed to resolve a DNSADDR", ma
    return @[]
  return result

  res
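
# Illustrative dnsaddr walk: resolving "/dnsaddr/bootstrap.libp2p.io"
# queries TXT "_dnsaddr.bootstrap.libp2p.io"; every "dnsaddr=/.../p2p/<id>"
# entry is recursed into (the depth counter caps malicious loops) and
# entries whose /p2p id differs from the one in `ma` are skipped.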

proc resolveMAddress*(
    self: NameResolver,
    address: MultiAddress): Future[seq[MultiAddress]] {.async.} =
    self: NameResolver, address: MultiAddress
): Future[seq[MultiAddress]] {.
    async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
  var res = initOrderedSet[MultiAddress]()

  if not DNS.matchPartial(address):
    res.incl(address)
  else:
    let code = address[0].tryGet().protoCode().tryGet()
    let seq = case code:
      of multiCodec("dns"):
        await self.resolveOneAddress(address)
      of multiCodec("dns4"):
        await self.resolveOneAddress(address, Domain.AF_INET)
      of multiCodec("dns6"):
        await self.resolveOneAddress(address, Domain.AF_INET6)
      of multiCodec("dnsaddr"):
        await self.resolveDnsAddr(address)
      else:
        assert false
        @[address]
    for ad in seq:
    let
      firstPart = address[0].valueOr:
        raise maErr error
      code = firstPart.protoCode().valueOr:
        raise maErr error
      ads =
        case code
        of multiCodec("dns"):
          await self.resolveOneAddress(address)
        of multiCodec("dns4"):
          await self.resolveOneAddress(address, Domain.AF_INET)
        of multiCodec("dns6"):
          await self.resolveOneAddress(address, Domain.AF_INET6)
        of multiCodec("dnsaddr"):
          await self.resolveDnsAddr(address)
        else:
          raise maErr("Unsupported codec " & $code)
    for ad in ads:
      res.incl(ad)
  return res.toSeq
  res.toSeq
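
# Usage sketch (hypothetical resolver and address):
proc demoResolve(resolver: NameResolver) {.async.} =
  let ma = MultiAddress.init("/dns4/example.com/tcp/4001").tryGet()
  for resolved in await resolver.resolveMAddress(ma):
    echo $resolved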