Compare commits


78 Commits

Author SHA1 Message Date
Vlado Pajić
c62dd76de0 bump 2025-08-27 22:30:09 +02:00
Vlado Pajić
5fd3ed1931 bump 2025-08-27 22:20:34 +02:00
Vlado Pajić
eebe6ac3f1 bump 2025-08-27 21:44:22 +02:00
Vlado Pajić
f88332a8e4 bump 2025-08-27 11:40:26 +02:00
Vlado Pajić
3a61631e83 bump 2025-08-27 11:15:37 +02:00
Vlado Pajić
54b73619d7 test 2025-08-26 16:10:03 +02:00
vladopajic
9865cc39b5 chore(perf): follow up for PR#1600 (#1620) 2025-08-26 10:00:25 -04:00
Gabriel Cruz
601f56b786 chore(autonat-v2): add message types (#1637) 2025-08-25 15:18:43 +00:00
Ben
25a8ed4d07 refactor(kad): Refine, and reduce, exception scope (#1627) 2025-08-25 11:33:26 +00:00
Radosław Kamiński
955e28ff70 test(yamux): Add unit tests - frame handling and stream initiation (#1634) 2025-08-22 12:02:54 +01:00
Radosław Kamiński
f952e6d436 test(performance): do not run publish steps on forks and fix cleanup (#1630) 2025-08-19 13:25:52 +01:00
MorganaFuture
bed83880bf fix(test): Race condition on Windows-specific daemon close (#1628)
Co-authored-by: Ben <benph@vac.dev>
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-08-18 17:09:31 -04:00
richΛrd
9bd4b7393f feat(kad-dht): findPeer (#1624) 2025-08-18 13:45:31 +00:00
Radosław Kamiński
12d1fae404 test(yamux): Add header unit tests (#1625) 2025-08-18 13:50:54 +01:00
MorganaFuture
17073dc9e0 fix(tests): prevent race condition in testgossipsubcontrolmessages (#1626) 2025-08-15 18:46:39 +00:00
vladopajic
b1649b3566 chore(quic): add length prefixed test (#1599) 2025-08-15 15:57:56 +02:00
Ben
ef20f46b47 refactor: rm dhttypes.nim (#1612) 2025-08-15 12:23:27 +00:00
Gabriel Cruz
9161529c84 fix: pubsub signature verification (#1618) 2025-08-14 20:15:02 +00:00
Ben
8b70384b6a refactor: Removal of "Unhashed" key variant (#1623)
Internal keydata is _always_ unhashed. The parts that require its data in hashed form hash it themselves using the provided hasher (with default fallback)
2025-08-14 11:22:09 +00:00
MorganaFuture
f25814a890 feat(perf): implement proper half-close semantics (#1600)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-08-13 10:08:17 -04:00
Radosław Kamiński
3d5ea1fa3c test(performance): fetch before push and improve latency history (#1617) 2025-08-13 14:22:42 +01:00
richΛrd
2114008704 fix: compilation warning on yamux due to using CatchableErr (#1616) 2025-08-12 22:11:33 +00:00
richΛrd
04796b210b fix: don't check for errors as close() will only contain futures that raise [] (#1615) 2025-08-12 21:26:22 +00:00
Ben
59faa023aa feat(kad): Initial unstable putval api (#1582) 2025-08-12 12:25:21 +02:00
vladopajic
fdebea4e14 chore(quic): fix flaky test when eof is expected (#1611) 2025-08-11 17:02:13 +00:00
vladopajic
0c188df806 fix(quic): race errors when stopping transport (#1614) 2025-08-11 15:48:37 +00:00
Radosław Kamiński
abee5326dc test(gossipsub): Performance tests - plot latency history (#1608) 2025-08-11 16:11:29 +01:00
Radosław Kamiński
71f04d1bb3 test(gossipsub): Performance tests - plot docker stats (#1597) 2025-08-11 15:45:50 +01:00
Radosław Kamiński
41ae43ae80 test(gossipsub): Performance tests - collect docker stats (#1593) 2025-08-11 14:01:38 +00:00
vladopajic
5dbf077d9e chore(pubsub): simplify prune backoff test (#1596) 2025-08-09 17:49:14 +00:00
vladopajic
b5fc7582ff fix(quic): setting shortAgent (#1609) 2025-08-08 17:21:58 +00:00
vladopajic
7f83ebb198 chore(quic): readOnce better exception handling (#1610) 2025-08-08 16:02:33 +00:00
vladopajic
ceb89986c1 chore(quic): exception msg fix (#1607) 2025-08-08 10:24:55 -03:00
vladopajic
f4ff27ca6b fix(quic): test improvement (#1595) 2025-08-06 14:34:07 -03:00
richΛrd
b517b692df chore: v1.12.0 (#1581) 2025-08-05 13:59:43 +00:00
Ben
7cfd26035a fix(kad): Skip self when iterating through findNode dialouts (#1594) 2025-08-05 12:00:09 +02:00
Radosław Kamiński
cd5fea53e3 test(gossipsub): Performance tests - more scenarios (#1585) 2025-08-01 08:33:39 +01:00
Radosław Kamiński
d9aa393761 test(gossipsub): Performance tests - aggregation script and workflow (#1577) 2025-07-31 17:59:09 +01:00
Gabriel Cruz
a4a0d9e375 ci: add nimbus compilation daily test (#1571) 2025-07-31 15:01:10 +00:00
richΛrd
c8b406d6ed feat(kad-dht): find nodes (#1324)
Co-authored-by: Ben-PH <benphawke@gmail.com>
2025-07-31 12:30:02 +02:00
Radosław Kamiński
f0125a62df test(gossipsub): Performance tests - base scenario and runner (#1573) 2025-07-31 09:22:14 +00:00
Gabriel Cruz
9bf2636186 ci(docs): fix generation (#1590) 2025-07-31 00:42:49 +02:00
Ben
01a33ebe5c docs: Document nimble 0.20.1 dev req and instructions (#1586) 2025-07-30 16:15:48 +02:00
Gabriel Cruz
c1cd31079b fix(interop): redis not installed (#1584) 2025-07-25 19:25:48 +00:00
vladopajic
9f9f38e314 chore(quic): add close session test (#1583) 2025-07-25 13:46:15 -04:00
Gabriel Cruz
f83638eb82 chore: remove support for nim 1.6 (#1572) 2025-07-24 15:51:44 -04:00
Ivan FB
882cb5dfe3 fix: more secure send loop in yamux and exception-handling-code cleanup in pubsubpeer (#1579) 2025-07-24 17:15:59 +00:00
vladopajic
81310df2a2 chore(switch): remove unnecesery debug log entry (#1575) 2025-07-24 15:00:21 +00:00
Ivan FB
34110a37d7 chore: revert excp type in yamux write (#1578) 2025-07-24 14:30:29 +00:00
vladopajic
1035e4f314 fix(quic): close all streams when closing session (#1576) 2025-07-24 11:04:05 -03:00
Ivan FB
d08bad5893 fix: memory consumption in yamux and pubsubpeer (#1570) 2025-07-24 09:27:45 -04:00
vladopajic
7bdba4909f chore(PeerTable): add stringification operator (#1567) 2025-07-23 10:53:26 +00:00
vladopajic
e71c7caf82 chore(perf): remove timeout when reading from quic stream (#1569) 2025-07-22 17:16:09 -04:00
vladopajic
45476bdd6b chore(deps): bump quic to v0.2.9 (#1568) 2025-07-22 10:24:01 -03:00
Gabriel Cruz
c7ee7b950d chore(autotls): only import dnsclient when autotls is required (#1565) 2025-07-22 11:53:09 +00:00
Vedran Mendelski
87b3d2c864 feat(ci): add CI failure notifications
Add Discord webhook notifications for failed daily workflows to improve visibility
of branch build issues.

- https://github.com/vacp2p/nim-libp2p/issues/1403
2025-07-22 12:00:55 +02:00
Gabriel Cruz
19b4c20e2f chore: add TransportConfig (#1561) 2025-07-21 20:47:01 +00:00
Gabriel Cruz
514bd4b5f5 fix(autotls): integration tests (#1560) 2025-07-21 19:57:37 +00:00
Ben-PH
46d936b80c chore(ci): initial flake.nix file with dev shell
Signed-off-by: Jakub Sokołowski <jakub@status.im>
2025-07-21 19:14:26 +02:00
Gabriel Cruz
80bf27c6bb fix: typo on startAt (#1562) 2025-07-21 16:56:12 +02:00
Farooq
6576c5c3bf feat: GossipSub v1.4 (#1448)
Co-authored-by: Richard Ramos <info@richardramos.me>
2025-07-20 18:47:13 +00:00
Gabriel Cruz
2e6b1d2738 feat(wstransport): add autotls support (#1535) 2025-07-18 11:58:27 -04:00
MorganaFuture
9e6c4cb4d2 fix: properly handle CancelledError in switch accept loop (#1537)
Signed-off-by: MorganaFuture <andrewmochalskyi@gmail.com>
2025-07-17 21:29:37 +00:00
MorganaFuture
5f256049ab fix: build failures caused by missing import (#1541) 2025-07-17 08:00:45 -04:00
richΛrd
e29ca73386 fix: autotls related imports (#1550) 2025-07-16 21:43:34 +00:00
vladopajic
577809750a fix(quic): add getWrapped method for QuicStream (#1546) 2025-07-16 17:16:18 +00:00
Gabriel Cruz
46a5430cc2 chore(dialer): expand dns4 and dns6 (#1543) 2025-07-15 18:28:55 +00:00
vladopajic
d8b9f59c5e chore(readme): add link to community channel (#1542) 2025-07-15 15:46:59 +02:00
MorganaFuture
2951356c9d fix(perf): add QUIC transport compatibility (#1524) 2025-07-15 08:20:26 -04:00
richΛrd
7ae21d0cbd fix: also hide autotls/utils methods (#1538) 2025-07-14 17:19:19 -04:00
richΛrd
eee8341ad2 chore: hide autotls under compile flag (#1533) 2025-07-14 13:52:33 -04:00
richΛrd
e83bd2d582 feat(gossipsub1.4): adding new attributes and protobuffers (#1515) 2025-07-12 16:12:06 +00:00
richΛrd
998bb58aef feat(gossipsub1_4): preamble store (#1513) 2025-07-11 16:48:37 -04:00
Gabriel Cruz
c1f6dec7d3 chore(peerinfo): enable calling update multiple times (#1525) 2025-07-11 13:28:34 -04:00
vladopajic
13c613c26c chore: add template newSeqUninit (#1518) 2025-07-11 13:19:24 +00:00
vladopajic
45f0f9f47a chore: removing unused type StreamSeq (#1507) 2025-07-11 12:24:01 +00:00
Gabriel Cruz
b1dd0a2ec6 chore: fix broken README links (#1517) 2025-07-10 22:35:37 +00:00
Gabriel Cruz
beecfdfadb chore(autotls): prevent nil access on stop (#1514) 2025-07-10 14:02:58 -03:00
148 changed files with 6580 additions and 1639 deletions

.github/actions/add_comment/action.yml (vendored, new file)

@@ -0,0 +1,34 @@
name: Add Comment
description: "Add or update comment in the PR"
runs:
using: "composite"
steps:
- name: Add/Update Comment
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const marker = "${{ env.MARKER }}";
const body = fs.readFileSync("${{ env.COMMENT_SUMMARY_PATH }}", 'utf8');
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const existing = comments.find(c => c.body && c.body.startsWith(marker));
if (existing) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: existing.id,
body,
});
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body,
});
}
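
The script above implements an idempotent "upsert": it hides a marker string at the top of the comment body, looks for an existing comment starting with that marker, and updates it instead of posting a duplicate on every run. A minimal sketch of the same pattern outside Actions, in Nim against the GitHub REST API (the token env var, owner/repo, and issue number are placeholder assumptions):

```nim
# Sketch only: marker-based comment upsert against the GitHub REST API.
import std/[httpclient, json, os, strformat, strutils]

const Marker = "<!-- perf-summary-marker -->"

proc upsertComment(owner, repo: string, issue: int, body: string) =
  let client = newHttpClient()
  client.headers = newHttpHeaders({
    "Authorization": "Bearer " & getEnv("GH_TOKEN"),
    "Accept": "application/vnd.github+json"
  })
  let base = &"https://api.github.com/repos/{owner}/{repo}/issues"
  # Look for an existing comment that starts with the marker.
  for c in parseJson(client.getContent(&"{base}/{issue}/comments")):
    if c["body"].getStr.startsWith(Marker):
      let cid = c["id"].getInt
      # Update in place instead of posting a duplicate on every run.
      discard client.request(&"{base}/comments/{cid}",
        httpMethod = HttpPatch, body = $(%*{"body": body}))
      return
  # No marker comment yet: create one.
  discard client.request(&"{base}/{issue}/comments",
    httpMethod = HttpPost, body = $(%*{"body": body}))
```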

.github/actions/discord_notify/action.yml (new file)

@@ -0,0 +1,49 @@
name: Discord Failure Notification
description: "Send Discord notification when CI jobs fail"
inputs:
webhook_url:
description: "Discord webhook URL"
required: true
workflow_name:
description: "Name of the workflow that failed"
required: false
default: ${{ github.workflow }}
branch:
description: "Branch name"
required: false
default: ${{ github.ref_name }}
repository:
description: "Repository name"
required: false
default: ${{ github.repository }}
run_id:
description: "GitHub run ID"
required: false
default: ${{ github.run_id }}
server_url:
description: "GitHub server URL"
required: false
default: ${{ github.server_url }}
runs:
using: "composite"
steps:
- name: Send Discord notification
shell: bash
run: |
curl -H "Content-Type: application/json" \
-X POST \
-d "{
\"embeds\": [{
\"title\": \"${{ inputs.workflow_name }} Job Failed\",
\"url\": \"${{ inputs.server_url }}/${{ inputs.repository }}/actions/runs/${{ inputs.run_id }}\",
\"description\": \"The workflow has failed on branch \`${{ inputs.branch }}\`\",
\"color\": 15158332,
\"fields\": [
{\"name\": \"Repository\", \"value\": \"${{ inputs.repository }}\", \"inline\": true},
{\"name\": \"Branch\", \"value\": \"${{ inputs.branch }}\", \"inline\": true}
],
\"timestamp\": \"$(date -u +%Y-%m-%dT%H:%M:%S.000Z)\"
}]
}" \
"${{ inputs.webhook_url }}"

.github/actions/generate_plots/action.yml (new file)

@@ -0,0 +1,24 @@
name: Generate Plots
description: "Set up Python and run script to generate plots with Docker Stats"
runs:
using: "composite"
steps:
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install Python dependencies
shell: bash
run: |
python -m pip install --upgrade pip
pip install matplotlib
- name: Plot Docker Stats
shell: bash
run: python performance/scripts/plot_docker_stats.py
- name: Plot Latency History
shell: bash
run: python performance/scripts/plot_latency_history.py


@@ -8,7 +8,7 @@ inputs:
default: "amd64"
nim_ref:
description: "Nim version"
default: "version-1-6"
default: "version-2-0"
shell:
description: "Shell to run commands in"
default: "bash --noprofile --norc -e -o pipefail"

.github/actions/process_stats/action.yml (new file)

@@ -0,0 +1,21 @@
name: Process Stats
description: "Set up Nim and run scripts to aggregate latency and process raw docker stats"
runs:
using: "composite"
steps:
- name: Set up Nim
uses: jiro4989/setup-nim-action@v2
with:
nim-version: "2.x"
repo-token: ${{ env.GITHUB_TOKEN }}
- name: Aggregate latency stats and prepare markdown for comment and summary
shell: bash
run: |
nim c -r -d:release -o:/tmp/process_latency_stats ./performance/scripts/process_latency_stats.nim
- name: Process raw docker stats to csv files
shell: bash
run: |
nim c -r -d:release -o:/tmp/process_docker_stats ./performance/scripts/process_docker_stats.nim

.github/actions/publish_history/action.yml (new file)

@@ -0,0 +1,36 @@
name: Publish Latency History
description: "Publish latency history CSVs in a configurable branch and folder"
runs:
using: "composite"
steps:
- name: Clone the branch
uses: actions/checkout@v4
with:
repository: ${{ github.repository }}
ref: ${{ env.PUBLISH_BRANCH_NAME }}
path: ${{ env.CHECKOUT_SUBFOLDER_HISTORY }}
fetch-depth: 0
- name: Commit & push latency history CSVs
shell: bash
run: |
cd "$CHECKOUT_SUBFOLDER_HISTORY"
git fetch origin "$PUBLISH_BRANCH_NAME"
git reset --hard "origin/$PUBLISH_BRANCH_NAME"
mkdir -p "$PUBLISH_DIR_LATENCY_HISTORY"
cp ../$SHARED_VOLUME_PATH/$LATENCY_HISTORY_PREFIX*.csv "$PUBLISH_DIR_LATENCY_HISTORY/"
git add "$PUBLISH_DIR_LATENCY_HISTORY"
if git diff-index --quiet HEAD --; then
echo "No changes to commit"
else
git config user.email "github-actions[bot]@users.noreply.github.com"
git config user.name "github-actions[bot]"
git commit -m "Update latency history CSVs"
git push origin "$PUBLISH_BRANCH_NAME"
fi
cd ..
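
The guard in this step uses `git diff-index --quiet HEAD --`, which exits 0 when the index matches HEAD, to avoid creating empty commits. A sketch of the same check driven from Nim (illustrative only; the workflow itself stays in bash):

```nim
# Sketch only: commit and push only when the index differs from HEAD,
# mirroring the git diff-index guard above.
import std/[os, osproc]

proc commitIfChanged(msg, branch: string) =
  if execCmd("git diff-index --quiet HEAD --") == 0:
    echo "No changes to commit"   # exit code 0: nothing staged
  else:
    discard execCmd("git commit -m " & quoteShell(msg))
    discard execCmd("git push origin " & quoteShell(branch))
```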

.github/actions/publish_plots/action.yml (new file)

@@ -0,0 +1,56 @@
name: Publish Plots
description: "Publish plots in performance_plots branch and add to the workflow summary"
runs:
using: "composite"
steps:
- name: Clone the performance_plots branch
uses: actions/checkout@v4
with:
repository: ${{ github.repository }}
ref: ${{ env.PUBLISH_BRANCH_NAME }}
path: ${{ env.CHECKOUT_SUBFOLDER_SUBPLOTS }}
fetch-depth: 0
- name: Commit & push plots
shell: bash
run: |
cd $CHECKOUT_SUBFOLDER_SUBPLOTS
git fetch origin "$PUBLISH_BRANCH_NAME"
git reset --hard "origin/$PUBLISH_BRANCH_NAME"
# Remove any branch folder older than 7 days
DAYS=7
cutoff=$(( $(date +%s) - DAYS*24*3600 ))
scan_dir="${PUBLISH_DIR_PLOTS%/}"
find "$scan_dir" -mindepth 1 -maxdepth 1 -type d -print0 \
| while IFS= read -r -d $'\0' d; do \
ts=$(git log -1 --format=%ct -- "$d" 2>/dev/null || true); \
if [ -n "$ts" ] && [ "$ts" -le "$cutoff" ]; then \
echo "[cleanup] Deleting: $d"; \
rm -rf -- "$d"; \
fi; \
done
rm -rf $PUBLISH_DIR_PLOTS/$BRANCH_NAME
mkdir -p $PUBLISH_DIR_PLOTS/$BRANCH_NAME
cp ../$SHARED_VOLUME_PATH/*.png $PUBLISH_DIR_PLOTS/$BRANCH_NAME/ 2>/dev/null || true
cp ../$LATENCY_HISTORY_PATH/*.png $PUBLISH_DIR_PLOTS/ 2>/dev/null || true
git add -A "$PUBLISH_DIR_PLOTS/"
git status
if git diff-index --quiet HEAD --; then
echo "No changes to commit"
else
git config user.email "github-actions[bot]@users.noreply.github.com"
git config user.name "github-actions[bot]"
git commit -m "Update performance plots for $BRANCH_NAME"
git push origin $PUBLISH_BRANCH_NAME
fi
- name: Add plots to GitHub Actions summary
shell: bash
run: |
nim c -r -d:release -o:/tmp/add_plots_to_summary ./performance/scripts/add_plots_to_summary.nim
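
The cleanup loop in this action deletes any per-branch plots folder whose last commit (`git log -1 --format=%ct`) is older than seven days. A standalone sketch of the same age check in Nim, under the assumption that the working directory is the checked-out publish branch:

```nim
# Sketch only: delete plot folders whose last commit is older than the cutoff,
# as the shell loop above does.
import std/[os, osproc, strutils, times]

proc pruneOldDirs(scanDir: string, maxAgeDays = 7) =
  let cutoff = getTime().toUnix() - int64(maxAgeDays) * 24 * 3600
  for kind, path in walkDir(scanDir):
    if kind != pcDir:
      continue
    # Timestamp (%ct) of the last commit touching this folder.
    let (output, code) = execCmdEx("git log -1 --format=%ct -- " & quoteShell(path))
    if code != 0 or output.strip.len == 0:
      continue
    if parseInt(output.strip) <= cutoff:
      echo "[cleanup] Deleting: ", path
      removeDir(path)
```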


@@ -32,8 +32,6 @@ jobs:
- os: windows
cpu: amd64
nim:
- ref: version-1-6
memory_management: refc
- ref: version-2-0
memory_management: refc
- ref: version-2-2


@@ -7,25 +7,36 @@ on:
jobs:
test_amd64_latest:
name: Daily amd64 (latest dependencies)
name: Daily test amd64 (latest dependencies)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['amd64']"
test_amd64_pinned:
name: Daily amd64 (pinned dependencies)
name: Daily test amd64 (pinned dependencies)
uses: ./.github/workflows/daily_common.yml
with:
pinned_deps: true
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['amd64']"
cpu: "['amd64']"
notify-on-failure:
name: Notify Discord on Failure
needs: [test_amd64_latest, test_amd64_pinned]
if: failure()
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Discord notification
uses: ./.github/actions/discord_notify
with:
webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}


@@ -6,18 +6,45 @@ on:
workflow_dispatch:
jobs:
test_i386:
name: Daily i386 (Linux)
test_i386_latest:
name: Daily i386 (latest dependencies)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['i386']"
exclude: "[
{'platform': {'os':'macos'}},
{'platform': {'os':'macos'}},
{'platform': {'os':'windows'}},
]"
test_i386_pinned:
name: Daily i386 (pinned dependencies)
uses: ./.github/workflows/daily_common.yml
with:
pinned_deps: true
nim: "[
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['i386']"
exclude: "[
{'platform': {'os':'macos'}},
{'platform': {'os':'windows'}},
]"
notify-on-failure:
name: Notify Discord on Failure
needs: [test_i386_latest, test_i386_pinned]
if: failure()
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Discord notification
uses: ./.github/actions/discord_notify
with:
webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}

.github/workflows/daily_nimbus.yml (vendored, new file)

@@ -0,0 +1,39 @@
name: Daily Nimbus
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
compile_nimbus:
timeout-minutes: 80
name: 'Compile Nimbus (linux-amd64)'
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Compile nimbus using nim-libp2p
run: |
git clone --branch unstable --single-branch https://github.com/status-im/nimbus-eth2.git
cd nimbus-eth2
git submodule set-branch --branch ${{ github.sha }} vendor/nim-libp2p
make -j"$(nproc)"
make -j"$(nproc)" nimbus_beacon_node
notify-on-failure:
name: Notify Discord on Failure
needs: compile_nimbus
if: failure()
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Discord notification
uses: ./.github/actions/discord_notify
with:
webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}


@@ -50,4 +50,16 @@ jobs:
git branch -D nim-libp2p-auto-bump-${{ matrix.target.ref }} || true
git switch -c nim-libp2p-auto-bump-${{ matrix.target.ref }}
git push -f origin nim-libp2p-auto-bump-${{ matrix.target.ref }}
notify-on-failure:
name: Notify Discord on Failure
needs: [bumper]
if: failure()
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Discord notification
uses: ./.github/actions/discord_notify
with:
webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}


@@ -21,7 +21,7 @@ jobs:
- uses: jiro4989/setup-nim-action@v1
with:
nim-version: '1.6.x'
nim-version: '2.2.x'
- name: Generate doc
run: |


@@ -36,7 +36,7 @@ jobs:
shell: bash
os: linux
cpu: amd64
nim_ref: version-1-6
nim_ref: version-2-2
- name: Restore deps from cache
id: deps-cache

.github/workflows/performance.yml (vendored, new file)

@@ -0,0 +1,86 @@
name: Performance
on:
push:
branches:
- master
pull_request:
merge_group:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
performance:
timeout-minutes: 20
strategy:
fail-fast: false
defaults:
run:
shell: bash
env:
VACP2P: "vacp2p"
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
PR_NUMBER: ${{ github.event.number }}
BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
MARKER: "<!-- perf-summary-marker -->"
COMMENT_SUMMARY_PATH: "/tmp/perf-summary.md"
SHARED_VOLUME_PATH: "performance/output"
DOCKER_STATS_PREFIX: "docker_stats_"
PUBLISH_BRANCH_NAME: "performance_plots"
CHECKOUT_SUBFOLDER_SUBPLOTS: "subplots"
PUBLISH_DIR_PLOTS: "plots"
CHECKOUT_SUBFOLDER_HISTORY: "history"
PUBLISH_DIR_LATENCY_HISTORY: "latency_history"
LATENCY_HISTORY_PATH: "history/latency_history"
LATENCY_HISTORY_PREFIX: "pr"
LATENCY_HISTORY_PLOT_FILENAME: "latency_history_all_scenarios.png"
name: "Performance"
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build Docker Image with cache
uses: docker/build-push-action@v6
with:
context: .
file: performance/Dockerfile
tags: test-node:latest
load: true
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Run
run: |
./performance/runner.sh
- name: Process latency and docker stats
uses: ./.github/actions/process_stats
- name: Publish history
if: github.repository_owner == env.VACP2P
uses: ./.github/actions/publish_history
- name: Generate plots
if: github.repository_owner == env.VACP2P
uses: ./.github/actions/generate_plots
- name: Publish plots and add to summary
if: github.repository_owner == env.VACP2P
uses: ./.github/actions/publish_plots
- name: Post/Update PR comment
if: github.event_name == 'pull_request'
uses: ./.github/actions/add_comment

.pinned

@@ -1,5 +1,5 @@
bearssl;https://github.com/status-im/nim-bearssl@#34d712933a4e0f91f5e66bc848594a581504a215
chronicles;https://github.com/status-im/nim-chronicles@#81a4a7a360c78be9c80c8f735c76b6d4a1517304
chronicles;https://github.com/status-im/nim-chronicles@#61759a5e8df8f4d68bcd1b4b8c1adab3e72bbd8d
chronos;https://github.com/status-im/nim-chronos@#b55e2816eb45f698ddaca8d8473e401502562db2
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
faststreams;https://github.com/status-im/nim-faststreams@#c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d
@@ -8,7 +8,7 @@ json_serialization;https://github.com/status-im/nim-json-serialization@#2b1c5eb1
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
nimcrypto;https://github.com/cheatfate/nimcrypto@#19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
quic;https://github.com/status-im/nim-quic.git@#ca3eda53bee9cef7379be195738ca1490877432f
quic;https://github.com/vacp2p/nim-quic@#fe02a9a5e33e538b8265161f443b9b6c5ec1774c
results;https://github.com/arnetheduck/nim-results@#df8113dda4c2d74d460a8fa98252b0b771bf1f27
secp256k1;https://github.com/status-im/nim-secp256k1@#f808ed5e7a7bfc42204ec7830f14b7a42b63c284
serialization;https://github.com/status-im/nim-serialization@#548d0adc9797a10b2db7f788b804330306293088
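
Each `.pinned` entry has the form `name;url@#commit`, so the `quic` bump above only swaps the URL (status-im to vacp2p) and the pinned revision after `@#`. A hedged parsing sketch, with the format inferred from the lines above:

```nim
# Sketch only: split a .pinned entry of the form "name;url@#rev".
import std/strutils

type PinnedDep = object
  name, url, rev: string

proc parsePinned(line: string): PinnedDep =
  let parts = line.split(';', maxsplit = 1)        # "quic" / "https://...@#fe02..."
  let urlRev = parts[1].split("@#", maxsplit = 1)  # URL / commit hash
  PinnedDep(name: parts[0], url: urlRev[0], rev: urlRev[1])

doAssert parsePinned("quic;https://github.com/vacp2p/nim-quic@#fe02a9a5e33e538b8265161f443b9b6c5ec1774c").rev.len == 40
```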

README.md

@@ -39,7 +39,7 @@ Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p'
## Install
> The currently supported Nim versions are 1.6, 2.0 and 2.2.
> The currently supported Nim versions are 2.0 and 2.2.
```
nimble install libp2p
@@ -71,6 +71,10 @@ git clone https://github.com/vacp2p/nim-libp2p
cd nim-libp2p
nimble install -dy
```
You can use `nix develop` to start a shell with Nim and Nimble.
Nimble 0.20.1 is required for running `testnative`. At the time of writing, this version is not available in nixpkgs: if using `nix develop`, follow up with `nimble install nimble` and use the resulting binary (typically `~/.nimble/bin/nimble`).
### Testing
Run unit tests:
```sh
@@ -97,6 +101,7 @@ The libp2p implementation in Nim is a work in progress. We welcome contributors
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
- **Code format**. Code should be formatted with [nph](https://github.com/arnetheduck/nph) and follow the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
- **Join the Conversation**. Connect with other contributors in our [community channel](https://discord.com/channels/1204447718093750272/1351621032263417946). Ask questions, share ideas, get support, and stay informed about the latest updates from the maintainers.
### Contributors
<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
@@ -119,6 +124,11 @@ Enable quic transport support
nim c -d:libp2p_quic_support some_file.nim
```
Enable autotls support
```bash
nim c -d:libp2p_autotls_support some_file.nim
```
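
These `-d:` flags work through compile-time gating: feature code is wrapped in `when defined(...)` so it only exists in builds that pass the flag. A minimal sketch using the autotls symbol above (the proc body is illustrative):

```nim
# Sketch only: compile-time feature gating as used for the autotls flag above.
when defined(libp2p_autotls_support):
  proc setupAutotls() =
    echo "autotls enabled"
else:
  {.hint: "autotls disabled. Use -d:libp2p_autotls_support".}
```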
Enable expensive metrics (ie, metrics with per-peer cardinality):
```bash
nim c -d:libp2p_expensive_metrics some_file.nim
@@ -190,7 +200,7 @@ The versioning follows [semver](https://semver.org/), with some additions:
- Some libp2p procedures are marked `.public.`; they will remain compatible within each `MAJOR` version
- The rest of the procedures are considered internal and can change in any `MINOR` version (but remain compatible for each new `PATCH`)
We aim to be compatible at all times with at least 2 Nim `MINOR` versions, currently `1.6 & 2.0`
We aim to be compatible at all times with at least 2 Nim `MINOR` versions, currently `2.0 & 2.2`
## License

examples/README.md

@@ -1,5 +1,5 @@
# nim-libp2p examples
In this folder, you'll find the sources of the [nim-libp2p website](https://status-im.github.io/nim-libp2p/docs/)
In this folder, you'll find the sources of the [nim-libp2p website](https://vacp2p.github.io/nim-libp2p/docs/)
We recommend following the tutorials on the website, but feel free to grok the sources here!


@@ -1,3 +1,4 @@
{.used.}
## # Circuit Relay example
##
## Circuit Relay can be used when a node cannot reach another node


@@ -1,3 +1,4 @@
{.used.}
when not (compileOption("threads")):
{.fatal: "Please, compile this program with the --threads:on option!".}


@@ -1,3 +1,5 @@
{.used.}
import chronos # an efficient library for async
import stew/byteutils # various utils
import libp2p
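
All of the example sources in this range gain a leading `{.used.}`. That pragma marks the module as intentionally imported for its side effects, so a test or docs harness can import every example without triggering unused-import hints. A minimal sketch, with hypothetical file names:

```nim
# Sketch only (hypothetical file names).

# --- example_ping.nim ---
{.used.}  # marks this module as used: a harness can import it purely for
          # its side effects without an UnusedImport hint
echo "ping example executed at import time"

# --- harness.nim ---
# import example_ping   # compiles cleanly thanks to {.used.}
```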


@@ -3,4 +3,4 @@
Welcome to the nim-libp2p documentation!
Here, you'll find [tutorials](tutorial_1_connect.md) to help you get started, as well as
the [full reference](https://status-im.github.io/nim-libp2p/master/libp2p.html).
the [full reference](https://vacp2p.github.io/nim-libp2p/master/libp2p.html).


@@ -1,3 +1,4 @@
{.used.}
## # Simple ping tutorial
##
## Hi all, welcome to the first nim-libp2p tutorial!


@@ -1,3 +1,4 @@
{.used.}
## # Custom protocol in libp2p
##
## In the [previous tutorial](tutorial_1_connect.md), we've looked at how to create a simple ping program using the `nim-libp2p`.


@@ -1,3 +1,4 @@
{.used.}
## # Protobuf usage
##
## In the [previous tutorial](tutorial_2_customproto.md), we created a simple "ping" protocol.


@@ -1,3 +1,4 @@
{.used.}
## # GossipSub
##
## In this tutorial, we'll build a simple GossipSub network


@@ -1,3 +1,4 @@
{.used.}
## # Discovery Manager
##
## In the [previous tutorial](tutorial_4_gossipsub.md), we built a custom protocol using [protobuf](https://developers.google.com/protocol-buffers) and


@@ -1,3 +1,4 @@
{.used.}
## # Tron example
##
## In this tutorial, we will create a video game based on libp2p, using

flake.lock (generated, new file)

@@ -0,0 +1,27 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1752620740,
"narHash": "sha256-f3pO+9lg66mV7IMmmIqG4PL3223TYMlnlw+pnpelbss=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "32a4e87942101f1c9f9865e04dc3ddb175f5f32e",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-25.05",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

flake.nix (new file)

@@ -0,0 +1,34 @@
{
description = "nim-libp2p dev shell flake";
nixConfig = {
extra-substituters = [ "https://nix-cache.status.im/" ];
extra-trusted-public-keys = [ "nix-cache.status.im-1:x/93lOfLU+duPplwMSBR+OlY4+mo+dCN7n0mr4oPwgY=" ];
};
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
};
outputs = { self, nixpkgs }:
let
stableSystems = [
"x86_64-linux" "aarch64-linux" "armv7a-linux"
"x86_64-darwin" "aarch64-darwin"
"x86_64-windows"
];
forEach = nixpkgs.lib.genAttrs;
forAllSystems = forEach stableSystems;
pkgsFor = forEach stableSystems (
system: import nixpkgs { inherit system; }
);
in rec {
devShells = forAllSystems (system: {
default = pkgsFor.${system}.mkShell {
nativeBuildInputs = with pkgsFor.${system}; [
nim-2_2 nimble openssl.dev
];
};
});
};
}


@@ -1,5 +1,5 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:1.6.16 as builder
FROM nimlang/nim:latest as builder
WORKDIR /workspace
@@ -7,11 +7,11 @@ COPY .pinned libp2p.nimble nim-libp2p/
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev
RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240dbf11c544011dd0e033d3c3acca56af" -y
RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
COPY . nim-libp2p/
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs --mm:refc -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./interop/hole-punching/hole_punching.nim
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs2 --mm:refc -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./interop/hole-punching/hole_punching.nim
FROM --platform=linux/amd64 debian:bullseye-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2 libssl-dev


@@ -1,18 +1,18 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:1.6.16 as builder
FROM nimlang/nim:latest as builder
WORKDIR /app
COPY .pinned libp2p.nimble nim-libp2p/
COPY .pinned libp2p.nimble nim-libp2p/
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev
RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240dbf11c544011dd0e033d3c3acca56af" -y
RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
COPY . nim-libp2p/
RUN \
cd nim-libp2p && \
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs -p:nim-libp2p --mm:refc -d:libp2p_quic_support -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./interop/transport/main.nim
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs2 -p:nim-libp2p --mm:refc -d:libp2p_quic_support -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./interop/transport/main.nim
ENTRYPOINT ["/app/nim-libp2p/interop/transport/main"]

libp2p.nimble

@@ -1,16 +1,17 @@
mode = ScriptMode.Verbose
packageName = "libp2p"
version = "1.11.0"
version = "1.12.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
requires "nim >= 1.6.0",
requires "nim >= 2.0.0",
"nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
"chronicles >= 0.10.3 & < 0.11.0", "chronos >= 4.0.4", "metrics", "secp256k1",
"stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.7",
"chronicles >= 0.11.0 & < 0.12.0", "chronos >= 4.0.4", "metrics", "secp256k1",
"stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results",
"https://github.com/vacp2p/nim-quic.git#fe02a9a5e33e538b8265161f443b9b6c5ec1774c",
"https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
@@ -30,7 +31,7 @@ proc runTest(filename: string, moreoptions: string = "") =
excstr.add(" " & moreoptions & " ")
if getEnv("CICOV").len > 0:
excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
exec excstr & " -r -d:libp2p_quic_support tests/" & filename
exec excstr & " -r -d:libp2p_quic_support -d:libp2p_autotls_support tests/" & filename
rmFile "tests/" & filename.toExe
proc buildSample(filename: string, run = false, extraFlags = "") =
@@ -56,7 +57,7 @@ task testinterop, "Runs interop tests":
runTest("testinterop")
task testpubsub, "Runs pubsub tests":
runTest("pubsub/testpubsub")
runTest("pubsub/testpubsub", "-d:libp2p_gossipsub_1_4")
task testfilter, "Run PKI filter test":
runTest("testpkifilter")

libp2p/autotls/acme/api.nim

@@ -1,6 +1,6 @@
import options, sequtils, strutils, json, uri
import json, uri
from times import DateTime, parse
import chronos/apps/http/httpclient, jwt, results, bearssl/pem, chronicles
import chronos/apps/http/httpclient, results, chronicles
import ./utils
import ../../crypto/crypto
@@ -158,352 +158,376 @@ type ACMECertificateResponse* = object
rawCertificate*: string
certificateExpiry*: DateTime
template handleError*(msg: string, body: untyped): untyped =
try:
body
except ACMEError as exc:
raise exc
except CancelledError as exc:
raise exc
except JsonKindError as exc:
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
except ValueError as exc:
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
except HttpError as exc:
raise newException(ACMEError, msg & ": Failed to connect to ACME server", exc)
except CatchableError as exc:
raise newException(ACMEError, msg & ": Unexpected error", exc)
type ACMECertificate* = object
rawCertificate*: string
certificateExpiry*: DateTime
certKeyPair*: KeyPair
method post*(
self: ACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.}
when defined(libp2p_autotls_support):
import options, sequtils, strutils, jwt, bearssl/pem
method get*(
self: ACMEApi, uri: Uri
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.}
template handleError*(msg: string, body: untyped): untyped =
try:
body
except ACMEError as exc:
raise exc
except CancelledError as exc:
raise exc
except JsonKindError as exc:
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
except ValueError as exc:
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
except HttpError as exc:
raise newException(ACMEError, msg & ": Failed to connect to ACME server", exc)
except CatchableError as exc:
raise newException(ACMEError, msg & ": Unexpected error", exc)
proc new*(
T: typedesc[ACMEApi], acmeServerURL: Uri = parseUri(LetsEncryptURL)
): ACMEApi =
let session = HttpSessionRef.new()
ACMEApi(
session: session, directory: Opt.none(ACMEDirectory), acmeServerURL: acmeServerURL
)
proc getDirectory(
self: ACMEApi
): Future[ACMEDirectory] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("getDirectory"):
self.directory.valueOr:
let acmeResponse = await self.get(self.acmeServerURL / "directory")
let directory = acmeResponse.body.to(ACMEDirectory)
self.directory = Opt.some(directory)
directory
method requestNonce*(
self: ACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]), base.} =
handleError("requestNonce"):
let acmeResponse = await self.get(parseUri((await self.getDirectory()).newNonce))
Nonce(acmeResponse.headers.keyOrError("Replay-Nonce"))
# TODO: save n and e in account so we don't have to recalculate every time
proc acmeHeader(
self: ACMEApi, uri: Uri, key: KeyPair, needsJwk: bool, kid: Opt[Kid]
): Future[ACMERequestHeader] {.async: (raises: [ACMEError, CancelledError]).} =
if not needsJwk and kid.isNone():
raise newException(ACMEError, "kid not set")
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
raise newException(ACMEError, "Unsupported signing key type")
let newNonce = await self.requestNonce()
if needsJwk:
let pubkey = key.pubkey.rsakey
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
ACMERequestHeader(
kind: ACMEJwkRequest,
alg: Alg,
typ: "JWT",
nonce: newNonce,
url: $uri,
jwk: JWK(kty: "RSA", n: base64UrlEncode(nArray), e: base64UrlEncode(eArray)),
)
else:
ACMERequestHeader(
kind: ACMEKidRequest,
alg: Alg,
typ: "JWT",
nonce: newNonce,
url: $uri,
kid: kid.get(),
)
method post*(
method post*(
self: ACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.} =
let rawResponse = await HttpClientRequestRef
.post(self.session, $uri, body = payload, headers = ACMEHttpHeaders)
.get()
.send()
let body = await rawResponse.getResponseBody()
HTTPResponse(body: body, headers: rawResponse.headers)
.}
method get*(
method get*(
self: ACMEApi, uri: Uri
): Future[HTTPResponse] {.
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.} =
let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
let body = await rawResponse.getResponseBody()
HTTPResponse(body: body, headers: rawResponse.headers)
.}
proc createSignedAcmeRequest(
self: ACMEApi,
uri: Uri,
payload: auto,
key: KeyPair,
needsJwk: bool = false,
kid: Opt[Kid] = Opt.none(Kid),
): Future[string] {.async: (raises: [ACMEError, CancelledError]).} =
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
raise newException(ACMEError, "Unsupported signing key type")
proc new*(
T: typedesc[ACMEApi], acmeServerURL: Uri = parseUri(LetsEncryptURL)
): ACMEApi =
let session = HttpSessionRef.new()
let acmeHeader = await self.acmeHeader(uri, key, needsJwk, kid)
handleError("createSignedAcmeRequest"):
var token = toJWT(%*{"header": acmeHeader, "claims": payload})
let derPrivKey = key.seckey.rsakey.getBytes.get
let pemPrivKey: string = pemEncode(derPrivKey, "PRIVATE KEY")
token.sign(pemPrivKey)
$token.toFlattenedJson()
proc requestRegister*(
self: ACMEApi, key: KeyPair
): Future[ACMERegisterResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let registerRequest = ACMERegisterRequest(termsOfServiceAgreed: true)
handleError("acmeRegister"):
let payload = await self.createSignedAcmeRequest(
parseUri((await self.getDirectory()).newAccount),
registerRequest,
key,
needsJwk = true,
)
let acmeResponse =
await self.post(parseUri((await self.getDirectory()).newAccount), payload)
let acmeResponseBody = acmeResponse.body.to(ACMERegisterResponseBody)
ACMERegisterResponse(
status: acmeResponseBody.status, kid: acmeResponse.headers.keyOrError("location")
ACMEApi(
session: session, directory: Opt.none(ACMEDirectory), acmeServerURL: acmeServerURL
)
proc requestNewOrder*(
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
# request challenge from ACME server
let orderRequest = ACMEChallengeRequest(
identifiers: domains.mapIt(ACMEChallengeIdentifier(`type`: "dns", value: it))
)
handleError("requestNewOrder"):
let payload = await self.createSignedAcmeRequest(
parseUri((await self.getDirectory()).newOrder),
orderRequest,
key,
kid = Opt.some(kid),
)
let acmeResponse =
await self.post(parseUri((await self.getDirectory()).newOrder), payload)
let challengeResponseBody = acmeResponse.body.to(ACMEChallengeResponseBody)
if challengeResponseBody.authorizations.len == 0:
raise newException(ACMEError, "Authorizations field is empty")
ACMEChallengeResponse(
status: challengeResponseBody.status,
authorizations: challengeResponseBody.authorizations,
finalize: challengeResponseBody.finalize,
order: acmeResponse.headers.keyOrError("location"),
)
proc getDirectory(
self: ACMEApi
): Future[ACMEDirectory] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("getDirectory"):
self.directory.valueOr:
let acmeResponse = await self.get(self.acmeServerURL / "directory")
let directory = acmeResponse.body.to(ACMEDirectory)
self.directory = Opt.some(directory)
directory
proc requestAuthorizations*(
self: ACMEApi, authorizations: seq[Authorization], key: KeyPair, kid: Kid
): Future[ACMEAuthorizationsResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestAuthorizations"):
doAssert authorizations.len > 0
let acmeResponse = await self.get(parseUri(authorizations[0]))
acmeResponse.body.to(ACMEAuthorizationsResponse)
method requestNonce*(
self: ACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]), base.} =
handleError("requestNonce"):
let acmeResponse = await self.get(parseUri((await self.getDirectory()).newNonce))
Nonce(acmeResponse.headers.keyOrError("Replay-Nonce"))
proc requestChallenge*(
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponseWrapper] {.async: (raises: [ACMEError, CancelledError]).} =
let orderResponse = await self.requestNewOrder(domains, key, kid)
if orderResponse.status != ACMEOrderStatus.PENDING and
orderResponse.status != ACMEOrderStatus.READY:
# ready is a valid status when renewing certs before expiry
raise newException(ACMEError, "Invalid new order status: " & $orderResponse.status)
# TODO: save n and e in account so we don't have to recalculate every time
proc acmeHeader(
self: ACMEApi, uri: Uri, key: KeyPair, needsJwk: bool, kid: Opt[Kid]
): Future[ACMERequestHeader] {.async: (raises: [ACMEError, CancelledError]).} =
if not needsJwk and kid.isNone():
raise newException(ACMEError, "kid not set")
let authorizationsResponse =
await self.requestAuthorizations(orderResponse.authorizations, key, kid)
if authorizationsResponse.challenges.len == 0:
raise newException(ACMEError, "No challenges received")
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
raise newException(ACMEError, "Unsupported signing key type")
return ACMEChallengeResponseWrapper(
finalize: orderResponse.finalize,
order: orderResponse.order,
dns01: authorizationsResponse.challenges.filterIt(
it.`type` == ACMEChallengeType.DNS01
)[0],
# getting the first element is safe since we checked that authorizationsResponse.challenges.len != 0
)
proc requestCheck*(
self: ACMEApi, checkURL: Uri, checkKind: ACMECheckKind, key: KeyPair, kid: Kid
): Future[ACMECheckResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestCheck"):
let acmeResponse = await self.get(checkURL)
let retryAfter =
try:
parseInt(acmeResponse.headers.keyOrError("Retry-After")).seconds
except ValueError:
DefaultChalCompletedRetryTime
case checkKind
of ACMEOrderCheck:
try:
ACMECheckResponse(
kind: checkKind,
orderStatus: parseEnum[ACMEOrderStatus](acmeResponse.body["status"].getStr),
retryAfter: retryAfter,
)
except ValueError:
raise newException(
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
)
of ACMEChallengeCheck:
try:
ACMECheckResponse(
kind: checkKind,
chalStatus: parseEnum[ACMEChallengeStatus](acmeResponse.body["status"].getStr),
retryAfter: retryAfter,
)
except ValueError:
raise newException(
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
)
proc sendChallengeCompleted*(
self: ACMEApi, chalURL: Uri, key: KeyPair, kid: Kid
): Future[ACMECompletedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("sendChallengeCompleted"):
let payload =
await self.createSignedAcmeRequest(chalURL, %*{}, key, kid = Opt.some(kid))
let acmeResponse = await self.post(chalURL, payload)
acmeResponse.body.to(ACMECompletedResponse)
proc checkChallengeCompleted*(
self: ACMEApi,
checkURL: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
for i in 0 .. retries:
let checkResponse = await self.requestCheck(checkURL, ACMEChallengeCheck, key, kid)
case checkResponse.chalStatus
of ACMEChallengeStatus.PENDING:
await sleepAsync(checkResponse.retryAfter) # try again after some delay
of ACMEChallengeStatus.VALID:
return true
else:
raise newException(
ACMEError,
"Failed challenge completion: expected 'valid', got '" &
$checkResponse.chalStatus & "'",
let newNonce = await self.requestNonce()
if needsJwk:
let pubkey = key.pubkey.rsakey
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
ACMERequestHeader(
kind: ACMEJwkRequest,
alg: Alg,
typ: "JWT",
nonce: newNonce,
url: $uri,
jwk: JWK(kty: "RSA", n: base64UrlEncode(nArray), e: base64UrlEncode(eArray)),
)
return false
proc completeChallenge*(
self: ACMEApi,
chalURL: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
let completedResponse = await self.sendChallengeCompleted(chalURL, key, kid)
# check until acme server is done (poll validation)
return await self.checkChallengeCompleted(chalURL, key, kid, retries = retries)
proc requestFinalize*(
self: ACMEApi, domain: Domain, finalize: Uri, key: KeyPair, kid: Kid
): Future[ACMEFinalizeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestFinalize"):
let payload = await self.createSignedAcmeRequest(
finalize, %*{"csr": createCSR(domain)}, key, kid = Opt.some(kid)
)
let acmeResponse = await self.post(finalize, payload)
# server responds with updated order response
acmeResponse.body.to(ACMEFinalizeResponse)
proc checkCertFinalized*(
self: ACMEApi,
order: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
for i in 0 .. retries:
let checkResponse = await self.requestCheck(order, ACMEOrderCheck, key, kid)
case checkResponse.orderStatus
of ACMEOrderStatus.VALID:
return true
of ACMEOrderStatus.PROCESSING:
await sleepAsync(checkResponse.retryAfter) # try again after some delay
else:
error "Failed certificate finalization",
description = "expected 'valid', got '" & $checkResponse.orderStatus & "'"
return false # do not try again
ACMERequestHeader(
kind: ACMEKidRequest,
alg: Alg,
typ: "JWT",
nonce: newNonce,
url: $uri,
kid: kid.get(),
)
return false
proc certificateFinalized*(
self: ACMEApi,
domain: Domain,
finalize: Uri,
order: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultFinalizeRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
let finalizeResponse = await self.requestFinalize(domain, finalize, key, kid)
# keep checking order until cert is valid (done)
return await self.checkCertFinalized(order, key, kid, retries = retries)
proc requestGetOrder*(
self: ACMEApi, order: Uri
): Future[ACMEOrderResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestGetOrder"):
let acmeResponse = await self.get(order)
acmeResponse.body.to(ACMEOrderResponse)
proc downloadCertificate*(
self: ACMEApi, order: Uri
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let orderResponse = await self.requestGetOrder(order)
handleError("downloadCertificate"):
method post*(
self: ACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.} =
let rawResponse = await HttpClientRequestRef
.get(self.session, orderResponse.certificate)
.post(self.session, $uri, body = payload, headers = ACMEHttpHeaders)
.get()
.send()
ACMECertificateResponse(
rawCertificate: bytesToString(await rawResponse.getBodyBytes()),
certificateExpiry: parse(orderResponse.expires, "yyyy-MM-dd'T'HH:mm:ss'Z'"),
let body = await rawResponse.getResponseBody()
HTTPResponse(body: body, headers: rawResponse.headers)
method get*(
self: ACMEApi, uri: Uri
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.} =
let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
let body = await rawResponse.getResponseBody()
HTTPResponse(body: body, headers: rawResponse.headers)
proc createSignedAcmeRequest(
self: ACMEApi,
uri: Uri,
payload: auto,
key: KeyPair,
needsJwk: bool = false,
kid: Opt[Kid] = Opt.none(Kid),
): Future[string] {.async: (raises: [ACMEError, CancelledError]).} =
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
raise newException(ACMEError, "Unsupported signing key type")
let acmeHeader = await self.acmeHeader(uri, key, needsJwk, kid)
handleError("createSignedAcmeRequest"):
var token = toJWT(%*{"header": acmeHeader, "claims": payload})
let derPrivKey = key.seckey.rsakey.getBytes.get
let pemPrivKey: string = pemEncode(derPrivKey, "PRIVATE KEY")
token.sign(pemPrivKey)
$token.toFlattenedJson()
proc requestRegister*(
self: ACMEApi, key: KeyPair
): Future[ACMERegisterResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let registerRequest = ACMERegisterRequest(termsOfServiceAgreed: true)
handleError("acmeRegister"):
let payload = await self.createSignedAcmeRequest(
parseUri((await self.getDirectory()).newAccount),
registerRequest,
key,
needsJwk = true,
)
let acmeResponse =
await self.post(parseUri((await self.getDirectory()).newAccount), payload)
let acmeResponseBody = acmeResponse.body.to(ACMERegisterResponseBody)
ACMERegisterResponse(
status: acmeResponseBody.status,
kid: acmeResponse.headers.keyOrError("location"),
)
proc requestNewOrder*(
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
# request challenge from ACME server
let orderRequest = ACMEChallengeRequest(
identifiers: domains.mapIt(ACMEChallengeIdentifier(`type`: "dns", value: it))
)
handleError("requestNewOrder"):
let payload = await self.createSignedAcmeRequest(
parseUri((await self.getDirectory()).newOrder),
orderRequest,
key,
kid = Opt.some(kid),
)
let acmeResponse =
await self.post(parseUri((await self.getDirectory()).newOrder), payload)
let challengeResponseBody = acmeResponse.body.to(ACMEChallengeResponseBody)
if challengeResponseBody.authorizations.len == 0:
raise newException(ACMEError, "Authorizations field is empty")
ACMEChallengeResponse(
status: challengeResponseBody.status,
authorizations: challengeResponseBody.authorizations,
finalize: challengeResponseBody.finalize,
order: acmeResponse.headers.keyOrError("location"),
)
proc requestAuthorizations*(
self: ACMEApi, authorizations: seq[Authorization], key: KeyPair, kid: Kid
): Future[ACMEAuthorizationsResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestAuthorizations"):
doAssert authorizations.len > 0
let acmeResponse = await self.get(parseUri(authorizations[0]))
acmeResponse.body.to(ACMEAuthorizationsResponse)
proc requestChallenge*(
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponseWrapper] {.
async: (raises: [ACMEError, CancelledError])
.} =
let orderResponse = await self.requestNewOrder(domains, key, kid)
if orderResponse.status != ACMEOrderStatus.PENDING and
orderResponse.status != ACMEOrderStatus.READY:
# ready is a valid status when renewing certs before expiry
raise
newException(ACMEError, "Invalid new order status: " & $orderResponse.status)
let authorizationsResponse =
await self.requestAuthorizations(orderResponse.authorizations, key, kid)
if authorizationsResponse.challenges.len == 0:
raise newException(ACMEError, "No challenges received")
return ACMEChallengeResponseWrapper(
finalize: orderResponse.finalize,
order: orderResponse.order,
dns01: authorizationsResponse.challenges.filterIt(
it.`type` == ACMEChallengeType.DNS01
)[0],
# getting the first element is safe since we checked that authorizationsResponse.challenges.len != 0
)
proc close*(self: ACMEApi) {.async: (raises: [CancelledError]).} =
await self.session.closeWait()
proc requestCheck*(
self: ACMEApi, checkURL: Uri, checkKind: ACMECheckKind, key: KeyPair, kid: Kid
): Future[ACMECheckResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestCheck"):
let acmeResponse = await self.get(checkURL)
let retryAfter =
try:
parseInt(acmeResponse.headers.keyOrError("Retry-After")).seconds
except ValueError:
DefaultChalCompletedRetryTime
case checkKind
of ACMEOrderCheck:
try:
ACMECheckResponse(
kind: checkKind,
orderStatus: parseEnum[ACMEOrderStatus](acmeResponse.body["status"].getStr),
retryAfter: retryAfter,
)
except ValueError:
raise newException(
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
)
of ACMEChallengeCheck:
try:
ACMECheckResponse(
kind: checkKind,
chalStatus:
parseEnum[ACMEChallengeStatus](acmeResponse.body["status"].getStr),
retryAfter: retryAfter,
)
except ValueError:
raise newException(
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
)
proc sendChallengeCompleted*(
self: ACMEApi, chalURL: Uri, key: KeyPair, kid: Kid
): Future[ACMECompletedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("sendChallengeCompleted"):
let payload =
await self.createSignedAcmeRequest(chalURL, %*{}, key, kid = Opt.some(kid))
let acmeResponse = await self.post(chalURL, payload)
acmeResponse.body.to(ACMECompletedResponse)
proc checkChallengeCompleted*(
self: ACMEApi,
checkURL: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
for i in 0 .. retries:
let checkResponse =
await self.requestCheck(checkURL, ACMEChallengeCheck, key, kid)
case checkResponse.chalStatus
of ACMEChallengeStatus.PENDING:
await sleepAsync(checkResponse.retryAfter) # try again after some delay
of ACMEChallengeStatus.VALID:
return true
else:
raise newException(
ACMEError,
"Failed challenge completion: expected 'valid', got '" &
$checkResponse.chalStatus & "'",
)
return false
proc completeChallenge*(
self: ACMEApi,
chalURL: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
let completedResponse = await self.sendChallengeCompleted(chalURL, key, kid)
# check until acme server is done (poll validation)
return await self.checkChallengeCompleted(chalURL, key, kid, retries = retries)
proc requestFinalize*(
self: ACMEApi,
domain: Domain,
finalize: Uri,
certKeyPair: KeyPair,
key: KeyPair,
kid: Kid,
): Future[ACMEFinalizeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestFinalize"):
let payload = await self.createSignedAcmeRequest(
finalize, %*{"csr": createCSR(domain, certKeyPair)}, key, kid = Opt.some(kid)
)
let acmeResponse = await self.post(finalize, payload)
# server responds with updated order response
acmeResponse.body.to(ACMEFinalizeResponse)
proc checkCertFinalized*(
self: ACMEApi,
order: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
for i in 0 .. retries:
let checkResponse = await self.requestCheck(order, ACMEOrderCheck, key, kid)
case checkResponse.orderStatus
of ACMEOrderStatus.VALID:
return true
of ACMEOrderStatus.PROCESSING:
await sleepAsync(checkResponse.retryAfter) # try again after some delay
else:
error "Failed certificate finalization",
description = "expected 'valid', got '" & $checkResponse.orderStatus & "'"
return false # do not try again
return false
proc certificateFinalized*(
self: ACMEApi,
domain: Domain,
finalize: Uri,
order: Uri,
certKeyPair: KeyPair,
key: KeyPair,
kid: Kid,
retries: int = DefaultFinalizeRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
let finalizeResponse =
await self.requestFinalize(domain, finalize, certKeyPair, key, kid)
# keep checking order until cert is valid (done)
return await self.checkCertFinalized(order, key, kid, retries = retries)
proc requestGetOrder*(
self: ACMEApi, order: Uri
): Future[ACMEOrderResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestGetOrder"):
let acmeResponse = await self.get(order)
acmeResponse.body.to(ACMEOrderResponse)
proc downloadCertificate*(
self: ACMEApi, order: Uri
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let orderResponse = await self.requestGetOrder(order)
handleError("downloadCertificate"):
let rawResponse = await HttpClientRequestRef
.get(self.session, orderResponse.certificate)
.get()
.send()
ACMECertificateResponse(
rawCertificate: bytesToString(await rawResponse.getBodyBytes()),
certificateExpiry: parse(orderResponse.expires, "yyyy-MM-dd'T'HH:mm:ss'Z'"),
)
proc close*(self: ACMEApi) {.async: (raises: [CancelledError]).} =
await self.session.closeWait()
else:
{.hint: "autotls disabled. Use -d:libp2p_autotls_support".}

libp2p/autotls/acme/client.nim

@@ -9,12 +9,9 @@
{.push raises: [].}
import uri
import chronos, results, chronicles, stew/byteutils
import ./api, ./utils
import chronicles
import ../../crypto/crypto
import ../../crypto/rsa
import ./api
export api
@@ -28,59 +25,74 @@ type ACMEClient* = ref object
logScope:
topics = "libp2p acme client"
proc new*(
T: typedesc[ACMEClient],
rng: ref HmacDrbgContext = newRng(),
api: ACMEApi = ACMEApi.new(acmeServerURL = parseUri(LetsEncryptURL)),
key: Opt[KeyPair] = Opt.none(KeyPair),
kid: Kid = Kid(""),
): T {.raises: [].} =
let key = key.valueOr:
KeyPair.random(PKScheme.RSA, rng[]).get()
T(api: api, key: key, kid: kid)
when defined(libp2p_autotls_support):
import uri
import chronos, results, stew/byteutils
import ../../crypto/rsa
import ./utils
proc getOrInitKid*(
self: ACMEClient
): Future[Kid] {.async: (raises: [ACMEError, CancelledError]).} =
if self.kid.len == 0:
let registerResponse = await self.api.requestRegister(self.key)
self.kid = registerResponse.kid
return self.kid
proc new*(
T: typedesc[ACMEClient],
rng: ref HmacDrbgContext = newRng(),
api: ACMEApi = ACMEApi.new(acmeServerURL = parseUri(LetsEncryptURL)),
key: Opt[KeyPair] = Opt.none(KeyPair),
kid: Kid = Kid(""),
): T {.raises: [].} =
let key = key.valueOr:
KeyPair.random(PKScheme.RSA, rng[]).get()
T(api: api, key: key, kid: kid)
proc genKeyAuthorization*(self: ACMEClient, token: string): KeyAuthorization =
base64UrlEncode(@(sha256.digest((token & "." & thumbprint(self.key)).toBytes).data))
proc getOrInitKid*(
self: ACMEClient
): Future[Kid] {.async: (raises: [ACMEError, CancelledError]).} =
if self.kid.len == 0:
let registerResponse = await self.api.requestRegister(self.key)
self.kid = registerResponse.kid
return self.kid
proc getChallenge*(
self: ACMEClient, domains: seq[api.Domain]
): Future[ACMEChallengeResponseWrapper] {.async: (raises: [ACMEError, CancelledError]).} =
await self.api.requestChallenge(domains, self.key, await self.getOrInitKid())
proc genKeyAuthorization*(self: ACMEClient, token: string): KeyAuthorization =
base64UrlEncode(@(sha256.digest((token & "." & thumbprint(self.key)).toBytes).data))
proc getCertificate*(
self: ACMEClient, domain: api.Domain, challenge: ACMEChallengeResponseWrapper
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let chalURL = parseUri(challenge.dns01.url)
let orderURL = parseUri(challenge.order)
let finalizeURL = parseUri(challenge.finalize)
trace "sending challenge completed notification"
discard
await self.api.sendChallengeCompleted(chalURL, self.key, await self.getOrInitKid())
proc getChallenge*(
self: ACMEClient, domains: seq[api.Domain]
): Future[ACMEChallengeResponseWrapper] {.
async: (raises: [ACMEError, CancelledError])
.} =
await self.api.requestChallenge(domains, self.key, await self.getOrInitKid())
trace "checking for completed challenge"
let completed =
await self.api.checkChallengeCompleted(chalURL, self.key, await self.getOrInitKid())
if not completed:
raise
newException(ACMEError, "Failed to signal ACME server about challenge completion")
proc getCertificate*(
self: ACMEClient,
domain: api.Domain,
certKeyPair: KeyPair,
challenge: ACMEChallengeResponseWrapper,
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let chalURL = parseUri(challenge.dns01.url)
let orderURL = parseUri(challenge.order)
let finalizeURL = parseUri(challenge.finalize)
trace "Sending challenge completed notification"
discard await self.api.sendChallengeCompleted(
chalURL, self.key, await self.getOrInitKid()
)
trace "waiting for certificate to be finalized"
let finalized = await self.api.certificateFinalized(
domain, finalizeURL, orderURL, self.key, await self.getOrInitKid()
)
if not finalized:
raise newException(ACMEError, "Failed to finalize certificate for domain " & domain)
trace "Checking for completed challenge"
let completed = await self.api.checkChallengeCompleted(
chalURL, self.key, await self.getOrInitKid()
)
if not completed:
raise newException(
ACMEError, "Failed to signal ACME server about challenge completion"
)
trace "downloading certificate"
await self.api.downloadCertificate(orderURL)
trace "Waiting for certificate to be finalized"
let finalized = await self.api.certificateFinalized(
domain, finalizeURL, orderURL, certKeyPair, self.key, await self.getOrInitKid()
)
if not finalized:
raise
newException(ACMEError, "Failed to finalize certificate for domain " & domain)
proc close*(self: ACMEClient) {.async: (raises: [CancelledError]).} =
await self.api.close()
trace "Downloading certificate"
await self.api.downloadCertificate(orderURL)
proc close*(self: ACMEClient) {.async: (raises: [CancelledError]).} =
await self.api.close()
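End to end, the client boils the same flow down to a handful of calls. A hedged usage sketch (the domain value is illustrative; publishing the keyAuth TXT record is assumed to happen out-of-band, which is what AutotlsService does via the AutoTLS broker):

proc demo() {.async.} =
  let client = ACMEClient.new()
  let domain = api.Domain("*.example.libp2p.direct")
  let challenge = await client.getChallenge(@[domain])
  let keyAuth = client.genKeyAuthorization(challenge.dns01.token)
  # ... publish keyAuth as the _acme-challenge TXT record here, then:
  let certKeyPair = KeyPair.random(PKScheme.RSA, newRng()[]).get()
  let cert = await client.getCertificate(domain, certKeyPair, challenge)
  trace "certificate obtained", expiry = cert.certificateExpiry
  await client.close()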


@@ -21,19 +21,20 @@ proc new*(
acmeServerURL: parseUri(LetsEncryptURL),
)
method requestNonce*(
self: MockACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
return $self.acmeServerURL & "/acme/1234"
when defined(libp2p_autotls_support):
method requestNonce*(
self: MockACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
return $self.acmeServerURL & "/acme/1234"
method post*(
self: MockACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
result = self.mockedResponses[0]
self.mockedResponses.delete(0)
method post*(
self: MockACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
result = self.mockedResponses[0]
self.mockedResponses.delete(0)
method get*(
self: MockACMEApi, uri: Uri
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
result = self.mockedResponses[0]
self.mockedResponses.delete(0)
method get*(
self: MockACMEApi, uri: Uri
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
result = self.mockedResponses[0]
self.mockedResponses.delete(0)


@@ -1,67 +1,73 @@
import base64, strutils, chronos/apps/http/httpclient, json
import ../../errors
import ../../transports/tls/certificate_ffi
import ../../transports/tls/certificate
import ../../crypto/crypto
import ../../crypto/rsa
type ACMEError* = object of LPError
proc keyOrError*(table: HttpTable, key: string): string {.raises: [ValueError].} =
if not table.contains(key):
raise newException(ValueError, "key " & key & " not present in headers")
table.getString(key)
when defined(libp2p_autotls_support):
import base64, strutils, chronos/apps/http/httpclient, json
import ../../transports/tls/certificate_ffi
import ../../transports/tls/certificate
import ../../crypto/crypto
import ../../crypto/rsa
proc base64UrlEncode*(data: seq[byte]): string =
## Encodes data using base64url (RFC 4648 §5) — no padding, URL-safe
var encoded = base64.encode(data, safe = true)
encoded.removeSuffix("=")
encoded.removeSuffix("=")
return encoded
proc keyOrError*(table: HttpTable, key: string): string {.raises: [ValueError].} =
if not table.contains(key):
raise newException(ValueError, "key " & key & " not present in headers")
table.getString(key)
proc thumbprint*(key: KeyPair): string =
doAssert key.seckey.scheme == PKScheme.RSA, "unsupported key type"
let pubkey = key.pubkey.rsakey
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
proc base64UrlEncode*(data: seq[byte]): string =
## Encodes data using base64url (RFC 4648 §5) — no padding, URL-safe
var encoded = base64.encode(data, safe = true)
encoded.removeSuffix("=")
encoded.removeSuffix("=")
return encoded
let n = base64UrlEncode(nArray)
let e = base64UrlEncode(eArray)
let keyJson = %*{"e": e, "kty": "RSA", "n": n}
let digest = sha256.digest($keyJson)
return base64UrlEncode(@(digest.data))
proc thumbprint*(key: KeyPair): string =
doAssert key.seckey.scheme == PKScheme.RSA, "unsupported key type"
let pubkey = key.pubkey.rsakey
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
proc getResponseBody*(
response: HttpClientResponseRef
): Future[JsonNode] {.async: (raises: [ACMEError, CancelledError]).} =
try:
let bodyBytes = await response.getBodyBytes()
if bodyBytes.len > 0:
return bytesToString(bodyBytes).parseJson()
return %*{} # empty body
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise
newException(ACMEError, "Unexpected error occurred while getting body bytes", exc)
except Exception as exc: # this is required for nim 1.6
raise
newException(ACMEError, "Unexpected error occurred while getting body bytes", exc)
let n = base64UrlEncode(nArray)
let e = base64UrlEncode(eArray)
let keyJson = %*{"e": e, "kty": "RSA", "n": n}
let digest = sha256.digest($keyJson)
return base64UrlEncode(@(digest.data))
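For reference, base64url (RFC 4648 §5) differs from plain base64 only in its alphabet ('-' and '_' replacing '+' and '/') and the dropped '=' padding (at most two characters, hence the two removeSuffix calls), and the alphabetical {"e", "kty", "n"} key order above is exactly RFC 7638's canonical JWK form. A small worked check:

# standard base64 of [0xfb, 0xff] is "+/8="; base64url swaps the alphabet
# and drops the padding
doAssert base64UrlEncode(@[0xfb'u8, 0xff'u8]) == "-_8"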
proc createCSR*(domain: string): string {.raises: [ACMEError].} =
var certKey: cert_key_t
var certCtx: cert_context_t
var derCSR: ptr cert_buffer = nil
proc getResponseBody*(
response: HttpClientResponseRef
): Future[JsonNode] {.async: (raises: [ACMEError, CancelledError]).} =
try:
let bodyBytes = await response.getBodyBytes()
if bodyBytes.len > 0:
return bytesToString(bodyBytes).parseJson()
return %*{} # empty body
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise newException(
ACMEError, "Unexpected error occurred while getting body bytes", exc
)
except Exception as exc: # this is required for nim 1.6
raise newException(
ACMEError, "Unexpected error occurred while getting body bytes", exc
)
let personalizationStr = "libp2p_autotls"
if cert_init_drbg(
personalizationStr.cstring, personalizationStr.len.csize_t, certCtx.addr
) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to initialize certCtx")
if cert_generate_key(certCtx, certKey.addr) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to generate cert key")
proc createCSR*(
domain: string, certKeyPair: KeyPair
): string {.raises: [ACMEError].} =
var certKey: cert_key_t
var certCtx: cert_context_t
var derCSR: ptr cert_buffer = nil
if cert_signing_req(domain.cstring, certKey, derCSR.addr) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to create CSR")
# convert KeyPair to cert_key_t
let rawSeckey: seq[byte] = certKeyPair.seckey.getRawBytes.valueOr:
raise newException(ACMEError, "Failed to get seckey raw bytes (DER)")
let seckeyBuffer = rawSeckey.toCertBuffer()
if cert_new_key_t(seckeyBuffer.unsafeAddr, certKey.addr) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to convert key pair to cert_key_t")
base64.encode(derCSR.toSeq, safe = true)
# create CSR
if cert_signing_req(domain.cstring, certKey, derCSR.addr) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to create CSR")
base64.encode(derCSR.toSeq, safe = true)


@@ -0,0 +1,33 @@
when defined(libp2p_autotls_support):
import ./service, ./acme/client, ../peeridauth/client
import ../crypto/crypto, ../crypto/rsa, websock/websock
type MockAutotlsService* = ref object of AutotlsService
mockedCert*: TLSCertificate
mockedKey*: TLSPrivateKey
proc new*(
T: typedesc[MockAutotlsService],
rng: ref HmacDrbgContext = newRng(),
config: AutotlsConfig = AutotlsConfig.new(),
): T =
T(
acmeClient:
ACMEClient.new(api = ACMEApi.new(acmeServerURL = config.acmeServerURL)),
brokerClient: PeerIDAuthClient.new(),
bearer: Opt.none(BearerToken),
cert: Opt.none(AutotlsCert),
certReady: newAsyncEvent(),
running: newAsyncEvent(),
config: config,
rng: rng,
)
method getCertWhenReady*(
self: MockAutotlsService
): Future[AutotlsCert] {.async: (raises: [AutoTLSError, CancelledError]).} =
AutotlsCert.new(self.mockedCert, self.mockedKey, Moment.now)
method setup*(self: MockAutotlsService) {.base, async.} =
self.running.fire()


@@ -10,19 +10,17 @@
{.push raises: [].}
{.push public.}
import net, results, json, sequtils
import chronos/apps/http/httpclient, chronos, chronicles, bearssl/rand
import chronos, chronicles, net, results
import chronos/apps/http/httpclient, bearssl/rand
import
./acme/client,
./utils,
../crypto/crypto,
../nameresolving/dnsresolver,
../nameresolving/nameresolver,
../peeridauth/client,
../peerinfo,
../switch,
../utils/heartbeat,
../peerinfo,
../wire
logScope:
@@ -40,6 +38,9 @@ const
DefaultRenewCheckTime* = 1.hours
DefaultRenewBufferTime = 1.hours
DefaultIssueRetries = 3
DefaultIssueRetryTime = 1.seconds
AutoTLSBroker* = "registration.libp2p.direct"
AutoTLSDNSServer* = "libp2p.direct"
HttpOk* = 200
@@ -53,171 +54,238 @@ type SigParam = object
type AutotlsCert* = ref object
cert*: TLSCertificate
privkey*: TLSPrivateKey
expiry*: Moment
type AutotlsConfig* = ref object
acmeServerURL*: Uri
dnsResolver*: DnsResolver
nameResolver*: NameResolver
ipAddress: Opt[IpAddress]
renewCheckTime*: Duration
renewBufferTime*: Duration
issueRetries*: int
issueRetryTime*: Duration
type AutotlsService* = ref object of Service
acmeClient: ACMEClient
acmeClient*: ACMEClient
brokerClient*: PeerIDAuthClient
bearer*: Opt[BearerToken]
brokerClient: PeerIDAuthClient
cert*: Opt[AutotlsCert]
certReady*: AsyncEvent
config: AutotlsConfig
running*: AsyncEvent
config*: AutotlsConfig
managerFut: Future[void]
peerInfo: PeerInfo
rng: ref HmacDrbgContext
rng*: ref HmacDrbgContext
proc new*(T: typedesc[AutotlsCert], cert: TLSCertificate, expiry: Moment): T =
T(cert: cert, expiry: expiry)
when defined(libp2p_autotls_support):
import json, sequtils, bearssl/pem
proc getCertWhenReady*(
self: AutotlsService
): Future[TLSCertificate] {.async: (raises: [AutoTLSError, CancelledError]).} =
await self.certReady.wait()
return self.cert.get.cert
import
../crypto/rsa,
../utils/heartbeat,
../transports/transport,
../transports/tcptransport,
../nameresolving/dnsresolver
proc new*(
T: typedesc[AutotlsConfig],
ipAddress: Opt[IpAddress] = NoneIp,
nameServers: seq[TransportAddress] = DefaultDnsServers,
acmeServerURL: Uri = parseUri(LetsEncryptURL),
renewCheckTime: Duration = DefaultRenewCheckTime,
renewBufferTime: Duration = DefaultRenewBufferTime,
): T =
T(
dnsResolver: DnsResolver.new(nameServers),
acmeServerURL: acmeServerURL,
ipAddress: ipAddress,
renewCheckTime: renewCheckTime,
renewBufferTime: renewBufferTime,
)
proc new*(
T: typedesc[AutotlsCert],
cert: TLSCertificate,
privkey: TLSPrivateKey,
expiry: Moment,
): T =
T(cert: cert, privkey: privkey, expiry: expiry)
proc new*(
T: typedesc[AutotlsService],
rng: ref HmacDrbgContext = newRng(),
config: AutotlsConfig = AutotlsConfig.new(),
): T =
T(
acmeClient: ACMEClient.new(api = ACMEApi.new(acmeServerURL = config.acmeServerURL)),
brokerClient: PeerIDAuthClient.new(),
bearer: Opt.none(BearerToken),
cert: Opt.none(AutotlsCert),
certReady: newAsyncEvent(),
config: config,
managerFut: nil,
peerInfo: nil,
rng: rng,
)
method getCertWhenReady*(
self: AutotlsService
): Future[AutotlsCert] {.base, async: (raises: [AutoTLSError, CancelledError]).} =
await self.certReady.wait()
return self.cert.get
method setup*(
self: AutotlsService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
trace "Setting up AutotlsService"
let hasBeenSetup = await procCall Service(self).setup(switch)
if hasBeenSetup:
self.peerInfo = switch.peerInfo
if self.config.ipAddress.isNone():
try:
self.config.ipAddress = Opt.some(getPublicIPAddress())
except AutoTLSError as exc:
error "Failed to get public IP address", err = exc.msg
return false
self.managerFut = self.run(switch)
return hasBeenSetup
method issueCertificate(
self: AutotlsService
) {.base, async: (raises: [AutoTLSError, ACMEError, PeerIDAuthError, CancelledError]).} =
trace "Issuing certificate"
assert not self.peerInfo.isNil(), "Cannot issue new certificate: peerInfo not set"
# generate autotls domain string: "*.{peerID}.libp2p.direct"
let baseDomain =
api.Domain(encodePeerId(self.peerInfo.peerId) & "." & AutoTLSDNSServer)
let domain = api.Domain("*." & baseDomain)
let acmeClient = self.acmeClient
trace "Requesting ACME challenge"
let dns01Challenge = await acmeClient.getChallenge(@[domain])
let keyAuth = acmeClient.genKeyAuthorization(dns01Challenge.dns01.token)
let strMultiaddresses: seq[string] = self.peerInfo.addrs.mapIt($it)
let payload = %*{"value": keyAuth, "addresses": strMultiaddresses}
let registrationURL = parseUri("https://" & AutoTLSBroker & "/v1/_acme-challenge")
trace "Sending challenge to AutoTLS broker"
let (bearer, response) =
await self.brokerClient.send(registrationURL, self.peerInfo, payload, self.bearer)
if self.bearer.isNone():
# save bearer token for future use
self.bearer = Opt.some(bearer)
if response.status != HttpOk:
raise newException(
AutoTLSError, "Failed to authenticate with AutoTLS Broker at " & AutoTLSBroker
proc new*(
T: typedesc[AutotlsConfig],
ipAddress: Opt[IpAddress] = NoneIp,
nameServers: seq[TransportAddress] = DefaultDnsServers,
acmeServerURL: Uri = parseUri(LetsEncryptURL),
renewCheckTime: Duration = DefaultRenewCheckTime,
renewBufferTime: Duration = DefaultRenewBufferTime,
issueRetries: int = DefaultIssueRetries,
issueRetryTime: Duration = DefaultIssueRetryTime,
): T =
T(
nameResolver: DnsResolver.new(nameServers),
acmeServerURL: acmeServerURL,
ipAddress: ipAddress,
renewCheckTime: renewCheckTime,
renewBufferTime: renewBufferTime,
issueRetries: issueRetries,
issueRetryTime: issueRetryTime,
)
debug "Waiting for DNS record to be set"
let dnsSet = await checkDNSRecords(
self.config.dnsResolver, self.config.ipAddress.get(), baseDomain, keyAuth
)
if not dnsSet:
raise newException(AutoTLSError, "DNS records not set")
proc new*(
T: typedesc[AutotlsService],
rng: ref HmacDrbgContext = newRng(),
config: AutotlsConfig = AutotlsConfig.new(),
): T =
T(
acmeClient:
ACMEClient.new(api = ACMEApi.new(acmeServerURL = config.acmeServerURL)),
brokerClient: PeerIDAuthClient.new(),
bearer: Opt.none(BearerToken),
cert: Opt.none(AutotlsCert),
certReady: newAsyncEvent(),
running: newAsyncEvent(),
config: config,
managerFut: nil,
peerInfo: nil,
rng: rng,
)
debug "Notifying challenge completion to ACME and downloading cert"
let certResponse = await acmeClient.getCertificate(domain, dns01Challenge)
method setup*(
self: AutotlsService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
trace "Setting up AutotlsService"
let hasBeenSetup = await procCall Service(self).setup(switch)
if hasBeenSetup:
if self.config.ipAddress.isNone():
try:
self.config.ipAddress = Opt.some(getPublicIPAddress())
except AutoTLSError as exc:
error "Failed to get public IP address", err = exc.msg
return false
self.managerFut = self.run(switch)
return hasBeenSetup
debug "Installing certificate"
let newCert =
try:
AutotlsCert.new(
TLSCertificate.init(certResponse.rawCertificate),
asMoment(certResponse.certificateExpiry),
)
except TLSStreamProtocolError:
raise newException(AutoTLSError, "Could not parse downloaded certificates")
self.cert = Opt.some(newCert)
self.certReady.fire()
debug "Certificate installed"
method issueCertificate(
self: AutotlsService
): Future[bool] {.
base, async: (raises: [AutoTLSError, ACMEError, PeerIDAuthError, CancelledError])
.} =
trace "Issuing certificate"
method run*(
self: AutotlsService, switch: Switch
) {.async: (raises: [CancelledError]).} =
heartbeat "Certificate Management", self.config.renewCheckTime:
if self.cert.isNone():
if self.peerInfo.isNil():
error "Cannot issue new certificate: peerInfo not set"
return false
# generate autotls domain string: "*.{peerID}.libp2p.direct"
let baseDomain =
api.Domain(encodePeerId(self.peerInfo.peerId) & "." & AutoTLSDNSServer)
let domain = api.Domain("*." & baseDomain)
let acmeClient = self.acmeClient
trace "Requesting ACME challenge"
let dns01Challenge = await acmeClient.getChallenge(@[domain])
trace "Generating key authorization"
let keyAuth = acmeClient.genKeyAuthorization(dns01Challenge.dns01.token)
let addrs = await self.peerInfo.expandAddrs()
if addrs.len == 0:
error "Unable to authenticate with broker: no addresses"
return false
let strMultiaddresses: seq[string] = addrs.mapIt($it)
let payload = %*{"value": keyAuth, "addresses": strMultiaddresses}
let registrationURL = parseUri("https://" & AutoTLSBroker & "/v1/_acme-challenge")
trace "Sending challenge to AutoTLS broker"
let (bearer, response) =
await self.brokerClient.send(registrationURL, self.peerInfo, payload, self.bearer)
if self.bearer.isNone():
# save bearer token for future use
self.bearer = Opt.some(bearer)
if response.status != HttpOk:
error "Failed to authenticate with AutoTLS Broker at " & AutoTLSBroker
debug "Broker message",
body = bytesToString(response.body), peerinfo = self.peerInfo
return false
let dashedIpAddr = ($self.config.ipAddress.get()).replace(".", "-")
let acmeChalDomain = api.Domain("_acme-challenge." & baseDomain)
let ip4Domain = api.Domain(dashedIpAddr & "." & baseDomain)
debug "Waiting for DNS record to be set", ip = ip4Domain, acme = acmeChalDomain
let dnsSet = await checkDNSRecords(
self.config.nameResolver, self.config.ipAddress.get(), baseDomain, keyAuth
)
if not dnsSet:
error "DNS records not set"
return false
trace "Notifying challenge completion to ACME and downloading cert"
let certKeyPair = KeyPair.random(PKScheme.RSA, self.rng[]).get()
let certificate =
await acmeClient.getCertificate(domain, certKeyPair, dns01Challenge)
let derPrivKey = certKeyPair.seckey.rsakey.getBytes.valueOr:
raise newException(AutoTLSError, "Unable to get TLS private key")
let pemPrivKey: string = derPrivKey.pemEncode("PRIVATE KEY")
debug "autotls cert", pemPrivKey = pemPrivKey, cert = certificate.rawCertificate
trace "Installing certificate"
let newCert =
try:
await self.issueCertificate()
AutotlsCert.new(
TLSCertificate.init(certificate.rawCertificate),
TLSPrivateKey.init(pemPrivKey),
asMoment(certificate.certificateExpiry),
)
except TLSStreamProtocolError:
error "Could not parse downloaded certificates"
return false
self.cert = Opt.some(newCert)
self.certReady.fire()
trace "Certificate installed"
true
proc hasTcpStarted(switch: Switch): bool =
switch.transports.filterIt(it of TcpTransport and it.running).len > 0
proc tryIssueCertificate(self: AutotlsService) {.async: (raises: [CancelledError]).} =
for _ in 0 ..< self.config.issueRetries:
try:
if await self.issueCertificate():
return
except CancelledError as exc:
raise exc
except CatchableError as exc:
error "Failed to issue certificate", err = exc.msg
break
await sleepAsync(self.config.issueRetryTime)
error "Failed to issue certificate"
# AutotlsService will renew the cert 1h before it expires
let cert = self.cert.get
let waitTime = cert.expiry - Moment.now - self.config.renewBufferTime
if waitTime <= self.config.renewBufferTime:
try:
await self.issueCertificate()
except CancelledError as exc:
raise exc
except CatchableError as exc:
error "Failed to renew certificate", err = exc.msg
break
method run*(
self: AutotlsService, switch: Switch
) {.async: (raises: [CancelledError]).} =
trace "Starting Autotls management"
self.running.fire()
self.peerInfo = switch.peerInfo
method stop*(
self: AutotlsService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
await self.acmeClient.close()
await self.brokerClient.close()
await self.managerFut.cancelAndWait()
self.managerFut = nil
return hasBeenStopped
# ensure that there's at least one TcpTransport running
# for communicating with autotls broker
if not switch.hasTcpStarted():
error "Could not find a running TcpTransport in switch"
return
heartbeat "Certificate Management", self.config.renewCheckTime:
if self.cert.isNone():
await self.tryIssueCertificate()
# AutotlsService will renew the cert 1h before it expires
let cert = self.cert.get
let waitTime = cert.expiry - Moment.now - self.config.renewBufferTime
if waitTime <= self.config.renewBufferTime:
await self.tryIssueCertificate()
method stop*(
self: AutotlsService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
if not self.acmeClient.isNil():
await self.acmeClient.close()
if not self.brokerClient.isNil():
await self.brokerClient.close()
if not self.managerFut.isNil():
await self.managerFut.cancelAndWait()
self.managerFut = nil
return hasBeenStopped


@@ -6,104 +6,107 @@
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
{.push public.}
import net, strutils
from times import DateTime, toTime, toUnix
import chronos, chronicles
import ../errors
import chronos, stew/base36, chronicles
import
./acme/client,
../errors,
../peerid,
../multihash,
../cid,
../multicodec,
../nameresolving/dnsresolver
logScope:
topics = "libp2p utils"
const
DefaultDnsRetries = 10
DefaultDnsRetries = 3
DefaultDnsRetryTime = 1.seconds
type AutoTLSError* = object of LPError
proc checkedGetPrimaryIPAddr*(): IpAddress {.raises: [AutoTLSError].} =
# This wrapper avoids catching Exception at call sites: we still support
# Nim 1.6.16, where getPrimaryIPAddr had no explicit .raises. pragma
try:
return getPrimaryIPAddr()
except Exception as exc:
raise newException(AutoTLSError, "Error while getting primary IP address", exc)
when defined(libp2p_autotls_support):
import net, strutils
from times import DateTime, toTime, toUnix
import stew/base36
import
../peerid,
../multihash,
../cid,
../multicodec,
../nameresolving/nameresolver,
./acme/client
proc isIPv4*(ip: IpAddress): bool =
ip.family == IpAddressFamily.IPv4
proc isPublic*(ip: IpAddress): bool {.raises: [AutoTLSError].} =
let ip = $ip
try:
not (
ip.startsWith("10.") or
(ip.startsWith("172.") and parseInt(ip.split(".")[1]) in 16 .. 31) or
ip.startsWith("192.168.") or ip.startsWith("127.") or ip.startsWith("169.254.")
)
except ValueError as exc:
raise newException(AutoTLSError, "Failed to parse IP address", exc)
proc getPublicIPAddress*(): IpAddress {.raises: [AutoTLSError].} =
let ip = checkedGetPrimaryIPAddr()
if not ip.isIPv4():
raise newException(AutoTLSError, "Host does not have an IPv4 address")
if not ip.isPublic():
raise newException(AutoTLSError, "Host does not have a public IPv4 address")
return ip
proc asMoment*(dt: DateTime): Moment =
let unixTime: int64 = dt.toTime.toUnix
return Moment.init(unixTime, Second)
proc encodePeerId*(peerId: PeerId): string {.raises: [AutoTLSError].} =
var mh: MultiHash
let decodeResult = MultiHash.decode(peerId.data, mh)
if decodeResult.isErr() or decodeResult.get() == -1:
raise
newException(AutoTLSError, "Failed to decode PeerId: invalid multihash format")
let cidResult = Cid.init(CIDv1, multiCodec("libp2p-key"), mh)
if cidResult.isErr():
raise newException(AutoTLSError, "Failed to initialize CID from multihash")
return Base36.encode(cidResult.get().data.buffer)
proc checkDNSRecords*(
dnsResolver: DnsResolver,
ipAddress: IpAddress,
baseDomain: api.Domain,
keyAuth: KeyAuthorization,
retries: int = DefaultDnsRetries,
): Future[bool] {.async: (raises: [AutoTLSError, CancelledError]).} =
# if my ip address is 100.10.10.3 then the ip4Domain will be:
# 100-10-10-3.{peerIdBase36}.libp2p.direct
# and acme challenge TXT domain will be:
# _acme-challenge.{peerIdBase36}.libp2p.direct
let dashedIpAddr = ($ipAddress).replace(".", "-")
let acmeChalDomain = api.Domain("_acme-challenge." & baseDomain)
let ip4Domain = api.Domain(dashedIpAddr & "." & baseDomain)
var txt: seq[string]
var ip4: seq[TransportAddress]
for _ in 0 .. retries:
txt = await dnsResolver.resolveTxt(acmeChalDomain)
proc checkedGetPrimaryIPAddr*(): IpAddress {.raises: [AutoTLSError].} =
# This wrapper avoids catching Exception at call sites: we still support
# Nim 1.6.16, where getPrimaryIPAddr had no explicit .raises. pragma
try:
ip4 = await dnsResolver.resolveIp(ip4Domain, 0.Port)
except CancelledError as exc:
raise exc
except CatchableError as exc:
error "Failed to resolve IP", description = exc.msg # retry
if txt.len > 0 and txt[0] == keyAuth and ip4.len > 0:
return true
await sleepAsync(DefaultDnsRetryTime)
return getPrimaryIPAddr()
except Exception as exc:
raise newException(AutoTLSError, "Error while getting primary IP address", exc)
return false
proc isIPv4*(ip: IpAddress): bool =
ip.family == IpAddressFamily.IPv4
proc isPublic*(ip: IpAddress): bool {.raises: [AutoTLSError].} =
let ip = $ip
try:
not (
ip.startsWith("10.") or
(ip.startsWith("172.") and parseInt(ip.split(".")[1]) in 16 .. 31) or
ip.startsWith("192.168.") or ip.startsWith("127.") or ip.startsWith("169.254.")
)
except ValueError as exc:
raise newException(AutoTLSError, "Failed to parse IP address", exc)
proc getPublicIPAddress*(): IpAddress {.raises: [AutoTLSError].} =
let ip = checkedGetPrimaryIPAddr()
if not ip.isIPv4():
raise newException(AutoTLSError, "Host does not have an IPv4 address")
if not ip.isPublic():
raise newException(AutoTLSError, "Host does not have a public IPv4 address")
return ip
proc asMoment*(dt: DateTime): Moment =
let unixTime: int64 = dt.toTime.toUnix
return Moment.init(unixTime, Second)
proc encodePeerId*(peerId: PeerId): string {.raises: [AutoTLSError].} =
var mh: MultiHash
let decodeResult = MultiHash.decode(peerId.data, mh)
if decodeResult.isErr() or decodeResult.get() == -1:
raise
newException(AutoTLSError, "Failed to decode PeerId: invalid multihash format")
let cidResult = Cid.init(CIDv1, multiCodec("libp2p-key"), mh)
if cidResult.isErr():
raise newException(AutoTLSError, "Failed to initialize CID from multihash")
return Base36.encode(cidResult.get().data.buffer)
proc checkDNSRecords*(
nameResolver: NameResolver,
ipAddress: IpAddress,
baseDomain: api.Domain,
keyAuth: KeyAuthorization,
retries: int = DefaultDnsRetries,
): Future[bool] {.async: (raises: [AutoTLSError, CancelledError]).} =
# if my ip address is 100.10.10.3 then the ip4Domain will be:
# 100-10-10-3.{peerIdBase36}.libp2p.direct
# and acme challenge TXT domain will be:
# _acme-challenge.{peerIdBase36}.libp2p.direct
let dashedIpAddr = ($ipAddress).replace(".", "-")
let acmeChalDomain = api.Domain("_acme-challenge." & baseDomain)
let ip4Domain = api.Domain(dashedIpAddr & "." & baseDomain)
var txt: seq[string]
var ip4: seq[TransportAddress]
for _ in 0 .. retries:
txt = await nameResolver.resolveTxt(acmeChalDomain)
try:
ip4 = await nameResolver.resolveIp(ip4Domain, 0.Port)
except CancelledError as exc:
raise exc
except CatchableError as exc:
error "Failed to resolve IP", description = exc.msg # retry
if txt.len > 0 and txt[0] == keyAuth and ip4.len > 0:
return true
await sleepAsync(DefaultDnsRetryTime)
return false
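To make the comment above concrete, a worked instance of the naming scheme, with an illustrative (truncated) base36 PeerId:

#   ipAddress      = 100.10.10.3
#   baseDomain     = "k51qzi...libp2p.direct"
#   ip4Domain      = "100-10-10-3.k51qzi...libp2p.direct"     A record -> 100.10.10.3
#   acmeChalDomain = "_acme-challenge.k51qzi...libp2p.direct"  TXT -> keyAuth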


@@ -15,7 +15,7 @@ runnableExamples:
{.push raises: [].}
import options, tables, chronos, chronicles, sequtils, uri
import options, tables, chronos, chronicles, sequtils
import
switch,
peerid,
@@ -43,9 +43,16 @@ export
const MemoryAutoAddress* = memorytransport.MemoryAutoAddress
type
TransportProvider* {.public.} = proc(
upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
): Transport {.gcsafe, raises: [].}
TransportProvider* {.deprecated: "Use TransportBuilder instead".} =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport {.gcsafe, raises: [].}
TransportBuilder* {.public.} =
proc(config: TransportConfig): Transport {.gcsafe, raises: [].}
TransportConfig* = ref object
upgr*: Upgrade
privateKey*: PrivateKey
autotls*: AutotlsService
SecureProtocol* {.pure.} = enum
Noise
@@ -55,7 +62,7 @@ type
addresses: seq[MultiAddress]
secureManagers: seq[SecureProtocol]
muxers: seq[MuxerProvider]
transports: seq[TransportProvider]
transports: seq[TransportBuilder]
rng: ref HmacDrbgContext
maxConnections: int
maxIn: int
@@ -152,28 +159,42 @@ proc withNoise*(b: SwitchBuilder): SwitchBuilder {.public.} =
b
proc withTransport*(
b: SwitchBuilder, prov: TransportProvider
b: SwitchBuilder, prov: TransportBuilder
): SwitchBuilder {.public.} =
## Use a custom transport
runnableExamples:
let switch = SwitchBuilder
.new()
.withTransport(
proc(
upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
): Transport =
TcpTransport.new(flags, upgr)
proc(config: TransportConfig): Transport =
TcpTransport.new(flags, config.upgr)
)
.build()
b.transports.add(prov)
b
proc withTransport*(
b: SwitchBuilder, prov: TransportProvider
): SwitchBuilder {.deprecated: "Use TransportBuilder instead".} =
## Use a custom transport
runnableExamples:
let switch = SwitchBuilder
.new()
.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TcpTransport.new(flags, upgr)
)
.build()
let tBuilder: TransportBuilder = proc(config: TransportConfig): Transport =
prov(config.upgr, config.privateKey)
b.withTransport(tBuilder)
proc withTcpTransport*(
b: SwitchBuilder, flags: set[ServerFlags] = {}
): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
TcpTransport.new(flags, upgr)
proc(config: TransportConfig): Transport =
TcpTransport.new(flags, config.upgr)
)
proc withWsTransport*(
@@ -184,8 +205,10 @@ proc withWsTransport*(
flags: set[ServerFlags] = {},
): SwitchBuilder =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
WsTransport.new(upgr, tlsPrivateKey, tlsCertificate, tlsFlags, flags)
proc(config: TransportConfig): Transport =
WsTransport.new(
config.upgr, tlsPrivateKey, tlsCertificate, config.autotls, tlsFlags, flags
)
)
when defined(libp2p_quic_support):
@@ -193,14 +216,14 @@ when defined(libp2p_quic_support):
proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
QuicTransport.new(upgr, privateKey)
proc(config: TransportConfig): Transport =
QuicTransport.new(config.upgr, config.privateKey)
)
proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
MemoryTransport.new(upgr)
proc(config: TransportConfig): Transport =
MemoryTransport.new(config.upgr)
)
proc withRng*(b: SwitchBuilder, rng: ref HmacDrbgContext): SwitchBuilder {.public.} =
@@ -257,11 +280,12 @@ proc withAutonat*(b: SwitchBuilder): SwitchBuilder =
b.autonat = true
b
proc withAutotls*(
b: SwitchBuilder, config: AutotlsConfig = AutotlsConfig.new()
): SwitchBuilder {.public.} =
b.autotls = AutotlsService.new(config = config)
b
when defined(libp2p_autotls_support):
proc withAutotls*(
b: SwitchBuilder, config: AutotlsConfig = AutotlsConfig.new()
): SwitchBuilder {.public.} =
b.autotls = AutotlsService.new(config = config)
b
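A hedged sketch of where withAutotls slots into a typical builder chain; it is only available when compiling with -d:libp2p_autotls_support, and the surrounding calls are the usual SwitchBuilder ones:

let switch = SwitchBuilder
  .new()
  .withRng(newRng())
  .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
  .withTcpTransport()
  .withAutotls() # or withAutotls(AutotlsConfig.new(...)) to tune renewal timing
  .withMplex()
  .withNoise()
  .build()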
proc withCircuitRelay*(b: SwitchBuilder, r: Relay = Relay.new()): SwitchBuilder =
b.circuitRelay = r
@@ -320,7 +344,11 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
let transports = block:
var transports: seq[Transport]
for tProvider in b.transports:
transports.add(tProvider(muxedUpgrade, seckey, b.autotls))
transports.add(
tProvider(
TransportConfig(upgr: muxedUpgrade, privateKey: seckey, autotls: b.autotls)
)
)
transports
if b.secureManagers.len == 0:


@@ -15,6 +15,7 @@
import tables, hashes
import multibase, multicodec, multihash, vbuffer, varint, results
import stew/base58
import ./utils/sequninit
export results
@@ -123,7 +124,7 @@ proc decode(data: openArray[char]): Result[Cid, CidError] =
return err(CidError.Incorrect)
if len(data) == 46:
if data[0] == 'Q' and data[1] == 'm':
buffer = newSeqUninitialized[byte](BTCBase58.decodedLength(len(data)))
buffer = newSeqUninit[byte](BTCBase58.decodedLength(len(data)))
if BTCBase58.decode(data, buffer, plen) != Base58Status.Success:
return err(CidError.Incorrect)
buffer.setLen(plen)
@@ -131,7 +132,7 @@ proc decode(data: openArray[char]): Result[Cid, CidError] =
let length = MultiBase.decodedLength(data[0], len(data))
if length == -1:
return err(CidError.Incorrect)
buffer = newSeqUninitialized[byte](length)
buffer = newSeqUninit[byte](length)
if MultiBase.decode(data, buffer, plen) != MultiBaseStatus.Success:
return err(CidError.Incorrect)
buffer.setLen(plen)
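This hunk, and the analogous ones in the files below, migrate byte buffers from newSeqUninitialized to the new utils/sequninit helper. A minimal sketch of what that helper presumably looks like (the real module may differ):

# utils/sequninit.nim, sketched: identical allocation behavior, but the
# generic constraint keeps uninitialized memory away from non-byte types
proc newSeqUninit*[T: byte](len: int): seq[T] =
  newSeqUninitialized[T](len)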


@@ -11,6 +11,7 @@
{.push raises: [].}
from strutils import split, strip, cmpIgnoreCase
import ../utils/sequninit
const libp2p_pki_schemes* {.strdefine.} = "rsa,ed25519,secp256k1,ecnist"
@@ -176,7 +177,7 @@ proc shuffle*[T](rng: ref HmacDrbgContext, x: var openArray[T]) =
if x.len == 0:
return
var randValues = newSeqUninitialized[byte](len(x) * 2)
var randValues = newSeqUninit[byte](len(x) * 2)
hmacDrbgGenerate(rng[], randValues)
for i in countdown(x.high, 1):
@@ -873,7 +874,7 @@ proc stretchKeys*(
var seed = "key expansion"
result.macsize = 20
let length = result.ivsize + result.keysize + result.macsize
result.data = newSeqUninitialized[byte](2 * length)
result.data = newSeqUninit[byte](2 * length)
if hashType == "SHA256":
makeSecret(result.data, HMAC[sha256], sharedSecret, seed)
@@ -904,7 +905,7 @@ template macOpenArray*(secret: Secret, id: int): untyped =
proc iv*(secret: Secret, id: int): seq[byte] {.inline.} =
## Get array of bytes with the initial vector.
result = newSeqUninitialized[byte](secret.ivsize)
result = newSeqUninit[byte](secret.ivsize)
var offset =
if id == 0:
0
@@ -913,7 +914,7 @@ proc iv*(secret: Secret, id: int): seq[byte] {.inline.} =
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.ivsize)
proc key*(secret: Secret, id: int): seq[byte] {.inline.} =
result = newSeqUninitialized[byte](secret.keysize)
result = newSeqUninit[byte](secret.keysize)
var offset =
if id == 0:
0
@@ -923,7 +924,7 @@ proc key*(secret: Secret, id: int): seq[byte] {.inline.} =
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.keysize)
proc mac*(secret: Secret, id: int): seq[byte] {.inline.} =
result = newSeqUninitialized[byte](secret.macsize)
result = newSeqUninit[byte](secret.macsize)
var offset =
if id == 0:
0


@@ -23,6 +23,7 @@ import minasn1
export minasn1.Asn1Error
import stew/ctops
import results
import ../utils/sequninit
import ../utility
@@ -458,7 +459,7 @@ proc getBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
if isNil(seckey):
return err(EcKeyIncorrectError)
if seckey.key.curve in EcSupportedCurvesCint:
var res = newSeqUninitialized[byte](0)
var res = newSeqUninit[byte](0)
let length = ?seckey.toBytes(res)
res.setLen(length)
discard ?seckey.toBytes(res)
@@ -471,7 +472,7 @@ proc getBytes*(pubkey: EcPublicKey): EcResult[seq[byte]] =
if isNil(pubkey):
return err(EcKeyIncorrectError)
if pubkey.key.curve in EcSupportedCurvesCint:
var res = newSeqUninitialized[byte](0)
var res = newSeqUninit[byte](0)
let length = ?pubkey.toBytes(res)
res.setLen(length)
discard ?pubkey.toBytes(res)
@@ -483,7 +484,7 @@ proc getBytes*(sig: EcSignature): EcResult[seq[byte]] =
## Serialize EC signature ``sig`` to ASN.1 DER binary form and return it.
if isNil(sig):
return err(EcSignatureError)
var res = newSeqUninitialized[byte](0)
var res = newSeqUninit[byte](0)
let length = ?sig.toBytes(res)
res.setLen(length)
discard ?sig.toBytes(res)
@@ -494,7 +495,7 @@ proc getRawBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
if isNil(seckey):
return err(EcKeyIncorrectError)
if seckey.key.curve in EcSupportedCurvesCint:
var res = newSeqUninitialized[byte](0)
var res = newSeqUninit[byte](0)
let length = ?seckey.toRawBytes(res)
res.setLen(length)
discard ?seckey.toRawBytes(res)
@@ -507,7 +508,7 @@ proc getRawBytes*(pubkey: EcPublicKey): EcResult[seq[byte]] =
if isNil(pubkey):
return err(EcKeyIncorrectError)
if pubkey.key.curve in EcSupportedCurvesCint:
var res = newSeqUninitialized[byte](0)
var res = newSeqUninit[byte](0)
let length = ?pubkey.toRawBytes(res)
res.setLen(length)
discard ?pubkey.toRawBytes(res)
@@ -519,7 +520,7 @@ proc getRawBytes*(sig: EcSignature): EcResult[seq[byte]] =
## Serialize EC signature ``sig`` to raw binary form and return it.
if isNil(sig):
return err(EcSignatureError)
var res = newSeqUninitialized[byte](0)
var res = newSeqUninit[byte](0)
let length = ?sig.toBytes(res)
res.setLen(length)
discard ?sig.toBytes(res)
@@ -929,7 +930,7 @@ proc getSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey): seq[byte] =
var data: array[Secret521Length, byte]
let res = toSecret(pubkey, seckey, data)
if res > 0:
result = newSeqUninitialized[byte](res)
result = newSeqUninit[byte](res)
copyMem(addr result[0], addr data[0], res)
proc sign*[T: byte | char](
@@ -943,7 +944,7 @@ proc sign*[T: byte | char](
var impl = ecGetDefault()
if seckey.key.curve in EcSupportedCurvesCint:
var sig = new EcSignature
sig.buffer = newSeqUninitialized[byte](256)
sig.buffer = newSeqUninit[byte](256)
var kv = addr sha256Vtable
kv.init(addr hc.vtable)
if len(message) > 0:


@@ -17,6 +17,7 @@ export results
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import ../utility
import ../utils/sequninit
type
Asn1Error* {.pure.} = enum
@@ -679,15 +680,15 @@ proc init*(t: typedesc[Asn1Buffer], data: string): Asn1Buffer =
proc init*(t: typedesc[Asn1Buffer]): Asn1Buffer =
## Initialize empty ``Asn1Buffer``.
Asn1Buffer(buffer: newSeqUninitialized[byte](0))
Asn1Buffer(buffer: newSeqUninit[byte](0))
proc init*(t: typedesc[Asn1Composite], tag: Asn1Tag): Asn1Composite =
## Initialize ``Asn1Composite`` with tag ``tag``.
Asn1Composite(tag: tag, buffer: newSeqUninitialized[byte](0))
Asn1Composite(tag: tag, buffer: newSeqUninit[byte](0))
proc init*(t: typedesc[Asn1Composite], idx: int): Asn1Composite =
## Initialize ``Asn1Composite`` with tag context-specific id ``id``.
Asn1Composite(tag: Asn1Tag.Context, idx: idx, buffer: newSeqUninitialized[byte](0))
Asn1Composite(tag: Asn1Tag.Context, idx: idx, buffer: newSeqUninit[byte](0))
proc `$`*(buffer: Asn1Buffer): string =
## Return string representation of ``buffer``.


@@ -21,6 +21,7 @@ import results
import stew/ctops
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import ../utils/sequninit
export Asn1Error, results
@@ -124,7 +125,7 @@ proc random*[T: RsaKP](
length = eko + ((bits + 7) shr 3)
let res = new T
res.buffer = newSeqUninitialized[byte](length)
res.buffer = newSeqUninit[byte](length)
var keygen = rsaKeygenGetDefault()
@@ -169,7 +170,7 @@ proc copy*[T: RsaPKI](key: T): T =
key.seck.dqlen.uint + key.seck.iqlen.uint + key.pubk.nlen.uint +
key.pubk.elen.uint + key.pexplen.uint
result = new RsaPrivateKey
result.buffer = newSeqUninitialized[byte](length)
result.buffer = newSeqUninit[byte](length)
let po: uint = 0
let qo = po + key.seck.plen
let dpo = qo + key.seck.qlen
@@ -207,7 +208,7 @@ proc copy*[T: RsaPKI](key: T): T =
if len(key.buffer) > 0:
let length = key.key.nlen + key.key.elen
result = new RsaPublicKey
result.buffer = newSeqUninitialized[byte](length)
result.buffer = newSeqUninit[byte](length)
let no = 0
let eo = no + key.key.nlen
copyMem(addr result.buffer[no], key.key.n, key.key.nlen)
@@ -226,7 +227,7 @@ proc getPublicKey*(key: RsaPrivateKey): RsaPublicKey =
doAssert(not isNil(key))
let length = key.pubk.nlen + key.pubk.elen
result = new RsaPublicKey
result.buffer = newSeqUninitialized[byte](length)
result.buffer = newSeqUninit[byte](length)
result.key.n = addr result.buffer[0]
result.key.e = addr result.buffer[key.pubk.nlen]
copyMem(addr result.buffer[0], cast[pointer](key.pubk.n), key.pubk.nlen)
@@ -357,7 +358,7 @@ proc getBytes*(key: RsaPrivateKey): RsaResult[seq[byte]] =
## return it.
if isNil(key):
return err(RsaKeyIncorrectError)
var res = newSeqUninitialized[byte](4096)
var res = newSeqUninit[byte](4096)
let length = ?key.toBytes(res)
if length > 0:
res.setLen(length)
@@ -370,7 +371,7 @@ proc getBytes*(key: RsaPublicKey): RsaResult[seq[byte]] =
## return it.
if isNil(key):
return err(RsaKeyIncorrectError)
var res = newSeqUninitialized[byte](4096)
var res = newSeqUninit[byte](4096)
let length = ?key.toBytes(res)
if length > 0:
res.setLen(length)
@@ -382,7 +383,7 @@ proc getBytes*(sig: RsaSignature): RsaResult[seq[byte]] =
## Serialize RSA signature ``sig`` to raw binary form and return it.
if isNil(sig):
return err(RsaSignatureError)
var res = newSeqUninitialized[byte](4096)
var res = newSeqUninit[byte](4096)
let length = ?sig.toBytes(res)
if length > 0:
res.setLen(length)
@@ -753,7 +754,7 @@ proc sign*[T: byte | char](
var hash: array[32, byte]
let impl = rsaPkcs1SignGetDefault()
var res = new RsaSignature
res.buffer = newSeqUninitialized[byte]((key.seck.nBitlen + 7) shr 3)
res.buffer = newSeqUninit[byte]((key.seck.nBitlen + 7) shr 3)
var kv = addr sha256Vtable
kv.init(addr hc.vtable)
if len(message) > 0:


@@ -11,6 +11,7 @@
import bearssl/rand
import secp256k1, results, stew/byteutils, nimcrypto/[hash, sha2]
import ../utils/sequninit
export sha2, results, rand
@@ -182,7 +183,7 @@ proc getBytes*(key: SkPublicKey): seq[byte] {.inline.} =
proc getBytes*(sig: SkSignature): seq[byte] {.inline.} =
## Serialize Secp256k1 `signature` and return it.
result = newSeqUninitialized[byte](72)
result = newSeqUninit[byte](72)
let length = toBytes(sig, result)
result.setLen(length)


@@ -15,6 +15,7 @@ import pkg/[chronos, chronicles]
import ../varint, ../multiaddress, ../multicodec, ../cid, ../peerid
import ../wire, ../multihash, ../protobuf/minprotobuf, ../errors
import ../crypto/crypto, ../utility
import ../utils/sequninit
export peerid, multiaddress, multicodec, multihash, cid, crypto, wire, errors
@@ -496,7 +497,7 @@ proc recvMessage(
size: uint
length: int
res: VarintResult[void]
var buffer = newSeqUninitialized[byte](10)
var buffer = newSeqUninit[byte](10)
try:
for i in 0 ..< len(buffer):
await conn.readExactly(addr buffer[i], 1)
@@ -957,7 +958,7 @@ proc openStream*(
var res: seq[byte]
if pb.getRequiredField(ResponseType.STREAMINFO.int, res).isOk():
let resPb = initProtoBuffer(res)
var raddress = newSeqUninitialized[byte](0)
var raddress = newSeqUninit[byte](0)
stream.protocol = ""
resPb.getRequiredField(1, stream.peer).tryGet()
resPb.getRequiredField(2, raddress).tryGet()
@@ -976,7 +977,7 @@ proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
var message = await transp.recvMessage()
var pb = initProtoBuffer(message)
var stream = new P2PStream
var raddress = newSeqUninitialized[byte](0)
var raddress = newSeqUninit[byte](0)
stream.protocol = ""
pb.getRequiredField(1, stream.peer).tryGet()
pb.getRequiredField(2, raddress).tryGet()
@@ -1115,7 +1116,7 @@ proc dhtGetSinglePeerInfo(pb: ProtoBuffer): PeerInfo {.raises: [DaemonLocalError
raise newException(DaemonLocalError, "Missing required field `peer`!")
proc dhtGetSingleValue(pb: ProtoBuffer): seq[byte] {.raises: [DaemonLocalError].} =
result = newSeqUninitialized[byte](0)
result = newSeqUninit[byte](0)
if pb.getRequiredField(3, result).isErr():
raise newException(DaemonLocalError, "Missing field `value`!")
@@ -1452,8 +1453,8 @@ proc pubsubPublish*(
await api.closeConnection(transp)
proc getPubsubMessage*(pb: ProtoBuffer): PubSubMessage =
result.data = newSeqUninitialized[byte](0)
result.seqno = newSeqUninitialized[byte](0)
result.data = newSeqUninit[byte](0)
result.seqno = newSeqUninit[byte](0)
discard pb.getField(1, result.peer)
discard pb.getField(2, result.data)
discard pb.getField(3, result.seqno)


@@ -104,12 +104,13 @@ proc expandDnsAddr(
): Future[seq[(MultiAddress, Opt[PeerId])]] {.
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
.} =
if not DNSADDR.matchPartial(address):
if not DNS.matchPartial(address):
return @[(address, peerId)]
if isNil(self.nameResolver):
info "Can't resolve DNSADDR without NameResolver", ma = address
return @[]
trace "Start trying to resolve addresses"
let
toResolve =
if peerId.isSome:
@@ -121,6 +122,9 @@ proc expandDnsAddr(
address
resolved = await self.nameResolver.resolveDnsAddr(toResolve)
debug "resolved addresses",
originalAddresses = toResolve, resolvedAddresses = resolved
for resolvedAddress in resolved:
let lastPart = resolvedAddress[^1].tryGet()
if lastPart.protoCode == Result[MultiCodec, string].ok(multiCodec("p2p")):
@@ -145,7 +149,6 @@ proc dialAndUpgrade(
for rawAddress in addrs:
# resolve potential dnsaddr
let addresses = await self.expandDnsAddr(peerId, rawAddress)
for (expandedAddress, addrPeerId) in addresses:
# DNS resolution
let
@@ -156,6 +159,11 @@ proc dialAndUpgrade(
else:
await self.nameResolver.resolveMAddress(expandedAddress)
debug "Expanded address and hostname",
expandedAddress = expandedAddress,
hostname = hostname,
resolvedAddresses = resolvedAddresses
for resolvedAddress in resolvedAddresses:
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
if not isNil(result):


@@ -159,7 +159,7 @@ proc stop*(query: DiscoveryQuery) =
query.finished = true
for r in query.futs:
if not r.finished():
r.cancel()
r.cancelSoon()
proc stop*(dm: DiscoveryManager) =
for q in dm.queries:
@@ -167,7 +167,7 @@ proc stop*(dm: DiscoveryManager) =
for i in dm.interfaces:
if isNil(i.advertiseLoop):
continue
i.advertiseLoop.cancel()
i.advertiseLoop.cancelSoon()
proc getPeer*(
query: DiscoveryQuery
@@ -179,7 +179,7 @@ proc getPeer*(
try:
await getter or allFinished(query.futs)
except CancelledError as exc:
getter.cancel()
getter.cancelSoon()
raise exc
if not finished(getter):


@@ -27,7 +27,7 @@ macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
quote:
for res in `futs`:
if res.failed:
let exc = res.readError()
let exc = res.error
# We still don't abort but warn
debug "A future has failed, enable trace logging for details",
error = exc.name
@@ -37,7 +37,7 @@ macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
for res in `futs`:
block check:
if res.failed:
let exc = res.readError()
let exc = res.error
for i in 0 ..< `nexclude`:
if exc of `exclude`[i]:
trace "A future has failed", error = exc.name, description = exc.msg


@@ -27,6 +27,7 @@ import
utility
import stew/[base58, base32, endians2]
export results, vbuffer, errors, utility
import ./utils/sequninit
logScope:
topics = "libp2p multiaddress"
@@ -223,7 +224,7 @@ proc p2pStB(s: string, vb: var VBuffer): bool =
proc p2pBtS(vb: var VBuffer, s: var string): bool =
## P2P address bufferToString() implementation.
var address = newSeqUninitialized[byte](0)
var address = newSeqUninit[byte](0)
if vb.readSeq(address) > 0:
var mh: MultiHash
if MultiHash.decode(address, mh).isOk:
@@ -232,7 +233,7 @@ proc p2pBtS(vb: var VBuffer, s: var string): bool =
proc p2pVB(vb: var VBuffer): bool =
## P2P address validateBuffer() implementation.
var address = newSeqUninitialized[byte](0)
var address = newSeqUninit[byte](0)
if vb.readSeq(address) > 0:
var mh: MultiHash
if MultiHash.decode(address, mh).isOk:
@@ -555,7 +556,7 @@ proc protoAddress*(ma: MultiAddress): MaResult[seq[byte]] =
##
## If the current MultiAddress has no argument value, the resulting array
## will be empty.
var buffer = newSeqUninitialized[byte](len(ma.data.buffer))
var buffer = newSeqUninit[byte](len(ma.data.buffer))
let res = ?protoArgument(ma, buffer)
buffer.setLen(res)
ok(buffer)
@@ -569,7 +570,7 @@ proc protoArgument*(ma: MultiAddress): MaResult[seq[byte]] =
proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
var header: uint64
var data = newSeqUninitialized[byte](0)
var data = newSeqUninit[byte](0)
var offset = 0
var vb = ma
var res: MultiAddress
@@ -643,7 +644,7 @@ proc `[]`*(ma: MultiAddress, slice: HSlice): MaResult[MultiAddress] {.inline.} =
iterator items*(ma: MultiAddress): MaResult[MultiAddress] =
## Iterates over all addresses inside of MultiAddress ``ma``.
var header: uint64
var data = newSeqUninitialized[byte](0)
var data = newSeqUninit[byte](0)
var vb = ma
while true:
if vb.data.isEmpty():


@@ -18,6 +18,7 @@
import tables
import results
import stew/[base32, base58, base64]
import ./utils/sequninit
type
MultiBaseStatus* {.pure.} = enum
@@ -533,7 +534,7 @@ proc decode*(
let empty: seq[byte] = @[]
ok(empty) # empty
else:
var buffer = newSeqUninitialized[byte](mb.decl(length - 1))
var buffer = newSeqUninit[byte](mb.decl(length - 1))
var outlen = 0
let res = mb.decr(inbytes.toOpenArray(1, length - 1), buffer, outlen)
if res != MultiBaseStatus.Success:


@@ -249,11 +249,7 @@ proc addHandler*[E](
m.handlers.add(HandlerHolder(protos: @[codec], protocol: protocol, match: matcher))
proc start*(m: MultistreamSelect) {.async: (raises: [CancelledError]).} =
# Nim 1.6.18: Using `mapIt` results in a seq of `.Raising([])`
# TODO https://github.com/nim-lang/Nim/issues/23445
var futs = newSeqOfCap[Future[void].Raising([CancelledError])](m.handlers.len)
for it in m.handlers:
futs.add it.protocol.start()
let futs = m.handlers.mapIt(it.protocol.start())
try:
await allFutures(futs)
for fut in futs:
@@ -273,10 +269,7 @@ proc start*(m: MultistreamSelect) {.async: (raises: [CancelledError]).} =
raise exc
proc stop*(m: MultistreamSelect) {.async: (raises: []).} =
# Nim 1.6.18: Using `mapIt` results in a seq of `.Raising([CancelledError])`
var futs = newSeqOfCap[Future[void].Raising([])](m.handlers.len)
for it in m.handlers:
futs.add it.protocol.stop()
let futs = m.handlers.mapIt(it.protocol.stop())
await noCancel allFutures(futs)
for fut in futs:
await fut


@@ -150,6 +150,10 @@ method close*(s: LPChannel) {.async: (raises: []).} =
trace "Closed channel", s, len = s.len
method closeWrite*(s: LPChannel) {.async: (raises: []).} =
## For mplex, closeWrite is the same as close - it implements half-close
await s.close()
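A hedged caller-side sketch of these half-close semantics: after closeWrite our side signals EOF to the peer, while reads keep working until the remote closes its own write side:

# chan: an established LPChannel (illustrative)
await chan.closeWrite() # remote reader sees EOF
var buf = newSeq[byte](1024)
let n = await chan.readOnce(addr buf[0], buf.len) # reading still works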
method initStream*(s: LPChannel) =
if s.objName.len == 0:
s.objName = LPChannelTrackerName


@@ -54,6 +54,10 @@ method newStream*(
.} =
raiseAssert("[Muxer.newStream] abstract method not implemented!")
when defined(libp2p_agents_metrics):
method setShortAgent*(m: Muxer, shortAgent: string) {.base, gcsafe.} =
m.connection.shortAgent = shortAgent
method close*(m: Muxer) {.base, async: (raises: []).} =
if m.connection != nil:
await m.connection.close()


@@ -12,7 +12,7 @@
import sequtils, std/[tables]
import chronos, chronicles, metrics, stew/[endians2, byteutils, objects]
import ../muxer, ../../stream/connection
import ../../utils/zeroqueue
import ../../utils/[zeroqueue, sequninit]
export muxer
@@ -135,12 +135,11 @@ proc windowUpdate(
)
type
ToSend =
tuple[
data: seq[byte],
sent: int,
fut: Future[void].Raising([CancelledError, LPStreamError]),
]
ToSend = ref object
data: seq[byte]
sent: int
fut: Future[void].Raising([CancelledError, LPStreamError])
YamuxChannel* = ref object of Connection
id: uint32
recvWindow: int
@@ -218,6 +217,19 @@ method closeImpl*(channel: YamuxChannel) {.async: (raises: []).} =
discard
await channel.actuallyClose()
method closeWrite*(channel: YamuxChannel) {.async: (raises: []).} =
## For yamux, closeWrite is the same as close - it implements half-close
await channel.close()
proc clearQueues(channel: YamuxChannel, error: ref LPStreamEOFError = nil) =
for toSend in channel.sendQueue:
if error.isNil():
toSend.fut.complete()
else:
toSend.fut.fail(error)
channel.sendQueue = @[]
channel.recvQueue.clear()
proc reset(channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).} =
# If we reset locally, we want to flush up to a maximum of recvWindow
# bytes. It's because the peer we're connected to can send us data before
@@ -227,9 +239,8 @@ proc reset(channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).}
trace "Reset channel"
channel.isReset = true
channel.remoteReset = not isLocal
for (d, s, fut) in channel.sendQueue:
fut.fail(newLPStreamEOFError())
channel.sendQueue = @[]
channel.clearQueues(newLPStreamEOFError())
channel.sendWindow = 0
if not channel.closedLocally:
if isLocal and not channel.isSending:
@@ -278,6 +289,7 @@ method readOnce*(
trace "stream is down when readOnce", channel = $channel
newLPStreamConnDownError()
if channel.isEof:
channel.clearQueues()
raise newLPStreamRemoteClosedError()
if channel.recvQueue.isEmpty():
channel.receivedData.clear()
@@ -292,6 +304,7 @@ method readOnce*(
await closedRemotelyFut or receivedDataFut
if channel.closedRemotely.isSet() and channel.recvQueue.isEmpty():
channel.isEof = true
channel.clearQueues()
return
0 # we return 0 to indicate that the channel is closed for reading from now on
@@ -315,17 +328,18 @@ proc gotDataFromRemote(
proc setMaxRecvWindow*(channel: YamuxChannel, maxRecvWindow: int) =
channel.maxRecvWindow = maxRecvWindow
proc trySend(
channel: YamuxChannel
) {.async: (raises: [CancelledError, LPStreamError]).} =
proc sendLoop(channel: YamuxChannel) {.async: (raises: []).} =
if channel.isSending:
return
channel.isSending = true
defer:
channel.isSending = false
while channel.sendQueue.len != 0:
channel.sendQueue.keepItIf(not (it.fut.cancelled() and it.sent == 0))
const NumBytesHeader = 12
while channel.sendQueue.len > 0:
channel.sendQueue.keepItIf(not it.fut.finished())
if channel.sendWindow == 0:
trace "trying to send while the sendWindow is empty"
if channel.lengthSendQueueWithLimit() > channel.maxSendQueueSize:
@@ -337,54 +351,57 @@ proc trySend(
let
bytesAvailable = channel.lengthSendQueue()
toSend = min(channel.sendWindow, bytesAvailable)
numBytesToSend = min(channel.sendWindow, bytesAvailable)
var
sendBuffer = newSeqUninitialized[byte](toSend + 12)
header = YamuxHeader.data(channel.id, toSend.uint32)
sendBuffer = newSeqUninit[byte](NumBytesHeader + numBytesToSend)
header = YamuxHeader.data(channel.id, numBytesToSend.uint32)
inBuffer = 0
if toSend >= bytesAvailable and channel.closedLocally:
trace "last buffer we'll sent on this channel", toSend, bytesAvailable
if numBytesToSend >= bytesAvailable and channel.closedLocally:
trace "last buffer we will send on this channel", numBytesToSend, bytesAvailable
header.flags.incl({Fin})
sendBuffer[0 ..< 12] = header.encode()
sendBuffer[0 ..< NumBytesHeader] = header.encode()
var futures: seq[Future[void].Raising([CancelledError, LPStreamError])]
while inBuffer < toSend:
while inBuffer < numBytesToSend:
var toSend = channel.sendQueue[0]
# concatenate the different messages we try to send into one buffer
let (data, sent, fut) = channel.sendQueue[0]
let bufferToSend = min(data.len - sent, toSend - inBuffer)
let bufferToSend = min(toSend.data.len - toSend.sent, numBytesToSend - inBuffer)
sendBuffer.toOpenArray(12, 12 + toSend - 1)[
sendBuffer.toOpenArray(NumBytesHeader, NumBytesHeader + numBytesToSend - 1)[
inBuffer ..< (inBuffer + bufferToSend)
] = channel.sendQueue[0].data.toOpenArray(sent, sent + bufferToSend - 1)
] = toSend.data.toOpenArray(toSend.sent, toSend.sent + bufferToSend - 1)
channel.sendQueue[0].sent.inc(bufferToSend)
if channel.sendQueue[0].sent >= data.len:
if toSend.sent >= toSend.data.len:
# if every byte of the message is in the buffer, add the write future to the
# sequence of futures to be completed (or failed) when the buffer is sent
futures.add(fut)
futures.add(toSend.fut)
channel.sendQueue.delete(0)
inBuffer.inc(bufferToSend)
trace "try to send the buffer", h = $header
channel.sendWindow.dec(toSend)
try:
await channel.conn.write(sendBuffer)
channel.sendWindow.dec(inBuffer)
except CancelledError:
trace "cancelled sending the buffer"
for fut in futures.items():
fut.cancelSoon()
await channel.reset()
break
## Only here to satisfy the compiler. This should never happen: sendLoop is started
## via asyncSpawn, so no one owns its future and no one can cancel it.
discard
except LPStreamError as exc:
trace "failed to send the buffer"
error "failed to send the buffer", description = exc.msg
let connDown = newLPStreamConnDownError(exc)
for fut in futures.items():
for fut in futures:
fut.fail(connDown)
await channel.reset()
break
for fut in futures.items():
for fut in futures:
fut.complete()
channel.activity = true
method write*(
@@ -392,21 +409,29 @@ method write*(
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
## Write to yamux channel
##
result = newFuture[void]("Yamux Send")
var resFut = newFuture[void]("Yamux Send")
if channel.remoteReset:
trace "stream is reset when write", channel = $channel
result.fail(newLPStreamResetError())
return result
resFut.fail(newLPStreamResetError())
return resFut
if channel.closedLocally or channel.isReset:
result.fail(newLPStreamClosedError())
return result
resFut.fail(newLPStreamClosedError())
return resFut
if msg.len == 0:
result.complete()
return result
channel.sendQueue.add((msg, 0, result))
resFut.complete()
return resFut
channel.sendQueue.add(ToSend(data: msg, sent: 0, fut: resFut))
when defined(libp2p_yamux_metrics):
libp2p_yamux_send_queue.observe(channel.lengthSendQueue().int64)
asyncSpawn channel.trySend()
asyncSpawn channel.sendLoop()
return resFut
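To make the write/flush split above concrete, here is the frame arithmetic for one sendLoop pass under assumed values (sendWindow = 10, two queued messages of 6 and 8 bytes):
# bytesAvailable  = 6 + 8 = 14
# numBytesToSend  = min(sendWindow, bytesAvailable) = min(10, 14) = 10
# sendBuffer.len  = NumBytesHeader + 10 = 22
#   bytes [0, 12):  data header with length = 10 (no Fin, since 10 < 14)
#   bytes [12, 18): all 6 bytes of message 1 -> its future completes
#   bytes [18, 22): first 4 bytes of message 2 -> sent = 4, message stays queued
# After conn.write succeeds: sendWindow -= 10, so sendWindow == 0 until the
# peer sends a WindowUpdate, which re-spawns sendLoop.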
proc open(channel: YamuxChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
## Open a yamux channel by sending a window update with Syn or Ack flag
@@ -415,6 +440,8 @@ proc open(channel: YamuxChannel) {.async: (raises: [CancelledError, LPStreamErro
trace "Try to open channel twice"
return
channel.opened = true
channel.isReset = false
await channel.conn.write(
YamuxHeader.windowUpdate(
channel.id,
@@ -502,13 +529,12 @@ method close*(m: Yamux) {.async: (raises: []).} =
if m.isClosed == true:
trace "Already closed"
return
m.isClosed = true
trace "Closing yamux"
let channels = toSeq(m.channels.values())
for channel in channels:
for (d, s, fut) in channel.sendQueue:
fut.fail(newLPStreamEOFError())
for toSend in channel.sendQueue:
toSend.fut.fail(newLPStreamEOFError())
channel.sendQueue = @[]
channel.sendWindow = 0
channel.closedLocally = true
@@ -523,6 +549,8 @@ method close*(m: Yamux) {.async: (raises: []).} =
except LPStreamError as exc:
trace "failed to send goAway", description = exc.msg
await m.connection.close()
m.isClosed = true
trace "Closed yamux"
proc handleStream(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
@@ -581,7 +609,7 @@ method handle*(m: Yamux) {.async: (raises: []).} =
raise
newException(YamuxError, "Peer exhausted the recvWindow after reset")
if header.length > 0:
var buffer = newSeqUninitialized[byte](header.length)
var buffer = newSeqUninit[byte](header.length)
await m.connection.readExactly(addr buffer[0], int(header.length))
do:
raise newException(YamuxError, "Unknown stream ID: " & $header.streamId)
@@ -600,14 +628,14 @@ method handle*(m: Yamux) {.async: (raises: []).} =
if header.msgType == WindowUpdate:
channel.sendWindow += int(header.length)
await channel.trySend()
asyncSpawn channel.sendLoop()
else:
if header.length.int > channel.recvWindow.int:
# check before allocating the buffer
raise newException(YamuxError, "Peer exhausted the recvWindow")
if header.length > 0:
var buffer = newSeqUninitialized[byte](header.length)
var buffer = newSeqUninit[byte](header.length)
await m.connection.readExactly(addr buffer[0], int(header.length))
trace "Msg Rcv", description = shortLog(buffer)
await channel.gotDataFromRemote(buffer)

View File

@@ -15,7 +15,8 @@ import
chronicles,
stew/byteutils,
dnsclientpkg/[protocol, types],
../utility
../utility,
../utils/sequninit
import nameresolver
@@ -37,18 +38,18 @@ proc questionToBuf(address: string, kind: QKind): seq[byte] =
let dataLen = requestStream.getPosition()
requestStream.setPosition(0)
var buf = newSeqUninitialized[byte](dataLen)
var buf = newSeqUninit[byte](dataLen)
discard requestStream.readData(addr buf[0], dataLen)
buf
except IOError as exc:
info "Failed to created DNS buffer", description = exc.msg
newSeqUninitialized[byte](0)
newSeqUninit[byte](0)
except OSError as exc:
info "Failed to created DNS buffer", description = exc.msg
newSeqUninitialized[byte](0)
newSeqUninit[byte](0)
except ValueError as exc:
info "Failed to created DNS buffer", description = exc.msg
newSeqUninitialized[byte](0)
newSeqUninit[byte](0)
proc getDnsResponse(
dnsServer: TransportAddress, address: string, kind: QKind

View File

@@ -24,7 +24,8 @@ import
./multicodec,
./multihash,
./vbuffer,
./protobuf/minprotobuf
./protobuf/minprotobuf,
./utils/sequninit
export results, utility
@@ -142,7 +143,7 @@ func init*(pid: var PeerId, data: string): bool =
## Initialize peer id from base58 encoded string representation.
##
## Returns ``true`` if peer was successfully initialized.
var p = newSeqUninitialized[byte](len(data) + 4)
var p = newSeqUninit[byte](len(data) + 4)
var length = 0
if Base58.decode(data, p, length) == Base58Status.Success:
p.setLen(length)

View File

@@ -52,12 +52,16 @@ func shortLog*(p: PeerInfo): auto =
chronicles.formatIt(PeerInfo):
shortLog(it)
proc expandAddrs*(
p: PeerInfo
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
var addrs = p.listenAddrs
for mapper in p.addressMappers:
addrs = await mapper(addrs)
addrs
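A sketch of an addressMapper that `expandAddrs` would apply, replacing every listen address with an assumed public one (the proc name and address are illustrative, and the signature assumes the AddressMapper type used by PeerInfo):
proc publicMapper(
    listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
  # Advertise a single assumed public address instead of the raw listen addrs.
  let public = MultiAddress.init("/ip4/203.0.113.7/tcp/4001").tryGet()
  return listenAddrs.mapIt(public) # mapIt needs std/sequtils

# p.addressMappers.add(publicMapper)
# let advertised = await p.expandAddrs()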
proc update*(p: PeerInfo) {.async: (raises: [CancelledError]).} =
# p.addrs.len == 0 overrides addrs only if it is the first time update is being executed or if the field is empty.
# p.addressMappers.len == 0 is for when all addressMappers have been removed,
# and we wish to have addrs in its initial state, i.e., a copy of listenAddrs.
if p.addrs.len == 0 or p.addressMappers.len == 0:
p.addrs = p.listenAddrs
p.addrs = p.listenAddrs
for mapper in p.addressMappers:
p.addrs = await mapper(p.addrs)

View File

@@ -214,7 +214,7 @@ proc identify*(
info.agentVersion.get("").split("/")[0].safeToLowerAscii().get("")
if KnownLibP2PAgentsSeq.contains(shortAgent):
knownAgent = shortAgent
muxer.connection.setShortAgent(knownAgent)
muxer.setShortAgent(knownAgent)
peerStore.updatePeerInfo(info, stream.observedAddr)
finally:

View File

@@ -12,6 +12,7 @@
{.push raises: [].}
import ../varint, ../utility, stew/endians2, results
import ../utils/sequninit
export results, utility
{.push public.}
@@ -147,12 +148,12 @@ proc initProtoBuffer*(options: set[ProtoFlags] = {}): ProtoBuffer =
if WithVarintLength in options:
# Our buffer will start from position 10, so we can store length of buffer
# in [0, 9].
result.buffer = newSeqUninitialized[byte](10)
result.buffer = newSeqUninit[byte](10)
result.offset = 10
elif {WithUint32LeLength, WithUint32BeLength} * options != {}:
# Our buffer will start from position 4, so we can store length of buffer
# in [0, 3].
result.buffer = newSeqUninitialized[byte](4)
result.buffer = newSeqUninit[byte](4)
result.offset = 4
proc write*[T: ProtoScalar](pb: var ProtoBuffer, field: int, value: T) =

View File

@@ -105,7 +105,7 @@ proc tryDial(
autonat.sem.release()
for f in futs:
if not f.finished():
f.cancel()
f.cancelSoon()
proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[void] =
let dial = msg.dial.valueOr:

View File

@@ -0,0 +1,241 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import results, chronos, chronicles
import ../../../multiaddress, ../../../peerid #, ../../../errors
import ../../../protobuf/minprotobuf
logScope:
topics = "libp2p autonat v2"
const
AutonatV2DialRequestCodec* = "/libp2p/autonat/2/dial-request"
AutonatV2DialBackCodec* = "/libp2p/autonat/2/dial-back"
type
# DialBack and DialBackResponse are not defined as AutonatV2Msg variants, per the spec,
# likely because they are only sent in response to another message
MsgType* {.pure.} = enum
Unused = 0 # Nim requires the first enum value to be zero
DialRequest = 1
DialResponse = 2
DialDataRequest = 3
DialDataResponse = 4
ResponseStatus* {.pure.} = enum
EInternalError = 0
ERequestRejected = 100
EDialRefused = 101
Ok = 200
DialBackStatus* {.pure.} = enum
Ok = 0
DialStatus* {.pure.} = enum
Unused = 0
EDialError = 100
EDialBackError = 101
Ok = 200
DialRequest* = object
addrs*: seq[MultiAddress]
nonce*: uint64
DialResponse* = object
status*: ResponseStatus
addrIdx*: Opt[uint32]
dialStatus*: Opt[DialStatus]
DialBack* = object
nonce*: uint64
DialBackResponse* = object
status*: DialBackStatus
DialDataRequest* = object
addrIdx*: uint32
numBytes*: uint64
DialDataResponse* = object
data*: seq[byte]
AutonatV2Msg* = object
case msgType*: MsgType
of MsgType.Unused:
discard
of MsgType.DialRequest:
dialReq*: DialRequest
of MsgType.DialResponse:
dialResp*: DialResponse
of MsgType.DialDataRequest:
dialDataReq*: DialDataRequest
of MsgType.DialDataResponse:
dialDataResp*: DialDataResponse
# DialRequest
proc encode*(dialReq: DialRequest): ProtoBuffer =
var encoded = initProtoBuffer()
for ma in dialReq.addrs:
encoded.write(1, ma.data.buffer)
encoded.write(2, dialReq.nonce)
encoded.finish()
encoded
proc decode*(T: typedesc[DialRequest], pb: ProtoBuffer): Opt[T] =
var
addrs: seq[MultiAddress]
nonce: uint64
if not ?pb.getRepeatedField(1, addrs).toOpt():
return Opt.none(T)
if not ?pb.getField(2, nonce).toOpt():
return Opt.none(T)
Opt.some(T(addrs: addrs, nonce: nonce))
# DialResponse
proc encode*(dialResp: DialResponse): ProtoBuffer =
var encoded = initProtoBuffer()
encoded.write(1, dialResp.status.uint)
# minprotobuf uses float64 for fixed64 fields
dialResp.addrIdx.withValue(addrIdx):
encoded.write(2, addrIdx)
dialResp.dialStatus.withValue(dialStatus):
encoded.write(3, dialStatus.uint)
encoded.finish()
encoded
proc decode*(T: typedesc[DialResponse], pb: ProtoBuffer): Opt[T] =
var
status: uint
addrIdx: uint32
dialStatus: uint
if not ?pb.getField(1, status).toOpt():
return Opt.none(T)
var optAddrIdx = Opt.none(uint32)
if ?pb.getField(2, addrIdx).toOpt():
optAddrIdx = Opt.some(addrIdx)
var optDialStatus = Opt.none(DialStatus)
if ?pb.getField(3, dialStatus).toOpt():
optDialStatus = Opt.some(cast[DialStatus](dialStatus))
Opt.some(
T(
status: cast[ResponseStatus](status),
addrIdx: optAddrIdx,
dialStatus: optDialStatus,
)
)
# DialBack
proc encode*(dialBack: DialBack): ProtoBuffer =
var encoded = initProtoBuffer()
encoded.write(1, dialBack.nonce)
encoded.finish()
encoded
proc decode*(T: typedesc[DialBack], pb: ProtoBuffer): Opt[T] =
var nonce: uint64
if not ?pb.getField(1, nonce).toOpt():
return Opt.none(T)
Opt.some(T(nonce: nonce))
# DialBackResponse
proc encode*(dialBackResp: DialBackResponse): ProtoBuffer =
var encoded = initProtoBuffer()
encoded.write(1, dialBackResp.status.uint)
encoded.finish()
encoded
proc decode*(T: typedesc[DialBackResponse], pb: ProtoBuffer): Opt[T] =
var status: uint
if not ?pb.getField(1, status).toOpt():
return Opt.none(T)
Opt.some(T(status: cast[DialBackStatus](status)))
# DialDataRequest
proc encode*(dialDataReq: DialDataRequest): ProtoBuffer =
var encoded = initProtoBuffer()
encoded.write(1, dialDataReq.addrIdx)
encoded.write(2, dialDataReq.numBytes)
encoded.finish()
encoded
proc decode*(T: typedesc[DialDataRequest], pb: ProtoBuffer): Opt[T] =
var
addrIdx: uint32
numBytes: uint64
if not ?pb.getField(1, addrIdx).toOpt():
return Opt.none(T)
if not ?pb.getField(2, numBytes).toOpt():
return Opt.none(T)
Opt.some(T(addrIdx: addrIdx, numBytes: numBytes))
# DialDataResponse
proc encode*(dialDataResp: DialDataResponse): ProtoBuffer =
var encoded = initProtoBuffer()
encoded.write(1, dialDataResp.data)
encoded.finish()
encoded
proc decode*(T: typedesc[DialDataResponse], pb: ProtoBuffer): Opt[T] =
var data: seq[byte]
if not ?pb.getField(1, data).toOpt():
return Opt.none(T)
Opt.some(T(data: data))
# AutonatV2Msg
proc encode*(msg: AutonatV2Msg): ProtoBuffer =
var encoded = initProtoBuffer()
case msg.msgType
of MsgType.Unused:
doAssert false, "invalid enum variant: Unused"
of MsgType.DialRequest:
encoded.write(MsgType.DialRequest.int, msg.dialReq.encode())
of MsgType.DialResponse:
encoded.write(MsgType.DialResponse.int, msg.dialResp.encode())
of MsgType.DialDataRequest:
encoded.write(MsgType.DialDataRequest.int, msg.dialDataReq.encode())
of MsgType.DialDataResponse:
encoded.write(MsgType.DialDataResponse.int, msg.dialDataResp.encode())
encoded.finish()
encoded
proc decode*(T: typedesc[AutonatV2Msg], pb: ProtoBuffer): Opt[T] =
var
msgTypeOrd: uint32
msg: ProtoBuffer
if ?pb.getField(MsgType.DialRequest.int, msg).toOpt():
let dialReq = DialRequest.decode(msg).valueOr:
return Opt.none(AutonatV2Msg)
Opt.some(AutonatV2Msg(msgType: MsgType.DialRequest, dialReq: dialReq))
elif ?pb.getField(MsgType.DialResponse.int, msg).toOpt():
let dialResp = DialResponse.decode(msg).valueOr:
return Opt.none(AutonatV2Msg)
Opt.some(AutonatV2Msg(msgType: MsgType.DialResponse, dialResp: dialResp))
elif ?pb.getField(MsgType.DialDataRequest.int, msg).toOpt():
let dialDataReq = DialDataRequest.decode(msg).valueOr:
return Opt.none(AutonatV2Msg)
Opt.some(AutonatV2Msg(msgType: MsgType.DialDataRequest, dialDataReq: dialDataReq))
elif ?pb.getField(MsgType.DialDataResponse.int, msg).toOpt():
let dialDataResp = DialDataResponse.decode(msg).valueOr:
return Opt.none(AutonatV2Msg)
Opt.some(
AutonatV2Msg(msgType: MsgType.DialDataResponse, dialDataResp: dialDataResp)
)
else:
Opt.none(AutonatV2Msg)
# Custom `==` is needed to compare since AutonatV2Msg is a case object
proc `==`*(a, b: AutonatV2Msg): bool =
a.msgType == b.msgType and a.encode() == b.encode()
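A round-trip sketch using only the procs above, showing that encoding then decoding preserves the message (nonce and address are arbitrary):
let req = AutonatV2Msg(
  msgType: MsgType.DialRequest,
  dialReq: DialRequest(
    addrs: @[MultiAddress.init("/ip4/127.0.0.1/tcp/4001").tryGet()], nonce: 42'u64
  ),
)
# decode expects a readable ProtoBuffer, so re-init from the encoded bytes
let decoded = AutonatV2Msg.decode(initProtoBuffer(req.encode().buffer))
doAssert decoded.isSome() and decoded.get() == req # relies on the custom `==` above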

View File

@@ -422,6 +422,6 @@ method stop*(r: Relay): Future[void] {.async: (raises: [], raw: true).} =
warn "Stopping relay without starting it"
return fut
r.started = false
r.reservationLoop.cancel()
r.reservationLoop.cancelSoon()
r.reservationLoop = nil
fut

View File

@@ -31,7 +31,7 @@ type RelayTransport* = ref object of Transport
method start*(
self: RelayTransport, ma: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError]).} =
) {.async: (raises: [LPError, transport.TransportError, CancelledError]).} =
if self.selfRunning:
trace "Relay transport already running"
return

View File

@@ -1,6 +1,10 @@
import chronos
const
IdLength* = 32 # 256-bit IDs
k* = 20 # replication parameter
DefaultReplic* = 20 ## replication parameter, aka `k` in the spec
alpha* = 10 # concurrency parameter
ttl* = 24.hours
maxBuckets* = 256
const KadCodec* = "/ipfs/kad/1.0.0"

View File

@@ -1,53 +1,451 @@
import chronos
import chronicles
import sequtils
import sets
import ../../peerid
import ./consts
import ./xordistance
import ./routingtable
import ./lookupstate
import ./requests
import ./keys
import ../protocol
import ../../switch
import ./protobuf
import ../../switch
import ../../multihash
import ../../utils/heartbeat
import std/[times, options, tables]
import results
logScope:
topics = "kad-dht"
type EntryKey* = object
data: seq[byte]
proc init*(T: typedesc[EntryKey], inner: seq[byte]): EntryKey {.gcsafe, raises: [].} =
EntryKey(data: inner)
type EntryValue* = object
data*: seq[byte] # public because needed for tests
proc init*(
T: typedesc[EntryValue], inner: seq[byte]
): EntryValue {.gcsafe, raises: [].} =
EntryValue(data: inner)
type TimeStamp* = object
# Currently a string, because for some reason, that's what is chosen at the protobuf level
# TODO: convert between RFC3339 strings and integers (i.e. the _correct_ way)
ts*: string # only public because needed for tests
type EntryRecord* = object
value*: EntryValue # only public because needed for tests
time*: TimeStamp # only public because needed for tests
proc init*(
T: typedesc[EntryRecord], value: EntryValue, time: Option[TimeStamp]
): EntryRecord {.gcsafe, raises: [].} =
EntryRecord(value: value, time: time.get(TimeStamp(ts: $times.now().utc)))
type LocalTable* = object
entries*: Table[EntryKey, EntryRecord] # public because needed for tests
proc init(self: typedesc[LocalTable]): LocalTable {.raises: [].} =
LocalTable()
type EntryCandidate* = object
key*: EntryKey
value*: EntryValue
type ValidatedEntry* = object
key: EntryKey
value: EntryValue
proc init*(
T: typedesc[ValidatedEntry], key: EntryKey, value: EntryValue
): ValidatedEntry {.gcsafe, raises: [].} =
ValidatedEntry(key: key, value: value)
type EntryValidator* = ref object of RootObj
method isValid*(
self: EntryValidator, key: EntryKey, val: EntryValue
): bool {.base, raises: [], gcsafe.} =
doAssert(false, "unimplimented base method")
type EntrySelector* = ref object of RootObj
method select*(
self: EntrySelector, cand: EntryRecord, others: seq[EntryRecord]
): Result[EntryRecord, string] {.base, raises: [], gcsafe.} =
doAssert(false, "EntrySelection base not implemented")
type KadDHT* = ref object of LPProtocol
switch: Switch
rng: ref HmacDrbgContext
rtable*: RoutingTable
maintenanceLoop: Future[void]
dataTable*: LocalTable
entryValidator: EntryValidator
entrySelector: EntrySelector
proc insert*(
self: var LocalTable, value: sink ValidatedEntry, time: TimeStamp
) {.raises: [].} =
debug "local table insertion", key = value.key.data, value = value.value.data
self.entries[value.key] = EntryRecord(value: value.value, time: time)
const MaxMsgSize = 4096
# Forward declaration
proc findNode*(
kad: KadDHT, targetId: Key
): Future[seq[PeerId]] {.async: (raises: [CancelledError]).}
proc sendFindNode(
kad: KadDHT, peerId: PeerId, addrs: seq[MultiAddress], targetId: Key
): Future[Message] {.
async: (raises: [CancelledError, DialFailedError, ValueError, LPStreamError])
.} =
let conn =
if addrs.len == 0:
await kad.switch.dial(peerId, KadCodec)
else:
await kad.switch.dial(peerId, addrs, KadCodec)
defer:
await conn.close()
let msg = Message(msgType: MessageType.findNode, key: some(targetId.getBytes()))
await conn.writeLp(msg.encode().buffer)
let reply = Message.decode(await conn.readLp(MaxMsgSize)).tryGet()
if reply.msgType != MessageType.findNode:
raise newException(ValueError, "unexpected message type in reply: " & $reply)
return reply
proc waitRepliesOrTimeouts(
pendingFutures: Table[PeerId, Future[Message]]
): Future[(seq[Message], seq[PeerId])] {.async: (raises: [CancelledError]).} =
await allFutures(toSeq(pendingFutures.values))
var receivedReplies: seq[Message] = @[]
var failedPeers: seq[PeerId] = @[]
for (peerId, replyFut) in pendingFutures.pairs:
try:
receivedReplies.add(await replyFut)
except CatchableError:
failedPeers.add(peerId)
error "could not send find_node to peer", peerId, err = getCurrentExceptionMsg()
return (receivedReplies, failedPeers)
proc dispatchPutVal(
kad: KadDHT, peer: PeerId, entry: ValidatedEntry
): Future[void] {.async: (raises: [CancelledError, DialFailedError, LPStreamError]).} =
let conn = await kad.switch.dial(peer, KadCodec)
defer:
await conn.close()
let msg = Message(
msgType: MessageType.putValue,
record: some(Record(key: some(entry.key.data), value: some(entry.value.data))),
)
await conn.writeLp(msg.encode().buffer)
let reply = Message.decode(await conn.readLp(MaxMsgSize)).valueOr:
# todo log this more meaningfully
error "putValue reply decode fail", error = error, conn = conn
return
if reply != msg:
error "unexpected change between msg and reply: ",
msg = msg, reply = reply, conn = conn
proc putValue*(
kad: KadDHT, entKey: EntryKey, value: EntryValue, timeout: Option[int]
): Future[Result[void, string]] {.async: (raises: [CancelledError]), gcsafe.} =
if not kad.entryValidator.isValid(entKey, value):
return err("invalid key/value pair")
let others: seq[EntryRecord] =
if entKey in kad.dataTable.entries:
@[kad.dataTable.entries.getOrDefault(entKey)]
else:
@[]
let candAsRec = EntryRecord.init(value, none(TimeStamp))
let confirmedRec = kad.entrySelector.select(candAsRec, others).valueOr:
error "application provided selector error (local)", msg = error
return err(error)
trace "local putval", candidate = candAsRec, others = others, selected = confirmedRec
let validEnt = ValidatedEntry.init(entKey, confirmedRec.value)
let peers = await kad.findNode(entKey.data.toKey())
# We first prime the sends so the data is ready to go
let rpcBatch = peers.mapIt(kad.dispatchPutVal(it, validEnt))
# then we do the `move`, as insert takes the data as `sink`
kad.dataTable.insert(validEnt, confirmedRec.time)
try:
# now that all the data is where it needs to be in memory, we can dispatch the
# RPCs
await rpcBatch.allFutures().wait(chronos.seconds(timeout.get(5)))
# It's quite normal for the dispatch to time out, as it would require all calls to get
# their response. Downstream users may desire some sort of functionality in the
# future to get rpc telemetry, but in the meantime, we just move on...
except AsyncTimeoutError:
discard
return results.ok()
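A minimal sketch of plugging application policy into putValue, with a validator that accepts everything and a selector that always keeps the candidate (both illustrative, not part of this change):
type AcceptAll = ref object of EntryValidator
method isValid(self: AcceptAll, key: EntryKey, val: EntryValue): bool {.raises: [], gcsafe.} =
  true

type KeepCandidate = ref object of EntrySelector
method select(
    self: KeepCandidate, cand: EntryRecord, others: seq[EntryRecord]
): Result[EntryRecord, string] {.raises: [], gcsafe.} =
  ok(cand)

# let kad = KadDHT.new(switch, AcceptAll(), KeepCandidate())
# let res = await kad.putValue(EntryKey.init(key), EntryValue.init(value), some(5))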
# Helper function forward declaration
proc checkConvergence(state: LookupState, me: PeerId): bool {.raises: [], gcsafe.}
proc findNode*(
kad: KadDHT, targetId: Key
): Future[seq[PeerId]] {.async: (raises: [CancelledError]).} =
## Node lookup. Iteratively search for the k closest peers to a target ID.
## It will not necessarily return the target itself
#debug "findNode", target = target
var initialPeers = kad.rtable.findClosestPeers(targetId, DefaultReplic)
var state = LookupState.init(targetId, initialPeers, kad.rtable.hasher)
var addrTable: Table[PeerId, seq[MultiAddress]] =
initTable[PeerId, seq[MultiAddress]]()
while not state.done:
let toQuery = state.selectAlphaPeers()
debug "queries", list = toQuery.mapIt(it.shortLog()), addrTab = addrTable
var pendingFutures = initTable[PeerId, Future[Message]]()
# TODO: pending futures always empty here, no?
for peer in toQuery.filterIt(
kad.switch.peerInfo.peerId != it or pendingFutures.hasKey(it)
):
state.markPending(peer)
pendingFutures[peer] = kad
.sendFindNode(peer, addrTable.getOrDefault(peer, @[]), targetId)
.wait(chronos.seconds(5))
state.activeQueries.inc
let (successfulReplies, timedOutPeers) = await waitRepliesOrTimeouts(pendingFutures)
for msg in successfulReplies:
for peer in msg.closerPeers:
let pid = PeerId.init(peer.id)
if not pid.isOk:
error "PeerId init went bad. this is unusual", data = peer.id
continue
addrTable[pid.get()] = peer.addrs
state.updateShortlist(
msg,
proc(p: PeerInfo) =
discard kad.rtable.insert(p.peerId)
# Nodes might return different addresses for a peer, so we append instead of replacing
var existingAddresses =
kad.switch.peerStore[AddressBook][p.peerId].toHashSet()
for a in p.addrs:
existingAddresses.incl(a)
kad.switch.peerStore[AddressBook][p.peerId] = existingAddresses.toSeq()
# TODO: add TTL to peerstore, otherwise we can spam it with junk
,
kad.rtable.hasher,
)
for timedOut in timedOutPeers:
state.markFailed(timedOut)
# Check for convergence: no active queries, and no other peers to be selected
state.done = checkConvergence(state, kad.switch.peerInfo.peerId)
return state.selectClosestK()
proc findPeer*(
kad: KadDHT, peer: PeerId
): Future[Result[PeerInfo, string]] {.async: (raises: [CancelledError]).} =
## Walks the key space until it finds candidate addresses for a peer Id
if kad.switch.peerInfo.peerId == peer:
# Looking for yourself.
return ok(kad.switch.peerInfo)
if kad.switch.isConnected(peer):
# Return known info about already connected peer
return ok(PeerInfo(peerId: peer, addrs: kad.switch.peerStore[AddressBook][peer]))
let foundNodes = await kad.findNode(peer.toKey())
if not foundNodes.contains(peer):
return err("peer not found")
return ok(PeerInfo(peerId: peer, addrs: kad.switch.peerStore[AddressBook][peer]))
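A hedged helper showing intended usage, written as if inside this module (so it can reach kad.switch); the name is illustrative:
proc connectVia(kad: KadDHT, target: PeerId) {.async: (raises: [CancelledError]).} =
  # Resolve addresses through the DHT, then dial the peer.
  let info = (await kad.findPeer(target)).valueOr:
    debug "findPeer failed", err = error
    return
  try:
    await kad.switch.connect(info.peerId, info.addrs)
  except DialFailedError as e:
    debug "dial failed", peerId = target, err = e.msg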
proc checkConvergence(state: LookupState, me: PeerId): bool {.raises: [], gcsafe.} =
let ready = state.activeQueries == 0
let noNew = selectAlphaPeers(state).filterIt(me != it).len == 0
return ready and noNew
proc bootstrap*(
kad: KadDHT, bootstrapNodes: seq[PeerInfo]
) {.async: (raises: [CancelledError]).} =
for b in bootstrapNodes:
try:
await kad.switch.connect(b.peerId, b.addrs)
debug "connected to bootstrap peer", peerId = b.peerId
except DialFailedError as e:
# at some point we will want to bubble up a Result[void, SomeErrorEnum]
error "failed to dial to bootstrap peer", peerId = b.peerId, error = e.msg
continue
let msg =
try:
await kad.sendFindNode(b.peerId, b.addrs, kad.rtable.selfId).wait(
chronos.seconds(5)
)
except CatchableError as e:
debug "send find node exception during bootstrap",
target = b.peerId, addrs = b.addrs, err = e.msg
continue
for peer in msg.closerPeers:
let p = PeerId.init(peer.id).valueOr:
debug "invalid peer id received", error = error
continue
discard kad.rtable.insert(p)
try:
kad.switch.peerStore[AddressBook][p] = peer.addrs
except:
error "this is here because an ergonomic means of keying into a table without exceptions is unknown"
# bootstrap node replied successfully. Adding to routing table
discard kad.rtable.insert(b.peerId)
let key = PeerId.random(kad.rng).valueOr:
doAssert(false, "this should never happen")
return
discard await kad.findNode(key.toKey())
info "bootstrap lookup complete"
proc refreshBuckets(kad: KadDHT) {.async: (raises: [CancelledError]).} =
for i in 0 ..< kad.rtable.buckets.len:
if kad.rtable.buckets[i].isStale():
let randomKey = randomKeyInBucketRange(kad.rtable.selfId, i, kad.rng)
discard await kad.findNode(randomKey)
proc maintainBuckets(kad: KadDHT) {.async: (raises: [CancelledError]).} =
heartbeat "refresh buckets", 10.minutes:
debug "TODO: implement bucket maintenance"
heartbeat "refresh buckets", chronos.minutes(10):
await kad.refreshBuckets()
proc new*(
T: typedesc[KadDHT], switch: Switch, rng: ref HmacDrbgContext = newRng()
T: typedesc[KadDHT],
switch: Switch,
validator: EntryValidator,
entrySelector: EntrySelector,
rng: ref HmacDrbgContext = newRng(),
): T {.raises: [].} =
var rtable = RoutingTable.init(switch.peerInfo.peerId)
let kad = T(rng: rng, switch: switch, rtable: rtable)
var rtable = RoutingTable.init(switch.peerInfo.peerId.toKey(), Opt.none(XorDHasher))
let kad = T(
rng: rng,
switch: switch,
rtable: rtable,
dataTable: LocalTable.init(),
entryValidator: validator,
entrySelector: entrySelector,
)
kad.codec = KadCodec
kad.handler = proc(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
while not conn.atEof:
let
buf = await conn.readLp(4096)
msg = Message.decode(buf).tryGet()
# TODO: handle msg.msgType
except CancelledError as exc:
raise exc
except CatchableError:
error "could not handle request",
peerId = conn.PeerId, err = getCurrentExceptionMsg()
finally:
defer:
await conn.close()
while not conn.atEof:
let buf =
try:
await conn.readLp(MaxMsgSize)
except LPStreamError as e:
debug "Read error when handling kademlia RPC", conn = conn, err = e.msg
return
let msg = Message.decode(buf).valueOr:
debug "msg decode error handling kademlia RPC", err = error
return
case msg.msgType
of MessageType.findNode:
let targetIdBytes = msg.key.valueOr:
error "findNode message without key data present", msg = msg, conn = conn
return
let targetId = PeerId.init(targetIdBytes).valueOr:
error "findNode message without valid key data", msg = msg, conn = conn
return
let closerPeers = kad.rtable
.findClosest(targetId.toKey(), DefaultReplic)
# exclude the requesting node, because telling a peer about itself does not reduce the distance
.filterIt(it != conn.peerId.toKey())
let responsePb = encodeFindNodeReply(closerPeers, switch)
try:
await conn.writeLp(responsePb.buffer)
except LPStreamError as e:
debug "write error when writing kad find-node RPC reply",
conn = conn, err = e.msg
return
# Peer is useful; adding to rtable
discard kad.rtable.insert(conn.peerId)
of MessageType.putValue:
let record = msg.record.valueOr:
error "no record in message buffer", msg = msg, conn = conn
return
let (skey, svalue) =
if record.key.isSome() and record.value.isSome():
(record.key.unsafeGet(), record.value.unsafeGet())
else:
error "no key or no value in rpc buffer", msg = msg, conn = conn
return
let key = EntryKey.init(skey)
let value = EntryValue.init(svalue)
# Value sanitisation done. Start insertion process
if not kad.entryValidator.isValid(key, value):
return
let others =
if kad.dataTable.entries.contains(key):
# use getOrDefault here to avoid exceptions from direct indexing.
@[kad.dataTable.entries.getOrDefault(key)]
else:
@[]
let candRec = EntryRecord.init(value, none(TimeStamp))
let selectedRec = kad.entrySelector.select(candRec, others).valueOr:
error "application provided selector error", msg = error, conn = conn
return
trace "putval handler selection",
cand = candRec, others = others, selected = selectedRec
# Assume that if selection picks another value, it is valid
let validated = ValidatedEntry(key: key, value: selectedRec.value)
kad.dataTable.insert(validated, selectedRec.time)
# consistent with the implementation linked below: echo the message back unchanged
# https://github.com/libp2p/js-libp2p/blob/cf9aab5c841ec08bc023b9f49083c95ad78a7a07/packages/kad-dht/src/rpc/handlers/put-value.ts#L22
try:
await conn.writeLp(buf)
except LPStreamError as e:
debug "write error when writing kad find-node RPC reply",
conn = conn, err = e.msg
return
else:
error "unhandled kad-dht message type", msg = msg
return
return kad
proc setSelector*(kad: KadDHT, selector: EntrySelector) =
doAssert(selector != nil)
kad.entrySelector = selector
proc setValidator*(kad: KadDHT, validator: EntryValidator) =
doAssert(validator != nil)
kad.entryValidator = validator
method start*(
kad: KadDHT
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
@@ -65,10 +463,12 @@ method start*(
fut
method stop*(kad: KadDHT): Future[void] {.async: (raises: [], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if not kad.started:
return
return fut
kad.started = false
kad.maintenanceLoop.cancelSoon()
kad.maintenanceLoop = nil
return
return fut

View File

@@ -1,11 +1,10 @@
import nimcrypto/sha2
import ../../peerid
import ./consts
import chronicles
import stew/byteutils
type
KeyType* {.pure.} = enum
Unhashed
Raw
PeerId
@@ -13,25 +12,26 @@ type
case kind*: KeyType
of KeyType.PeerId:
peerId*: PeerId
of KeyType.Raw, KeyType.Unhashed:
data*: array[IdLength, byte]
of KeyType.Raw:
data*: seq[byte]
proc toKey*(s: seq[byte]): Key =
doAssert s.len == IdLength
var data: array[IdLength, byte]
for i in 0 ..< IdLength:
data[i] = s[i]
return Key(kind: KeyType.Raw, data: data)
return Key(kind: KeyType.Raw, data: s)
proc toKey*(p: PeerId): Key =
return Key(kind: KeyType.PeerId, peerId: p)
proc toPeerId*(k: Key): PeerId {.raises: [ValueError].} =
if k.kind != KeyType.PeerId:
raise newException(ValueError, "not a peerId")
k.peerId
proc getBytes*(k: Key): seq[byte] =
return
case k.kind
of KeyType.PeerId:
k.peerId.getBytes()
of KeyType.Raw, KeyType.Unhashed:
of KeyType.Raw:
@(k.data)
template `==`*(a, b: Key): bool =
@@ -41,7 +41,7 @@ proc shortLog*(k: Key): string =
case k.kind
of KeyType.PeerId:
"PeerId:" & $k.peerId
of KeyType.Raw, KeyType.Unhashed:
of KeyType.Raw:
$k.kind & ":" & toHex(k.data)
chronicles.formatIt(Key):

View File

@@ -0,0 +1,120 @@
import sequtils
import ./consts
import ./protobuf
import ./xordistance
import ./keys
import ../../[peerid, peerinfo]
import algorithm
import chronicles
type
LookupNode* = object
peerId: PeerId
distance: XorDistance
queried: bool # have we already queried this node?
pending: bool # is there an active request right now?
failed: bool # did the query time out or error?
LookupState* = object
targetId: Key
shortlist: seq[LookupNode] # currently known closest nodes
activeQueries*: int # how many queries in flight
alpha: int # parallelism level
repliCount: int ## aka `k` in the spec: number of closest nodes to find
done*: bool # has lookup converged
proc alreadyInShortlist(state: LookupState, peer: Peer): bool =
return state.shortlist.anyIt(it.peerId.getBytes() == peer.id)
proc updateShortlist*(
state: var LookupState,
msg: Message,
onInsert: proc(p: PeerInfo) {.gcsafe.},
hasher: Opt[XorDHasher],
) =
for newPeer in msg.closerPeers.filterIt(not alreadyInShortlist(state, it)):
let peerInfo = PeerInfo(peerId: PeerId.init(newPeer.id).get(), addrs: newPeer.addrs)
try:
onInsert(peerInfo)
state.shortlist.add(
LookupNode(
peerId: peerInfo.peerId,
distance: xorDistance(peerInfo.peerId, state.targetId, hasher),
queried: false,
pending: false,
failed: false,
)
)
except Exception as exc:
debug "could not update shortlist", err = exc.msg
state.shortlist.sort(
proc(a, b: LookupNode): int =
cmp(a.distance, b.distance)
)
state.activeQueries.dec
proc markFailed*(state: var LookupState, peerId: PeerId) =
for p in mitems(state.shortlist):
if p.peerId == peerId:
p.failed = true
p.pending = false
p.queried = true
state.activeQueries.dec
break
proc markPending*(state: var LookupState, peerId: PeerId) =
for p in mitems(state.shortlist):
if p.peerId == peerId:
p.pending = true
p.queried = true
break
proc selectAlphaPeers*(state: LookupState): seq[PeerId] =
var selected: seq[PeerId] = @[]
for p in state.shortlist:
if not p.queried and not p.failed and not p.pending:
selected.add(p.peerId)
if selected.len >= state.alpha:
break
return selected
proc init*(
T: type LookupState,
targetId: Key,
initialPeers: seq[PeerId],
hasher: Opt[XorDHasher],
): T =
var res = LookupState(
targetId: targetId,
shortlist: @[],
activeQueries: 0,
alpha: alpha,
repliCount: DefaultReplic,
done: false,
)
for p in initialPeers:
res.shortlist.add(
LookupNode(
peerId: p,
distance: xorDistance(p, targetId, hasher),
queried: false,
pending: false,
failed: false,
)
)
res.shortlist.sort(
proc(a, b: LookupNode): int =
cmp(a.distance, b.distance)
)
return res
proc selectClosestK*(state: LookupState): seq[PeerId] =
var res: seq[PeerId] = @[]
for p in state.shortlist.filterIt(not it.failed):
res.add(p.peerId)
if res.len >= state.repliCount:
break
return res
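A comment sketch of the driver loop these helpers support (compare findNode in kademlia.nim; names are as defined above):
# var state = LookupState.init(targetId, initialPeers, Opt.none(XorDHasher))
# while not state.done:
#   for p in state.selectAlphaPeers(): # up to `alpha` fresh peers
#     state.markPending(p)             # ...then query p asynchronously
#   # each reply:   state.updateShortlist(msg, onInsert, hasher)
#   # each timeout: state.markFailed(peer)
#   state.done = state.activeQueries == 0 and state.selectAlphaPeers().len == 0
# let closest = state.selectClosestK() # final k-closest result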

View File

@@ -39,9 +39,11 @@ type
closerPeers*: seq[Peer]
providerPeers*: seq[Peer]
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].}
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [], gcsafe.}
proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].}
proc writeOpt*[T](
pb: var ProtoBuffer, field: int, opt: Option[T]
) {.raises: [], gcsafe.}
proc encode*(record: Record): ProtoBuffer {.raises: [].} =
var pb = initProtoBuffer()
@@ -60,7 +62,7 @@ proc encode*(peer: Peer): ProtoBuffer {.raises: [].} =
pb.finish()
return pb
proc encode*(msg: Message): ProtoBuffer {.raises: [].} =
proc encode*(msg: Message): ProtoBuffer {.raises: [], gcsafe.} =
var pb = initProtoBuffer()
pb.write(1, uint32(ord(msg.msgType)))
@@ -80,11 +82,13 @@ proc encode*(msg: Message): ProtoBuffer {.raises: [].} =
return pb
proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].} =
proc writeOpt*[T](
pb: var ProtoBuffer, field: int, opt: Option[T]
) {.raises: [], gcsafe.} =
opt.withValue(v):
pb.write(field, v)
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].} =
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [], gcsafe.} =
pb.write(field, value.encode())
proc getOptionField[T: ProtoScalar | string | seq[byte]](
@@ -120,7 +124,7 @@ proc decode*(T: type Peer, pb: ProtoBuffer): ProtoResult[Option[T]] =
return ok(some(p))
proc decode*(T: type Message, buf: seq[byte]): ProtoResult[Option[T]] =
proc decode*(T: type Message, buf: seq[byte]): ProtoResult[T] =
var
m: Message
key: seq[byte]
@@ -156,4 +160,4 @@ proc decode*(T: type Message, buf: seq[byte]): ProtoResult[Option[T]] =
peer.withValue(peer):
m.providerPeers.add(peer)
return ok(some(m))
return ok(m)

View File

@@ -0,0 +1,34 @@
import ../../peerid
import ../../switch
import ../../peerstore
import ./protobuf
import ../../protobuf/minprotobuf
import ./keys
proc encodeFindNodeReply*(
closerPeers: seq[Key], switch: Switch
): ProtoBuffer {.raises: [].} =
var msg: Message
msg.msgType = MessageType.findNode
for peer in closerPeers:
let peer =
try:
peer.toPeerId()
except ValueError:
continue
let addrs = switch.peerStore[AddressBook][peer]
if addrs.len == 0:
continue
let p = Peer(
id: peer.getBytes(),
addrs: addrs,
connection:
# TODO: this should likely be optional as it can reveal the network graph of a node
if switch.isConnected(peer):
ConnectionType.connected
else:
ConnectionType.notConnected,
)
msg.closerPeers.add(p)
return msg.encode()

View File

@@ -7,6 +7,8 @@ import ./keys
import ./xordistance
import ../../peerid
import sequtils
import ../../utils/sequninit
import results
logScope:
topics = "kad-dht rtable"
@@ -22,12 +24,16 @@ type
RoutingTable* = ref object
selfId*: Key
buckets*: seq[Bucket]
hasher*: Opt[XorDHasher]
proc init*(T: typedesc[RoutingTable], selfId: Key): T =
return RoutingTable(selfId: selfId, buckets: @[])
proc `$`*(rt: RoutingTable): string =
"selfId(" & $rt.selfId & ") buckets(" & $rt.buckets & ")"
proc bucketIndex*(selfId, key: Key): int =
return xorDistance(selfId, key).leadingZeros
proc init*(T: typedesc[RoutingTable], selfId: Key, hasher: Opt[XorDHasher]): T =
return RoutingTable(selfId: selfId, buckets: @[], hasher: hasher)
proc bucketIndex*(selfId, key: Key, hasher: Opt[XorDHasher]): int =
return xorDistance(selfId, key, hasher).leadingZeros
proc peerIndexInBucket(bucket: var Bucket, nodeId: Key): Opt[int] =
for i, p in bucket.peers:
@@ -39,7 +45,7 @@ proc insert*(rtable: var RoutingTable, nodeId: Key): bool =
if nodeId == rtable.selfId:
return false # No self insertion
let idx = bucketIndex(rtable.selfId, nodeId)
let idx = bucketIndex(rtable.selfId, nodeId, rtable.hasher)
if idx >= maxBuckets:
trace "cannot insert node. max buckets have been reached",
nodeId, bucketIdx = idx, maxBuckets
@@ -53,12 +59,12 @@ proc insert*(rtable: var RoutingTable, nodeId: Key): bool =
let keyx = peerIndexInBucket(bucket, nodeId)
if keyx.isSome:
bucket.peers[keyx.unsafeValue].lastSeen = Moment.now()
elif bucket.peers.len < k:
elif bucket.peers.len < DefaultReplic:
bucket.peers.add(NodeEntry(nodeId: nodeId, lastSeen: Moment.now()))
else:
# TODO: eviction policy goes here, rn we drop the node
trace "cannot insert node in bucket, dropping node",
nodeId, bucket = k, bucketIdx = idx
nodeId, bucket = DefaultReplic, bucketIdx = idx
return false
rtable.buckets[idx] = bucket
@@ -76,7 +82,9 @@ proc findClosest*(rtable: RoutingTable, targetId: Key, count: int): seq[Key] =
allNodes.sort(
proc(a, b: Key): int =
cmp(xorDistance(a, targetId), xorDistance(b, targetId))
cmp(
xorDistance(a, targetId, rtable.hasher), xorDistance(b, targetId, rtable.hasher)
)
)
return allNodes[0 ..< min(count, allNodes.len)]
@@ -112,7 +120,7 @@ proc randomKeyInBucketRange*(
let totalBits = raw.len * 8
let lsbStart = bucketIndex + 1
let lsbBytes = (totalBits - lsbStart + 7) div 8
var randomBuf = newSeqUninitialized[byte](lsbBytes)
var randomBuf = newSeqUninit[byte](lsbBytes)
hmacDrbgGenerate(rng[], randomBuf)
for i in lsbStart ..< totalBits:

View File

@@ -1,9 +1,27 @@
import ./consts
import stew/arrayOps
import ./keys
import nimcrypto/sha2
import ../../peerid
import results
type XorDistance* = array[IdLength, byte]
type XorDHasher* = proc(input: seq[byte]): array[IdLength, byte] {.
raises: [], nimcall, noSideEffect, gcsafe
.}
proc defaultHasher(
input: seq[byte]
): array[IdLength, byte] {.raises: [], nimcall, noSideEffect, gcsafe.} =
return sha256.digest(input).data
# useful for testing purposes
proc noOpHasher*(
input: seq[byte]
): array[IdLength, byte] {.raises: [], nimcall, noSideEffect, gcsafe.} =
var data: array[IdLength, byte]
discard data.copyFrom(input)
return data
proc countLeadingZeroBits*(b: byte): int =
for i in 0 .. 7:
@@ -31,25 +49,23 @@ proc `<`*(a, b: XorDistance): bool =
proc `<=`*(a, b: XorDistance): bool =
cmp(a, b) <= 0
proc hashFor(k: Key): seq[byte] =
proc hashFor(k: Key, hasher: Opt[XorDHasher]): seq[byte] =
return
@(
case k.kind
of KeyType.PeerId:
sha256.digest(k.peerId.getBytes()).data
hasher.get(defaultHasher)(k.peerId.getBytes())
of KeyType.Raw:
sha256.digest(k.data).data
of KeyType.Unhashed:
k.data
hasher.get(defaultHasher)(k.data)
)
proc xorDistance*(a, b: Key): XorDistance =
let hashA = a.hashFor()
let hashB = b.hashFor()
proc xorDistance*(a, b: Key, hasher: Opt[XorDHasher]): XorDistance =
let hashA = a.hashFor(hasher)
let hashB = b.hashFor(hasher)
var response: XorDistance
for i in 0 ..< hashA.len:
response[i] = hashA[i] xor hashB[i]
return response
proc xorDistance*(a: PeerId, b: Key): XorDistance =
xorDistance(a.toKey(), b)
proc xorDistance*(a: PeerId, b: Key, hasher: Opt[XorDHasher]): XorDistance =
xorDistance(a.toKey(), b, hasher)
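A worked example with noOpHasher, where the distance is just the byte-wise XOR of the raw key bytes (keys are illustrative; leadingZeros is the XorDistance helper defined alongside countLeadingZeroBits):
let zero = newSeq[byte](IdLength).toKey() # 32 zero bytes
var other = newSeq[byte](IdLength)
other[0] = 0x10'u8 # 0b0001_0000
let d = xorDistance(zero, other.toKey(), Opt.some(XorDHasher(noOpHasher)))
doAssert d.leadingZeros == 3 # three zero bits before the first set bit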

View File

@@ -57,7 +57,8 @@ proc perf*(
statsCopy.uploadBytes += toWrite.uint
p.stats = statsCopy
await conn.close()
# Close write side of the stream (half-close) to signal EOF to server
await conn.closeWrite()
size = sizeToRead
@@ -71,6 +72,9 @@ proc perf*(
statsCopy.duration = Moment.now() - start
statsCopy.downloadBytes += toRead.uint
p.stats = statsCopy
# Close the connection after reading
await conn.close()
except CancelledError as e:
raise e
except LPStreamError as e:

View File

@@ -24,28 +24,29 @@ type Perf* = ref object of LPProtocol
proc new*(T: typedesc[Perf]): T {.public.} =
var p = T()
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
var bytesRead = 0
try:
trace "Received benchmark performance check", conn
var
sizeBuffer: array[8, byte]
size: uint64
await conn.readExactly(addr sizeBuffer[0], 8)
size = uint64.fromBytesBE(sizeBuffer)
var toReadBuffer: array[PerfSize, byte]
try:
while true:
bytesRead += await conn.readOnce(addr toReadBuffer[0], PerfSize)
except CatchableError as exc:
discard
var uploadSizeBuffer: array[8, byte]
await conn.readExactly(addr uploadSizeBuffer[0], 8)
var uploadSize = uint64.fromBytesBE(uploadSizeBuffer)
var buf: array[PerfSize, byte]
while size > 0:
let toWrite = min(size, PerfSize)
await conn.write(buf[0 ..< toWrite])
size -= toWrite
var readBuffer: array[PerfSize, byte]
while not conn.atEof:
try:
let readBytes = await conn.readOnce(addr readBuffer[0], PerfSize)
if readBytes == 0:
break
except LPStreamEOFError:
break
var writeBuffer: array[PerfSize, byte]
while uploadSize > 0:
let toWrite = min(uploadSize, PerfSize)
await conn.write(writeBuffer[0 ..< toWrite])
uploadSize -= toWrite
except CancelledError as exc:
trace "cancelled perf handler"
raise exc
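In sum, the reworked handler follows this wire flow (a sketch derived from the code above):
# client -> server: 8-byte big-endian download size N, then the upload payload,
#                   then closeWrite() so the server sees EOF
# server:           reads N, drains the upload until EOF / a 0-byte read,
#                   then streams N bytes back in PerfSize chunks
# client:           reads N bytes, records stats, then close()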

View File

@@ -0,0 +1,37 @@
import chronos
import std/atomics
const DefaultAlpha = 0.3
const InitialRate = 2_500_000 # bytes per second
type
ExponentialMovingAverage* = ref object
alpha: float
value: Atomic[float64]
BandwidthTracking* = ref object
download*: ExponentialMovingAverage
proc init*(T: type[ExponentialMovingAverage], alpha: float = DefaultAlpha): T =
let e = ExponentialMovingAverage(alpha: alpha)
e.value.store(InitialRate)
return e
proc init*(T: type[BandwidthTracking], alpha: float = DefaultAlpha): T =
BandwidthTracking(download: ExponentialMovingAverage.init(alpha))
proc update*(e: var ExponentialMovingAverage, startAt: Moment, bytes: int) =
let elapsedTime = Moment.now() - startAt
let curSample = float(bytes * 1000) / elapsedTime.milliseconds.float
let oldSample = e.value.load()
let ema = e.alpha * curSample + (1.0 - e.alpha) * oldSample
e.value.store(ema)
proc value*(e: var ExponentialMovingAverage): float =
e.value.load()
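Worked numbers for one update, using the defaults above (alpha = 0.3, initial rate 2_500_000 B/s) and an assumed sample:
# sample: 500_000 bytes received in 500 ms
#   curSample = 500_000 * 1000 / 500 = 1_000_000 B/s
#   ema = 0.3 * 1_000_000 + 0.7 * 2_500_000
#       = 300_000 + 1_750_000 = 2_050_000 B/s
# The estimate moves toward the new sample while keeping most of the history.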
proc calculateReceiveTimeMs*(msgLen: int64, dataRate: int64 = InitialRate): int64 =
let txTime = ((msgLen * 1000) div dataRate)
# Ideally (RTT * 2) + 5% of txTime? Many test runs are needed to tune the safety margin
let margin = 250 + (txTime.float64 * 0.05)
result = txTime + margin.int64
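And the corresponding receive-time estimate for a 1 MB message at the initial rate:
# msgLen = 1_000_000, dataRate = 2_500_000
#   txTime = 1_000_000 * 1000 div 2_500_000 = 400 ms
#   margin = 250 + 400 * 0.05 = 270 ms
#   result = 400 + 270 = 670 ms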

View File

@@ -29,10 +29,13 @@ import
../../utility,
../../switch
when defined(libp2p_gossipsub_1_4):
import ./bandwidth
import results
export results
import ./gossipsub/[types, scoring, behavior], ../../utils/heartbeat
import ./gossipsub/[types, scoring, behavior, preamblestore], ../../utils/heartbeat
export types, scoring, behavior, pubsub
@@ -51,6 +54,10 @@ declareCounter(
declareCounter(
libp2p_gossipsub_idontwant_saved_messages, "number of duplicates avoided by idontwant"
)
declareCounter(
libp2p_gossipsub_imreceiving_saved_messages,
"number of duplicates avoided by imreceiving",
)
declareCounter(
libp2p_gossipsub_saved_bytes,
"bytes saved by gossipsub optimizations",
@@ -222,6 +229,10 @@ method init*(g: GossipSub) =
raise exc
g.handler = handler
when defined(libp2p_gossipsub_1_4):
g.codecs &= GossipSubCodec_14
g.codecs &= GossipSubCodec_12
g.codecs &= GossipSubCodec_11
g.codecs &= GossipSubCodec_10
@@ -240,6 +251,9 @@ method onNewPeer*(g: GossipSub, peer: PubSubPeer) =
peer.iHaveBudget = IHavePeerBudget
peer.pingBudget = PingsPeerBudget
when defined(libp2p_gossipsub_1_4):
peer.preambleBudget = PreamblePeerBudget
method onPubSubPeerEvent*(
p: GossipSub, peer: PubSubPeer, event: PubSubPeerEvent
) {.gcsafe.} =
@@ -346,11 +360,14 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
var respControl: ControlMessage
g.handleIDontWant(peer, control.idontwant)
when defined(libp2p_gossipsub_1_4):
g.handlePreamble(peer, control.preamble)
g.handleIMReceiving(peer, control.imreceiving)
let iwant = g.handleIHave(peer, control.ihave)
if iwant.messageIDs.len > 0:
respControl.iwant.add(iwant)
respControl.prune.add(g.handleGraft(peer, control.graft))
let messages = g.handleIWant(peer, control.iwant)
let (messages, msgIDs) = g.handleIWant(peer, control.iwant)
let
isPruneNotEmpty = respControl.prune.len > 0
@@ -371,13 +388,34 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
g.send(peer, RPCMsg(control: some(respControl)), isHighPriority = true)
if messages.len > 0:
for smsg in messages:
when defined(libp2p_gossipsub_1_4):
var preambles: seq[ControlPreamble]
for i, smsg in messages:
let topic = smsg.topic
if g.knownTopics.contains(topic):
libp2p_pubsub_broadcast_messages.inc(labelValues = [topic])
else:
libp2p_pubsub_broadcast_messages.inc(labelValues = ["generic"])
when defined(libp2p_gossipsub_1_4):
# should we send a preamble here? (Not in the specs so far.)
# The receiver will then send IMReceiving only for preambles received from mesh members
preambles.add(
ControlPreamble(
topicID: smsg.topic,
messageID: msgIDs[i],
messageLength: smsg.data.len.uint32,
)
)
when defined(libp2p_gossipsub_1_4):
g.broadcast(
@[peer],
RPCMsg(control: some(ControlMessage(preamble: preambles))),
isHighPriority = true,
)
# iwant replies have lower priority
trace "sending iwant reply messages", peer
g.send(peer, RPCMsg(messages: messages), isHighPriority = false)
@@ -411,6 +449,34 @@ proc sendIDontWant(
isHighPriority = true,
)
when defined(libp2p_gossipsub_1_4):
const preambleMessageSizeThreshold* = 40 * 1024 # 40KiB
proc sendPreamble(
g: GossipSub, msg: Message, msgId: MessageId, toSendPeers: var HashSet[PubSubPeer]
) =
if msg.data.len < preambleMessageSizeThreshold:
return
g.broadcast(
toSendPeers.filterIt(it.codec == GossipSubCodec_14),
RPCMsg(
control: some(
ControlMessage(
preamble:
@[
ControlPreamble(
topicID: msg.topic,
messageID: msgId,
messageLength: msg.data.len.uint32,
)
]
)
)
),
isHighPriority = true,
)
const iDontWantMessageSizeThreshold* = 512
proc isLargeMessage(msg: Message, msgId: MessageId): bool =
@@ -489,6 +555,28 @@ proc validateAndRelay(
toSendPeers.exclIfIt(isMsgInIdontWant(it))
when defined(libp2p_gossipsub_1_4):
proc isMsgInIMReceiving(it: PubSubPeer): bool =
if it.heIsReceivings.hasKey(msgId):
libp2p_gossipsub_imreceiving_saved_messages.inc
return true
return false
proc deferSend(deferPeers: HashSet[PubSubPeer]) {.async.} =
let receiveTimeMs = calculateReceiveTimeMs(msg.data.len)
await sleepAsync(receiveTimeMs.milliseconds)
for deferPeer in deferPeers:
if not deferPeer.isMsgInIdontWant:
#No need to send preamble at timeout
g.broadcast(@[deferPeer], RPCMsg(messages: @[msg]), isHighPriority = false)
let allPeers = toSendPeers
toSendPeers.exclIfIt(isMsgInIMReceiving(it))
g.sendPreamble(msg, msgId, toSendPeers)
if not PullOperation:
let receivingPeers = allPeers - toSendPeers
asyncSpawn deferSend(receivingPeers)
# In theory, if topics are the same in all messages, we could batch - we'd
# also have to be careful to only include validated messages
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)
@@ -602,6 +690,14 @@ method rpcHandler*(
msgId = msgIdResult.get
msgIdSalted = g.salt(msgId)
when defined(libp2p_gossipsub_1_4):
if msg.data.len > preambleMessageSizeThreshold:
g.ongoingReceives.del(msgId)
g.ongoingIWantReceives.del(msgId)
var startTime: Moment
if peer.heIsSendings.pop(msgId, startTime):
peer.bandwidthTracking.download.update(startTime, msg.data.len)
if g.addSeen(msgIdSalted):
trace "Dropping already-seen message", msgId = shortLog(msgId), peer
@@ -629,9 +725,8 @@ method rpcHandler*(
continue
if (msg.signature.len > 0 or g.verifySignature) and not msg.verify():
# always validate if signature is present or required
debug "Dropping message due to failed signature verification",
msgId = shortLog(msgId), peer
debug "Dropping message due to failed signature verification", msg = msg
await g.punishInvalidMessage(peer, msg)
continue
@@ -794,7 +889,7 @@ method publish*(
let pubParams = publishParams.get(PublishParams())
let peers =
var peers =
if pubParams.useCustomConn:
g.makePeersForPublishUsingCustomConn(topic)
else:
@@ -836,8 +931,12 @@ method publish*(
if not pubParams.skipMCache:
g.mcache.put(msgId, msg)
if g.parameters.sendIDontWantOnPublish and isLargeMessage(msg, msgId):
g.sendIDontWant(msg, msgId, peers)
if g.parameters.sendIDontWantOnPublish:
if isLargeMessage(msg, msgId):
g.sendIDontWant(msg, msgId, peers)
when defined(libp2p_gossipsub_1_4):
g.sendPreamble(msg, msgId, peers)
g.broadcast(
peers,
@@ -898,6 +997,8 @@ method start*(
g.heartbeatFut = g.heartbeat()
g.scoringHeartbeatFut = g.scoringHeartbeat()
g.directPeersLoop = g.maintainDirectPeers()
when defined(libp2p_gossipsub_1_4):
g.preambleExpirationFut = g.preambleExpirationHeartbeat()
g.started = true
fut
@@ -912,6 +1013,9 @@ method stop*(g: GossipSub): Future[void] {.async: (raises: [], raw: true).} =
return fut
# stop heartbeat interval
when defined(libp2p_gossipsub_1_4):
g.preambleExpirationFut.cancelSoon()
g.directPeersLoop.cancelSoon()
g.scoringHeartbeatFut.cancelSoon()
g.heartbeatFut.cancelSoon()

View File

@@ -24,6 +24,9 @@ import
signed_envelope,
utils/heartbeat,
]
when defined(libp2p_gossipsub_1_4):
import ./preamblestore
import ../bandwidth
logScope:
topics = "libp2p gossipsub"
@@ -60,6 +63,13 @@ declareCounter(
labels = ["topic"],
)
declareGauge(libp2p_gossipsub_received_iwants, "received iwants", labels = ["kind"])
declareCounter(
libp2p_gossipsub_preamble_saved_iwants,
"number of iwant requests avoided by preamble",
labels = ["topic"],
)
const MaxHeIsReceiving = 50
proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) =
g.withPeerStats(p.peerId) do(stats: var PeerStats):
@@ -277,6 +287,11 @@ proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) =
for handler in g.routingRecordsHandler:
handler(peer.peerId, topic, routingRecords)
when defined(libp2p_gossipsub_1_4):
proc addPossiblePeerToQuery(g: GossipSub, peer: PubSubPeer, messageId: MessageId) =
g.ongoingReceives.addPossiblePeerToQuery(messageId, peer)
g.ongoingIWantReceives.addPossiblePeerToQuery(messageId, peer)
proc handleIHave*(
g: GossipSub, peer: PubSubPeer, ihaves: seq[ControlIHave]
): ControlIWant =
@@ -294,6 +309,14 @@ proc handleIHave*(
if peer.iHaveBudget <= 0:
break
elif msgId notin res.messageIDs:
when defined(libp2p_gossipsub_1_4):
if g.ongoingReceives.hasKey(msgId) or
g.ongoingIWantReceives.hasKey(msgId):
g.addPossiblePeerToQuery(peer, msgId)
libp2p_gossipsub_preamble_saved_iwants.inc(
labelValues = [ihave.topicID]
)
continue
res.messageIDs.add(msgId)
dec peer.iHaveBudget
trace "requested message via ihave", messageID = msgId
@@ -308,13 +331,15 @@ proc handleIDontWant*(g: GossipSub, peer: PubSubPeer, iDontWants: seq[ControlIWa
if peer.iDontWants[0].len >= IDontWantMaxCount:
break
peer.iDontWants[0].incl(g.salt(messageId))
when defined(libp2p_gossipsub_1_4):
peer.heIsReceivings.del(messageId)
g.addPossiblePeerToQuery(peer, messageId)
proc handleIWant*(
g: GossipSub, peer: PubSubPeer, iwants: seq[ControlIWant]
): seq[Message] =
var
messages: seq[Message]
invalidRequests = 0
): tuple[messages: seq[Message], ids: seq[MessageId]] =
var response: tuple[messages: seq[Message], ids: seq[MessageId]]
var invalidRequests = 0
if peer.score < g.parameters.gossipThreshold:
trace "iwant: ignoring low score peer", peer, score = peer.score
else:
@@ -328,14 +353,101 @@ proc handleIWant*(
invalidRequests.inc()
if invalidRequests > 20:
libp2p_gossipsub_received_iwants.inc(1, labelValues = ["skipped"])
return messages
return response
continue
let msg = g.mcache.get(mid).valueOr:
libp2p_gossipsub_received_iwants.inc(1, labelValues = ["unknown"])
continue
libp2p_gossipsub_received_iwants.inc(1, labelValues = ["correct"])
messages.add(msg)
return messages
response.messages.add(msg)
response.ids.add(mid)
return response
when defined(libp2p_gossipsub_1_4):
proc medianDownloadRate*(p: var HashSet[PubSubPeer]): float =
if p.len == 0:
return 0
let vals = p.toSeq().mapIt(it.bandwidthTracking.download.value()).sorted()
let mid = vals.len div 2
if vals.len mod 2 == 0:
(vals[mid - 1] + vals[mid]) / 2
else:
vals[mid]
proc handlePreamble*(
g: GossipSub, peer: PubSubPeer, preambles: seq[ControlPreamble]
) =
let starts = Moment.now()
for preamble in preambles:
dec peer.preambleBudget
if peer.preambleBudget <= 0:
return
if g.hasSeen(g.salt(preamble.messageID)):
continue
elif peer.heIsSendings.hasKey(preamble.messageID):
continue
elif g.ongoingReceives.hasKey(preamble.messageID):
#TODO: add to conflicts_watch if length is different
continue
else:
peer.heIsSendings[preamble.messageID] = starts
var toSendPeers = HashSet[PubSubPeer]()
g.mesh.withValue(preamble.topicID, peers):
toSendPeers.incl(peers[])
toSendPeers.incl(g.subscribedDirectPeers.getOrDefault(preamble.topicID))
var peers = toSendPeers.filterIt(it.codec == GossipSubCodec_14)
let bytesPerSecond = peer.bandwidthTracking.download.value()
let transmissionTimeMs =
calculateReceiveTimeMs(preamble.messageLength.int64, bytesPerSecond.int64)
let expires = starts + transmissionTimeMs.milliseconds
#We send imreceiving only if received from mesh members
if peer notin peers:
if not g.ongoingIWantReceives.hasKey(preamble.messageID):
g.ongoingIWantReceives[preamble.messageID] =
PreambleInfo.init(preamble, peer, starts, expires)
trace "preamble: ignoring out of mesh peer", peer
continue
g.ongoingReceives[preamble.messageID] =
PreambleInfo.init(preamble, peer, starts, expires)
#Send imreceiving only if received from faster mesh members
if bytesPerSecond >= toSendPeers.medianDownloadRate():
g.broadcast(
peers,
RPCMsg(
control: some(
ControlMessage(
imreceiving:
@[
ControlIMReceiving(
messageID: preamble.messageID,
messageLength: preamble.messageLength,
)
]
)
)
),
isHighPriority = true,
)
proc handleIMReceiving*(
g: GossipSub, peer: PubSubPeer, imreceivings: seq[ControlIMReceiving]
) =
for imreceiving in imreceivings:
if peer.heIsReceivings.len > MaxHeIsReceiving:
break
#Ignore if message length is different
g.ongoingReceives.withValue(imreceiving.messageID, pInfo):
if pInfo.messageLength != imreceiving.messageLength:
continue
peer.heIsReceivings[imreceiving.messageID] = imreceiving.messageLength
# No need to check mcache: we might have already transmitted it or be transmitting it
proc commitMetrics(metrics: var MeshMetrics) =
libp2p_gossipsub_low_peers_topics.set(metrics.lowPeersTopics)
@@ -710,6 +822,9 @@ proc onHeartbeat(g: GossipSub) =
peer.iHaveBudget = IHavePeerBudget
peer.pingBudget = PingsPeerBudget
when defined(libp2p_gossipsub_1_4):
peer.preambleBudget = PreamblePeerBudget
var meshMetrics = MeshMetrics()
for t in toSeq(g.topics.keys):
@@ -777,3 +892,72 @@ proc heartbeat*(g: GossipSub) {.async: (raises: [CancelledError]).} =
for trigger in g.heartbeatEvents:
trace "firing heartbeat event", instance = cast[int](g)
trigger.fire()
when defined(libp2p_gossipsub_1_4):
proc preambleExpirationHeartbeat*(
g: GossipSub
) {.async: (raises: [CancelledError]).} =
heartbeat "GossipSub: Preamble Expiration", 200.milliseconds:
trace "running preamble expiration heartbeat", instance = cast[int](g)
while true:
var expiredOngoingReceive = g.ongoingReceives.popExpired(Moment.now()).valueOr:
break
if not expiredOngoingReceive.sender.isNil:
let sender = expiredOngoingReceive.sender
if g.peers.hasKey(sender.peerId):
sender.behaviourPenalty += 0.1
if PullOperation:
var possiblePeers = expiredOngoingReceive.possiblePeersToQuery()
g.rng.shuffle(possiblePeers)
var peer: PubSubPeer = nil
for peerId in possiblePeers:
try:
if g.peers.hasKey(peerId) and g.peers[peerId].codec == GossipSubCodec_14:
peer = g.peers[peerId]
break
except KeyError:
assert false, "checked with hasKey"
if peer.isNil:
trace "no peer available to send IWANT for an expiredOngoingReceive",
messageID = expiredOngoingReceive.messageId
continue
let starts = Moment.now()
g.broadcast(
@[peer],
RPCMsg(
control: some(
ControlMessage(
iwant: @[ControlIWant(messageIDs: @[expiredOngoingReceive.messageId])]
)
)
),
isHighPriority = true,
)
let bytesPerSecond = peer.bandwidthTracking.download.value()
let transmissionTimeMs = calculateReceiveTimeMs(
expiredOngoingReceive.messageLength.int64, bytesPerSecond.int64
)
let expires = starts + transmissionTimeMs.milliseconds
# Setting new data before reinserting the preamble
expiredOngoingReceive.startsAt = starts
expiredOngoingReceive.expiresAt = expires
expiredOngoingReceive.sender = peer
g.ongoingIWantReceives[expiredOngoingReceive.messageId] =
expiredOngoingReceive
while true:
let expiredOngoingIWantReceived = g.ongoingIWantReceives.popExpired(
Moment.now()
).valueOr:
break
# TODO: use expiredOngoingIWantReceived
# TODO: what should we do here?

@@ -0,0 +1,104 @@
import std/[tables, heapqueue, sets, options]
import ./types
import chronos
import ../rpc/messages
import ../../../peerid
import ../pubsubpeer
proc `<`(a, b: PreambleInfo): bool =
a.expiresAt < b.expiresAt
proc init*(T: typedesc[PeerSet]): T =
PeerSet(order: @[], peers: initHashSet[PeerId]())
proc init*(
T: typedesc[PreambleInfo],
preamble: ControlPreamble,
sender: PubSubPeer,
startsAt: Moment,
expiresAt: Moment,
): T =
PreambleInfo(
messageId: preamble.messageID,
messageLength: preamble.messageLength,
topicId: preamble.topicID,
sender: sender,
startsAt: startsAt,
expiresAt: expiresAt,
peerSet: PeerSet.init(),
)
proc init*(T: typedesc[PreambleStore]): T =
result.byId = initTable[MessageId, PreambleInfo]()
result.heap = initHeapQueue[PreambleInfo]()
proc insert*(ps: var PreambleStore, msgId: MessageId, info: PreambleInfo) =
try:
if ps.byId.hasKey(msgId):
ps.byId[msgId].deleted = true
ps.byId[msgId] = info
ps.heap.push(info)
except KeyError:
assert false, "checked with hasKey"
proc hasKey*(ps: var PreambleStore, msgId: MessageId): bool =
return ps.byId.hasKey(msgId)
proc `[]`*(ps: var PreambleStore, msgId: MessageId): PreambleInfo =
ps.byId[msgId]
proc `[]=`*(ps: var PreambleStore, msgId: MessageId, entry: PreambleInfo) =
insert(ps, msgId, entry)
proc del*(ps: var PreambleStore, msgId: MessageId) =
try:
if ps.byId.hasKey(msgId):
ps.byId[msgId].deleted = true
ps.byId.del(msgId)
except KeyError:
assert false, "checked with hasKey"
proc len*(ps: var PreambleStore): int =
return ps.byId.len
proc popExpired*(ps: var PreambleStore, now: Moment): Option[PreambleInfo] =
while ps.heap.len > 0:
if ps.heap[0].deleted:
discard ps.heap.pop()
elif ps.heap[0].expiresAt <= now:
let top = ps.heap.pop()
ps.byId.del(top.messageId)
return some(top)
else:
return none(PreambleInfo)
template withValue*(ps: var PreambleStore, key: MessageId, value, body: untyped) =
try:
if ps.hasKey(key):
let value {.inject.} = ps.byId[key]
body
except system.KeyError:
    assert false, "checked with hasKey"
const maxPossiblePeersOnPeerSet = 6
proc addPossiblePeerToQuery*(
ps: var PreambleStore, msgId: MessageId, peer: PubSubPeer
) =
if not ps.hasKey(msgId):
return
try:
var preamble = ps[msgId]
if not preamble.peerSet.peers.contains(peer.peerId):
if preamble.peerSet.order.len == maxPossiblePeersOnPeerSet:
let evicted: PeerId = preamble.peerSet.order[0]
preamble.peerSet.order.delete(0)
preamble.peerSet.peers.excl(evicted)
preamble.peerSet.order.add(peer.peerId)
preamble.peerSet.peers.incl(peer.peerId)
except KeyError:
assert false, "checked with hasKey"
proc possiblePeersToQuery*(preamble: PreambleInfo): seq[PeerId] =
preamble.peerSet.order
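Taken together, the store pairs a Table for O(1) lookup with a HeapQueue ordered by expiresAt: del and re-insert only plant a tombstone, and popExpired lazily discards tombstoned heap entries. A hedged usage sketch (somePeer is a hypothetical PubSubPeer obtained elsewhere; values illustrative):

var store = PreambleStore.init()
let info = PreambleInfo.init(
  ControlPreamble(topicID: "t", messageID: @[1'u8, 2, 3], messageLength: 512),
  somePeer,
  Moment.now(),
  Moment.now() + 200.milliseconds,
)
store[info.messageId] = info
store.del(info.messageId) # plants a tombstone; the heap entry stays behind
# the tombstoned entry is skipped, so nothing is reported as expired
assert store.popExpired(Moment.now() + 1.seconds).isNone()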

@@ -10,7 +10,7 @@
{.push raises: [].}
import chronos
import std/[options, tables, sets]
import std/[options, tables, sets, heapqueue]
import ".."/[floodsub, peertable, mcache, pubsubpeer]
import "../rpc"/[messages]
import "../../.."/[peerid, multiaddress, utility]
@@ -18,6 +18,7 @@ import "../../.."/[peerid, multiaddress, utility]
export options, tables, sets
const
GossipSubCodec_14* = "/meshsub/1.4.0"
GossipSubCodec_12* = "/meshsub/1.2.0"
GossipSubCodec_11* = "/meshsub/1.1.0"
GossipSubCodec_10* = "/meshsub/1.0.0"
@@ -46,6 +47,8 @@ const
BackoffSlackTime* = 2 # seconds
PingsPeerBudget* = 100 # maximum of 6.4kb/heartbeat (6.4kb/s with default 1 second/hb)
IHavePeerBudget* = 10
PreamblePeerBudget* = 10
PullOperation* = true
# the max amount of IHave to expose, not by spec, but go as example
# rust sigp: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/config.rs#L572
# go: https://github.com/libp2p/go-libp2p-pubsub/blob/08c17398fb11b2ab06ca141dddc8ec97272eb772/gossipsub.go#L155
@@ -65,6 +68,24 @@ type
meshFailurePenalty*: float64
invalidMessageDeliveries*: float64
PeerSet* = object
order*: seq[PeerId]
peers*: HashSet[PeerId]
PreambleInfo* = ref object
messageId*: MessageId
messageLength*: uint32
topicId*: string
sender*: PubSubPeer
startsAt*: Moment
expiresAt*: Moment
deleted*: bool # tombstone marker
peerSet*: PeerSet
PreambleStore* = object
byId*: Table[MessageId, PreambleInfo]
heap*: HeapQueue[PreambleInfo]
TopicParams* {.public.} = object
topicWeight*: float64
@@ -162,6 +183,7 @@ type
BackoffTable* = Table[string, Table[PeerId, Moment]]
ValidationSeenTable* = Table[SaltedId, HashSet[PubSubPeer]]
OngoingReceivesStore* = PreambleStore
RoutingRecordsPair* = tuple[id: PeerId, record: Option[PeerRecord]]
RoutingRecordsHandler* = proc(
@@ -181,6 +203,9 @@ type
mcache*: MCache # messages cache
validationSeen*: ValidationSeenTable # peers who sent us message in validation
heartbeatFut*: Future[void] # cancellation future for heartbeat interval
when defined(libp2p_gossipsub_1_4):
preambleExpirationFut*: Future[void]
# cancellation future for preamble expiration heartbeat interval
scoringHeartbeatFut*: Future[void]
# cancellation future for scoring heartbeat interval
heartbeatRunning*: bool
@@ -194,6 +219,11 @@ type
heartbeatEvents*: seq[AsyncEvent]
when defined(libp2p_gossipsub_1_4):
ongoingReceives*: OngoingReceivesStore # list of messages we are receiving
ongoingIWantReceives*: OngoingReceivesStore
# list of iwant replies we are receiving
MeshMetrics* = object # scratch buffers for metrics
otherPeersPerTopicMesh*: int64
otherPeersPerTopicFanout*: int64

@@ -9,13 +9,25 @@
{.push raises: [].}
import std/[tables, sets, sequtils]
import std/[tables, sets, sequtils, strutils]
import ./pubsubpeer, ../../peerid
export tables, sets
type PeerTable* = Table[string, HashSet[PubSubPeer]] # topic string to peer map
proc `$`*(table: PeerTable): string =
result.add("PeerTable ")
result.add("topics (" & $table.len & ")")
for topic, peers in table:
result.add(" topic: ")
result.add($topic)
result.add(" peers: ")
result.add("(" & $peers.len & ") [")
result.add(peers.mapIt($it).join(", "))
result.add("]")
proc hasPeerId*(t: PeerTable, topic: string, peerId: PeerId): bool =
if topic in t:
try:

@@ -20,7 +20,12 @@ import
../../stream/connection,
../../crypto/crypto,
../../protobuf/minprotobuf,
../../utility
../../utility,
../../utils/sequninit,
./bandwidth
when defined(libp2p_gossipsub_1_4):
import ./bandwidth
export peerid, connection, deques
@@ -121,6 +126,9 @@ type
handler*: RPCHandler
observers*: ref seq[PubSubObserver] # ref as in smart_ptr
when defined(libp2p_gossipsub_1_4):
bandwidthTracking*: BandwidthTracking
score*: float64
sentIHaves*: Deque[HashSet[MessageId]]
iDontWants*: Deque[HashSet[SaltedId]]
@@ -134,6 +142,11 @@ type
behaviourPenalty*: float64 # the eventual penalty score
overheadRateLimitOpt*: Opt[TokenBucket]
when defined(libp2p_gossipsub_1_4):
preambleBudget*: int
heIsReceivings*: Table[MessageId, uint32]
heIsSendings*: Table[MessageId, Moment]
rpcmessagequeue: RpcMessageQueue
maxNumElementsInNonPriorityQueue*: int
# The max number of elements allowed in the non-priority queue.
@@ -159,6 +172,9 @@ proc getAgent*(peer: PubSubPeer): string =
else:
"unknown"
proc `$`*(p: PubSubPeer): string =
$p.peerId
func hash*(p: PubSubPeer): Hash =
p.peerId.hash
@@ -221,7 +237,7 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async: (raises: []).} =
conn, peer = p, closed = conn.closed, data = data.shortLog
await p.handler(p, data)
data = newSeqUninitialized[byte](0) # Release memory
data = newSeqUninit[byte](0) # Release memory
except PeerRateLimitError as exc:
debug "Peer rate limit exceeded, exiting read while",
conn, peer = p, description = exc.msg
@@ -347,13 +363,10 @@ proc sendMsgContinue(conn: Connection, msgFut: Future[void]) {.async: (raises: [
try:
await msgFut
trace "sent pubsub message to remote", conn
except CatchableError as exc: # never cancelled
# Because we detach the send call from the currently executing task using
# asyncSpawn, no exceptions may leak out of it
trace "Unable to send to remote", conn, description = exc.msg
except CatchableError as exc:
trace "Unexpected exception in sendMsgContinue", conn, description = exc.msg
# Next time sendConn is used, it will have its close flag set and thus
# will be recycled
await conn.close() # This will clean up the send connection
proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async: (raises: [CancelledError]).} =
@@ -609,6 +622,11 @@ proc new*(
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue,
customConnCallbacks: customConnCallbacks,
)
when defined(libp2p_gossipsub_1_4):
result.bandwidthTracking =
BandwidthTracking(download: ExponentialMovingAverage.init())
result.sentIHaves.addFirst(default(HashSet[MessageId]))
result.iDontWants.addFirst(default(HashSet[SaltedId]))
result.startSendNonPriorityTask()

@@ -41,15 +41,36 @@ func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
proc sign*(msg: Message, privateKey: PrivateKey): CryptoResult[seq[byte]] =
ok((?privateKey.sign(PubSubPrefix & encodeMessage(msg, false))).getBytes())
proc extractPublicKey(m: Message): Opt[PublicKey] =
var pubkey: PublicKey
if m.fromPeer.hasPublicKey() and m.fromPeer.extractPublicKey(pubkey):
Opt.some(pubkey)
elif m.key.len > 0 and pubkey.init(m.key):
# check if peerId extracted from m.key is the same as m.fromPeer
let derivedPeerId = PeerId.init(pubkey).valueOr:
warn "could not derive peerId from key field"
return Opt.none(PublicKey)
if derivedPeerId != m.fromPeer:
warn "peerId derived from msg.key is not the same as msg.fromPeer",
derivedPeerId = derivedPeerId, fromPeer = m.fromPeer
return Opt.none(PublicKey)
Opt.some(pubkey)
else:
Opt.none(PublicKey)
proc verify*(m: Message): bool =
if m.signature.len > 0 and m.key.len > 0:
if m.signature.len > 0:
var msg = m
msg.signature = @[]
msg.key = @[]
var remote: Signature
var key: PublicKey
if remote.init(m.signature) and key.init(m.key):
let key = m.extractPublicKey().valueOr:
warn "could not extract public key", msg = m
return false
if remote.init(m.signature):
trace "verifying signature", remoteSignature = remote
result = remote.verify(PubSubPrefix & encodeMessage(msg, false), key)
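A hedged round-trip sketch of the new verification path (assumes an ed25519 PrivateKey privKey; ed25519 peer ids embed the public key, so extractPublicKey can take the fromPeer branch and the key field may stay empty):

var msg = Message(
  fromPeer: PeerId.init(privKey.getPublicKey().expect("pubkey")).expect("peerid"),
  data: @[1'u8, 2, 3],
)
msg.signature = msg.sign(privKey).expect("signing succeeds")
assert msg.verify()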

@@ -63,6 +63,9 @@ type
graft*: seq[ControlGraft]
prune*: seq[ControlPrune]
idontwant*: seq[ControlIWant]
when defined(libp2p_gossipsub_1_4):
preamble*: seq[ControlPreamble]
imreceiving*: seq[ControlIMReceiving]
ControlIHave* = object
topicID*: string
@@ -79,6 +82,15 @@ type
peers*: seq[PeerInfoMsg]
backoff*: uint64
ControlPreamble* = object
topicID*: string
messageID*: MessageId
messageLength*: uint32
ControlIMReceiving* = object
messageID*: MessageId
messageLength*: uint32
RPCMsg* = object
subscriptions*: seq[SubOpts]
messages*: seq[Message]
@@ -101,13 +113,29 @@ func shortLog*(s: ControlGraft): auto =
func shortLog*(s: ControlPrune): auto =
(topic: s.topicID.shortLog)
func shortLog*(s: ControlPreamble): auto =
(topic: s.topicID.shortLog, messageID: s.messageID.shortLog)
func shortLog*(s: ControlIMReceiving): auto =
(messageID: s.messageID.shortLog)
func shortLog*(c: ControlMessage): auto =
(
ihave: mapIt(c.ihave, it.shortLog),
iwant: mapIt(c.iwant, it.shortLog),
graft: mapIt(c.graft, it.shortLog),
prune: mapIt(c.prune, it.shortLog),
)
when defined(libp2p_gossipsub_1_4):
(
ihave: mapIt(c.ihave, it.shortLog),
iwant: mapIt(c.iwant, it.shortLog),
graft: mapIt(c.graft, it.shortLog),
prune: mapIt(c.prune, it.shortLog),
preamble: mapIt(c.preamble, it.shortLog),
imreceiving: mapIt(c.imreceiving, it.shortLog),
)
else:
(
ihave: mapIt(c.ihave, it.shortLog),
iwant: mapIt(c.iwant, it.shortLog),
graft: mapIt(c.graft, it.shortLog),
prune: mapIt(c.prune, it.shortLog),
)
func shortLog*(msg: Message): auto =
(
@@ -173,11 +201,41 @@ proc byteSize(controlPrune: ControlPrune): int =
# 8 bytes for uint64
static:
expectedFields(ControlMessage, @["ihave", "iwant", "graft", "prune", "idontwant"])
proc byteSize(control: ControlMessage): int =
control.ihave.foldl(a + b.byteSize, 0) + control.iwant.foldl(a + b.byteSize, 0) +
control.graft.foldl(a + b.byteSize, 0) + control.prune.foldl(a + b.byteSize, 0) +
control.idontwant.foldl(a + b.byteSize, 0)
expectedFields(ControlPreamble, @["topicID", "messageID", "messageLength"])
proc byteSize(controlPreamble: ControlPreamble): int =
controlPreamble.topicID.len + controlPreamble.messageID.len + 4 # 4 bytes for uint32
proc byteSize*(preambles: seq[ControlPreamble]): int =
preambles.foldl(a + b.byteSize, 0)
static:
expectedFields(ControlIMReceiving, @["messageID", "messageLength"])
proc byteSize(controlIMreceiving: ControlIMReceiving): int =
controlIMreceiving.messageID.len + 4 # 4 bytes for uint32
proc byteSize*(imreceivings: seq[ControlIMReceiving]): int =
imreceivings.foldl(a + b.byteSize, 0)
when defined(libp2p_gossipsub_1_4):
static:
expectedFields(
ControlMessage,
@["ihave", "iwant", "graft", "prune", "idontwant", "preamble", "imreceiving"],
)
proc byteSize(control: ControlMessage): int =
control.ihave.foldl(a + b.byteSize, 0) + control.iwant.foldl(a + b.byteSize, 0) +
control.graft.foldl(a + b.byteSize, 0) + control.prune.foldl(a + b.byteSize, 0) +
control.idontwant.foldl(a + b.byteSize, 0) +
control.preamble.foldl(a + b.byteSize, 0) +
control.imreceiving.foldl(a + b.byteSize, 0)
else:
static:
expectedFields(ControlMessage, @["ihave", "iwant", "graft", "prune", "idontwant"])
proc byteSize(control: ControlMessage): int =
control.ihave.foldl(a + b.byteSize, 0) + control.iwant.foldl(a + b.byteSize, 0) +
control.graft.foldl(a + b.byteSize, 0) + control.prune.foldl(a + b.byteSize, 0) +
control.idontwant.foldl(a + b.byteSize, 0)
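Worked example under these definitions: a ControlPreamble with a 10-byte topicID and a 32-byte messageID counts 10 + 32 + 4 = 46 bytes (the uint32 messageLength is counted as a fixed 4 bytes), and a ControlIMReceiving for the same message counts 32 + 4 = 36 bytes.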
static:
expectedFields(RPCMsg, @["subscriptions", "messages", "control", "ping", "pong"])

@@ -77,6 +77,31 @@ proc write*(pb: var ProtoBuffer, field: int, iwant: ControlIWant) =
when defined(libp2p_protobuf_metrics):
libp2p_pubsub_rpc_bytes_write.inc(ipb.getLen().int64, labelValues = ["iwant"])
proc write*(pb: var ProtoBuffer, field: int, preamble: ControlPreamble) =
var ipb = initProtoBuffer()
ipb.write(1, preamble.topicID)
ipb.write(2, preamble.messageID)
ipb.write(3, preamble.messageLength)
if len(ipb.buffer) > 0:
ipb.finish()
pb.write(field, ipb)
when defined(libp2p_protobuf_metrics):
libp2p_pubsub_rpc_bytes_write.inc(ipb.getLen().int64, labelValues = ["preamble"])
proc write*(pb: var ProtoBuffer, field: int, imreceiving: ControlIMReceiving) =
var ipb = initProtoBuffer()
ipb.write(1, imreceiving.messageID)
ipb.write(2, imreceiving.messageLength)
if ipb.buffer.len > 0:
ipb.finish()
pb.write(field, ipb)
when defined(libp2p_protobuf_metrics):
libp2p_pubsub_rpc_bytes_write.inc(ipb.getLen().int64, labelValues = ["imreceiving"])
proc write*(pb: var ProtoBuffer, field: int, control: ControlMessage) =
var ipb = initProtoBuffer()
for ihave in control.ihave:
@@ -89,6 +114,11 @@ proc write*(pb: var ProtoBuffer, field: int, control: ControlMessage) =
ipb.write(4, prune)
for idontwant in control.idontwant:
ipb.write(5, idontwant)
when defined(libp2p_gossipsub_1_4):
for preamble in control.preamble:
ipb.write(6, preamble)
for imreceiving in control.imreceiving:
ipb.write(7, imreceiving)
if len(ipb.buffer) > 0:
ipb.finish()
pb.write(field, ipb)
@@ -197,6 +227,43 @@ proc decodeIWant*(pb: ProtoBuffer): ProtoResult[ControlIWant] {.inline.} =
trace "decodeIWant: no messageIDs"
ok(control)
proc decodePreamble*(pb: ProtoBuffer): ProtoResult[ControlPreamble] {.inline.} =
when defined(libp2p_protobuf_metrics):
libp2p_pubsub_rpc_bytes_read.inc(pb.getLen().int64, labelValues = ["preamble"])
trace "decodePreamble: decoding message"
var control = ControlPreamble()
if ?pb.getField(1, control.topicID):
trace "decodePreamble: read topicID", topic = control.topicID
else:
trace "decodePreamble: topicID is missing"
if ?pb.getField(2, control.messageID):
trace "decodePreamble: read messageID", message_id = control.messageID
else:
trace "decodePreamble: messageID is missing"
  if ?pb.getField(3, control.messageLength):
    trace "decodePreamble: read messageLength", message_length = control.messageLength
  else:
    trace "decodePreamble: messageLength is missing"
ok(control)
proc decodeIMReceiving*(pb: ProtoBuffer): ProtoResult[ControlIMReceiving] {.inline.} =
when defined(libp2p_protobuf_metrics):
libp2p_pubsub_rpc_bytes_read.inc(pb.getLen().int64, labelValues = ["imreceiving"])
trace "decodeIMReceiving: decoding message"
var control = ControlIMReceiving()
if ?pb.getField(1, control.messageID):
trace "decodeIMReceiving: read messageID", message_id = control.messageID
else:
trace "decodeIMReceiving: messageID is missing"
  if ?pb.getField(2, control.messageLength):
    trace "decodeIMReceiving: read messageLength",
      message_length = control.messageLength
  else:
    trace "decodeIMReceiving: messageLength is missing"
ok(control)
proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.inline.} =
trace "decodeControl: decoding message"
var buffer: seq[byte]
@@ -208,6 +275,10 @@ proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.inli
var graftpbs: seq[seq[byte]]
var prunepbs: seq[seq[byte]]
var idontwant: seq[seq[byte]]
when defined(libp2p_gossipsub_1_4):
var preamble: seq[seq[byte]]
var imreceiving: seq[seq[byte]]
if ?cpb.getRepeatedField(1, ihavepbs):
for item in ihavepbs:
control.ihave.add(?decodeIHave(initProtoBuffer(item)))
@@ -223,6 +294,15 @@ proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.inli
if ?cpb.getRepeatedField(5, idontwant):
for item in idontwant:
control.idontwant.add(?decodeIWant(initProtoBuffer(item)))
when defined(libp2p_gossipsub_1_4):
if ?cpb.getRepeatedField(6, preamble):
for item in preamble:
control.preamble.add(?decodePreamble(initProtoBuffer(item)))
if ?cpb.getRepeatedField(7, imreceiving):
for item in imreceiving:
control.imreceiving.add(?decodeIMReceiving(initProtoBuffer(item)))
trace "decodeControl: message statistics",
graft_count = len(control.graft),
prune_count = len(control.prune),

@@ -838,6 +838,6 @@ method stop*(rdv: RendezVous): Future[void] {.async: (raises: [], raw: true).} =
warn "Stopping rendezvous without starting it"
return fut
rdv.started = false
rdv.registerDeletionLoop.cancel()
rdv.registerDeletionLoop.cancelSoon()
rdv.registerDeletionLoop = nil
fut

@@ -20,7 +20,7 @@ import ../../peerid
import ../../peerinfo
import ../../protobuf/minprotobuf
import ../../utility
import ../../utils/bytesview
import ../../utils/[bytesview, sequninit]
import secure, ../../crypto/[crypto, chacha20poly1305, curve25519, hkdf]
@@ -315,7 +315,7 @@ proc readFrame(
if size == 0:
return
var buffer = newSeqUninitialized[byte](size)
var buffer = newSeqUninit[byte](size)
await sconn.readExactly(addr buffer[0], buffer.len)
return buffer
@@ -458,7 +458,7 @@ method write*(
let frames = (message.len + MaxPlainSize - 1) div MaxPlainSize
var
cipherFrames = newSeqUninitialized[byte](message.len + frames * FramingSize)
cipherFrames = newSeqUninit[byte](message.len + frames * FramingSize)
left = message.len
offset = 0
woffset = 0

@@ -151,6 +151,19 @@ method closed*(s: ChronosStream): bool =
method atEof*(s: ChronosStream): bool =
s.client.atEof()
method closeWrite*(s: ChronosStream) {.async: (raises: []).} =
## Close the write side of the TCP connection using half-close
if not s.client.closed():
try:
await s.client.shutdownWait()
trace "Write side closed", address = $s.client.remoteAddress(), s
except TransportError:
# Ignore transport errors during shutdown
discard
except CatchableError:
# Ignore other errors during shutdown
discard
method closeImpl*(s: ChronosStream) {.async: (raises: []).} =
trace "Shutting down chronos stream", address = $s.client.remoteAddress(), s

@@ -40,6 +40,12 @@ type
proc timeoutMonitor(s: Connection) {.async: (raises: []).}
method closeWrite*(s: Connection): Future[void] {.base, async: (raises: []).} =
## Close the write side of the connection
## Subclasses should implement this for their specific transport
## Default implementation just closes the entire connection
await s.close()
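A hedged caller-side sketch of half-close (assumes conn is a Connection over a transport that implements it, per the overrides above, and request is a seq[byte]): write the final bytes, close only the write side, then keep reading the peer's reply until it finishes.

await conn.writeLp(request)
await conn.closeWrite() # peer observes EOF on its read side; our read side stays usable
let reply = await conn.readLp(64 * 1024)
await conn.close()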
func shortLog*(conn: Connection): string =
try:
if conn == nil:
@@ -133,7 +139,10 @@ when defined(libp2p_agents_metrics):
var conn = s
while conn != nil:
conn.shortAgent = shortAgent
conn = conn.getWrapped()
let wrapped = conn.getWrapped()
if wrapped == conn:
break
conn = wrapped
proc new*(
C: type Connection,

@@ -16,6 +16,7 @@ import std/oids
import stew/byteutils
import chronicles, chronos, metrics
import ../varint, ../peerinfo, ../multiaddress, ../utility, ../errors
import ../utils/sequninit
export errors
@@ -232,7 +233,7 @@ method readLp*(
if length == 0:
return
var res = newSeqUninitialized[byte](length)
var res = newSeqUninit[byte](length)
await s.readExactly(addr res[0], res.len)
res
@@ -251,7 +252,7 @@ method writeLp*(
.} =
## Write `msg` with a varint-encoded length prefix
let vbytes = PB.toBytes(msg.len().uint64)
var buf = newSeqUninitialized[byte](msg.len() + vbytes.len)
var buf = newSeqUninit[byte](msg.len() + vbytes.len)
buf[0 ..< vbytes.len] = vbytes.toOpenArray()
buf[vbytes.len ..< buf.len] = msg
s.write(buf)

@@ -1,94 +0,0 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import stew/bitops2
type StreamSeq* = object
# Seq adapted to the stream use case where we add data at the back and
# consume at the front in chunks. A bit like a deque but contiguous memory
# area - will try to avoid moving data unless it has to, subject to buffer
# space. The assumption is that data is typically consumed fully.
#
# See also asio::stream_buf
buf: seq[byte] # Data store
rpos: int # Reading position - valid data starts here
wpos: int # Writing position - valid data ends here
template len*(v: StreamSeq): int =
v.wpos - v.rpos
func grow(v: var StreamSeq, n: int) =
if v.rpos == v.wpos:
# All data has been consumed, reset positions
v.rpos = 0
v.wpos = 0
if v.buf.len - v.wpos < n:
if v.rpos > 0:
# We've consumed some data so we'll try to move that data to the beginning
# of the buffer, hoping that this will clear up enough capacity to avoid
# reallocation
moveMem(addr v.buf[0], addr v.buf[v.rpos], v.wpos - v.rpos)
v.wpos -= v.rpos
v.rpos = 0
if v.buf.len - v.wpos >= n:
return
# TODO this is inefficient - `setLen` will copy all data of buf, even though
# we know that only a part of it contains "valid" data
v.buf.setLen(nextPow2(max(64, v.wpos + n).uint64).int)
template prepare*(v: var StreamSeq, n: int): var openArray[byte] =
## Return a buffer that is at least `n` bytes long
mixin grow
v.grow(n)
v.buf.toOpenArray(v.wpos, v.buf.len - 1)
template commit*(v: var StreamSeq, n: int) =
## Mark `n` bytes in the buffer returned by `prepare` as ready for reading
v.wpos += n
func add*(v: var StreamSeq, data: openArray[byte]) =
## Add data - the equivalent of `buf.prepare(n) = data; buf.commit(n)`
if data.len > 0:
v.grow(data.len)
copyMem(addr v.buf[v.wpos], unsafeAddr data[0], data.len)
v.commit(data.len)
template data*(v: StreamSeq): openArray[byte] =
# Data that is ready to be consumed
# TODO a double-hash comment here breaks compile (!)
v.buf.toOpenArray(v.rpos, v.wpos - 1)
template toOpenArray*(v: StreamSeq, b, e: int): openArray[byte] =
# Data that is ready to be consumed
# TODO a double-hash comment here breaks compile (!)
v.buf.toOpenArray(v.rpos + b, v.rpos + e - b)
func consume*(v: var StreamSeq, n: int) =
## Mark `n` bytes that were returned via `data` as consumed
v.rpos += n
func consumeTo*(v: var StreamSeq, buf: var openArray[byte]): int =
let bytes = min(buf.len, v.len)
if bytes > 0:
copyMem(addr buf[0], addr v.buf[v.rpos], bytes)
v.consume(bytes)
bytes
func clear*(v: var StreamSeq) =
v.consume(v.len)
func assign*(v: var StreamSeq, buf: openArray[byte]) =
v.clear()
v.add(buf)

@@ -20,6 +20,7 @@ import chronos, chronicles, metrics
import
stream/connection,
transports/transport,
transports/tcptransport,
upgrademngrs/upgrade,
multistream,
multiaddress,
@@ -273,6 +274,9 @@ proc accept(s: Switch, transport: Transport) {.async: (raises: []).} =
conn =
try:
await transport.accept()
except CancelledError as exc:
slot.release()
raise exc
except CatchableError as exc:
slot.release()
raise
@@ -351,7 +355,17 @@ proc start*(s: Switch) {.public, async: (raises: [CancelledError, LPError]).} =
s.peerInfo.listenAddrs.keepItIf(it notin addrs)
if addrs.len > 0 or t.running:
startFuts.add(t.start(addrs))
let fut = t.start(addrs)
startFuts.add(fut)
if t of TcpTransport:
await fut
s.acceptFuts.add(s.accept(t))
s.peerInfo.listenAddrs &= t.addrs
# some transports require some services to be running
# in order to finish their startup process
for service in s.services:
discard await service.setup(s)
await allFutures(startFuts)
@@ -364,12 +378,11 @@ proc start*(s: Switch) {.public, async: (raises: [CancelledError, LPError]).} =
for t in s.transports: # for each transport
if t.addrs.len > 0 or t.running:
if t of TcpTransport:
continue # already added previously
s.acceptFuts.add(s.accept(t))
s.peerInfo.listenAddrs &= t.addrs
for service in s.services:
discard await service.setup(s)
await s.peerInfo.update()
await s.ms.start()
s.started = true

@@ -52,7 +52,7 @@ proc listenAddress(self: MemoryTransport, ma: MultiAddress): MultiAddress =
method start*(
self: MemoryTransport, addrs: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError]).} =
) {.async: (raises: [LPError, transport.TransportError, CancelledError]).} =
if self.running:
return

@@ -42,6 +42,9 @@ proc new(
procCall P2PConnection(quicstream).initStream()
quicstream
method getWrapped*(self: QuicStream): P2PConnection =
self
template mapExceptions(body: untyped) =
try:
body
@@ -53,15 +56,23 @@ template mapExceptions(body: untyped) =
method readOnce*(
stream: QuicStream, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
  try:
    if stream.cached.len == 0:
      stream.cached = await stream.stream.read()
    result = min(nbytes, stream.cached.len)
    copyMem(pbytes, addr stream.cached[0], result)
    stream.cached = stream.cached[result ..^ 1]
    libp2p_network_bytes.inc(result.int64, labelValues = ["in"])
  except CatchableError as exc:
    raise newLPStreamEOFError()
  if stream.cached.len == 0:
    try:
      stream.cached = await stream.stream.read()
      if stream.cached.len == 0:
        raise newLPStreamEOFError()
    except CancelledError as exc:
      raise exc
    except LPStreamEOFError as exc:
      raise exc
    except CatchableError as exc:
      raise (ref LPStreamError)(msg: "error in readOnce: " & exc.msg, parent: exc)
  let toRead = min(nbytes, stream.cached.len)
  copyMem(pbytes, addr stream.cached[0], toRead)
  stream.cached = stream.cached[toRead ..^ 1]
  libp2p_network_bytes.inc(toRead.int64, labelValues = ["in"])
  return toRead
{.push warning[LockLevel]: off.}
method write*(
@@ -72,6 +83,13 @@ method write*(
{.pop.}
method closeWrite*(stream: QuicStream) {.async: (raises: []).} =
## Close the write side of the QUIC stream
try:
await stream.stream.closeWrite()
  except CatchableError:
    # failures while closing the write side are not actionable here
    discard
method closeImpl*(stream: QuicStream) {.async: (raises: []).} =
try:
await stream.stream.close()
@@ -82,8 +100,11 @@ method closeImpl*(stream: QuicStream) {.async: (raises: []).} =
# Session
type QuicSession* = ref object of P2PConnection
connection: QuicConnection
streams: seq[QuicStream]
method close*(session: QuicSession) {.async: (raises: []).} =
for s in session.streams:
await s.close()
safeClose(session.connection)
await procCall P2PConnection(session).close()
@@ -98,19 +119,32 @@ proc getStream*(
of Direction.Out:
stream = await session.connection.openStream()
await stream.write(@[]) # QUIC streams do not exist until data is sent
return QuicStream.new(stream, session.observedAddr, session.peerId)
let qs = QuicStream.new(stream, session.observedAddr, session.peerId)
when defined(libp2p_agents_metrics):
qs.shortAgent = session.shortAgent
session.streams.add(qs)
return qs
except CatchableError as exc:
# TODO: incomingStream is using {.async.} with no raises
raise (ref QuicTransportError)(msg: "error in getStream: " & exc.msg, parent: exc)
method getWrapped*(self: QuicSession): P2PConnection =
nil
self
# Muxer
type QuicMuxer = ref object of Muxer
quicSession: QuicSession
handleFut: Future[void]
when defined(libp2p_agents_metrics):
method setShortAgent*(m: QuicMuxer, shortAgent: string) =
m.quicSession.shortAgent = shortAgent
for s in m.quicSession.streams:
s.shortAgent = shortAgent
m.connection.shortAgent = shortAgent
method newStream*(
m: QuicMuxer, name: string = "", lazy: bool = false
): Future[P2PConnection] {.
@@ -129,7 +163,7 @@ proc handleStream(m: QuicMuxer, chann: QuicStream) {.async: (raises: []).} =
trace "finished handling stream"
doAssert(chann.closed, "connection not closed by handler!")
except CatchableError as exc:
trace "Exception in mplex stream handler", msg = exc.msg
trace "Exception in quic stream handler", msg = exc.msg
await chann.close()
method handle*(m: QuicMuxer): Future[void] {.async: (raises: []).} =
@@ -138,7 +172,7 @@ method handle*(m: QuicMuxer): Future[void] {.async: (raises: []).} =
let incomingStream = await m.quicSession.getStream(Direction.In)
asyncSpawn m.handleStream(incomingStream)
except CatchableError as exc:
trace "Exception in mplex handler", msg = exc.msg
trace "Exception in quic handler", msg = exc.msg
method close*(m: QuicMuxer) {.async: (raises: []).} =
try:
@@ -210,7 +244,7 @@ method handles*(transport: QuicTransport, address: MultiAddress): bool {.raises:
method start*(
self: QuicTransport, addrs: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError]).} =
) {.async: (raises: [LPError, transport.TransportError, CancelledError]).} =
doAssert self.listener.isNil, "start() already called"
#TODO handle multiple addr
@@ -250,7 +284,8 @@ method start*(
method stop*(transport: QuicTransport) {.async: (raises: []).} =
if transport.running:
for c in transport.connections:
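    # take a snapshot first: closing a connection may remove it from transport.connections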
let conns = transport.connections[0 .. ^1]
for c in conns:
await c.close()
await procCall Transport(transport).stop()
try:
@@ -289,12 +324,12 @@ method accept*(
): Future[connection.Connection] {.
async: (raises: [transport.TransportError, CancelledError])
.} =
doAssert not self.listener.isNil, "call start() before calling accept()"
if not self.running:
# stop accept only when transport is stopped (not when error occurs)
raise newException(QuicTransportAcceptStopped, "Quic transport stopped")
doAssert not self.listener.isNil, "call start() before calling accept()"
try:
let connection = await self.listener.accept()
return self.wrapConnection(connection)

@@ -107,7 +107,7 @@ proc new*(
method start*(
self: TcpTransport, addrs: seq[MultiAddress]
): Future[void] {.async: (raises: [LPError, transport.TransportError]).} =
): Future[void] {.async: (raises: [LPError, transport.TransportError, CancelledError]).} =
## Start transport listening to the given addresses - for dial-only transports,
## start with an empty list

@@ -801,6 +801,44 @@ cleanup:
return ret_code;
}
cert_error_t cert_new_key_t(cert_buffer *seckey, cert_key_t *out) {
  BIO *bio = NULL;
  EVP_PKEY *pkey = NULL; /* declared up front so the cleanup path never reads it uninitialized */
  cert_error_t ret_code = CERT_SUCCESS;
  if (seckey == NULL || out == NULL) {
    return CERT_ERROR_NULL_PARAM;
  }
  struct cert_key_s *key = calloc(1, sizeof(struct cert_key_s));
  if (key == NULL) {
    return CERT_ERROR_MEMORY;
  }
  bio = BIO_new_mem_buf(seckey->data, seckey->len);
  if (!bio) {
    ret_code = CERT_ERROR_BIO_GEN;
    goto cleanup;
  }
  /* d2i_PrivateKey_bio may return NULL on malformed DER; callers must treat a
     NULL key->pkey as an invalid key */
  pkey = d2i_PrivateKey_bio(bio, NULL);
  key->pkey = pkey;
  *out = key;
cleanup:
  if (bio)
    BIO_free(bio);
  if (ret_code != CERT_SUCCESS) {
    /* on failure *out was never assigned, so release everything here */
    if (pkey)
      EVP_PKEY_free(pkey);
    free(key);
    *out = NULL;
  }
  return ret_code;
}
cert_error_t cert_serialize_privk(cert_key_t key, cert_buffer **out,
cert_format_t format) {
BIO *bio = NULL;

@@ -106,6 +106,16 @@ cert_error_t cert_init_drbg(const char *seed, size_t seed_len,
*/
cert_error_t cert_generate_key(cert_context_t ctx, cert_key_t *out);
/**
* Copy a DER-formatted seckey into a cert_key_t
*
* @param seckey Private Key bytes in DER format
* @param out Pointer to store the key as cert_key_t
*
* @return CERT_SUCCESS on successful execution, an error code otherwise
*/
cert_error_t cert_new_key_t(cert_buffer *seckey, cert_key_t *out);
/**
* Serialize a key's private key to a format
*

@@ -17,6 +17,7 @@ import ../../crypto/crypto
import ../../errors
import ./certificate_ffi
import ../../../libp2p/peerid
import ../../utils/sequninit
logScope:
topics = "libp2p tls certificate"
@@ -98,7 +99,7 @@ func makeSignatureMessage(pubKey: seq[byte]): seq[byte] {.inline.} =
##
let P2P_SIGNING_PREFIX = "libp2p-tls-handshake:".toBytes()
let prefixLen = P2P_SIGNING_PREFIX.len.int
let msg = newSeqUninitialized[byte](prefixLen + pubKey.len)
let msg = newSeqUninit[byte](prefixLen + pubKey.len)
copyMem(msg[0].unsafeAddr, P2P_SIGNING_PREFIX[0].unsafeAddr, prefixLen)
copyMem(msg[prefixLen].unsafeAddr, pubKey[0].unsafeAddr, pubKey.len.int)

@@ -42,6 +42,10 @@ proc cert_generate_key*(
ctx: cert_context_t, out_arg: ptr cert_key_t
): cert_error_t {.cdecl, importc: "cert_generate_key".}
proc cert_new_key_t*(
seckey: ptr cert_buffer, certKey: ptr cert_key_t
): cert_error_t {.cdecl, importc: "cert_new_key_t".}
proc cert_serialize_privk*(
key: cert_key_t, out_arg: ptr ptr cert_buffer, format: cert_format_t
): cert_error_t {.cdecl, importc: "cert_serialize_privk".}
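A hedged call sketch for the new binding (hypothetical setup: derBuf is a cert_buffer already holding DER private-key bytes; it also assumes CERT_SUCCESS is exposed to Nim alongside cert_error_t):

var key: cert_key_t
let rc = cert_new_key_t(addr derBuf, addr key)
doAssert rc == CERT_SUCCESS, "could not parse DER private key"
# key now wraps an EVP_PKEY and can be handed to e.g. cert_serialize_privk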
