Compare commits

...

80 Commits

Author SHA1 Message Date
Radosław Kamiński
5f6b8e86a5 test(rendezvous): error cases (#1683) 2025-09-22 15:53:47 +01:00
richΛrd
11b98b7a3f fix: add missing import (#1707) 2025-09-22 13:38:32 +00:00
richΛrd
647f76341e feat(mix): mix_protocol and entry connection (#1703) 2025-09-22 13:09:18 +00:00
Radosław Kamiński
fbf96bb2ce chore(readme): Update chat code example link (#1709) 2025-09-22 13:28:03 +01:00
richΛrd
f0aaecb743 feat(mix): mixnode (#1702) 2025-09-21 16:46:48 +00:00
richΛrd
8d3076ea99 fix: add missing import for linux/amd64 daily job (#1706) 2025-09-20 18:39:12 -04:00
richΛrd
70b7d61436 feat(mix): SURBs and fragmentation (#1700) 2025-09-19 15:56:26 -04:00
Gabriel Cruz
37bae0986c chore: remove go daemon (#1705) 2025-09-19 15:21:23 -04:00
Gabriel Cruz
b34ddab10c chore(autonat-v2): add interop tests (#1695) 2025-09-18 14:30:56 +00:00
richΛrd
e09457da12 feat(mix): sphinx (#1691) 2025-09-18 09:04:44 -04:00
vladopajic
94ad1dcbc8 chore: nimble config tidy (#1696) 2025-09-17 18:10:07 +00:00
Gabriel Cruz
5b9f2cba6f fix: autonatV2 request addresses (#1698) 2025-09-17 13:38:03 -03:00
richΛrd
59e7069c15 feat: v1.13.0 (#1673) 2025-09-17 08:18:53 -04:00
richΛrd
18a0e9c2d1 feat(mix): message (#1690) 2025-09-16 10:49:49 -04:00
richΛrd
34a9a03b73 feat(mix): serialization (#1689) 2025-09-16 14:12:19 +00:00
Gabriel Cruz
788109b4f4 fix(autonat-v2): service setting up correctly (#1694) 2025-09-16 09:10:12 +00:00
vladopajic
44aab92b3e chore(quic): better error handling in stream.write() (#1693) 2025-09-16 08:35:57 +00:00
richΛrd
ad0812b40b feat(mix): sequence number generator and tag manager (#1688) 2025-09-15 16:49:34 -03:00
richΛrd
0751f240a2 feat(mix): crypto (#1687) 2025-09-15 16:46:28 +00:00
richΛrd
d8ecf8a135 chore: add missing import (#1692) 2025-09-15 10:04:07 -04:00
Gabriel Cruz
bab863859c chore: add autonatv2 service to builder (#1686) 2025-09-12 21:23:03 +02:00
Gabriel Cruz
73d04def6f feat: add autonat v2 service (#1684) 2025-09-12 08:26:33 -04:00
richΛrd
4509ade75c feat: add skipIDontWant and skipPreamble (#1681) 2025-09-10 16:27:54 -04:00
Gabriel Cruz
b64c0f6d85 chore: rename withAutonatV2 to withAutonatV2Server (#1680) 2025-09-10 15:20:01 -04:00
vladopajic
582ba7e650 fix(pubsub): use custom conn when message is sent as lower-priority (#1679) 2025-09-10 18:34:51 +02:00
Gabriel Cruz
31ae734aff feat(autonat-v2): add client and tests (#1659) 2025-09-10 13:11:03 +00:00
Radosław Kamiński
f26ff88e6c test(rendezvous): Simplify test setup (#1677) 2025-09-10 10:21:22 +01:00
vladopajic
4fbf59ece8 fix(builders): transport param not assigned in newStandardSwitch (#1676) 2025-09-09 11:34:41 -03:00
Radosław Kamiński
62388a7a20 refactor(rendezvous): Split Rendezvous Protobuf and add tests (#1671) 2025-09-09 09:12:43 +01:00
vladopajic
27051164db chore(tests): utilize quic transport in pubsub tests (#1667) 2025-09-08 21:30:21 +00:00
Gabriel Cruz
f41009461b fix: revert reviewdog workaround (#1672) 2025-09-08 16:35:47 -04:00
vladopajic
c3faabf522 chore(quic): add tests from common interop (#1662) 2025-09-08 16:06:22 +00:00
Gabriel Cruz
10f7f5c68a chore(autonat-v2): add server config (#1669) 2025-09-08 15:23:23 +00:00
Gabriel Cruz
f345026900 fix(linters): use workaround for reviewdog bug (#1668) 2025-09-08 14:48:03 +00:00
vladopajic
5d6578a06f chore: splitRPCMsg improvements (#1665) 2025-09-08 11:06:55 -03:00
Gabriel Cruz
871a5d047f feat(autonat-v2): add server (#1658) 2025-09-04 13:27:49 -04:00
Gabriel Cruz
061195195b chore(autonat-v2): add utils (#1657) 2025-09-03 19:04:46 +00:00
Radosław Kamiński
8add5aaaab fix(rendezvous): peer registration limit (#1656) 2025-09-03 18:01:23 +01:00
Miran
dbf60b74c7 chore(ci): remove macos-13 from the matrix (#1650) 2025-09-03 11:16:37 -04:00
Radosław Kamiński
d2eaf07960 test(rendezvous): Registration TTL tests (#1655) 2025-09-02 15:43:48 +01:00
Gabriel Cruz
6e5274487e chore: pass localAddr in noise, mplex and yamux (#1654) 2025-09-01 23:38:23 +02:00
Gabriel Cruz
7ed62461d7 chore: add localAddr to Connection (#1651) 2025-09-01 20:39:08 +02:00
Radosław Kamiński
6059ee8332 test(performance): upload plots as artifacts (#1648) 2025-09-01 16:12:49 +00:00
Radosław Kamiński
4f7e232a9e fix(rendezvous): pagination offset (#1646) 2025-08-29 18:27:03 +01:00
richΛrd
5eaa43b860 fix: dont send GoAway for unknown streams and mark streams as closed on conn close (#1645) 2025-08-28 09:34:45 -04:00
richΛrd
17ed2d88df chore: temporarily disable performance plots from being published (#1647) 2025-08-28 08:20:12 -04:00
Radosław Kamiński
c7f29ed5db test(rendezvous): Refactor Rendezvous tests (#1644) 2025-08-28 09:35:04 +01:00
vladopajic
9865cc39b5 chore(perf): follow up for PR#1600 (#1620) 2025-08-26 10:00:25 -04:00
Gabriel Cruz
601f56b786 chore(autonat-v2): add message types (#1637) 2025-08-25 15:18:43 +00:00
Ben
25a8ed4d07 refactor(kad): Refine, and reduce, exception scope (#1627) 2025-08-25 11:33:26 +00:00
Radosław Kamiński
955e28ff70 test(yamux): Add unit tests - frame handling and stream initiation (#1634) 2025-08-22 12:02:54 +01:00
Radosław Kamiński
f952e6d436 test(performance): do not run publish steps on forks and fix cleanup (#1630) 2025-08-19 13:25:52 +01:00
MorganaFuture
bed83880bf fix(test): Race condition on Windows-specific daemon close (#1628)
Co-authored-by: Ben <benph@vac.dev>
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-08-18 17:09:31 -04:00
richΛrd
9bd4b7393f feat(kad-dht): findPeer (#1624) 2025-08-18 13:45:31 +00:00
Radosław Kamiński
12d1fae404 test(yamux): Add header unit tests (#1625) 2025-08-18 13:50:54 +01:00
MorganaFuture
17073dc9e0 fix(tests): prevent race condition in testgossipsubcontrolmessages (#1626) 2025-08-15 18:46:39 +00:00
vladopajic
b1649b3566 chore(quic): add length prefixed test (#1599) 2025-08-15 15:57:56 +02:00
Ben
ef20f46b47 refactor: rm dhttypes.nim (#1612) 2025-08-15 12:23:27 +00:00
Gabriel Cruz
9161529c84 fix: pubsub signature verification (#1618) 2025-08-14 20:15:02 +00:00
Ben
8b70384b6a refactor: Removal of "Unhashed" key variant (#1623)
Internal keydata is _always_ unhashed. The parts that require its data in hashed form hash it themselves using the provided hasher (with default fallback)
2025-08-14 11:22:09 +00:00
MorganaFuture
f25814a890 feat(perf): implement proper half-close semantics (#1600)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-08-13 10:08:17 -04:00
Radosław Kamiński
3d5ea1fa3c test(performance): fetch before push and improve latency history (#1617) 2025-08-13 14:22:42 +01:00
richΛrd
2114008704 fix: compilation warning on yamux due to using CatchableErr (#1616) 2025-08-12 22:11:33 +00:00
richΛrd
04796b210b fix: don't check for errors as close() will only contain futures that raise [] (#1615) 2025-08-12 21:26:22 +00:00
Ben
59faa023aa feat(kad): Initial unstable putval api (#1582) 2025-08-12 12:25:21 +02:00
vladopajic
fdebea4e14 chore(quic): fix flaky test when eof is expected (#1611) 2025-08-11 17:02:13 +00:00
vladopajic
0c188df806 fix(quic): race errors when stopping transport (#1614) 2025-08-11 15:48:37 +00:00
Radosław Kamiński
abee5326dc test(gossipsub): Performance tests - plot latency history (#1608) 2025-08-11 16:11:29 +01:00
Radosław Kamiński
71f04d1bb3 test(gossipsub): Performance tests - plot docker stats (#1597) 2025-08-11 15:45:50 +01:00
Radosław Kamiński
41ae43ae80 test(gossipsub): Performance tests - collect docker stats (#1593) 2025-08-11 14:01:38 +00:00
vladopajic
5dbf077d9e chore(pubsub): simplify prune backoff test (#1596) 2025-08-09 17:49:14 +00:00
vladopajic
b5fc7582ff fix(quic): setting shortAgent (#1609) 2025-08-08 17:21:58 +00:00
vladopajic
7f83ebb198 chore(quic): readOnce better exception handling (#1610) 2025-08-08 16:02:33 +00:00
vladopajic
ceb89986c1 chore(quic): exception msg fix (#1607) 2025-08-08 10:24:55 -03:00
vladopajic
f4ff27ca6b fix(quic): test improvement (#1595) 2025-08-06 14:34:07 -03:00
richΛrd
b517b692df chore: v1.12.0 (#1581) 2025-08-05 13:59:43 +00:00
Ben
7cfd26035a fix(kad): Skip self when iterating through findNode dialouts (#1594) 2025-08-05 12:00:09 +02:00
Radosław Kamiński
cd5fea53e3 test(gossipsub): Performance tests - more scenarios (#1585) 2025-08-01 08:33:39 +01:00
Radosław Kamiński
d9aa393761 test(gossipsub): Performance tests - aggregation script and workflow (#1577) 2025-07-31 17:59:09 +01:00
Gabriel Cruz
a4a0d9e375 ci: add nimbus compilation daily test (#1571) 2025-07-31 15:01:10 +00:00
154 changed files with 10894 additions and 4553 deletions

.github/actions/add_comment/action.yml (new file, 34 lines)

@@ -0,0 +1,34 @@
name: Add Comment
description: "Add or update comment in the PR"
runs:
using: "composite"
steps:
- name: Add/Update Comment
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const marker = "${{ env.MARKER }}";
const body = fs.readFileSync("${{ env.COMMENT_SUMMARY_PATH }}", 'utf8');
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const existing = comments.find(c => c.body && c.body.startsWith(marker));
if (existing) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: existing.id,
body,
});
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body,
});
}
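Note: the script above resolves `${{ env.MARKER }}` and `${{ env.COMMENT_SUMMARY_PATH }}` from the caller's environment. A minimal invocation sketch (the performance.yml workflow further below defines these values at job scope; placing them on the step here is purely illustrative):

```yaml
# Sketch only: in performance.yml (below), MARKER and COMMENT_SUMMARY_PATH
# live in the job-level env block rather than on the step itself.
- name: Post/Update PR comment
  if: github.event_name == 'pull_request'
  uses: ./.github/actions/add_comment
  env:
    MARKER: "<!-- perf-summary-marker -->"
    COMMENT_SUMMARY_PATH: "/tmp/perf-summary.md"
```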

.github/actions/generate_plots/action.yml (new file)

@@ -0,0 +1,24 @@
name: Generate Plots
description: "Set up Python and run script to generate plots with Docker Stats"
runs:
using: "composite"
steps:
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install Python dependencies
shell: bash
run: |
python -m pip install --upgrade pip
pip install matplotlib
- name: Plot Docker Stats
shell: bash
run: python performance/scripts/plot_docker_stats.py
- name: Plot Latency History
shell: bash
run: python performance/scripts/plot_latency_history.py

.github/actions/process_stats/action.yml (new file)

@@ -0,0 +1,21 @@
name: Process Stats
description: "Set up Nim and run scripts to aggregate latency and process raw docker stats"
runs:
using: "composite"
steps:
- name: Set up Nim
uses: jiro4989/setup-nim-action@v2
with:
nim-version: "2.x"
repo-token: ${{ env.GITHUB_TOKEN }}
- name: Aggregate latency stats and prepare markdown for comment and summary
shell: bash
run: |
nim c -r -d:release -o:/tmp/process_latency_stats ./performance/scripts/process_latency_stats.nim
- name: Process raw docker stats to csv files
shell: bash
run: |
nim c -r -d:release -o:/tmp/process_docker_stats ./performance/scripts/process_docker_stats.nim

.github/actions/publish_history/action.yml (new file)

@@ -0,0 +1,36 @@
name: Publish Latency History
description: "Publish latency history CSVs in a configurable branch and folder"
runs:
using: "composite"
steps:
- name: Clone the branch
uses: actions/checkout@v4
with:
repository: ${{ github.repository }}
ref: ${{ env.PUBLISH_BRANCH_NAME }}
path: ${{ env.CHECKOUT_SUBFOLDER_HISTORY }}
fetch-depth: 0
- name: Commit & push latency history CSVs
shell: bash
run: |
cd "$CHECKOUT_SUBFOLDER_HISTORY"
git fetch origin "$PUBLISH_BRANCH_NAME"
git reset --hard "origin/$PUBLISH_BRANCH_NAME"
mkdir -p "$PUBLISH_DIR_LATENCY_HISTORY"
cp ../$SHARED_VOLUME_PATH/$LATENCY_HISTORY_PREFIX*.csv "$PUBLISH_DIR_LATENCY_HISTORY/"
git add "$PUBLISH_DIR_LATENCY_HISTORY"
if git diff-index --quiet HEAD --; then
echo "No changes to commit"
else
git config user.email "github-actions[bot]@users.noreply.github.com"
git config user.name "github-actions[bot]"
git commit -m "Update latency history CSVs"
git push origin "$PUBLISH_BRANCH_NAME"
fi
cd ..

.github/actions/publish_plots/action.yml (new file)

@@ -0,0 +1,56 @@
name: Publish Plots
description: "Publish plots in performance_plots branch and add to the workflow summary"
runs:
using: "composite"
steps:
- name: Clone the performance_plots branch
uses: actions/checkout@v4
with:
repository: ${{ github.repository }}
ref: ${{ env.PUBLISH_BRANCH_NAME }}
path: ${{ env.CHECKOUT_SUBFOLDER_SUBPLOTS }}
fetch-depth: 0
- name: Commit & push plots
shell: bash
run: |
cd $CHECKOUT_SUBFOLDER_SUBPLOTS
git fetch origin "$PUBLISH_BRANCH_NAME"
git reset --hard "origin/$PUBLISH_BRANCH_NAME"
# Remove any branch folder older than 7 days
DAYS=7
cutoff=$(( $(date +%s) - DAYS*24*3600 ))
scan_dir="${PUBLISH_DIR_PLOTS%/}"
find "$scan_dir" -mindepth 1 -maxdepth 1 -type d -print0 \
| while IFS= read -r -d $'\0' d; do \
ts=$(git log -1 --format=%ct -- "$d" 2>/dev/null || true); \
if [ -n "$ts" ] && [ "$ts" -le "$cutoff" ]; then \
echo "[cleanup] Deleting: $d"; \
rm -rf -- "$d"; \
fi; \
done
rm -rf $PUBLISH_DIR_PLOTS/$BRANCH_NAME
mkdir -p $PUBLISH_DIR_PLOTS/$BRANCH_NAME
cp ../$SHARED_VOLUME_PATH/*.png $PUBLISH_DIR_PLOTS/$BRANCH_NAME/ 2>/dev/null || true
cp ../$LATENCY_HISTORY_PATH/*.png $PUBLISH_DIR_PLOTS/ 2>/dev/null || true
git add -A "$PUBLISH_DIR_PLOTS/"
git status
if git diff-index --quiet HEAD --; then
echo "No changes to commit"
else
git config user.email "github-actions[bot]@users.noreply.github.com"
git config user.name "github-actions[bot]"
git commit -m "Update performance plots for $BRANCH_NAME"
git push origin $PUBLISH_BRANCH_NAME
fi
- name: Add plots to GitHub Actions summary
shell: bash
run: |
nim c -r -d:release -o:/tmp/add_plots_to_summary ./performance/scripts/add_plots_to_summary.nim

.github/workflows/ci.yml (modified)

@@ -25,8 +25,6 @@ jobs:
cpu: i386
- os: linux-gcc-14
cpu: amd64
- os: macos
cpu: amd64
- os: macos-14
cpu: arm64
- os: windows
@@ -45,10 +43,6 @@ jobs:
os: linux-gcc-14
builder: ubuntu-24.04
shell: bash
- platform:
os: macos
builder: macos-13
shell: bash
- platform:
os: macos-14
builder: macos-14
@@ -78,15 +72,6 @@ jobs:
shell: ${{ matrix.shell }}
nim_ref: ${{ matrix.nim.ref }}
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '~1.16.0' # That's the minimum Go version that works with arm.
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v3

.github/workflows/daily_amd64.yml (modified)

@@ -7,7 +7,7 @@ on:
jobs:
test_amd64_latest:
name: Daily amd64 (latest dependencies)
name: Daily test amd64 (latest dependencies)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[
@@ -17,7 +17,7 @@ jobs:
]"
cpu: "['amd64']"
test_amd64_pinned:
name: Daily amd64 (pinned dependencies)
name: Daily test amd64 (pinned dependencies)
uses: ./.github/workflows/daily_common.yml
with:
pinned_deps: true

.github/workflows/daily_common.yml (modified)

@@ -69,16 +69,6 @@ jobs:
nim_ref: ${{ matrix.nim.ref }}
cpu: ${{ matrix.cpu }}
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '~1.16.0'
cache: false
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Install dependencies (pinned)
if: ${{ inputs.pinned_deps }}
run: |

.github/workflows/daily_i386.yml (modified)

@@ -6,8 +6,8 @@ on:
workflow_dispatch:
jobs:
test_i386:
name: Daily i386 (Linux)
test_i386_latest:
name: Daily i386 (latest dependencies)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[
@@ -20,9 +20,24 @@ jobs:
{'platform': {'os':'macos'}},
{'platform': {'os':'windows'}},
]"
test_i386_pinned:
name: Daily i386 (pinned dependencies)
uses: ./.github/workflows/daily_common.yml
with:
pinned_deps: true
nim: "[
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['i386']"
exclude: "[
{'platform': {'os':'macos'}},
{'platform': {'os':'windows'}},
]"
notify-on-failure:
name: Notify Discord on Failure
needs: [test_i386]
needs: [test_i386_latest, test_i386_pinned]
if: failure()
runs-on: ubuntu-latest
steps:

.github/workflows/daily_nimbus.yml (new file, 39 lines)

@@ -0,0 +1,39 @@
name: Daily Nimbus
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
compile_nimbus:
timeout-minutes: 80
name: 'Compile Nimbus (linux-amd64)'
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Compile nimbus using nim-libp2p
run: |
git clone --branch unstable --single-branch https://github.com/status-im/nimbus-eth2.git
cd nimbus-eth2
git submodule set-branch --branch ${{ github.sha }} vendor/nim-libp2p
make -j"$(nproc)"
make -j"$(nproc)" nimbus_beacon_node
notify-on-failure:
name: Notify Discord on Failure
needs: compile_nimbus
if: failure()
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Discord notification
uses: ./.github/actions/discord_notify
with:
webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}

.github/workflows/interop.yml (modified)

@@ -60,3 +60,30 @@ jobs:
# s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
# s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
# aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
run-autonatv2-interop:
name: Run AutoNATv2 interoperability tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.25"
- name: Set up Nim
uses: jiro4989/setup-nim-action@v1
with:
nim-version: "stable"
- name: Run Go and Nim together
run: |
nimble install
cd interop/autonatv2/go-peer
git clone https://github.com/libp2p/go-libp2p
cd go-libp2p
git apply ../disable-filtering-of-private-ip-addresses.patch
cd ..
go run testautonatv2.go &
cd ../nim-peer
nim r src/nim_peer.nim $(cat ../go-peer/peer.id)

.github/workflows/performance.yml (new file, 94 lines)

@@ -0,0 +1,94 @@
name: Performance
on:
push:
branches:
- master
pull_request:
merge_group:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
performance:
timeout-minutes: 20
strategy:
fail-fast: false
defaults:
run:
shell: bash
env:
VACP2P: "vacp2p"
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
PR_NUMBER: ${{ github.event.number }}
BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
MARKER: "<!-- perf-summary-marker -->"
COMMENT_SUMMARY_PATH: "/tmp/perf-summary.md"
SHARED_VOLUME_PATH: "performance/output"
DOCKER_STATS_PREFIX: "docker_stats_"
PUBLISH_BRANCH_NAME: "performance_plots"
CHECKOUT_SUBFOLDER_SUBPLOTS: "subplots"
PUBLISH_DIR_PLOTS: "plots"
CHECKOUT_SUBFOLDER_HISTORY: "history"
PUBLISH_DIR_LATENCY_HISTORY: "latency_history"
LATENCY_HISTORY_PATH: "history/latency_history"
LATENCY_HISTORY_PREFIX: "pr"
LATENCY_HISTORY_PLOT_FILENAME: "latency_history_all_scenarios.png"
name: "Performance"
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build Docker Image with cache
uses: docker/build-push-action@v6
with:
context: .
file: performance/Dockerfile
tags: test-node:latest
load: true
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Run
run: |
./performance/runner.sh
- name: Process latency and docker stats
uses: ./.github/actions/process_stats
- name: Publish history
if: github.repository_owner == env.VACP2P
uses: ./.github/actions/publish_history
- name: Generate plots
if: github.repository_owner == env.VACP2P
uses: ./.github/actions/generate_plots
- name: Post/Update PR comment
if: github.event_name == 'pull_request'
uses: ./.github/actions/add_comment
- name: Upload performance artifacts
if: success() || failure()
uses: actions/upload-artifact@v4
with:
name: performance-artifacts
path: |
performance/output/pr*_latency.csv
performance/output/*.png
history/latency_history/*.png
if-no-files-found: ignore
retention-days: 7

.pinned (modified)

@@ -8,7 +8,7 @@ json_serialization;https://github.com/status-im/nim-json-serialization@#2b1c5eb1
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
nimcrypto;https://github.com/cheatfate/nimcrypto@#19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
quic;https://github.com/status-im/nim-quic.git@#d9a4cbccd509f7a3ee835f75b01dec29d27a0f14
quic;https://github.com/vacp2p/nim-quic@#9370190ded18d78a5a9990f57aa8cbbf947f3891
results;https://github.com/arnetheduck/nim-results@#df8113dda4c2d74d460a8fa98252b0b771bf1f27
secp256k1;https://github.com/status-im/nim-secp256k1@#f808ed5e7a7bfc42204ec7830f14b7a42b63c284
serialization;https://github.com/status-im/nim-serialization@#548d0adc9797a10b2db7f788b804330306293088

README.md (modified)

@@ -47,7 +47,7 @@ nimble install libp2p
You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/). See [examples](./examples) for simple usage patterns.
## Getting Started
Try out the chat example. For this you'll need to have [`go-libp2p-daemon`](examples/go-daemon/daemonapi.md) running. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
Try out the chat example. Full code can be found [here](https://github.com/vacp2p/nim-libp2p/blob/master/examples/directchat.nim):
```bash
nim c -r --threads:on examples/directchat.nim
@@ -81,12 +81,6 @@ Run unit tests:
# run all the unit tests
nimble test
```
**Obs:** Running all tests requires the [`go-libp2p-daemon` to be installed and running](examples/go-daemon/daemonapi.md).
If you only want to run tests that don't require `go-libp2p-daemon`, use:
```
nimble testnative
```
For a list of all available test suites, use:
```
@@ -155,8 +149,6 @@ List of packages modules implemented in nim-libp2p:
| [connmanager](libp2p/connmanager.nim) | Connection manager |
| [identify / push identify](libp2p/protocols/identify.nim) | [Identify](https://docs.libp2p.io/concepts/fundamentals/protocols/#identify) protocol |
| [ping](libp2p/protocols/ping.nim) | [Ping](https://docs.libp2p.io/concepts/fundamentals/protocols/#ping) protocol |
| [libp2p-daemon-client](libp2p/daemon/daemonapi.nim) | [go-daemon](https://github.com/libp2p/go-libp2p-daemon) nim wrapper |
| [interop-libp2p](tests/testinterop.nim) | Interop tests |
| **Transports** | |
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |

config.nims (modified)

@@ -5,12 +5,13 @@ if dirExists("nimbledeps/pkgs2"):
switch("NimblePath", "nimbledeps/pkgs2")
switch("warningAsError", "UnusedImport:on")
switch("warningAsError", "UseBase:on")
switch("warning", "CaseTransition:off")
switch("warning", "ObservableStores:off")
switch("warning", "LockLevel:off")
--styleCheck:
usages
switch("warningAsError", "UseBase:on")
--styleCheck:
error
--mm:
@@ -23,7 +24,7 @@ if defined(windows) and not defined(vcc):
--define:
nimRawSetjmp
# begin Nimble config (version 1)
when fileExists("nimble.paths"):
# begin Nimble config (version 2)
when withDir(thisDir(), system.fileExists("nimble.paths")):
include "nimble.paths"
# end Nimble config

View File

@@ -1,54 +0,0 @@
import chronos, nimcrypto, strutils
import ../../libp2p/daemon/daemonapi
import ../hexdump
const PubSubTopic = "test-net"
proc dumpSubscribedPeers(api: DaemonAPI) {.async.} =
var peers = await api.pubsubListPeers(PubSubTopic)
echo "= List of connected and subscribed peers:"
for item in peers:
echo item.pretty()
proc dumpAllPeers(api: DaemonAPI) {.async.} =
var peers = await api.listPeers()
echo "Current connected peers count = ", len(peers)
for item in peers:
echo item.peer.pretty()
proc monitor(api: DaemonAPI) {.async.} =
while true:
echo "Dumping all peers"
await dumpAllPeers(api)
await sleepAsync(5000)
proc main() {.async.} =
echo "= Starting P2P bootnode"
var api = await newDaemonApi({DHTFull, PSGossipSub})
var id = await api.identity()
echo "= P2P bootnode ", id.peer.pretty(), " started."
let mcip4 = multiCodec("ip4")
let mcip6 = multiCodec("ip6")
echo "= You can use one of this addresses to bootstrap your nodes:"
for item in id.addresses:
if item.protoCode() == mcip4 or item.protoCode() == mcip6:
echo $item & "/ipfs/" & id.peer.pretty()
asyncSpawn monitor(api)
proc pubsubLogger(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async.} =
let msglen = len(message.data)
echo "= Recieved pubsub message with length ",
msglen, " bytes from peer ", message.peer.pretty()
echo dumpHex(message.data)
await api.dumpSubscribedPeers()
result = true
var ticket = await api.pubsubSubscribe(PubSubTopic, pubsubLogger)
when isMainModule:
waitFor(main())
while true:
poll()

examples/go-daemon/chat.nim (deleted file)

@@ -1,132 +0,0 @@
import chronos, nimcrypto, strutils
import ../../libp2p/daemon/daemonapi
## nim c -r --threads:on chat.nim
when not (compileOption("threads")):
{.fatal: "Please, compile this program with the --threads:on option!".}
const ServerProtocols = @["/test-chat-stream"]
type CustomData = ref object
api: DaemonAPI
remotes: seq[StreamTransport]
consoleFd: AsyncFD
serveFut: Future[void]
proc threadMain(wfd: AsyncFD) {.thread.} =
## This procedure performs reading from `stdin` and sends data over
## pipe to main thread.
var transp = fromPipe(wfd)
while true:
var line = stdin.readLine()
let res = waitFor transp.write(line & "\r\n")
proc serveThread(udata: CustomData) {.async.} =
## This procedure perform reading on pipe and sends data to remote clients.
var transp = fromPipe(udata.consoleFd)
proc remoteReader(transp: StreamTransport) {.async.} =
while true:
var line = await transp.readLine()
if len(line) == 0:
break
echo ">> ", line
while true:
try:
var line = await transp.readLine()
if line.startsWith("/connect"):
var parts = line.split(" ")
if len(parts) == 2:
var peerId = PeerId.init(parts[1])
var address = MultiAddress.init(multiCodec("p2p-circuit"))
address &= MultiAddress.init(multiCodec("p2p"), peerId)
echo "= Searching for peer ", peerId.pretty()
var id = await udata.api.dhtFindPeer(peerId)
echo "= Peer " & parts[1] & " found at addresses:"
for item in id.addresses:
echo $item
echo "= Connecting to peer ", $address
await udata.api.connect(peerId, @[address], 30)
echo "= Opening stream to peer chat ", parts[1]
var stream = await udata.api.openStream(peerId, ServerProtocols)
udata.remotes.add(stream.transp)
echo "= Connected to peer chat ", parts[1]
asyncSpawn remoteReader(stream.transp)
elif line.startsWith("/search"):
var parts = line.split(" ")
if len(parts) == 2:
var peerId = PeerId.init(parts[1])
echo "= Searching for peer ", peerId.pretty()
var id = await udata.api.dhtFindPeer(peerId)
echo "= Peer " & parts[1] & " found at addresses:"
for item in id.addresses:
echo $item
elif line.startsWith("/consearch"):
var parts = line.split(" ")
if len(parts) == 2:
var peerId = PeerId.init(parts[1])
echo "= Searching for peers connected to peer ", parts[1]
var peers = await udata.api.dhtFindPeersConnectedToPeer(peerId)
echo "= Found ", len(peers), " connected to peer ", parts[1]
for item in peers:
var peer = item.peer
var addresses = newSeq[string]()
var relay = false
for a in item.addresses:
addresses.add($a)
if a.protoName() == "/p2p-circuit":
relay = true
break
if relay:
echo peer.pretty(), " * ", " [", addresses.join(", "), "]"
else:
echo peer.pretty(), " [", addresses.join(", "), "]"
elif line.startsWith("/exit"):
break
else:
var msg = line & "\r\n"
echo "<< ", line
var pending = newSeq[Future[int]]()
for item in udata.remotes:
pending.add(item.write(msg))
if len(pending) > 0:
var results = await all(pending)
except CatchableError as err:
echo err.msg
proc main() {.async.} =
var data = new CustomData
data.remotes = newSeq[StreamTransport]()
var (rfd, wfd) = createAsyncPipe()
if rfd == asyncInvalidPipe or wfd == asyncInvalidPipe:
raise newException(ValueError, "Could not initialize pipe!")
data.consoleFd = rfd
data.serveFut = serveThread(data)
var thread: Thread[AsyncFD]
thread.createThread(threadMain, wfd)
echo "= Starting P2P node"
data.api = await newDaemonApi({DHTFull, Bootstrap})
await sleepAsync(3000)
var id = await data.api.identity()
proc streamHandler(api: DaemonAPI, stream: P2PStream) {.async.} =
echo "= Peer ", stream.peer.pretty(), " joined chat"
data.remotes.add(stream.transp)
while true:
var line = await stream.transp.readLine()
if len(line) == 0:
break
echo ">> ", line
await data.api.addHandler(ServerProtocols, streamHandler)
echo "= Your PeerId is ", id.peer.pretty()
await data.serveFut
when isMainModule:
waitFor(main())

examples/go-daemon/daemonapi.md (deleted file)

@@ -1,43 +0,0 @@
# Table of Contents
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Script](#script)
- [Examples](#examples)
# Introduction
This is a libp2p-backed daemon wrapping the functionalities of go-libp2p for use in Nim. <br>
For more information about the go daemon, check out [this repository](https://github.com/libp2p/go-libp2p-daemon).
> **Required only** for running the tests.
# Prerequisites
Go with version `1.16.0`
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.
# Installation
Run the build script while having the `go` command pointing to the correct Go version.
```sh
./scripts/build_p2pd.sh
```
`build_p2pd.sh` will not rebuild unless needed. If you already have the newest binary and you want to force the rebuild, use:
```sh
./scripts/build_p2pd.sh -f
```
Or:
```sh
./scripts/build_p2pd.sh --force
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the `$GOPATH/bin` directory.
If you're having issues, head into [our discord](https://discord.com/channels/864066763682218004/1115526869769535629) and ask for assistance.
After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
```sh
export PATH="$PATH:$HOME/go/bin"
```
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.
# Examples
Examples can be found in the [examples folder](https://github.com/status-im/nim-libp2p/tree/readme/examples/go-daemon)

View File

@@ -1,46 +0,0 @@
import chronos, nimcrypto, strutils, os
import ../../libp2p/daemon/daemonapi
const PubSubTopic = "test-net"
proc main(bn: string) {.async.} =
echo "= Starting P2P node"
var bootnodes = bn.split(",")
var api = await newDaemonApi(
{DHTFull, PSGossipSub, WaitBootstrap}, bootstrapNodes = bootnodes, peersRequired = 1
)
var id = await api.identity()
echo "= P2P node ", id.peer.pretty(), " started:"
for item in id.addresses:
echo item
proc pubsubLogger(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async.} =
let msglen = len(message.data)
echo "= Recieved pubsub message with length ",
msglen, " bytes from peer ", message.peer.pretty(), ": "
var strdata = cast[string](message.data)
echo strdata
result = true
var ticket = await api.pubsubSubscribe(PubSubTopic, pubsubLogger)
# Waiting for gossipsub interval
while true:
var peers = await api.pubsubListPeers(PubSubTopic)
if len(peers) > 0:
break
await sleepAsync(1000)
var data = "HELLO\r\n"
var msgData = cast[seq[byte]](data)
await api.pubsubPublish(PubSubTopic, msgData)
when isMainModule:
if paramCount() != 1:
echo "Please supply bootnodes!"
else:
waitFor(main(paramStr(1)))
while true:
poll()

interop/autonatv2/go-peer/disable-filtering-of-private-ip-addresses.patch (new file)

@@ -0,0 +1,76 @@
From 29bac4cd8f28abfb9efb481d800b7c2e855d9b03 Mon Sep 17 00:00:00 2001
From: Gabriel Cruz <gabe@gmelodie.com>
Date: Wed, 17 Sep 2025 10:42:14 -0300
Subject: [PATCH] disable filtering of private ip addresses
---
p2p/protocol/autonatv2/autonat.go | 24 +-----------------------
p2p/protocol/autonatv2/server.go | 9 ++++++---
2 files changed, 7 insertions(+), 26 deletions(-)
diff --git a/p2p/protocol/autonatv2/autonat.go b/p2p/protocol/autonatv2/autonat.go
index 24883052..00a6211f 100644
--- a/p2p/protocol/autonatv2/autonat.go
+++ b/p2p/protocol/autonatv2/autonat.go
@@ -16,7 +16,6 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
logging "github.com/libp2p/go-libp2p/gologshim"
ma "github.com/multiformats/go-multiaddr"
- manet "github.com/multiformats/go-multiaddr/net"
)
const (
@@ -180,21 +179,7 @@ func (an *AutoNAT) Close() {
// GetReachability makes a single dial request for checking reachability for requested addresses
func (an *AutoNAT) GetReachability(ctx context.Context, reqs []Request) (Result, error) {
var filteredReqs []Request
- if !an.allowPrivateAddrs {
- filteredReqs = make([]Request, 0, len(reqs))
- for _, r := range reqs {
- if manet.IsPublicAddr(r.Addr) {
- filteredReqs = append(filteredReqs, r)
- } else {
- log.Error("private address in reachability check", "address", r.Addr)
- }
- }
- if len(filteredReqs) == 0 {
- return Result{}, ErrPrivateAddrs
- }
- } else {
- filteredReqs = reqs
- }
+ filteredReqs = reqs
an.mx.Lock()
now := time.Now()
var p peer.ID
@@ -215,13 +200,6 @@ func (an *AutoNAT) GetReachability(ctx context.Context, reqs []Request) (Result,
log.Debug("reachability check failed", "peer", p, "err", err)
return res, fmt.Errorf("reachability check with %s failed: %w", p, err)
}
- // restore the correct index in case we'd filtered private addresses
- for i, r := range reqs {
- if r.Addr.Equal(res.Addr) {
- res.Idx = i
- break
- }
- }
log.Debug("reachability check successful", "peer", p)
return res, nil
}
diff --git a/p2p/protocol/autonatv2/server.go b/p2p/protocol/autonatv2/server.go
index 167d3d8e..e6d1e492 100644
--- a/p2p/protocol/autonatv2/server.go
+++ b/p2p/protocol/autonatv2/server.go
@@ -196,9 +197,6 @@ func (as *server) serveDialRequest(s network.Stream) EventDialRequestCompleted {
if err != nil {
continue
}
- if !as.allowPrivateAddrs && !manet.IsPublicAddr(a) {
- continue
- }
if !as.dialerHost.Network().CanDial(p, a) {
continue
}
--
2.51.0

interop/autonatv2/go-peer/go.mod (new file)

@@ -0,0 +1,97 @@
module go-peer
go 1.25.1
require github.com/libp2p/go-libp2p v0.43.0
replace github.com/libp2p/go-libp2p => ./go-libp2p
require (
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
github.com/ipfs/go-log/v2 v2.6.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/koron/go-ssdp v0.0.6 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.2.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-netroute v0.2.2 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/miekg/dns v1.1.66 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr v0.16.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.9.1 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.6.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pion/datachannel v1.5.10 // indirect
github.com/pion/dtls/v2 v2.2.12 // indirect
github.com/pion/dtls/v3 v3.0.6 // indirect
github.com/pion/ice/v4 v4.0.10 // indirect
github.com/pion/interceptor v0.1.40 // indirect
github.com/pion/logging v0.2.3 // indirect
github.com/pion/mdns/v2 v2.0.7 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/pion/rtcp v1.2.15 // indirect
github.com/pion/rtp v1.8.19 // indirect
github.com/pion/sctp v1.8.39 // indirect
github.com/pion/sdp/v3 v3.0.13 // indirect
github.com/pion/srtp/v3 v3.0.6 // indirect
github.com/pion/stun v0.6.1 // indirect
github.com/pion/stun/v3 v3.0.0 // indirect
github.com/pion/transport/v2 v2.2.10 // indirect
github.com/pion/transport/v3 v3.0.7 // indirect
github.com/pion/turn/v4 v4.0.2 // indirect
github.com/pion/webrtc/v4 v4.1.2 // indirect
github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.64.0 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.54.0 // indirect
github.com/quic-go/webtransport-go v0.9.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/wlynxg/anet v0.0.5 // indirect
go.uber.org/dig v1.19.0 // indirect
go.uber.org/fx v1.24.0 // indirect
go.uber.org/mock v0.5.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.39.0 // indirect
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
golang.org/x/mod v0.25.0 // indirect
golang.org/x/net v0.41.0 // indirect
golang.org/x/sync v0.15.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/text v0.26.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.34.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
)

interop/autonatv2/go-peer/go.sum (new file)

@@ -0,0 +1,441 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg=
github.com/ipfs/go-log/v2 v2.6.0/go.mod h1:p+Efr3qaY5YXpx9TX7MoLCSEZX5boSWj9wh86P5HJa8=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU=
github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=

View File

@@ -0,0 +1 @@
12D3KooWSnUDxXeeEnerD1Wf35R5b8bjTMzdAz838aDUUY8GJPGa

View File

@@ -0,0 +1,2 @@
(binary content omitted; not valid UTF-8 text)

View File

@@ -0,0 +1,97 @@
package main
import (
"crypto/rand"
"fmt"
"io/ioutil"
"log"
"os"
libp2p "github.com/libp2p/go-libp2p"
crypto "github.com/libp2p/go-libp2p/core/crypto"
peer "github.com/libp2p/go-libp2p/core/peer"
)
const (
privKeyFile = "peer.key"
peerIDFile = "peer.id"
)
func loadOrCreateIdentity() (crypto.PrivKey, peer.ID, error) {
if _, err := os.Stat(privKeyFile); err == nil {
// Load private key
data, err := os.ReadFile(privKeyFile)
if err != nil {
return nil, "", fmt.Errorf("failed to read private key: %w", err)
}
priv, err := crypto.UnmarshalPrivateKey(data)
if err != nil {
return nil, "", fmt.Errorf("failed to unmarshal private key: %w", err)
}
// Load peer ID as string
peerData, err := os.ReadFile(peerIDFile)
if err != nil {
return nil, "", fmt.Errorf("failed to read peer ID: %w", err)
}
pid, err := peer.Decode(string(peerData))
if err != nil {
return nil, "", fmt.Errorf("failed to decode peer ID: %w", err)
}
return priv, pid, nil
}
// Create new keypair
priv, pub, err := crypto.GenerateEd25519Key(rand.Reader)
if err != nil {
return nil, "", fmt.Errorf("failed to generate keypair: %w", err)
}
pid, err := peer.IDFromPublicKey(pub)
if err != nil {
return nil, "", fmt.Errorf("failed to derive peer ID: %w", err)
}
// Save private key
privBytes, err := crypto.MarshalPrivateKey(priv)
if err != nil {
return nil, "", fmt.Errorf("failed to marshal private key: %w", err)
}
if err := os.WriteFile(privKeyFile, privBytes, 0600); err != nil {
return nil, "", fmt.Errorf("failed to write private key: %w", err)
}
// Save peer ID in canonical string form
if err := os.WriteFile(peerIDFile, []byte(pid.String()), 0644); err != nil {
return nil, "", fmt.Errorf("failed to write peer ID: %w", err)
}
return priv, pid, nil
}
func main() {
priv, pid, err := loadOrCreateIdentity()
if err != nil {
log.Fatalf("Identity setup failed: %v", err)
}
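// Build the libp2p host: reuse the persistent identity, enable the
// AutoNATv2 client, and listen on TCP port 4040 over IPv4 and IPv6.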
h, err := libp2p.New(
libp2p.Identity(priv),
libp2p.EnableAutoNATv2(),
libp2p.ListenAddrStrings(
"/ip4/0.0.0.0/tcp/4040",
"/ip6/::/tcp/4040",
),
)
if err != nil {
log.Fatalf("Failed to create host: %v", err)
}
defer h.Close()
fmt.Println("Peer ID:", pid.String())
fmt.Println("Listen addresses:", h.Addrs())
fmt.Println("AutoNATv2 client started.")
select {}
}

View File

@@ -0,0 +1,4 @@
# begin Nimble config (version 2)
when withDir(thisDir(), system.fileExists("nimble.paths")):
include "nimble.paths"
# end Nimble config

View File

@@ -0,0 +1,10 @@
version = "0.1.0"
author = "Status Research & Development Gmb"
description = "AutoNATv2 peer for interop testing"
license = "MIT"
srcDir = "src"
bin = @["nim_peer"]
# Dependencies
requires "nim >= 2.3.1", "libp2p"

View File

@@ -0,0 +1,64 @@
import net, os, chronos, libp2p
import libp2p/protocols/connectivity/autonatv2/service
import libp2p/protocols/connectivity/autonatv2/types
proc waitForService(
host: string, port: Port, retries: int = 20, delay: Duration = 500.milliseconds
): Future[bool] {.async.} =
for i in 0 ..< retries:
try:
var s = newSocket()
s.connect(host, port)
s.close()
return true
except OSError:
discard
await sleepAsync(delay)
return false
proc main() {.async.} =
if paramCount() != 1:
quit("Usage: nim r src/nim_peer.nim <peerid>", 1)
# ensure go peer is started
await sleepAsync(3.seconds)
let dstPeerId = PeerId.init(paramStr(1)).get()
var src = SwitchBuilder
.new()
.withRng(newRng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/3030").tryGet()])
.withAutonatV2Server()
.withAutonatV2(
serviceConfig = AutonatV2ServiceConfig.new(scheduleInterval = Opt.some(1.seconds))
)
.withTcpTransport()
.withYamux()
.withNoise()
.build()
let awaiter = newFuture[void]()
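# Completed by the handler below once reachability is known with confidence >= 0.3.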
proc statusAndConfidenceHandler(
networkReachability: NetworkReachability, confidence: Opt[float]
) {.async: (raises: [CancelledError]).} =
if networkReachability != NetworkReachability.Unknown and confidence.isSome() and
confidence.get() >= 0.3:
if not awaiter.finished:
awaiter.complete()
let service = cast[AutonatV2Service](src.services[1])
service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
await src.start()
await src.connect(dstPeerId, @[MultiAddress.init("/ip4/127.0.0.1/tcp/4040").get()])
await awaiter
echo service.networkReachability
when isMainModule:
if waitFor(waitForService("127.0.0.1", Port(4040))):
waitFor(main())
else:
quit("timeout waiting for service", 1)

View File

@@ -1,7 +1,7 @@
mode = ScriptMode.Verbose
packageName = "libp2p"
version = "1.11.0"
version = "1.13.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
@@ -10,7 +10,7 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
requires "nim >= 2.0.0",
"nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
"chronicles >= 0.11.0 & < 0.12.0", "chronos >= 4.0.4", "metrics", "secp256k1",
"stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.10",
"stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.16",
"https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
@@ -49,12 +49,6 @@ proc tutorialToMd(filename: string) =
task testnative, "Runs libp2p native tests":
runTest("testnative")
task testdaemon, "Runs daemon tests":
runTest("testdaemon")
task testinterop, "Runs interop tests":
runTest("testinterop")
task testpubsub, "Runs pubsub tests":
runTest("pubsub/testpubsub", "-d:libp2p_gossipsub_1_4")

View File

@@ -85,6 +85,7 @@ when defined(libp2p_autotls_support):
../crypto/rsa,
../utils/heartbeat,
../transports/transport,
../utils/ipaddr,
../transports/tcptransport,
../nameresolving/dnsresolver
@@ -150,7 +151,10 @@ when defined(libp2p_autotls_support):
if self.config.ipAddress.isNone():
try:
self.config.ipAddress = Opt.some(getPublicIPAddress())
except AutoTLSError as exc:
except ValueError as exc:
error "Failed to get public IP address", err = exc.msg
return false
except OSError as exc:
error "Failed to get public IP address", err = exc.msg
return false
self.managerFut = self.run(switch)

View File

@@ -22,7 +22,7 @@ const
type AutoTLSError* = object of LPError
when defined(libp2p_autotls_support):
import net, strutils
import strutils
from times import DateTime, toTime, toUnix
import stew/base36
import
@@ -33,36 +33,6 @@ when defined(libp2p_autotls_support):
../nameresolving/nameresolver,
./acme/client
proc checkedGetPrimaryIPAddr*(): IpAddress {.raises: [AutoTLSError].} =
# This is so that we don't need to catch Exceptions directly
# since we support 1.6.16 and getPrimaryIPAddr before nim 2 didn't have explicit .raises. pragmas
try:
return getPrimaryIPAddr()
except Exception as exc:
raise newException(AutoTLSError, "Error while getting primary IP address", exc)
proc isIPv4*(ip: IpAddress): bool =
ip.family == IpAddressFamily.IPv4
proc isPublic*(ip: IpAddress): bool {.raises: [AutoTLSError].} =
let ip = $ip
try:
not (
ip.startsWith("10.") or
(ip.startsWith("172.") and parseInt(ip.split(".")[1]) in 16 .. 31) or
ip.startsWith("192.168.") or ip.startsWith("127.") or ip.startsWith("169.254.")
)
except ValueError as exc:
raise newException(AutoTLSError, "Failed to parse IP address", exc)
proc getPublicIPAddress*(): IpAddress {.raises: [AutoTLSError].} =
let ip = checkedGetPrimaryIPAddr()
if not ip.isIPv4():
raise newException(AutoTLSError, "Host does not have an IPv4 address")
if not ip.isPublic():
raise newException(AutoTLSError, "Host does not have a public IPv4 address")
return ip
proc asMoment*(dt: DateTime): Moment =
let unixTime: int64 = dt.toTime.toUnix
return Moment.init(unixTime, Second)

View File

@@ -26,7 +26,15 @@ import
transports/[transport, tcptransport, wstransport, memorytransport],
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
protocols/connectivity/[
autonat/server,
autonatv2/server,
autonatv2/service,
autonatv2/client,
relay/relay,
relay/client,
relay/rtransport,
],
connmanager,
upgrademngrs/muxedupgrade,
observedaddrmanager,
@@ -74,6 +82,9 @@ type
nameResolver: NameResolver
peerStoreCapacity: Opt[int]
autonat: bool
autonatV2ServerConfig: Opt[AutonatV2Config]
autonatV2Client: AutonatV2Client
autonatV2ServiceConfig: AutonatV2ServiceConfig
autotls: AutotlsService
circuitRelay: Relay
rdv: RendezVous
@@ -280,6 +291,19 @@ proc withAutonat*(b: SwitchBuilder): SwitchBuilder =
b.autonat = true
b
proc withAutonatV2Server*(
b: SwitchBuilder, config: AutonatV2Config = AutonatV2Config.new()
): SwitchBuilder =
b.autonatV2ServerConfig = Opt.some(config)
b
proc withAutonatV2*(
b: SwitchBuilder, serviceConfig = AutonatV2ServiceConfig.new()
): SwitchBuilder =
b.autonatV2Client = AutonatV2Client.new(b.rng)
b.autonatV2ServiceConfig = serviceConfig
b
when defined(libp2p_autotls_support):
proc withAutotls*(
b: SwitchBuilder, config: AutotlsConfig = AutotlsConfig.new()
@@ -366,6 +390,13 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
if b.enableWildcardResolver:
b.services.insert(WildcardAddressResolverService.new(), 0)
if not isNil(b.autonatV2Client):
b.services.add(
AutonatV2Service.new(
b.rng, client = b.autonatV2Client, config = b.autonatV2ServiceConfig
)
)
let switch = newSwitch(
peerInfo = peerInfo,
transports = transports,
@@ -379,9 +410,15 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
switch.mount(identify)
if not isNil(b.autonatV2Client):
b.autonatV2Client.setup(switch)
switch.mount(b.autonatV2Client)
b.autonatV2ServerConfig.withValue(config):
switch.mount(AutonatV2.new(switch, config = config))
if b.autonat:
let autonat = Autonat.new(switch)
switch.mount(autonat)
switch.mount(Autonat.new(switch))
if not isNil(b.circuitRelay):
if b.circuitRelay of RelayClient:
@@ -395,13 +432,78 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
return switch
proc newStandardSwitch*(
type TransportType* {.pure.} = enum
QUIC
TCP
Memory
proc newStandardSwitchBuilder*(
privKey = none(PrivateKey),
addrs: MultiAddress | seq[MultiAddress] =
MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("valid address"),
secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
addrs: MultiAddress | seq[MultiAddress] = newSeq[MultiAddress](),
transport: TransportType = TransportType.TCP,
transportFlags: set[ServerFlags] = {},
rng = newRng(),
secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes,
maxConnections = MaxConnections,
maxIn = -1,
maxOut = -1,
maxConnsPerPeer = MaxConnectionsPerPeer,
nameResolver: NameResolver = nil,
sendSignedPeerRecord = false,
peerStoreCapacity = 1000,
): SwitchBuilder {.raises: [LPError], public.} =
## Helper for common switch configurations.
var b = SwitchBuilder
.new()
.withRng(rng)
.withSignedPeerRecord(sendSignedPeerRecord)
.withMaxConnections(maxConnections)
.withMaxIn(maxIn)
.withMaxOut(maxOut)
.withMaxConnsPerPeer(maxConnsPerPeer)
.withPeerStore(capacity = peerStoreCapacity)
.withNameResolver(nameResolver)
.withNoise()
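# Normalize `addrs` to a seq and fall back to a transport-appropriate
# default listen address when none was provided.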
var addrs =
when addrs is MultiAddress:
@[addrs]
else:
addrs
case transport
of TransportType.QUIC:
when defined(libp2p_quic_support):
if addrs.len == 0:
addrs = @[MultiAddress.init("/ip4/0.0.0.0/udp/0/quic-v1").tryGet()]
b = b.withQuicTransport().withAddresses(addrs)
else:
raiseAssert "QUIC not supported in this build"
of TransportType.TCP:
if addrs.len == 0:
addrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()]
b = b.withTcpTransport(transportFlags).withAddresses(addrs).withMplex(
inTimeout, outTimeout
)
of TransportType.Memory:
if addrs.len == 0:
addrs = @[MultiAddress.init(MemoryAutoAddress).tryGet()]
b = b.withMemoryTransport().withAddresses(addrs).withMplex(inTimeout, outTimeout)
privKey.withValue(pkey):
b = b.withPrivateKey(pkey)
b
proc newStandardSwitch*(
privKey = none(PrivateKey),
addrs: MultiAddress | seq[MultiAddress] = newSeq[MultiAddress](),
transport: TransportType = TransportType.TCP,
transportFlags: set[ServerFlags] = {},
rng = newRng(),
secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes,
maxConnections = MaxConnections,
@@ -412,28 +514,21 @@ proc newStandardSwitch*(
sendSignedPeerRecord = false,
peerStoreCapacity = 1000,
): Switch {.raises: [LPError], public.} =
## Helper for common switch configurations.
let addrs =
when addrs is MultiAddress:
@[addrs]
else:
addrs
var b = SwitchBuilder
.new()
.withAddresses(addrs)
.withRng(rng)
.withSignedPeerRecord(sendSignedPeerRecord)
.withMaxConnections(maxConnections)
.withMaxIn(maxIn)
.withMaxOut(maxOut)
.withMaxConnsPerPeer(maxConnsPerPeer)
.withPeerStore(capacity = peerStoreCapacity)
.withMplex(inTimeout, outTimeout)
.withTcpTransport(transportFlags)
.withNameResolver(nameResolver)
.withNoise()
privKey.withValue(pkey):
b = b.withPrivateKey(pkey)
b.build()
newStandardSwitchBuilder(
privKey = privKey,
addrs = addrs,
transport = transport,
transportFlags = transportFlags,
rng = rng,
secureManagers = secureManagers,
inTimeout = inTimeout,
outTimeout = outTimeout,
maxConnections = maxConnections,
maxIn = maxIn,
maxOut = maxOut,
maxConnsPerPeer = maxConnsPerPeer,
nameResolver = nameResolver,
sendSignedPeerRecord = sendSignedPeerRecord,
peerStoreCapacity = peerStoreCapacity,
)
.build()

File diff suppressed because it is too large

View File

@@ -1,156 +0,0 @@
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
## This module implements Pool of StreamTransport.
import chronos
const DefaultPoolSize* = 8 ## Default pool size
type
ConnectionFlags = enum
None
Busy
PoolItem = object
transp*: StreamTransport
flags*: set[ConnectionFlags]
PoolState = enum
Connecting
Connected
Closing
Closed
TransportPool* = ref object ## Transports pool object
transports: seq[PoolItem]
busyCount: int
state: PoolState
bufferSize: int
event: AsyncEvent
TransportPoolError* = object of AsyncError
proc waitAll[T](futs: seq[Future[T]]): Future[void] =
## Performs waiting for all Future[T].
var counter = len(futs)
var retFuture = newFuture[void]("connpool.waitAllConnections")
proc cb(udata: pointer) =
dec(counter)
if counter == 0:
retFuture.complete()
for fut in futs:
fut.addCallback(cb)
return retFuture
proc newPool*(
address: TransportAddress,
poolsize: int = DefaultPoolSize,
bufferSize = DefaultStreamBufferSize,
): Future[TransportPool] {.async: (raises: [CancelledError]).} =
## Establish pool of connections to address ``address`` with size
## ``poolsize``.
var pool = new TransportPool
pool.bufferSize = bufferSize
pool.transports = newSeq[PoolItem](poolsize)
var conns = newSeq[Future[StreamTransport]](poolsize)
pool.state = Connecting
for i in 0 ..< poolsize:
conns[i] = connect(address, bufferSize)
# Waiting for all connections to be established.
await waitAll(conns)
# Checking connections and preparing pool.
for i in 0 ..< poolsize:
if conns[i].failed:
raise conns[i].error
else:
let transp = conns[i].read()
let item = PoolItem(transp: transp)
pool.transports[i] = item
# Setup available connections event
pool.event = newAsyncEvent()
pool.state = Connected
result = pool
proc acquire*(
pool: TransportPool
): Future[StreamTransport] {.async: (raises: [CancelledError, TransportPoolError]).} =
## Acquire non-busy connection from pool ``pool``.
var transp: StreamTransport
if pool.state in {Connected}:
while true:
if pool.busyCount < len(pool.transports):
for conn in pool.transports.mitems():
if Busy notin conn.flags:
conn.flags.incl(Busy)
inc(pool.busyCount)
transp = conn.transp
break
else:
await pool.event.wait()
pool.event.clear()
if not isNil(transp):
break
else:
raise newException(TransportPoolError, "Pool is not ready!")
result = transp
proc release*(
pool: TransportPool, transp: StreamTransport
) {.async: (raises: [TransportPoolError]).} =
## Release connection ``transp`` back to pool ``pool``.
if pool.state in {Connected, Closing}:
var found = false
for conn in pool.transports.mitems():
if conn.transp == transp:
conn.flags.excl(Busy)
dec(pool.busyCount)
pool.event.fire()
found = true
break
if not found:
raise newException(TransportPoolError, "Transport not bound to pool!")
else:
raise newException(TransportPoolError, "Pool is not ready!")
proc join*(
pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
## Waiting for all connections to become available.
if pool.state in {Connected, Closing}:
while true:
if pool.busyCount == 0:
break
else:
await pool.event.wait()
pool.event.clear()
elif pool.state == Connecting:
raise newException(TransportPoolError, "Pool is not ready!")
proc close*(
pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
## Closes transports pool ``pool`` and releases all resources.
if pool.state == Connected:
pool.state = Closing
# Waiting for all transports to become available.
await pool.join()
# Closing all transports
var pending = newSeq[Future[void]](len(pool.transports))
for i in 0 ..< len(pool.transports):
let transp = pool.transports[i].transp
transp.close()
pending[i] = transp.join()
# Waiting for all transports to be closed
await waitAll(pending)
# Mark pool as `Closed`.
pool.state = Closed

View File

@@ -11,7 +11,7 @@
import chronos
import results
import peerid, stream/connection, transports/transport
import peerid, stream/connection, transports/transport, muxers/muxer
export results
@@ -49,6 +49,22 @@ method dial*(
doAssert(false, "[Dial.dial] abstract method not implemented!")
method dialAndUpgrade*(
self: Dial,
peerId: Opt[PeerId],
hostname: string,
addrs: MultiAddress,
dir = Direction.Out,
): Future[Muxer] {.base, async: (raises: [CancelledError]).} =
doAssert(false, "[Dial.dialAndUpgrade] abstract method not implemented!")
method dialAndUpgrade*(
self: Dial, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
): Future[Muxer] {.
base, async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
.} =
doAssert(false, "[Dial.dialAndUpgrade] abstract method not implemented!")
method dial*(
self: Dial,
peerId: PeerId,
@@ -65,6 +81,11 @@ method dial*(
method addTransport*(self: Dial, transport: Transport) {.base.} =
doAssert(false, "[Dial.addTransport] abstract method not implemented!")
method negotiateStream*(
self: Dial, conn: Connection, protos: seq[string]
): Future[Connection] {.base, async: (raises: [CatchableError]).} =
doAssert(false, "[Dial.negotiateStream] abstract method not implemented!")
method tryDial*(
self: Dial, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.

View File

@@ -43,20 +43,20 @@ type Dialer* = ref object of Dial
peerStore: PeerStore
nameResolver: NameResolver
proc dialAndUpgrade(
method dialAndUpgrade*(
self: Dialer,
peerId: Opt[PeerId],
hostname: string,
address: MultiAddress,
addrs: MultiAddress,
dir = Direction.Out,
): Future[Muxer] {.async: (raises: [CancelledError]).} =
for transport in self.transports: # for each transport
if transport.handles(address): # check if it can dial it
trace "Dialing address", address, peerId = peerId.get(default(PeerId)), hostname
if transport.handles(addrs): # check if it can dial it
trace "Dialing address", addrs, peerId = peerId.get(default(PeerId)), hostname
let dialed =
try:
libp2p_total_dial_attempts.inc()
await transport.dial(hostname, address, peerId)
await transport.dial(hostname, addrs, peerId)
except CancelledError as exc:
trace "Dialing canceled",
description = exc.msg, peerId = peerId.get(default(PeerId))
@@ -139,7 +139,7 @@ proc expandDnsAddr(
else:
result.add((resolvedAddress, peerId))
proc dialAndUpgrade(
method dialAndUpgrade*(
self: Dialer, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
): Future[Muxer] {.
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
@@ -284,7 +284,7 @@ method connect*(
return
(await self.internalConnect(Opt.none(PeerId), @[address], false)).connection.peerId
proc negotiateStream(
method negotiateStream*(
self: Dialer, conn: Connection, protos: seq[string]
): Future[Connection] {.async: (raises: [CatchableError]).} =
trace "Negotiating stream", conn, protos
@@ -292,7 +292,6 @@ proc negotiateStream(
if not protos.contains(selected):
await conn.closeWithEOF()
raise newException(DialFailedError, "Unable to select sub-protocol: " & $protos)
return conn
method tryDial*(

View File

@@ -27,7 +27,7 @@ macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
quote:
for res in `futs`:
if res.failed:
let exc = res.readError()
let exc = res.error
# We still don't abort but warn
debug "A future has failed, enable trace logging for details",
error = exc.name
@@ -37,7 +37,7 @@ macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
for res in `futs`:
block check:
if res.failed:
let exc = res.readError()
let exc = res.error
for i in 0 ..< `nexclude`:
if exc of `exclude`[i]:
trace "A future has failed", error = exc.name, description = exc.msg

View File

@@ -843,6 +843,14 @@ proc init*(
res.data.finish()
ok(res)
proc getPart*(ma: MultiAddress, codec: MultiCodec): MaResult[MultiAddress] =
## Returns the first part of ``ma`` matching codec ``codec``
for part in ma:
let part = ?part
if codec == ?part.protoCode:
return ok(part)
err("no such codec in multiaddress")
proc getProtocol(name: string): MAProtocol {.inline.} =
let mc = MultiCodec.codec(name)
if mc != InvalidMultiCodec:
@@ -1119,3 +1127,32 @@ proc getRepeatedField*(
err(ProtoError.IncorrectBlob)
else:
ok(true)
proc areAddrsConsistent*(a, b: MultiAddress): bool =
## Checks if two multiaddresses have the same protocol stack.
let protosA = a.protocols().get()
let protosB = b.protocols().get()
if protosA.len != protosB.len:
return false
for idx in 0 ..< protosA.len:
let protoA = protosA[idx]
let protoB = protosB[idx]
if protoA != protoB:
if idx == 0:
# allow DNS ↔ IP at the first component
if protoB == multiCodec("dns") or protoB == multiCodec("dnsaddr"):
if not (protoA == multiCodec("ip4") or protoA == multiCodec("ip6")):
return false
elif protoB == multiCodec("dns4"):
if protoA != multiCodec("ip4"):
return false
elif protoB == multiCodec("dns6"):
if protoA != multiCodec("ip6"):
return false
else:
return false
else:
return false
true

View File

@@ -150,6 +150,10 @@ method close*(s: LPChannel) {.async: (raises: []).} =
trace "Closed channel", s, len = s.len
method closeWrite*(s: LPChannel) {.async: (raises: []).} =
## For mplex, closeWrite is the same as close - it implements half-close
await s.close()
method initStream*(s: LPChannel) =
if s.objName.len == 0:
s.objName = LPChannelTrackerName

View File

@@ -95,6 +95,7 @@ proc newStreamInternal*(
result.peerId = m.connection.peerId
result.observedAddr = m.connection.observedAddr
result.localAddr = m.connection.localAddr
result.transportDir = m.connection.transportDir
when defined(libp2p_agents_metrics):
result.shortAgent = m.connection.shortAgent

View File

@@ -54,6 +54,10 @@ method newStream*(
.} =
raiseAssert("[Muxer.newStream] abstract method not implemented!")
when defined(libp2p_agents_metrics):
method setShortAgent*(m: Muxer, shortAgent: string) {.base, gcsafe.} =
m.connection.shortAgent = shortAgent
method close*(m: Muxer) {.base, async: (raises: []).} =
if m.connection != nil:
await m.connection.close()

View File

@@ -217,7 +217,11 @@ method closeImpl*(channel: YamuxChannel) {.async: (raises: []).} =
discard
await channel.actuallyClose()
proc clearQueues(channel: YamuxChannel, error: ref CatchableError = nil) =
method closeWrite*(channel: YamuxChannel) {.async: (raises: []).} =
## For yamux, closeWrite is the same as close - it implements half-close
await channel.close()
proc clearQueues(channel: YamuxChannel, error: ref LPStreamEOFError = nil) =
for toSend in channel.sendQueue:
if error.isNil():
toSend.fut.complete()
@@ -511,6 +515,7 @@ proc createStream(
stream.initStream()
stream.peerId = m.connection.peerId
stream.observedAddr = m.connection.observedAddr
stream.localAddr = m.connection.localAddr
stream.transportDir = m.connection.transportDir
when defined(libp2p_agents_metrics):
stream.shortAgent = m.connection.shortAgent
@@ -529,13 +534,13 @@ method close*(m: Yamux) {.async: (raises: []).} =
trace "Closing yamux"
let channels = toSeq(m.channels.values())
for channel in channels:
for toSend in channel.sendQueue:
toSend.fut.fail(newLPStreamEOFError())
channel.sendQueue = @[]
channel.clearQueues(newLPStreamEOFError())
channel.recvWindow = 0
channel.sendWindow = 0
channel.closedLocally = true
channel.isReset = true
channel.opened = false
channel.isClosed = true
await channel.remoteClosed()
channel.receivedData.fire()
try:
@@ -607,8 +612,10 @@ method handle*(m: Yamux) {.async: (raises: []).} =
if header.length > 0:
var buffer = newSeqUninit[byte](header.length)
await m.connection.readExactly(addr buffer[0], int(header.length))
do:
raise newException(YamuxError, "Unknown stream ID: " & $header.streamId)
# If we do not have a stream, likely we sent a RST and/or closed the stream
trace "unknown stream id", id = header.streamId
continue
let channel =

View File

@@ -93,6 +93,10 @@ proc parseFullAddress*(ma: MultiAddress): MaResult[(PeerId, MultiAddress)] =
proc parseFullAddress*(ma: string | seq[byte]): MaResult[(PeerId, MultiAddress)] =
parseFullAddress(?MultiAddress.init(ma))
proc toFullAddress*(peerId: PeerId, ma: MultiAddress): MaResult[MultiAddress] =
let peerIdPart = ?MultiAddress.init(multiCodec("p2p"), peerId.data)
concat(ma, peerIdPart)
proc new*(
p: typedesc[PeerInfo],
key: PrivateKey,

View File

@@ -214,7 +214,7 @@ proc identify*(
info.agentVersion.get("").split("/")[0].safeToLowerAscii().get("")
if KnownLibP2PAgentsSeq.contains(shortAgent):
knownAgent = shortAgent
muxer.connection.setShortAgent(knownAgent)
muxer.setShortAgent(knownAgent)
peerStore.updatePeerInfo(info, stream.observedAddr)
finally:

View File

@@ -12,7 +12,7 @@
import results
import chronos, chronicles
import ../../../switch, ../../../multiaddress, ../../../peerid
import core
import types
logScope:
topics = "libp2p autonat"

View File

@@ -20,9 +20,9 @@ import
../../../peerid,
../../../utils/[semaphore, future],
../../../errors
import core
import types
export core
export types
logScope:
topics = "libp2p autonat"

View File

@@ -14,11 +14,11 @@ import chronos, metrics
import ../../../switch
import ../../../wire
import client
from core import NetworkReachability, AutonatUnreachableError
from types import NetworkReachability, AutonatUnreachableError
import ../../../utils/heartbeat
import ../../../crypto/crypto
export core.NetworkReachability
export NetworkReachability
logScope:
topics = "libp2p autonatservice"

View File

@@ -58,6 +58,9 @@ type
NotReachable
Reachable
proc isReachable*(self: NetworkReachability): bool =
self == NetworkReachability.Reachable
proc encode(p: AutonatPeerInfo): ProtoBuffer =
result = initProtoBuffer()
p.id.withValue(id):

View File

@@ -0,0 +1,202 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import results
import chronos, chronicles, tables
import
../../protocol,
../../../switch,
../../../multiaddress,
../../../multicodec,
../../../peerid,
../../../protobuf/minprotobuf,
./types,
./utils
logScope:
topics = "libp2p autonat v2 client"
const
MaxAcceptedDialDataRequest* = 100 * 1024 # 100 KB
MaxDialDataResponsePayload* = 1024
DefaultDialBackTimeout* = 5.seconds
type AutonatV2Client* = ref object of LPProtocol
dialer*: Dial
dialBackTimeout: Duration
rng: ref HmacDrbgContext
expectedNonces: Table[Nonce, Opt[MultiAddress]]
proc handleDialBack(
self: AutonatV2Client, conn: Connection, dialBack: DialBack
) {.async: (raises: [CancelledError, AutonatV2Error, LPStreamError]).} =
debug "Handling DialBack",
conn = conn, localAddr = conn.localAddr, observedAddr = conn.observedAddr
if not self.expectedNonces.hasKey(dialBack.nonce):
error "Not expecting this nonce", nonce = dialBack.nonce
return
conn.localAddr.withValue(localAddr):
debug "Setting expectedNonces",
nonce = dialBack.nonce, localAddr = Opt.some(localAddr)
self.expectedNonces[dialBack.nonce] = Opt.some(localAddr)
else:
error "Unable to get localAddr from connection"
return
trace "Sending DialBackResponse"
await conn.writeLp(DialBackResponse(status: DialBackStatus.Ok).encode().buffer)
proc new*(
T: typedesc[AutonatV2Client],
rng: ref HmacDrbgContext,
dialBackTimeout: Duration = DefaultDialBackTimeout,
): T =
let client = T(rng: rng, dialBackTimeout: dialBackTimeout)
# handler for DialBack messages
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
let dialBack = DialBack.decode(initProtoBuffer(await conn.readLp(DialBackLpSize))).valueOr:
trace "Unable to decode DialBack"
return
if not await client.handleDialBack(conn, dialBack).withTimeout(
client.dialBackTimeout
):
trace "Sending DialBackResponse timed out"
except CancelledError as exc:
raise exc
except LPStreamRemoteClosedError as exc:
debug "Connection closed by peer", description = exc.msg, peer = conn.peerId
except LPStreamError as exc:
debug "Connection closed by peer", description = exc.msg, peer = conn.peerId
client.handler = handleStream
client.codec = $AutonatV2Codec.DialBack
client
proc setup*(self: AutonatV2Client, switch: Switch) =
self.dialer = switch.dialer
proc handleDialDataRequest*(
conn: Connection, req: DialDataRequest
): Future[DialResponse] {.
async: (raises: [CancelledError, AutonatV2Error, LPStreamError])
.} =
debug "Received DialDataRequest",
numBytes = req.numBytes, maxAcceptedNumBytes = MaxAcceptedDialDataRequest
if req.numBytes > MaxAcceptedDialDataRequest:
raise newException(
AutonatV2Error, "Rejecting DialDataRequest: numBytes is greater than the maximum"
)
# send required data
var msg = AutonatV2Msg(
msgType: MsgType.DialDataResponse,
dialDataResp: DialDataResponse(data: newSeq[byte](MaxDialDataResponsePayload)),
)
let messagesToSend =
(req.numBytes + MaxDialDataResponsePayload - 1) div MaxDialDataResponsePayload
for i in 0 ..< messagesToSend:
await conn.writeLp(msg.encode().buffer)
debug "Sending DialDataResponse", i = i, messagesToSend = messagesToSend
# get DialResponse
msg = AutonatV2Msg.decode(initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))).valueOr:
raise newException(AutonatV2Error, "Unable to decode AutonatV2Msg")
debug "Received message", msgType = msg.msgType
if msg.msgType != MsgType.DialResponse:
raise
newException(AutonatV2Error, "Expecting DialResponse, but got " & $msg.msgType)
return msg.dialResp
proc checkAddrIdx(
self: AutonatV2Client, addrIdx: AddrIdx, testAddrs: seq[MultiAddress], nonce: Nonce
): bool {.raises: [AutonatV2Error].} =
debug "checking addrs", addrIdx = addrIdx, testAddrs = testAddrs, nonce = nonce
let dialBackAddrs = self.expectedNonces.getOrDefault(nonce).valueOr:
debug "Not expecting this nonce",
nonce = nonce, expectedNonces = self.expectedNonces
return false
if addrIdx.int >= testAddrs.len:
debug "addrIdx outside of testAddrs range",
addrIdx = addrIdx, testAddrs = testAddrs, testAddrsLen = testAddrs.len
return false
let dialRespAddrs = testAddrs[addrIdx]
if not areAddrsConsistent(dialRespAddrs, dialBackAddrs):
debug "Invalid addrIdx: got DialBack in another address",
addrIdx = addrIdx, dialBackAddrs = dialBackAddrs, dialRespAddrs = dialRespAddrs
return false
true
method sendDialRequest*(
self: AutonatV2Client, pid: PeerId, testAddrs: seq[MultiAddress]
): Future[AutonatV2Response] {.
base,
async: (raises: [AutonatV2Error, CancelledError, DialFailedError, LPStreamError])
.} =
## Dials peer with `pid` and requests that it tries connecting to `testAddrs`
let nonce = self.rng[].generate(Nonce)
self.expectedNonces[nonce] = Opt.none(MultiAddress)
var dialResp: DialResponse
try:
let conn = await self.dialer.dial(pid, @[$AutonatV2Codec.DialRequest])
defer:
await conn.close()
# send dialRequest
await conn.writeLp(
AutonatV2Msg(
msgType: MsgType.DialRequest,
dialReq: DialRequest(addrs: testAddrs, nonce: nonce),
).encode().buffer
)
let msg = AutonatV2Msg.decode(
initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))
).valueOr:
raise newException(AutonatV2Error, "Unable to decode AutonatV2Msg")
dialResp =
case msg.msgType
of MsgType.DialResponse:
msg.dialResp
of MsgType.DialDataRequest:
await conn.handleDialDataRequest(msg.dialDataReq)
else:
raise newException(
AutonatV2Error,
"Expecting DialResponse or DialDataRequest, but got " & $msg.msgType,
)
debug "Received DialResponse", dialResp = dialResp
dialResp.dialStatus.withValue(dialStatus):
if dialStatus == DialStatus.Ok:
dialResp.addrIdx.withValue(addrIdx):
if not self.checkAddrIdx(addrIdx, testAddrs, nonce):
raise newException(
AutonatV2Error, "Invalid addrIdx " & $addrIdx & " in DialResponse"
)
except LPStreamRemoteClosedError as exc:
error "Stream reset by server", description = exc.msg, peer = pid
finally:
# rollback any changes
self.expectedNonces.del(nonce)
return dialResp.asAutonatV2Response(testAddrs)

View File

@@ -0,0 +1,65 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import results
import chronos, chronicles
import
../../../../libp2p/
[
switch,
muxers/muxer,
dialer,
multiaddress,
multicodec,
peerid,
protobuf/minprotobuf,
],
../../protocol,
./types,
./server
type AutonatV2Mock* = ref object of LPProtocol
config*: AutonatV2Config
response*: ProtoBuffer
proc new*(
T: typedesc[AutonatV2Mock], config: AutonatV2Config = AutonatV2Config.new()
): T =
let autonatV2 = T(config: config)
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
defer:
await conn.close()
try:
let msg = AutonatV2Msg.decode(
initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))
).valueOr:
return
if msg.msgType != MsgType.DialRequest:
return
except LPStreamError:
return
try:
# return mocked message
await conn.writeLp(autonatV2.response.buffer)
except CancelledError as exc:
raise exc
except LPStreamRemoteClosedError:
discard
except LPStreamError:
discard
autonatV2.handler = handleStream
autonatV2.codec = $AutonatV2Codec.DialRequest
autonatV2

View File

@@ -0,0 +1,45 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import chronos
import ../../../peerid, ../../../multiaddress, ../../../switch
import ./client, ./types
type AutonatV2ClientMock* = ref object of AutonatV2Client
response*: AutonatV2Response
dials*: int
expectedDials: int
finished*: Future[void]
proc new*(
T: typedesc[AutonatV2ClientMock], response: AutonatV2Response, expectedDials: int
): T =
return T(
dials: 0,
expectedDials: expectedDials,
finished: newFuture[void](),
response: response,
)
method sendDialRequest*(
self: AutonatV2ClientMock, pid: PeerId, testAddrs: seq[MultiAddress]
): Future[AutonatV2Response] {.
async: (raises: [AutonatV2Error, CancelledError, DialFailedError, LPStreamError])
.} =
self.dials += 1
if self.dials == self.expectedDials:
self.finished.complete()
var ans = self.response
ans.dialResp.addrIdx.withValue(addrIdx):
ans.addrs = Opt.some(testAddrs[addrIdx])
ans
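A minimal test sketch for the mock, assuming a pre-built AutonatV2Response `canned` and the usual service wiring under test:

# build the mock, let the code under test trigger three probes, then verify
let mockClient = AutonatV2ClientMock.new(canned, expectedDials = 3)
# ... inject mockClient into the service under test and run it ...
await mockClient.finished # completes once dials == expectedDials
doAssert mockClient.dials == 3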

View File


@@ -0,0 +1,309 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import results
import chronos, chronicles
import
../../../../libp2p/[
switch,
muxers/muxer,
dialer,
multiaddress,
transports/transport,
multicodec,
peerid,
protobuf/minprotobuf,
utils/ipaddr,
],
../../protocol,
./types
logScope:
topics = "libp2p autonat v2 server"
type AutonatV2Config* = object
dialTimeout: Duration
dialDataSize: uint64
amplificationAttackTimeout: Duration
allowPrivateAddresses: bool
type AutonatV2* = ref object of LPProtocol
switch*: Switch
config: AutonatV2Config
proc new*(
T: typedesc[AutonatV2Config],
dialTimeout: Duration = DefaultDialTimeout,
dialDataSize: uint64 = DefaultDialDataSize,
amplificationAttackTimeout: Duration = DefaultAmplificationAttackDialTimeout,
allowPrivateAddresses: bool = false,
): T =
T(
dialTimeout: dialTimeout,
dialDataSize: dialDataSize,
amplificationAttackTimeout: amplificationAttackTimeout,
allowPrivateAddresses: allowPrivateAddresses,
)
proc sendDialResponse(
conn: Connection,
status: ResponseStatus,
addrIdx: Opt[AddrIdx] = Opt.none(AddrIdx),
dialStatus: Opt[DialStatus] = Opt.none(DialStatus),
) {.async: (raises: [CancelledError, LPStreamError]).} =
await conn.writeLp(
AutonatV2Msg(
msgType: MsgType.DialResponse,
dialResp: DialResponse(status: status, addrIdx: addrIdx, dialStatus: dialStatus),
).encode().buffer
)
proc findObservedIPAddr*(
conn: Connection, req: DialRequest
): Future[Opt[MultiAddress]] {.async: (raises: [CancelledError, LPStreamError]).} =
let observedAddr = conn.observedAddr.valueOr:
await conn.sendDialResponse(ResponseStatus.EInternalError)
return Opt.none(MultiAddress)
let isRelayed = observedAddr.contains(multiCodec("p2p-circuit")).valueOr:
error "Invalid observed address"
await conn.sendDialResponse(ResponseStatus.EDialRefused)
return Opt.none(MultiAddress)
if isRelayed:
error "Invalid observed address: relayed address"
await conn.sendDialResponse(ResponseStatus.EDialRefused)
return Opt.none(MultiAddress)
let hostIp = observedAddr[0].valueOr:
error "Invalid observed address"
await conn.sendDialResponse(ResponseStatus.EInternalError)
return Opt.none(MultiAddress)
return Opt.some(hostIp)
proc dialBack(
conn: Connection, nonce: Nonce
): Future[DialStatus] {.
async: (raises: [CancelledError, DialFailedError, LPStreamError])
.} =
try:
# send dial back
await conn.writeLp(DialBack(nonce: nonce).encode().buffer)
# receive DialBackResponse
let dialBackResp = DialBackResponse.decode(
initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))
).valueOr:
trace "DialBack failed, could not decode DialBackResponse"
return DialStatus.EDialBackError
except LPStreamRemoteClosedError as exc:
# failed because of nonce error (remote reset the stream): EDialBackError
debug "DialBack failed, remote closed the connection", description = exc.msg
return DialStatus.EDialBackError
# TODO: failed because of client or server resources: EDialError
trace "DialBack successful"
return DialStatus.Ok
proc handleDialDataResponses(
self: AutonatV2, conn: Connection
) {.async: (raises: [CancelledError, AutonatV2Error, LPStreamError]).} =
var dataReceived: uint64 = 0
while dataReceived < self.config.dialDataSize:
let msg = AutonatV2Msg.decode(
initProtoBuffer(await conn.readLp(DialDataResponseLpSize))
).valueOr:
raise newException(AutonatV2Error, "Received malformed message")
debug "Received message", msgType = $msg.msgType
if msg.msgType != MsgType.DialDataResponse:
raise
newException(AutonatV2Error, "Expecting DialDataResponse, got " & $msg.msgType)
let resp = msg.dialDataResp
dataReceived += resp.data.len.uint64
debug "received data",
dataReceived = resp.data.len.uint64, totalDataReceived = dataReceived
proc amplificationAttackPrevention(
self: AutonatV2, conn: Connection, addrIdx: AddrIdx
): Future[bool] {.async: (raises: [CancelledError, LPStreamError]).} =
# send DialDataRequest
await conn.writeLp(
AutonatV2Msg(
msgType: MsgType.DialDataRequest,
dialDataReq: DialDataRequest(addrIdx: addrIdx, numBytes: self.config.dialDataSize),
).encode().buffer
)
# receive DialDataResponses until we're satisfied
try:
await self.handleDialDataResponses(conn)
except AutonatV2Error as exc:
error "Amplification attack prevention failed", description = exc.msg
return false
return true
proc canDial(self: AutonatV2, addrs: MultiAddress): bool =
let (ipv4Support, ipv6Support) = self.switch.peerInfo.listenAddrs.ipSupport()
addrs[0].withValue(addrIp):
if IP4.match(addrIp) and not ipv4Support:
return false
if IP6.match(addrIp) and not ipv6Support:
return false
try:
if not self.config.allowPrivateAddresses and isPrivate($addrIp):
return false
except ValueError:
warn "Unable to parse IP address, skipping", addrs = $addrIp
return false
for t in self.switch.transports:
if t.handles(addrs):
return true
return false
proc forceNewConnection(
self: AutonatV2, pid: PeerId, addrs: seq[MultiAddress]
): Future[Opt[Connection]] {.async: (raises: [CancelledError]).} =
## Bypasses connManager to force a new connection to ``pid``
## instead of reusing an existing one
try:
let mux = await self.switch.dialer.dialAndUpgrade(Opt.some(pid), addrs)
if mux.isNil():
return Opt.none(Connection)
return Opt.some(
await self.switch.dialer.negotiateStream(
await mux.newStream(), @[$AutonatV2Codec.DialBack]
)
)
except CancelledError as exc:
raise exc
except CatchableError:
return Opt.none(Connection)
proc chooseDialAddr(
self: AutonatV2, pid: PeerId, addrs: seq[MultiAddress]
): Future[(Opt[Connection], Opt[AddrIdx])] {.async: (raises: [CancelledError]).} =
for i, ma in addrs:
if self.canDial(ma):
debug "Trying to dial", chosenAddrs = ma, addrIdx = i
let conn =
try:
(await (self.forceNewConnection(pid, @[ma]).wait(self.config.dialTimeout))).valueOr:
return (Opt.none(Connection), Opt.none(AddrIdx))
except AsyncTimeoutError:
trace "Dial timed out"
return (Opt.none(Connection), Opt.some(i.AddrIdx))
return (Opt.some(conn), Opt.some(i.AddrIdx))
return (Opt.none(Connection), Opt.none(AddrIdx))
proc handleDialRequest(
self: AutonatV2, conn: Connection, req: DialRequest
) {.async: (raises: [CancelledError, LPStreamError]).} =
let observedIPAddr = (await conn.findObservedIPAddr(req)).valueOr:
trace "Could not find observed IP address"
await conn.sendDialResponse(ResponseStatus.ERequestRejected)
return
let (dialBackConnOpt, addrIdxOpt) = await self.chooseDialAddr(conn.peerId, req.addrs)
let addrIdx = addrIdxOpt.valueOr:
trace "No dialable addresses found"
await conn.sendDialResponse(ResponseStatus.EDialRefused)
return
let dialBackConn = dialBackConnOpt.valueOr:
trace "Dial failed"
await conn.sendDialResponse(
ResponseStatus.Ok,
addrIdx = Opt.some(addrIdx),
dialStatus = Opt.some(DialStatus.EDialError),
)
return
defer:
await dialBackConn.close()
# if the peer's observed address is not in the list of addresses to try,
# then we perform amplification attack prevention
if not ipAddrMatches(observedIPAddr, req.addrs):
debug "Starting amplification attack prevention",
observedIPAddr = observedIPAddr, testAddr = req.addrs[addrIdx]
# send DialDataRequest and wait until dataReceived is enough
if not await self.amplificationAttackPrevention(conn, addrIdx).withTimeout(
self.config.amplificationAttackTimeout
):
debug "Amplification attack prevention timeout",
timeout = self.config.amplificationAttackTimeout, peer = conn.peerId
await conn.sendDialResponse(ResponseStatus.EDialRefused)
return
debug "Sending DialBack",
nonce = req.nonce, addrIdx = addrIdx, addr = req.addrs[addrIdx]
try:
let dialStatus =
await dialBackConn.dialBack(req.nonce).wait(self.config.dialTimeout)
await conn.sendDialResponse(
ResponseStatus.Ok, addrIdx = Opt.some(addrIdx), dialStatus = Opt.some(dialStatus)
)
except DialFailedError as exc:
debug "DialBack failed", description = exc.msg
await conn.sendDialResponse(
ResponseStatus.Ok,
addrIdx = Opt.some(addrIdx),
dialStatus = Opt.some(DialStatus.EDialBackError),
)
except AsyncTimeoutError:
debug "DialBack timeout", timeout = self.config.dialTimeout
await conn.sendDialResponse(
ResponseStatus.Ok,
addrIdx = Opt.some(addrIdx),
dialStatus = Opt.some(DialStatus.EDialBackError),
)
proc new*(
T: typedesc[AutonatV2],
switch: Switch,
config: AutonatV2Config = AutonatV2Config.new(),
): T =
let autonatV2 = T(switch: switch, config: config)
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
defer:
await conn.close()
let msg =
try:
AutonatV2Msg.decode(initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))).valueOr:
trace "Unable to decode AutonatV2Msg"
return
except LPStreamError as exc:
debug "Could not receive AutonatV2Msg", description = exc.msg
return
debug "Received message", msgType = $msg.msgType
if msg.msgType != MsgType.DialRequest:
debug "Expecting DialRequest", receivedMsgType = msg.msgType
return
try:
await autonatV2.handleDialRequest(conn, msg.dialReq)
except CancelledError as exc:
raise exc
except LPStreamRemoteClosedError as exc:
debug "Connection closed by peer", description = exc.msg, peer = conn.peerId
except LPStreamError as exc:
debug "Stream Error", description = exc.msg
autonatV2.handler = handleStream
autonatV2.codec = $AutonatV2Codec.DialRequest
autonatV2
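A minimal sketch of mounting the server on an existing, not-yet-started `switch`:

# serve /libp2p/autonat/2/dial-request with a custom config
let autonat = AutonatV2.new(
  switch, AutonatV2Config.new(dialTimeout = 10.seconds, allowPrivateAddresses = true)
)
switch.mount(autonat)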

View File

@@ -0,0 +1,278 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import std/[deques, sequtils]
import chronos, chronicles, metrics, results
import
../../protocol,
../../../switch,
../../../multiaddress,
../../../multicodec,
../../../peerid,
../../../protobuf/minprotobuf,
../../../wire,
../../../utils/heartbeat,
../../../crypto/crypto,
../autonat/types,
./types,
./client
declarePublicGauge(
libp2p_autonat_v2_reachability_confidence,
"autonat v2 reachability confidence",
labels = ["reachability"],
)
logScope:
topics = "libp2p autonatv2 service"
# needed because nim 2.0 can't do proper type assertions
const noneDuration: Opt[Duration] = Opt.none(Duration)
type
AutonatV2ServiceConfig* = object
scheduleInterval: Opt[Duration]
askNewConnectedPeers: bool
numPeersToAsk: int
maxQueueSize: int
minConfidence: float
enableAddressMapper: bool
AutonatV2Service* = ref object of Service
config*: AutonatV2ServiceConfig
confidence: Opt[float]
newConnectedPeerHandler: PeerEventHandler
statusAndConfidenceHandler: StatusAndConfidenceHandler
addressMapper: AddressMapper
scheduleHandle: Future[void]
networkReachability*: NetworkReachability
answers: Deque[NetworkReachability]
client*: AutonatV2Client
rng: ref HmacDrbgContext
StatusAndConfidenceHandler* = proc(
networkReachability: NetworkReachability, confidence: Opt[float]
): Future[void] {.gcsafe, async: (raises: [CancelledError]).}
proc new*(
T: typedesc[AutonatV2ServiceConfig],
scheduleInterval: Opt[Duration] = noneDuration,
askNewConnectedPeers = true,
numPeersToAsk: int = 5,
maxQueueSize: int = 10,
minConfidence: float = 0.3,
enableAddressMapper = true,
): T =
return T(
scheduleInterval: scheduleInterval,
askNewConnectedPeers: askNewConnectedPeers,
numPeersToAsk: numPeersToAsk,
maxQueueSize: maxQueueSize,
minConfidence: minConfidence,
enableAddressMapper: enableAddressMapper,
)
proc new*(
T: typedesc[AutonatV2Service],
rng: ref HmacDrbgContext,
client: AutonatV2Client = AutonatV2Client.new(),
config: AutonatV2ServiceConfig = AutonatV2ServiceConfig.new(),
): T =
return T(
config: config,
confidence: Opt.none(float),
networkReachability: Unknown,
answers: initDeque[NetworkReachability](),
client: client,
rng: rng,
)
proc callHandler(self: AutonatV2Service) {.async: (raises: [CancelledError]).} =
if not isNil(self.statusAndConfidenceHandler):
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
proc hasEnoughIncomingSlots(switch: Switch): bool =
# we leave some margin instead of comparing to 0 as a peer could connect to us while we are asking for the dial back
return switch.connManager.slotsAvailable(In) >= 2
proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =
return switch.connManager.selectMuxer(peerId, In) != nil
proc handleAnswer(
self: AutonatV2Service, ans: NetworkReachability
): Future[bool] {.async: (raises: [CancelledError]).} =
if ans == Unknown:
return
let oldNetworkReachability = self.networkReachability
let oldConfidence = self.confidence
if self.answers.len == self.config.maxQueueSize:
self.answers.popFirst()
self.answers.addLast(ans)
self.networkReachability = Unknown
self.confidence = Opt.none(float)
const reachabilityPriority = [Reachable, NotReachable]
for reachability in reachabilityPriority:
let confidence = self.answers.countIt(it == reachability) / self.config.maxQueueSize
libp2p_autonat_v2_reachability_confidence.set(
value = confidence, labelValues = [$reachability]
)
if self.confidence.isNone and confidence >= self.config.minConfidence:
self.networkReachability = reachability
self.confidence = Opt.some(confidence)
debug "Current status",
currentStats = $self.networkReachability,
confidence = $self.confidence,
answers = self.answers
# Return whether anything has changed
return
self.networkReachability != oldNetworkReachability or
self.confidence != oldConfidence
proc askPeer(
self: AutonatV2Service, switch: Switch, peerId: PeerId
): Future[NetworkReachability] {.async: (raises: [CancelledError]).} =
logScope:
peerId = $peerId
if doesPeerHaveIncomingConn(switch, peerId):
return Unknown
if not hasEnoughIncomingSlots(switch):
debug "No incoming slots available, not asking peer",
incomingSlotsAvailable = switch.connManager.slotsAvailable(In)
return Unknown
trace "Asking peer for reachability"
let ans =
try:
let reqAddrs = switch.peerInfo.addrs
let autonatV2Resp = await self.client.sendDialRequest(peerId, reqAddrs)
debug "AutonatV2Response", autonatV2Resp = autonatV2Resp
autonatV2Resp.reachability
except CancelledError as exc:
raise exc
except LPStreamError as exc:
debug "DialRequest stream error", description = exc.msg
Unknown
except DialFailedError as exc:
debug "DialRequest dial failed", description = exc.msg
Unknown
except AutonatV2Error as exc:
debug "DialRequest error", description = exc.msg
Unknown
let hasReachabilityOrConfidenceChanged = await self.handleAnswer(ans)
if hasReachabilityOrConfidenceChanged:
await self.callHandler()
await switch.peerInfo.update()
return ans
proc askConnectedPeers(
self: AutonatV2Service, switch: Switch
) {.async: (raises: [CancelledError]).} =
trace "Asking peers for reachability"
var peers = switch.connectedPeers(Direction.Out)
self.rng.shuffle(peers)
var answersFromPeers = 0
for peer in peers:
if answersFromPeers >= self.config.numPeersToAsk:
break
if not hasEnoughIncomingSlots(switch):
debug "No incoming slots available, not asking peers",
incomingSlotsAvailable = switch.connManager.slotsAvailable(In)
break
if (await askPeer(self, switch, peer)) != Unknown:
answersFromPeers.inc()
proc schedule(
service: AutonatV2Service, switch: Switch, interval: Duration
) {.async: (raises: [CancelledError]).} =
heartbeat "Scheduling AutonatV2Service run", interval:
await service.run(switch)
proc addressMapper(
self: AutonatV2Service, peerStore: PeerStore, listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
if not self.networkReachability.isReachable():
return listenAddrs
var addrs = newSeq[MultiAddress]()
for listenAddr in listenAddrs:
if listenAddr.isPublicMA() or not self.networkReachability.isReachable():
addrs.add(listenAddr)
else:
addrs.add(peerStore.guessDialableAddr(listenAddr))
return addrs
method setup*(
self: AutonatV2Service, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
self.addressMapper = proc(
listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
return await addressMapper(self, switch.peerStore, listenAddrs)
trace "Setting up AutonatV2Service"
let hasBeenSetup = await procCall Service(self).setup(switch)
if not hasBeenSetup:
return hasBeenSetup
if self.config.askNewConnectedPeers:
self.newConnectedPeerHandler = proc(
peerId: PeerId, event: PeerEvent
): Future[void] {.async: (raises: [CancelledError]).} =
discard askPeer(self, switch, peerId)
switch.connManager.addPeerEventHandler(
self.newConnectedPeerHandler, PeerEventKind.Joined
)
self.config.scheduleInterval.withValue(interval):
self.scheduleHandle = schedule(self, switch, interval)
if self.config.enableAddressMapper:
switch.peerInfo.addressMappers.add(self.addressMapper)
return hasBeenSetup
method run*(
self: AutonatV2Service, switch: Switch
) {.public, async: (raises: [CancelledError]).} =
trace "Running AutonatV2Service"
await askConnectedPeers(self, switch)
method stop*(
self: AutonatV2Service, switch: Switch
): Future[bool] {.public, async: (raises: [CancelledError]).} =
trace "Stopping AutonatV2Service"
let hasBeenStopped = await procCall Service(self).stop(switch)
if not hasBeenStopped:
return hasBeenStopped
if not isNil(self.scheduleHandle):
self.scheduleHandle.cancelSoon()
self.scheduleHandle = nil
if not isNil(self.newConnectedPeerHandler):
switch.connManager.removePeerEventHandler(
self.newConnectedPeerHandler, PeerEventKind.Joined
)
if self.config.enableAddressMapper:
switch.peerInfo.addressMappers.keepItIf(it != self.addressMapper)
await switch.peerInfo.update()
return hasBeenStopped
proc setStatusAndConfidenceHandler*(
self: AutonatV2Service, statusAndConfidenceHandler: StatusAndConfidenceHandler
) =
self.statusAndConfidenceHandler = statusAndConfidenceHandler
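A minimal sketch of subscribing to status updates, assuming `rng` and the usual switch builder wiring:

let service = AutonatV2Service.new(
  rng,
  AutonatV2Client.new(),
  AutonatV2ServiceConfig.new(scheduleInterval = Opt.some(5.minutes)),
)
service.setStatusAndConfidenceHandler(
  proc(
      reachability: NetworkReachability, confidence: Opt[float]
  ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
    debug "reachability changed", reachability, confidence
)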

View File

@@ -0,0 +1,265 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import results, chronos, chronicles
import
../../../multiaddress, ../../../peerid, ../../../protobuf/minprotobuf, ../../../switch
from ../autonat/types import NetworkReachability
export NetworkReachability
const
DefaultDialTimeout*: Duration = 15.seconds
DefaultAmplificationAttackDialTimeout*: Duration = 3.seconds
DefaultDialDataSize*: uint64 = 50 * 1024 # 50 KiB (51200 bytes, slightly more than 50 kB)
AutonatV2MsgLpSize*: int = 1024
DialBackLpSize*: int = 1024
# readLp must accept more than 4096 bytes (the DialDataResponse payload) plus protobuf overhead
DialDataResponseLpSize*: int = 5000
type
AutonatV2Codec* {.pure.} = enum
DialRequest = "/libp2p/autonat/2/dial-request"
DialBack = "/libp2p/autonat/2/dial-back"
AutonatV2Response* = object
reachability*: NetworkReachability
dialResp*: DialResponse
addrs*: Opt[MultiAddress]
AutonatV2Error* = object of LPError
Nonce* = uint64
AddrIdx* = uint32
NumBytes* = uint64
MsgType* {.pure.} = enum
# DialBack and DialBackResponse are not defined as AutonatV2Msg as per the spec
# likely because they are expected in response to some other message
DialRequest
DialResponse
DialDataRequest
DialDataResponse
ResponseStatus* {.pure.} = enum
EInternalError = 0
ERequestRejected = 100
EDialRefused = 101
Ok = 200
DialBackStatus* {.pure.} = enum
Ok = 0
DialStatus* {.pure.} = enum
Unused = 0
EDialError = 100
EDialBackError = 101
Ok = 200
DialRequest* = object
addrs*: seq[MultiAddress]
nonce*: Nonce
DialResponse* = object
status*: ResponseStatus
addrIdx*: Opt[AddrIdx]
dialStatus*: Opt[DialStatus]
DialBack* = object
nonce*: Nonce
DialBackResponse* = object
status*: DialBackStatus
DialDataRequest* = object
addrIdx*: AddrIdx
numBytes*: NumBytes
DialDataResponse* = object
data*: seq[byte]
AutonatV2Msg* = object
case msgType*: MsgType
of MsgType.DialRequest:
dialReq*: DialRequest
of MsgType.DialResponse:
dialResp*: DialResponse
of MsgType.DialDataRequest:
dialDataReq*: DialDataRequest
of MsgType.DialDataResponse:
dialDataResp*: DialDataResponse
# DialRequest
proc encode*(dialReq: DialRequest): ProtoBuffer =
var encoded = initProtoBuffer()
for ma in dialReq.addrs:
encoded.write(1, ma.data.buffer)
encoded.write(2, dialReq.nonce)
encoded.finish()
encoded
proc decode*(T: typedesc[DialRequest], pb: ProtoBuffer): Opt[T] =
var
addrs: seq[MultiAddress]
nonce: Nonce
if not ?pb.getRepeatedField(1, addrs).toOpt():
return Opt.none(T)
if not ?pb.getField(2, nonce).toOpt():
return Opt.none(T)
Opt.some(T(addrs: addrs, nonce: nonce))
# DialResponse
proc encode*(dialResp: DialResponse): ProtoBuffer =
var encoded = initProtoBuffer()
encoded.write(1, dialResp.status.uint)
# minprotobuf uses float64 for fixed64 fields
dialResp.addrIdx.withValue(addrIdx):
encoded.write(2, addrIdx)
dialResp.dialStatus.withValue(dialStatus):
encoded.write(3, dialStatus.uint)
encoded.finish()
encoded
proc decode*(T: typedesc[DialResponse], pb: ProtoBuffer): Opt[T] =
var
status: uint
addrIdx: AddrIdx
dialStatus: uint
if not ?pb.getField(1, status).toOpt():
return Opt.none(T)
var optAddrIdx = Opt.none(AddrIdx)
if ?pb.getField(2, addrIdx).toOpt():
optAddrIdx = Opt.some(addrIdx)
var optDialStatus = Opt.none(DialStatus)
if ?pb.getField(3, dialStatus).toOpt():
optDialStatus = Opt.some(cast[DialStatus](dialStatus))
Opt.some(
T(
status: cast[ResponseStatus](status),
addrIdx: optAddrIdx,
dialStatus: optDialStatus,
)
)
# DialBack
proc encode*(dialBack: DialBack): ProtoBuffer =
var encoded = initProtoBuffer()
encoded.write(1, dialBack.nonce)
encoded.finish()
encoded
proc decode*(T: typedesc[DialBack], pb: ProtoBuffer): Opt[T] =
var nonce: Nonce
if not ?pb.getField(1, nonce).toOpt():
return Opt.none(T)
Opt.some(T(nonce: nonce))
# DialBackResponse
proc encode*(dialBackResp: DialBackResponse): ProtoBuffer =
var encoded = initProtoBuffer()
encoded.write(1, dialBackResp.status.uint)
encoded.finish()
encoded
proc decode*(T: typedesc[DialBackResponse], pb: ProtoBuffer): Opt[T] =
var status: uint
if not ?pb.getField(1, status).toOpt():
return Opt.none(T)
Opt.some(T(status: cast[DialBackStatus](status)))
# DialDataRequest
proc encode*(dialDataReq: DialDataRequest): ProtoBuffer =
var encoded = initProtoBuffer()
encoded.write(1, dialDataReq.addrIdx)
encoded.write(2, dialDataReq.numBytes)
encoded.finish()
encoded
proc decode*(T: typedesc[DialDataRequest], pb: ProtoBuffer): Opt[T] =
var
addrIdx: AddrIdx
numBytes: NumBytes
if not ?pb.getField(1, addrIdx).toOpt():
return Opt.none(T)
if not ?pb.getField(2, numBytes).toOpt():
return Opt.none(T)
Opt.some(T(addrIdx: addrIdx, numBytes: numBytes))
# DialDataResponse
proc encode*(dialDataResp: DialDataResponse): ProtoBuffer =
var encoded = initProtoBuffer()
encoded.write(1, dialDataResp.data)
encoded.finish()
encoded
proc decode*(T: typedesc[DialDataResponse], pb: ProtoBuffer): Opt[T] =
var data: seq[byte]
if not ?pb.getField(1, data).toOpt():
return Opt.none(T)
Opt.some(T(data: data))
proc protoField(msgType: MsgType): int =
case msgType
of MsgType.DialRequest: 1.int
of MsgType.DialResponse: 2.int
of MsgType.DialDataRequest: 3.int
of MsgType.DialDataResponse: 4.int
# AutonatV2Msg
proc encode*(msg: AutonatV2Msg): ProtoBuffer =
var encoded = initProtoBuffer()
case msg.msgType
of MsgType.DialRequest:
encoded.write(MsgType.DialRequest.protoField, msg.dialReq.encode())
of MsgType.DialResponse:
encoded.write(MsgType.DialResponse.protoField, msg.dialResp.encode())
of MsgType.DialDataRequest:
encoded.write(MsgType.DialDataRequest.protoField, msg.dialDataReq.encode())
of MsgType.DialDataResponse:
encoded.write(MsgType.DialDataResponse.protoField, msg.dialDataResp.encode())
encoded.finish()
encoded
proc decode*(T: typedesc[AutonatV2Msg], pb: ProtoBuffer): Opt[T] =
var
msgTypeOrd: uint32
msg: ProtoBuffer
if ?pb.getField(MsgType.DialRequest.protoField, msg).toOpt():
let dialReq = DialRequest.decode(msg).valueOr:
return Opt.none(AutonatV2Msg)
Opt.some(AutonatV2Msg(msgType: MsgType.DialRequest, dialReq: dialReq))
elif ?pb.getField(MsgType.DialResponse.protoField, msg).toOpt():
let dialResp = DialResponse.decode(msg).valueOr:
return Opt.none(AutonatV2Msg)
Opt.some(AutonatV2Msg(msgType: MsgType.DialResponse, dialResp: dialResp))
elif ?pb.getField(MsgType.DialDataRequest.protoField, msg).toOpt():
let dialDataReq = DialDataRequest.decode(msg).valueOr:
return Opt.none(AutonatV2Msg)
Opt.some(AutonatV2Msg(msgType: MsgType.DialDataRequest, dialDataReq: dialDataReq))
elif ?pb.getField(MsgType.DialDataResponse.protoField, msg).toOpt():
let dialDataResp = DialDataResponse.decode(msg).valueOr:
return Opt.none(AutonatV2Msg)
Opt.some(
AutonatV2Msg(msgType: MsgType.DialDataResponse, dialDataResp: dialDataResp)
)
else:
Opt.none(AutonatV2Msg)
# Custom `==` is needed to compare since AutonatV2Msg is a case object
proc `==`*(a, b: AutonatV2Msg): bool =
a.msgType == b.msgType and a.encode() == b.encode()
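A round-trip sanity sketch of the wire format above (decode needs at least one address, since field 1 is otherwise absent):

let ma = MultiAddress.init("/ip4/127.0.0.1/tcp/4040").tryGet()
let original = AutonatV2Msg(
  msgType: MsgType.DialRequest, dialReq: DialRequest(addrs: @[ma], nonce: 42'u64)
)
let decoded = AutonatV2Msg.decode(initProtoBuffer(original.encode().buffer))
doAssert decoded.isSome() and decoded.get() == original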

View File

@@ -0,0 +1,46 @@
{.push raises: [].}
import results
import chronos
import
../../protocol,
../../../switch,
../../../multiaddress,
../../../multicodec,
../../../peerid,
../../../protobuf/minprotobuf,
./types
proc asNetworkReachability*(self: DialResponse): NetworkReachability =
if self.status == EInternalError:
return Unknown
if self.status == ERequestRejected:
return Unknown
if self.status == EDialRefused:
return Unknown
# if we got here, a dial was attempted
let dialStatus = self.dialStatus.valueOr:
return Unknown
if dialStatus == Unused:
return Unknown
if dialStatus == EDialError:
return NotReachable
if dialStatus == EDialBackError:
return NotReachable
return Reachable
proc asAutonatV2Response*(
self: DialResponse, testAddrs: seq[MultiAddress]
): AutonatV2Response =
let addrIdx = self.addrIdx.valueOr:
return AutonatV2Response(
reachability: self.asNetworkReachability(),
dialResp: self,
addrs: Opt.none(MultiAddress),
)
AutonatV2Response(
reachability: self.asNetworkReachability(),
dialResp: self,
addrs: Opt.some(testAddrs[addrIdx]),
)
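For concreteness, a few assertions sketching the mapping above:

let okResp = DialResponse(status: ResponseStatus.Ok, dialStatus: Opt.some(DialStatus.Ok))
doAssert okResp.asNetworkReachability() == Reachable
let failResp =
  DialResponse(status: ResponseStatus.Ok, dialStatus: Opt.some(DialStatus.EDialError))
doAssert failResp.asNetworkReachability() == NotReachable
let refused = DialResponse(status: ResponseStatus.EDialRefused)
doAssert refused.asNetworkReachability() == Unknown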

View File

@@ -1,6 +1,7 @@
import chronos
import chronicles
import sequtils
import sets
import ../../peerid
import ./consts
import ./xordistance
@@ -9,22 +10,95 @@ import ./lookupstate
import ./requests
import ./keys
import ../protocol
import ../../switch
import ./protobuf
import ../../switch
import ../../multihash
import ../../utils/heartbeat
import std/[options, tables]
import std/[times, options, tables]
import results
logScope:
topics = "kad-dht"
type EntryKey* = object
data: seq[byte]
proc init*(T: typedesc[EntryKey], inner: seq[byte]): EntryKey {.gcsafe, raises: [].} =
EntryKey(data: inner)
type EntryValue* = object
data*: seq[byte] # public because needed for tests
proc init*(
T: typedesc[EntryValue], inner: seq[byte]
): EntryValue {.gcsafe, raises: [].} =
EntryValue(data: inner)
type TimeStamp* = object
# Currently a string because, for some reason, that's what was chosen at the protobuf level
# TODO: convert between RFC3339 strings and integer timestamps (i.e. the _correct_ way)
ts*: string # only public because needed for tests
type EntryRecord* = object
value*: EntryValue # only public because needed for tests
time*: TimeStamp # only public because needed for tests
proc init*(
T: typedesc[EntryRecord], value: EntryValue, time: Option[TimeStamp]
): EntryRecord {.gcsafe, raises: [].} =
EntryRecord(value: value, time: time.get(TimeStamp(ts: $times.now().utc)))
type LocalTable* = object
entries*: Table[EntryKey, EntryRecord] # public because needed for tests
proc init(self: typedesc[LocalTable]): LocalTable {.raises: [].} =
LocalTable()
type EntryCandidate* = object
key*: EntryKey
value*: EntryValue
type ValidatedEntry* = object
key: EntryKey
value: EntryValue
proc init*(
T: typedesc[ValidatedEntry], key: EntryKey, value: EntryValue
): ValidatedEntry {.gcsafe, raises: [].} =
ValidatedEntry(key: key, value: value)
type EntryValidator* = ref object of RootObj
method isValid*(
self: EntryValidator, key: EntryKey, val: EntryValue
): bool {.base, raises: [], gcsafe.} =
doAssert(false, "unimplimented base method")
type EntrySelector* = ref object of RootObj
method select*(
self: EntrySelector, cand: EntryRecord, others: seq[EntryRecord]
): Result[EntryRecord, string] {.base, raises: [], gcsafe.} =
doAssert(false, "EntrySelection base not implemented")
type KadDHT* = ref object of LPProtocol
switch: Switch
rng: ref HmacDrbgContext
rtable*: RoutingTable
maintenanceLoop: Future[void]
dataTable*: LocalTable
entryValidator: EntryValidator
entrySelector: EntrySelector
proc insert*(
self: var LocalTable, value: sink ValidatedEntry, time: TimeStamp
) {.raises: [].} =
debug "local table insertion", key = value.key.data, value = value.value.data
self.entries[value.key] = EntryRecord(value: value.value, time: time)
const MaxMsgSize = 4096
# Forward declaration
proc findNode*(
kad: KadDHT, targetId: Key
): Future[seq[PeerId]] {.async: (raises: [CancelledError]).}
proc sendFindNode(
kad: KadDHT, peerId: PeerId, addrs: seq[MultiAddress], targetId: Key
@@ -36,16 +110,13 @@ proc sendFindNode(
await kad.switch.dial(peerId, KadCodec)
else:
await kad.switch.dial(peerId, addrs, KadCodec)
defer:
await conn.close()
let msg = Message(msgType: MessageType.findNode, key: some(targetId.getBytes()))
await conn.writeLp(msg.encode().buffer)
let reply = Message.decode(await conn.readLp(MaxMsgSize)).tryGet()
if reply.msgType != MessageType.findNode:
raise newException(ValueError, "unexpected message type in reply: " & $reply)
@@ -68,30 +139,92 @@ proc waitRepliesOrTimeouts(
return (receivedReplies, failedPeers)
proc dispatchPutVal(
kad: KadDHT, peer: PeerId, entry: ValidatedEntry
): Future[void] {.async: (raises: [CancelledError, DialFailedError, LPStreamError]).} =
let conn = await kad.switch.dial(peer, KadCodec)
defer:
await conn.close()
let msg = Message(
msgType: MessageType.putValue,
record: some(Record(key: some(entry.key.data), value: some(entry.value.data))),
)
await conn.writeLp(msg.encode().buffer)
let reply = Message.decode(await conn.readLp(MaxMsgSize)).valueOr:
# todo log this more meaningfully
error "putValue reply decode fail", error = error, conn = conn
return
if reply != msg:
error "unexpected change between msg and reply: ",
msg = msg, reply = reply, conn = conn
proc putValue*(
kad: KadDHT, entKey: EntryKey, value: EntryValue, timeout: Option[int]
): Future[Result[void, string]] {.async: (raises: [CancelledError]), gcsafe.} =
if not kad.entryValidator.isValid(entKey, value):
return err("invalid key/value pair")
let others: seq[EntryRecord] =
if entKey in kad.dataTable.entries:
@[kad.dataTable.entries.getOrDefault(entKey)]
else:
@[]
let candAsRec = EntryRecord.init(value, none(TimeStamp))
let confirmedRec = kad.entrySelector.select(candAsRec, others).valueOr:
error "application provided selector error (local)", msg = error
return err(error)
trace "local putval", candidate = candAsRec, others = others, selected = confirmedRec
let validEnt = ValidatedEntry.init(entKey, confirmedRec.value)
let peers = await kad.findNode(entKey.data.toKey())
# We first prime the sends so the data is ready to go
let rpcBatch = peers.mapIt(kad.dispatchPutVal(it, validEnt))
# then we do the `move`, as insert takes the data as `sink`
kad.dataTable.insert(validEnt, confirmedRec.time)
try:
# now that all the data is where it needs to be in memory, we can dispatch the
# RPCs
await rpcBatch.allFutures().wait(chronos.seconds(timeout.get(5)))
# It's quite normal for the dispatch to timeout, as it would require all calls to get
# their response. Downstream users may desire some sort of functionality in the
# future to get rpc telemetry, but in the meantime, we just move on...
except AsyncTimeoutError:
discard
return results.ok()
# Helper function forward declaration
proc checkConvergence(state: LookupState, me: PeerId): bool {.raises: [], gcsafe.}
proc findNode*(
kad: KadDHT, targetId: Key
): Future[seq[PeerId]] {.async: (raises: [CancelledError]).} =
## Node lookup. Iteratively search for the k closest peers to a target ID.
## Will not necessarily return the target itself
#debug "findNode", target = target
# TODO: should it return a single peer instead? read spec
var initialPeers = kad.rtable.findClosestPeers(targetId, DefaultReplic)
var state = LookupState.init(targetId, initialPeers)
var state = LookupState.init(targetId, initialPeers, kad.rtable.hasher)
var addrTable: Table[PeerId, seq[MultiAddress]] =
initTable[PeerId, seq[MultiAddress]]()
while not state.done:
let toQuery = state.selectAlphaPeers()
debug "queries", list = toQuery.mapIt(it.shortLog()), addrTab = addrTable
var pendingFutures = initTable[PeerId, Future[Message]]()
for peer in toQuery:
if pendingFutures.hasKey(peer):
continue
# TODO: pending futures always empty here, no?
for peer in toQuery.filterIt(
kad.switch.peerInfo.peerId != it or pendingFutures.hasKey(it)
):
state.markPending(peer)
pendingFutures[peer] = kad
.sendFindNode(peer, addrTable.getOrDefault(peer, @[]), targetId)
.wait(5.seconds)
.wait(chronos.seconds(5))
state.activeQueries.inc
@@ -99,23 +232,58 @@ proc findNode*(
for msg in successfulReplies:
for peer in msg.closerPeers:
addrTable[PeerId.init(peer.id).get()] = peer.addrs
let pid = PeerId.init(peer.id)
if not pid.isOk:
error "PeerId init went bad. this is unusual", data = peer.id
continue
addrTable[pid.get()] = peer.addrs
state.updateShortlist(
msg,
proc(p: PeerInfo) =
discard kad.rtable.insert(p.peerId)
kad.switch.peerStore[AddressBook][p.peerId] = p.addrs
# Nodes might return different addresses for a peer, so we append instead of replacing
var existingAddresses =
kad.switch.peerStore[AddressBook][p.peerId].toHashSet()
for a in p.addrs:
existingAddresses.incl(a)
kad.switch.peerStore[AddressBook][p.peerId] = existingAddresses.toSeq()
# TODO: add TTL to peerstore, otherwise we can spam it with junk
,
kad.rtable.hasher,
)
for timedOut in timedOutPeers:
state.markFailed(timedOut)
state.done = state.checkConvergence()
# Check for convergence: no active queries, and no other peers to be selected
state.done = checkConvergence(state, kad.switch.peerInfo.peerId)
return state.selectClosestK()
proc findPeer*(
kad: KadDHT, peer: PeerId
): Future[Result[PeerInfo, string]] {.async: (raises: [CancelledError]).} =
## Walks the key space until it finds candidate addresses for a peer Id
if kad.switch.peerInfo.peerId == peer:
# Looking for yourself.
return ok(kad.switch.peerInfo)
if kad.switch.isConnected(peer):
# Return known info about already connected peer
return ok(PeerInfo(peerId: peer, addrs: kad.switch.peerStore[AddressBook][peer]))
let foundNodes = await kad.findNode(peer.toKey())
if not foundNodes.contains(peer):
return err("peer not found")
return ok(PeerInfo(peerId: peer, addrs: kad.switch.peerStore[AddressBook][peer]))
proc checkConvergence(state: LookupState, me: PeerId): bool {.raises: [], gcsafe.} =
let ready = state.activeQueries == 0
let noNew = selectAlphaPeers(state).filterIt(me != it).len == 0
return ready and noNew
proc bootstrap*(
kad: KadDHT, bootstrapNodes: seq[PeerInfo]
) {.async: (raises: [CancelledError]).} =
@@ -123,28 +291,38 @@ proc bootstrap*(
try:
await kad.switch.connect(b.peerId, b.addrs)
debug "connected to bootstrap peer", peerId = b.peerId
except CatchableError as e:
error "failed to connect to bootstrap peer", peerId = b.peerId, error = e.msg
except DialFailedError as e:
# at some point we will want to bubble up a Result[void, SomeErrorEnum]
error "failed to dial to bootstrap peer", peerId = b.peerId, error = e.msg
continue
try:
let msg =
await kad.sendFindNode(b.peerId, b.addrs, kad.rtable.selfId).wait(5.seconds)
for peer in msg.closerPeers:
let p = PeerId.init(peer.id).tryGet()
discard kad.rtable.insert(p)
let msg =
try:
await kad.sendFindNode(b.peerId, b.addrs, kad.rtable.selfId).wait(
chronos.seconds(5)
)
except CatchableError as e:
debug "send find node exception during bootstrap",
target = b.peerId, addrs = b.addrs, err = e.msg
continue
for peer in msg.closerPeers:
let p = PeerId.init(peer.id).valueOr:
debug "invalid peer id received", error = error
continue
discard kad.rtable.insert(p)
try:
kad.switch.peerStore[AddressBook][p] = peer.addrs
except:
error "this is here because an ergonomic means of keying into a table without exceptions is unknown"
# bootstrap node replied successfully. Adding to routing table
discard kad.rtable.insert(b.peerId)
except CatchableError as e:
error "bootstrap failed for peer", peerId = b.peerId, exc = e.msg
# bootstrap node replied successfully. Adding to routing table
discard kad.rtable.insert(b.peerId)
try:
# Adding some random node to prepopulate the table
discard await kad.findNode(PeerId.random(kad.rng).tryGet().toKey())
info "bootstrap lookup complete"
except CatchableError as e:
error "bootstrap lookup failed", error = e.msg
let key = PeerId.random(kad.rng).valueOr:
doAssert(false, "this should never happen")
return
discard await kad.findNode(key.toKey())
info "bootstrap lookup complete"
proc refreshBuckets(kad: KadDHT) {.async: (raises: [CancelledError]).} =
for i in 0 ..< kad.rtable.buckets.len:
@@ -153,49 +331,121 @@ proc refreshBuckets(kad: KadDHT) {.async: (raises: [CancelledError]).} =
discard await kad.findNode(randomKey)
proc maintainBuckets(kad: KadDHT) {.async: (raises: [CancelledError]).} =
heartbeat "refresh buckets", 10.minutes:
heartbeat "refresh buckets", chronos.minutes(10):
await kad.refreshBuckets()
proc new*(
T: typedesc[KadDHT], switch: Switch, rng: ref HmacDrbgContext = newRng()
T: typedesc[KadDHT],
switch: Switch,
validator: EntryValidator,
entrySelector: EntrySelector,
rng: ref HmacDrbgContext = newRng(),
): T {.raises: [].} =
var rtable = RoutingTable.init(switch.peerInfo.peerId.toKey())
let kad = T(rng: rng, switch: switch, rtable: rtable)
var rtable = RoutingTable.init(switch.peerInfo.peerId.toKey(), Opt.none(XorDHasher))
let kad = T(
rng: rng,
switch: switch,
rtable: rtable,
dataTable: LocalTable.init(),
entryValidator: validator,
entrySelector: entrySelector,
)
kad.codec = KadCodec
kad.handler = proc(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
while not conn.atEof:
let
buf = await conn.readLp(MaxMsgSize)
msg = Message.decode(buf).tryGet()
case msg.msgType
of MessageType.findNode:
let targetIdBytes = msg.key.get()
let targetId = PeerId.init(targetIdBytes).tryGet()
let closerPeers = kad.rtable.findClosest(targetId.toKey(), DefaultReplic)
let responsePb = encodeFindNodeReply(closerPeers, switch)
await conn.writeLp(responsePb.buffer)
# Peer is useful. adding to rtable
discard kad.rtable.insert(conn.peerId)
else:
raise newException(LPError, "unhandled kad-dht message type")
except CancelledError as exc:
raise exc
except CatchableError:
discard
# TODO: figure out why this fails:
# error "could not handle request",
# peerId = conn.PeerId, err = getCurrentExceptionMsg()
finally:
defer:
await conn.close()
while not conn.atEof:
let buf =
try:
await conn.readLp(MaxMsgSize)
except LPStreamError as e:
debug "Read error when handling kademlia RPC", conn = conn, err = e.msg
return
let msg = Message.decode(buf).valueOr:
debug "msg decode error handling kademlia RPC", err = error
return
case msg.msgType
of MessageType.findNode:
let targetIdBytes = msg.key.valueOr:
error "findNode message without key data present", msg = msg, conn = conn
return
let targetId = PeerId.init(targetIdBytes).valueOr:
error "findNode message without valid key data", msg = msg, conn = conn
return
let closerPeers = kad.rtable
.findClosest(targetId.toKey(), DefaultReplic)
# exclude the requesting node: telling a peer about itself does not reduce the distance
.filterIt(it != conn.peerId.toKey())
let responsePb = encodeFindNodeReply(closerPeers, switch)
try:
await conn.writeLp(responsePb.buffer)
except LPStreamError as e:
debug "write error when writing kad find-node RPC reply",
conn = conn, err = e.msg
return
# Peer is useful. adding to rtable
discard kad.rtable.insert(conn.peerId)
of MessageType.putValue:
let record = msg.record.valueOr:
error "no record in message buffer", msg = msg, conn = conn
return
let (skey, svalue) =
if record.key.isSome() and record.value.isSome():
(record.key.unsafeGet(), record.value.unsafeGet())
else:
error "no key or no value in rpc buffer", msg = msg, conn = conn
return
let key = EntryKey.init(skey)
let value = EntryValue.init(svalue)
# Value sanitisation done. Start insertion process
if not kad.entryValidator.isValid(key, value):
return
let others =
if kad.dataTable.entries.contains(key):
# contains/getOrDefault workaround to avoid Table exceptions
@[kad.dataTable.entries.getOrDefault(key)]
else:
@[]
let candRec = EntryRecord.init(value, none(TimeStamp))
let selectedRec = kad.entrySelector.select(candRec, others).valueOr:
error "application provided selector error", msg = error, conn = conn
return
trace "putval handler selection",
cand = candRec, others = others, selected = selectedRec
# Assume that if selection picks another value, that value is valid
let validated = ValidatedEntry(key: key, value: selectedRec.value)
kad.dataTable.insert(validated, selectedRec.time)
# consistent with the reference implementation linked below: echo the message back unchanged
# https://github.com/libp2p/js-libp2p/blob/cf9aab5c841ec08bc023b9f49083c95ad78a7a07/packages/kad-dht/src/rpc/handlers/put-value.ts#L22
try:
await conn.writeLp(buf)
except LPStreamError as e:
debug "write error when writing kad find-node RPC reply",
conn = conn, err = e.msg
return
else:
error "unhandled kad-dht message type", msg = msg
return
return kad
proc setSelector*(kad: KadDHT, selector: EntrySelector) =
doAssert(selector != nil)
kad.entrySelector = selector
proc setValidator*(kad: KadDHT, validator: EntryValidator) =
doAssert(validator != nil)
kad.entryValidator = validator
method start*(
kad: KadDHT
): Future[void] {.async: (raises: [CancelledError], raw: true).} =

View File

@@ -1,11 +1,10 @@
import nimcrypto/sha2
import ../../peerid
import ./consts
import chronicles
import stew/byteutils
type
KeyType* {.pure.} = enum
Unhashed
Raw
PeerId
@@ -13,15 +12,11 @@ type
case kind*: KeyType
of KeyType.PeerId:
peerId*: PeerId
of KeyType.Raw, KeyType.Unhashed:
data*: array[IdLength, byte]
of KeyType.Raw:
data*: seq[byte]
proc toKey*(s: seq[byte]): Key =
doAssert s.len == IdLength
var data: array[IdLength, byte]
for i in 0 ..< IdLength:
data[i] = s[i]
return Key(kind: KeyType.Raw, data: data)
return Key(kind: KeyType.Raw, data: s)
proc toKey*(p: PeerId): Key =
return Key(kind: KeyType.PeerId, peerId: p)
@@ -36,7 +31,7 @@ proc getBytes*(k: Key): seq[byte] =
case k.kind
of KeyType.PeerId:
k.peerId.getBytes()
of KeyType.Raw, KeyType.Unhashed:
of KeyType.Raw:
@(k.data)
template `==`*(a, b: Key): bool =
@@ -46,7 +41,7 @@ proc shortLog*(k: Key): string =
case k.kind
of KeyType.PeerId:
"PeerId:" & $k.peerId
of KeyType.Raw, KeyType.Unhashed:
of KeyType.Raw:
$k.kind & ":" & toHex(k.data)
chronicles.formatIt(Key):

View File

@@ -27,7 +27,10 @@ proc alreadyInShortlist(state: LookupState, peer: Peer): bool =
return state.shortlist.anyIt(it.peerId.getBytes() == peer.id)
proc updateShortlist*(
state: var LookupState, msg: Message, onInsert: proc(p: PeerInfo) {.gcsafe.}
state: var LookupState,
msg: Message,
onInsert: proc(p: PeerInfo) {.gcsafe.},
hasher: Opt[XorDHasher],
) =
for newPeer in msg.closerPeers.filterIt(not alreadyInShortlist(state, it)):
let peerInfo = PeerInfo(peerId: PeerId.init(newPeer.id).get(), addrs: newPeer.addrs)
@@ -36,7 +39,7 @@ proc updateShortlist*(
state.shortlist.add(
LookupNode(
peerId: peerInfo.peerId,
distance: xorDistance(peerInfo.peerId, state.targetId),
distance: xorDistance(peerInfo.peerId, state.targetId, hasher),
queried: false,
pending: false,
failed: false,
@@ -77,7 +80,12 @@ proc selectAlphaPeers*(state: LookupState): seq[PeerId] =
break
return selected
proc init*(T: type LookupState, targetId: Key, initialPeers: seq[PeerId]): T =
proc init*(
T: type LookupState,
targetId: Key,
initialPeers: seq[PeerId],
hasher: Opt[XorDHasher],
): T =
var res = LookupState(
targetId: targetId,
shortlist: @[],
@@ -90,7 +98,7 @@ proc init*(T: type LookupState, targetId: Key, initialPeers: seq[PeerId]): T =
res.shortlist.add(
LookupNode(
peerId: p,
distance: xorDistance(p, targetId),
distance: xorDistance(p, targetId, hasher),
queried: false,
pending: false,
failed: false,
@@ -103,11 +111,6 @@ proc init*(T: type LookupState, targetId: Key, initialPeers: seq[PeerId]): T =
)
return res
proc checkConvergence*(state: LookupState): bool =
let ready = state.activeQueries == 0
let noNew = selectAlphaPeers(state).len == 0
return ready and noNew
proc selectClosestK*(state: LookupState): seq[PeerId] =
var res: seq[PeerId] = @[]
for p in state.shortlist.filterIt(not it.failed):

View File

@@ -8,6 +8,7 @@ import ./xordistance
import ../../peerid
import sequtils
import ../../utils/sequninit
import results
logScope:
topics = "kad-dht rtable"
@@ -23,15 +24,16 @@ type
RoutingTable* = ref object
selfId*: Key
buckets*: seq[Bucket]
hasher*: Opt[XorDHasher]
proc `$`*(rt: RoutingTable): string =
"selfId(" & $rt.selfId & ") buckets(" & $rt.buckets & ")"
proc init*(T: typedesc[RoutingTable], selfId: Key): T =
return RoutingTable(selfId: selfId, buckets: @[])
proc init*(T: typedesc[RoutingTable], selfId: Key, hasher: Opt[XorDHasher]): T =
return RoutingTable(selfId: selfId, buckets: @[], hasher: hasher)
proc bucketIndex*(selfId, key: Key): int =
return xorDistance(selfId, key).leadingZeros
proc bucketIndex*(selfId, key: Key, hasher: Opt[XorDHasher]): int =
return xorDistance(selfId, key, hasher).leadingZeros
proc peerIndexInBucket(bucket: var Bucket, nodeId: Key): Opt[int] =
for i, p in bucket.peers:
@@ -43,7 +45,7 @@ proc insert*(rtable: var RoutingTable, nodeId: Key): bool =
if nodeId == rtable.selfId:
return false # No self insertion
let idx = bucketIndex(rtable.selfId, nodeId)
let idx = bucketIndex(rtable.selfId, nodeId, rtable.hasher)
if idx >= maxBuckets:
trace "cannot insert node. max buckets have been reached",
nodeId, bucketIdx = idx, maxBuckets
@@ -80,7 +82,9 @@ proc findClosest*(rtable: RoutingTable, targetId: Key, count: int): seq[Key] =
allNodes.sort(
proc(a, b: Key): int =
cmp(xorDistance(a, targetId), xorDistance(b, targetId))
cmp(
xorDistance(a, targetId, rtable.hasher), xorDistance(b, targetId, rtable.hasher)
)
)
return allNodes[0 ..< min(count, allNodes.len)]

View File

@@ -1,9 +1,27 @@
import ./consts
import stew/arrayOps
import ./keys
import nimcrypto/sha2
import ../../peerid
import results
type XorDistance* = array[IdLength, byte]
type XorDHasher* = proc(input: seq[byte]): array[IdLength, byte] {.
raises: [], nimcall, noSideEffect, gcsafe
.}
proc defaultHasher(
input: seq[byte]
): array[IdLength, byte] {.raises: [], nimcall, noSideEffect, gcsafe.} =
return sha256.digest(input).data
# useful for testing purposes
proc noOpHasher*(
input: seq[byte]
): array[IdLength, byte] {.raises: [], nimcall, noSideEffect, gcsafe.} =
var data: array[IdLength, byte]
discard data.copyFrom(input)
return data
proc countLeadingZeroBits*(b: byte): int =
for i in 0 .. 7:
@@ -31,25 +49,23 @@ proc `<`*(a, b: XorDistance): bool =
proc `<=`*(a, b: XorDistance): bool =
cmp(a, b) <= 0
proc hashFor(k: Key): seq[byte] =
proc hashFor(k: Key, hasher: Opt[XorDHasher]): seq[byte] =
return
@(
case k.kind
of KeyType.PeerId:
sha256.digest(k.peerId.getBytes()).data
hasher.get(defaultHasher)(k.peerId.getBytes())
of KeyType.Raw:
sha256.digest(k.data).data
of KeyType.Unhashed:
k.data
hasher.get(defaultHasher)(k.data)
)
proc xorDistance*(a, b: Key): XorDistance =
let hashA = a.hashFor()
let hashB = b.hashFor()
proc xorDistance*(a, b: Key, hasher: Opt[XorDHasher]): XorDistance =
let hashA = a.hashFor(hasher)
let hashB = b.hashFor(hasher)
var response: XorDistance
for i in 0 ..< hashA.len:
response[i] = hashA[i] xor hashB[i]
return response
proc xorDistance*(a: PeerId, b: Key): XorDistance =
xorDistance(a.toKey(), b)
proc xorDistance*(a: PeerId, b: Key, hasher: Opt[XorDHasher]): XorDistance =
xorDistance(a.toKey(), b, hasher)
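A test-style sketch of the hasher hook; with noOpHasher the distance degenerates to a plain byte-wise XOR of the keys (consts/keys imports assumed):

let hasher = Opt.some(XorDHasher(noOpHasher))
var otherBytes = newSeq[byte](IdLength)
otherBytes[0] = 0x80 # differs from the all-zero key in the top bit
let d = xorDistance(newSeq[byte](IdLength).toKey(), otherBytes.toKey(), hasher)
doAssert d.leadingZeros == 0 # the very first bit already differs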

View File

@@ -0,0 +1,53 @@
import endians, nimcrypto
proc aes_ctr*(key, iv, data: openArray[byte]): seq[byte] =
## Processes 'data' using AES in CTR mode.
## For CTR mode, the same function handles both encryption and decryption.
doAssert key.len == 16, "Key must be 16 bytes for AES-128"
doAssert iv.len == 16, "IV must be 16 bytes for AES-128"
var
ctx: CTR[aes128]
output = newSeq[byte](data.len)
ctx.init(key, iv)
ctx.encrypt(data, output)
ctx.clear()
output
proc advance_ctr*(iv: var openArray[byte], blocks: uint64) =
## Advances the counter in the AES-CTR IV by a specified number of blocks.
var counter: uint64
bigEndian64(addr counter, addr iv[8])
counter += blocks
bigEndian64(addr iv[8], addr counter)
proc aes_ctr_start_index*(key, iv, data: openArray[byte], startIndex: int): seq[byte] =
## Encrypts 'data' using AES in CTR mode from startIndex, without processing all preceding data.
## For CTR mode, the same function handles both encryption and decryption.
doAssert key.len == 16, "Key must be 16 bytes for AES-128"
doAssert iv.len == 16, "IV must be 16 bytes for AES-128"
doAssert startIndex mod 16 == 0, "Start index must be a multiple of 16"
var advIV = @iv
# Advance the counter to the start index
let blocksToAdvance = startIndex div 16
advance_ctr(advIV, blocksToAdvance.uint64)
return aes_ctr(key, advIV, data)
proc sha256_hash*(data: openArray[byte]): array[32, byte] =
## Hashes 'data' using SHA-256.
return sha256.digest(data).data
proc kdf*(key: openArray[byte]): seq[byte] =
## Returns the hash of 'key' truncated to 16 bytes.
let hash = sha256_hash(key)
return hash[0 .. 15]
proc hmac*(key, data: openArray[byte]): seq[byte] =
## Computes an HMAC-SHA256 of 'data' with the given 'key', truncated to 16 bytes.
let hmac = sha256.hmac(key, data).data
return hmac[0 .. 15]
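A sketch of why advance_ctr enables random access: CTR keystream blocks depend only on the counter value, so encrypting from a block-aligned offset matches slicing a full pass:

let
  key = newSeq[byte](16) # demo key (all zero)
  iv = newSeq[byte](16) # demo IV (all zero)
  data = newSeq[byte](64) # four AES blocks
let full = aes_ctr(key, iv, data)
let tail = aes_ctr_start_index(key, iv, data[32 .. ^1], 32)
doAssert tail == full[32 .. ^1]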

View File

@@ -0,0 +1,52 @@
import results
import bearssl/rand
import ../../crypto/curve25519
const FieldElementSize* = Curve25519KeySize
type FieldElement* = Curve25519Key
proc bytesToFieldElement*(bytes: openArray[byte]): Result[FieldElement, string] =
## Convert bytes to FieldElement
if bytes.len != FieldElementSize:
return err("Field element size must be 32 bytes")
ok(intoCurve25519Key(bytes))
proc fieldElementToBytes*(fe: FieldElement): seq[byte] =
## Convert FieldElement to bytes
fe.getBytes()
# Generate a random FieldElement
proc generateRandomFieldElement*(): Result[FieldElement, string] =
let rng = HmacDrbgContext.new()
if rng.isNil:
return err("Failed to create HmacDrbgContext with system randomness")
ok(Curve25519Key.random(rng[]))
# Generate a key pair (private key and public key are both FieldElements)
proc generateKeyPair*(): Result[tuple[privateKey, publicKey: FieldElement], string] =
let privateKey = generateRandomFieldElement().valueOr:
return err("Error in private key generation: " & error)
let publicKey = public(privateKey)
ok((privateKey, publicKey))
proc multiplyPointWithScalars*(
point: FieldElement, scalars: openArray[FieldElement]
): FieldElement =
## Multiply a given Curve25519 point with a set of scalars
var res = point
for scalar in scalars:
Curve25519.mul(res, scalar)
res
proc multiplyBasePointWithScalars*(
scalars: openArray[FieldElement]
): Result[FieldElement, string] =
## Multiply the Curve25519 base point with a set of scalars
if scalars.len <= 0:
return err("Atleast one scalar must be provided")
var res: FieldElement = public(scalars[0]) # Use the predefined base point
for i in 1 ..< scalars.len:
Curve25519.mul(res, scalars[i]) # Multiply with each scalar
ok(res)
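A sketch of the Diffie-Hellman-style exchange these helpers support; scalar multiplication commutes, so both sides derive the same shared point:

let (a, ga) = generateKeyPair().expect("keygen")
let (b, gb) = generateKeyPair().expect("keygen")
let sharedA = multiplyPointWithScalars(gb, [a]) # (g^b)^a
let sharedB = multiplyPointWithScalars(ga, [b]) # (g^a)^b
doAssert sharedA.fieldElementToBytes() == sharedB.fieldElementToBytes()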

View File

@@ -0,0 +1,133 @@
import hashes, chronos, stew/byteutils, results, chronicles
import ../../stream/connection
import ../../varint
import ../../utils/sequninit
import ./mix_protocol
from fragmentation import DataSize
type MixDialer* = proc(
msg: seq[byte], codec: string, destination: MixDestination
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).}
type MixEntryConnection* = ref object of Connection
destination: MixDestination
codec: string
mixDialer: MixDialer
func shortLog*(conn: MixEntryConnection): string =
if conn == nil:
"MixEntryConnection(nil)"
else:
"MixEntryConnection(" & $conn.destination & ")"
chronicles.formatIt(MixEntryConnection):
shortLog(it)
method readOnce*(
s: MixEntryConnection, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]), public.} =
# TODO: implement
raise newLPStreamEOFError()
method readExactly*(
s: MixEntryConnection, pbytes: pointer, nbytes: int
): Future[void] {.async: (raises: [CancelledError, LPStreamError]), public.} =
# TODO: implement
raise newLPStreamEOFError()
method readLine*(
s: MixEntryConnection, limit = 0, sep = "\r\n"
): Future[string] {.async: (raises: [CancelledError, LPStreamError]), public.} =
# TODO: implement
raise newLPStreamEOFError()
method readVarint*(
conn: MixEntryConnection
): Future[uint64] {.async: (raises: [CancelledError, LPStreamError]), public.} =
# TODO: implement
raise newLPStreamEOFError()
method readLp*(
s: MixEntryConnection, maxSize: int
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]), public.} =
# TODO: implement
raise newLPStreamEOFError()
method write*(
self: MixEntryConnection, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
self.mixDialer(msg, self.codec, self.destination)
proc write*(
self: MixEntryConnection, msg: string
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
self.write(msg.toBytes())
method writeLp*(
self: MixEntryConnection, msg: openArray[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
## Write `msg` with a varint-encoded length prefix
if msg.len() > DataSize:
let fut = newFuture[void]()
fut.fail(
newException(LPStreamError, "exceeds max msg size of " & $DataSize & " bytes")
)
return fut
let vbytes = PB.toBytes(msg.len().uint64)
var buf = newSeqUninit[byte](msg.len() + vbytes.len)
buf[0 ..< vbytes.len] = vbytes.toOpenArray()
buf[vbytes.len ..< buf.len] = msg
self.write(buf)
method writeLp*(
self: MixEntryConnection, msg: string
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
self.writeLp(msg.toOpenArrayByte(0, msg.high))
method closeImpl*(
self: MixEntryConnection
): Future[void] {.async: (raises: [], raw: true).} =
let fut = newFuture[void]()
fut.complete()
return fut
func hash*(self: MixEntryConnection): Hash =
hash($self.destination)
when defined(libp2p_agents_metrics):
proc setShortAgent*(self: MixEntryConnection, shortAgent: string) =
discard
proc new*(
T: typedesc[MixEntryConnection],
srcMix: MixProtocol,
destination: MixDestination,
codec: string,
): T {.raises: [].} =
var instance = T()
instance.destination = destination
instance.codec = codec
instance.mixDialer = proc(
msg: seq[byte], codec: string, dest: MixDestination
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
await srcMix.anonymizeLocalProtocolSend(
nil, msg, codec, dest, 0 # TODO: set incoming queue for replies and surbs
)
when defined(libp2p_agents_metrics):
instance.shortAgent = connection.shortAgent
instance
proc toConnection*(
srcMix: MixProtocol, destination: MixDestination, codec: string
): Result[Connection, string] {.gcsafe, raises: [].} =
## Create a stream to send and optionally receive responses.
## Under the hood it will wrap the message in a sphinx packet
## and send it via a random mix path.
ok(MixEntryConnection.new(srcMix, destination, codec))
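A hypothetical caller-side sketch (the proc name and codec string are assumptions, not part of this change): obtain an anonymized Connection and push a length-prefixed payload through it.
proc sendAnonymously(
    mixProto: MixProtocol, dest: MixDestination, payload: seq[byte]
) {.async.} =
  # toConnection wraps writes in Sphinx packets routed over a random mix path.
  let conn = mixProto.toConnection(dest, "/myapp/1.0.0").expect("mix connection")
  defer:
    await conn.close()
  await conn.writeLp(payload)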

View File

@@ -0,0 +1,36 @@
import chronicles, chronos, metrics
import ../../builders
import ../../stream/connection
import ./mix_metrics
type ExitLayer* = object
switch: Switch
proc init*(T: typedesc[ExitLayer], switch: Switch): T =
ExitLayer(switch: switch)
proc onMessage*(
self: ExitLayer,
codec: string,
message: seq[byte],
destAddr: MultiAddress,
destPeerId: PeerId,
) {.async: (raises: [CancelledError]).} =
# If dialing the destination fails, no response is returned to
# the sender, so the flow can simply end here. Only log errors
# for now.
# https://github.com/vacp2p/mix/issues/86
try:
let destConn = await self.switch.dial(destPeerId, @[destAddr], codec)
defer:
await destConn.close()
await destConn.write(message)
except LPStreamError as exc:
error "Stream error while writing to next hop: ", err = exc.msg
mix_messages_error.inc(labelValues = ["ExitLayer", "LPSTREAM_ERR"])
except DialFailedError as exc:
error "Failed to dial next hop: ", err = exc.msg
mix_messages_error.inc(labelValues = ["ExitLayer", "DIAL_FAILED"])
except CancelledError as exc:
raise exc

View File

@@ -0,0 +1,95 @@
import ./[serialization, seqno_generator]
import results, stew/endians2
import ../../peerid
const PaddingLengthSize* = 2
const SeqNoSize* = 4
const DataSize* = MessageSize - PaddingLengthSize - SeqNoSize
# Unpadding and reassembling messages will be handled by the top-level applications.
# Although padding and splitting messages could also be managed at that level, we
# implement it here to clarify the sender's logic.
# This is crucial as the sender is responsible for wrapping messages in Sphinx packets.
type MessageChunk* = object
paddingLength: uint16
data: seq[byte]
seqNo: uint32
proc init*(
T: typedesc[MessageChunk], paddingLength: uint16, data: seq[byte], seqNo: uint32
): T =
T(paddingLength: paddingLength, data: data, seqNo: seqNo)
proc get*(msgChunk: MessageChunk): (uint16, seq[byte], uint32) =
(msgChunk.paddingLength, msgChunk.data, msgChunk.seqNo)
proc serialize*(msgChunk: MessageChunk): seq[byte] =
let
paddingBytes = msgChunk.paddingLength.toBytesBE()
seqNoBytes = msgChunk.seqNo.toBytesBE()
doAssert msgChunk.data.len == DataSize,
"Padded data must be exactly " & $DataSize & " bytes"
return @paddingBytes & msgChunk.data & @seqNoBytes
proc deserialize*(T: typedesc[MessageChunk], data: openArray[byte]): Result[T, string] =
if data.len != MessageSize:
return err("Data must be exactly " & $MessageSize & " bytes")
let
paddingLength = uint16.fromBytesBE(data[0 .. PaddingLengthSize - 1])
chunk = data[PaddingLengthSize .. (PaddingLengthSize + DataSize - 1)]
seqNo = uint32.fromBytesBE(data[PaddingLengthSize + DataSize ..^ 1])
ok(T(paddingLength: paddingLength, data: chunk, seqNo: seqNo))
proc ceilDiv*(a, b: int): int =
(a + b - 1) div b
proc addPadding*(messageBytes: seq[byte], seqNo: SeqNo): MessageChunk =
## Pads messages smaller than DataSize
let paddingLength = uint16(DataSize - messageBytes.len)
let paddedData =
if paddingLength > 0:
let paddingBytes = newSeq[byte](paddingLength)
paddingBytes & messageBytes
else:
messageBytes
MessageChunk(paddingLength: paddingLength, data: paddedData, seqNo: seqNo)
proc addPadding*(messageBytes: seq[byte], peerId: PeerId): MessageChunk =
## Pads messages smaller than DataSize
var seqNoGen = SeqNo.init(peerId)
seqNoGen.generate(messageBytes)
messageBytes.addPadding(seqNoGen)
proc removePadding*(msgChunk: MessageChunk): Result[seq[byte], string] =
let msgLength = len(msgChunk.data) - int(msgChunk.paddingLength)
if msgLength < 0:
return err("Invalid padding length")
ok(msgChunk.data[msgChunk.paddingLength ..^ 1])
proc padAndChunkMessage*(messageBytes: seq[byte], peerId: PeerId): seq[MessageChunk] =
var seqNoGen = SeqNo.init(peerId)
seqNoGen.generate(messageBytes)
var chunks: seq[MessageChunk] = @[]
# Split into chunks, ensuring at least one chunk is generated
let totalChunks = max(1, ceilDiv(messageBytes.len, DataSize))
for i in 0 ..< totalChunks:
let
startIdx = i * DataSize
endIdx = min(startIdx + DataSize, messageBytes.len)
chunkData = messageBytes[startIdx .. endIdx - 1]
msgChunk = chunkData.addPadding(seqNoGen)
chunks.add(msgChunk)
seqNoGen.inc()
return chunks
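A small sketch of the chunking flow (PeerId.random is assumed to be available from the imported peerid module): anything larger than DataSize is split into fixed-size, sequence-numbered chunks.
when isMainModule:
  let peerId = PeerId.random().expect("peer id")
  let msg = newSeq[byte](3 * DataSize + 10) # forces a fourth, mostly-padding chunk
  let chunks = msg.padAndChunkMessage(peerId)
  doAssert chunks.len == ceilDiv(msg.len, DataSize)
  for c in chunks:
    doAssert c.serialize().len == MessageSize # constant wire size per chunk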

View File

@@ -0,0 +1,47 @@
import chronicles, results
import stew/[byteutils, leb128]
import ../../protobuf/minprotobuf
import ../../utils/sequninit
type MixMessage* = object
message*: seq[byte]
codec*: string
proc init*(T: typedesc[MixMessage], message: openArray[byte], codec: string): T =
return T(message: @message, codec: codec)
proc serialize*(mixMsg: MixMessage): seq[byte] =
let vbytes = toBytes(mixMsg.codec.len.uint64, Leb128)
doAssert vbytes.len <= 2, "serialization failed: codec length exceeds 2 bytes"
var buf = newSeqUninit[byte](vbytes.len + mixMsg.codec.len + mixMsg.message.len)
buf[0 ..< vbytes.len] = vbytes.toOpenArray()
buf[vbytes.len ..< vbytes.len + mixMsg.codec.len] = mixMsg.codec.toBytes()
buf[vbytes.len + mixMsg.codec.len ..< buf.len] = mixMsg.message
buf
proc deserialize*(
T: typedesc[MixMessage], data: openArray[byte]
): Result[MixMessage, string] =
if data.len == 0:
return err("deserialization failed: data is empty")
var codecLen: int
var varintLen: int
for i in 0 ..< min(data.len, 2):
let parsed = uint16.fromBytes(data[0 .. i], Leb128)
if parsed.len < 0 or (i == 1 and parsed.len == 0):
return err("deserialization failed: invalid codec length")
varintLen = parsed.len
codecLen = parsed.val.int
if data.len < varintLen + codecLen:
return err("deserialization failed: not enough data")
ok(
T(
codec: string.fromBytes(data[varintLen ..< varintLen + codecLen]),
message: data[varintLen + codecLen ..< data.len],
)
)
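A round-trip sketch of the framing above (the codec string is illustrative): a LEB128 length prefix, then the codec, then the raw message bytes.
when isMainModule:
  let original = MixMessage.init(@[1.byte, 2, 3], "/chat/1.0.0")
  let decoded = MixMessage.deserialize(original.serialize()).expect("round trip")
  doAssert decoded.codec == original.codec
  doAssert decoded.message == original.message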

View File

@@ -0,0 +1,13 @@
{.push raises: [].}
import metrics
declarePublicCounter mix_messages_recvd, "number of mix messages received", ["type"]
declarePublicCounter mix_messages_forwarded,
"number of mix messages forwarded", ["type"]
declarePublicCounter mix_messages_error,
"number of mix messages failed processing", ["type", "error"]
declarePublicGauge mix_pool_size, "number of nodes in the pool"

View File

@@ -0,0 +1,318 @@
import os, results, strformat, sugar, sequtils
import std/streams
import ../../crypto/[crypto, curve25519, secp]
import ../../[multiaddress, multicodec, peerid, peerinfo]
import ./[serialization, curve25519, multiaddr]
const MixNodeInfoSize* =
AddrSize + (2 * FieldElementSize) + (SkRawPublicKeySize + SkRawPrivateKeySize)
const MixPubInfoSize* = AddrSize + FieldElementSize + SkRawPublicKeySize
type MixNodeInfo* = object
peerId*: PeerId
multiAddr*: MultiAddress
mixPubKey*: FieldElement
mixPrivKey*: FieldElement
libp2pPubKey*: SkPublicKey
libp2pPrivKey*: SkPrivateKey
proc initMixNodeInfo*(
peerId: PeerId,
multiAddr: MultiAddress,
mixPubKey, mixPrivKey: FieldElement,
libp2pPubKey: SkPublicKey,
libp2pPrivKey: SkPrivateKey,
): MixNodeInfo =
MixNodeInfo(
peerId: peerId,
multiAddr: multiAddr,
mixPubKey: mixPubKey,
mixPrivKey: mixPrivKey,
libp2pPubKey: libp2pPubKey,
libp2pPrivKey: libp2pPrivKey,
)
proc get*(
info: MixNodeInfo
): (PeerId, MultiAddress, FieldElement, FieldElement, SkPublicKey, SkPrivateKey) =
(
info.peerId, info.multiAddr, info.mixPubKey, info.mixPrivKey, info.libp2pPubKey,
info.libp2pPrivKey,
)
proc serialize*(nodeInfo: MixNodeInfo): Result[seq[byte], string] =
let addrBytes = multiAddrToBytes(nodeInfo.peerId, nodeInfo.multiAddr).valueOr:
return err("Error in multiaddress conversion to bytes: " & error)
let
mixPubKeyBytes = fieldElementToBytes(nodeInfo.mixPubKey)
mixPrivKeyBytes = fieldElementToBytes(nodeInfo.mixPrivKey)
libp2pPubKeyBytes = nodeInfo.libp2pPubKey.getBytes()
libp2pPrivKeyBytes = nodeInfo.libp2pPrivKey.getBytes()
return ok(
addrBytes & mixPubKeyBytes & mixPrivKeyBytes & libp2pPubKeyBytes & libp2pPrivKeyBytes
)
proc deserialize*(T: typedesc[MixNodeInfo], data: openArray[byte]): Result[T, string] =
if len(data) != MixNodeInfoSize:
return
err("Serialized Mix node info must be exactly " & $MixNodeInfoSize & " bytes")
let (peerId, multiAddr) = bytesToMultiAddr(data[0 .. AddrSize - 1]).valueOr:
return err("Error in multiaddress conversion to bytes: " & error)
let mixPubKey = bytesToFieldElement(
data[AddrSize .. (AddrSize + FieldElementSize - 1)]
).valueOr:
return err("Mix public key deserialize error: " & error)
let mixPrivKey = bytesToFieldElement(
data[(AddrSize + FieldElementSize) .. (AddrSize + (2 * FieldElementSize) - 1)]
).valueOr:
return err("Mix private key deserialize error: " & error)
let libp2pPubKey = SkPublicKey.init(
data[
AddrSize + (2 * FieldElementSize) ..
AddrSize + (2 * FieldElementSize) + SkRawPublicKeySize - 1
]
).valueOr:
return err("Failed to initialize libp2p public key")
let libp2pPrivKey = SkPrivateKey.init(
data[AddrSize + (2 * FieldElementSize) + SkRawPublicKeySize ..^ 1]
).valueOr:
return err("Failed to initialize libp2p private key")
ok(
T(
peerId: peerId,
multiAddr: multiAddr,
mixPubKey: mixPubKey,
mixPrivKey: mixPrivKey,
libp2pPubKey: libp2pPubKey,
libp2pPrivKey: libp2pPrivKey,
)
)
proc writeToFile*(
node: MixNodeInfo, index: int, nodeInfoFolderPath: string = "./nodeInfo"
): Result[void, string] =
if not dirExists(nodeInfoFolderPath):
createDir(nodeInfoFolderPath)
let filename = nodeInfoFolderPath / fmt"mixNode_{index}"
var file = newFileStream(filename, fmWrite)
if file == nil:
return err("Failed to create file stream for " & filename)
defer:
file.close()
let serializedData = node.serialize().valueOr:
return err("Failed to serialize mix node info: " & error)
file.writeData(unsafeAddr serializedData[0], serializedData.len)
return ok()
proc readFromFile*(
T: typedesc[MixNodeInfo], index: int, nodeInfoFolderPath: string = "./nodeInfo"
): Result[T, string] =
let filename = nodeInfoFolderPath / fmt"mixNode_{index}"
if not fileExists(filename):
return err("File does not exist")
var file = newFileStream(filename, fmRead)
if file == nil:
return err(
"Failed to open file: " & filename &
". Check permissions or if the path is correct."
)
defer:
file.close()
let data = ?file.readAll().catch().mapErr(x => "File read error: " & x.msg)
if data.len != MixNodeInfoSize:
return err(
"Invalid data size for MixNodeInfo: expected " & $MixNodeInfoSize &
" bytes, but got " & $(data.len) & " bytes."
)
let dMixNodeInfo = MixNodeInfo.deserialize(cast[seq[byte]](data)).valueOr:
return err("Mix node info deserialize error: " & error)
return ok(dMixNodeInfo)
proc deleteNodeInfoFolder*(nodeInfoFolderPath: string = "./nodeInfo") =
## Deletes the folder that stores serialized mix node info files
## along with all its contents, if the folder exists.
if dirExists(nodeInfoFolderPath):
removeDir(nodeInfoFolderPath)
type MixPubInfo* = object
peerId*: PeerId
multiAddr*: MultiAddress
mixPubKey*: FieldElement
libp2pPubKey*: SkPublicKey
proc init*(
T: typedesc[MixPubInfo],
peerId: PeerId,
multiAddr: MultiAddress,
mixPubKey: FieldElement,
libp2pPubKey: SkPublicKey,
): T =
T(
peerId: peerId,
multiAddr: multiAddr,
mixPubKey: mixPubKey,
libp2pPubKey: libp2pPubKey,
)
proc get*(info: MixPubInfo): (PeerId, MultiAddress, FieldElement, SkPublicKey) =
(info.peerId, info.multiAddr, info.mixPubKey, info.libp2pPubKey)
proc serialize*(nodeInfo: MixPubInfo): Result[seq[byte], string] =
let addrBytes = multiAddrToBytes(nodeInfo.peerId, nodeInfo.multiAddr).valueOr:
return err("Error in multiaddress conversion to bytes: " & error)
let
mixPubKeyBytes = fieldElementToBytes(nodeInfo.mixPubKey)
libp2pPubKeyBytes = nodeInfo.libp2pPubKey.getBytes()
return ok(addrBytes & mixPubKeyBytes & libp2pPubKeyBytes)
proc deserialize*(T: typedesc[MixPubInfo], data: openArray[byte]): Result[T, string] =
if len(data) != MixPubInfoSize:
return
err("Serialized mix public info must be exactly " & $MixPubInfoSize & " bytes")
let (peerId, multiAddr) = bytesToMultiAddr(data[0 .. AddrSize - 1]).valueOr:
return err("Error in bytes to multiaddress conversion: " & error)
let mixPubKey = bytesToFieldElement(
data[AddrSize .. (AddrSize + FieldElementSize - 1)]
).valueOr:
return err("Mix public key deserialize error: " & error)
let libp2pPubKey = SkPublicKey.init(data[(AddrSize + FieldElementSize) ..^ 1]).valueOr:
return err("Failed to initialize libp2p public key: ")
ok(
MixPubInfo(
peerId: peerId,
multiAddr: multiAddr,
mixPubKey: mixPubKey,
libp2pPubKey: libp2pPubKey,
)
)
proc writeToFile*(
node: MixPubInfo, index: int, pubInfoFolderPath: string = "./pubInfo"
): Result[void, string] =
if not dirExists(pubInfoFolderPath):
createDir(pubInfoFolderPath)
let filename = pubInfoFolderPath / fmt"mixNode_{index}"
var file = newFileStream(filename, fmWrite)
if file == nil:
return err("Failed to create file stream for " & filename)
defer:
file.close()
let serializedData = node.serialize().valueOr:
return err("Failed to serialize mix pub info: " & error)
file.writeData(unsafeAddr serializedData[0], serializedData.len)
return ok()
proc readFromFile*(
T: typedesc[MixPubInfo], index: int, pubInfoFolderPath: string = "./pubInfo"
): Result[T, string] =
let filename = pubInfoFolderPath / fmt"mixNode_{index}"
if not fileExists(filename):
return err("File does not exist")
var file = newFileStream(filename, fmRead)
if file == nil:
return err(
"Failed to open file: " & filename &
". Check permissions or if the path is correct."
)
defer:
file.close()
let data = ?file.readAll().catch().mapErr(x => "File read error: " & x.msg)
if data.len != MixPubInfoSize:
return err(
"Invalid data size for MixNodeInfo: expected " & $MixNodeInfoSize &
" bytes, but got " & $(data.len) & " bytes."
)
let dMixPubInfo = MixPubInfo.deserialize(cast[seq[byte]](data)).valueOr:
return err("Mix pub info deserialize error: " & error)
return ok(dMixPubInfo)
proc deletePubInfoFolder*(pubInfoFolderPath: string = "./pubInfo") =
## Deletes the folder containing serialized public mix node info
## and all files inside it, if the folder exists.
if dirExists(pubInfoFolderPath):
removeDir(pubInfoFolderPath)
type MixNodes* = seq[MixNodeInfo]
proc getMixPubInfoByIndex*(self: MixNodes, index: int): Result[MixPubInfo, string] =
if index < 0 or index >= self.len:
return err("Index must be between 0 and " & $(self.len))
ok(
MixPubInfo(
peerId: self[index].peerId,
multiAddr: self[index].multiAddr,
mixPubKey: self[index].mixPubKey,
libp2pPubKey: self[index].libp2pPubKey,
)
)
proc generateMixNodes(
count: int, basePort: int = 4242, rng: ref HmacDrbgContext = newRng()
): Result[MixNodes, string] =
var nodes = newSeq[MixNodeInfo](count)
for i in 0 ..< count:
let keyPairResult = generateKeyPair()
if keyPairResult.isErr:
return err("Generate key pair error: " & $keyPairResult.error)
let (mixPrivKey, mixPubKey) = keyPairResult.get()
let
keyPair = SkKeyPair.random(rng[]) # use the rng parameter rather than a fresh one
pubKeyProto = PublicKey(scheme: Secp256k1, skkey: keyPair.pubkey)
peerId = PeerId.init(pubKeyProto).get()
multiAddr =
?MultiAddress.init(fmt"/ip4/0.0.0.0/tcp/{basePort + i}").tryGet().catch().mapErr(
x => x.msg
)
nodes[i] = MixNodeInfo(
peerId: peerId,
multiAddr: multiAddr,
mixPubKey: mixPubKey,
mixPrivKey: mixPrivKey,
libp2pPubKey: keyPair.pubkey,
libp2pPrivKey: keyPair.seckey,
)
ok(nodes)
proc initializeMixNodes*(count: int, basePort: int = 4242): Result[MixNodes, string] =
## Creates and initializes a set of mix nodes
let mixNodes = generateMixNodes(count, basePort).valueOr:
return err("Mix node initialization error: " & error)
return ok(mixNodes)
proc findByPeerId*(self: MixNodes, peerId: PeerId): Result[MixNodeInfo, string] =
let filteredNodes = self.filterIt(it.peerId == peerId)
if filteredNodes.len != 0:
return ok(filteredNodes[0])
return err("No node with peer id: " & $peerId)
proc initMixMultiAddrByIndex*(
self: var MixNodes, index: int, peerId: PeerId, multiAddr: MultiAddress
): Result[void, string] =
if index < 0 or index >= self.len:
return err("Index must be between 0 and " & $(self.len))
self[index].multiAddr = multiAddr
self[index].peerId = peerId
ok()
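A bootstrap sketch tying the pieces above together (folder paths use the defaults; runnable only where the working directory is writable): generate a few nodes, persist both the private and public records, and read one back.
when isMainModule:
  let nodes = initializeMixNodes(3).expect("node generation")
  for i in 0 ..< nodes.len:
    nodes[i].writeToFile(i).expect("write node info") # ./nodeInfo/mixNode_i
    let pubInfo = nodes.getMixPubInfoByIndex(i).expect("pub info")
    pubInfo.writeToFile(i).expect("write pub info") # ./pubInfo/mixNode_i
  let restored = MixNodeInfo.readFromFile(0).expect("read node info")
  doAssert restored.peerId == nodes[0].peerId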

View File

@@ -0,0 +1,392 @@
import chronicles, chronos, sequtils, strutils, os, results
import std/[strformat, tables], metrics
import
./[
curve25519, fragmentation, mix_message, mix_node, sphinx, serialization,
tag_manager, mix_metrics, exit_layer, multiaddr,
]
import stew/endians2
import ../protocol
import ../../stream/[connection, lpstream]
import ../../[switch, multicodec, peerinfo]
const MixProtocolID* = "/mix/1.0.0"
## Mix Protocol defines a decentralized anonymous message routing layer for libp2p networks.
## It enables sender anonymity by routing each message through a decentralized mix overlay
## network composed of participating libp2p nodes, known as mix nodes. Each message is
## routed independently in a stateless manner, allowing other libp2p protocols to selectively
## anonymize messages without modifying their core protocol behavior.
type MixProtocol* = ref object of LPProtocol
mixNodeInfo: MixNodeInfo
pubNodeInfo: Table[PeerId, MixPubInfo]
switch: Switch
tagManager: TagManager
exitLayer: ExitLayer
rng: ref HmacDrbgContext
proc loadAllButIndexMixPubInfo*(
index, numNodes: int, pubInfoFolderPath: string = "./pubInfo"
): Result[Table[PeerId, MixPubInfo], string] =
var pubInfoTable = initTable[PeerId, MixPubInfo]()
for i in 0 ..< numNodes:
if i == index:
continue
let pubInfo = MixPubInfo.readFromFile(i, pubInfoFolderPath).valueOr:
return err("Failed to load pub info from file: " & error)
pubInfoTable[pubInfo.peerId] = pubInfo
return ok(pubInfoTable)
proc cryptoRandomInt(rng: ref HmacDrbgContext, max: int): Result[int, string] =
if max == 0:
return err("Max cannot be zero.")
let res = rng[].generate(uint64) mod uint64(max)
ok(res.int)
proc handleMixNodeConnection(
mixProto: MixProtocol, conn: Connection
) {.async: (raises: [LPStreamError, CancelledError]).} =
let receivedBytes =
try:
await conn.readLp(PacketSize)
except CancelledError as exc:
raise exc
finally:
await conn.close()
if receivedBytes.len == 0:
mix_messages_error.inc(labelValues = ["Intermediate/Exit", "NO_DATA"])
return # No data, end of stream
# Process the packet
let (peerId, multiAddr, _, mixPrivKey, _, _) = mixProto.mixNodeInfo.get()
let sphinxPacket = SphinxPacket.deserialize(receivedBytes).valueOr:
error "Sphinx packet deserialization error", err = error
mix_messages_error.inc(labelValues = ["Intermediate/Exit", "INVALID_SPHINX"])
return
let processedSP = processSphinxPacket(sphinxPacket, mixPrivKey, mixProto.tagManager).valueOr:
error "Failed to process Sphinx packet", err = error
mix_messages_error.inc(labelValues = ["Intermediate/Exit", "INVALID_SPHINX"])
return
case processedSP.status
of Exit:
mix_messages_recvd.inc(labelValues = ["Exit"])
# This is the exit node, forward to destination
let msgChunk = MessageChunk.deserialize(processedSP.messageChunk).valueOr:
error "Deserialization failed", err = error
mix_messages_error.inc(labelValues = ["Exit", "INVALID_SPHINX"])
return
let unpaddedMsg = msgChunk.removePadding().valueOr:
error "Unpadding message failed", err = error
mix_messages_error.inc(labelValues = ["Exit", "INVALID_SPHINX"])
return
let deserialized = MixMessage.deserialize(unpaddedMsg).valueOr:
error "Deserialization failed", err = error
mix_messages_error.inc(labelValues = ["Exit", "INVALID_SPHINX"])
return
if processedSP.destination == Hop():
error "no destination available"
mix_messages_error.inc(labelValues = ["Exit", "NO_DESTINATION"])
return
let destBytes = processedSP.destination.get()
let (destPeerId, destAddr) = bytesToMultiAddr(destBytes).valueOr:
error "Failed to convert bytes to multiaddress", err = error
mix_messages_error.inc(labelValues = ["Exit", "INVALID_DEST"])
return
trace "Exit node - Received mix message",
peerId,
message = deserialized.message,
codec = deserialized.codec,
to = destPeerId
await mixProto.exitLayer.onMessage(
deserialized.codec, deserialized.message, destAddr, destPeerId
)
mix_messages_forwarded.inc(labelValues = ["Exit"])
of Reply:
# TODO: implement
discard
of Intermediate:
trace "# Intermediate: ", peerId, multiAddr
# Add delay
mix_messages_recvd.inc(labelValues = ["Intermediate"])
await sleepAsync(milliseconds(processedSP.delayMs))
# Forward to next hop
let nextHopBytes = processedSP.nextHop.get()
let (nextPeerId, nextAddr) = bytesToMultiAddr(nextHopBytes).valueOr:
error "Failed to convert bytes to multiaddress", err = error
mix_messages_error.inc(labelValues = ["Intermediate", "INVALID_DEST"])
return
try:
let nextHopConn =
await mixProto.switch.dial(nextPeerId, @[nextAddr], MixProtocolID)
defer:
await nextHopConn.close()
await nextHopConn.writeLp(processedSP.serializedSphinxPacket)
mix_messages_forwarded.inc(labelValues = ["Intermediate"])
except CancelledError as exc:
raise exc
except DialFailedError as exc:
error "Failed to dial next hop: ", err = exc.msg
mix_messages_error.inc(labelValues = ["Intermediate", "DIAL_FAILED"])
except LPStreamError as exc:
error "Failed to write to next hop: ", err = exc.msg
mix_messages_error.inc(labelValues = ["Intermediate", "DIAL_FAILED"])
of Duplicate:
mix_messages_error.inc(labelValues = ["Intermediate/Exit", "DUPLICATE"])
of InvalidMAC:
mix_messages_error.inc(labelValues = ["Intermediate/Exit", "INVALID_MAC"])
proc getMaxMessageSizeForCodec*(
codec: string, numberOfSurbs: uint8 = 0
): Result[int, string] =
## Computes the maximum payload size (in bytes) available for a message when encoded
## with the given `codec`.
## Returns an error if the codec length would cause it to exceed the data capacity.
let serializedMsg = MixMessage.init(@[], codec).serialize()
if serializedMsg.len > DataSize:
return err("cannot encode messages for this codec")
return ok(DataSize - serializedMsg.len)
proc sendPacket(
mixProto: MixProtocol,
multiAddrs: MultiAddress,
sphinxPacket: seq[byte],
label: string,
) {.async: (raises: [CancelledError]).} =
## Send the wrapped message to the first mix node in the selected path
let (firstMixPeerId, firstMixAddr) = parseFullAddress(multiAddrs).valueOr:
error "Invalid multiaddress", err = error
mix_messages_error.inc(labelValues = [label, "NON_RECOVERABLE"])
return
try:
let nextHopConn =
await mixProto.switch.dial(firstMixPeerId, @[firstMixAddr], @[MixProtocolID])
defer:
await nextHopConn.close()
await nextHopConn.writeLp(sphinxPacket)
mix_messages_forwarded.inc(labelValues = ["Entry"]) # count only successful sends
except DialFailedError as exc:
error "Failed to dial next hop: ",
peerId = firstMixPeerId, address = firstMixAddr, err = exc.msg
mix_messages_error.inc(labelValues = [label, "SEND_FAILED"])
except LPStreamError as exc:
error "Failed to write to next hop: ",
peerId = firstMixPeerId, address = firstMixAddr, err = exc.msg
mix_messages_error.inc(labelValues = [label, "SEND_FAILED"])
except CancelledError as exc:
raise exc
mix_messages_forwarded.inc(labelValues = ["Entry"])
proc buildMessage(
msg: seq[byte], codec: string, peerId: PeerId
): Result[Message, (string, string)] =
let
mixMsg = MixMessage.init(msg, codec)
serialized = mixMsg.serialize()
if serialized.len > DataSize:
return err(("message size exceeds maximum payload size", "INVALID_SIZE"))
let
paddedMsg = addPadding(serialized, peerId)
serializedMsgChunk = paddedMsg.serialize()
ok(serializedMsgChunk)
## Represents the final target of a mixnet message.
## Contains the peer id and multiaddress of the destination node.
type MixDestination* = object
peerId: PeerId
address: MultiAddress
proc init*(T: typedesc[MixDestination], peerId: PeerId, address: MultiAddress): T =
## Initializes a destination object with the given peer id and multiaddress.
T(peerId: peerId, address: address)
proc `$`*(d: MixDestination): string =
$d.address & "/p2p/" & $d.peerId
proc anonymizeLocalProtocolSend*(
mixProto: MixProtocol,
incoming: AsyncQueue[seq[byte]],
msg: seq[byte],
codec: string,
destination: MixDestination,
numSurbs: uint8,
) {.async: (raises: [CancelledError, LPStreamError]).} =
mix_messages_recvd.inc(labelValues = ["Entry"])
var
multiAddrs: seq[MultiAddress] = @[]
publicKeys: seq[FieldElement] = @[]
hop: seq[Hop] = @[]
delay: seq[seq[byte]] = @[]
exitPeerId: PeerId
# Select L mix nodes at random
let numMixNodes = mixProto.pubNodeInfo.len
var numAvailableNodes = numMixNodes
debug "Destination data", destination
if mixProto.pubNodeInfo.hasKey(destination.peerId):
numAvailableNodes = numMixNodes - 1
if numAvailableNodes < PathLength:
error "No. of public mix nodes less than path length.",
numMixNodes = numAvailableNodes, pathLength = PathLength
mix_messages_error.inc(labelValues = ["Entry", "LOW_MIX_POOL"])
return
# Skip the destination peer
var pubNodeInfoKeys =
mixProto.pubNodeInfo.keys.toSeq().filterIt(it != destination.peerId)
var availableIndices = toSeq(0 ..< pubNodeInfoKeys.len)
var i = 0
while i < PathLength:
let randomIndexPosition = cryptoRandomInt(mixProto.rng, availableIndices.len).valueOr:
error "Failed to generate random number", err = error
mix_messages_error.inc(labelValues = ["Entry", "NON_RECOVERABLE"])
return
let selectedIndex = availableIndices[randomIndexPosition]
let randPeerId = pubNodeInfoKeys[selectedIndex]
availableIndices.del(randomIndexPosition)
# Last hop will be the exit node that will forward the request
if i == PathLength - 1:
exitPeerId = randPeerId
debug "Selected mix node: ", indexInPath = i, peerId = randPeerId
# Extract multiaddress, mix public key, and hop
let (peerId, multiAddr, mixPubKey, _) =
mixProto.pubNodeInfo.getOrDefault(randPeerId).get()
multiAddrs.add(multiAddr)
publicKeys.add(mixPubKey)
let multiAddrBytes = multiAddrToBytes(peerId, multiAddr).valueOr:
error "Failed to convert multiaddress to bytes", err = error
mix_messages_error.inc(labelValues = ["Entry", "INVALID_MIX_INFO"])
# TODO: should we skip and pick a different node here?
return
hop.add(Hop.init(multiAddrBytes))
# Compute delay
let delayMillisec =
if i != PathLength - 1:
cryptoRandomInt(mixProto.rng, 3).valueOr:
error "Failed to generate random number", err = error
mix_messages_error.inc(labelValues = ["Entry", "NON_RECOVERABLE"])
return
else:
0 # Last hop does not require a delay
delay.add(@(delayMillisec.uint16.toBytesBE()))
i = i + 1
# Encode destination
let destAddrBytes = multiAddrToBytes(destination.peerId, destination.address).valueOr:
error "Failed to convert multiaddress to bytes", err = error
mix_messages_error.inc(labelValues = ["Entry", "INVALID_DEST"])
return
let destHop = Hop.init(destAddrBytes)
let message = buildMessage(msg, codec, mixProto.mixNodeInfo.peerId).valueOr:
error "Error building message", err = error[0]
mix_messages_error.inc(labelValues = ["Entry", error[1]])
return
# Wrap in Sphinx packet
let sphinxPacket = wrapInSphinxPacket(message, publicKeys, delay, hop, destHop).valueOr:
error "Failed to wrap in sphinx packet", err = error
mix_messages_error.inc(labelValues = ["Entry", "NON_RECOVERABLE"])
return
# Send the wrapped message to the first mix node in the selected path
await mixProto.sendPacket(multiAddrs[0], sphinxPacket, "Entry")
proc init*(
mixProto: MixProtocol,
mixNodeInfo: MixNodeInfo,
pubNodeInfo: Table[PeerId, MixPubInfo],
switch: Switch,
tagManager: TagManager = TagManager.new(),
rng: ref HmacDrbgContext = newRng(),
) =
mixProto.mixNodeInfo = mixNodeInfo
mixProto.pubNodeInfo = pubNodeInfo
mixProto.switch = switch
mixProto.tagManager = tagManager
mixProto.exitLayer = ExitLayer.init(switch)
mixProto.codecs = @[MixProtocolID]
mixProto.rng = rng
mixProto.handler = proc(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
await mixProto.handleMixNodeConnection(conn)
except LPStreamError as e:
debug "Stream error", conn = conn, err = e.msg
proc new*(
T: typedesc[MixProtocol],
mixNodeInfo: MixNodeInfo,
pubNodeInfo: Table[PeerId, MixPubInfo],
switch: Switch,
tagManager: TagManager = TagManager.new(),
rng: ref HmacDrbgContext = newRng(),
): T =
let mixProto = new(T)
mixProto.init(mixNodeInfo, pubNodeInfo, switch, tagManager, rng)
mixProto
proc new*(
T: typedesc[MixProtocol],
index, numNodes: int,
switch: Switch,
nodeFolderInfoPath: string = ".",
rng: ref HmacDrbgContext = newRng(),
): Result[T, string] =
## Constructs a new `MixProtocol` instance for the mix node at `index`,
## loading its private info from `nodeInfo` and the public info of all other nodes from `pubInfo`.
let mixNodeInfo = MixNodeInfo.readFromFile(index, nodeFolderInfoPath / fmt"nodeInfo").valueOr:
return err("Failed to load mix node info for index " & $index & " - err: " & error)
let pubNodeInfo = loadAllButIndexMixPubInfo(
index, numNodes, nodeFolderInfoPath / fmt"pubInfo"
).valueOr:
return err("Failed to load mix pub info for index " & $index & " - err: " & error)
let mixProto =
MixProtocol.new(mixNodeInfo, pubNodeInfo, switch, TagManager.new(), rng)
return ok(mixProto)
proc setNodePool*(
mixProtocol: MixProtocol, mixNodeTable: Table[PeerId, MixPubInfo]
) {.gcsafe, raises: [].} =
mixProtocol.pubNodeInfo = mixNodeTable
proc getNodePoolSize*(mixProtocol: MixProtocol): int {.gcsafe, raises: [].} =
mixProtocol.pubNodeInfo.len
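A hedged wiring sketch (assumes a fully configured Switch and node/pub info already on disk, e.g. from mix_node's writeToFile): load the protocol for node `index` and mount it so the switch answers /mix/1.0.0 streams.
proc startMixNode(
    index, numNodes: int, switch: Switch
): Result[MixProtocol, string] =
  let mixProto = ?MixProtocol.new(index, numNodes, switch)
  switch.mount(mixProto) # serve MixProtocolID alongside the node's other protocols
  ok(mixProto)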

View File

@@ -0,0 +1,95 @@
import results, sugar, sequtils, strutils
import ./serialization
import stew/endians2
import ../../[multicodec, multiaddress, peerid]
const
PeerIdByteLen = 39 # ed25519 and secp256k1 multihash length
MinMultiAddrComponentLen = 2
MaxMultiAddrComponentLen = 5 # quic + circuit relay
# TODO: Add support for ipv6, dns, dns4, ws/wss/sni
proc multiAddrToBytes*(
peerId: PeerId, multiAddr: MultiAddress
): Result[seq[byte], string] {.raises: [].} =
var ma = multiAddr
let sma = multiAddr.items().toSeq()
var res: seq[byte] = @[]
if not (sma.len >= MinMultiAddrComponentLen and sma.len <= MaxMultiAddrComponentLen):
return err("Invalid multiaddress format")
# Only IPV4 is supported
let isCircuitRelay = ?ma.contains(multiCodec("p2p-circuit"))
let baseP2PEndIdx = if isCircuitRelay: 3 else: 1
let baseAddr =
try:
if sma.len - 1 - baseP2PEndIdx < 0:
return err("Invalid multiaddress format")
sma[0 .. sma.len - baseP2PEndIdx].mapIt(it.tryGet()).foldl(a & b)
except LPError as exc:
return err("Could not obtain base address: " & exc.msg)
let isQuic = QUIC_V1_IP.match(baseAddr)
let isTCP = TCP_IP.match(baseAddr)
if not (isTCP or isQuic):
return err("Unsupported protocol")
# 4 bytes for the IP
let ip = ?ma.getPart(multiCodec("ip4")).value().protoArgument()
res.add(ip)
var port: string
if isQuic:
res.add(1.byte) # Protocol byte
let udpPortPart = ma.getPart(multiCodec("udp")).value()
port = $udpPortPart
elif isTCP:
res.add(0.byte) # Protocol byte
let tcpPortPart = ma.getPart(multiCodec("tcp")).value()
port = $tcpPortPart
let portNum = ?catch(port.split('/')[2].parseInt()).mapErr(x => x.msg)
res.add(portNum.uint16.toBytesBE())
if isCircuitRelay:
let relayIdPart = ?ma.getPart(multiCodec("p2p"))
let relayId = ?PeerId.init(?relayIdPart.protoArgument()).mapErr(x => $x)
if relayId.data.len != PeerIdByteLen:
return err("unsupported PeerId key type")
res.add(relayId.data)
# PeerID (39 bytes)
res.add(peerId.data)
if res.len > AddrSize:
return err("Address must be <= " & $AddrSize & " bytes")
return ok(res & newSeq[byte](AddrSize - res.len))
proc bytesToMultiAddr*(bytes: openArray[byte]): MaResult[(PeerId, MultiAddress)] =
if bytes.len != AddrSize:
return err("Address must be exactly " & $AddrSize & " bytes")
let
ip = bytes[0 .. 3].mapIt($it).join(".")
protocol = if bytes[4] == 0: "tcp" else: "udp"
quic = if bytes[4] == 1: "/quic-v1" else: ""
port = uint16.fromBytesBE(bytes[5 .. 6])
# peerId1 is the circuit relay server's peer id for p2p-circuit addresses; otherwise it is the node's own peer id
peerId1Bytes = bytes[7 ..< 46]
peerId2Bytes = bytes[7 + PeerIdByteLen ..< 7 + (PeerIdByteLen * 2)]
let ma = ?MultiAddress.init("/ip4/" & ip & "/" & protocol & "/" & $port & quic)
return
if peerId2Bytes != newSeq[byte](PeerIdByteLen):
# Has circuit relay address
let relayIdMa = ?MultiAddress.init(multiCodec("p2p"), peerId1Bytes)
let p2pCircuitMa = ?MultiAddress.init(multiCodec("p2p-circuit"))
let peerId = ?PeerId.init(peerId2Bytes).mapErr(x => $x)
ok((peerId, ?(ma & relayIdMa & p2pCircuitMa).catch().mapErr(x => x.msg)))
else:
let peerId = ?PeerId.init(peerId1Bytes).mapErr(x => $x)
ok((peerId, ma))
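A round-trip sketch for the fixed-size encoding (PeerId.random is an assumed helper from the imported peerid module; TCP case shown):
when isMainModule:
  let peerId = PeerId.random().expect("peer id")
  let ma = MultiAddress.init("/ip4/127.0.0.1/tcp/4242").expect("multiaddress")
  let encoded = multiAddrToBytes(peerId, ma).expect("encode")
  doAssert encoded.len == AddrSize # always zero-padded to AddrSize
  let (decodedPeerId, decodedMa) = bytesToMultiAddr(encoded).expect("decode")
  doAssert decodedPeerId == peerId and decodedMa == ma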

View File

@@ -0,0 +1,29 @@
import std/endians, times
import ../../peerid
import ./crypto
import ../../utils/sequninit
type SeqNo* = uint32
proc init*(T: typedesc[SeqNo], data: seq[byte]): T =
var seqNo: SeqNo = 0
let hash = sha256_hash(data)
for i in 0 .. 3:
seqNo = seqNo or (uint32(hash[i]) shl (8 * (3 - i)))
return seqNo
proc init*(T: typedesc[SeqNo], peerId: PeerId): T =
SeqNo.init(peerId.data)
proc generate*(seqNo: var SeqNo, messageBytes: seq[byte]) =
let currentTime = getTime().toUnix() * 1000 # millisecond timestamp
var currentTimeBytes = newSeqUninit[byte](8)
bigEndian64(addr currentTimeBytes[0], unsafeAddr currentTime)
let s = SeqNo.init(messageBytes & currentTimeBytes)
seqNo = (seqNo + s) mod high(uint32)
proc inc*(seqNo: var SeqNo) =
seqNo = (seqNo + 1) mod high(uint32)
# TODO: Manage sequence no. overflow in a way that it does not affect re-assembly
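A tiny sketch of the intended use (the results import is only for `expect`): seed the counter from a peer id, fold in a message, then advance per chunk.
when isMainModule:
  import results
  var seqNo = SeqNo.init(PeerId.random().expect("peer id"))
  seqNo.generate(@[1.byte, 2, 3]) # mixes message bytes and a ms timestamp in
  let before = seqNo
  seqNo.inc()
  doAssert seqNo != before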

View File

@@ -0,0 +1,236 @@
import results
import std/sequtils
import ../../utility
const
k* = 16 # Security parameter
r* = 5 # Maximum path length
t* = 6 # t*k is the combined length of next hop address and delay
AlphaSize* = 32 # Group element
BetaSize* = ((r * (t + 1)) + 1) * k # bytes
GammaSize* = 16 # Output of HMAC-SHA-256, truncated to 16 bytes
HeaderSize* = AlphaSize + BetaSize + GammaSize # Total header size
DelaySize* = 2 # Delay size
AddrSize* = (t * k) - DelaySize # Address size
PacketSize* = 4608 # Total packet size (from spec)
MessageSize* = PacketSize - HeaderSize - k # Size of the message itself
PayloadSize* = MessageSize + k # Total payload size
SurbSize* = HeaderSize + k + AddrSize
# Size of a surb packet inside the message payload
SurbLenSize* = 1 # Size of the field storing the number of surbs
SurbIdLen* = k # Size of the identifier used when sending a message with surb
DefaultSurbs* = uint8(4) # Default number of SURBs to send
type Header* = object
Alpha*: seq[byte]
Beta*: seq[byte]
Gamma*: seq[byte]
proc init*(
T: typedesc[Header], alpha: seq[byte], beta: seq[byte], gamma: seq[byte]
): T =
return T(Alpha: alpha, Beta: beta, Gamma: gamma)
proc get*(header: Header): (seq[byte], seq[byte], seq[byte]) =
(header.Alpha, header.Beta, header.Gamma)
proc serialize*(header: Header): seq[byte] =
doAssert header.Alpha.len == AlphaSize,
"Alpha must be exactly " & $AlphaSize & " bytes"
doAssert header.Beta.len == BetaSize, "Beta must be exactly " & $BetaSize & " bytes"
doAssert header.Gamma.len == GammaSize,
"Gamma must be exactly " & $GammaSize & " bytes"
return header.Alpha & header.Beta & header.Gamma
proc deserialize*(
T: typedesc[Header], serializedHeader: openArray[byte]
): Result[T, string] =
if len(serializedHeader) < HeaderSize:
return err("Serialized header must be exactly " & $HeaderSize & " bytes")
let header = Header(
Alpha: serializedHeader[0 .. (AlphaSize - 1)],
Beta: serializedHeader[AlphaSize .. (AlphaSize + BetaSize - 1)],
Gamma: serializedHeader[(AlphaSize + BetaSize) .. (HeaderSize - 1)],
)
ok(header)
type Message* = seq[byte]
proc serialize*(message: Message): seq[byte] =
doAssert message.len() == MessageSize,
"Message must be exactly " & $(MessageSize) & " bytes"
var res = newSeq[byte](k) # Prepend k bytes of zero padding
res.add(message)
return res
proc deserialize*(
T: typedesc[Message], serializedMessage: openArray[byte]
): Result[T, string] =
if len(serializedMessage) != PayloadSize:
return err("Serialized message must be exactly " & $PayloadSize & " bytes")
return ok(serializedMessage[k ..^ 1])
type Hop* = object
MultiAddress: seq[byte]
proc init*(T: typedesc[Hop], multiAddress: seq[byte]): T =
T(
MultiAddress:
if multiAddress == newSeq[byte](AddrSize):
@[]
else:
multiAddress
)
proc get*(hop: Hop): seq[byte] =
return hop.MultiAddress
proc serialize*(hop: Hop): seq[byte] =
if hop.MultiAddress.len == 0:
return newSeq[byte](AddrSize)
doAssert len(hop.MultiAddress) == AddrSize,
"MultiAddress must be exactly " & $AddrSize & " bytes"
hop.MultiAddress
proc deserialize*(T: typedesc[Hop], data: openArray[byte]): Result[T, string] =
if len(data) != AddrSize:
return err("MultiAddress must be exactly " & $AddrSize & " bytes")
ok(
T(
MultiAddress:
if data == newSeq[byte](AddrSize):
@[]
else:
@data
)
)
type RoutingInfo* = object
Addr: Hop
Delay: seq[byte]
Gamma: seq[byte]
Beta: seq[byte]
proc init*(
T: typedesc[RoutingInfo],
address: Hop,
delay: seq[byte],
gamma: seq[byte],
beta: seq[byte],
): T =
return T(Addr: address, Delay: delay, Gamma: gamma, Beta: beta)
proc getRoutingInfo*(info: RoutingInfo): (Hop, seq[byte], seq[byte], seq[byte]) =
(info.Addr, info.Delay, info.Gamma, info.Beta)
proc serialize*(info: RoutingInfo): seq[byte] =
doAssert info.Delay.len() == DelaySize,
"Delay must be exactly " & $DelaySize & " bytes"
doAssert info.Gamma.len() == GammaSize,
"Gamma must be exactly " & $GammaSize & " bytes"
let expectedBetaLen = ((r * (t + 1)) - t) * k
doAssert info.Beta.len() == expectedBetaLen,
"Beta must be exactly " & $expectedBetaLen & " bytes"
let addrBytes = info.Addr.serialize()
return addrBytes & info.Delay & info.Gamma & info.Beta
proc readBytes(
data: openArray[byte], offset: var int, readSize: Opt[int] = Opt.none(int)
): Result[seq[byte], string] =
if data.len < offset:
return err("not enough data")
readSize.withValue(size):
if data.len < offset + size:
return err("not enough data")
let slice = data[offset ..< offset + size]
offset += size
return ok(slice)
let slice = data[offset .. ^1]
offset = data.len
return ok(slice)
proc deserialize*(T: typedesc[RoutingInfo], data: openArray[byte]): Result[T, string] =
if len(data) != BetaSize + ((t + 1) * k):
return err("Data must be exactly " & $(BetaSize + ((t + 1) * k)) & " bytes")
let hop = Hop.deserialize(data[0 .. AddrSize - 1]).valueOr:
return err("Deserialize hop error: " & error)
var offset: int = AddrSize
return ok(
RoutingInfo(
Addr: hop,
Delay: ?data.readBytes(offset, Opt.some(DelaySize)),
Gamma: ?data.readBytes(offset, Opt.some(GammaSize)),
Beta: ?data.readBytes(offset, Opt.some(BetaSize)),
)
)
type SphinxPacket* = object
Hdr*: Header
Payload*: seq[byte]
proc init*(T: typedesc[SphinxPacket], header: Header, payload: seq[byte]): T =
T(Hdr: header, Payload: payload)
proc get*(packet: SphinxPacket): (Header, seq[byte]) =
(packet.Hdr, packet.Payload)
proc serialize*(packet: SphinxPacket): seq[byte] =
let headerBytes = packet.Hdr.serialize()
return headerBytes & packet.Payload
proc deserialize*(T: typedesc[SphinxPacket], data: openArray[byte]): Result[T, string] =
if len(data) != PacketSize:
return err("Sphinx packet size must be exactly " & $PacketSize & " bytes")
let header = ?Header.deserialize(data)
return ok(SphinxPacket(Hdr: header, Payload: data[HeaderSize ..^ 1]))
type
Secret* = seq[seq[byte]]
Key* = seq[byte]
SURBIdentifier* = array[SurbIdLen, byte]
SURB* = object
hop*: Hop
header*: Header
key*: Key
secret*: Opt[Secret]
proc serializeMessageWithSURBs*(
msg: seq[byte], surbs: seq[SURB]
): Result[seq[byte], string] =
if surbs.len > (MessageSize - SurbLenSize - 1) div SurbSize:
return err("too many SURBs")
let surbBytes =
surbs.mapIt(it.hop.serialize() & it.header.serialize() & it.key).concat()
ok(byte(surbs.len) & surbBytes & msg)
proc extractSURBs*(msg: seq[byte]): Result[(seq[SURB], seq[byte]), string] =
var offset = 0
let surbsLenBytes = ?readBytes(msg, offset, Opt.some(1))
let surbsLen = int(surbsLenBytes[0])
if surbsLen > (MessageSize - SurbLenSize - 1) div SurbSize:
return err("too many SURBs")
var surbs: seq[SURB] = newSeq[SURB](surbsLen)
for i in 0 ..< surbsLen:
let hopBytes = ?readBytes(msg, offset, Opt.some(AddrSize))
let headerBytes = ?readBytes(msg, offset, Opt.some(HeaderSize))
surbs[i].hop = ?Hop.deserialize(hopBytes)
surbs[i].header = ?Header.deserialize(headerBytes)
surbs[i].key = ?readBytes(msg, offset, Opt.some(k))
let msg = ?readBytes(msg, offset)
return ok((surbs, msg))
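A quick shape check for the header codec (zero-filled fields, purely illustrative): serialization is fixed-size and deserialization recovers the same fields.
when isMainModule:
  let hdr = Header.init(
    newSeq[byte](AlphaSize), newSeq[byte](BetaSize), newSeq[byte](GammaSize)
  )
  doAssert hdr.serialize().len == HeaderSize
  let decoded = Header.deserialize(hdr.serialize()).expect("round trip")
  doAssert decoded.get() == hdr.get()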

View File

@@ -0,0 +1,371 @@
import results, sequtils, stew/endians2
import ./[crypto, curve25519, serialization, tag_manager]
import ../../crypto/crypto
import ../../utils/sequninit
const PathLength* = 3 # Path length (L)
const PaddingLength = (((t + 1) * (r - PathLength)) + 1) * k
type ProcessingStatus* = enum
Exit
Intermediate
Reply
Duplicate
InvalidMAC
proc computeAlpha(
publicKeys: openArray[FieldElement]
): Result[(seq[byte], seq[seq[byte]]), string] =
## Compute alpha, an ephemeral public value. Each mix node uses its private key and
## alpha to derive a shared session key for that hop.
## This session key is used to decrypt and process one layer of the packet.
if publicKeys.len == 0:
return err("No public keys provided")
var
s: seq[seq[byte]] = newSeq[seq[byte]](publicKeys.len)
alpha_0: seq[byte]
alpha: FieldElement
secret: FieldElement
blinders: seq[FieldElement] = @[]
let x = generateRandomFieldElement().valueOr:
return err("Generate field element error: " & error)
blinders.add(x)
for i in 0 ..< publicKeys.len:
if publicKeys[i].len != FieldElementSize:
return err("Invalid public key size: " & $i)
# Compute alpha, shared secret, and blinder
if i == 0:
alpha = multiplyBasePointWithScalars([blinders[i]]).valueOr:
return err("Multiply base point with scalars error: " & error)
alpha_0 = fieldElementToBytes(alpha)
else:
alpha = multiplyPointWithScalars(alpha, [blinders[i]])
# TODO: Optimize point multiplication by multiplying scalars first
secret = multiplyPointWithScalars(publicKeys[i], blinders)
let blinder = bytesToFieldElement(
sha256_hash(fieldElementToBytes(alpha) & fieldElementToBytes(secret))
).valueOr:
return err("Error in bytes to field element conversion: " & error)
blinders.add(blinder)
s[i] = fieldElementToBytes(secret)
return ok((alpha_0, s))
proc deriveKeyMaterial(keyName: string, s: seq[byte]): seq[byte] =
@(keyName.toOpenArrayByte(0, keyName.high)) & s
proc computeFillerStrings(s: seq[seq[byte]]): Result[seq[byte], string] =
var filler: seq[byte] = @[] # Start with an empty filler string
for i in 1 ..< s.len:
# Derive AES key and IV
let
aes_key = deriveKeyMaterial("aes_key", s[i - 1]).kdf()
iv = deriveKeyMaterial("iv", s[i - 1]).kdf()
# Compute filler string
let
fillerLength = (t + 1) * k
zeroPadding = newSeq[byte](fillerLength)
filler = aes_ctr_start_index(
aes_key, iv, filler & zeroPadding, (((t + 1) * (r - i)) + t + 2) * k
)
return ok(filler)
proc computeBetaGamma(
s: seq[seq[byte]],
hop: openArray[Hop],
delay: openArray[seq[byte]],
destHop: Hop,
id: SURBIdentifier,
): Result[tuple[beta: seq[byte], gamma: seq[byte]], string] =
## Calculates the following elements:
## - Beta: The nested encrypted routing information. It encodes the next hop address, the forwarding delay, integrity check Gamma for the next hop, and the Beta for subsequent hops.
## - Gamma: A message authentication code computed over Beta using the session key derived from Alpha. It ensures header integrity at each hop.
let sLen = s.len
var
beta: seq[byte]
gamma: seq[byte]
# Compute filler strings
let filler = computeFillerStrings(s).valueOr:
return err("Error in filler generation: " & error)
for i in countdown(sLen - 1, 0):
# Derive AES key, MAC key, and IV
let
beta_aes_key = deriveKeyMaterial("aes_key", s[i]).kdf()
mac_key = deriveKeyMaterial("mac_key", s[i]).kdf()
beta_iv = deriveKeyMaterial("iv", s[i]).kdf()
# Compute Beta and Gamma
if i == sLen - 1:
let destBytes = destHop.serialize()
let destPadding = destBytes & delay[i] & @id & newSeq[byte](PaddingLength)
let aes = aes_ctr(beta_aes_key, beta_iv, destPadding)
beta = aes & filler
else:
let routingInfo = RoutingInfo.init(
hop[i + 1], delay[i], gamma, beta[0 .. (((r * (t + 1)) - t) * k) - 1]
)
let serializedRoutingInfo = routingInfo.serialize()
beta = aes_ctr(beta_aes_key, beta_iv, serializedRoutingInfo)
gamma = hmac(mac_key, beta).toSeq()
return ok((beta: beta, gamma: gamma))
proc computeDelta(s: seq[seq[byte]], msg: Message): Result[seq[byte], string] =
let sLen = s.len
var delta: seq[byte]
for i in countdown(sLen - 1, 0):
# Derive AES key and IV
let
delta_aes_key = deriveKeyMaterial("delta_aes_key", s[i]).kdf()
delta_iv = deriveKeyMaterial("delta_iv", s[i]).kdf()
# Compute Delta
if i == sLen - 1:
let serializedMsg = msg.serialize()
delta = aes_ctr(delta_aes_key, delta_iv, serializedMsg)
else:
delta = aes_ctr(delta_aes_key, delta_iv, delta)
return ok(delta)
proc createSURB*(
publicKeys: openArray[FieldElement],
delay: openArray[seq[byte]],
hops: openArray[Hop],
id: SURBIdentifier,
rng: ref HmacDrbgContext = newRng(),
): Result[SURB, string] =
if id == default(SURBIdentifier):
return err("id should be initialized")
# Compute alpha and shared secrets
let (alpha_0, s) = computeAlpha(publicKeys).valueOr:
return err("Error in alpha generation: " & error)
# Compute beta and gamma
let (beta_0, gamma_0) = computeBetaGamma(s, hops, delay, Hop(), id).valueOr:
return err("Error in beta and gamma generation: " & error)
# Generate key
var key = newSeqUninit[byte](k)
rng[].generate(key)
return ok(
SURB(
hop: hops[0],
header: Header.init(alpha_0, beta_0, gamma_0),
secret: Opt.some(s),
key: key,
)
)
proc useSURB*(surb: SURB, msg: Message): SphinxPacket =
# Derive AES key and IV
let
delta_aes_key = deriveKeyMaterial("delta_aes_key", surb.key).kdf()
delta_iv = deriveKeyMaterial("delta_iv", surb.key).kdf()
# Compute Delta
let serializedMsg = msg.serialize()
let delta = aes_ctr(delta_aes_key, delta_iv, serializedMsg)
return SphinxPacket.init(surb.header, delta)
proc processReply*(
key: seq[byte], s: seq[seq[byte]], delta_prime: seq[byte]
): Result[seq[byte], string] =
var delta = delta_prime[0 ..^ 1]
var key_prime = key
for i in 0 .. s.len:
if i != 0:
key_prime = s[i - 1]
let
delta_aes_key = deriveKeyMaterial("delta_aes_key", key_prime).kdf()
delta_iv = deriveKeyMaterial("delta_iv", key_prime).kdf()
delta = aes_ctr(delta_aes_key, delta_iv, delta)
let deserializeMsg = Message.deserialize(delta).valueOr:
return err("Message deserialization error: " & error)
return ok(deserializeMsg)
proc wrapInSphinxPacket*(
msg: Message,
publicKeys: openArray[FieldElement],
delay: openArray[seq[byte]],
hop: openArray[Hop],
destHop: Hop,
): Result[seq[byte], string] =
# Compute alpha and shared secrets
let (alpha_0, s) = computeAlpha(publicKeys).valueOr:
return err("Error in alpha generation: " & error)
# Compute beta and gamma
let (beta_0, gamma_0) = computeBetaGamma(
s, hop, delay, destHop, default(SURBIdentifier)
).valueOr:
return err("Error in beta and gamma generation: " & error)
# Compute delta
let delta_0 = computeDelta(s, msg).valueOr:
return err("Error in delta generation: " & error)
# Serialize sphinx packet
let sphinxPacket = SphinxPacket.init(Header.init(alpha_0, beta_0, gamma_0), delta_0)
let serialized = sphinxPacket.serialize()
return ok(serialized)
type ProcessedSphinxPacket* = object
case status*: ProcessingStatus
of ProcessingStatus.Exit:
destination*: Hop
messageChunk*: seq[byte]
of ProcessingStatus.Intermediate:
nextHop*: Hop
delayMs*: int
serializedSphinxPacket*: seq[byte]
of ProcessingStatus.Reply:
id*: SURBIdentifier
delta_prime*: seq[byte]
else:
discard
proc isZeros(data: seq[byte], startIdx: int, endIdx: int): bool =
doAssert 0 <= startIdx and endIdx < data.len and startIdx <= endIdx
for i in startIdx .. endIdx:
if data[i] != 0:
return false
return true
template extractSurbId(data: seq[byte]): SURBIdentifier =
const startIndex = t * k
const endIndex = startIndex + SurbIdLen - 1
doAssert data.len > startIndex and endIndex < data.len
var id: SURBIdentifier
copyMem(addr id[0], addr data[startIndex], SurbIdLen)
id
proc processSphinxPacket*(
sphinxPacket: SphinxPacket, privateKey: FieldElement, tm: var TagManager
): Result[ProcessedSphinxPacket, string] =
let
(header, payload) = sphinxPacket.get()
(alpha, beta, gamma) = header.get()
# Compute shared secret
let alphaFE = bytesToFieldElement(alpha).valueOr:
return err("Error in bytes to field element conversion: " & error)
let
s = multiplyPointWithScalars(alphaFE, [privateKey])
sBytes = fieldElementToBytes(s)
# Check if the tag has been seen
if isTagSeen(tm, s):
return ok(ProcessedSphinxPacket(status: Duplicate))
# Compute MAC
let mac_key = deriveKeyMaterial("mac_key", sBytes).kdf()
if not (hmac(mac_key, beta).toSeq() == gamma):
# If MAC not verified
return ok(ProcessedSphinxPacket(status: InvalidMAC))
# Store the tag as seen
addTag(tm, s)
# Derive AES key and IV
let
beta_aes_key = deriveKeyMaterial("aes_key", sBytes).kdf()
beta_iv = deriveKeyMaterial("iv", sBytes).kdf()
delta_aes_key = deriveKeyMaterial("delta_aes_key", sBytes).kdf()
delta_iv = deriveKeyMaterial("delta_iv", sBytes).kdf()
# Compute delta
let delta_prime = aes_ctr(delta_aes_key, delta_iv, payload)
# Compute B
let zeroPadding = newSeq[byte]((t + 1) * k)
let B = aes_ctr(beta_aes_key, beta_iv, beta & zeroPadding)
# Check if B has the required prefix for the original message
if B.isZeros((t + 1) * k, ((t + 1) * k) + PaddingLength - 1):
let hop = Hop.deserialize(B[0 .. AddrSize - 1]).valueOr:
return err(error)
if B.isZeros(AddrSize, ((t + 1) * k) - 1):
if delta_prime.isZeros(0, k - 1):
let msg = Message.deserialize(delta_prime).valueOr:
return err("Message deserialization error: " & error)
return ok(
ProcessedSphinxPacket(
status: Exit, destination: hop, messageChunk: msg[0 .. MessageSize - 1]
)
)
else:
return err("delta_prime should be all zeros")
elif B.isZeros(0, (t * k) - 1):
return ok(
ProcessedSphinxPacket(
status: Reply, id: B.extractSurbId(), delta_prime: delta_prime
)
)
else:
# Extract routing information from B
let routingInfo = RoutingInfo.deserialize(B).valueOr:
return err("Routing info deserialization error: " & error)
let (address, delay, gamma_prime, beta_prime) = routingInfo.getRoutingInfo()
# Compute alpha
let blinder = bytesToFieldElement(sha256_hash(alpha & sBytes)).valueOr:
return err("Error in bytes to field element conversion: " & error)
let alphaFE = bytesToFieldElement(alpha).valueOr:
return err("Error in bytes to field element conversion: " & error)
let alpha_prime = multiplyPointWithScalars(alphaFE, [blinder])
# Serialize sphinx packet
let sphinxPkt = SphinxPacket.init(
Header.init(fieldElementToBytes(alpha_prime), beta_prime, gamma_prime),
delta_prime,
)
return ok(
ProcessedSphinxPacket(
status: Intermediate,
nextHop: address,
delayMs: uint16.fromBytes(delay).int,
serializedSphinxPacket: sphinxPkt.serialize(),
)
)
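An end-to-end sketch (dummy single-byte hop addresses; a real caller encodes multiaddresses as in mix_protocol): wrap a message for a PathLength-hop route, then process it as the first hop and observe an Intermediate result.
when isMainModule:
  var tm = TagManager.new()
  var pubKeys, privKeys: seq[FieldElement]
  var hops: seq[Hop]
  var delays: seq[seq[byte]]
  for i in 0 ..< PathLength:
    let (priv, pub) = generateKeyPair().expect("keygen")
    privKeys.add(priv)
    pubKeys.add(pub)
    var hopAddr = newSeq[byte](AddrSize)
    hopAddr[0] = byte(i + 1) # dummy non-zero address so it is not mistaken for a reply
    hops.add(Hop.init(hopAddr))
    delays.add(@(1'u16.toBytesBE()))
  let msg: Message = newSeq[byte](MessageSize)
  let packetBytes = wrapInSphinxPacket(msg, pubKeys, delays, hops, Hop()).expect("wrap")
  let packet = SphinxPacket.deserialize(packetBytes).expect("parse")
  let processed = processSphinxPacket(packet, privKeys[0], tm).expect("process")
  doAssert processed.status == Intermediate # first hop forwards, after its delay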

View File

@@ -0,0 +1,28 @@
import tables, locks
import ./curve25519
type TagManager* = ref object
lock: Lock
seenTags: Table[FieldElement, bool]
proc new*(T: typedesc[TagManager]): T =
let tm = T()
tm.seenTags = initTable[FieldElement, bool]()
initLock(tm.lock)
return tm
proc addTag*(tm: TagManager, tag: FieldElement) {.gcsafe.} =
withLock tm.lock:
tm.seenTags[tag] = true
proc isTagSeen*(tm: TagManager, tag: FieldElement): bool {.gcsafe.} =
withLock tm.lock:
return tm.seenTags.contains(tag)
proc removeTag*(tm: TagManager, tag: FieldElement) {.gcsafe.} =
withLock tm.lock:
tm.seenTags.del(tag)
proc clearTags*(tm: TagManager) {.gcsafe.} =
withLock tm.lock:
tm.seenTags.clear()
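A replay-protection sketch (results is imported here only for `expect`; generateRandomFieldElement comes from the curve25519 import above):
when isMainModule:
  import results
  let tm = TagManager.new()
  let tag = generateRandomFieldElement().expect("random tag")
  doAssert not tm.isTagSeen(tag)
  tm.addTag(tag)
  doAssert tm.isTagSeen(tag)
  tm.removeTag(tag)
  doAssert not tm.isTagSeen(tag)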

View File

@@ -12,8 +12,6 @@
import chronos, chronicles, sequtils
import stew/endians2
import ./core, ../../stream/connection
when defined(libp2p_quic_support):
import ../../transports/quictransport
logScope:
topics = "libp2p perf"
@@ -59,13 +57,8 @@ proc perf*(
statsCopy.uploadBytes += toWrite.uint
p.stats = statsCopy
# Close connection after writing for TCP, but not for QUIC
when defined(libp2p_quic_support):
if not (conn of QuicStream):
await conn.close()
# For QUIC streams, don't close yet - let server manage lifecycle
else:
await conn.close()
# Close write side of the stream (half-close) to signal EOF to server
await conn.closeWrite()
size = sizeToRead
@@ -80,10 +73,8 @@ proc perf*(
statsCopy.downloadBytes += toRead.uint
p.stats = statsCopy
# Close QUIC connections after read phase
when defined(libp2p_quic_support):
if conn of QuicStream:
await conn.close()
# Close the connection after reading
await conn.close()
except CancelledError as e:
raise e
except LPStreamError as e:

View File

@@ -14,8 +14,6 @@
import chronos, chronicles
import stew/endians2
import ./core, ../protocol, ../../stream/connection, ../../utility
when defined(libp2p_quic_support):
import ../../transports/quictransport
export chronicles, connection
@@ -26,50 +24,29 @@ type Perf* = ref object of LPProtocol
proc new*(T: typedesc[Perf]): T {.public.} =
var p = T()
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
var bytesRead = 0
try:
trace "Received benchmark performance check", conn
var
sizeBuffer: array[8, byte]
size: uint64
await conn.readExactly(addr sizeBuffer[0], 8)
size = uint64.fromBytesBE(sizeBuffer)
var toReadBuffer: array[PerfSize, byte]
try:
# Different handling for QUIC vs TCP streams
when defined(libp2p_quic_support):
if conn of QuicStream:
# QUIC needs timeout-based approach to detect end of upload
while not conn.atEof:
let readFut = conn.readOnce(addr toReadBuffer[0], PerfSize)
let read = readFut.read()
if read == 0:
break
bytesRead += read
else:
# TCP streams handle EOF properly
while true:
let read = await conn.readOnce(addr toReadBuffer[0], PerfSize)
if read == 0:
break
bytesRead += read
else:
# TCP streams handle EOF properly
while true:
let read = await conn.readOnce(addr toReadBuffer[0], PerfSize)
if read == 0:
break
bytesRead += read
except CatchableError:
discard
var uploadSizeBuffer: array[8, byte]
await conn.readExactly(addr uploadSizeBuffer[0], 8)
var uploadSize = uint64.fromBytesBE(uploadSizeBuffer)
var buf: array[PerfSize, byte]
while size > 0:
let toWrite = min(size, PerfSize)
await conn.write(buf[0 ..< toWrite])
size -= toWrite
var readBuffer: array[PerfSize, byte]
while not conn.atEof:
try:
let readBytes = await conn.readOnce(addr readBuffer[0], PerfSize)
if readBytes == 0:
break
except LPStreamEOFError:
break
var writeBuffer: array[PerfSize, byte]
while uploadSize > 0:
let toWrite = min(uploadSize, PerfSize)
await conn.write(writeBuffer[0 ..< toWrite])
uploadSize -= toWrite
except CancelledError as exc:
trace "cancelled perf handler"
raise exc

View File

@@ -725,9 +725,8 @@ method rpcHandler*(
continue
if (msg.signature.len > 0 or g.verifySignature) and not msg.verify():
# always validate if signature is present or required
debug "Dropping message due to failed signature verification",
msgId = shortLog(msgId), peer
debug "Dropping message due to failed signature verification", msg = msg
await g.punishInvalidMessage(peer, msg)
continue
@@ -933,11 +932,12 @@ method publish*(
g.mcache.put(msgId, msg)
if g.parameters.sendIDontWantOnPublish:
if isLargeMessage(msg, msgId):
if not pubParams.skipIDontWant and isLargeMessage(msg, msgId):
g.sendIDontWant(msg, msgId, peers)
when defined(libp2p_gossipsub_1_4):
g.sendPreamble(msg, msgId, peers)
if not pubParams.skipPreamble:
g.sendPreamble(msg, msgId, peers)
g.broadcast(
peers,

View File

@@ -146,9 +146,24 @@ type
## This callback can be used to reject topic we're not interested in
PublishParams* = object
## Indicates whether a message will be broadcast using a custom connection
## defined when instantiating Pubsub, or over the normal connection.
useCustomConn*: bool
## Can be used to avoid having the node reply to IWANT messages when initially
## broadcasting a message; only after relaying its own message will the
## node reply to IWANTs.
skipMCache*: bool
## Determines whether an IDontWant message is sent when the current message
## is published, if the message is large.
skipIDontWant*: bool
when defined(libp2p_gossipsub_1_4):
## Determines whether a Preamble message is sent when the current message
## is published, if the message is large.
skipPreamble*: bool
PubSub* {.public.} = ref object of LPProtocol
switch*: Switch # the switch used to dial/connect to peers
peerInfo*: PeerInfo # this peer's info
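Taken together with the publish hunk above, these flags are consumed at publish time. A hypothetical call site, assuming a `publish` overload that accepts `PublishParams` (the exact signature is not shown in this diff):

# Illustrative only: `gossipSub` is a running GossipSub instance; the exact
# publish overload taking PublishParams is an assumption.
let params = PublishParams(
  useCustomConn: false, # broadcast over the normal connection
  skipMCache: false,    # keep the message in mcache so IWANTs can be answered
  skipIDontWant: true,  # do not announce IDontWant when publishing this message
)
discard await gossipSub.publish("my-topic", payload, params)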

View File

@@ -92,11 +92,17 @@ type
# have to pass peer as it's unknown during init
OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [].}
QueuedMessage = object
# Lower-priority messages are queued and sent later from a different location,
# so arguments like `useCustomConn` must be kept alongside the message data.
data: seq[byte]
useCustomConn: bool
RpcMessageQueue* = ref object
# Tracks async tasks for sending high-priority peer-published messages.
sendPriorityQueue: Deque[Future[void]]
# Queue for lower-priority messages, like "IWANT" replies and relay messages.
nonPriorityQueue: AsyncQueue[seq[byte]]
nonPriorityQueue: AsyncQueue[QueuedMessage]
# Task for processing the non-priority message queue.
sendNonPriorityTask: Future[void]
@@ -465,7 +471,9 @@ proc sendEncoded*(
else:
Future[void].completed()
else:
let f = p.rpcmessagequeue.nonPriorityQueue.addLast(msg)
let f = p.rpcmessagequeue.nonPriorityQueue.addLast(
QueuedMessage(data: msg, useCustomConn: useCustomConn)
)
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_non_priority_queue_size.inc(labelValues = [$p.peerId])
f
@@ -478,19 +486,16 @@ iterator splitRPCMsg(
## exceeds the `maxSize` when trying to fit into an empty `RPCMsg`, the latter is skipped as too large to send.
## Every constructed `RPCMsg` is then encoded, optionally anonymized, and yielded as a sequence of bytes.
var currentRPCMsg = rpcMsg
currentRPCMsg.messages = newSeq[Message]()
var currentSize = byteSize(currentRPCMsg)
var currentRPCMsg = RPCMsg()
var currentSize = 0
for msg in rpcMsg.messages:
let msgSize = byteSize(msg)
# Check if adding the next message will exceed maxSize
if float(currentSize + msgSize) * 1.1 > float(maxSize):
# Guessing 10% protobuf overhead
if currentRPCMsg.messages.len == 0:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
if currentSize + msgSize > maxSize:
if msgSize > maxSize:
warn "message too big to sent", peer, rpcMsg = shortLog(msg)
continue # Skip this message
trace "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
@@ -502,11 +507,9 @@ iterator splitRPCMsg(
currentSize += msgSize
# Check if there is a non-empty currentRPCMsg left to be added
if currentSize > 0 and currentRPCMsg.messages.len > 0:
if currentRPCMsg.messages.len > 0:
trace "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
yield encodeRpcMsg(currentRPCMsg, anonymize)
else:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
proc send*(
p: PubSubPeer,
@@ -542,8 +545,11 @@ proc send*(
sendMetrics(msg)
encodeRpcMsg(msg, anonymize)
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
# Messages should not exceed 90% of maxMessageSize. Guessing 10% protobuf overhead.
let maxEncodedMsgSize = (p.maxMessageSize * 90) div 100
if encoded.len > maxEncodedMsgSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, maxEncodedMsgSize, anonymize):
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority, useCustomConn)
else:
# If the message size is within limits, send it as is
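As a worked example of the new threshold: with a 1 MiB `maxMessageSize`, `maxEncodedMsgSize` becomes `(1_048_576 * 90) div 100 = 943_718` bytes, so any encoded RPC above roughly 922 KiB is split, leaving about 10% headroom for protobuf framing.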
@@ -573,7 +579,7 @@ proc sendNonPriorityTask(p: PubSubPeer) {.async: (raises: [CancelledError]).} =
discard await race(p.rpcmessagequeue.sendPriorityQueue[^1])
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_non_priority_queue_size.dec(labelValues = [$p.peerId])
await p.sendMsg(msg)
await p.sendMsg(msg.data, msg.useCustomConn)
proc startSendNonPriorityTask(p: PubSubPeer) =
debug "starting sendNonPriorityTask", p
@@ -595,7 +601,7 @@ proc stopSendNonPriorityTask*(p: PubSubPeer) =
proc new(T: typedesc[RpcMessageQueue]): T =
return T(
sendPriorityQueue: initDeque[Future[void]](),
nonPriorityQueue: newAsyncQueue[seq[byte]](),
nonPriorityQueue: newAsyncQueue[QueuedMessage](),
)
proc new*(

View File

@@ -41,15 +41,36 @@ func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
proc sign*(msg: Message, privateKey: PrivateKey): CryptoResult[seq[byte]] =
ok((?privateKey.sign(PubSubPrefix & encodeMessage(msg, false))).getBytes())
proc extractPublicKey(m: Message): Opt[PublicKey] =
var pubkey: PublicKey
if m.fromPeer.hasPublicKey() and m.fromPeer.extractPublicKey(pubkey):
Opt.some(pubkey)
elif m.key.len > 0 and pubkey.init(m.key):
# Check that the peerId derived from m.key matches m.fromPeer
let derivedPeerId = PeerId.init(pubkey).valueOr:
warn "could not derive peerId from key field"
return Opt.none(PublicKey)
if derivedPeerId != m.fromPeer:
warn "peerId derived from msg.key is not the same as msg.fromPeer",
derivedPeerId = derivedPeerId, fromPeer = m.fromPeer
return Opt.none(PublicKey)
Opt.some(pubkey)
else:
Opt.none(PublicKey)
proc verify*(m: Message): bool =
if m.signature.len > 0 and m.key.len > 0:
if m.signature.len > 0:
var msg = m
msg.signature = @[]
msg.key = @[]
var remote: Signature
var key: PublicKey
if remote.init(m.signature) and key.init(m.key):
let key = m.extractPublicKey().valueOr:
warn "could not extract public key", msg = m
return false
if remote.init(m.signature):
trace "verifying signature", remoteSignature = remote
result = remote.verify(PubSubPrefix & encodeMessage(msg, false), key)
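The net effect is that `msg.key` becomes optional whenever the public key can be recovered from `fromPeer`, while a `key` whose derived peer ID mismatches `fromPeer` is now rejected. A minimal roundtrip sketch, assuming an Ed25519 keypair (whose peer IDs embed the public key); everything except `Message`, `sign`, and `verify` is illustrative:

let
  rng = newRng()
  privKey = PrivateKey.random(Ed25519, rng[]).expect("keygen")
  peerId = PeerId.init(privKey).expect("peer id")
var msg = Message(fromPeer: peerId, data: @[1'u8, 2, 3])
msg.signature = msg.sign(privKey).expect("sign")
# fromPeer embeds the public key, so msg.key may stay empty:
# extractPublicKey recovers it from fromPeer and verification succeeds.
doAssert msg.verify()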

View File

@@ -1,843 +1,3 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import ./rendezvous/rendezvous
{.push raises: [].}
import tables, sequtils, sugar, sets
import metrics except collect
import chronos, chronicles, bearssl/rand, stew/[byteutils, objects]
import
./protocol,
../protobuf/minprotobuf,
../switch,
../routing_record,
../utils/heartbeat,
../stream/connection,
../utils/offsettedseq,
../utils/semaphore,
../discovery/discoverymngr
export chronicles
logScope:
topics = "libp2p discovery rendezvous"
declareCounter(libp2p_rendezvous_register, "number of advertise requests")
declareCounter(libp2p_rendezvous_discover, "number of discovery requests")
declareGauge(libp2p_rendezvous_registered, "number of registered peers")
declareGauge(libp2p_rendezvous_namespaces, "number of registered namespaces")
const
RendezVousCodec* = "/rendezvous/1.0.0"
MinimumDuration* = 2.hours
MaximumDuration = 72.hours
MaximumMessageLen = 1 shl 22 # 4MB
MinimumNamespaceLen = 1
MaximumNamespaceLen = 255
RegistrationLimitPerPeer = 1000
DiscoverLimit = 1000'u64
SemaphoreDefaultSize = 5
type
MessageType {.pure.} = enum
Register = 0
RegisterResponse = 1
Unregister = 2
Discover = 3
DiscoverResponse = 4
ResponseStatus = enum
Ok = 0
InvalidNamespace = 100
InvalidSignedPeerRecord = 101
InvalidTTL = 102
InvalidCookie = 103
NotAuthorized = 200
InternalError = 300
Unavailable = 400
Cookie = object
offset: uint64
ns: Opt[string]
Register = object
ns: string
signedPeerRecord: seq[byte]
ttl: Opt[uint64] # in seconds
RegisterResponse = object
status: ResponseStatus
text: Opt[string]
ttl: Opt[uint64] # in seconds
Unregister = object
ns: string
Discover = object
ns: Opt[string]
limit: Opt[uint64]
cookie: Opt[seq[byte]]
DiscoverResponse = object
registrations: seq[Register]
cookie: Opt[seq[byte]]
status: ResponseStatus
text: Opt[string]
Message = object
msgType: MessageType
register: Opt[Register]
registerResponse: Opt[RegisterResponse]
unregister: Opt[Unregister]
discover: Opt[Discover]
discoverResponse: Opt[DiscoverResponse]
proc encode(c: Cookie): ProtoBuffer =
result = initProtoBuffer()
result.write(1, c.offset)
if c.ns.isSome():
result.write(2, c.ns.get())
result.finish()
proc encode(r: Register): ProtoBuffer =
result = initProtoBuffer()
result.write(1, r.ns)
result.write(2, r.signedPeerRecord)
r.ttl.withValue(ttl):
result.write(3, ttl)
result.finish()
proc encode(rr: RegisterResponse): ProtoBuffer =
result = initProtoBuffer()
result.write(1, rr.status.uint)
rr.text.withValue(text):
result.write(2, text)
rr.ttl.withValue(ttl):
result.write(3, ttl)
result.finish()
proc encode(u: Unregister): ProtoBuffer =
result = initProtoBuffer()
result.write(1, u.ns)
result.finish()
proc encode(d: Discover): ProtoBuffer =
result = initProtoBuffer()
if d.ns.isSome():
result.write(1, d.ns.get())
d.limit.withValue(limit):
result.write(2, limit)
d.cookie.withValue(cookie):
result.write(3, cookie)
result.finish()
proc encode(dr: DiscoverResponse): ProtoBuffer =
result = initProtoBuffer()
for reg in dr.registrations:
result.write(1, reg.encode())
dr.cookie.withValue(cookie):
result.write(2, cookie)
result.write(3, dr.status.uint)
dr.text.withValue(text):
result.write(4, text)
result.finish()
proc encode(msg: Message): ProtoBuffer =
result = initProtoBuffer()
result.write(1, msg.msgType.uint)
msg.register.withValue(register):
result.write(2, register.encode())
msg.registerResponse.withValue(registerResponse):
result.write(3, registerResponse.encode())
msg.unregister.withValue(unregister):
result.write(4, unregister.encode())
msg.discover.withValue(discover):
result.write(5, discover.encode())
msg.discoverResponse.withValue(discoverResponse):
result.write(6, discoverResponse.encode())
result.finish()
proc decode(_: typedesc[Cookie], buf: seq[byte]): Opt[Cookie] =
var
c: Cookie
ns: string
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, c.offset)
r2 = pb.getField(2, ns)
if r1.isErr() or r2.isErr():
return Opt.none(Cookie)
if r2.get(false):
c.ns = Opt.some(ns)
Opt.some(c)
proc decode(_: typedesc[Register], buf: seq[byte]): Opt[Register] =
var
r: Register
ttl: uint64
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, r.ns)
r2 = pb.getRequiredField(2, r.signedPeerRecord)
r3 = pb.getField(3, ttl)
if r1.isErr() or r2.isErr() or r3.isErr():
return Opt.none(Register)
if r3.get(false):
r.ttl = Opt.some(ttl)
Opt.some(r)
proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Opt[RegisterResponse] =
var
rr: RegisterResponse
statusOrd: uint
text: string
ttl: uint64
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, statusOrd)
r2 = pb.getField(2, text)
r3 = pb.getField(3, ttl)
if r1.isErr() or r2.isErr() or r3.isErr() or
not checkedEnumAssign(rr.status, statusOrd):
return Opt.none(RegisterResponse)
if r2.get(false):
rr.text = Opt.some(text)
if r3.get(false):
rr.ttl = Opt.some(ttl)
Opt.some(rr)
proc decode(_: typedesc[Unregister], buf: seq[byte]): Opt[Unregister] =
var u: Unregister
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, u.ns)
if r1.isErr():
return Opt.none(Unregister)
Opt.some(u)
proc decode(_: typedesc[Discover], buf: seq[byte]): Opt[Discover] =
var
d: Discover
limit: uint64
cookie: seq[byte]
ns: string
let
pb = initProtoBuffer(buf)
r1 = pb.getField(1, ns)
r2 = pb.getField(2, limit)
r3 = pb.getField(3, cookie)
if r1.isErr() or r2.isErr() or r3.isErr:
return Opt.none(Discover)
if r1.get(false):
d.ns = Opt.some(ns)
if r2.get(false):
d.limit = Opt.some(limit)
if r3.get(false):
d.cookie = Opt.some(cookie)
Opt.some(d)
proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Opt[DiscoverResponse] =
var
dr: DiscoverResponse
registrations: seq[seq[byte]]
cookie: seq[byte]
statusOrd: uint
text: string
let
pb = initProtoBuffer(buf)
r1 = pb.getRepeatedField(1, registrations)
r2 = pb.getField(2, cookie)
r3 = pb.getRequiredField(3, statusOrd)
r4 = pb.getField(4, text)
if r1.isErr() or r2.isErr() or r3.isErr or r4.isErr() or
not checkedEnumAssign(dr.status, statusOrd):
return Opt.none(DiscoverResponse)
for reg in registrations:
var r: Register
let regOpt = Register.decode(reg).valueOr:
return
dr.registrations.add(regOpt)
if r2.get(false):
dr.cookie = Opt.some(cookie)
if r4.get(false):
dr.text = Opt.some(text)
Opt.some(dr)
proc decode(_: typedesc[Message], buf: seq[byte]): Opt[Message] =
var
msg: Message
statusOrd: uint
pbr, pbrr, pbu, pbd, pbdr: ProtoBuffer
let pb = initProtoBuffer(buf)
?pb.getRequiredField(1, statusOrd).toOpt
if not checkedEnumAssign(msg.msgType, statusOrd):
return Opt.none(Message)
if ?pb.getField(2, pbr).optValue:
msg.register = Register.decode(pbr.buffer)
if msg.register.isNone():
return Opt.none(Message)
if ?pb.getField(3, pbrr).optValue:
msg.registerResponse = RegisterResponse.decode(pbrr.buffer)
if msg.registerResponse.isNone():
return Opt.none(Message)
if ?pb.getField(4, pbu).optValue:
msg.unregister = Unregister.decode(pbu.buffer)
if msg.unregister.isNone():
return Opt.none(Message)
if ?pb.getField(5, pbd).optValue:
msg.discover = Discover.decode(pbd.buffer)
if msg.discover.isNone():
return Opt.none(Message)
if ?pb.getField(6, pbdr).optValue:
msg.discoverResponse = DiscoverResponse.decode(pbdr.buffer)
if msg.discoverResponse.isNone():
return Opt.none(Message)
Opt.some(msg)
type
RendezVousError* = object of DiscoveryError
RegisteredData = object
expiration: Moment
peerId: PeerId
data: Register
RendezVous* = ref object of LPProtocol
# Registered needs to be an offsetted sequence
# because we need stable indices for the cookies.
registered: OffsettedSeq[RegisteredData]
# Namespaces is a table whose key is a salted namespace and
# the value is the sequence of indices corresponding to this
# namespace in the offsetted sequence.
namespaces: Table[string, seq[int]]
rng: ref HmacDrbgContext
salt: string
defaultDT: Moment
registerDeletionLoop: Future[void]
#registerEvent: AsyncEvent # TODO: to raise during the heartbeat
# + make the heartbeat sleep duration "smarter"
sema: AsyncSemaphore
peers: seq[PeerId]
cookiesSaved: Table[PeerId, Table[string, seq[byte]]]
switch: Switch
minDuration: Duration
maxDuration: Duration
minTTL: uint64
maxTTL: uint64
proc checkPeerRecord(spr: seq[byte], peerId: PeerId): Result[void, string] =
if spr.len == 0:
return err("Empty peer record")
let signedEnv = ?SignedPeerRecord.decode(spr).mapErr(x => $x)
if signedEnv.data.peerId != peerId:
return err("Bad Peer ID")
return ok()
proc sendRegisterResponse(
conn: Connection, ttl: uint64
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.RegisterResponse,
registerResponse: Opt.some(RegisterResponse(status: Ok, ttl: Opt.some(ttl))),
)
)
await conn.writeLp(msg.buffer)
proc sendRegisterResponseError(
conn: Connection, status: ResponseStatus, text: string = ""
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.RegisterResponse,
registerResponse: Opt.some(RegisterResponse(status: status, text: Opt.some(text))),
)
)
await conn.writeLp(msg.buffer)
proc sendDiscoverResponse(
conn: Connection, s: seq[Register], cookie: Cookie
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: Opt.some(
DiscoverResponse(
status: Ok, registrations: s, cookie: Opt.some(cookie.encode().buffer)
)
),
)
)
await conn.writeLp(msg.buffer)
proc sendDiscoverResponseError(
conn: Connection, status: ResponseStatus, text: string = ""
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: Opt.some(DiscoverResponse(status: status, text: Opt.some(text))),
)
)
await conn.writeLp(msg.buffer)
proc countRegister(rdv: RendezVous, peerId: PeerId): int =
let n = Moment.now()
for data in rdv.registered:
if data.peerId == peerId and data.expiration > n:
result.inc()
proc save(
rdv: RendezVous, ns: string, peerId: PeerId, r: Register, update: bool = true
) =
let nsSalted = ns & rdv.salt
discard rdv.namespaces.hasKeyOrPut(nsSalted, newSeq[int]())
try:
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].peerId == peerId:
if update == false:
return
rdv.registered[index].expiration = rdv.defaultDT
rdv.registered.add(
RegisteredData(
peerId: peerId,
expiration: Moment.now() + r.ttl.get(rdv.minTTL).int64.seconds,
data: r,
)
)
rdv.namespaces[nsSalted].add(rdv.registered.high)
# rdv.registerEvent.fire()
except KeyError as e:
doAssert false, "Should have key: " & e.msg
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
trace "Received Register", peerId = conn.peerId, ns = r.ns
libp2p_rendezvous_register.inc()
if r.ns.len < MinimumNamespaceLen or r.ns.len > MaximumNamespaceLen:
return conn.sendRegisterResponseError(InvalidNamespace)
let ttl = r.ttl.get(rdv.minTTL)
if ttl < rdv.minTTL or ttl > rdv.maxTTL:
return conn.sendRegisterResponseError(InvalidTTL)
let pr = checkPeerRecord(r.signedPeerRecord, conn.peerId)
if pr.isErr():
return conn.sendRegisterResponseError(InvalidSignedPeerRecord, pr.error())
if rdv.countRegister(conn.peerId) >= RegistrationLimitPerPeer:
return conn.sendRegisterResponseError(NotAuthorized, "Registration limit reached")
rdv.save(r.ns, conn.peerId, r)
libp2p_rendezvous_registered.inc()
libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
conn.sendRegisterResponse(ttl)
proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
trace "Received Unregister", peerId = conn.peerId, ns = u.ns
let nsSalted = u.ns & rdv.salt
try:
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].peerId == conn.peerId:
rdv.registered[index].expiration = rdv.defaultDT
libp2p_rendezvous_registered.dec()
except KeyError:
return
proc discover(
rdv: RendezVous, conn: Connection, d: Discover
) {.async: (raises: [CancelledError, LPStreamError]).} =
trace "Received Discover", peerId = conn.peerId, ns = d.ns
libp2p_rendezvous_discover.inc()
if d.ns.isSome() and d.ns.get().len > MaximumNamespaceLen:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
var limit = min(DiscoverLimit, d.limit.get(DiscoverLimit))
var cookie =
if d.cookie.isSome():
try:
Cookie.decode(d.cookie.tryGet()).tryGet()
except CatchableError:
await conn.sendDiscoverResponseError(InvalidCookie)
return
else:
Cookie(offset: rdv.registered.low().uint64 - 1)
if d.ns.isSome() and cookie.ns.isSome() and cookie.ns.get() != d.ns.get() or
cookie.offset < rdv.registered.low().uint64 or
cookie.offset > rdv.registered.high().uint64:
cookie = Cookie(offset: rdv.registered.low().uint64 - 1)
let namespaces =
if d.ns.isSome():
try:
rdv.namespaces[d.ns.get() & rdv.salt]
except KeyError:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
else:
toSeq(max(cookie.offset.int, rdv.registered.offset) .. rdv.registered.high())
if namespaces.len() == 0:
await conn.sendDiscoverResponse(@[], Cookie())
return
var offset = namespaces[^1]
let n = Moment.now()
var s = collect(newSeq()):
for index in namespaces:
var reg = rdv.registered[index]
if limit == 0:
offset = index
break
if reg.expiration < n or index.uint64 <= cookie.offset:
continue
limit.dec()
reg.data.ttl = Opt.some((reg.expiration - Moment.now()).seconds.uint64)
reg.data
rdv.rng.shuffle(s)
await conn.sendDiscoverResponse(s, Cookie(offset: offset.uint64, ns: d.ns))
proc advertisePeer(
rdv: RendezVous, peer: PeerId, msg: seq[byte]
) {.async: (raises: [CancelledError]).} =
proc advertiseWrap() {.async: (raises: []).} =
try:
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer:
await conn.close()
await conn.writeLp(msg)
let
buf = await conn.readLp(4096)
msgRecv = Message.decode(buf).tryGet()
if msgRecv.msgType != MessageType.RegisterResponse:
trace "Unexpected register response", peer, msgType = msgRecv.msgType
elif msgRecv.registerResponse.tryGet().status != ResponseStatus.Ok:
trace "Refuse to register", peer, response = msgRecv.registerResponse
else:
trace "Successfully registered", peer, response = msgRecv.registerResponse
except CatchableError as exc:
trace "exception in the advertise", description = exc.msg
finally:
rdv.sema.release()
await rdv.sema.acquire()
await advertiseWrap()
proc advertise*(
rdv: RendezVous, ns: string, ttl: Duration, peers: seq[PeerId]
) {.async: (raises: [CancelledError, AdvertiseError]).} =
if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
raise newException(AdvertiseError, "Invalid namespace")
if ttl < rdv.minDuration or ttl > rdv.maxDuration:
raise newException(AdvertiseError, "Invalid time to live: " & $ttl)
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
raise newException(AdvertiseError, "Wrong Signed Peer Record")
let
r = Register(ns: ns, signedPeerRecord: sprBuff, ttl: Opt.some(ttl.seconds.uint64))
msg = encode(Message(msgType: MessageType.Register, register: Opt.some(r)))
rdv.save(ns, rdv.switch.peerInfo.peerId, r)
let futs = collect(newSeq()):
for peer in peers:
trace "Send Advertise", peerId = peer, ns
rdv.advertisePeer(peer, msg.buffer).withTimeout(5.seconds)
await allFutures(futs)
method advertise*(
rdv: RendezVous, ns: string, ttl: Duration = rdv.minDuration
) {.base, async: (raises: [CancelledError, AdvertiseError]).} =
await rdv.advertise(ns, ttl, rdv.peers)
proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
let
nsSalted = ns & rdv.salt
n = Moment.now()
try:
collect(newSeq()):
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].expiration > n:
let res = SignedPeerRecord.decode(rdv.registered[index].data.signedPeerRecord).valueOr:
continue
res.data
except KeyError as exc:
@[]
proc request*(
rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int, peers: seq[PeerId]
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
var
s: Table[PeerId, (PeerRecord, Register)]
limit: uint64
d = Discover(ns: ns)
if l <= 0 or l > DiscoverLimit.int:
raise newException(AdvertiseError, "Invalid limit")
if ns.isSome() and ns.get().len > MaximumNamespaceLen:
raise newException(AdvertiseError, "Invalid namespace")
limit = l.uint64
proc requestPeer(
peer: PeerId
) {.async: (raises: [CancelledError, DialFailedError, LPStreamError]).} =
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer:
await conn.close()
d.limit = Opt.some(limit)
d.cookie =
if ns.isSome():
try:
Opt.some(rdv.cookiesSaved[peer][ns.get()])
except KeyError, CatchableError:
Opt.none(seq[byte])
else:
Opt.none(seq[byte])
await conn.writeLp(
encode(Message(msgType: MessageType.Discover, discover: Opt.some(d))).buffer
)
let
buf = await conn.readLp(MaximumMessageLen)
msgRcv = Message.decode(buf).valueOr:
debug "Message undecodable"
return
if msgRcv.msgType != MessageType.DiscoverResponse:
debug "Unexpected discover response", msgType = msgRcv.msgType
return
let resp = msgRcv.discoverResponse.valueOr:
debug "Discover response is empty"
return
if resp.status != ResponseStatus.Ok:
trace "Cannot discover", ns, status = resp.status, text = resp.text
return
resp.cookie.withValue(cookie):
if ns.isSome:
let namespace = ns.get()
if cookie.len() < 1000 and
rdv.cookiesSaved.hasKeyOrPut(peer, {namespace: cookie}.toTable()):
try:
rdv.cookiesSaved[peer][namespace] = cookie
except KeyError:
raiseAssert "checked with hasKeyOrPut"
for r in resp.registrations:
if limit == 0:
return
let ttl = r.ttl.get(rdv.maxTTL + 1)
if ttl > rdv.maxTTL:
continue
let
spr = SignedPeerRecord.decode(r.signedPeerRecord).valueOr:
continue
pr = spr.data
if s.hasKey(pr.peerId):
let (prSaved, rSaved) =
try:
s[pr.peerId]
except KeyError:
raiseAssert "checked with hasKey"
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get(rdv.maxTTL) < ttl) or
prSaved.seqNo < pr.seqNo:
s[pr.peerId] = (pr, r)
else:
s[pr.peerId] = (pr, r)
limit.dec()
if ns.isSome():
for (_, r) in s.values():
rdv.save(ns.get(), peer, r, false)
for peer in peers:
if limit == 0:
break
if RendezVousCodec notin rdv.switch.peerStore[ProtoBook][peer]:
continue
try:
trace "Send Request", peerId = peer, ns
await peer.requestPeer()
except CancelledError as e:
raise e
except DialFailedError as e:
trace "failed to dial a peer", description = e.msg
except LPStreamError as e:
trace "failed to communicate with a peer", description = e.msg
return toSeq(s.values()).mapIt(it[0])
proc request*(
rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
await rdv.request(ns, l, rdv.peers)
proc request*(
rdv: RendezVous, l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
await rdv.request(Opt.none(string), l, rdv.peers)
proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
let nsSalted = ns & rdv.salt
try:
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].peerId == rdv.switch.peerInfo.peerId:
rdv.registered[index].expiration = rdv.defaultDT
except KeyError:
return
proc unsubscribe*(
rdv: RendezVous, ns: string, peerIds: seq[PeerId]
) {.async: (raises: [RendezVousError, CancelledError]).} =
if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
raise newException(RendezVousError, "Invalid namespace")
let msg = encode(
Message(msgType: MessageType.Unregister, unregister: Opt.some(Unregister(ns: ns)))
)
proc unsubscribePeer(peerId: PeerId) {.async: (raises: []).} =
try:
let conn = await rdv.switch.dial(peerId, RendezVousCodec)
defer:
await conn.close()
await conn.writeLp(msg.buffer)
except CatchableError as exc:
trace "exception while unsubscribing", description = exc.msg
let futs = collect(newSeq()):
for peer in peerIds:
unsubscribePeer(peer)
await allFutures(futs)
proc unsubscribe*(
rdv: RendezVous, ns: string
) {.async: (raises: [RendezVousError, CancelledError]).} =
rdv.unsubscribeLocally(ns)
await rdv.unsubscribe(ns, rdv.peers)
proc setup*(rdv: RendezVous, switch: Switch) =
rdv.switch = switch
proc handlePeer(
peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
rdv.peers.add(peerId)
elif event.kind == PeerEventKind.Left:
rdv.peers.keepItIf(it != peerId)
rdv.switch.addPeerEventHandler(handlePeer, Joined)
rdv.switch.addPeerEventHandler(handlePeer, Left)
proc new*(
T: typedesc[RendezVous],
rng: ref HmacDrbgContext = newRng(),
minDuration = MinimumDuration,
maxDuration = MaximumDuration,
): T {.raises: [RendezVousError].} =
if minDuration < 1.minutes:
raise newException(RendezVousError, "TTL too short: 1 minute minimum")
if maxDuration > 72.hours:
raise newException(RendezVousError, "TTL too long: 72 hours maximum")
if minDuration >= maxDuration:
raise newException(RendezVousError, "Minimum TTL longer than maximum")
let
minTTL = minDuration.seconds.uint64
maxTTL = maxDuration.seconds.uint64
let rdv = T(
rng: rng,
salt: string.fromBytes(generateBytes(rng[], 8)),
registered: initOffsettedSeq[RegisteredData](1),
defaultDT: Moment.now() - 1.days,
#registerEvent: newAsyncEvent(),
sema: newAsyncSemaphore(SemaphoreDefaultSize),
minDuration: minDuration,
maxDuration: maxDuration,
minTTL: minTTL,
maxTTL: maxTTL,
)
logScope:
topics = "libp2p discovery rendezvous"
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
let
buf = await conn.readLp(4096)
msg = Message.decode(buf).tryGet()
case msg.msgType
of MessageType.Register:
await rdv.register(conn, msg.register.tryGet())
of MessageType.RegisterResponse:
trace "Got an unexpected Register Response", response = msg.registerResponse
of MessageType.Unregister:
rdv.unregister(conn, msg.unregister.tryGet())
of MessageType.Discover:
await rdv.discover(conn, msg.discover.tryGet())
of MessageType.DiscoverResponse:
trace "Got an unexpected Discover Response", response = msg.discoverResponse
except CancelledError as exc:
trace "cancelled rendezvous handler"
raise exc
except CatchableError as exc:
trace "exception in rendezvous handler", description = exc.msg
finally:
await conn.close()
rdv.handler = handleStream
rdv.codec = RendezVousCodec
return rdv
proc new*(
T: typedesc[RendezVous],
switch: Switch,
rng: ref HmacDrbgContext = newRng(),
minDuration = MinimumDuration,
maxDuration = MaximumDuration,
): T {.raises: [RendezVousError].} =
let rdv = T.new(rng, minDuration, maxDuration)
rdv.setup(switch)
return rdv
proc deletesRegister(
rdv: RendezVous, interval = 1.minutes
) {.async: (raises: [CancelledError]).} =
heartbeat "Register timeout", interval:
let n = Moment.now()
var total = 0
rdv.registered.flushIfIt(it.expiration < n)
for data in rdv.namespaces.mvalues():
data.keepItIf(it >= rdv.registered.offset)
total += data.len
libp2p_rendezvous_registered.set(int64(total))
libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
method start*(
rdv: RendezVous
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if not rdv.registerDeletionLoop.isNil:
warn "Starting rendezvous twice"
return fut
rdv.registerDeletionLoop = rdv.deletesRegister()
rdv.started = true
fut
method stop*(rdv: RendezVous): Future[void] {.async: (raises: [], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if rdv.registerDeletionLoop.isNil:
warn "Stopping rendezvous without starting it"
return fut
rdv.started = false
rdv.registerDeletionLoop.cancelSoon()
rdv.registerDeletionLoop = nil
fut
export rendezvous
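In other words, the former 843-line module collapses to a two-line re-export shim: the implementation moves into the new `rendezvous/` subdirectory (split into the protobuf and protocol modules added below), while existing imports of the old module path keep compiling unchanged.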

View File

@@ -0,0 +1,275 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import results
import stew/objects
import ../../protobuf/minprotobuf
type
MessageType* {.pure.} = enum
Register = 0
RegisterResponse = 1
Unregister = 2
Discover = 3
DiscoverResponse = 4
ResponseStatus* = enum
Ok = 0
InvalidNamespace = 100
InvalidSignedPeerRecord = 101
InvalidTTL = 102
InvalidCookie = 103
NotAuthorized = 200
InternalError = 300
Unavailable = 400
Cookie* = object
offset*: uint64
ns*: Opt[string]
Register* = object
ns*: string
signedPeerRecord*: seq[byte]
ttl*: Opt[uint64] # in seconds
RegisterResponse* = object
status*: ResponseStatus
text*: Opt[string]
ttl*: Opt[uint64] # in seconds
Unregister* = object
ns*: string
Discover* = object
ns*: Opt[string]
limit*: Opt[uint64]
cookie*: Opt[seq[byte]]
DiscoverResponse* = object
registrations*: seq[Register]
cookie*: Opt[seq[byte]]
status*: ResponseStatus
text*: Opt[string]
Message* = object
msgType*: MessageType
register*: Opt[Register]
registerResponse*: Opt[RegisterResponse]
unregister*: Opt[Unregister]
discover*: Opt[Discover]
discoverResponse*: Opt[DiscoverResponse]
proc encode*(c: Cookie): ProtoBuffer =
result = initProtoBuffer()
result.write(1, c.offset)
if c.ns.isSome():
result.write(2, c.ns.get())
result.finish()
proc encode*(r: Register): ProtoBuffer =
result = initProtoBuffer()
result.write(1, r.ns)
result.write(2, r.signedPeerRecord)
r.ttl.withValue(ttl):
result.write(3, ttl)
result.finish()
proc encode*(rr: RegisterResponse): ProtoBuffer =
result = initProtoBuffer()
result.write(1, rr.status.uint)
rr.text.withValue(text):
result.write(2, text)
rr.ttl.withValue(ttl):
result.write(3, ttl)
result.finish()
proc encode*(u: Unregister): ProtoBuffer =
result = initProtoBuffer()
result.write(1, u.ns)
result.finish()
proc encode*(d: Discover): ProtoBuffer =
result = initProtoBuffer()
if d.ns.isSome():
result.write(1, d.ns.get())
d.limit.withValue(limit):
result.write(2, limit)
d.cookie.withValue(cookie):
result.write(3, cookie)
result.finish()
proc encode*(dr: DiscoverResponse): ProtoBuffer =
result = initProtoBuffer()
for reg in dr.registrations:
result.write(1, reg.encode())
dr.cookie.withValue(cookie):
result.write(2, cookie)
result.write(3, dr.status.uint)
dr.text.withValue(text):
result.write(4, text)
result.finish()
proc encode*(msg: Message): ProtoBuffer =
result = initProtoBuffer()
result.write(1, msg.msgType.uint)
msg.register.withValue(register):
result.write(2, register.encode())
msg.registerResponse.withValue(registerResponse):
result.write(3, registerResponse.encode())
msg.unregister.withValue(unregister):
result.write(4, unregister.encode())
msg.discover.withValue(discover):
result.write(5, discover.encode())
msg.discoverResponse.withValue(discoverResponse):
result.write(6, discoverResponse.encode())
result.finish()
proc decode*(_: typedesc[Cookie], buf: seq[byte]): Opt[Cookie] =
var
c: Cookie
ns: string
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, c.offset)
r2 = pb.getField(2, ns)
if r1.isErr() or r2.isErr():
return Opt.none(Cookie)
if r2.get(false):
c.ns = Opt.some(ns)
Opt.some(c)
proc decode*(_: typedesc[Register], buf: seq[byte]): Opt[Register] =
var
r: Register
ttl: uint64
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, r.ns)
r2 = pb.getRequiredField(2, r.signedPeerRecord)
r3 = pb.getField(3, ttl)
if r1.isErr() or r2.isErr() or r3.isErr():
return Opt.none(Register)
if r3.get(false):
r.ttl = Opt.some(ttl)
Opt.some(r)
proc decode*(_: typedesc[RegisterResponse], buf: seq[byte]): Opt[RegisterResponse] =
var
rr: RegisterResponse
statusOrd: uint
text: string
ttl: uint64
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, statusOrd)
r2 = pb.getField(2, text)
r3 = pb.getField(3, ttl)
if r1.isErr() or r2.isErr() or r3.isErr() or
not checkedEnumAssign(rr.status, statusOrd):
return Opt.none(RegisterResponse)
if r2.get(false):
rr.text = Opt.some(text)
if r3.get(false):
rr.ttl = Opt.some(ttl)
Opt.some(rr)
proc decode*(_: typedesc[Unregister], buf: seq[byte]): Opt[Unregister] =
var u: Unregister
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, u.ns)
if r1.isErr():
return Opt.none(Unregister)
Opt.some(u)
proc decode*(_: typedesc[Discover], buf: seq[byte]): Opt[Discover] =
var
d: Discover
limit: uint64
cookie: seq[byte]
ns: string
let
pb = initProtoBuffer(buf)
r1 = pb.getField(1, ns)
r2 = pb.getField(2, limit)
r3 = pb.getField(3, cookie)
if r1.isErr() or r2.isErr() or r3.isErr:
return Opt.none(Discover)
if r1.get(false):
d.ns = Opt.some(ns)
if r2.get(false):
d.limit = Opt.some(limit)
if r3.get(false):
d.cookie = Opt.some(cookie)
Opt.some(d)
proc decode*(_: typedesc[DiscoverResponse], buf: seq[byte]): Opt[DiscoverResponse] =
var
dr: DiscoverResponse
registrations: seq[seq[byte]]
cookie: seq[byte]
statusOrd: uint
text: string
let
pb = initProtoBuffer(buf)
r1 = pb.getRepeatedField(1, registrations)
r2 = pb.getField(2, cookie)
r3 = pb.getRequiredField(3, statusOrd)
r4 = pb.getField(4, text)
if r1.isErr() or r2.isErr() or r3.isErr or r4.isErr() or
not checkedEnumAssign(dr.status, statusOrd):
return Opt.none(DiscoverResponse)
for reg in registrations:
var r: Register
let regOpt = Register.decode(reg).valueOr:
return
dr.registrations.add(regOpt)
if r2.get(false):
dr.cookie = Opt.some(cookie)
if r4.get(false):
dr.text = Opt.some(text)
Opt.some(dr)
proc decode*(_: typedesc[Message], buf: seq[byte]): Opt[Message] =
var
msg: Message
statusOrd: uint
pbr, pbrr, pbu, pbd, pbdr: ProtoBuffer
let pb = initProtoBuffer(buf)
?pb.getRequiredField(1, statusOrd).toOpt
if not checkedEnumAssign(msg.msgType, statusOrd):
return Opt.none(Message)
if ?pb.getField(2, pbr).optValue:
msg.register = Register.decode(pbr.buffer)
if msg.register.isNone():
return Opt.none(Message)
if ?pb.getField(3, pbrr).optValue:
msg.registerResponse = RegisterResponse.decode(pbrr.buffer)
if msg.registerResponse.isNone():
return Opt.none(Message)
if ?pb.getField(4, pbu).optValue:
msg.unregister = Unregister.decode(pbu.buffer)
if msg.unregister.isNone():
return Opt.none(Message)
if ?pb.getField(5, pbd).optValue:
msg.discover = Discover.decode(pbd.buffer)
if msg.discover.isNone():
return Opt.none(Message)
if ?pb.getField(6, pbdr).optValue:
msg.discoverResponse = DiscoverResponse.decode(pbdr.buffer)
if msg.discoverResponse.isNone():
return Opt.none(Message)
Opt.some(msg)
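Since the protobuf layer is now exported on its own, it can be exercised in isolation. A small encode/decode roundtrip sketch (values illustrative):

let reg = Register(
  ns: "example", signedPeerRecord: @[1'u8, 2, 3], ttl: Opt.some(7200'u64)
)
let msg = Message(msgType: MessageType.Register, register: Opt.some(reg))
# encode() yields a ProtoBuffer; its .buffer is the wire bytes.
let decoded = Message.decode(msg.encode().buffer).expect("roundtrip")
doAssert decoded.register.get().ns == "example"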

View File

@@ -0,0 +1,589 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import tables, sequtils, sugar, sets
import metrics except collect
import chronos, chronicles, bearssl/rand, stew/[byteutils, objects]
import
./protobuf,
../protocol,
../../protobuf/minprotobuf,
../../switch,
../../routing_record,
../../utils/heartbeat,
../../stream/connection,
../../utils/offsettedseq,
../../utils/semaphore,
../../discovery/discoverymngr
export chronicles
logScope:
topics = "libp2p discovery rendezvous"
declareCounter(libp2p_rendezvous_register, "number of advertise requests")
declareCounter(libp2p_rendezvous_discover, "number of discovery requests")
declareGauge(libp2p_rendezvous_registered, "number of registered peers")
declareGauge(libp2p_rendezvous_namespaces, "number of registered namespaces")
const
RendezVousCodec* = "/rendezvous/1.0.0"
# Default minimum TTL per libp2p spec
MinimumDuration* = 2.hours
# Lower validation limit to accommodate Waku requirements
MinimumAcceptedDuration = 1.minutes
MaximumDuration = 72.hours
MaximumMessageLen = 1 shl 22 # 4MB
MinimumNamespaceLen = 1
MaximumNamespaceLen = 255
RegistrationLimitPerPeer* = 1000
DiscoverLimit = 1000'u64
SemaphoreDefaultSize = 5
type
RendezVousError* = object of DiscoveryError
RegisteredData = object
expiration*: Moment
peerId*: PeerId
data*: Register
RendezVous* = ref object of LPProtocol
# Registered needs to be an offsetted sequence
# because we need stable indices for the cookies.
registered*: OffsettedSeq[RegisteredData]
# Namespaces is a table whose key is a salted namespace and
# the value is the sequence of indices corresponding to this
# namespace in the offsetted sequence.
namespaces*: Table[string, seq[int]]
rng: ref HmacDrbgContext
salt: string
expiredDT: Moment
registerDeletionLoop: Future[void]
#registerEvent: AsyncEvent # TODO: to raise during the heartbeat
# + make the heartbeat sleep duration "smarter"
sema: AsyncSemaphore
peers: seq[PeerId]
cookiesSaved*: Table[PeerId, Table[string, seq[byte]]]
switch*: Switch
minDuration: Duration
maxDuration: Duration
minTTL: uint64
maxTTL: uint64
proc checkPeerRecord(spr: seq[byte], peerId: PeerId): Result[void, string] =
if spr.len == 0:
return err("Empty peer record")
let signedEnv = ?SignedPeerRecord.decode(spr).mapErr(x => $x)
if signedEnv.data.peerId != peerId:
return err("Bad Peer ID")
return ok()
proc sendRegisterResponse(
conn: Connection, ttl: uint64
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.RegisterResponse,
registerResponse: Opt.some(RegisterResponse(status: Ok, ttl: Opt.some(ttl))),
)
)
await conn.writeLp(msg.buffer)
proc sendRegisterResponseError(
conn: Connection, status: ResponseStatus, text: string = ""
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.RegisterResponse,
registerResponse: Opt.some(RegisterResponse(status: status, text: Opt.some(text))),
)
)
await conn.writeLp(msg.buffer)
proc sendDiscoverResponse(
conn: Connection, s: seq[Register], cookie: Cookie
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: Opt.some(
DiscoverResponse(
status: Ok, registrations: s, cookie: Opt.some(cookie.encode().buffer)
)
),
)
)
await conn.writeLp(msg.buffer)
proc sendDiscoverResponseError(
conn: Connection, status: ResponseStatus, text: string = ""
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: Opt.some(DiscoverResponse(status: status, text: Opt.some(text))),
)
)
await conn.writeLp(msg.buffer)
proc countRegister(rdv: RendezVous, peerId: PeerId): int =
for data in rdv.registered:
if data.peerId == peerId:
result.inc()
proc save(
rdv: RendezVous, ns: string, peerId: PeerId, r: Register, update: bool = true
) =
let nsSalted = ns & rdv.salt
discard rdv.namespaces.hasKeyOrPut(nsSalted, newSeq[int]())
try:
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].peerId == peerId:
if update == false:
return
rdv.registered[index].expiration = rdv.expiredDT
rdv.registered.add(
RegisteredData(
peerId: peerId,
expiration: Moment.now() + r.ttl.get(rdv.minTTL).int64.seconds,
data: r,
)
)
rdv.namespaces[nsSalted].add(rdv.registered.high)
# rdv.registerEvent.fire()
except KeyError as e:
doAssert false, "Should have key: " & e.msg
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
trace "Received Register", peerId = conn.peerId, ns = r.ns
libp2p_rendezvous_register.inc()
if r.ns.len < MinimumNamespaceLen or r.ns.len > MaximumNamespaceLen:
return conn.sendRegisterResponseError(InvalidNamespace)
let ttl = r.ttl.get(rdv.minTTL)
if ttl < rdv.minTTL or ttl > rdv.maxTTL:
return conn.sendRegisterResponseError(InvalidTTL)
let pr = checkPeerRecord(r.signedPeerRecord, conn.peerId)
if pr.isErr():
return conn.sendRegisterResponseError(InvalidSignedPeerRecord, pr.error())
if rdv.countRegister(conn.peerId) >= RegistrationLimitPerPeer:
return conn.sendRegisterResponseError(NotAuthorized, "Registration limit reached")
rdv.save(r.ns, conn.peerId, r)
libp2p_rendezvous_registered.inc()
libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
conn.sendRegisterResponse(ttl)
proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
trace "Received Unregister", peerId = conn.peerId, ns = u.ns
let nsSalted = u.ns & rdv.salt
try:
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].peerId == conn.peerId:
rdv.registered[index].expiration = rdv.expiredDT
libp2p_rendezvous_registered.dec()
except KeyError:
return
proc discover(
rdv: RendezVous, conn: Connection, d: Discover
) {.async: (raises: [CancelledError, LPStreamError]).} =
trace "Received Discover", peerId = conn.peerId, ns = d.ns
libp2p_rendezvous_discover.inc()
if d.ns.isSome() and d.ns.get().len > MaximumNamespaceLen:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
var limit = min(DiscoverLimit, d.limit.get(DiscoverLimit))
var cookie =
if d.cookie.isSome():
try:
Cookie.decode(d.cookie.tryGet()).tryGet()
except CatchableError:
await conn.sendDiscoverResponseError(InvalidCookie)
return
else:
# Start from the current lowest index (inclusive)
Cookie(offset: rdv.registered.low().uint64)
if d.ns.isSome() and cookie.ns.isSome() and cookie.ns.get() != d.ns.get():
# Namespace changed: start from the beginning of that namespace
cookie = Cookie(offset: rdv.registered.low().uint64)
elif cookie.offset < rdv.registered.low().uint64:
# Cookie behind available range: reset to current low
cookie.offset = rdv.registered.low().uint64
elif cookie.offset > (rdv.registered.high() + 1).uint64:
# Cookie ahead of available range: reset to one past current high (empty page)
cookie.offset = (rdv.registered.high() + 1).uint64
let namespaces =
if d.ns.isSome():
try:
rdv.namespaces[d.ns.get() & rdv.salt]
except KeyError:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
else:
toSeq(max(cookie.offset.int, rdv.registered.offset) .. rdv.registered.high())
if namespaces.len() == 0:
await conn.sendDiscoverResponse(@[], Cookie())
return
var nextOffset = cookie.offset
let n = Moment.now()
var s = collect(newSeq()):
for index in namespaces:
var reg = rdv.registered[index]
if limit == 0:
break
if reg.expiration < n or index.uint64 < cookie.offset:
continue
limit.dec()
nextOffset = index.uint64 + 1
reg.data.ttl = Opt.some((reg.expiration - Moment.now()).seconds.uint64)
reg.data
rdv.rng.shuffle(s)
await conn.sendDiscoverResponse(s, Cookie(offset: nextOffset, ns: d.ns))
proc advertisePeer(
rdv: RendezVous, peer: PeerId, msg: seq[byte]
) {.async: (raises: [CancelledError]).} =
proc advertiseWrap() {.async: (raises: []).} =
try:
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer:
await conn.close()
await conn.writeLp(msg)
let
buf = await conn.readLp(4096)
msgRecv = Message.decode(buf).tryGet()
if msgRecv.msgType != MessageType.RegisterResponse:
trace "Unexpected register response", peer, msgType = msgRecv.msgType
elif msgRecv.registerResponse.tryGet().status != ResponseStatus.Ok:
trace "Refuse to register", peer, response = msgRecv.registerResponse
else:
trace "Successfully registered", peer, response = msgRecv.registerResponse
except CatchableError as exc:
trace "exception in the advertise", description = exc.msg
finally:
rdv.sema.release()
await rdv.sema.acquire()
await advertiseWrap()
proc advertise*(
rdv: RendezVous, ns: string, ttl: Duration, peers: seq[PeerId]
) {.async: (raises: [CancelledError, AdvertiseError]).} =
if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
raise newException(AdvertiseError, "Invalid namespace")
if ttl < rdv.minDuration or ttl > rdv.maxDuration:
raise newException(AdvertiseError, "Invalid time to live: " & $ttl)
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
raise newException(AdvertiseError, "Wrong Signed Peer Record")
let
r = Register(ns: ns, signedPeerRecord: sprBuff, ttl: Opt.some(ttl.seconds.uint64))
msg = encode(Message(msgType: MessageType.Register, register: Opt.some(r)))
rdv.save(ns, rdv.switch.peerInfo.peerId, r)
let futs = collect(newSeq()):
for peer in peers:
trace "Send Advertise", peerId = peer, ns
rdv.advertisePeer(peer, msg.buffer).withTimeout(5.seconds)
await allFutures(futs)
method advertise*(
rdv: RendezVous, ns: string, ttl: Duration = rdv.minDuration
) {.base, async: (raises: [CancelledError, AdvertiseError]).} =
await rdv.advertise(ns, ttl, rdv.peers)
proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
let
nsSalted = ns & rdv.salt
n = Moment.now()
try:
collect(newSeq()):
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].expiration > n:
let res = SignedPeerRecord.decode(rdv.registered[index].data.signedPeerRecord).valueOr:
continue
res.data
except KeyError as exc:
@[]
proc request*(
rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int, peers: seq[PeerId]
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
var
s: Table[PeerId, (PeerRecord, Register)]
limit: uint64
d = Discover(ns: ns)
if l <= 0 or l > DiscoverLimit.int:
raise newException(AdvertiseError, "Invalid limit")
if ns.isSome() and ns.get().len > MaximumNamespaceLen:
raise newException(AdvertiseError, "Invalid namespace")
limit = l.uint64
proc requestPeer(
peer: PeerId
) {.async: (raises: [CancelledError, DialFailedError, LPStreamError]).} =
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer:
await conn.close()
d.limit = Opt.some(limit)
d.cookie =
if ns.isSome():
try:
Opt.some(rdv.cookiesSaved[peer][ns.get()])
except KeyError, CatchableError:
Opt.none(seq[byte])
else:
Opt.none(seq[byte])
await conn.writeLp(
encode(Message(msgType: MessageType.Discover, discover: Opt.some(d))).buffer
)
let
buf = await conn.readLp(MaximumMessageLen)
msgRcv = Message.decode(buf).valueOr:
debug "Message undecodable"
return
if msgRcv.msgType != MessageType.DiscoverResponse:
debug "Unexpected discover response", msgType = msgRcv.msgType
return
let resp = msgRcv.discoverResponse.valueOr:
debug "Discover response is empty"
return
if resp.status != ResponseStatus.Ok:
trace "Cannot discover", ns, status = resp.status, text = resp.text
return
resp.cookie.withValue(cookie):
if ns.isSome:
let namespace = ns.get()
if cookie.len() < 1000 and
rdv.cookiesSaved.hasKeyOrPut(peer, {namespace: cookie}.toTable()):
try:
rdv.cookiesSaved[peer][namespace] = cookie
except KeyError:
raiseAssert "checked with hasKeyOrPut"
for r in resp.registrations:
if limit == 0:
return
let ttl = r.ttl.get(rdv.maxTTL + 1)
if ttl > rdv.maxTTL:
continue
let
spr = SignedPeerRecord.decode(r.signedPeerRecord).valueOr:
continue
pr = spr.data
if s.hasKey(pr.peerId):
let (prSaved, rSaved) =
try:
s[pr.peerId]
except KeyError:
raiseAssert "checked with hasKey"
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get(rdv.maxTTL) < ttl) or
prSaved.seqNo < pr.seqNo:
s[pr.peerId] = (pr, r)
else:
s[pr.peerId] = (pr, r)
limit.dec()
if ns.isSome():
for (_, r) in s.values():
rdv.save(ns.get(), peer, r, false)
for peer in peers:
if limit == 0:
break
if RendezVousCodec notin rdv.switch.peerStore[ProtoBook][peer]:
continue
try:
trace "Send Request", peerId = peer, ns
await peer.requestPeer()
except CancelledError as e:
raise e
except DialFailedError as e:
trace "failed to dial a peer", description = e.msg
except LPStreamError as e:
trace "failed to communicate with a peer", description = e.msg
return toSeq(s.values()).mapIt(it[0])
proc request*(
rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
await rdv.request(ns, l, rdv.peers)
proc request*(
rdv: RendezVous, l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
await rdv.request(Opt.none(string), l, rdv.peers)
proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
let nsSalted = ns & rdv.salt
try:
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].peerId == rdv.switch.peerInfo.peerId:
rdv.registered[index].expiration = rdv.expiredDT
except KeyError:
return
proc unsubscribe*(
rdv: RendezVous, ns: string, peerIds: seq[PeerId]
) {.async: (raises: [RendezVousError, CancelledError]).} =
if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
raise newException(RendezVousError, "Invalid namespace")
let msg = encode(
Message(msgType: MessageType.Unregister, unregister: Opt.some(Unregister(ns: ns)))
)
proc unsubscribePeer(peerId: PeerId) {.async: (raises: []).} =
try:
let conn = await rdv.switch.dial(peerId, RendezVousCodec)
defer:
await conn.close()
await conn.writeLp(msg.buffer)
except CatchableError as exc:
trace "exception while unsubscribing", description = exc.msg
let futs = collect(newSeq()):
for peer in peerIds:
unsubscribePeer(peer)
await allFutures(futs)
proc unsubscribe*(
rdv: RendezVous, ns: string
) {.async: (raises: [RendezVousError, CancelledError]).} =
rdv.unsubscribeLocally(ns)
await rdv.unsubscribe(ns, rdv.peers)
proc setup*(rdv: RendezVous, switch: Switch) =
rdv.switch = switch
proc handlePeer(
peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
rdv.peers.add(peerId)
elif event.kind == PeerEventKind.Left:
rdv.peers.keepItIf(it != peerId)
rdv.switch.addPeerEventHandler(handlePeer, Joined)
rdv.switch.addPeerEventHandler(handlePeer, Left)
proc new*(
T: typedesc[RendezVous],
rng: ref HmacDrbgContext = newRng(),
minDuration = MinimumDuration,
maxDuration = MaximumDuration,
): T {.raises: [RendezVousError].} =
if minDuration < MinimumAcceptedDuration:
raise newException(RendezVousError, "TTL too short: 1 minute minimum")
if maxDuration > MaximumDuration:
raise newException(RendezVousError, "TTL too long: 72 hours maximum")
if minDuration >= maxDuration:
raise newException(RendezVousError, "Minimum TTL longer than maximum")
let
minTTL = minDuration.seconds.uint64
maxTTL = maxDuration.seconds.uint64
let rdv = T(
rng: rng,
salt: string.fromBytes(generateBytes(rng[], 8)),
registered: initOffsettedSeq[RegisteredData](),
expiredDT: Moment.now() - 1.days,
#registerEvent: newAsyncEvent(),
sema: newAsyncSemaphore(SemaphoreDefaultSize),
minDuration: minDuration,
maxDuration: maxDuration,
minTTL: minTTL,
maxTTL: maxTTL,
)
logScope:
topics = "libp2p discovery rendezvous"
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
let
buf = await conn.readLp(4096)
msg = Message.decode(buf).tryGet()
case msg.msgType
of MessageType.Register:
await rdv.register(conn, msg.register.tryGet())
of MessageType.RegisterResponse:
trace "Got an unexpected Register Response", response = msg.registerResponse
of MessageType.Unregister:
rdv.unregister(conn, msg.unregister.tryGet())
of MessageType.Discover:
await rdv.discover(conn, msg.discover.tryGet())
of MessageType.DiscoverResponse:
trace "Got an unexpected Discover Response", response = msg.discoverResponse
except CancelledError as exc:
trace "cancelled rendezvous handler"
raise exc
except CatchableError as exc:
trace "exception in rendezvous handler", description = exc.msg
finally:
await conn.close()
rdv.handler = handleStream
rdv.codec = RendezVousCodec
return rdv
proc new*(
T: typedesc[RendezVous],
switch: Switch,
rng: ref HmacDrbgContext = newRng(),
minDuration = MinimumDuration,
maxDuration = MaximumDuration,
): T {.raises: [RendezVousError].} =
let rdv = T.new(rng, minDuration, maxDuration)
rdv.setup(switch)
return rdv
proc deletesRegister*(
rdv: RendezVous, interval = 1.minutes
) {.async: (raises: [CancelledError]).} =
heartbeat "Register timeout", interval:
let n = Moment.now()
var total = 0
rdv.registered.flushIfIt(it.expiration < n)
for data in rdv.namespaces.mvalues():
data.keepItIf(it >= rdv.registered.offset)
total += data.len
libp2p_rendezvous_registered.set(int64(total))
libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
method start*(
rdv: RendezVous
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if not rdv.registerDeletionLoop.isNil:
warn "Starting rendezvous twice"
return fut
rdv.registerDeletionLoop = rdv.deletesRegister()
rdv.started = true
fut
method stop*(rdv: RendezVous): Future[void] {.async: (raises: [], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if rdv.registerDeletionLoop.isNil:
warn "Stopping rendezvous without starting it"
return fut
rdv.started = false
rdv.registerDeletionLoop.cancelSoon()
rdv.registerDeletionLoop = nil
fut
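A rough end-to-end usage sketch of the relocated module (assumes two started switches that are connected to each other and both mount the protocol; setup details elided):

let rdv = RendezVous.new(switch) # one object serves both client and server roles
switch.mount(rdv)
await switch.start()
# ... connect to a peer that also speaks RendezVousCodec ...
await rdv.advertise("my-app", 2.hours)
let records = await rdv.request(Opt.some("my-app"))
for record in records:
  echo record.peerId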

View File

@@ -583,7 +583,8 @@ method handshake*(
)
conn.peerId = pid
var tmp = NoiseConnection.new(conn, conn.peerId, conn.observedAddr)
var tmp =
NoiseConnection.new(conn, conn.peerId, conn.observedAddr, conn.localAddr)
if initiator:
tmp.readCs = handshakeRes.cs2
tmp.writeCs = handshakeRes.cs1

View File

@@ -51,12 +51,14 @@ proc new*(
conn: Connection,
peerId: PeerId,
observedAddr: Opt[MultiAddress],
localAddr: Opt[MultiAddress],
timeout: Duration = DefaultConnectionTimeout,
): T =
result = T(
stream: conn,
peerId: peerId,
observedAddr: observedAddr,
localAddr: localAddr,
closeEvent: conn.closeEvent,
timeout: timeout,
dir: conn.dir,

View File

@@ -62,8 +62,15 @@ proc init*(
dir: Direction,
timeout = DefaultChronosStreamTimeout,
observedAddr: Opt[MultiAddress],
localAddr: Opt[MultiAddress],
): ChronosStream =
result = C(client: client, timeout: timeout, dir: dir, observedAddr: observedAddr)
result = C(
client: client,
timeout: timeout,
dir: dir,
observedAddr: observedAddr,
localAddr: localAddr,
)
result.initStream()
template withExceptions(body: untyped) =
@@ -151,6 +158,19 @@ method closed*(s: ChronosStream): bool =
method atEof*(s: ChronosStream): bool =
s.client.atEof()
method closeWrite*(s: ChronosStream) {.async: (raises: []).} =
## Close the write side of the TCP connection using half-close
if not s.client.closed():
try:
await s.client.shutdownWait()
trace "Write side closed", address = $s.client.remoteAddress(), s
except TransportError:
# Ignore transport errors during shutdown
discard
except CatchableError:
# Ignore other errors during shutdown
discard
method closeImpl*(s: ChronosStream) {.async: (raises: []).} =
trace "Shutting down chronos stream", address = $s.client.remoteAddress(), s

View File

@@ -33,6 +33,7 @@ type
timeoutHandler*: TimeoutHandler # timeout handler
peerId*: PeerId
observedAddr*: Opt[MultiAddress]
localAddr*: Opt[MultiAddress]
protocol*: string # protocol used by the connection, used as metrics tag
transportDir*: Direction # underlying transport (usually socket) direction
when defined(libp2p_agents_metrics):
@@ -40,6 +41,12 @@ type
proc timeoutMonitor(s: Connection) {.async: (raises: []).}
method closeWrite*(s: Connection): Future[void] {.base, async: (raises: []).} =
## Close the write side of the connection
## Subclasses should implement this for their specific transport
## Default implementation just closes the entire connection
await s.close()
func shortLog*(conn: Connection): string =
try:
if conn == nil:
@@ -133,13 +140,17 @@ when defined(libp2p_agents_metrics):
var conn = s
while conn != nil:
conn.shortAgent = shortAgent
conn = conn.getWrapped()
let wrapped = conn.getWrapped()
if wrapped == conn:
break
conn = wrapped
proc new*(
C: type Connection,
peerId: PeerId,
dir: Direction,
observedAddr: Opt[MultiAddress],
observedAddr: Opt[MultiAddress] = Opt.none(MultiAddress),
localAddr: Opt[MultiAddress] = Opt.none(MultiAddress),
timeout: Duration = DefaultConnectionTimeout,
timeoutHandler: TimeoutHandler = nil,
): Connection =
@@ -149,6 +160,7 @@ proc new*(
timeout: timeout,
timeoutHandler: timeoutHandler,
observedAddr: observedAddr,
localAddr: localAddr,
)
result.initStream()
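The base `closeWrite` above makes half-close a first-class operation on `Connection`: transports that support it override it (TCP via `shutdownWait` in ChronosStream, QUIC via the stream's own `closeWrite`), and everything else falls back to a full close. A protocol-side sketch of the intended pattern (illustrative identifiers, assuming `conn` is any `Connection`):

# Send a request, signal EOF on the write side, then drain the reply.
await conn.write(requestBytes)
await conn.closeWrite()
var buf: array[4096, byte]
while not conn.atEof:
  let n = await conn.readOnce(addr buf[0], buf.len)
  if n == 0:
    break
  # process buf[0 ..< n] ...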

View File

@@ -36,48 +36,65 @@ type QuicStream* = ref object of P2PConnection
cached: seq[byte]
proc new(
_: type QuicStream, stream: Stream, oaddr: Opt[MultiAddress], peerId: PeerId
_: type QuicStream,
stream: Stream,
oaddr: Opt[MultiAddress],
laddr: Opt[MultiAddress],
peerId: PeerId,
): QuicStream =
let quicstream = QuicStream(stream: stream, observedAddr: oaddr, peerId: peerId)
let quicstream =
QuicStream(stream: stream, observedAddr: oaddr, localAddr: laddr, peerId: peerId)
procCall P2PConnection(quicstream).initStream()
quicstream
method getWrapped*(self: QuicStream): P2PConnection =
nil
template mapExceptions(body: untyped) =
try:
body
except QuicError:
raise newLPStreamEOFError()
except CatchableError:
raise newLPStreamEOFError()
self
method readOnce*(
stream: QuicStream, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
try:
if stream.cached.len == 0:
if stream.cached.len == 0:
try:
stream.cached = await stream.stream.read()
if stream.cached.len == 0:
raise newLPStreamEOFError()
except CancelledError as exc:
raise exc
except LPStreamEOFError as exc:
raise exc
except CatchableError as exc:
raise (ref LPStreamError)(msg: "error in readOnce: " & exc.msg, parent: exc)
result = min(nbytes, stream.cached.len)
copyMem(pbytes, addr stream.cached[0], result)
stream.cached = stream.cached[result ..^ 1]
libp2p_network_bytes.inc(result.int64, labelValues = ["in"])
except CatchableError as exc:
raise newLPStreamEOFError()
let toRead = min(nbytes, stream.cached.len)
copyMem(pbytes, addr stream.cached[0], toRead)
stream.cached = stream.cached[toRead ..^ 1]
libp2p_network_bytes.inc(toRead.int64, labelValues = ["in"])
return toRead
{.push warning[LockLevel]: off.}
method write*(
stream: QuicStream, bytes: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
mapExceptions(await stream.stream.write(bytes))
libp2p_network_bytes.inc(bytes.len.int64, labelValues = ["out"])
try:
await stream.stream.write(bytes)
libp2p_network_bytes.inc(bytes.len.int64, labelValues = ["out"])
except QuicError:
raise newLPStreamEOFError()
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise
(ref LPStreamError)(msg: "error in quic stream write: " & exc.msg, parent: exc)
{.pop.}
method closeWrite*(stream: QuicStream) {.async: (raises: []).} =
## Close the write side of the QUIC stream
try:
await stream.stream.closeWrite()
except CatchableError as exc:
discard
method closeImpl*(stream: QuicStream) {.async: (raises: []).} =
try:
await stream.stream.close()
@@ -108,7 +125,11 @@ proc getStream*(
stream = await session.connection.openStream()
await stream.write(@[]) # QUIC streams do not exist until data is sent
let qs = QuicStream.new(stream, session.observedAddr, session.peerId)
let qs =
QuicStream.new(stream, session.observedAddr, session.localAddr, session.peerId)
when defined(libp2p_agents_metrics):
qs.shortAgent = session.shortAgent
session.streams.add(qs)
return qs
except CatchableError as exc:
@@ -116,13 +137,20 @@ proc getStream*(
raise (ref QuicTransportError)(msg: "error in getStream: " & exc.msg, parent: exc)
method getWrapped*(self: QuicSession): P2PConnection =
nil
self
# Muxer
type QuicMuxer = ref object of Muxer
quicSession: QuicSession
handleFut: Future[void]
when defined(libp2p_agents_metrics):
method setShortAgent*(m: QuicMuxer, shortAgent: string) =
m.quicSession.shortAgent = shortAgent
for s in m.quicSession.streams:
s.shortAgent = shortAgent
m.connection.shortAgent = shortAgent
method newStream*(
m: QuicMuxer, name: string = "", lazy: bool = false
): Future[P2PConnection] {.
@@ -130,6 +158,8 @@ method newStream*(
.} =
try:
return await m.quicSession.getStream(Direction.Out)
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise newException(MuxerError, "error in newStream: " & exc.msg, exc)
@@ -141,7 +171,7 @@ proc handleStream(m: QuicMuxer, chann: QuicStream) {.async: (raises: []).} =
trace "finished handling stream"
doAssert(chann.closed, "connection not closed by handler!")
except CatchableError as exc:
trace "Exception in mplex stream handler", msg = exc.msg
trace "Exception in quic stream handler", msg = exc.msg
await chann.close()
method handle*(m: QuicMuxer): Future[void] {.async: (raises: []).} =
@@ -150,7 +180,7 @@ method handle*(m: QuicMuxer): Future[void] {.async: (raises: []).} =
let incomingStream = await m.quicSession.getStream(Direction.In)
asyncSpawn m.handleStream(incomingStream)
except CatchableError as exc:
trace "Exception in mplex handler", msg = exc.msg
trace "Exception in quic handler", msg = exc.msg
method close*(m: QuicMuxer) {.async: (raises: []).} =
try:
@@ -167,7 +197,7 @@ type CertGenerator =
type QuicTransport* = ref object of Transport
listener: Listener
client: QuicClient
client: Opt[QuicClient]
privateKey: PrivateKey
connections: seq[P2PConnection]
rng: ref HmacDrbgContext
@@ -220,27 +250,33 @@ method handles*(transport: QuicTransport, address: MultiAddress): bool {.raises:
return false
QUIC_V1.match(address)
method start*(
self: QuicTransport, addrs: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError, CancelledError]).} =
doAssert self.listener.isNil, "start() already called"
#TODO handle multiple addr
proc makeConfig(self: QuicTransport): TLSConfig =
let pubkey = self.privateKey.getPublicKey().valueOr:
doAssert false, "could not obtain public key"
return
try:
if self.rng.isNil:
self.rng = newRng()
let cert = self.certGenerator(KeyPair(seckey: self.privateKey, pubkey: pubkey))
let tlsConfig = TLSConfig.init(
cert.certificate, cert.privateKey, @[alpn], Opt.some(makeCertificateVerifier())
)
return tlsConfig
let cert = self.certGenerator(KeyPair(seckey: self.privateKey, pubkey: pubkey))
let tlsConfig = TLSConfig.init(
cert.certificate, cert.privateKey, @[alpn], Opt.some(makeCertificateVerifier())
)
self.client = QuicClient.init(tlsConfig, rng = self.rng)
self.listener =
QuicServer.init(tlsConfig, rng = self.rng).listen(initTAddress(addrs[0]).tryGet)
proc getRng(self: QuicTransport): ref HmacDrbgContext =
if self.rng.isNil:
self.rng = newRng()
return self.rng
method start*(
self: QuicTransport, addrs: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError, CancelledError]).} =
doAssert self.listener.isNil, "start() already called"
# TODO(#1663): handle multiple addr
try:
self.listener = QuicServer.init(self.makeConfig(), rng = self.getRng()).listen(
initTAddress(addrs[0]).tryGet
)
await procCall Transport(self).start(addrs)
self.addrs[0] =
MultiAddress.init(self.listener.localAddress(), IPPROTO_UDP).tryGet() &
@@ -261,27 +297,36 @@ method start*(
self.running = true
method stop*(transport: QuicTransport) {.async: (raises: []).} =
if transport.running:
for c in transport.connections:
await c.close()
await procCall Transport(transport).stop()
let conns = transport.connections[0 .. ^1]
for c in conns:
await c.close()
if not transport.listener.isNil:
try:
await transport.listener.stop()
except CatchableError as exc:
trace "Error shutting down Quic transport", description = exc.msg
transport.listener.destroy()
transport.running = false
transport.listener = nil
transport.client = Opt.none(QuicClient)
await procCall Transport(transport).stop()
proc wrapConnection(
transport: QuicTransport, connection: QuicConnection
): QuicSession {.raises: [TransportOsError, MaError].} =
let
remoteAddr = connection.remoteAddress()
observedAddr =
MultiAddress.init(remoteAddr, IPPROTO_UDP).get() &
MultiAddress.init(connection.remoteAddress(), IPPROTO_UDP).get() &
MultiAddress.init("/quic-v1").get()
session = QuicSession(connection: connection, observedAddr: Opt.some(observedAddr))
localAddr =
MultiAddress.init(connection.localAddress(), IPPROTO_UDP).get() &
MultiAddress.init("/quic-v1").get()
session = QuicSession(
connection: connection,
observedAddr: Opt.some(observedAddr),
localAddr: Opt.some(localAddr),
)
session.initStream()
@@ -301,12 +346,12 @@ method accept*(
): Future[connection.Connection] {.
async: (raises: [transport.TransportError, CancelledError])
.} =
doAssert not self.listener.isNil, "call start() before calling accept()"
if not self.running:
# stop accept only when transport is stopped (not when error occurs)
raise newException(QuicTransportAcceptStopped, "Quic transport stopped")
doAssert not self.listener.isNil, "call start() before calling accept()"
try:
let connection = await self.listener.accept()
return self.wrapConnection(connection)
@@ -330,7 +375,11 @@ method dial*(
async: (raises: [transport.TransportError, CancelledError])
.} =
try:
let quicConnection = await self.client.dial(initTAddress(address).tryGet)
if not self.client.isSome:
self.client = Opt.some(QuicClient.init(self.makeConfig(), rng = self.getRng()))
let client = self.client.get()
let quicConnection = await client.dial(initTAddress(address).tryGet)
return self.wrapConnection(quicConnection)
except CancelledError as e:
raise e
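
The dial path now creates the QUIC client on first use instead of in start(), so a listen-only transport never allocates one and stop() can reset it to none. The lazy-init pattern in isolation, as a sketch with a stand-in resource (Opt comes from the results package used throughout nim-libp2p):

import results

var client: Opt[string] # stand-in for Opt[QuicClient]

proc getClient(): string =
  if client.isNone:
    client = Opt.some("initialized") # stand-in for QuicClient.init(...)
  client.get()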

View File

@@ -47,6 +47,7 @@ proc connHandler*(
self: TcpTransport,
client: StreamTransport,
observedAddr: Opt[MultiAddress],
localAddr: Opt[MultiAddress],
dir: Direction,
): Connection =
trace "Handling tcp connection",
@@ -59,6 +60,7 @@ proc connHandler*(
client = client,
dir = dir,
observedAddr = observedAddr,
localAddr = localAddr,
timeout = self.connectionsTimeout,
)
)
@@ -267,18 +269,22 @@ method accept*(
safeCloseWait(transp)
raise newTransportClosedError()
let remote =
let (localAddr, observedAddr) =
try:
transp.remoteAddress
(
MultiAddress.init(transp.localAddress).expect(
"Can initialize from local address"
),
MultiAddress.init(transp.remoteAddress).expect(
"Can initialize from remote address"
),
)
except TransportOsError as exc:
# The connection had errors / was closed before `await` returned control
safeCloseWait(transp)
debug "Cannot read remote address", description = exc.msg
debug "Cannot read address", description = exc.msg
return nil
let observedAddr =
MultiAddress.init(remote).expect("Can initialize from remote address")
self.connHandler(transp, Opt.some(observedAddr), Direction.In)
self.connHandler(transp, Opt.some(observedAddr), Opt.some(localAddr), Direction.In)
method dial*(
self: TcpTransport,
@@ -320,14 +326,17 @@ method dial*(
safeCloseWait(transp)
raise newTransportClosedError()
let observedAddr =
let (observedAddr, localAddr) =
try:
MultiAddress.init(transp.remoteAddress).expect("remote address is valid")
(
MultiAddress.init(transp.remoteAddress).expect("remote address is valid"),
MultiAddress.init(transp.localAddress).expect("local address is valid"),
)
except TransportOsError as exc:
safeCloseWait(transp)
raise (ref TcpTransportError)(msg: "MultiAddress.init error in dial: " & exc.msg)
self.connHandler(transp, Opt.some(observedAddr), Direction.Out)
self.connHandler(transp, Opt.some(observedAddr), Opt.some(localAddr), Direction.Out)
method handles*(t: TcpTransport, address: MultiAddress): bool {.raises: [].} =
if procCall Transport(t).handles(address):
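
For context, both TCP paths now derive the local endpoint the same way as the remote one; a sketch of the resulting values for a typical outbound connection (transp is the chronos StreamTransport from the dial, addresses illustrative):

let observed = MultiAddress.init(transp.remoteAddress).expect("remote address is valid")
let local = MultiAddress.init(transp.localAddress).expect("local address is valid")
# e.g. observed = /ip4/192.0.2.1/tcp/4001
#      local    = /ip4/192.0.2.10/tcp/53210  (ephemeral source port)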

View File

@@ -237,7 +237,9 @@ method dial*(
try:
transp = await connectToTorServer(self.transportAddress)
await dialPeer(transp, address)
return self.tcpTransport.connHandler(transp, Opt.none(MultiAddress), Direction.Out)
return self.tcpTransport.connHandler(
transp, Opt.none(MultiAddress), Opt.none(MultiAddress), Direction.Out
)
except CancelledError as e:
safeCloseWait(transp)
raise e

View File

@@ -18,9 +18,9 @@ import
../multicodec,
../muxers/muxer,
../upgrademngrs/upgrade,
../protocols/connectivity/autonat/core
../protocols/connectivity/autonat/types
export core.NetworkReachability
export types.NetworkReachability
logScope:
topics = "libp2p transport"

View File

@@ -55,10 +55,16 @@ proc new*(
session: WSSession,
dir: Direction,
observedAddr: Opt[MultiAddress],
localAddr: Opt[MultiAddress],
timeout = 10.minutes,
): T =
let stream =
T(session: session, timeout: timeout, dir: dir, observedAddr: observedAddr)
let stream = T(
session: session,
timeout: timeout,
dir: dir,
observedAddr: observedAddr,
localAddr: localAddr,
)
stream.initStream()
return stream
@@ -212,11 +218,9 @@ method stop*(self: WsTransport) {.async: (raises: []).} =
trace "Stopping WS transport"
await procCall Transport(self).stop() # call base
checkFutures(
await allFinished(
self.connections[Direction.In].mapIt(it.close()) &
self.connections[Direction.Out].mapIt(it.close())
)
discard await allFinished(
self.connections[Direction.In].mapIt(it.close()) &
self.connections[Direction.Out].mapIt(it.close())
)
var toWait: seq[Future[void]]
@@ -241,9 +245,8 @@ proc connHandler(
self: WsTransport, stream: WSSession, secure: bool, dir: Direction
): Future[Connection] {.async: (raises: [CatchableError]).} =
## Returning CatchableError is fine because we later handle different exceptions.
##
let observedAddr =
let (observedAddr, localAddr) =
try:
let
codec =
@@ -252,15 +255,19 @@ proc connHandler(
else:
MultiAddress.init("/ws")
remoteAddr = stream.stream.reader.tsource.remoteAddress
localAddr = stream.stream.reader.tsource.localAddress
MultiAddress.init(remoteAddr).tryGet() & codec.tryGet()
(
MultiAddress.init(remoteAddr).tryGet() & codec.tryGet(),
MultiAddress.init(localAddr).tryGet() & codec.tryGet(),
)
except CatchableError as exc:
trace "Failed to create observedAddr", description = exc.msg
trace "Failed to create observedAddr or listenAddr", description = exc.msg
if not (isNil(stream) and stream.stream.reader.closed):
safeClose(stream)
raise exc
let conn = WsStream.new(stream, dir, Opt.some(observedAddr))
let conn = WsStream.new(stream, dir, Opt.some(observedAddr), Opt.some(localAddr))
self.connections[dir].add(conn)
proc onClose() {.async: (raises: []).} =
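
The multiaddr composition used above (transport address plus protocol codec) in isolation, as a sketch with illustrative values:

let base = MultiAddress.init("/ip4/192.0.2.1/tcp/443").tryGet()
let full = base & MultiAddress.init("/wss").tryGet()
doAssert $full == "/ip4/192.0.2.1/tcp/443/wss"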

libp2p/utils/ipaddr.nim (new file, 74 lines)
View File

@@ -0,0 +1,74 @@
import net, strutils
import ../switch, ../multiaddress, ../multicodec
proc isIPv4*(ip: IpAddress): bool =
ip.family == IpAddressFamily.IPv4
proc isIPv6*(ip: IpAddress): bool =
ip.family == IpAddressFamily.IPv6
proc isPrivate*(ip: string): bool {.raises: [ValueError].} =
ip.startsWith("10.") or
(ip.startsWith("172.") and parseInt(ip.split(".")[1]) in 16 .. 31) or
ip.startsWith("192.168.") or ip.startsWith("127.") or ip.startsWith("169.254.")
proc isPrivate*(ip: IpAddress): bool {.raises: [ValueError].} =
isPrivate($ip)
proc isPublic*(ip: string): bool {.raises: [ValueError].} =
not isPrivate(ip)
proc isPublic*(ip: IpAddress): bool {.raises: [ValueError].} =
isPublic($ip)
proc getPublicIPAddress*(): IpAddress {.raises: [OSError, ValueError].} =
let ip =
try:
getPrimaryIPAddr()
except OSError as exc:
raise exc
except ValueError as exc:
raise exc
except Exception as exc:
raise newException(OSError, "Could not get primary IP address")
if not ip.isIPv4():
raise newException(ValueError, "Host does not have an IPv4 address")
if not ip.isPublic():
raise newException(ValueError, "Host does not have a public IPv4 address")
ip
proc ipAddrMatches*(
lookup: MultiAddress, addrs: seq[MultiAddress], ip4: bool = true
): bool =
## Checks whether ``lookup``'s IP component matches any address in ``addrs``
let ipType =
if ip4:
multiCodec("ip4")
else:
multiCodec("ip6")
let lookup = lookup.getPart(ipType).valueOr:
return false
for ma in addrs:
ma[0].withValue(ipAddr):
if ipAddr == lookup:
return true
false
proc ipSupport*(addrs: seq[MultiAddress]): (bool, bool) =
## Returns ipv4 and ipv6 support status of a list of MultiAddresses
var ipv4 = false
var ipv6 = false
for ma in addrs:
ma[0].withValue(addrIp):
if IP4.match(addrIp):
ipv4 = true
elif IP6.match(addrIp):
ipv6 = true
(ipv4, ipv6)
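
A usage sketch for the new helpers (import path per the file location; literals illustrative):

import libp2p/utils/ipaddr, libp2p/multiaddress

doAssert isPrivate("192.168.1.10")
doAssert isPublic("8.8.8.8")

let addrs = @[MultiAddress.init("/ip4/10.0.0.1/tcp/4001").tryGet()]
let lookup = MultiAddress.init("/ip4/10.0.0.1/tcp/9000").tryGet()
doAssert ipAddrMatches(lookup, addrs) # same ip4 component; ports are ignored

let (hasV4, hasV6) = ipSupport(addrs) # (true, false) here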

View File

@@ -150,57 +150,6 @@ proc createStreamServer*[T](
except CatchableError as exc:
raise newException(LPError, "failed simpler createStreamServer: " & exc.msg, exc)
proc createAsyncSocket*(ma: MultiAddress): AsyncFD {.raises: [ValueError, LPError].} =
## Create new asynchronous socket using MultiAddress' ``ma`` socket type and
## protocol information.
##
## Returns ``asyncInvalidSocket`` on error.
##
## Note: This procedure only used in `go-libp2p-daemon` wrapper.
##
var
socktype: SockType = SockType.SOCK_STREAM
protocol: Protocol = Protocol.IPPROTO_TCP
let address = initTAddress(ma).tryGet()
if address.family in {AddressFamily.IPv4, AddressFamily.IPv6}:
if ma[1].tryGet().protoCode().tryGet() == multiCodec("udp"):
socktype = SockType.SOCK_DGRAM
protocol = Protocol.IPPROTO_UDP
elif ma[1].tryGet().protoCode().tryGet() == multiCodec("tcp"):
socktype = SockType.SOCK_STREAM
protocol = Protocol.IPPROTO_TCP
elif address.family in {AddressFamily.Unix}:
socktype = SockType.SOCK_STREAM
protocol = cast[Protocol](0)
else:
return asyncInvalidSocket
try:
createAsyncSocket(address.getDomain(), socktype, protocol)
except CatchableError as exc:
raise newException(
LPError, "Convert exception to LPError in createAsyncSocket: " & exc.msg, exc
)
proc bindAsyncSocket*(sock: AsyncFD, ma: MultiAddress): bool {.raises: [LPError].} =
## Bind socket ``sock`` to MultiAddress ``ma``.
##
## Note: This procedure only used in `go-libp2p-daemon` wrapper.
##
var
saddr: Sockaddr_storage
slen: SockLen
let address = initTAddress(ma).tryGet()
toSAddr(address, saddr, slen)
if bindSocket(SocketHandle(sock), cast[ptr SockAddr](addr saddr), slen) == 0:
result = true
else:
result = false
proc getLocalAddress*(sock: AsyncFD): TransportAddress =
## Retrieve local socket ``sock`` address.
##

View File

@@ -17,7 +17,8 @@ WORKDIR /node
COPY --from=build /node/performance/main /node/main
RUN chmod +x main
RUN chmod +x main \
&& apk add --no-cache curl iproute2
VOLUME ["/output"]

View File

@@ -1,4 +1 @@
import chronos
import ./scenarios
waitFor(baseTest())

View File

@@ -11,7 +11,8 @@ fi
# Clean up output
output_dir="$(pwd)/performance/output"
mkdir -p "$output_dir"
rm -f "$output_dir"/*.json
rm -rf "$output_dir"
mkdir -p "$output_dir/sync"
# Run Test Nodes
container_names=()
@@ -21,10 +22,12 @@ for ((i = 0; i < $PEERS; i++)); do
hostname="$hostname_prefix$i"
docker run -d \
--cap-add=NET_ADMIN \
--name "$hostname" \
-e NODE_ID="$i" \
-e HOSTNAME_PREFIX="$hostname_prefix" \
-v "$output_dir:/output" \
-v /var/run/docker.sock:/var/run/docker.sock \
--hostname="$hostname" \
--network="$network" \
test-node > /dev/null

View File

@@ -1,23 +1,37 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import metrics
import metrics/chronos_httpserver
import os
import osproc
import strformat
import strutils
import ../libp2p
import ../libp2p/protocols/ping
import ../tests/helpers
import ./utils
from nativesockets import getHostname
proc baseTest*() {.async.} =
proc baseTest*(scenarioName = "Base test") {.async.} =
# --- Scenario ---
let scenario = scenarioName
const
# --- Scenario ---
scenario = "Base test"
nodeCount = 10
publisherCount = 10
publisherCount = 5
peerLimit = 5
msgCount = 200
msgInterval = 20 # ms
msgSize = 500 # bytes
warmupCount = 20
msgCount = 100
msgInterval = 100 # ms
msgSize = 200 # bytes
warmupCount = 10
# --- Node Setup ---
let
@@ -26,6 +40,17 @@ proc baseTest*() {.async.} =
hostname = getHostname()
rng = libp2p.newRng()
if nodeId == 0:
clearSyncFiles()
# --- Collect docker stats for one publishing and one non-publishing node ---
var dockerStatsProc: Process = nil
if nodeId == 0 or nodeId == publisherCount + 1:
let dockerStatsLogPath = getDockerStatsLogPath(scenario, nodeId)
dockerStatsProc = startDockerStatsProcess(nodeId, dockerStatsLogPath)
defer:
dockerStatsProc.stopDockerStatsProcess()
let (switch, gossipSub, pingProtocol) = setupNode(nodeId, rng)
gossipSub.setGossipSubParams()
@@ -41,13 +66,15 @@ proc baseTest*() {.async.} =
defer:
await switch.stop()
info "Node started, waiting 5s",
info "Node started, synchronizing",
scenario,
nodeId,
address = switch.peerInfo.addrs,
peerId = switch.peerInfo.peerId,
isPublisher = nodeId <= publisherCount,
hostname = hostname
await sleepAsync(5.seconds)
await syncNodes("started", nodeId, nodeCount)
# --- Peer Discovery & Connection ---
var peersAddresses = resolvePeersAddresses(nodeCount, hostnamePrefix, nodeId)
@@ -55,21 +82,115 @@ proc baseTest*() {.async.} =
await connectPeers(switch, peersAddresses, peerLimit, nodeId)
info "Mesh populated, waiting 5s",
info "Mesh populated, synchronizing",
nodeId, meshSize = gossipSub.mesh.getOrDefault(topic).len
await sleepAsync(5.seconds)
await syncNodes("mesh", nodeId, nodeCount)
# --- Message Publishing ---
let sentMessages = await publishMessagesWithWarmup(
gossipSub, warmupCount, msgCount, msgInterval, msgSize, publisherCount, nodeId
)
info "Waiting 2 seconds for message delivery"
await sleepAsync(2.seconds)
info "Waiting for message delivery, synchronizing"
await syncNodes("published", nodeId, nodeCount)
# --- Performance summary ---
let stats = getStats(receivedMessages[], sentMessages)
let stats = getStats(scenario, receivedMessages[], sentMessages)
info "Performance summary", nodeId, stats = $stats
let outputPath = "/output/" & hostname & ".json"
writeResultsToJson(outputPath, scenario, stats)
await syncNodes("finished", nodeId, nodeCount)
suite "Network Performance Tests":
teardown:
checkTrackers()
asyncTest "Base Test":
await baseTest()
asyncTest "Latency Test":
const
latency = 100
jitter = 20
discard execShellCommand(
fmt"{enableTcCommand} netem delay {latency}ms {jitter}ms distribution normal"
)
await baseTest(fmt"Latency {latency}ms {jitter}ms")
discard execShellCommand(disableTcCommand)
asyncTest "Packet Loss Test":
const packetLoss = 5
discard execShellCommand(fmt"{enableTcCommand} netem loss {packetLoss}%")
await baseTest(fmt"Packet Loss {packetLoss}%")
discard execShellCommand(disableTcCommand)
asyncTest "Low Bandwidth Test":
const
rate = "256kbit"
burst = "8kbit"
limit = "5000"
discard
execShellCommand(fmt"{enableTcCommand} tbf rate {rate} burst {burst} limit {limit}")
await baseTest(fmt"Low Bandwidth rate {rate} burst {burst} limit {limit}")
discard execShellCommand(disableTcCommand)
asyncTest "Packet Reorder Test":
const
reorderPercent = 15
reorderCorr = 40
delay = 2
discard execShellCommand(
fmt"{enableTcCommand} netem delay {delay}ms reorder {reorderPercent}% {reorderCorr}%"
)
await baseTest(
fmt"Packet Reorder {reorderPercent}% {reorderCorr}% with {delay}ms delay"
)
discard execShellCommand(disableTcCommand)
asyncTest "Burst Loss Test":
const
lossPercent = 8
lossCorr = 30
discard execShellCommand(fmt"{enableTcCommand} netem loss {lossPercent}% {lossCorr}%")
await baseTest(fmt"Burst Loss {lossPercent}% {lossCorr}%")
discard execShellCommand(disableTcCommand)
asyncTest "Duplication Test":
const duplicatePercent = 2
discard execShellCommand(fmt"{enableTcCommand} netem duplicate {duplicatePercent}%")
await baseTest(fmt"Duplication {duplicatePercent}%")
discard execShellCommand(disableTcCommand)
asyncTest "Corruption Test":
const corruptPercent = 0.5
discard execShellCommand(fmt"{enableTcCommand} netem corrupt {corruptPercent}%")
await baseTest(fmt"Corruption {corruptPercent}%")
discard execShellCommand(disableTcCommand)
asyncTest "Queue Limit Test":
const queueLimit = 5
discard execShellCommand(fmt"{enableTcCommand} netem limit {queueLimit}")
await baseTest(fmt"Queue Limit {queueLimit}")
discard execShellCommand(disableTcCommand)
asyncTest "Combined Network Conditions Test":
discard execShellCommand(
"tc qdisc add dev eth0 root handle 1:0 tbf rate 2mbit burst 32kbit limit 25000"
)
discard execShellCommand(
"tc qdisc add dev eth0 parent 1:1 handle 10: netem delay 100ms 20ms distribution normal loss 5% 20% reorder 10% 30% duplicate 0.5% corrupt 0.05% limit 20"
)
await baseTest("Combined Network Conditions")
discard execShellCommand(disableTcCommand)
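
New scenarios follow the same template: shape traffic with tc, run baseTest under a descriptive name, then reset. A hypothetical additional scenario, for illustration only:

asyncTest "High Jitter Test": # hypothetical, not part of this change
  const
    latency = 50
    jitter = 80
  discard execShellCommand(
    fmt"{enableTcCommand} netem delay {latency}ms {jitter}ms distribution normal"
  )
  await baseTest(fmt"High Jitter {latency}ms {jitter}ms")
  discard execShellCommand(disableTcCommand)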

View File

@@ -0,0 +1,87 @@
import os
import algorithm
import sequtils
import strformat
import strutils
import tables
proc getImgUrlBase(repo: string, publishBranchName: string, plotsPath: string): string =
&"https://raw.githubusercontent.com/{repo}/refs/heads/{publishBranchName}/{plotsPath}"
proc extractTestName(base: string): string =
let parts = base.split("_")
if parts.len >= 2:
parts[^2]
else:
base
proc makeImgTag(imgUrl: string, width: int): string =
&"<img src=\"{imgUrl}\" width=\"{width}\" style=\"margin-right:10px;\" />"
proc prepareLatencyHistoryImage(
imgUrlBase: string, latencyHistoryFilePath: string, width: int = 600
): string =
let latencyImgUrl = &"{imgUrlBase}/{latencyHistoryFilePath}"
makeImgTag(latencyImgUrl, width)
proc prepareDockerStatsImages(
plotDir: string, imgUrlBase: string, branchName: string, width: int = 450
): Table[string, seq[string]] =
## Groups docker stats plot images by test name and returns HTML <img> tags.
var grouped: Table[string, seq[string]]
for path in walkFiles(&"{plotDir}/*.png"):
let plotFile = path.splitPath.tail
let testName = extractTestName(plotFile)
let imgUrl = &"{imgUrlBase}/{branchName}/{plotFile}"
let imgTag = makeImgTag(imgUrl, width)
discard grouped.hasKeyOrPut(testName, @[])
grouped[testName].add(imgTag)
grouped
proc buildSummary(
plotDir: string,
repo: string,
branchName: string,
publishBranchName: string,
plotsPath: string,
latencyHistoryFilePath: string,
): string =
let imgUrlBase = getImgUrlBase(repo, publishBranchName, plotsPath)
var buf: seq[string]
# Latency History section
buf.add("## Latency History")
buf.add(prepareLatencyHistoryImage(imgUrlBase, latencyHistoryFilePath) & "<br>")
buf.add("")
# Performance Plots section
let grouped = prepareDockerStatsImages(plotDir, imgUrlBase, branchName)
buf.add(&"## Performance Plots for {branchName}")
for test in grouped.keys.toSeq().sorted():
let imgs = grouped[test]
buf.add(&"### {test}")
buf.add(imgs.join(" ") & "<br>")
buf.join("\n")
proc main() =
let summaryPath = getEnv("GITHUB_STEP_SUMMARY", "/tmp/step_summary.md")
let repo = getEnv("GITHUB_REPOSITORY", "vacp2p/nim-libp2p")
let branchName = getEnv("BRANCH_NAME", "")
let publishBranchName = getEnv("PUBLISH_BRANCH_NAME", "performance_plots")
let plotsPath = getEnv("PLOTS_PATH", "plots")
let latencyHistoryFilePath =
getEnv("LATENCY_HISTORY_PLOT_FILENAME", "latency_history_all_scenarios.png")
let checkoutSubfolder = getEnv("CHECKOUT_SUBFOLDER", "subplots")
let plotDir = &"{checkoutSubfolder}/{plotsPath}/{branchName}"
let summary = buildSummary(
plotDir, repo, branchName, publishBranchName, plotsPath, latencyHistoryFilePath
)
writeFile(summaryPath, summary)
echo summary
main()
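
The script is configured entirely through environment variables; a usage sketch (values illustrative):

# equivalent to exporting the variables before running the module:
putEnv("GITHUB_REPOSITORY", "vacp2p/nim-libp2p")
putEnv("BRANCH_NAME", "my-feature")
putEnv("GITHUB_STEP_SUMMARY", "/tmp/step_summary.md")
# running main() then writes the markdown summary to that path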

Some files were not shown because too many files have changed in this diff.