mirror of
https://github.com/vacp2p/nim-libp2p.git
synced 2026-01-10 08:08:03 -05:00
Compare commits
109 Commits
pin-websoc
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5f6b8e86a5 | ||
|
|
11b98b7a3f | ||
|
|
647f76341e | ||
|
|
fbf96bb2ce | ||
|
|
f0aaecb743 | ||
|
|
8d3076ea99 | ||
|
|
70b7d61436 | ||
|
|
37bae0986c | ||
|
|
b34ddab10c | ||
|
|
e09457da12 | ||
|
|
94ad1dcbc8 | ||
|
|
5b9f2cba6f | ||
|
|
59e7069c15 | ||
|
|
18a0e9c2d1 | ||
|
|
34a9a03b73 | ||
|
|
788109b4f4 | ||
|
|
44aab92b3e | ||
|
|
ad0812b40b | ||
|
|
0751f240a2 | ||
|
|
d8ecf8a135 | ||
|
|
bab863859c | ||
|
|
73d04def6f | ||
|
|
4509ade75c | ||
|
|
b64c0f6d85 | ||
|
|
582ba7e650 | ||
|
|
31ae734aff | ||
|
|
f26ff88e6c | ||
|
|
4fbf59ece8 | ||
|
|
62388a7a20 | ||
|
|
27051164db | ||
|
|
f41009461b | ||
|
|
c3faabf522 | ||
|
|
10f7f5c68a | ||
|
|
f345026900 | ||
|
|
5d6578a06f | ||
|
|
871a5d047f | ||
|
|
061195195b | ||
|
|
8add5aaaab | ||
|
|
dbf60b74c7 | ||
|
|
d2eaf07960 | ||
|
|
6e5274487e | ||
|
|
7ed62461d7 | ||
|
|
6059ee8332 | ||
|
|
4f7e232a9e | ||
|
|
5eaa43b860 | ||
|
|
17ed2d88df | ||
|
|
c7f29ed5db | ||
|
|
9865cc39b5 | ||
|
|
601f56b786 | ||
|
|
25a8ed4d07 | ||
|
|
955e28ff70 | ||
|
|
f952e6d436 | ||
|
|
bed83880bf | ||
|
|
9bd4b7393f | ||
|
|
12d1fae404 | ||
|
|
17073dc9e0 | ||
|
|
b1649b3566 | ||
|
|
ef20f46b47 | ||
|
|
9161529c84 | ||
|
|
8b70384b6a | ||
|
|
f25814a890 | ||
|
|
3d5ea1fa3c | ||
|
|
2114008704 | ||
|
|
04796b210b | ||
|
|
59faa023aa | ||
|
|
fdebea4e14 | ||
|
|
0c188df806 | ||
|
|
abee5326dc | ||
|
|
71f04d1bb3 | ||
|
|
41ae43ae80 | ||
|
|
5dbf077d9e | ||
|
|
b5fc7582ff | ||
|
|
7f83ebb198 | ||
|
|
ceb89986c1 | ||
|
|
f4ff27ca6b | ||
|
|
b517b692df | ||
|
|
7cfd26035a | ||
|
|
cd5fea53e3 | ||
|
|
d9aa393761 | ||
|
|
a4a0d9e375 | ||
|
|
c8b406d6ed | ||
|
|
f0125a62df | ||
|
|
9bf2636186 | ||
|
|
01a33ebe5c | ||
|
|
c1cd31079b | ||
|
|
9f9f38e314 | ||
|
|
f83638eb82 | ||
|
|
882cb5dfe3 | ||
|
|
81310df2a2 | ||
|
|
34110a37d7 | ||
|
|
1035e4f314 | ||
|
|
d08bad5893 | ||
|
|
7bdba4909f | ||
|
|
e71c7caf82 | ||
|
|
45476bdd6b | ||
|
|
c7ee7b950d | ||
|
|
87b3d2c864 | ||
|
|
19b4c20e2f | ||
|
|
514bd4b5f5 | ||
|
|
46d936b80c | ||
|
|
80bf27c6bb | ||
|
|
6576c5c3bf | ||
|
|
2e6b1d2738 | ||
|
|
9e6c4cb4d2 | ||
|
|
5f256049ab | ||
|
|
e29ca73386 | ||
|
|
577809750a | ||
|
|
46a5430cc2 | ||
|
|
d8b9f59c5e |
34
.github/actions/add_comment/action.yml
vendored
Normal file
34
.github/actions/add_comment/action.yml
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
name: Add Comment
|
||||
description: "Add or update comment in the PR"
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Add/Update Comment
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const fs = require('fs');
|
||||
const marker = "${{ env.MARKER }}";
|
||||
const body = fs.readFileSync("${{ env.COMMENT_SUMMARY_PATH }}", 'utf8');
|
||||
const { data: comments } = await github.rest.issues.listComments({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
});
|
||||
const existing = comments.find(c => c.body && c.body.startsWith(marker));
|
||||
if (existing) {
|
||||
await github.rest.issues.updateComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
comment_id: existing.id,
|
||||
body,
|
||||
});
|
||||
} else {
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body,
|
||||
});
|
||||
}
|
||||
49
.github/actions/discord_notify/action.yml
vendored
Normal file
49
.github/actions/discord_notify/action.yml
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
name: Discord Failure Notification
|
||||
description: "Send Discord notification when CI jobs fail"
|
||||
inputs:
|
||||
webhook_url:
|
||||
description: "Discord webhook URL"
|
||||
required: true
|
||||
workflow_name:
|
||||
description: "Name of the workflow that failed"
|
||||
required: false
|
||||
default: ${{ github.workflow }}
|
||||
branch:
|
||||
description: "Branch name"
|
||||
required: false
|
||||
default: ${{ github.ref_name }}
|
||||
repository:
|
||||
description: "Repository name"
|
||||
required: false
|
||||
default: ${{ github.repository }}
|
||||
run_id:
|
||||
description: "GitHub run ID"
|
||||
required: false
|
||||
default: ${{ github.run_id }}
|
||||
server_url:
|
||||
description: "GitHub server URL"
|
||||
required: false
|
||||
default: ${{ github.server_url }}
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Send Discord notification
|
||||
shell: bash
|
||||
run: |
|
||||
curl -H "Content-Type: application/json" \
|
||||
-X POST \
|
||||
-d "{
|
||||
\"embeds\": [{
|
||||
\"title\": \"${{ inputs.workflow_name }} Job Failed\",
|
||||
\"url\": \"${{ inputs.server_url }}/${{ inputs.repository }}/actions/runs/${{ inputs.run_id }}\",
|
||||
\"description\": \"The workflow has failed on branch \`${{ inputs.branch }}\`\",
|
||||
\"color\": 15158332,
|
||||
\"fields\": [
|
||||
{\"name\": \"Repository\", \"value\": \"${{ inputs.repository }}\", \"inline\": true},
|
||||
{\"name\": \"Branch\", \"value\": \"${{ inputs.branch }}\", \"inline\": true}
|
||||
],
|
||||
\"timestamp\": \"$(date -u +%Y-%m-%dT%H:%M:%S.000Z)\"
|
||||
}]
|
||||
}" \
|
||||
"${{ inputs.webhook_url }}"
|
||||
24
.github/actions/generate_plots/action.yml
vendored
Normal file
24
.github/actions/generate_plots/action.yml
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
name: Generate Plots
|
||||
description: "Set up Python and run script to generate plots with Docker Stats"
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Install Python dependencies
|
||||
shell: bash
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install matplotlib
|
||||
|
||||
- name: Plot Docker Stats
|
||||
shell: bash
|
||||
run: python performance/scripts/plot_docker_stats.py
|
||||
|
||||
- name: Plot Latency History
|
||||
shell: bash
|
||||
run: python performance/scripts/plot_latency_history.py
|
||||
2
.github/actions/install_nim/action.yml
vendored
2
.github/actions/install_nim/action.yml
vendored
@@ -8,7 +8,7 @@ inputs:
|
||||
default: "amd64"
|
||||
nim_ref:
|
||||
description: "Nim version"
|
||||
default: "version-1-6"
|
||||
default: "version-2-0"
|
||||
shell:
|
||||
description: "Shell to run commands in"
|
||||
default: "bash --noprofile --norc -e -o pipefail"
|
||||
|
||||
21
.github/actions/process_stats/action.yml
vendored
Normal file
21
.github/actions/process_stats/action.yml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
name: Process Stats
|
||||
description: "Set up Nim and run scripts to aggregate latency and process raw docker stats"
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Set up Nim
|
||||
uses: jiro4989/setup-nim-action@v2
|
||||
with:
|
||||
nim-version: "2.x"
|
||||
repo-token: ${{ env.GITHUB_TOKEN }}
|
||||
|
||||
- name: Aggregate latency stats and prepare markdown for comment and summary
|
||||
shell: bash
|
||||
run: |
|
||||
nim c -r -d:release -o:/tmp/process_latency_stats ./performance/scripts/process_latency_stats.nim
|
||||
|
||||
- name: Process raw docker stats to csv files
|
||||
shell: bash
|
||||
run: |
|
||||
nim c -r -d:release -o:/tmp/process_docker_stats ./performance/scripts/process_docker_stats.nim
|
||||
36
.github/actions/publish_history/action.yml
vendored
Normal file
36
.github/actions/publish_history/action.yml
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
name: Publish Latency History
|
||||
description: "Publish latency history CSVs in a configurable branch and folder"
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Clone the branch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: ${{ github.repository }}
|
||||
ref: ${{ env.PUBLISH_BRANCH_NAME }}
|
||||
path: ${{ env.CHECKOUT_SUBFOLDER_HISTORY }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Commit & push latency history CSVs
|
||||
shell: bash
|
||||
run: |
|
||||
cd "$CHECKOUT_SUBFOLDER_HISTORY"
|
||||
git fetch origin "$PUBLISH_BRANCH_NAME"
|
||||
git reset --hard "origin/$PUBLISH_BRANCH_NAME"
|
||||
|
||||
mkdir -p "$PUBLISH_DIR_LATENCY_HISTORY"
|
||||
|
||||
cp ../$SHARED_VOLUME_PATH/$LATENCY_HISTORY_PREFIX*.csv "$PUBLISH_DIR_LATENCY_HISTORY/"
|
||||
git add "$PUBLISH_DIR_LATENCY_HISTORY"
|
||||
|
||||
if git diff-index --quiet HEAD --; then
|
||||
echo "No changes to commit"
|
||||
else
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
git config user.name "github-actions[bot]"
|
||||
git commit -m "Update latency history CSVs"
|
||||
git push origin "$PUBLISH_BRANCH_NAME"
|
||||
fi
|
||||
|
||||
cd ..
|
||||
56
.github/actions/publish_plots/action.yml
vendored
Normal file
56
.github/actions/publish_plots/action.yml
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
name: Publish Plots
|
||||
description: "Publish plots in performance_plots branch and add to the workflow summary"
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Clone the performance_plots branch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: ${{ github.repository }}
|
||||
ref: ${{ env.PUBLISH_BRANCH_NAME }}
|
||||
path: ${{ env.CHECKOUT_SUBFOLDER_SUBPLOTS }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Commit & push plots
|
||||
shell: bash
|
||||
run: |
|
||||
cd $CHECKOUT_SUBFOLDER_SUBPLOTS
|
||||
git fetch origin "$PUBLISH_BRANCH_NAME"
|
||||
git reset --hard "origin/$PUBLISH_BRANCH_NAME"
|
||||
|
||||
# Remove any branch folder older than 7 days
|
||||
DAYS=7
|
||||
cutoff=$(( $(date +%s) - DAYS*24*3600 ))
|
||||
scan_dir="${PUBLISH_DIR_PLOTS%/}"
|
||||
find "$scan_dir" -mindepth 1 -maxdepth 1 -type d -print0 \
|
||||
| while IFS= read -r -d $'\0' d; do \
|
||||
ts=$(git log -1 --format=%ct -- "$d" 2>/dev/null || true); \
|
||||
if [ -n "$ts" ] && [ "$ts" -le "$cutoff" ]; then \
|
||||
echo "[cleanup] Deleting: $d"; \
|
||||
rm -rf -- "$d"; \
|
||||
fi; \
|
||||
done
|
||||
|
||||
rm -rf $PUBLISH_DIR_PLOTS/$BRANCH_NAME
|
||||
mkdir -p $PUBLISH_DIR_PLOTS/$BRANCH_NAME
|
||||
|
||||
cp ../$SHARED_VOLUME_PATH/*.png $PUBLISH_DIR_PLOTS/$BRANCH_NAME/ 2>/dev/null || true
|
||||
cp ../$LATENCY_HISTORY_PATH/*.png $PUBLISH_DIR_PLOTS/ 2>/dev/null || true
|
||||
git add -A "$PUBLISH_DIR_PLOTS/"
|
||||
|
||||
git status
|
||||
|
||||
if git diff-index --quiet HEAD --; then
|
||||
echo "No changes to commit"
|
||||
else
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
git config user.name "github-actions[bot]"
|
||||
git commit -m "Update performance plots for $BRANCH_NAME"
|
||||
git push origin $PUBLISH_BRANCH_NAME
|
||||
fi
|
||||
|
||||
- name: Add plots to GitHub Actions summary
|
||||
shell: bash
|
||||
run: |
|
||||
nim c -r -d:release -o:/tmp/add_plots_to_summary ./performance/scripts/add_plots_to_summary.nim
|
||||
17
.github/workflows/ci.yml
vendored
17
.github/workflows/ci.yml
vendored
@@ -25,15 +25,11 @@ jobs:
|
||||
cpu: i386
|
||||
- os: linux-gcc-14
|
||||
cpu: amd64
|
||||
- os: macos
|
||||
cpu: amd64
|
||||
- os: macos-14
|
||||
cpu: arm64
|
||||
- os: windows
|
||||
cpu: amd64
|
||||
nim:
|
||||
- ref: version-1-6
|
||||
memory_management: refc
|
||||
- ref: version-2-0
|
||||
memory_management: refc
|
||||
- ref: version-2-2
|
||||
@@ -47,10 +43,6 @@ jobs:
|
||||
os: linux-gcc-14
|
||||
builder: ubuntu-24.04
|
||||
shell: bash
|
||||
- platform:
|
||||
os: macos
|
||||
builder: macos-13
|
||||
shell: bash
|
||||
- platform:
|
||||
os: macos-14
|
||||
builder: macos-14
|
||||
@@ -80,15 +72,6 @@ jobs:
|
||||
shell: ${{ matrix.shell }}
|
||||
nim_ref: ${{ matrix.nim.ref }}
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '~1.16.0' # That's the minimum Go version that works with arm.
|
||||
|
||||
- name: Install p2pd
|
||||
run: |
|
||||
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
|
||||
|
||||
- name: Restore deps from cache
|
||||
id: deps-cache
|
||||
uses: actions/cache@v3
|
||||
|
||||
25
.github/workflows/daily_amd64.yml
vendored
25
.github/workflows/daily_amd64.yml
vendored
@@ -7,25 +7,36 @@ on:
|
||||
|
||||
jobs:
|
||||
test_amd64_latest:
|
||||
name: Daily amd64 (latest dependencies)
|
||||
name: Daily test amd64 (latest dependencies)
|
||||
uses: ./.github/workflows/daily_common.yml
|
||||
with:
|
||||
nim: "[
|
||||
{'ref': 'version-1-6', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-0', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-0', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-2', 'memory_management': 'refc'},
|
||||
{'ref': 'devel', 'memory_management': 'refc'},
|
||||
]"
|
||||
cpu: "['amd64']"
|
||||
test_amd64_pinned:
|
||||
name: Daily amd64 (pinned dependencies)
|
||||
name: Daily test amd64 (pinned dependencies)
|
||||
uses: ./.github/workflows/daily_common.yml
|
||||
with:
|
||||
pinned_deps: true
|
||||
nim: "[
|
||||
{'ref': 'version-1-6', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-0', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-0', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-2', 'memory_management': 'refc'},
|
||||
{'ref': 'devel', 'memory_management': 'refc'},
|
||||
]"
|
||||
cpu: "['amd64']"
|
||||
cpu: "['amd64']"
|
||||
notify-on-failure:
|
||||
name: Notify Discord on Failure
|
||||
needs: [test_amd64_latest, test_amd64_pinned]
|
||||
if: failure()
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Discord notification
|
||||
uses: ./.github/actions/discord_notify
|
||||
with:
|
||||
webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}
|
||||
10
.github/workflows/daily_common.yml
vendored
10
.github/workflows/daily_common.yml
vendored
@@ -69,16 +69,6 @@ jobs:
|
||||
nim_ref: ${{ matrix.nim.ref }}
|
||||
cpu: ${{ matrix.cpu }}
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '~1.16.0'
|
||||
cache: false
|
||||
|
||||
- name: Install p2pd
|
||||
run: |
|
||||
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
|
||||
|
||||
- name: Install dependencies (pinned)
|
||||
if: ${{ inputs.pinned_deps }}
|
||||
run: |
|
||||
|
||||
35
.github/workflows/daily_i386.yml
vendored
35
.github/workflows/daily_i386.yml
vendored
@@ -6,18 +6,45 @@ on:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
test_i386:
|
||||
name: Daily i386 (Linux)
|
||||
test_i386_latest:
|
||||
name: Daily i386 (latest dependencies)
|
||||
uses: ./.github/workflows/daily_common.yml
|
||||
with:
|
||||
nim: "[
|
||||
{'ref': 'version-1-6', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-0', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-2', 'memory_management': 'refc'},
|
||||
{'ref': 'devel', 'memory_management': 'refc'},
|
||||
]"
|
||||
cpu: "['i386']"
|
||||
exclude: "[
|
||||
{'platform': {'os':'macos'}},
|
||||
{'platform': {'os':'macos'}},
|
||||
{'platform': {'os':'windows'}},
|
||||
]"
|
||||
test_i386_pinned:
|
||||
name: Daily i386 (pinned dependencies)
|
||||
uses: ./.github/workflows/daily_common.yml
|
||||
with:
|
||||
pinned_deps: true
|
||||
nim: "[
|
||||
{'ref': 'version-2-0', 'memory_management': 'refc'},
|
||||
{'ref': 'version-2-2', 'memory_management': 'refc'},
|
||||
{'ref': 'devel', 'memory_management': 'refc'},
|
||||
]"
|
||||
cpu: "['i386']"
|
||||
exclude: "[
|
||||
{'platform': {'os':'macos'}},
|
||||
{'platform': {'os':'windows'}},
|
||||
]"
|
||||
notify-on-failure:
|
||||
name: Notify Discord on Failure
|
||||
needs: [test_i386_latest, test_i386_pinned]
|
||||
if: failure()
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Discord notification
|
||||
uses: ./.github/actions/discord_notify
|
||||
with:
|
||||
webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}
|
||||
39
.github/workflows/daily_nimbus.yml
vendored
Normal file
39
.github/workflows/daily_nimbus.yml
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
name: Daily Nimbus
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "30 6 * * *"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
compile_nimbus:
|
||||
timeout-minutes: 80
|
||||
name: 'Compile Nimbus (linux-amd64)'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Compile nimbus using nim-libp2p
|
||||
run: |
|
||||
git clone --branch unstable --single-branch https://github.com/status-im/nimbus-eth2.git
|
||||
cd nimbus-eth2
|
||||
git submodule set-branch --branch ${{ github.sha }} vendor/nim-libp2p
|
||||
|
||||
make -j"$(nproc)"
|
||||
make -j"$(nproc)" nimbus_beacon_node
|
||||
|
||||
notify-on-failure:
|
||||
name: Notify Discord on Failure
|
||||
needs: compile_nimbus
|
||||
if: failure()
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Discord notification
|
||||
uses: ./.github/actions/discord_notify
|
||||
with:
|
||||
webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}
|
||||
|
||||
12
.github/workflows/dependencies.yml
vendored
12
.github/workflows/dependencies.yml
vendored
@@ -50,4 +50,16 @@ jobs:
|
||||
git branch -D nim-libp2p-auto-bump-${{ matrix.target.ref }} || true
|
||||
git switch -c nim-libp2p-auto-bump-${{ matrix.target.ref }}
|
||||
git push -f origin nim-libp2p-auto-bump-${{ matrix.target.ref }}
|
||||
notify-on-failure:
|
||||
name: Notify Discord on Failure
|
||||
needs: [bumper]
|
||||
if: failure()
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Discord notification
|
||||
uses: ./.github/actions/discord_notify
|
||||
with:
|
||||
webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}
|
||||
2
.github/workflows/documentation.yml
vendored
2
.github/workflows/documentation.yml
vendored
@@ -21,7 +21,7 @@ jobs:
|
||||
|
||||
- uses: jiro4989/setup-nim-action@v1
|
||||
with:
|
||||
nim-version: '1.6.x'
|
||||
nim-version: '2.2.x'
|
||||
|
||||
- name: Generate doc
|
||||
run: |
|
||||
|
||||
2
.github/workflows/examples.yml
vendored
2
.github/workflows/examples.yml
vendored
@@ -36,7 +36,7 @@ jobs:
|
||||
shell: bash
|
||||
os: linux
|
||||
cpu: amd64
|
||||
nim_ref: version-1-6
|
||||
nim_ref: version-2-2
|
||||
|
||||
- name: Restore deps from cache
|
||||
id: deps-cache
|
||||
|
||||
27
.github/workflows/interop.yml
vendored
27
.github/workflows/interop.yml
vendored
@@ -60,3 +60,30 @@ jobs:
|
||||
# s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
|
||||
# s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
|
||||
# aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
|
||||
run-autonatv2-interop:
|
||||
name: Run AutoNATv2 interoperability tests
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: "1.25"
|
||||
|
||||
- name: Set up Nim
|
||||
uses: jiro4989/setup-nim-action@v1
|
||||
with:
|
||||
nim-version: "stable"
|
||||
|
||||
- name: Run Go and Nim together
|
||||
run: |
|
||||
nimble install
|
||||
cd interop/autonatv2/go-peer
|
||||
git clone https://github.com/libp2p/go-libp2p
|
||||
cd go-libp2p
|
||||
git apply ../disable-filtering-of-private-ip-addresses.patch
|
||||
cd ..
|
||||
go run testautonatv2.go &
|
||||
cd ../nim-peer
|
||||
nim r src/nim_peer.nim $(cat ../go-peer/peer.id)
|
||||
|
||||
94
.github/workflows/performance.yml
vendored
Normal file
94
.github/workflows/performance.yml
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
name: Performance
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
merge_group:
|
||||
workflow_dispatch:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
performance:
|
||||
timeout-minutes: 20
|
||||
strategy:
|
||||
fail-fast: false
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
env:
|
||||
VACP2P: "vacp2p"
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
|
||||
PR_NUMBER: ${{ github.event.number }}
|
||||
BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
|
||||
MARKER: "<!-- perf-summary-marker -->"
|
||||
COMMENT_SUMMARY_PATH: "/tmp/perf-summary.md"
|
||||
SHARED_VOLUME_PATH: "performance/output"
|
||||
DOCKER_STATS_PREFIX: "docker_stats_"
|
||||
PUBLISH_BRANCH_NAME: "performance_plots"
|
||||
CHECKOUT_SUBFOLDER_SUBPLOTS: "subplots"
|
||||
PUBLISH_DIR_PLOTS: "plots"
|
||||
CHECKOUT_SUBFOLDER_HISTORY: "history"
|
||||
PUBLISH_DIR_LATENCY_HISTORY: "latency_history"
|
||||
LATENCY_HISTORY_PATH: "history/latency_history"
|
||||
LATENCY_HISTORY_PREFIX: "pr"
|
||||
LATENCY_HISTORY_PLOT_FILENAME: "latency_history_all_scenarios.png"
|
||||
|
||||
name: "Performance"
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build Docker Image with cache
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: performance/Dockerfile
|
||||
tags: test-node:latest
|
||||
load: true
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
- name: Run
|
||||
run: |
|
||||
./performance/runner.sh
|
||||
|
||||
- name: Process latency and docker stats
|
||||
uses: ./.github/actions/process_stats
|
||||
|
||||
- name: Publish history
|
||||
if: github.repository_owner == env.VACP2P
|
||||
uses: ./.github/actions/publish_history
|
||||
|
||||
- name: Generate plots
|
||||
if: github.repository_owner == env.VACP2P
|
||||
uses: ./.github/actions/generate_plots
|
||||
|
||||
- name: Post/Update PR comment
|
||||
if: github.event_name == 'pull_request'
|
||||
uses: ./.github/actions/add_comment
|
||||
|
||||
- name: Upload performance artifacts
|
||||
if: success() || failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: performance-artifacts
|
||||
path: |
|
||||
performance/output/pr*_latency.csv
|
||||
performance/output/*.png
|
||||
history/latency_history/*.png
|
||||
if-no-files-found: ignore
|
||||
retention-days: 7
|
||||
4
.pinned
4
.pinned
@@ -1,5 +1,5 @@
|
||||
bearssl;https://github.com/status-im/nim-bearssl@#34d712933a4e0f91f5e66bc848594a581504a215
|
||||
chronicles;https://github.com/status-im/nim-chronicles@#81a4a7a360c78be9c80c8f735c76b6d4a1517304
|
||||
chronicles;https://github.com/status-im/nim-chronicles@#61759a5e8df8f4d68bcd1b4b8c1adab3e72bbd8d
|
||||
chronos;https://github.com/status-im/nim-chronos@#b55e2816eb45f698ddaca8d8473e401502562db2
|
||||
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
|
||||
faststreams;https://github.com/status-im/nim-faststreams@#c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d
|
||||
@@ -8,7 +8,7 @@ json_serialization;https://github.com/status-im/nim-json-serialization@#2b1c5eb1
|
||||
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
|
||||
ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
|
||||
nimcrypto;https://github.com/cheatfate/nimcrypto@#19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
|
||||
quic;https://github.com/status-im/nim-quic.git@#ca3eda53bee9cef7379be195738ca1490877432f
|
||||
quic;https://github.com/vacp2p/nim-quic@#9370190ded18d78a5a9990f57aa8cbbf947f3891
|
||||
results;https://github.com/arnetheduck/nim-results@#df8113dda4c2d74d460a8fa98252b0b771bf1f27
|
||||
secp256k1;https://github.com/status-im/nim-secp256k1@#f808ed5e7a7bfc42204ec7830f14b7a42b63c284
|
||||
serialization;https://github.com/status-im/nim-serialization@#548d0adc9797a10b2db7f788b804330306293088
|
||||
|
||||
19
README.md
19
README.md
@@ -39,7 +39,7 @@ Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p'
|
||||
|
||||
## Install
|
||||
|
||||
> The currently supported Nim versions are 1.6, 2.0 and 2.2.
|
||||
> The currently supported Nim versions are 2.0 and 2.2.
|
||||
|
||||
```
|
||||
nimble install libp2p
|
||||
@@ -47,7 +47,7 @@ nimble install libp2p
|
||||
You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/). See [examples](./examples) for simple usage patterns.
|
||||
|
||||
## Getting Started
|
||||
Try out the chat example. For this you'll need to have [`go-libp2p-daemon`](examples/go-daemon/daemonapi.md) running. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
|
||||
Try out the chat example. Full code can be found [here](https://github.com/vacp2p/nim-libp2p/blob/master/examples/directchat.nim):
|
||||
|
||||
```bash
|
||||
nim c -r --threads:on examples/directchat.nim
|
||||
@@ -71,18 +71,16 @@ git clone https://github.com/vacp2p/nim-libp2p
|
||||
cd nim-libp2p
|
||||
nimble install -dy
|
||||
```
|
||||
You can use `nix develop` to start a shell with Nim and Nimble.
|
||||
|
||||
nimble 0.20.1 is required for running `testnative`. At time of writing, this is not available in nixpkgs: If using `nix develop`, follow up with `nimble install nimble`, and use that (typically `~/.nimble/bin/nimble`).
|
||||
|
||||
### Testing
|
||||
Run unit tests:
|
||||
```sh
|
||||
# run all the unit tests
|
||||
nimble test
|
||||
```
|
||||
**Obs:** Running all tests requires the [`go-libp2p-daemon` to be installed and running](examples/go-daemon/daemonapi.md).
|
||||
|
||||
If you only want to run tests that don't require `go-libp2p-daemon`, use:
|
||||
```
|
||||
nimble testnative
|
||||
```
|
||||
|
||||
For a list of all available test suites, use:
|
||||
```
|
||||
@@ -97,6 +95,7 @@ The libp2p implementation in Nim is a work in progress. We welcome contributors
|
||||
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
|
||||
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
|
||||
- **Code format**. Code should be formatted with [nph](https://github.com/arnetheduck/nph) and follow the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
|
||||
- **Join the Conversation**. Connect with other contributors in our [community channel](https://discord.com/channels/1204447718093750272/1351621032263417946). Ask questions, share ideas, get support, and stay informed about the latest updates from the maintainers.
|
||||
|
||||
### Contributors
|
||||
<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
|
||||
@@ -150,8 +149,6 @@ List of packages modules implemented in nim-libp2p:
|
||||
| [connmanager](libp2p/connmanager.nim) | Connection manager |
|
||||
| [identify / push identify](libp2p/protocols/identify.nim) | [Identify](https://docs.libp2p.io/concepts/fundamentals/protocols/#identify) protocol |
|
||||
| [ping](libp2p/protocols/ping.nim) | [Ping](https://docs.libp2p.io/concepts/fundamentals/protocols/#ping) protocol |
|
||||
| [libp2p-daemon-client](libp2p/daemon/daemonapi.nim) | [go-daemon](https://github.com/libp2p/go-libp2p-daemon) nim wrapper |
|
||||
| [interop-libp2p](tests/testinterop.nim) | Interop tests |
|
||||
| **Transports** | |
|
||||
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
|
||||
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
|
||||
@@ -195,7 +192,7 @@ The versioning follows [semver](https://semver.org/), with some additions:
|
||||
- Some of libp2p procedures are marked as `.public.`, they will remain compatible during each `MAJOR` version
|
||||
- The rest of the procedures are considered internal, and can change at any `MINOR` version (but remain compatible for each new `PATCH`)
|
||||
|
||||
We aim to be compatible at all time with at least 2 Nim `MINOR` versions, currently `1.6 & 2.0`
|
||||
We aim to be compatible at all time with at least 2 Nim `MINOR` versions, currently `2.0 & 2.2`
|
||||
|
||||
## License
|
||||
|
||||
|
||||
@@ -5,12 +5,13 @@ if dirExists("nimbledeps/pkgs2"):
|
||||
switch("NimblePath", "nimbledeps/pkgs2")
|
||||
|
||||
switch("warningAsError", "UnusedImport:on")
|
||||
switch("warningAsError", "UseBase:on")
|
||||
switch("warning", "CaseTransition:off")
|
||||
switch("warning", "ObservableStores:off")
|
||||
switch("warning", "LockLevel:off")
|
||||
|
||||
--styleCheck:
|
||||
usages
|
||||
switch("warningAsError", "UseBase:on")
|
||||
--styleCheck:
|
||||
error
|
||||
--mm:
|
||||
@@ -23,7 +24,7 @@ if defined(windows) and not defined(vcc):
|
||||
--define:
|
||||
nimRawSetjmp
|
||||
|
||||
# begin Nimble config (version 1)
|
||||
when fileExists("nimble.paths"):
|
||||
# begin Nimble config (version 2)
|
||||
when withDir(thisDir(), system.fileExists("nimble.paths")):
|
||||
include "nimble.paths"
|
||||
# end Nimble config
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
{.used.}
|
||||
## # Circuit Relay example
|
||||
##
|
||||
## Circuit Relay can be used when a node cannot reach another node
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
{.used.}
|
||||
when not (compileOption("threads")):
|
||||
{.fatal: "Please, compile this program with the --threads:on option!".}
|
||||
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
import chronos, nimcrypto, strutils
|
||||
import ../../libp2p/daemon/daemonapi
|
||||
import ../hexdump
|
||||
|
||||
const PubSubTopic = "test-net"
|
||||
|
||||
proc dumpSubscribedPeers(api: DaemonAPI) {.async.} =
|
||||
var peers = await api.pubsubListPeers(PubSubTopic)
|
||||
echo "= List of connected and subscribed peers:"
|
||||
for item in peers:
|
||||
echo item.pretty()
|
||||
|
||||
proc dumpAllPeers(api: DaemonAPI) {.async.} =
|
||||
var peers = await api.listPeers()
|
||||
echo "Current connected peers count = ", len(peers)
|
||||
for item in peers:
|
||||
echo item.peer.pretty()
|
||||
|
||||
proc monitor(api: DaemonAPI) {.async.} =
|
||||
while true:
|
||||
echo "Dumping all peers"
|
||||
await dumpAllPeers(api)
|
||||
await sleepAsync(5000)
|
||||
|
||||
proc main() {.async.} =
|
||||
echo "= Starting P2P bootnode"
|
||||
var api = await newDaemonApi({DHTFull, PSGossipSub})
|
||||
var id = await api.identity()
|
||||
echo "= P2P bootnode ", id.peer.pretty(), " started."
|
||||
let mcip4 = multiCodec("ip4")
|
||||
let mcip6 = multiCodec("ip6")
|
||||
echo "= You can use one of this addresses to bootstrap your nodes:"
|
||||
for item in id.addresses:
|
||||
if item.protoCode() == mcip4 or item.protoCode() == mcip6:
|
||||
echo $item & "/ipfs/" & id.peer.pretty()
|
||||
|
||||
asyncSpawn monitor(api)
|
||||
|
||||
proc pubsubLogger(
|
||||
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
|
||||
): Future[bool] {.async.} =
|
||||
let msglen = len(message.data)
|
||||
echo "= Recieved pubsub message with length ",
|
||||
msglen, " bytes from peer ", message.peer.pretty()
|
||||
echo dumpHex(message.data)
|
||||
await api.dumpSubscribedPeers()
|
||||
result = true
|
||||
|
||||
var ticket = await api.pubsubSubscribe(PubSubTopic, pubsubLogger)
|
||||
|
||||
when isMainModule:
|
||||
waitFor(main())
|
||||
while true:
|
||||
poll()
|
||||
@@ -1,132 +0,0 @@
|
||||
import chronos, nimcrypto, strutils
|
||||
import ../../libp2p/daemon/daemonapi
|
||||
|
||||
## nim c -r --threads:on chat.nim
|
||||
when not (compileOption("threads")):
|
||||
{.fatal: "Please, compile this program with the --threads:on option!".}
|
||||
|
||||
const ServerProtocols = @["/test-chat-stream"]
|
||||
|
||||
type CustomData = ref object
|
||||
api: DaemonAPI
|
||||
remotes: seq[StreamTransport]
|
||||
consoleFd: AsyncFD
|
||||
serveFut: Future[void]
|
||||
|
||||
proc threadMain(wfd: AsyncFD) {.thread.} =
|
||||
## This procedure performs reading from `stdin` and sends data over
|
||||
## pipe to main thread.
|
||||
var transp = fromPipe(wfd)
|
||||
|
||||
while true:
|
||||
var line = stdin.readLine()
|
||||
let res = waitFor transp.write(line & "\r\n")
|
||||
|
||||
proc serveThread(udata: CustomData) {.async.} =
|
||||
## This procedure perform reading on pipe and sends data to remote clients.
|
||||
var transp = fromPipe(udata.consoleFd)
|
||||
|
||||
proc remoteReader(transp: StreamTransport) {.async.} =
|
||||
while true:
|
||||
var line = await transp.readLine()
|
||||
if len(line) == 0:
|
||||
break
|
||||
echo ">> ", line
|
||||
|
||||
while true:
|
||||
try:
|
||||
var line = await transp.readLine()
|
||||
if line.startsWith("/connect"):
|
||||
var parts = line.split(" ")
|
||||
if len(parts) == 2:
|
||||
var peerId = PeerId.init(parts[1])
|
||||
var address = MultiAddress.init(multiCodec("p2p-circuit"))
|
||||
address &= MultiAddress.init(multiCodec("p2p"), peerId)
|
||||
echo "= Searching for peer ", peerId.pretty()
|
||||
var id = await udata.api.dhtFindPeer(peerId)
|
||||
echo "= Peer " & parts[1] & " found at addresses:"
|
||||
for item in id.addresses:
|
||||
echo $item
|
||||
echo "= Connecting to peer ", $address
|
||||
await udata.api.connect(peerId, @[address], 30)
|
||||
echo "= Opening stream to peer chat ", parts[1]
|
||||
var stream = await udata.api.openStream(peerId, ServerProtocols)
|
||||
udata.remotes.add(stream.transp)
|
||||
echo "= Connected to peer chat ", parts[1]
|
||||
asyncSpawn remoteReader(stream.transp)
|
||||
elif line.startsWith("/search"):
|
||||
var parts = line.split(" ")
|
||||
if len(parts) == 2:
|
||||
var peerId = PeerId.init(parts[1])
|
||||
echo "= Searching for peer ", peerId.pretty()
|
||||
var id = await udata.api.dhtFindPeer(peerId)
|
||||
echo "= Peer " & parts[1] & " found at addresses:"
|
||||
for item in id.addresses:
|
||||
echo $item
|
||||
elif line.startsWith("/consearch"):
|
||||
var parts = line.split(" ")
|
||||
if len(parts) == 2:
|
||||
var peerId = PeerId.init(parts[1])
|
||||
echo "= Searching for peers connected to peer ", parts[1]
|
||||
var peers = await udata.api.dhtFindPeersConnectedToPeer(peerId)
|
||||
echo "= Found ", len(peers), " connected to peer ", parts[1]
|
||||
for item in peers:
|
||||
var peer = item.peer
|
||||
var addresses = newSeq[string]()
|
||||
var relay = false
|
||||
for a in item.addresses:
|
||||
addresses.add($a)
|
||||
if a.protoName() == "/p2p-circuit":
|
||||
relay = true
|
||||
break
|
||||
if relay:
|
||||
echo peer.pretty(), " * ", " [", addresses.join(", "), "]"
|
||||
else:
|
||||
echo peer.pretty(), " [", addresses.join(", "), "]"
|
||||
elif line.startsWith("/exit"):
|
||||
break
|
||||
else:
|
||||
var msg = line & "\r\n"
|
||||
echo "<< ", line
|
||||
var pending = newSeq[Future[int]]()
|
||||
for item in udata.remotes:
|
||||
pending.add(item.write(msg))
|
||||
if len(pending) > 0:
|
||||
var results = await all(pending)
|
||||
except CatchableError as err:
|
||||
echo err.msg
|
||||
|
||||
proc main() {.async.} =
|
||||
var data = new CustomData
|
||||
data.remotes = newSeq[StreamTransport]()
|
||||
|
||||
var (rfd, wfd) = createAsyncPipe()
|
||||
if rfd == asyncInvalidPipe or wfd == asyncInvalidPipe:
|
||||
raise newException(ValueError, "Could not initialize pipe!")
|
||||
|
||||
data.consoleFd = rfd
|
||||
|
||||
data.serveFut = serveThread(data)
|
||||
var thread: Thread[AsyncFD]
|
||||
thread.createThread(threadMain, wfd)
|
||||
|
||||
echo "= Starting P2P node"
|
||||
data.api = await newDaemonApi({DHTFull, Bootstrap})
|
||||
await sleepAsync(3000)
|
||||
var id = await data.api.identity()
|
||||
|
||||
proc streamHandler(api: DaemonAPI, stream: P2PStream) {.async.} =
|
||||
echo "= Peer ", stream.peer.pretty(), " joined chat"
|
||||
data.remotes.add(stream.transp)
|
||||
while true:
|
||||
var line = await stream.transp.readLine()
|
||||
if len(line) == 0:
|
||||
break
|
||||
echo ">> ", line
|
||||
|
||||
await data.api.addHandler(ServerProtocols, streamHandler)
|
||||
echo "= Your PeerId is ", id.peer.pretty()
|
||||
await data.serveFut
|
||||
|
||||
when isMainModule:
|
||||
waitFor(main())
|
||||
@@ -1,43 +0,0 @@
|
||||
# Table of Contents
|
||||
- [Introduction](#introduction)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Installation](#installation)
|
||||
- [Script](#script)
|
||||
- [Examples](#examples)
|
||||
|
||||
# Introduction
|
||||
This is a libp2p-backed daemon wrapping the functionalities of go-libp2p for use in Nim. <br>
|
||||
For more information about the go daemon, check out [this repository](https://github.com/libp2p/go-libp2p-daemon).
|
||||
> **Required only** for running the tests.
|
||||
|
||||
# Prerequisites
|
||||
Go with version `1.16.0`
|
||||
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.
|
||||
|
||||
# Installation
|
||||
Run the build script while having the `go` command pointing to the correct Go version.
|
||||
```sh
|
||||
./scripts/build_p2pd.sh
|
||||
```
|
||||
`build_p2pd.sh` will not rebuild unless needed. If you already have the newest binary and you want to force the rebuild, use:
|
||||
```sh
|
||||
./scripts/build_p2pd.sh -f
|
||||
```
|
||||
Or:
|
||||
```sh
|
||||
./scripts/build_p2pd.sh --force
|
||||
```
|
||||
|
||||
If everything goes correctly, the binary (`p2pd`) should be built and placed in the `$GOPATH/bin` directory.
|
||||
If you're having issues, head into [our discord](https://discord.com/channels/864066763682218004/1115526869769535629) and ask for assistance.
|
||||
|
||||
After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
|
||||
```sh
|
||||
export PATH="$PATH:$HOME/go/bin"
|
||||
```
|
||||
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.
|
||||
|
||||
# Examples
|
||||
Examples can be found in the [examples folder](https://github.com/status-im/nim-libp2p/tree/readme/examples/go-daemon)
|
||||
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
import chronos, nimcrypto, strutils, os
|
||||
import ../../libp2p/daemon/daemonapi
|
||||
|
||||
const PubSubTopic = "test-net"
|
||||
|
||||
proc main(bn: string) {.async.} =
|
||||
echo "= Starting P2P node"
|
||||
var bootnodes = bn.split(",")
|
||||
var api = await newDaemonApi(
|
||||
{DHTFull, PSGossipSub, WaitBootstrap}, bootstrapNodes = bootnodes, peersRequired = 1
|
||||
)
|
||||
var id = await api.identity()
|
||||
echo "= P2P node ", id.peer.pretty(), " started:"
|
||||
for item in id.addresses:
|
||||
echo item
|
||||
|
||||
proc pubsubLogger(
|
||||
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
|
||||
): Future[bool] {.async.} =
|
||||
let msglen = len(message.data)
|
||||
echo "= Recieved pubsub message with length ",
|
||||
msglen, " bytes from peer ", message.peer.pretty(), ": "
|
||||
var strdata = cast[string](message.data)
|
||||
echo strdata
|
||||
result = true
|
||||
|
||||
var ticket = await api.pubsubSubscribe(PubSubTopic, pubsubLogger)
|
||||
|
||||
# Waiting for gossipsub interval
|
||||
while true:
|
||||
var peers = await api.pubsubListPeers(PubSubTopic)
|
||||
if len(peers) > 0:
|
||||
break
|
||||
await sleepAsync(1000)
|
||||
|
||||
var data = "HELLO\r\n"
|
||||
var msgData = cast[seq[byte]](data)
|
||||
await api.pubsubPublish(PubSubTopic, msgData)
|
||||
|
||||
when isMainModule:
|
||||
if paramCount() != 1:
|
||||
echo "Please supply bootnodes!"
|
||||
else:
|
||||
waitFor(main(paramStr(1)))
|
||||
while true:
|
||||
poll()
|
||||
@@ -1,3 +1,5 @@
|
||||
{.used.}
|
||||
|
||||
import chronos # an efficient library for async
|
||||
import stew/byteutils # various utils
|
||||
import libp2p
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
{.used.}
|
||||
## # Simple ping tutorial
|
||||
##
|
||||
## Hi all, welcome to the first nim-libp2p tutorial!
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
{.used.}
|
||||
## # Custom protocol in libp2p
|
||||
##
|
||||
## In the [previous tutorial](tutorial_1_connect.md), we've looked at how to create a simple ping program using the `nim-libp2p`.
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
{.used.}
|
||||
## # Protobuf usage
|
||||
##
|
||||
## In the [previous tutorial](tutorial_2_customproto.md), we created a simple "ping" protocol.
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
{.used.}
|
||||
## # GossipSub
|
||||
##
|
||||
## In this tutorial, we'll build a simple GossipSub network
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
{.used.}
|
||||
## # Discovery Manager
|
||||
##
|
||||
## In the [previous tutorial](tutorial_4_gossipsub.md), we built a custom protocol using [protobuf](https://developers.google.com/protocol-buffers) and
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
{.used.}
|
||||
## # Tron example
|
||||
##
|
||||
## In this tutorial, we will create a video game based on libp2p, using
|
||||
|
||||
27
flake.lock
generated
Normal file
27
flake.lock
generated
Normal file
@@ -0,0 +1,27 @@
|
||||
{
|
||||
"nodes": {
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1752620740,
|
||||
"narHash": "sha256-f3pO+9lg66mV7IMmmIqG4PL3223TYMlnlw+pnpelbss=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "32a4e87942101f1c9f9865e04dc3ddb175f5f32e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-25.05",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
34
flake.nix
Normal file
34
flake.nix
Normal file
@@ -0,0 +1,34 @@
|
||||
{
|
||||
description = "nim-libp2p dev shell flake";
|
||||
|
||||
nixConfig = {
|
||||
extra-substituters = [ "https://nix-cache.status.im/" ];
|
||||
extra-trusted-public-keys = [ "nix-cache.status.im-1:x/93lOfLU+duPplwMSBR+OlY4+mo+dCN7n0mr4oPwgY=" ];
|
||||
};
|
||||
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
|
||||
};
|
||||
|
||||
outputs = { self, nixpkgs }:
|
||||
let
|
||||
stableSystems = [
|
||||
"x86_64-linux" "aarch64-linux" "armv7a-linux"
|
||||
"x86_64-darwin" "aarch64-darwin"
|
||||
"x86_64-windows"
|
||||
];
|
||||
forEach = nixpkgs.lib.genAttrs;
|
||||
forAllSystems = forEach stableSystems;
|
||||
pkgsFor = forEach stableSystems (
|
||||
system: import nixpkgs { inherit system; }
|
||||
);
|
||||
in rec {
|
||||
devShells = forAllSystems (system: {
|
||||
default = pkgsFor.${system}.mkShell {
|
||||
nativeBuildInputs = with pkgsFor.${system}; [
|
||||
nim-2_2 nimble openssl.dev
|
||||
];
|
||||
};
|
||||
});
|
||||
};
|
||||
}
|
||||
@@ -0,0 +1,76 @@
|
||||
From 29bac4cd8f28abfb9efb481d800b7c2e855d9b03 Mon Sep 17 00:00:00 2001
|
||||
From: Gabriel Cruz <gabe@gmelodie.com>
|
||||
Date: Wed, 17 Sep 2025 10:42:14 -0300
|
||||
Subject: [PATCH] disable filtering of private ip addresses
|
||||
|
||||
---
|
||||
p2p/protocol/autonatv2/autonat.go | 24 +-----------------------
|
||||
p2p/protocol/autonatv2/server.go | 9 ++++++---
|
||||
2 files changed, 7 insertions(+), 26 deletions(-)
|
||||
|
||||
diff --git a/p2p/protocol/autonatv2/autonat.go b/p2p/protocol/autonatv2/autonat.go
|
||||
index 24883052..00a6211f 100644
|
||||
--- a/p2p/protocol/autonatv2/autonat.go
|
||||
+++ b/p2p/protocol/autonatv2/autonat.go
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
logging "github.com/libp2p/go-libp2p/gologshim"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
- manet "github.com/multiformats/go-multiaddr/net"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -180,21 +179,7 @@ func (an *AutoNAT) Close() {
|
||||
// GetReachability makes a single dial request for checking reachability for requested addresses
|
||||
func (an *AutoNAT) GetReachability(ctx context.Context, reqs []Request) (Result, error) {
|
||||
var filteredReqs []Request
|
||||
- if !an.allowPrivateAddrs {
|
||||
- filteredReqs = make([]Request, 0, len(reqs))
|
||||
- for _, r := range reqs {
|
||||
- if manet.IsPublicAddr(r.Addr) {
|
||||
- filteredReqs = append(filteredReqs, r)
|
||||
- } else {
|
||||
- log.Error("private address in reachability check", "address", r.Addr)
|
||||
- }
|
||||
- }
|
||||
- if len(filteredReqs) == 0 {
|
||||
- return Result{}, ErrPrivateAddrs
|
||||
- }
|
||||
- } else {
|
||||
- filteredReqs = reqs
|
||||
- }
|
||||
+ filteredReqs = reqs
|
||||
an.mx.Lock()
|
||||
now := time.Now()
|
||||
var p peer.ID
|
||||
@@ -215,13 +200,6 @@ func (an *AutoNAT) GetReachability(ctx context.Context, reqs []Request) (Result,
|
||||
log.Debug("reachability check failed", "peer", p, "err", err)
|
||||
return res, fmt.Errorf("reachability check with %s failed: %w", p, err)
|
||||
}
|
||||
- // restore the correct index in case we'd filtered private addresses
|
||||
- for i, r := range reqs {
|
||||
- if r.Addr.Equal(res.Addr) {
|
||||
- res.Idx = i
|
||||
- break
|
||||
- }
|
||||
- }
|
||||
log.Debug("reachability check successful", "peer", p)
|
||||
return res, nil
|
||||
}
|
||||
diff --git a/p2p/protocol/autonatv2/server.go b/p2p/protocol/autonatv2/server.go
|
||||
index 167d3d8e..e6d1e492 100644
|
||||
--- a/p2p/protocol/autonatv2/server.go
|
||||
+++ b/p2p/protocol/autonatv2/server.go
|
||||
@@ -196,9 +197,6 @@ func (as *server) serveDialRequest(s network.Stream) EventDialRequestCompleted {
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
- if !as.allowPrivateAddrs && !manet.IsPublicAddr(a) {
|
||||
- continue
|
||||
- }
|
||||
if !as.dialerHost.Network().CanDial(p, a) {
|
||||
continue
|
||||
}
|
||||
--
|
||||
2.51.0
|
||||
|
||||
97
interop/autonatv2/go-peer/go.mod
Normal file
97
interop/autonatv2/go-peer/go.mod
Normal file
@@ -0,0 +1,97 @@
|
||||
module go-peer
|
||||
|
||||
go 1.25.1
|
||||
|
||||
require github.com/libp2p/go-libp2p v0.43.0
|
||||
replace github.com/libp2p/go-libp2p => ./go-libp2p
|
||||
|
||||
require (
|
||||
github.com/benbjohnson/clock v1.3.5 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
|
||||
github.com/flynn/noise v1.1.0 // indirect
|
||||
github.com/francoispqt/gojay v1.2.13 // indirect
|
||||
github.com/google/gopacket v1.1.19 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/huin/goupnp v1.3.0 // indirect
|
||||
github.com/ipfs/go-cid v0.5.0 // indirect
|
||||
github.com/ipfs/go-log/v2 v2.6.0 // indirect
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||
github.com/koron/go-ssdp v0.0.6 // indirect
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
|
||||
github.com/libp2p/go-flow-metrics v0.2.0 // indirect
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
|
||||
github.com/libp2p/go-msgio v0.3.0 // indirect
|
||||
github.com/libp2p/go-netroute v0.2.2 // indirect
|
||||
github.com/libp2p/go-reuseport v0.4.0 // indirect
|
||||
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/miekg/dns v1.1.66 // indirect
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
|
||||
github.com/minio/sha256-simd v1.0.1 // indirect
|
||||
github.com/mr-tron/base58 v1.2.0 // indirect
|
||||
github.com/multiformats/go-base32 v0.1.0 // indirect
|
||||
github.com/multiformats/go-base36 v0.2.0 // indirect
|
||||
github.com/multiformats/go-multiaddr v0.16.0 // indirect
|
||||
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
|
||||
github.com/multiformats/go-multibase v0.2.0 // indirect
|
||||
github.com/multiformats/go-multicodec v0.9.1 // indirect
|
||||
github.com/multiformats/go-multihash v0.2.3 // indirect
|
||||
github.com/multiformats/go-multistream v0.6.1 // indirect
|
||||
github.com/multiformats/go-varint v0.0.7 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
|
||||
github.com/pion/datachannel v1.5.10 // indirect
|
||||
github.com/pion/dtls/v2 v2.2.12 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.6 // indirect
|
||||
github.com/pion/ice/v4 v4.0.10 // indirect
|
||||
github.com/pion/interceptor v0.1.40 // indirect
|
||||
github.com/pion/logging v0.2.3 // indirect
|
||||
github.com/pion/mdns/v2 v2.0.7 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/rtcp v1.2.15 // indirect
|
||||
github.com/pion/rtp v1.8.19 // indirect
|
||||
github.com/pion/sctp v1.8.39 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.13 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.6 // indirect
|
||||
github.com/pion/stun v0.6.1 // indirect
|
||||
github.com/pion/stun/v3 v3.0.0 // indirect
|
||||
github.com/pion/transport/v2 v2.2.10 // indirect
|
||||
github.com/pion/transport/v3 v3.0.7 // indirect
|
||||
github.com/pion/turn/v4 v4.0.2 // indirect
|
||||
github.com/pion/webrtc/v4 v4.1.2 // indirect
|
||||
github.com/prometheus/client_golang v1.22.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.64.0 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/quic-go/qpack v0.5.1 // indirect
|
||||
github.com/quic-go/quic-go v0.54.0 // indirect
|
||||
github.com/quic-go/webtransport-go v0.9.0 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
github.com/wlynxg/anet v0.0.5 // indirect
|
||||
go.uber.org/dig v1.19.0 // indirect
|
||||
go.uber.org/fx v1.24.0 // indirect
|
||||
go.uber.org/mock v0.5.2 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/crypto v0.39.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
|
||||
golang.org/x/mod v0.25.0 // indirect
|
||||
golang.org/x/net v0.41.0 // indirect
|
||||
golang.org/x/sync v0.15.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/text v0.26.0 // indirect
|
||||
golang.org/x/time v0.12.0 // indirect
|
||||
golang.org/x/tools v0.34.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
lukechampine.com/blake3 v1.4.1 // indirect
|
||||
)
|
||||
441
interop/autonatv2/go-peer/go.sum
Normal file
441
interop/autonatv2/go-peer/go.sum
Normal file
@@ -0,0 +1,441 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
|
||||
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
|
||||
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
|
||||
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
|
||||
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
|
||||
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
|
||||
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
|
||||
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
|
||||
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
||||
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
|
||||
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
|
||||
github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg=
|
||||
github.com/ipfs/go-log/v2 v2.6.0/go.mod h1:p+Efr3qaY5YXpx9TX7MoLCSEZX5boSWj9wh86P5HJa8=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
|
||||
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
|
||||
github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
|
||||
github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
|
||||
github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
|
||||
github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU=
|
||||
github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
|
||||
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
|
||||
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
|
||||
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
|
||||
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
|
||||
github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
|
||||
github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
|
||||
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
|
||||
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
|
||||
github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
|
||||
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
|
||||
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
|
||||
github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
|
||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
|
||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
|
||||
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
|
||||
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
|
||||
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
|
||||
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
|
||||
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
|
||||
github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
|
||||
github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
|
||||
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
|
||||
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
|
||||
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
|
||||
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
|
||||
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
|
||||
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
|
||||
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
||||
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
|
||||
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
|
||||
github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
|
||||
github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
|
||||
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
|
||||
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
|
||||
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
|
||||
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
|
||||
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
|
||||
github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
|
||||
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
|
||||
github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
|
||||
github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
|
||||
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
|
||||
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
|
||||
github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
|
||||
github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
|
||||
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
|
||||
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
|
||||
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
|
||||
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
|
||||
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
|
||||
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
|
||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
|
||||
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
|
||||
github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
|
||||
github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
|
||||
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
|
||||
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
|
||||
github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
|
||||
github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
|
||||
github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
|
||||
github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
|
||||
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
|
||||
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
|
||||
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
|
||||
github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
|
||||
github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
|
||||
github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
|
||||
github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
|
||||
github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
|
||||
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
|
||||
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
|
||||
github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
|
||||
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
|
||||
github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
|
||||
github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
|
||||
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
|
||||
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
|
||||
github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
|
||||
github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
|
||||
github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
|
||||
github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
|
||||
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
|
||||
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
|
||||
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
|
||||
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
|
||||
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
|
||||
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
|
||||
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
|
||||
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
|
||||
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
|
||||
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
|
||||
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
|
||||
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
|
||||
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
|
||||
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
|
||||
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
|
||||
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
|
||||
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
|
||||
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
|
||||
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
|
||||
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
|
||||
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
||||
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
||||
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
|
||||
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
|
||||
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
|
||||
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
|
||||
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
|
||||
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
|
||||
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
|
||||
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
|
||||
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
|
||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
|
||||
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
|
||||
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
|
||||
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
|
||||
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
||||
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
|
||||
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
|
||||
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
||||
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
|
||||
1
interop/autonatv2/go-peer/peer.id
Normal file
1
interop/autonatv2/go-peer/peer.id
Normal file
@@ -0,0 +1 @@
|
||||
12D3KooWSnUDxXeeEnerD1Wf35R5b8bjTMzdAz838aDUUY8GJPGa
|
||||
2
interop/autonatv2/go-peer/peer.key
Normal file
2
interop/autonatv2/go-peer/peer.key
Normal file
@@ -0,0 +1,2 @@
|
||||
@i
|
||||
(>%ËÁø‡®PM”ܘXE~§|# õ“ýºØ®ü\íÇØ¬åsqzïÔDSݺvöLË(±Úð…•(×
|
||||
97
interop/autonatv2/go-peer/testautonatv2.go
Normal file
97
interop/autonatv2/go-peer/testautonatv2.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
libp2p "github.com/libp2p/go-libp2p"
|
||||
crypto "github.com/libp2p/go-libp2p/core/crypto"
|
||||
peer "github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
const (
|
||||
privKeyFile = "peer.key"
|
||||
peerIDFile = "peer.id"
|
||||
)
|
||||
|
||||
func loadOrCreateIdentity() (crypto.PrivKey, peer.ID, error) {
|
||||
if _, err := os.Stat(privKeyFile); err == nil {
|
||||
// Load private key
|
||||
data, err := ioutil.ReadFile(privKeyFile)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("failed to read private key: %w", err)
|
||||
}
|
||||
priv, err := crypto.UnmarshalPrivateKey(data)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("failed to unmarshal private key: %w", err)
|
||||
}
|
||||
|
||||
// Load peer ID as string
|
||||
peerData, err := ioutil.ReadFile(peerIDFile)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("failed to read peer ID: %w", err)
|
||||
}
|
||||
pid, err := peer.Decode(string(peerData))
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("failed to decode peer ID: %w", err)
|
||||
}
|
||||
|
||||
return priv, pid, nil
|
||||
}
|
||||
|
||||
// Create new keypair
|
||||
priv, pub, err := crypto.GenerateEd25519Key(rand.Reader)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("failed to generate keypair: %w", err)
|
||||
}
|
||||
pid, err := peer.IDFromPublicKey(pub)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("failed to derive peer ID: %w", err)
|
||||
}
|
||||
|
||||
// Save private key
|
||||
privBytes, err := crypto.MarshalPrivateKey(priv)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("failed to marshal private key: %w", err)
|
||||
}
|
||||
if err := ioutil.WriteFile(privKeyFile, privBytes, 0600); err != nil {
|
||||
return nil, "", fmt.Errorf("failed to write private key: %w", err)
|
||||
}
|
||||
|
||||
// Save peer ID in canonical string form
|
||||
if err := ioutil.WriteFile(peerIDFile, []byte(pid.String()), 0644); err != nil {
|
||||
return nil, "", fmt.Errorf("failed to write peer ID: %w", err)
|
||||
}
|
||||
|
||||
return priv, pid, nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
priv, pid, err := loadOrCreateIdentity()
|
||||
if err != nil {
|
||||
log.Fatalf("Identity setup failed: %v", err)
|
||||
}
|
||||
|
||||
h, err := libp2p.New(
|
||||
libp2p.Identity(priv),
|
||||
libp2p.EnableAutoNATv2(),
|
||||
libp2p.ListenAddrStrings(
|
||||
"/ip4/0.0.0.0/tcp/4040",
|
||||
"/ip6/::/tcp/4040",
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create host: %v", err)
|
||||
}
|
||||
defer h.Close()
|
||||
|
||||
fmt.Println("Peer ID:", pid.String())
|
||||
fmt.Println("Listen addresses:", h.Addrs())
|
||||
fmt.Println("AutoNATv2 client started.")
|
||||
|
||||
select {}
|
||||
}
|
||||
|
||||
4
interop/autonatv2/nim-peer/config.nims
Normal file
4
interop/autonatv2/nim-peer/config.nims
Normal file
@@ -0,0 +1,4 @@
|
||||
# begin Nimble config (version 2)
|
||||
when withDir(thisDir(), system.fileExists("nimble.paths")):
|
||||
include "nimble.paths"
|
||||
# end Nimble config
|
||||
10
interop/autonatv2/nim-peer/nim_peer.nimble
Normal file
10
interop/autonatv2/nim-peer/nim_peer.nimble
Normal file
@@ -0,0 +1,10 @@
|
||||
version = "0.1.0"
|
||||
author = "Status Research & Development Gmb"
|
||||
description = "AutoNATv2 peer for interop testing"
|
||||
license = "MIT"
|
||||
srcDir = "src"
|
||||
bin = @["nim_peer"]
|
||||
|
||||
# Dependencies
|
||||
|
||||
requires "nim >= 2.3.1", "libp2p"
|
||||
64
interop/autonatv2/nim-peer/src/nim_peer.nim
Normal file
64
interop/autonatv2/nim-peer/src/nim_peer.nim
Normal file
@@ -0,0 +1,64 @@
|
||||
import net, os, chronos, libp2p
|
||||
import libp2p/protocols/connectivity/autonatv2/service
|
||||
import libp2p/protocols/connectivity/autonatv2/types
|
||||
|
||||
proc waitForService(
|
||||
host: string, port: Port, retries: int = 20, delay: Duration = 500.milliseconds
|
||||
): Future[bool] {.async.} =
|
||||
for i in 0 ..< retries:
|
||||
try:
|
||||
var s = newSocket()
|
||||
s.connect(host, port)
|
||||
s.close()
|
||||
return true
|
||||
except OSError:
|
||||
discard
|
||||
await sleepAsync(delay)
|
||||
return false
|
||||
|
||||
proc main() {.async.} =
|
||||
if paramCount() != 1:
|
||||
quit("Usage: nim r src/nim_peer.nim <peerid>", 1)
|
||||
|
||||
# ensure go peer is started
|
||||
await sleepAsync(3.seconds)
|
||||
|
||||
let dstPeerId = PeerId.init(paramStr(1)).get()
|
||||
|
||||
var src = SwitchBuilder
|
||||
.new()
|
||||
.withRng(newRng())
|
||||
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/3030").tryGet()])
|
||||
.withAutonatV2Server()
|
||||
.withAutonatV2(
|
||||
serviceConfig = AutonatV2ServiceConfig.new(scheduleInterval = Opt.some(1.seconds))
|
||||
)
|
||||
.withTcpTransport()
|
||||
.withYamux()
|
||||
.withNoise()
|
||||
.build()
|
||||
|
||||
let awaiter = newFuture[void]()
|
||||
|
||||
proc statusAndConfidenceHandler(
|
||||
networkReachability: NetworkReachability, confidence: Opt[float]
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
if networkReachability != NetworkReachability.Unknown and confidence.isSome() and
|
||||
confidence.get() >= 0.3:
|
||||
if not awaiter.finished:
|
||||
awaiter.complete()
|
||||
|
||||
let service = cast[AutonatV2Service](src.services[1])
|
||||
service.setStatusAndConfidenceHandler(statusAndConfidenceHandler)
|
||||
|
||||
await src.start()
|
||||
await src.connect(dstPeerId, @[MultiAddress.init("/ip4/127.0.0.1/tcp/4040").get()])
|
||||
|
||||
await awaiter
|
||||
echo service.networkReachability
|
||||
|
||||
when isMainModule:
|
||||
if waitFor(waitForService("127.0.0.1", Port(4040))):
|
||||
waitFor(main())
|
||||
else:
|
||||
quit("timeout waiting for service", 1)
|
||||
@@ -1,5 +1,5 @@
|
||||
# syntax=docker/dockerfile:1.5-labs
|
||||
FROM nimlang/nim:1.6.16 as builder
|
||||
FROM nimlang/nim:latest as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
|
||||
@@ -7,11 +7,11 @@ COPY .pinned libp2p.nimble nim-libp2p/
|
||||
|
||||
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev
|
||||
|
||||
RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240dbf11c544011dd0e033d3c3acca56af" -y
|
||||
RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
|
||||
|
||||
COPY . nim-libp2p/
|
||||
|
||||
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs --mm:refc -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./interop/hole-punching/hole_punching.nim
|
||||
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs2 --mm:refc -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./interop/hole-punching/hole_punching.nim
|
||||
|
||||
FROM --platform=linux/amd64 debian:bullseye-slim
|
||||
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2 libssl-dev
|
||||
|
||||
@@ -1,18 +1,18 @@
|
||||
# syntax=docker/dockerfile:1.5-labs
|
||||
FROM nimlang/nim:1.6.16 as builder
|
||||
FROM nimlang/nim:latest as builder
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY .pinned libp2p.nimble nim-libp2p/
|
||||
COPY .pinned libp2p.nimble nim-libp2p/
|
||||
|
||||
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev
|
||||
|
||||
RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240dbf11c544011dd0e033d3c3acca56af" -y
|
||||
RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
|
||||
|
||||
COPY . nim-libp2p/
|
||||
|
||||
RUN \
|
||||
cd nim-libp2p && \
|
||||
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs -p:nim-libp2p --mm:refc -d:libp2p_quic_support -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./interop/transport/main.nim
|
||||
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs2 -p:nim-libp2p --mm:refc -d:libp2p_quic_support -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./interop/transport/main.nim
|
||||
|
||||
ENTRYPOINT ["/app/nim-libp2p/interop/transport/main"]
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
mode = ScriptMode.Verbose
|
||||
|
||||
packageName = "libp2p"
|
||||
version = "1.11.0"
|
||||
version = "1.13.0"
|
||||
author = "Status Research & Development GmbH"
|
||||
description = "LibP2P implementation"
|
||||
license = "MIT"
|
||||
skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
|
||||
|
||||
requires "nim >= 1.6.0",
|
||||
requires "nim >= 2.0.0",
|
||||
"nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
|
||||
"chronicles >= 0.10.3 & < 0.11.0", "chronos >= 4.0.4", "metrics", "secp256k1",
|
||||
"stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.7",
|
||||
"chronicles >= 0.11.0 & < 0.12.0", "chronos >= 4.0.4", "metrics", "secp256k1",
|
||||
"stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.16",
|
||||
"https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"
|
||||
|
||||
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
|
||||
@@ -49,12 +49,6 @@ proc tutorialToMd(filename: string) =
|
||||
task testnative, "Runs libp2p native tests":
|
||||
runTest("testnative")
|
||||
|
||||
task testdaemon, "Runs daemon tests":
|
||||
runTest("testdaemon")
|
||||
|
||||
task testinterop, "Runs interop tests":
|
||||
runTest("testinterop")
|
||||
|
||||
task testpubsub, "Runs pubsub tests":
|
||||
runTest("pubsub/testpubsub", "-d:libp2p_gossipsub_1_4")
|
||||
|
||||
|
||||
@@ -158,6 +158,11 @@ type ACMECertificateResponse* = object
|
||||
rawCertificate*: string
|
||||
certificateExpiry*: DateTime
|
||||
|
||||
type ACMECertificate* = object
|
||||
rawCertificate*: string
|
||||
certificateExpiry*: DateTime
|
||||
certKeyPair*: KeyPair
|
||||
|
||||
when defined(libp2p_autotls_support):
|
||||
import options, sequtils, strutils, jwt, bearssl/pem
|
||||
|
||||
@@ -448,11 +453,16 @@ when defined(libp2p_autotls_support):
|
||||
return await self.checkChallengeCompleted(chalURL, key, kid, retries = retries)
|
||||
|
||||
proc requestFinalize*(
|
||||
self: ACMEApi, domain: Domain, finalize: Uri, key: KeyPair, kid: Kid
|
||||
self: ACMEApi,
|
||||
domain: Domain,
|
||||
finalize: Uri,
|
||||
certKeyPair: KeyPair,
|
||||
key: KeyPair,
|
||||
kid: Kid,
|
||||
): Future[ACMEFinalizeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
handleError("requestFinalize"):
|
||||
let payload = await self.createSignedAcmeRequest(
|
||||
finalize, %*{"csr": createCSR(domain)}, key, kid = Opt.some(kid)
|
||||
finalize, %*{"csr": createCSR(domain, certKeyPair)}, key, kid = Opt.some(kid)
|
||||
)
|
||||
let acmeResponse = await self.post(finalize, payload)
|
||||
# server responds with updated order response
|
||||
@@ -484,11 +494,13 @@ when defined(libp2p_autotls_support):
|
||||
domain: Domain,
|
||||
finalize: Uri,
|
||||
order: Uri,
|
||||
certKeyPair: KeyPair,
|
||||
key: KeyPair,
|
||||
kid: Kid,
|
||||
retries: int = DefaultFinalizeRetries,
|
||||
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
let finalizeResponse = await self.requestFinalize(domain, finalize, key, kid)
|
||||
let finalizeResponse =
|
||||
await self.requestFinalize(domain, finalize, certKeyPair, key, kid)
|
||||
# keep checking order until cert is valid (done)
|
||||
return await self.checkCertFinalized(order, key, kid, retries = retries)
|
||||
|
||||
|
||||
@@ -9,12 +9,9 @@
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import uri
|
||||
import chronos, results, chronicles, stew/byteutils
|
||||
|
||||
import ./api, ./utils
|
||||
import chronicles
|
||||
import ../../crypto/crypto
|
||||
import ../../crypto/rsa
|
||||
import ./api
|
||||
|
||||
export api
|
||||
|
||||
@@ -29,6 +26,11 @@ logScope:
|
||||
topics = "libp2p acme client"
|
||||
|
||||
when defined(libp2p_autotls_support):
|
||||
import uri
|
||||
import chronos, results, stew/byteutils
|
||||
import ../../crypto/rsa
|
||||
import ./utils
|
||||
|
||||
proc new*(
|
||||
T: typedesc[ACMEClient],
|
||||
rng: ref HmacDrbgContext = newRng(),
|
||||
@@ -59,17 +61,20 @@ when defined(libp2p_autotls_support):
|
||||
await self.api.requestChallenge(domains, self.key, await self.getOrInitKid())
|
||||
|
||||
proc getCertificate*(
|
||||
self: ACMEClient, domain: api.Domain, challenge: ACMEChallengeResponseWrapper
|
||||
self: ACMEClient,
|
||||
domain: api.Domain,
|
||||
certKeyPair: KeyPair,
|
||||
challenge: ACMEChallengeResponseWrapper,
|
||||
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
let chalURL = parseUri(challenge.dns01.url)
|
||||
let orderURL = parseUri(challenge.order)
|
||||
let finalizeURL = parseUri(challenge.finalize)
|
||||
trace "sending challenge completed notification"
|
||||
trace "Sending challenge completed notification"
|
||||
discard await self.api.sendChallengeCompleted(
|
||||
chalURL, self.key, await self.getOrInitKid()
|
||||
)
|
||||
|
||||
trace "checking for completed challenge"
|
||||
trace "Checking for completed challenge"
|
||||
let completed = await self.api.checkChallengeCompleted(
|
||||
chalURL, self.key, await self.getOrInitKid()
|
||||
)
|
||||
@@ -78,15 +83,15 @@ when defined(libp2p_autotls_support):
|
||||
ACMEError, "Failed to signal ACME server about challenge completion"
|
||||
)
|
||||
|
||||
trace "waiting for certificate to be finalized"
|
||||
trace "Waiting for certificate to be finalized"
|
||||
let finalized = await self.api.certificateFinalized(
|
||||
domain, finalizeURL, orderURL, self.key, await self.getOrInitKid()
|
||||
domain, finalizeURL, orderURL, certKeyPair, self.key, await self.getOrInitKid()
|
||||
)
|
||||
if not finalized:
|
||||
raise
|
||||
newException(ACMEError, "Failed to finalize certificate for domain " & domain)
|
||||
|
||||
trace "downloading certificate"
|
||||
trace "Downloading certificate"
|
||||
await self.api.downloadCertificate(orderURL)
|
||||
|
||||
proc close*(self: ACMEClient) {.async: (raises: [CancelledError]).} =
|
||||
|
||||
@@ -21,19 +21,20 @@ proc new*(
|
||||
acmeServerURL: parseUri(LetsEncryptURL),
|
||||
)
|
||||
|
||||
method requestNonce*(
|
||||
self: MockACMEApi
|
||||
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
return $self.acmeServerURL & "/acme/1234"
|
||||
when defined(libp2p_autotls_support):
|
||||
method requestNonce*(
|
||||
self: MockACMEApi
|
||||
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
|
||||
return $self.acmeServerURL & "/acme/1234"
|
||||
|
||||
method post*(
|
||||
self: MockACMEApi, uri: Uri, payload: string
|
||||
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
|
||||
result = self.mockedResponses[0]
|
||||
self.mockedResponses.delete(0)
|
||||
method post*(
|
||||
self: MockACMEApi, uri: Uri, payload: string
|
||||
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
|
||||
result = self.mockedResponses[0]
|
||||
self.mockedResponses.delete(0)
|
||||
|
||||
method get*(
|
||||
self: MockACMEApi, uri: Uri
|
||||
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
|
||||
result = self.mockedResponses[0]
|
||||
self.mockedResponses.delete(0)
|
||||
method get*(
|
||||
self: MockACMEApi, uri: Uri
|
||||
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
|
||||
result = self.mockedResponses[0]
|
||||
self.mockedResponses.delete(0)
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
import base64, strutils, chronos/apps/http/httpclient, json
|
||||
import ../../errors
|
||||
import ../../transports/tls/certificate_ffi
|
||||
import ../../transports/tls/certificate
|
||||
import ../../crypto/crypto
|
||||
import ../../crypto/rsa
|
||||
|
||||
type ACMEError* = object of LPError
|
||||
|
||||
when defined(libp2p_autotls_support):
|
||||
import base64, strutils, chronos/apps/http/httpclient, json
|
||||
import ../../transports/tls/certificate_ffi
|
||||
import ../../transports/tls/certificate
|
||||
import ../../crypto/crypto
|
||||
import ../../crypto/rsa
|
||||
|
||||
proc keyOrError*(table: HttpTable, key: string): string {.raises: [ValueError].} =
|
||||
if not table.contains(key):
|
||||
raise newException(ValueError, "key " & key & " not present in headers")
|
||||
@@ -51,19 +52,21 @@ when defined(libp2p_autotls_support):
|
||||
ACMEError, "Unexpected error occurred while getting body bytes", exc
|
||||
)
|
||||
|
||||
proc createCSR*(domain: string): string {.raises: [ACMEError].} =
|
||||
proc createCSR*(
|
||||
domain: string, certKeyPair: KeyPair
|
||||
): string {.raises: [ACMEError].} =
|
||||
var certKey: cert_key_t
|
||||
var certCtx: cert_context_t
|
||||
var derCSR: ptr cert_buffer = nil
|
||||
|
||||
let personalizationStr = "libp2p_autotls"
|
||||
if cert_init_drbg(
|
||||
personalizationStr.cstring, personalizationStr.len.csize_t, certCtx.addr
|
||||
) != CERT_SUCCESS:
|
||||
raise newException(ACMEError, "Failed to initialize certCtx")
|
||||
if cert_generate_key(certCtx, certKey.addr) != CERT_SUCCESS:
|
||||
raise newException(ACMEError, "Failed to generate cert key")
|
||||
# convert KeyPair to cert_key_t
|
||||
let rawSeckey: seq[byte] = certKeyPair.seckey.getRawBytes.valueOr:
|
||||
raise newException(ACMEError, "Failed to get seckey raw bytes (DER)")
|
||||
let seckeyBuffer = rawSeckey.toCertBuffer()
|
||||
if cert_new_key_t(seckeyBuffer.unsafeAddr, certKey.addr) != CERT_SUCCESS:
|
||||
raise newException(ACMEError, "Failed to convert key pair to cert_key_t")
|
||||
|
||||
# create CSR
|
||||
if cert_signing_req(domain.cstring, certKey, derCSR.addr) != CERT_SUCCESS:
|
||||
raise newException(ACMEError, "Failed to create CSR")
|
||||
|
||||
|
||||
33
libp2p/autotls/mockservice.nim
Normal file
33
libp2p/autotls/mockservice.nim
Normal file
@@ -0,0 +1,33 @@
|
||||
when defined(libp2p_autotls_support):
|
||||
import ./service, ./acme/client, ../peeridauth/client
|
||||
|
||||
import ../crypto/crypto, ../crypto/rsa, websock/websock
|
||||
|
||||
type MockAutotlsService* = ref object of AutotlsService
|
||||
mockedCert*: TLSCertificate
|
||||
mockedKey*: TLSPrivateKey
|
||||
|
||||
proc new*(
|
||||
T: typedesc[MockAutotlsService],
|
||||
rng: ref HmacDrbgContext = newRng(),
|
||||
config: AutotlsConfig = AutotlsConfig.new(),
|
||||
): T =
|
||||
T(
|
||||
acmeClient:
|
||||
ACMEClient.new(api = ACMEApi.new(acmeServerURL = config.acmeServerURL)),
|
||||
brokerClient: PeerIDAuthClient.new(),
|
||||
bearer: Opt.none(BearerToken),
|
||||
cert: Opt.none(AutotlsCert),
|
||||
certReady: newAsyncEvent(),
|
||||
running: newAsyncEvent(),
|
||||
config: config,
|
||||
rng: rng,
|
||||
)
|
||||
|
||||
method getCertWhenReady*(
|
||||
self: MockAutotlsService
|
||||
): Future[AutotlsCert] {.async: (raises: [AutoTLSError, CancelledError]).} =
|
||||
AutotlsCert.new(self.mockedCert, self.mockedKey, Moment.now)
|
||||
|
||||
method setup*(self: MockAutotlsService) {.base, async.} =
|
||||
self.running.fire()
|
||||
@@ -10,19 +10,17 @@
|
||||
{.push raises: [].}
|
||||
{.push public.}
|
||||
|
||||
import net, results, json, sequtils
|
||||
|
||||
import chronos/apps/http/httpclient, chronos, chronicles, bearssl/rand
|
||||
import chronos, chronicles, net, results
|
||||
import chronos/apps/http/httpclient, bearssl/rand
|
||||
|
||||
import
|
||||
./acme/client,
|
||||
./utils,
|
||||
../crypto/crypto,
|
||||
../nameresolving/dnsresolver,
|
||||
../nameresolving/nameresolver,
|
||||
../peeridauth/client,
|
||||
../peerinfo,
|
||||
../switch,
|
||||
../utils/heartbeat,
|
||||
../peerinfo,
|
||||
../wire
|
||||
|
||||
logScope:
|
||||
@@ -40,6 +38,9 @@ const
|
||||
DefaultRenewCheckTime* = 1.hours
|
||||
DefaultRenewBufferTime = 1.hours
|
||||
|
||||
DefaultIssueRetries = 3
|
||||
DefaultIssueRetryTime = 1.seconds
|
||||
|
||||
AutoTLSBroker* = "registration.libp2p.direct"
|
||||
AutoTLSDNSServer* = "libp2p.direct"
|
||||
HttpOk* = 200
|
||||
@@ -53,35 +54,54 @@ type SigParam = object
|
||||
|
||||
type AutotlsCert* = ref object
|
||||
cert*: TLSCertificate
|
||||
privkey*: TLSPrivateKey
|
||||
expiry*: Moment
|
||||
|
||||
type AutotlsConfig* = ref object
|
||||
acmeServerURL*: Uri
|
||||
dnsResolver*: DnsResolver
|
||||
nameResolver*: NameResolver
|
||||
ipAddress: Opt[IpAddress]
|
||||
renewCheckTime*: Duration
|
||||
renewBufferTime*: Duration
|
||||
issueRetries*: int
|
||||
issueRetryTime*: Duration
|
||||
|
||||
type AutotlsService* = ref object of Service
|
||||
acmeClient: ACMEClient
|
||||
acmeClient*: ACMEClient
|
||||
brokerClient*: PeerIDAuthClient
|
||||
bearer*: Opt[BearerToken]
|
||||
brokerClient: PeerIDAuthClient
|
||||
cert*: Opt[AutotlsCert]
|
||||
certReady*: AsyncEvent
|
||||
config: AutotlsConfig
|
||||
running*: AsyncEvent
|
||||
config*: AutotlsConfig
|
||||
managerFut: Future[void]
|
||||
peerInfo: PeerInfo
|
||||
rng: ref HmacDrbgContext
|
||||
rng*: ref HmacDrbgContext
|
||||
|
||||
when defined(libp2p_autotls_support):
|
||||
proc new*(T: typedesc[AutotlsCert], cert: TLSCertificate, expiry: Moment): T =
|
||||
T(cert: cert, expiry: expiry)
|
||||
import json, sequtils, bearssl/pem
|
||||
|
||||
proc getCertWhenReady*(
|
||||
import
|
||||
../crypto/rsa,
|
||||
../utils/heartbeat,
|
||||
../transports/transport,
|
||||
../utils/ipaddr,
|
||||
../transports/tcptransport,
|
||||
../nameresolving/dnsresolver
|
||||
|
||||
proc new*(
|
||||
T: typedesc[AutotlsCert],
|
||||
cert: TLSCertificate,
|
||||
privkey: TLSPrivateKey,
|
||||
expiry: Moment,
|
||||
): T =
|
||||
T(cert: cert, privkey: privkey, expiry: expiry)
|
||||
|
||||
method getCertWhenReady*(
|
||||
self: AutotlsService
|
||||
): Future[TLSCertificate] {.async: (raises: [AutoTLSError, CancelledError]).} =
|
||||
): Future[AutotlsCert] {.base, async: (raises: [AutoTLSError, CancelledError]).} =
|
||||
await self.certReady.wait()
|
||||
return self.cert.get.cert
|
||||
return self.cert.get
|
||||
|
||||
proc new*(
|
||||
T: typedesc[AutotlsConfig],
|
||||
@@ -90,13 +110,17 @@ when defined(libp2p_autotls_support):
|
||||
acmeServerURL: Uri = parseUri(LetsEncryptURL),
|
||||
renewCheckTime: Duration = DefaultRenewCheckTime,
|
||||
renewBufferTime: Duration = DefaultRenewBufferTime,
|
||||
issueRetries: int = DefaultIssueRetries,
|
||||
issueRetryTime: Duration = DefaultIssueRetryTime,
|
||||
): T =
|
||||
T(
|
||||
dnsResolver: DnsResolver.new(nameServers),
|
||||
nameResolver: DnsResolver.new(nameServers),
|
||||
acmeServerURL: acmeServerURL,
|
||||
ipAddress: ipAddress,
|
||||
renewCheckTime: renewCheckTime,
|
||||
renewBufferTime: renewBufferTime,
|
||||
issueRetries: issueRetries,
|
||||
issueRetryTime: issueRetryTime,
|
||||
)
|
||||
|
||||
proc new*(
|
||||
@@ -111,6 +135,7 @@ when defined(libp2p_autotls_support):
|
||||
bearer: Opt.none(BearerToken),
|
||||
cert: Opt.none(AutotlsCert),
|
||||
certReady: newAsyncEvent(),
|
||||
running: newAsyncEvent(),
|
||||
config: config,
|
||||
managerFut: nil,
|
||||
peerInfo: nil,
|
||||
@@ -123,11 +148,13 @@ when defined(libp2p_autotls_support):
|
||||
trace "Setting up AutotlsService"
|
||||
let hasBeenSetup = await procCall Service(self).setup(switch)
|
||||
if hasBeenSetup:
|
||||
self.peerInfo = switch.peerInfo
|
||||
if self.config.ipAddress.isNone():
|
||||
try:
|
||||
self.config.ipAddress = Opt.some(getPublicIPAddress())
|
||||
except AutoTLSError as exc:
|
||||
except ValueError as exc:
|
||||
error "Failed to get public IP address", err = exc.msg
|
||||
return false
|
||||
except OSError as exc:
|
||||
error "Failed to get public IP address", err = exc.msg
|
||||
return false
|
||||
self.managerFut = self.run(switch)
|
||||
@@ -135,12 +162,14 @@ when defined(libp2p_autotls_support):
|
||||
|
||||
method issueCertificate(
|
||||
self: AutotlsService
|
||||
) {.
|
||||
): Future[bool] {.
|
||||
base, async: (raises: [AutoTLSError, ACMEError, PeerIDAuthError, CancelledError])
|
||||
.} =
|
||||
trace "Issuing certificate"
|
||||
|
||||
assert not self.peerInfo.isNil(), "Cannot issue new certificate: peerInfo not set"
|
||||
if self.peerInfo.isNil():
|
||||
error "Cannot issue new certificate: peerInfo not set"
|
||||
return false
|
||||
|
||||
# generate autotls domain string: "*.{peerID}.libp2p.direct"
|
||||
let baseDomain =
|
||||
@@ -151,8 +180,15 @@ when defined(libp2p_autotls_support):
|
||||
|
||||
trace "Requesting ACME challenge"
|
||||
let dns01Challenge = await acmeClient.getChallenge(@[domain])
|
||||
trace "Generating key authorization"
|
||||
let keyAuth = acmeClient.genKeyAuthorization(dns01Challenge.dns01.token)
|
||||
let strMultiaddresses: seq[string] = self.peerInfo.addrs.mapIt($it)
|
||||
|
||||
let addrs = await self.peerInfo.expandAddrs()
|
||||
if addrs.len == 0:
|
||||
error "Unable to authenticate with broker: no addresses"
|
||||
return false
|
||||
|
||||
let strMultiaddresses: seq[string] = addrs.mapIt($it)
|
||||
let payload = %*{"value": keyAuth, "addresses": strMultiaddresses}
|
||||
let registrationURL = parseUri("https://" & AutoTLSBroker & "/v1/_acme-challenge")
|
||||
|
||||
@@ -163,57 +199,86 @@ when defined(libp2p_autotls_support):
|
||||
# save bearer token for future
|
||||
self.bearer = Opt.some(bearer)
|
||||
if response.status != HttpOk:
|
||||
raise newException(
|
||||
AutoTLSError, "Failed to authenticate with AutoTLS Broker at " & AutoTLSBroker
|
||||
)
|
||||
error "Failed to authenticate with AutoTLS Broker at " & AutoTLSBroker
|
||||
debug "Broker message",
|
||||
body = bytesToString(response.body), peerinfo = self.peerInfo
|
||||
return false
|
||||
|
||||
debug "Waiting for DNS record to be set"
|
||||
let dashedIpAddr = ($self.config.ipAddress.get()).replace(".", "-")
|
||||
let acmeChalDomain = api.Domain("_acme-challenge." & baseDomain)
|
||||
let ip4Domain = api.Domain(dashedIpAddr & "." & baseDomain)
|
||||
debug "Waiting for DNS record to be set", ip = ip4Domain, acme = acmeChalDomain
|
||||
let dnsSet = await checkDNSRecords(
|
||||
self.config.dnsResolver, self.config.ipAddress.get(), baseDomain, keyAuth
|
||||
self.config.nameResolver, self.config.ipAddress.get(), baseDomain, keyAuth
|
||||
)
|
||||
if not dnsSet:
|
||||
raise newException(AutoTLSError, "DNS records not set")
|
||||
error "DNS records not set"
|
||||
return false
|
||||
|
||||
debug "Notifying challenge completion to ACME and downloading cert"
|
||||
let certResponse = await acmeClient.getCertificate(domain, dns01Challenge)
|
||||
trace "Notifying challenge completion to ACME and downloading cert"
|
||||
let certKeyPair = KeyPair.random(PKScheme.RSA, self.rng[]).get()
|
||||
|
||||
debug "Installing certificate"
|
||||
let certificate =
|
||||
await acmeClient.getCertificate(domain, certKeyPair, dns01Challenge)
|
||||
|
||||
let derPrivKey = certKeyPair.seckey.rsakey.getBytes.valueOr:
|
||||
raise newException(AutoTLSError, "Unable to get TLS private key")
|
||||
let pemPrivKey: string = derPrivKey.pemEncode("PRIVATE KEY")
|
||||
debug "autotls cert", pemPrivKey = pemPrivKey, cert = certificate.rawCertificate
|
||||
|
||||
trace "Installing certificate"
|
||||
let newCert =
|
||||
try:
|
||||
AutotlsCert.new(
|
||||
TLSCertificate.init(certResponse.rawCertificate),
|
||||
asMoment(certResponse.certificateExpiry),
|
||||
TLSCertificate.init(certificate.rawCertificate),
|
||||
TLSPrivateKey.init(pemPrivKey),
|
||||
asMoment(certificate.certificateExpiry),
|
||||
)
|
||||
except TLSStreamProtocolError:
|
||||
raise newException(AutoTLSError, "Could not parse downloaded certificates")
|
||||
error "Could not parse downloaded certificates"
|
||||
return false
|
||||
self.cert = Opt.some(newCert)
|
||||
self.certReady.fire()
|
||||
debug "Certificate installed"
|
||||
trace "Certificate installed"
|
||||
true
|
||||
|
||||
proc hasTcpStarted(switch: Switch): bool =
|
||||
switch.transports.filterIt(it of TcpTransport and it.running).len == 0
|
||||
|
||||
proc tryIssueCertificate(self: AutotlsService) {.async: (raises: [CancelledError]).} =
|
||||
for _ in 0 ..< self.config.issueRetries:
|
||||
try:
|
||||
if await self.issueCertificate():
|
||||
return
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
error "Failed to issue certificate", err = exc.msg
|
||||
await sleepAsync(self.config.issueRetryTime)
|
||||
error "Failed to issue certificate"
|
||||
|
||||
method run*(
|
||||
self: AutotlsService, switch: Switch
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
trace "Starting Autotls management"
|
||||
self.running.fire()
|
||||
self.peerInfo = switch.peerInfo
|
||||
|
||||
# ensure that there's at least one TcpTransport running
|
||||
# for communicating with autotls broker
|
||||
if switch.hasTcpStarted():
|
||||
error "Could not find a running TcpTransport in switch"
|
||||
return
|
||||
|
||||
heartbeat "Certificate Management", self.config.renewCheckTime:
|
||||
if self.cert.isNone():
|
||||
try:
|
||||
await self.issueCertificate()
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
error "Failed to issue certificate", err = exc.msg
|
||||
break
|
||||
await self.tryIssueCertificate()
|
||||
|
||||
# AutotlsService will renew the cert 1h before it expires
|
||||
let cert = self.cert.get
|
||||
let waitTime = cert.expiry - Moment.now - self.config.renewBufferTime
|
||||
if waitTime <= self.config.renewBufferTime:
|
||||
try:
|
||||
await self.issueCertificate()
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
error "Failed to renew certificate", err = exc.msg
|
||||
break
|
||||
await self.tryIssueCertificate()
|
||||
|
||||
method stop*(
|
||||
self: AutotlsService, switch: Switch
|
||||
|
||||
@@ -9,57 +9,30 @@
|
||||
{.push raises: [].}
|
||||
{.push public.}
|
||||
|
||||
import chronos
|
||||
import chronos, chronicles
|
||||
import ../errors
|
||||
|
||||
logScope:
|
||||
topics = "libp2p utils"
|
||||
|
||||
const
|
||||
DefaultDnsRetries = 10
|
||||
DefaultDnsRetries = 3
|
||||
DefaultDnsRetryTime = 1.seconds
|
||||
|
||||
type AutoTLSError* = object of LPError
|
||||
|
||||
when defined(libp2p_autotls_support):
|
||||
import net, strutils
|
||||
import strutils
|
||||
from times import DateTime, toTime, toUnix
|
||||
import stew/base36, chronicles
|
||||
import stew/base36
|
||||
import
|
||||
../peerid,
|
||||
../multihash,
|
||||
../cid,
|
||||
../multicodec,
|
||||
../nameresolving/dnsresolver,
|
||||
../nameresolving/nameresolver,
|
||||
./acme/client
|
||||
|
||||
proc checkedGetPrimaryIPAddr*(): IpAddress {.raises: [AutoTLSError].} =
|
||||
# This is so that we don't need to catch Exceptions directly
|
||||
# since we support 1.6.16 and getPrimaryIPAddr before nim 2 didn't have explicit .raises. pragmas
|
||||
try:
|
||||
return getPrimaryIPAddr()
|
||||
except Exception as exc:
|
||||
raise newException(AutoTLSError, "Error while getting primary IP address", exc)
|
||||
|
||||
proc isIPv4*(ip: IpAddress): bool =
|
||||
ip.family == IpAddressFamily.IPv4
|
||||
|
||||
proc isPublic*(ip: IpAddress): bool {.raises: [AutoTLSError].} =
|
||||
let ip = $ip
|
||||
try:
|
||||
not (
|
||||
ip.startsWith("10.") or
|
||||
(ip.startsWith("172.") and parseInt(ip.split(".")[1]) in 16 .. 31) or
|
||||
ip.startsWith("192.168.") or ip.startsWith("127.") or ip.startsWith("169.254.")
|
||||
)
|
||||
except ValueError as exc:
|
||||
raise newException(AutoTLSError, "Failed to parse IP address", exc)
|
||||
|
||||
proc getPublicIPAddress*(): IpAddress {.raises: [AutoTLSError].} =
|
||||
let ip = checkedGetPrimaryIPAddr()
|
||||
if not ip.isIPv4():
|
||||
raise newException(AutoTLSError, "Host does not have an IPv4 address")
|
||||
if not ip.isPublic():
|
||||
raise newException(AutoTLSError, "Host does not have a public IPv4 address")
|
||||
return ip
|
||||
|
||||
proc asMoment*(dt: DateTime): Moment =
|
||||
let unixTime: int64 = dt.toTime.toUnix
|
||||
return Moment.init(unixTime, Second)
|
||||
@@ -78,7 +51,7 @@ when defined(libp2p_autotls_support):
|
||||
return Base36.encode(cidResult.get().data.buffer)
|
||||
|
||||
proc checkDNSRecords*(
|
||||
dnsResolver: DnsResolver,
|
||||
nameResolver: NameResolver,
|
||||
ipAddress: IpAddress,
|
||||
baseDomain: api.Domain,
|
||||
keyAuth: KeyAuthorization,
|
||||
@@ -95,9 +68,9 @@ when defined(libp2p_autotls_support):
|
||||
var txt: seq[string]
|
||||
var ip4: seq[TransportAddress]
|
||||
for _ in 0 .. retries:
|
||||
txt = await dnsResolver.resolveTxt(acmeChalDomain)
|
||||
txt = await nameResolver.resolveTxt(acmeChalDomain)
|
||||
try:
|
||||
ip4 = await dnsResolver.resolveIp(ip4Domain, 0.Port)
|
||||
ip4 = await nameResolver.resolveIp(ip4Domain, 0.Port)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
|
||||
@@ -15,7 +15,7 @@ runnableExamples:
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import options, tables, chronos, chronicles, sequtils, uri
|
||||
import options, tables, chronos, chronicles, sequtils
|
||||
import
|
||||
switch,
|
||||
peerid,
|
||||
@@ -26,7 +26,15 @@ import
|
||||
transports/[transport, tcptransport, wstransport, memorytransport],
|
||||
muxers/[muxer, mplex/mplex, yamux/yamux],
|
||||
protocols/[identify, secure/secure, secure/noise, rendezvous],
|
||||
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
|
||||
protocols/connectivity/[
|
||||
autonat/server,
|
||||
autonatv2/server,
|
||||
autonatv2/service,
|
||||
autonatv2/client,
|
||||
relay/relay,
|
||||
relay/client,
|
||||
relay/rtransport,
|
||||
],
|
||||
connmanager,
|
||||
upgrademngrs/muxedupgrade,
|
||||
observedaddrmanager,
|
||||
@@ -43,9 +51,16 @@ export
|
||||
const MemoryAutoAddress* = memorytransport.MemoryAutoAddress
|
||||
|
||||
type
|
||||
TransportProvider* {.public.} = proc(
|
||||
upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
|
||||
): Transport {.gcsafe, raises: [].}
|
||||
TransportProvider* {.deprecated: "Use TransportBuilder instead".} =
|
||||
proc(upgr: Upgrade, privateKey: PrivateKey): Transport {.gcsafe, raises: [].}
|
||||
|
||||
TransportBuilder* {.public.} =
|
||||
proc(config: TransportConfig): Transport {.gcsafe, raises: [].}
|
||||
|
||||
TransportConfig* = ref object
|
||||
upgr*: Upgrade
|
||||
privateKey*: PrivateKey
|
||||
autotls*: AutotlsService
|
||||
|
||||
SecureProtocol* {.pure.} = enum
|
||||
Noise
|
||||
@@ -55,7 +70,7 @@ type
|
||||
addresses: seq[MultiAddress]
|
||||
secureManagers: seq[SecureProtocol]
|
||||
muxers: seq[MuxerProvider]
|
||||
transports: seq[TransportProvider]
|
||||
transports: seq[TransportBuilder]
|
||||
rng: ref HmacDrbgContext
|
||||
maxConnections: int
|
||||
maxIn: int
|
||||
@@ -67,6 +82,9 @@ type
|
||||
nameResolver: NameResolver
|
||||
peerStoreCapacity: Opt[int]
|
||||
autonat: bool
|
||||
autonatV2ServerConfig: Opt[AutonatV2Config]
|
||||
autonatV2Client: AutonatV2Client
|
||||
autonatV2ServiceConfig: AutonatV2ServiceConfig
|
||||
autotls: AutotlsService
|
||||
circuitRelay: Relay
|
||||
rdv: RendezVous
|
||||
@@ -152,28 +170,42 @@ proc withNoise*(b: SwitchBuilder): SwitchBuilder {.public.} =
|
||||
b
|
||||
|
||||
proc withTransport*(
|
||||
b: SwitchBuilder, prov: TransportProvider
|
||||
b: SwitchBuilder, prov: TransportBuilder
|
||||
): SwitchBuilder {.public.} =
|
||||
## Use a custom transport
|
||||
runnableExamples:
|
||||
let switch = SwitchBuilder
|
||||
.new()
|
||||
.withTransport(
|
||||
proc(
|
||||
upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
|
||||
): Transport =
|
||||
TcpTransport.new(flags, upgr)
|
||||
proc(config: TransportConfig): Transport =
|
||||
TcpTransport.new(flags, config.upgr)
|
||||
)
|
||||
.build()
|
||||
b.transports.add(prov)
|
||||
b
|
||||
|
||||
proc withTransport*(
|
||||
b: SwitchBuilder, prov: TransportProvider
|
||||
): SwitchBuilder {.deprecated: "Use TransportBuilder instead".} =
|
||||
## Use a custom transport
|
||||
runnableExamples:
|
||||
let switch = SwitchBuilder
|
||||
.new()
|
||||
.withTransport(
|
||||
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
|
||||
TcpTransport.new(flags, upgr)
|
||||
)
|
||||
.build()
|
||||
let tBuilder: TransportBuilder = proc(config: TransportConfig): Transport =
|
||||
prov(config.upgr, config.privateKey)
|
||||
b.withTransport(tBuilder)
|
||||
|
||||
proc withTcpTransport*(
|
||||
b: SwitchBuilder, flags: set[ServerFlags] = {}
|
||||
): SwitchBuilder {.public.} =
|
||||
b.withTransport(
|
||||
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
|
||||
TcpTransport.new(flags, upgr)
|
||||
proc(config: TransportConfig): Transport =
|
||||
TcpTransport.new(flags, config.upgr)
|
||||
)
|
||||
|
||||
proc withWsTransport*(
|
||||
@@ -184,8 +216,10 @@ proc withWsTransport*(
|
||||
flags: set[ServerFlags] = {},
|
||||
): SwitchBuilder =
|
||||
b.withTransport(
|
||||
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
|
||||
WsTransport.new(upgr, tlsPrivateKey, tlsCertificate, tlsFlags, flags)
|
||||
proc(config: TransportConfig): Transport =
|
||||
WsTransport.new(
|
||||
config.upgr, tlsPrivateKey, tlsCertificate, config.autotls, tlsFlags, flags
|
||||
)
|
||||
)
|
||||
|
||||
when defined(libp2p_quic_support):
|
||||
@@ -193,14 +227,14 @@ when defined(libp2p_quic_support):
|
||||
|
||||
proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
|
||||
b.withTransport(
|
||||
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
|
||||
QuicTransport.new(upgr, privateKey)
|
||||
proc(config: TransportConfig): Transport =
|
||||
QuicTransport.new(config.upgr, config.privateKey)
|
||||
)
|
||||
|
||||
proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
|
||||
b.withTransport(
|
||||
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
|
||||
MemoryTransport.new(upgr)
|
||||
proc(config: TransportConfig): Transport =
|
||||
MemoryTransport.new(config.upgr)
|
||||
)
|
||||
|
||||
proc withRng*(b: SwitchBuilder, rng: ref HmacDrbgContext): SwitchBuilder {.public.} =
|
||||
@@ -257,6 +291,19 @@ proc withAutonat*(b: SwitchBuilder): SwitchBuilder =
|
||||
b.autonat = true
|
||||
b
|
||||
|
||||
proc withAutonatV2Server*(
|
||||
b: SwitchBuilder, config: AutonatV2Config = AutonatV2Config.new()
|
||||
): SwitchBuilder =
|
||||
b.autonatV2ServerConfig = Opt.some(config)
|
||||
b
|
||||
|
||||
proc withAutonatV2*(
|
||||
b: SwitchBuilder, serviceConfig = AutonatV2ServiceConfig.new()
|
||||
): SwitchBuilder =
|
||||
b.autonatV2Client = AutonatV2Client.new(b.rng)
|
||||
b.autonatV2ServiceConfig = serviceConfig
|
||||
b
|
||||
|
||||
when defined(libp2p_autotls_support):
|
||||
proc withAutotls*(
|
||||
b: SwitchBuilder, config: AutotlsConfig = AutotlsConfig.new()
|
||||
@@ -321,7 +368,11 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
|
||||
let transports = block:
|
||||
var transports: seq[Transport]
|
||||
for tProvider in b.transports:
|
||||
transports.add(tProvider(muxedUpgrade, seckey, b.autotls))
|
||||
transports.add(
|
||||
tProvider(
|
||||
TransportConfig(upgr: muxedUpgrade, privateKey: seckey, autotls: b.autotls)
|
||||
)
|
||||
)
|
||||
transports
|
||||
|
||||
if b.secureManagers.len == 0:
|
||||
@@ -339,6 +390,13 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
|
||||
if b.enableWildcardResolver:
|
||||
b.services.insert(WildcardAddressResolverService.new(), 0)
|
||||
|
||||
if not isNil(b.autonatV2Client):
|
||||
b.services.add(
|
||||
AutonatV2Service.new(
|
||||
b.rng, client = b.autonatV2Client, config = b.autonatV2ServiceConfig
|
||||
)
|
||||
)
|
||||
|
||||
let switch = newSwitch(
|
||||
peerInfo = peerInfo,
|
||||
transports = transports,
|
||||
@@ -352,9 +410,15 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
|
||||
|
||||
switch.mount(identify)
|
||||
|
||||
if not isNil(b.autonatV2Client):
|
||||
b.autonatV2Client.setup(switch)
|
||||
switch.mount(b.autonatV2Client)
|
||||
|
||||
b.autonatV2ServerConfig.withValue(config):
|
||||
switch.mount(AutonatV2.new(switch, config = config))
|
||||
|
||||
if b.autonat:
|
||||
let autonat = Autonat.new(switch)
|
||||
switch.mount(autonat)
|
||||
switch.mount(Autonat.new(switch))
|
||||
|
||||
if not isNil(b.circuitRelay):
|
||||
if b.circuitRelay of RelayClient:
|
||||
@@ -368,13 +432,78 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
|
||||
|
||||
return switch
|
||||
|
||||
proc newStandardSwitch*(
|
||||
type TransportType* {.pure.} = enum
|
||||
QUIC
|
||||
TCP
|
||||
Memory
|
||||
|
||||
proc newStandardSwitchBuilder*(
|
||||
privKey = none(PrivateKey),
|
||||
addrs: MultiAddress | seq[MultiAddress] =
|
||||
MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("valid address"),
|
||||
secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
|
||||
addrs: MultiAddress | seq[MultiAddress] = newSeq[MultiAddress](),
|
||||
transport: TransportType = TransportType.TCP,
|
||||
transportFlags: set[ServerFlags] = {},
|
||||
rng = newRng(),
|
||||
secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
|
||||
inTimeout: Duration = 5.minutes,
|
||||
outTimeout: Duration = 5.minutes,
|
||||
maxConnections = MaxConnections,
|
||||
maxIn = -1,
|
||||
maxOut = -1,
|
||||
maxConnsPerPeer = MaxConnectionsPerPeer,
|
||||
nameResolver: NameResolver = nil,
|
||||
sendSignedPeerRecord = false,
|
||||
peerStoreCapacity = 1000,
|
||||
): SwitchBuilder {.raises: [LPError], public.} =
|
||||
## Helper for common switch configurations.
|
||||
var b = SwitchBuilder
|
||||
.new()
|
||||
.withRng(rng)
|
||||
.withSignedPeerRecord(sendSignedPeerRecord)
|
||||
.withMaxConnections(maxConnections)
|
||||
.withMaxIn(maxIn)
|
||||
.withMaxOut(maxOut)
|
||||
.withMaxConnsPerPeer(maxConnsPerPeer)
|
||||
.withPeerStore(capacity = peerStoreCapacity)
|
||||
.withNameResolver(nameResolver)
|
||||
.withNoise()
|
||||
|
||||
var addrs =
|
||||
when addrs is MultiAddress:
|
||||
@[addrs]
|
||||
else:
|
||||
addrs
|
||||
|
||||
case transport
|
||||
of TransportType.QUIC:
|
||||
when defined(libp2p_quic_support):
|
||||
if addrs.len == 0:
|
||||
addrs = @[MultiAddress.init("/ip4/0.0.0.0/udp/0/quic-v1").tryGet()]
|
||||
b = b.withQuicTransport().withAddresses(addrs)
|
||||
else:
|
||||
raiseAssert "QUIC not supported in this build"
|
||||
of TransportType.TCP:
|
||||
if addrs.len == 0:
|
||||
addrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()]
|
||||
b = b.withTcpTransport(transportFlags).withAddresses(addrs).withMplex(
|
||||
inTimeout, outTimeout
|
||||
)
|
||||
of TransportType.Memory:
|
||||
if addrs.len == 0:
|
||||
addrs = @[MultiAddress.init(MemoryAutoAddress).tryGet()]
|
||||
b = b.withMemoryTransport().withAddresses(addrs).withMplex(inTimeout, outTimeout)
|
||||
|
||||
privKey.withValue(pkey):
|
||||
b = b.withPrivateKey(pkey)
|
||||
|
||||
b
|
||||
|
||||
proc newStandardSwitch*(
|
||||
privKey = none(PrivateKey),
|
||||
addrs: MultiAddress | seq[MultiAddress] = newSeq[MultiAddress](),
|
||||
transport: TransportType = TransportType.TCP,
|
||||
transportFlags: set[ServerFlags] = {},
|
||||
rng = newRng(),
|
||||
secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
|
||||
inTimeout: Duration = 5.minutes,
|
||||
outTimeout: Duration = 5.minutes,
|
||||
maxConnections = MaxConnections,
|
||||
@@ -385,28 +514,21 @@ proc newStandardSwitch*(
|
||||
sendSignedPeerRecord = false,
|
||||
peerStoreCapacity = 1000,
|
||||
): Switch {.raises: [LPError], public.} =
|
||||
## Helper for common switch configurations.
|
||||
let addrs =
|
||||
when addrs is MultiAddress:
|
||||
@[addrs]
|
||||
else:
|
||||
addrs
|
||||
var b = SwitchBuilder
|
||||
.new()
|
||||
.withAddresses(addrs)
|
||||
.withRng(rng)
|
||||
.withSignedPeerRecord(sendSignedPeerRecord)
|
||||
.withMaxConnections(maxConnections)
|
||||
.withMaxIn(maxIn)
|
||||
.withMaxOut(maxOut)
|
||||
.withMaxConnsPerPeer(maxConnsPerPeer)
|
||||
.withPeerStore(capacity = peerStoreCapacity)
|
||||
.withMplex(inTimeout, outTimeout)
|
||||
.withTcpTransport(transportFlags)
|
||||
.withNameResolver(nameResolver)
|
||||
.withNoise()
|
||||
|
||||
privKey.withValue(pkey):
|
||||
b = b.withPrivateKey(pkey)
|
||||
|
||||
b.build()
|
||||
newStandardSwitchBuilder(
|
||||
privKey = privKey,
|
||||
addrs = addrs,
|
||||
transport = transport,
|
||||
transportFlags = transportFlags,
|
||||
rng = rng,
|
||||
secureManagers = secureManagers,
|
||||
inTimeout = inTimeout,
|
||||
outTimeout = outTimeout,
|
||||
maxConnections = maxConnections,
|
||||
maxIn = maxIn,
|
||||
maxOut = maxOut,
|
||||
maxConnsPerPeer = maxConnsPerPeer,
|
||||
nameResolver = nameResolver,
|
||||
sendSignedPeerRecord = sendSignedPeerRecord,
|
||||
peerStoreCapacity = peerStoreCapacity,
|
||||
)
|
||||
.build()
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,156 +0,0 @@
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
## This module implements Pool of StreamTransport.
|
||||
import chronos
|
||||
|
||||
const DefaultPoolSize* = 8 ## Default pool size
|
||||
|
||||
type
|
||||
ConnectionFlags = enum
|
||||
None
|
||||
Busy
|
||||
|
||||
PoolItem = object
|
||||
transp*: StreamTransport
|
||||
flags*: set[ConnectionFlags]
|
||||
|
||||
PoolState = enum
|
||||
Connecting
|
||||
Connected
|
||||
Closing
|
||||
Closed
|
||||
|
||||
TransportPool* = ref object ## Transports pool object
|
||||
transports: seq[PoolItem]
|
||||
busyCount: int
|
||||
state: PoolState
|
||||
bufferSize: int
|
||||
event: AsyncEvent
|
||||
|
||||
TransportPoolError* = object of AsyncError
|
||||
|
||||
proc waitAll[T](futs: seq[Future[T]]): Future[void] =
|
||||
## Performs waiting for all Future[T].
|
||||
var counter = len(futs)
|
||||
var retFuture = newFuture[void]("connpool.waitAllConnections")
|
||||
proc cb(udata: pointer) =
|
||||
dec(counter)
|
||||
if counter == 0:
|
||||
retFuture.complete()
|
||||
|
||||
for fut in futs:
|
||||
fut.addCallback(cb)
|
||||
return retFuture
|
||||
|
||||
proc newPool*(
|
||||
address: TransportAddress,
|
||||
poolsize: int = DefaultPoolSize,
|
||||
bufferSize = DefaultStreamBufferSize,
|
||||
): Future[TransportPool] {.async: (raises: [CancelledError]).} =
|
||||
## Establish pool of connections to address ``address`` with size
|
||||
## ``poolsize``.
|
||||
var pool = new TransportPool
|
||||
pool.bufferSize = bufferSize
|
||||
pool.transports = newSeq[PoolItem](poolsize)
|
||||
var conns = newSeq[Future[StreamTransport]](poolsize)
|
||||
pool.state = Connecting
|
||||
for i in 0 ..< poolsize:
|
||||
conns[i] = connect(address, bufferSize)
|
||||
# Waiting for all connections to be established.
|
||||
await waitAll(conns)
|
||||
# Checking connections and preparing pool.
|
||||
for i in 0 ..< poolsize:
|
||||
if conns[i].failed:
|
||||
raise conns[i].error
|
||||
else:
|
||||
let transp = conns[i].read()
|
||||
let item = PoolItem(transp: transp)
|
||||
pool.transports[i] = item
|
||||
# Setup available connections event
|
||||
pool.event = newAsyncEvent()
|
||||
pool.state = Connected
|
||||
result = pool
|
||||
|
||||
proc acquire*(
|
||||
pool: TransportPool
|
||||
): Future[StreamTransport] {.async: (raises: [CancelledError, TransportPoolError]).} =
|
||||
## Acquire non-busy connection from pool ``pool``.
|
||||
var transp: StreamTransport
|
||||
if pool.state in {Connected}:
|
||||
while true:
|
||||
if pool.busyCount < len(pool.transports):
|
||||
for conn in pool.transports.mitems():
|
||||
if Busy notin conn.flags:
|
||||
conn.flags.incl(Busy)
|
||||
inc(pool.busyCount)
|
||||
transp = conn.transp
|
||||
break
|
||||
else:
|
||||
await pool.event.wait()
|
||||
pool.event.clear()
|
||||
|
||||
if not isNil(transp):
|
||||
break
|
||||
else:
|
||||
raise newException(TransportPoolError, "Pool is not ready!")
|
||||
result = transp
|
||||
|
||||
proc release*(
|
||||
pool: TransportPool, transp: StreamTransport
|
||||
) {.async: (raises: [TransportPoolError]).} =
|
||||
## Release connection ``transp`` back to pool ``pool``.
|
||||
if pool.state in {Connected, Closing}:
|
||||
var found = false
|
||||
for conn in pool.transports.mitems():
|
||||
if conn.transp == transp:
|
||||
conn.flags.excl(Busy)
|
||||
dec(pool.busyCount)
|
||||
pool.event.fire()
|
||||
found = true
|
||||
break
|
||||
if not found:
|
||||
raise newException(TransportPoolError, "Transport not bound to pool!")
|
||||
else:
|
||||
raise newException(TransportPoolError, "Pool is not ready!")
|
||||
|
||||
proc join*(
|
||||
pool: TransportPool
|
||||
) {.async: (raises: [TransportPoolError, CancelledError]).} =
|
||||
## Waiting for all connection to become available.
|
||||
if pool.state in {Connected, Closing}:
|
||||
while true:
|
||||
if pool.busyCount == 0:
|
||||
break
|
||||
else:
|
||||
await pool.event.wait()
|
||||
pool.event.clear()
|
||||
elif pool.state == Connecting:
|
||||
raise newException(TransportPoolError, "Pool is not ready!")
|
||||
|
||||
proc close*(
|
||||
pool: TransportPool
|
||||
) {.async: (raises: [TransportPoolError, CancelledError]).} =
|
||||
## Closes transports pool ``pool`` and release all resources.
|
||||
if pool.state == Connected:
|
||||
pool.state = Closing
|
||||
# Waiting for all transports to become available.
|
||||
await pool.join()
|
||||
# Closing all transports
|
||||
var pending = newSeq[Future[void]](len(pool.transports))
|
||||
for i in 0 ..< len(pool.transports):
|
||||
let transp = pool.transports[i].transp
|
||||
transp.close()
|
||||
pending[i] = transp.join()
|
||||
# Waiting for all transports to be closed
|
||||
await waitAll(pending)
|
||||
# Mark pool as `Closed`.
|
||||
pool.state = Closed
|
||||
@@ -11,7 +11,7 @@
|
||||
|
||||
import chronos
|
||||
import results
|
||||
import peerid, stream/connection, transports/transport
|
||||
import peerid, stream/connection, transports/transport, muxers/muxer
|
||||
|
||||
export results
|
||||
|
||||
@@ -49,6 +49,22 @@ method dial*(
|
||||
|
||||
doAssert(false, "[Dial.dial] abstract method not implemented!")
|
||||
|
||||
method dialAndUpgrade*(
|
||||
self: Dial,
|
||||
peerId: Opt[PeerId],
|
||||
hostname: string,
|
||||
addrs: MultiAddress,
|
||||
dir = Direction.Out,
|
||||
): Future[Muxer] {.base, async: (raises: [CancelledError]).} =
|
||||
doAssert(false, "[Dial.dialAndUpgrade] abstract method not implemented!")
|
||||
|
||||
method dialAndUpgrade*(
|
||||
self: Dial, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
|
||||
): Future[Muxer] {.
|
||||
base, async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
|
||||
.} =
|
||||
doAssert(false, "[Dial.dialAndUpgrade] abstract method not implemented!")
|
||||
|
||||
method dial*(
|
||||
self: Dial,
|
||||
peerId: PeerId,
|
||||
@@ -65,6 +81,11 @@ method dial*(
|
||||
method addTransport*(self: Dial, transport: Transport) {.base.} =
|
||||
doAssert(false, "[Dial.addTransport] abstract method not implemented!")
|
||||
|
||||
method negotiateStream*(
|
||||
self: Dial, conn: Connection, protos: seq[string]
|
||||
): Future[Connection] {.base, async: (raises: [CatchableError]).} =
|
||||
doAssert(false, "[Dial.negotiateStream] abstract method not implemented!")
|
||||
|
||||
method tryDial*(
|
||||
self: Dial, peerId: PeerId, addrs: seq[MultiAddress]
|
||||
): Future[Opt[MultiAddress]] {.
|
||||
|
||||
@@ -43,20 +43,20 @@ type Dialer* = ref object of Dial
|
||||
peerStore: PeerStore
|
||||
nameResolver: NameResolver
|
||||
|
||||
proc dialAndUpgrade(
|
||||
method dialAndUpgrade*(
|
||||
self: Dialer,
|
||||
peerId: Opt[PeerId],
|
||||
hostname: string,
|
||||
address: MultiAddress,
|
||||
addrs: MultiAddress,
|
||||
dir = Direction.Out,
|
||||
): Future[Muxer] {.async: (raises: [CancelledError]).} =
|
||||
for transport in self.transports: # for each transport
|
||||
if transport.handles(address): # check if it can dial it
|
||||
trace "Dialing address", address, peerId = peerId.get(default(PeerId)), hostname
|
||||
if transport.handles(addrs): # check if it can dial it
|
||||
trace "Dialing address", addrs, peerId = peerId.get(default(PeerId)), hostname
|
||||
let dialed =
|
||||
try:
|
||||
libp2p_total_dial_attempts.inc()
|
||||
await transport.dial(hostname, address, peerId)
|
||||
await transport.dial(hostname, addrs, peerId)
|
||||
except CancelledError as exc:
|
||||
trace "Dialing canceled",
|
||||
description = exc.msg, peerId = peerId.get(default(PeerId))
|
||||
@@ -104,12 +104,13 @@ proc expandDnsAddr(
|
||||
): Future[seq[(MultiAddress, Opt[PeerId])]] {.
|
||||
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
|
||||
.} =
|
||||
if not DNSADDR.matchPartial(address):
|
||||
if not DNS.matchPartial(address):
|
||||
return @[(address, peerId)]
|
||||
if isNil(self.nameResolver):
|
||||
info "Can't resolve DNSADDR without NameResolver", ma = address
|
||||
return @[]
|
||||
|
||||
trace "Start trying to resolve addresses"
|
||||
let
|
||||
toResolve =
|
||||
if peerId.isSome:
|
||||
@@ -121,6 +122,9 @@ proc expandDnsAddr(
|
||||
address
|
||||
resolved = await self.nameResolver.resolveDnsAddr(toResolve)
|
||||
|
||||
debug "resolved addresses",
|
||||
originalAddresses = toResolve, resolvedAddresses = resolved
|
||||
|
||||
for resolvedAddress in resolved:
|
||||
let lastPart = resolvedAddress[^1].tryGet()
|
||||
if lastPart.protoCode == Result[MultiCodec, string].ok(multiCodec("p2p")):
|
||||
@@ -135,7 +139,7 @@ proc expandDnsAddr(
|
||||
else:
|
||||
result.add((resolvedAddress, peerId))
|
||||
|
||||
proc dialAndUpgrade(
|
||||
method dialAndUpgrade*(
|
||||
self: Dialer, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
|
||||
): Future[Muxer] {.
|
||||
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
|
||||
@@ -145,7 +149,6 @@ proc dialAndUpgrade(
|
||||
for rawAddress in addrs:
|
||||
# resolve potential dnsaddr
|
||||
let addresses = await self.expandDnsAddr(peerId, rawAddress)
|
||||
|
||||
for (expandedAddress, addrPeerId) in addresses:
|
||||
# DNS resolution
|
||||
let
|
||||
@@ -156,6 +159,11 @@ proc dialAndUpgrade(
|
||||
else:
|
||||
await self.nameResolver.resolveMAddress(expandedAddress)
|
||||
|
||||
debug "Expanded address and hostname",
|
||||
expandedAddress = expandedAddress,
|
||||
hostname = hostname,
|
||||
resolvedAddresses = resolvedAddresses
|
||||
|
||||
for resolvedAddress in resolvedAddresses:
|
||||
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
|
||||
if not isNil(result):
|
||||
@@ -276,7 +284,7 @@ method connect*(
|
||||
return
|
||||
(await self.internalConnect(Opt.none(PeerId), @[address], false)).connection.peerId
|
||||
|
||||
proc negotiateStream(
|
||||
method negotiateStream*(
|
||||
self: Dialer, conn: Connection, protos: seq[string]
|
||||
): Future[Connection] {.async: (raises: [CatchableError]).} =
|
||||
trace "Negotiating stream", conn, protos
|
||||
@@ -284,7 +292,6 @@ proc negotiateStream(
|
||||
if not protos.contains(selected):
|
||||
await conn.closeWithEOF()
|
||||
raise newException(DialFailedError, "Unable to select sub-protocol: " & $protos)
|
||||
|
||||
return conn
|
||||
|
||||
method tryDial*(
|
||||
|
||||
@@ -159,7 +159,7 @@ proc stop*(query: DiscoveryQuery) =
|
||||
query.finished = true
|
||||
for r in query.futs:
|
||||
if not r.finished():
|
||||
r.cancel()
|
||||
r.cancelSoon()
|
||||
|
||||
proc stop*(dm: DiscoveryManager) =
|
||||
for q in dm.queries:
|
||||
@@ -167,7 +167,7 @@ proc stop*(dm: DiscoveryManager) =
|
||||
for i in dm.interfaces:
|
||||
if isNil(i.advertiseLoop):
|
||||
continue
|
||||
i.advertiseLoop.cancel()
|
||||
i.advertiseLoop.cancelSoon()
|
||||
|
||||
proc getPeer*(
|
||||
query: DiscoveryQuery
|
||||
@@ -179,7 +179,7 @@ proc getPeer*(
|
||||
try:
|
||||
await getter or allFinished(query.futs)
|
||||
except CancelledError as exc:
|
||||
getter.cancel()
|
||||
getter.cancelSoon()
|
||||
raise exc
|
||||
|
||||
if not finished(getter):
|
||||
|
||||
@@ -27,7 +27,7 @@ macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
|
||||
quote:
|
||||
for res in `futs`:
|
||||
if res.failed:
|
||||
let exc = res.readError()
|
||||
let exc = res.error
|
||||
# We still don't abort but warn
|
||||
debug "A future has failed, enable trace logging for details",
|
||||
error = exc.name
|
||||
@@ -37,7 +37,7 @@ macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
|
||||
for res in `futs`:
|
||||
block check:
|
||||
if res.failed:
|
||||
let exc = res.readError()
|
||||
let exc = res.error
|
||||
for i in 0 ..< `nexclude`:
|
||||
if exc of `exclude`[i]:
|
||||
trace "A future has failed", error = exc.name, description = exc.msg
|
||||
|
||||
@@ -843,6 +843,14 @@ proc init*(
|
||||
res.data.finish()
|
||||
ok(res)
|
||||
|
||||
proc getPart*(ma: MultiAddress, codec: MultiCodec): MaResult[MultiAddress] =
|
||||
## Returns the first multiaddress in ``value`` with codec ``codec``
|
||||
for part in ma:
|
||||
let part = ?part
|
||||
if codec == ?part.protoCode:
|
||||
return ok(part)
|
||||
err("no such codec in multiaddress")
|
||||
|
||||
proc getProtocol(name: string): MAProtocol {.inline.} =
|
||||
let mc = MultiCodec.codec(name)
|
||||
if mc != InvalidMultiCodec:
|
||||
@@ -1119,3 +1127,32 @@ proc getRepeatedField*(
|
||||
err(ProtoError.IncorrectBlob)
|
||||
else:
|
||||
ok(true)
|
||||
|
||||
proc areAddrsConsistent*(a, b: MultiAddress): bool =
|
||||
## Checks if two multiaddresses have the same protocol stack.
|
||||
let protosA = a.protocols().get()
|
||||
let protosB = b.protocols().get()
|
||||
if protosA.len != protosB.len:
|
||||
return false
|
||||
|
||||
for idx in 0 ..< protosA.len:
|
||||
let protoA = protosA[idx]
|
||||
let protoB = protosB[idx]
|
||||
|
||||
if protoA != protoB:
|
||||
if idx == 0:
|
||||
# allow DNS ↔ IP at the first component
|
||||
if protoB == multiCodec("dns") or protoB == multiCodec("dnsaddr"):
|
||||
if not (protoA == multiCodec("ip4") or protoA == multiCodec("ip6")):
|
||||
return false
|
||||
elif protoB == multiCodec("dns4"):
|
||||
if protoA != multiCodec("ip4"):
|
||||
return false
|
||||
elif protoB == multiCodec("dns6"):
|
||||
if protoA != multiCodec("ip6"):
|
||||
return false
|
||||
else:
|
||||
return false
|
||||
else:
|
||||
return false
|
||||
true
|
||||
|
||||
@@ -249,11 +249,7 @@ proc addHandler*[E](
|
||||
m.handlers.add(HandlerHolder(protos: @[codec], protocol: protocol, match: matcher))
|
||||
|
||||
proc start*(m: MultistreamSelect) {.async: (raises: [CancelledError]).} =
|
||||
# Nim 1.6.18: Using `mapIt` results in a seq of `.Raising([])`
|
||||
# TODO https://github.com/nim-lang/Nim/issues/23445
|
||||
var futs = newSeqOfCap[Future[void].Raising([CancelledError])](m.handlers.len)
|
||||
for it in m.handlers:
|
||||
futs.add it.protocol.start()
|
||||
let futs = m.handlers.mapIt(it.protocol.start())
|
||||
try:
|
||||
await allFutures(futs)
|
||||
for fut in futs:
|
||||
@@ -273,10 +269,7 @@ proc start*(m: MultistreamSelect) {.async: (raises: [CancelledError]).} =
|
||||
raise exc
|
||||
|
||||
proc stop*(m: MultistreamSelect) {.async: (raises: []).} =
|
||||
# Nim 1.6.18: Using `mapIt` results in a seq of `.Raising([CancelledError])`
|
||||
var futs = newSeqOfCap[Future[void].Raising([])](m.handlers.len)
|
||||
for it in m.handlers:
|
||||
futs.add it.protocol.stop()
|
||||
let futs = m.handlers.mapIt(it.protocol.stop())
|
||||
await noCancel allFutures(futs)
|
||||
for fut in futs:
|
||||
await fut
|
||||
|
||||
@@ -150,6 +150,10 @@ method close*(s: LPChannel) {.async: (raises: []).} =
|
||||
|
||||
trace "Closed channel", s, len = s.len
|
||||
|
||||
method closeWrite*(s: LPChannel) {.async: (raises: []).} =
|
||||
## For mplex, closeWrite is the same as close - it implements half-close
|
||||
await s.close()
|
||||
|
||||
method initStream*(s: LPChannel) =
|
||||
if s.objName.len == 0:
|
||||
s.objName = LPChannelTrackerName
|
||||
|
||||
@@ -95,6 +95,7 @@ proc newStreamInternal*(
|
||||
|
||||
result.peerId = m.connection.peerId
|
||||
result.observedAddr = m.connection.observedAddr
|
||||
result.localAddr = m.connection.localAddr
|
||||
result.transportDir = m.connection.transportDir
|
||||
when defined(libp2p_agents_metrics):
|
||||
result.shortAgent = m.connection.shortAgent
|
||||
|
||||
@@ -54,6 +54,10 @@ method newStream*(
|
||||
.} =
|
||||
raiseAssert("[Muxer.newStream] abstract method not implemented!")
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
method setShortAgent*(m: Muxer, shortAgent: string) {.base, gcsafe.} =
|
||||
m.connection.shortAgent = shortAgent
|
||||
|
||||
method close*(m: Muxer) {.base, async: (raises: []).} =
|
||||
if m.connection != nil:
|
||||
await m.connection.close()
|
||||
|
||||
@@ -135,12 +135,11 @@ proc windowUpdate(
|
||||
)
|
||||
|
||||
type
|
||||
ToSend =
|
||||
tuple[
|
||||
data: seq[byte],
|
||||
sent: int,
|
||||
fut: Future[void].Raising([CancelledError, LPStreamError]),
|
||||
]
|
||||
ToSend = ref object
|
||||
data: seq[byte]
|
||||
sent: int
|
||||
fut: Future[void].Raising([CancelledError, LPStreamError])
|
||||
|
||||
YamuxChannel* = ref object of Connection
|
||||
id: uint32
|
||||
recvWindow: int
|
||||
@@ -218,6 +217,19 @@ method closeImpl*(channel: YamuxChannel) {.async: (raises: []).} =
|
||||
discard
|
||||
await channel.actuallyClose()
|
||||
|
||||
method closeWrite*(channel: YamuxChannel) {.async: (raises: []).} =
|
||||
## For yamux, closeWrite is the same as close - it implements half-close
|
||||
await channel.close()
|
||||
|
||||
proc clearQueues(channel: YamuxChannel, error: ref LPStreamEOFError = nil) =
|
||||
for toSend in channel.sendQueue:
|
||||
if error.isNil():
|
||||
toSend.fut.complete()
|
||||
else:
|
||||
toSend.fut.fail(error)
|
||||
channel.sendQueue = @[]
|
||||
channel.recvQueue.clear()
|
||||
|
||||
proc reset(channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).} =
|
||||
# If we reset locally, we want to flush up to a maximum of recvWindow
|
||||
# bytes. It's because the peer we're connected to can send us data before
|
||||
@@ -227,9 +239,8 @@ proc reset(channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).}
|
||||
trace "Reset channel"
|
||||
channel.isReset = true
|
||||
channel.remoteReset = not isLocal
|
||||
for (d, s, fut) in channel.sendQueue:
|
||||
fut.fail(newLPStreamEOFError())
|
||||
channel.sendQueue = @[]
|
||||
channel.clearQueues(newLPStreamEOFError())
|
||||
|
||||
channel.sendWindow = 0
|
||||
if not channel.closedLocally:
|
||||
if isLocal and not channel.isSending:
|
||||
@@ -278,6 +289,7 @@ method readOnce*(
|
||||
trace "stream is down when readOnce", channel = $channel
|
||||
newLPStreamConnDownError()
|
||||
if channel.isEof:
|
||||
channel.clearQueues()
|
||||
raise newLPStreamRemoteClosedError()
|
||||
if channel.recvQueue.isEmpty():
|
||||
channel.receivedData.clear()
|
||||
@@ -292,6 +304,7 @@ method readOnce*(
|
||||
await closedRemotelyFut or receivedDataFut
|
||||
if channel.closedRemotely.isSet() and channel.recvQueue.isEmpty():
|
||||
channel.isEof = true
|
||||
channel.clearQueues()
|
||||
return
|
||||
0 # we return 0 to indicate that the channel is closed for reading from now on
|
||||
|
||||
@@ -315,17 +328,18 @@ proc gotDataFromRemote(
|
||||
proc setMaxRecvWindow*(channel: YamuxChannel, maxRecvWindow: int) =
|
||||
channel.maxRecvWindow = maxRecvWindow
|
||||
|
||||
proc trySend(
|
||||
channel: YamuxChannel
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
proc sendLoop(channel: YamuxChannel) {.async: (raises: []).} =
|
||||
if channel.isSending:
|
||||
return
|
||||
channel.isSending = true
|
||||
defer:
|
||||
channel.isSending = false
|
||||
|
||||
while channel.sendQueue.len != 0:
|
||||
channel.sendQueue.keepItIf(not (it.fut.cancelled() and it.sent == 0))
|
||||
const NumBytesHeader = 12
|
||||
|
||||
while channel.sendQueue.len > 0:
|
||||
channel.sendQueue.keepItIf(not it.fut.finished())
|
||||
|
||||
if channel.sendWindow == 0:
|
||||
trace "trying to send while the sendWindow is empty"
|
||||
if channel.lengthSendQueueWithLimit() > channel.maxSendQueueSize:
|
||||
@@ -337,54 +351,57 @@ proc trySend(
|
||||
|
||||
let
|
||||
bytesAvailable = channel.lengthSendQueue()
|
||||
toSend = min(channel.sendWindow, bytesAvailable)
|
||||
numBytesToSend = min(channel.sendWindow, bytesAvailable)
|
||||
var
|
||||
sendBuffer = newSeqUninit[byte](toSend + 12)
|
||||
header = YamuxHeader.data(channel.id, toSend.uint32)
|
||||
sendBuffer = newSeqUninit[byte](NumBytesHeader + numBytesToSend)
|
||||
header = YamuxHeader.data(channel.id, numBytesToSend.uint32)
|
||||
inBuffer = 0
|
||||
|
||||
if toSend >= bytesAvailable and channel.closedLocally:
|
||||
trace "last buffer we'll sent on this channel", toSend, bytesAvailable
|
||||
if numBytesToSend >= bytesAvailable and channel.closedLocally:
|
||||
trace "last buffer we will send on this channel", numBytesToSend, bytesAvailable
|
||||
header.flags.incl({Fin})
|
||||
|
||||
sendBuffer[0 ..< 12] = header.encode()
|
||||
sendBuffer[0 ..< NumBytesHeader] = header.encode()
|
||||
|
||||
var futures: seq[Future[void].Raising([CancelledError, LPStreamError])]
|
||||
while inBuffer < toSend:
|
||||
while inBuffer < numBytesToSend:
|
||||
var toSend = channel.sendQueue[0]
|
||||
# concatenate the different message we try to send into one buffer
|
||||
let (data, sent, fut) = channel.sendQueue[0]
|
||||
let bufferToSend = min(data.len - sent, toSend - inBuffer)
|
||||
let bufferToSend = min(toSend.data.len - toSend.sent, numBytesToSend - inBuffer)
|
||||
|
||||
sendBuffer.toOpenArray(12, 12 + toSend - 1)[
|
||||
sendBuffer.toOpenArray(NumBytesHeader, NumBytesHeader + numBytesToSend - 1)[
|
||||
inBuffer ..< (inBuffer + bufferToSend)
|
||||
] = channel.sendQueue[0].data.toOpenArray(sent, sent + bufferToSend - 1)
|
||||
] = toSend.data.toOpenArray(toSend.sent, toSend.sent + bufferToSend - 1)
|
||||
|
||||
channel.sendQueue[0].sent.inc(bufferToSend)
|
||||
if channel.sendQueue[0].sent >= data.len:
|
||||
|
||||
if toSend.sent >= toSend.data.len:
|
||||
# if every byte of the message is in the buffer, add the write future to the
|
||||
# sequence of futures to be completed (or failed) when the buffer is sent
|
||||
futures.add(fut)
|
||||
futures.add(toSend.fut)
|
||||
channel.sendQueue.delete(0)
|
||||
|
||||
inBuffer.inc(bufferToSend)
|
||||
|
||||
trace "try to send the buffer", h = $header
|
||||
channel.sendWindow.dec(toSend)
|
||||
try:
|
||||
await channel.conn.write(sendBuffer)
|
||||
channel.sendWindow.dec(inBuffer)
|
||||
except CancelledError:
|
||||
trace "cancelled sending the buffer"
|
||||
for fut in futures.items():
|
||||
fut.cancelSoon()
|
||||
await channel.reset()
|
||||
break
|
||||
## Just for compiler. This should never happen as sendLoop is started by asyncSpawn.
|
||||
## Therefore, no one owns that sendLoop's future and no one can cancel it.
|
||||
discard
|
||||
except LPStreamError as exc:
|
||||
trace "failed to send the buffer"
|
||||
error "failed to send the buffer", description = exc.msg
|
||||
let connDown = newLPStreamConnDownError(exc)
|
||||
for fut in futures.items():
|
||||
for fut in futures:
|
||||
fut.fail(connDown)
|
||||
await channel.reset()
|
||||
break
|
||||
for fut in futures.items():
|
||||
|
||||
for fut in futures:
|
||||
fut.complete()
|
||||
|
||||
channel.activity = true
|
||||
|
||||
method write*(
|
||||
@@ -392,21 +409,29 @@ method write*(
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
|
||||
## Write to yamux channel
|
||||
##
|
||||
result = newFuture[void]("Yamux Send")
|
||||
var resFut = newFuture[void]("Yamux Send")
|
||||
|
||||
if channel.remoteReset:
|
||||
trace "stream is reset when write", channel = $channel
|
||||
result.fail(newLPStreamResetError())
|
||||
return result
|
||||
resFut.fail(newLPStreamResetError())
|
||||
return resFut
|
||||
|
||||
if channel.closedLocally or channel.isReset:
|
||||
result.fail(newLPStreamClosedError())
|
||||
return result
|
||||
resFut.fail(newLPStreamClosedError())
|
||||
return resFut
|
||||
|
||||
if msg.len == 0:
|
||||
result.complete()
|
||||
return result
|
||||
channel.sendQueue.add((msg, 0, result))
|
||||
resFut.complete()
|
||||
return resFut
|
||||
|
||||
channel.sendQueue.add(ToSend(data: msg, sent: 0, fut: resFut))
|
||||
|
||||
when defined(libp2p_yamux_metrics):
|
||||
libp2p_yamux_send_queue.observe(channel.lengthSendQueue().int64)
|
||||
asyncSpawn channel.trySend()
|
||||
|
||||
asyncSpawn channel.sendLoop()
|
||||
|
||||
return resFut
|
||||
|
||||
proc open(channel: YamuxChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
## Open a yamux channel by sending a window update with Syn or Ack flag
|
||||
@@ -415,6 +440,8 @@ proc open(channel: YamuxChannel) {.async: (raises: [CancelledError, LPStreamErro
|
||||
trace "Try to open channel twice"
|
||||
return
|
||||
channel.opened = true
|
||||
channel.isReset = false
|
||||
|
||||
await channel.conn.write(
|
||||
YamuxHeader.windowUpdate(
|
||||
channel.id,
|
||||
@@ -488,6 +515,7 @@ proc createStream(
|
||||
stream.initStream()
|
||||
stream.peerId = m.connection.peerId
|
||||
stream.observedAddr = m.connection.observedAddr
|
||||
stream.localAddr = m.connection.localAddr
|
||||
stream.transportDir = m.connection.transportDir
|
||||
when defined(libp2p_agents_metrics):
|
||||
stream.shortAgent = m.connection.shortAgent
|
||||
@@ -502,18 +530,17 @@ method close*(m: Yamux) {.async: (raises: []).} =
|
||||
if m.isClosed == true:
|
||||
trace "Already closed"
|
||||
return
|
||||
m.isClosed = true
|
||||
|
||||
trace "Closing yamux"
|
||||
let channels = toSeq(m.channels.values())
|
||||
for channel in channels:
|
||||
for (d, s, fut) in channel.sendQueue:
|
||||
fut.fail(newLPStreamEOFError())
|
||||
channel.sendQueue = @[]
|
||||
channel.clearQueues(newLPStreamEOFError())
|
||||
channel.recvWindow = 0
|
||||
channel.sendWindow = 0
|
||||
channel.closedLocally = true
|
||||
channel.isReset = true
|
||||
channel.opened = false
|
||||
channel.isClosed = true
|
||||
await channel.remoteClosed()
|
||||
channel.receivedData.fire()
|
||||
try:
|
||||
@@ -523,6 +550,8 @@ method close*(m: Yamux) {.async: (raises: []).} =
|
||||
except LPStreamError as exc:
|
||||
trace "failed to send goAway", description = exc.msg
|
||||
await m.connection.close()
|
||||
|
||||
m.isClosed = true
|
||||
trace "Closed yamux"
|
||||
|
||||
proc handleStream(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
|
||||
@@ -583,8 +612,10 @@ method handle*(m: Yamux) {.async: (raises: []).} =
|
||||
if header.length > 0:
|
||||
var buffer = newSeqUninit[byte](header.length)
|
||||
await m.connection.readExactly(addr buffer[0], int(header.length))
|
||||
do:
|
||||
raise newException(YamuxError, "Unknown stream ID: " & $header.streamId)
|
||||
|
||||
# If we do not have a stream, likely we sent a RST and/or closed the stream
|
||||
trace "unknown stream id", id = header.streamId
|
||||
|
||||
continue
|
||||
|
||||
let channel =
|
||||
@@ -600,7 +631,7 @@ method handle*(m: Yamux) {.async: (raises: []).} =
|
||||
|
||||
if header.msgType == WindowUpdate:
|
||||
channel.sendWindow += int(header.length)
|
||||
await channel.trySend()
|
||||
asyncSpawn channel.sendLoop()
|
||||
else:
|
||||
if header.length.int > channel.recvWindow.int:
|
||||
# check before allocating the buffer
|
||||
|
||||
@@ -52,6 +52,14 @@ func shortLog*(p: PeerInfo): auto =
|
||||
chronicles.formatIt(PeerInfo):
|
||||
shortLog(it)
|
||||
|
||||
proc expandAddrs*(
|
||||
p: PeerInfo
|
||||
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
|
||||
var addrs = p.listenAddrs
|
||||
for mapper in p.addressMappers:
|
||||
addrs = await mapper(addrs)
|
||||
addrs
|
||||
|
||||
proc update*(p: PeerInfo) {.async: (raises: [CancelledError]).} =
|
||||
p.addrs = p.listenAddrs
|
||||
for mapper in p.addressMappers:
|
||||
@@ -85,6 +93,10 @@ proc parseFullAddress*(ma: MultiAddress): MaResult[(PeerId, MultiAddress)] =
|
||||
proc parseFullAddress*(ma: string | seq[byte]): MaResult[(PeerId, MultiAddress)] =
|
||||
parseFullAddress(?MultiAddress.init(ma))
|
||||
|
||||
proc toFullAddress*(peerId: PeerId, ma: MultiAddress): MaResult[MultiAddress] =
|
||||
let peerIdPart = ?MultiAddress.init(multiCodec("p2p"), peerId.data)
|
||||
concat(ma, peerIdPart)
|
||||
|
||||
proc new*(
|
||||
p: typedesc[PeerInfo],
|
||||
key: PrivateKey,
|
||||
|
||||
@@ -214,7 +214,7 @@ proc identify*(
|
||||
info.agentVersion.get("").split("/")[0].safeToLowerAscii().get("")
|
||||
if KnownLibP2PAgentsSeq.contains(shortAgent):
|
||||
knownAgent = shortAgent
|
||||
muxer.connection.setShortAgent(knownAgent)
|
||||
muxer.setShortAgent(knownAgent)
|
||||
|
||||
peerStore.updatePeerInfo(info, stream.observedAddr)
|
||||
finally:
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
import results
|
||||
import chronos, chronicles
|
||||
import ../../../switch, ../../../multiaddress, ../../../peerid
|
||||
import core
|
||||
import types
|
||||
|
||||
logScope:
|
||||
topics = "libp2p autonat"
|
||||
|
||||
@@ -20,9 +20,9 @@ import
|
||||
../../../peerid,
|
||||
../../../utils/[semaphore, future],
|
||||
../../../errors
|
||||
import core
|
||||
import types
|
||||
|
||||
export core
|
||||
export types
|
||||
|
||||
logScope:
|
||||
topics = "libp2p autonat"
|
||||
@@ -105,7 +105,7 @@ proc tryDial(
|
||||
autonat.sem.release()
|
||||
for f in futs:
|
||||
if not f.finished():
|
||||
f.cancel()
|
||||
f.cancelSoon()
|
||||
|
||||
proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[void] =
|
||||
let dial = msg.dial.valueOr:
|
||||
|
||||
@@ -14,11 +14,11 @@ import chronos, metrics
|
||||
import ../../../switch
|
||||
import ../../../wire
|
||||
import client
|
||||
from core import NetworkReachability, AutonatUnreachableError
|
||||
from types import NetworkReachability, AutonatUnreachableError
|
||||
import ../../../utils/heartbeat
|
||||
import ../../../crypto/crypto
|
||||
|
||||
export core.NetworkReachability
|
||||
export NetworkReachability
|
||||
|
||||
logScope:
|
||||
topics = "libp2p autonatservice"
|
||||
|
||||
@@ -58,6 +58,9 @@ type
|
||||
NotReachable
|
||||
Reachable
|
||||
|
||||
proc isReachable*(self: NetworkReachability): bool =
|
||||
self == NetworkReachability.Reachable
|
||||
|
||||
proc encode(p: AutonatPeerInfo): ProtoBuffer =
|
||||
result = initProtoBuffer()
|
||||
p.id.withValue(id):
|
||||
202
libp2p/protocols/connectivity/autonatv2/client.nim
Normal file
202
libp2p/protocols/connectivity/autonatv2/client.nim
Normal file
@@ -0,0 +1,202 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2025 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import results
|
||||
import chronos, chronicles, tables
|
||||
import
|
||||
../../protocol,
|
||||
../../../switch,
|
||||
../../../multiaddress,
|
||||
../../../multicodec,
|
||||
../../../peerid,
|
||||
../../../protobuf/minprotobuf,
|
||||
./types,
|
||||
./utils
|
||||
|
||||
logScope:
|
||||
topics = "libp2p autonat v2 client"
|
||||
|
||||
const
|
||||
MaxAcceptedDialDataRequest* = 100 * 1024 # 100 KB
|
||||
MaxDialDataResponsePayload* = 1024
|
||||
DefaultDialBackTimeout* = 5.seconds
|
||||
|
||||
type AutonatV2Client* = ref object of LPProtocol
|
||||
dialer*: Dial
|
||||
dialBackTimeout: Duration
|
||||
rng: ref HmacDrbgContext
|
||||
expectedNonces: Table[Nonce, Opt[MultiAddress]]
|
||||
|
||||
proc handleDialBack(
|
||||
self: AutonatV2Client, conn: Connection, dialBack: DialBack
|
||||
) {.async: (raises: [CancelledError, AutonatV2Error, LPStreamError]).} =
|
||||
debug "Handling DialBack",
|
||||
conn = conn, localAddr = conn.localAddr, observedAddr = conn.observedAddr
|
||||
|
||||
if not self.expectedNonces.hasKey(dialBack.nonce):
|
||||
error "Not expecting this nonce", nonce = dialBack.nonce
|
||||
return
|
||||
|
||||
conn.localAddr.withValue(localAddr):
|
||||
debug "Setting expectedNonces",
|
||||
nonce = dialBack.nonce, localAddr = Opt.some(localAddr)
|
||||
self.expectedNonces[dialBack.nonce] = Opt.some(localAddr)
|
||||
else:
|
||||
error "Unable to get localAddr from connection"
|
||||
return
|
||||
|
||||
trace "Sending DialBackResponse"
|
||||
await conn.writeLp(DialBackResponse(status: DialBackStatus.Ok).encode().buffer)
|
||||
|
||||
proc new*(
|
||||
T: typedesc[AutonatV2Client],
|
||||
rng: ref HmacDrbgContext,
|
||||
dialBackTimeout: Duration = DefaultDialBackTimeout,
|
||||
): T =
|
||||
let client = T(rng: rng, dialBackTimeout: dialBackTimeout)
|
||||
|
||||
# handler for DialBack messages
|
||||
proc handleStream(
|
||||
conn: Connection, proto: string
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
try:
|
||||
let dialBack = DialBack.decode(initProtoBuffer(await conn.readLp(DialBackLpSize))).valueOr:
|
||||
trace "Unable to decode DialBack"
|
||||
return
|
||||
if not await client.handleDialBack(conn, dialBack).withTimeout(
|
||||
client.dialBackTimeout
|
||||
):
|
||||
trace "Sending DialBackResponse timed out"
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except LPStreamRemoteClosedError as exc:
|
||||
debug "Connection closed by peer", description = exc.msg, peer = conn.peerId
|
||||
except LPStreamError as exc:
|
||||
debug "Connection closed by peer", description = exc.msg, peer = conn.peerId
|
||||
|
||||
client.handler = handleStream
|
||||
client.codec = $AutonatV2Codec.DialBack
|
||||
client
|
||||
|
||||
proc setup*(self: AutonatV2Client, switch: Switch) =
|
||||
self.dialer = switch.dialer
|
||||
|
||||
proc handleDialDataRequest*(
|
||||
conn: Connection, req: DialDataRequest
|
||||
): Future[DialResponse] {.
|
||||
async: (raises: [CancelledError, AutonatV2Error, LPStreamError])
|
||||
.} =
|
||||
debug "Received DialDataRequest",
|
||||
numBytes = req.numBytes, maxAcceptedNumBytes = MaxAcceptedDialDataRequest
|
||||
|
||||
if req.numBytes > MaxAcceptedDialDataRequest:
|
||||
raise newException(
|
||||
AutonatV2Error, "Rejecting DialDataRequest: numBytes is greater than the maximum"
|
||||
)
|
||||
|
||||
# send required data
|
||||
var msg = AutonatV2Msg(
|
||||
msgType: MsgType.DialDataResponse,
|
||||
dialDataResp: DialDataResponse(data: newSeq[byte](MaxDialDataResponsePayload)),
|
||||
)
|
||||
let messagesToSend =
|
||||
(req.numBytes + MaxDialDataResponsePayload - 1) div MaxDialDataResponsePayload
|
||||
for i in 0 ..< messagesToSend:
|
||||
await conn.writeLp(msg.encode().buffer)
|
||||
debug "Sending DialDataResponse", i = i, messagesToSend = messagesToSend
|
||||
|
||||
# get DialResponse
|
||||
msg = AutonatV2Msg.decode(initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))).valueOr:
|
||||
raise newException(AutonatV2Error, "Unable to decode AutonatV2Msg")
|
||||
|
||||
debug "Received message", msgType = msg.msgType
|
||||
if msg.msgType != MsgType.DialResponse:
|
||||
raise
|
||||
newException(AutonatV2Error, "Expecting DialResponse, but got " & $msg.msgType)
|
||||
|
||||
return msg.dialResp
|
||||
|
||||
proc checkAddrIdx(
|
||||
self: AutonatV2Client, addrIdx: AddrIdx, testAddrs: seq[MultiAddress], nonce: Nonce
|
||||
): bool {.raises: [AutonatV2Error].} =
|
||||
debug "checking addrs", addrIdx = addrIdx, testAddrs = testAddrs, nonce = nonce
|
||||
let dialBackAddrs = self.expectedNonces.getOrDefault(nonce).valueOr:
|
||||
debug "Not expecting this nonce",
|
||||
nonce = nonce, expectedNonces = self.expectedNonces
|
||||
return false
|
||||
|
||||
if addrIdx.int >= testAddrs.len:
|
||||
debug "addrIdx outside of testAddrs range",
|
||||
addrIdx = addrIdx, testAddrs = testAddrs, testAddrsLen = testAddrs.len
|
||||
return false
|
||||
|
||||
let dialRespAddrs = testAddrs[addrIdx]
|
||||
if not areAddrsConsistent(dialRespAddrs, dialBackAddrs):
|
||||
debug "Invalid addrIdx: got DialBack in another address",
|
||||
addrIdx = addrIdx, dialBackAddrs = dialBackAddrs, dialRespAddrs = dialRespAddrs
|
||||
return false
|
||||
true
|
||||
|
||||
method sendDialRequest*(
    self: AutonatV2Client, pid: PeerId, testAddrs: seq[MultiAddress]
): Future[AutonatV2Response] {.
    base,
    async: (raises: [AutonatV2Error, CancelledError, DialFailedError, LPStreamError])
.} =
  ## Dials peer with `pid` and requests that it tries connecting to `testAddrs`

  let nonce = self.rng[].generate(Nonce)
  # Register the nonce so the DialBack handler can associate an incoming
  # dial-back with this request; no address has been observed yet.
  self.expectedNonces[nonce] = Opt.none(MultiAddress)

  var dialResp: DialResponse
  try:
    let stream = await self.dialer.dial(pid, @[$AutonatV2Codec.DialRequest])
    defer:
      await stream.close()

    # send dialRequest
    await stream.writeLp(
      AutonatV2Msg(
        msgType: MsgType.DialRequest,
        dialReq: DialRequest(addrs: testAddrs, nonce: nonce),
      ).encode().buffer
    )
    let reply = AutonatV2Msg.decode(
      initProtoBuffer(await stream.readLp(AutonatV2MsgLpSize))
    ).valueOr:
      raise newException(AutonatV2Error, "Unable to decode AutonatV2Msg")

    case reply.msgType
    of MsgType.DialResponse:
      dialResp = reply.dialResp
    of MsgType.DialDataRequest:
      # Server demands proof-of-work data before dialing back; satisfy it and
      # wait for the eventual DialResponse.
      dialResp = await stream.handleDialDataRequest(reply.dialDataReq)
    else:
      raise newException(
        AutonatV2Error,
        "Expecting DialResponse or DialDataRequest, but got " & $reply.msgType,
      )
    debug "Received DialResponse", dialResp = dialResp

    dialResp.dialStatus.withValue(dialStatus):
      if dialStatus == DialStatus.Ok:
        dialResp.addrIdx.withValue(addrIdx):
          if not self.checkAddrIdx(addrIdx, testAddrs, nonce):
            raise newException(
              AutonatV2Error, "Invalid addrIdx " & $addrIdx & " in DialResponse"
            )
  except LPStreamRemoteClosedError as exc:
    error "Stream reset by server", description = exc.msg, peer = pid
  finally:
    # rollback any changes
    self.expectedNonces.del(nonce)
  return dialResp.asAutonatV2Response(testAddrs)
|
||||
65
libp2p/protocols/connectivity/autonatv2/mock.nim
Normal file
65
libp2p/protocols/connectivity/autonatv2/mock.nim
Normal file
@@ -0,0 +1,65 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2025 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import results
|
||||
import chronos, chronicles
|
||||
import
|
||||
../../../../libp2p/
|
||||
[
|
||||
switch,
|
||||
muxers/muxer,
|
||||
dialer,
|
||||
multiaddress,
|
||||
multicodec,
|
||||
peerid,
|
||||
protobuf/minprotobuf,
|
||||
],
|
||||
../../protocol,
|
||||
./types,
|
||||
./server
|
||||
|
||||
type AutonatV2Mock* = ref object of LPProtocol
  ## Test double for the AutonatV2 server: answers any well-formed DialRequest
  ## with the pre-encoded `response` buffer, unchanged.
  config*: AutonatV2Config
  response*: ProtoBuffer

proc new*(
    T: typedesc[AutonatV2Mock], config: AutonatV2Config = AutonatV2Config.new()
): T =
  ## Creates the mock and installs its stream handler on the DialRequest codec.
  let mock = T(config: config)
  proc handleStream(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    defer:
      await conn.close()

    # Only well-formed DialRequests get an answer; anything else is dropped.
    try:
      let msg = AutonatV2Msg.decode(
        initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))
      ).valueOr:
        return
      if msg.msgType != MsgType.DialRequest:
        return
    except LPStreamError:
      return

    try:
      # return mocked message
      await conn.writeLp(mock.response.buffer)
    except CancelledError as exc:
      raise exc
    except LPStreamError:
      # includes LPStreamRemoteClosedError; best-effort reply, failures ignored
      discard

  mock.handler = handleStream
  mock.codec = $AutonatV2Codec.DialRequest
  mock
|
||||
45
libp2p/protocols/connectivity/autonatv2/mockclient.nim
Normal file
45
libp2p/protocols/connectivity/autonatv2/mockclient.nim
Normal file
@@ -0,0 +1,45 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2025 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos
|
||||
import ../../../peerid, ../../../multiaddress, ../../../switch
|
||||
import ./client, ./types
|
||||
|
||||
type AutonatV2ClientMock* = ref object of AutonatV2Client
  ## Client test double: never dials; returns a canned `response`, counts
  ## calls in `dials`, and completes `finished` once `expectedDials` is hit.
  response*: AutonatV2Response
  dials*: int
  expectedDials: int
  finished*: Future[void]

proc new*(
    T: typedesc[AutonatV2ClientMock], response: AutonatV2Response, expectedDials: int
): T =
  ## Creates a mock client that will report completion after `expectedDials`.
  T(
    response: response,
    dials: 0,
    expectedDials: expectedDials,
    finished: newFuture[void](),
  )
|
||||
|
||||
method sendDialRequest*(
    self: AutonatV2ClientMock, pid: PeerId, testAddrs: seq[MultiAddress]
): Future[AutonatV2Response] {.
    async: (raises: [AutonatV2Error, CancelledError, DialFailedError, LPStreamError])
.} =
  ## Records the call and returns the canned response, patching `addrs`
  ## to the entry of `testAddrs` the canned `addrIdx` points at.
  inc self.dials
  if self.dials == self.expectedDials:
    self.finished.complete()

  var resp = self.response
  resp.dialResp.addrIdx.withValue(addrIdx):
    # NOTE(review): assumes addrIdx < testAddrs.len — an out-of-range canned
    # index raises IndexDefect; acceptable for a mock, confirm in tests.
    resp.addrs = Opt.some(testAddrs[addrIdx])
  resp
|
||||
65
libp2p/protocols/connectivity/autonatv2/mockserver.nim
Normal file
65
libp2p/protocols/connectivity/autonatv2/mockserver.nim
Normal file
@@ -0,0 +1,65 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2025 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import results
|
||||
import chronos, chronicles
|
||||
import
|
||||
../../../../libp2p/
|
||||
[
|
||||
switch,
|
||||
muxers/muxer,
|
||||
dialer,
|
||||
multiaddress,
|
||||
multicodec,
|
||||
peerid,
|
||||
protobuf/minprotobuf,
|
||||
],
|
||||
../../protocol,
|
||||
./types,
|
||||
./server
|
||||
|
||||
# NOTE(review): this file (mockserver.nim) is byte-identical to mock.nim in
# this change set — confirm whether one of the two should be removed.
type AutonatV2Mock* = ref object of LPProtocol
  ## Test double for the AutonatV2 server: answers any well-formed DialRequest
  ## with the pre-encoded `response` buffer, unchanged.
  config*: AutonatV2Config
  response*: ProtoBuffer

proc new*(
    T: typedesc[AutonatV2Mock], config: AutonatV2Config = AutonatV2Config.new()
): T =
  ## Creates the mock and installs its stream handler on the DialRequest codec.
  let mock = T(config: config)
  proc handleStream(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    defer:
      await conn.close()

    # Only well-formed DialRequests get an answer; anything else is dropped.
    try:
      let msg = AutonatV2Msg.decode(
        initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))
      ).valueOr:
        return
      if msg.msgType != MsgType.DialRequest:
        return
    except LPStreamError:
      return

    try:
      # return mocked message
      await conn.writeLp(mock.response.buffer)
    except CancelledError as exc:
      raise exc
    except LPStreamError:
      # includes LPStreamRemoteClosedError; best-effort reply, failures ignored
      discard

  mock.handler = handleStream
  mock.codec = $AutonatV2Codec.DialRequest
  mock
|
||||
309
libp2p/protocols/connectivity/autonatv2/server.nim
Normal file
309
libp2p/protocols/connectivity/autonatv2/server.nim
Normal file
@@ -0,0 +1,309 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2025 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import results
|
||||
import chronos, chronicles
|
||||
import
|
||||
../../../../libp2p/[
|
||||
switch,
|
||||
muxers/muxer,
|
||||
dialer,
|
||||
multiaddress,
|
||||
transports/transport,
|
||||
multicodec,
|
||||
peerid,
|
||||
protobuf/minprotobuf,
|
||||
utils/ipaddr,
|
||||
],
|
||||
../../protocol,
|
||||
./types
|
||||
|
||||
logScope:
|
||||
topics = "libp2p autonat v2 server"
|
||||
|
||||
type AutonatV2Config* = object
  ## Tunables for the AutonatV2 server side.
  dialTimeout: Duration # max time allowed for the dial-back round trip
  dialDataSize: uint64 # bytes of cover data demanded from suspicious clients
  amplificationAttackTimeout: Duration # max time to wait for that cover data
  allowPrivateAddresses: bool # whether private IPs are dial-back candidates

type AutonatV2* = ref object of LPProtocol
  ## AutonatV2 server protocol: answers DialRequests by dialing clients back.
  switch*: Switch
  config: AutonatV2Config
|
||||
|
||||
proc new*(
    T: typedesc[AutonatV2Config],
    dialTimeout: Duration = DefaultDialTimeout,
    dialDataSize: uint64 = DefaultDialDataSize,
    amplificationAttackTimeout: Duration = DefaultAmplificationAttackDialTimeout,
    allowPrivateAddresses: bool = false,
): T =
  ## Builds a server configuration; all parameters default to the module's
  ## `Default*` constants, private addresses are refused by default.
  T(
    dialTimeout: dialTimeout,
    dialDataSize: dialDataSize,
    amplificationAttackTimeout: amplificationAttackTimeout,
    allowPrivateAddresses: allowPrivateAddresses,
  )
|
||||
|
||||
proc sendDialResponse(
    conn: Connection,
    status: ResponseStatus,
    addrIdx: Opt[AddrIdx] = Opt.none(AddrIdx),
    dialStatus: Opt[DialStatus] = Opt.none(DialStatus),
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Encodes a DialResponse with the given fields and writes it on `conn`.
  let resp = DialResponse(status: status, addrIdx: addrIdx, dialStatus: dialStatus)
  await conn.writeLp(
    AutonatV2Msg(msgType: MsgType.DialResponse, dialResp: resp).encode().buffer
  )
|
||||
|
||||
proc findObservedIPAddr*(
    conn: Connection, req: DialRequest
): Future[Opt[MultiAddress]] {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Extracts the IP component (first protocol) of the address `conn` was
  ## observed on. Sends an error DialResponse and returns none when the
  ## observed address is missing, malformed, or relayed.
  let observedAddr = conn.observedAddr.valueOr:
    await conn.sendDialResponse(ResponseStatus.EInternalError)
    return Opt.none(MultiAddress)

  # A relayed address cannot be used to prove direct reachability.
  let isRelayed = observedAddr.contains(multiCodec("p2p-circuit")).valueOr:
    error "Invalid observed address"
    await conn.sendDialResponse(ResponseStatus.EDialRefused)
    return Opt.none(MultiAddress)

  if isRelayed:
    error "Invalid observed address: relayed address"
    await conn.sendDialResponse(ResponseStatus.EDialRefused)
    return Opt.none(MultiAddress)

  let hostIp = observedAddr[0].valueOr:
    error "Invalid observed address"
    await conn.sendDialResponse(ResponseStatus.EInternalError)
    return Opt.none(MultiAddress)

  # NOTE(review): `req` is unused in this body — confirm whether it is kept
  # for interface stability or can be dropped.
  return Opt.some(hostIp)
|
||||
|
||||
proc dialBack(
    conn: Connection, nonce: Nonce
): Future[DialStatus] {.
    async: (raises: [CancelledError, DialFailedError, LPStreamError])
.} =
  ## Sends a DialBack carrying `nonce` on `conn` and waits for the
  ## DialBackResponse, mapping failures to the matching DialStatus.
  try:
    # send dial back
    await conn.writeLp(DialBack(nonce: nonce).encode().buffer)

    # receive DialBackResponse; a reply that fails to decode counts as a
    # dial-back error (the decoded content itself is not inspected further)
    if DialBackResponse.decode(
      initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))
    ).isNone():
      trace "DialBack failed, could not decode DialBackResponse"
      return DialStatus.EDialBackError
  except LPStreamRemoteClosedError as exc:
    # failed because of nonce error (remote reset the stream): EDialBackError
    debug "DialBack failed, remote closed the connection", description = exc.msg
    return DialStatus.EDialBackError

  # TODO: failed because of client or server resources: EDialError

  trace "DialBack successful"
  return DialStatus.Ok
|
||||
|
||||
proc handleDialDataResponses(
    self: AutonatV2, conn: Connection
) {.async: (raises: [CancelledError, AutonatV2Error, LPStreamError]).} =
  ## Reads DialDataResponse messages from `conn` until at least
  ## `config.dialDataSize` bytes have been accumulated.
  ## Raises AutonatV2Error on malformed or unexpected messages.
  var received: uint64 = 0

  while received < self.config.dialDataSize:
    let msg = AutonatV2Msg.decode(
      initProtoBuffer(await conn.readLp(DialDataResponseLpSize))
    ).valueOr:
      raise newException(AutonatV2Error, "Received malformed message")
    debug "Received message", msgType = $msg.msgType
    if msg.msgType != MsgType.DialDataResponse:
      raise
        newException(AutonatV2Error, "Expecting DialDataResponse, got " & $msg.msgType)
    let chunkLen = msg.dialDataResp.data.len.uint64
    received += chunkLen
    debug "received data", dataReceived = chunkLen, totalDataReceived = received
|
||||
|
||||
proc amplificationAttackPrevention(
    self: AutonatV2, conn: Connection, addrIdx: AddrIdx
): Future[bool] {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Asks the client to upload `config.dialDataSize` bytes of throwaway data
  ## before we dial back, so a spoofed requester pays at least as much
  ## bandwidth as it could trigger. Returns false when the client fails.
  # send DialDataRequest
  await conn.writeLp(
    AutonatV2Msg(
      msgType: MsgType.DialDataRequest,
      dialDataReq: DialDataRequest(addrIdx: addrIdx, numBytes: self.config.dialDataSize),
    ).encode().buffer
  )

  # receive DialDataResponses until we're satisfied
  try:
    await self.handleDialDataResponses(conn)
  except AutonatV2Error as exc:
    error "Amplification attack prevention failed", description = exc.msg
    return false

  return true
|
||||
|
||||
proc canDial(self: AutonatV2, addrs: MultiAddress): bool =
  ## True when `addrs` is a dial-back candidate: its IP family is one we
  ## listen on, it is not private (unless the config allows it), and at least
  ## one of our transports handles it.
  let (ipv4Support, ipv6Support) = self.switch.peerInfo.listenAddrs.ipSupport()
  addrs[0].withValue(addrIp):
    if IP4.match(addrIp) and not ipv4Support:
      return false
    if IP6.match(addrIp) and not ipv6Support:
      return false
    try:
      if not self.config.allowPrivateAddresses and isPrivate($addrIp):
        return false
    except ValueError:
      warn "Unable to parse IP address, skipping", addrs = $addrIp
      return false
  for transport in self.switch.transports:
    if transport.handles(addrs):
      return true
  false
|
||||
|
||||
proc forceNewConnection(
    self: AutonatV2, pid: PeerId, addrs: seq[MultiAddress]
): Future[Opt[Connection]] {.async: (raises: [CancelledError]).} =
  ## Bypasses connManager to force a new connection to ``pid``
  ## instead of reusing a preexistent one
  try:
    let mux = await self.switch.dialer.dialAndUpgrade(Opt.some(pid), addrs)
    if mux.isNil():
      return Opt.none(Connection)
    let stream = await self.switch.dialer.negotiateStream(
      await mux.newStream(), @[$AutonatV2Codec.DialBack]
    )
    return Opt.some(stream)
  except CancelledError as exc:
    raise exc
  except CatchableError:
    # dial/upgrade/negotiation failure is reported as "no connection"
    return Opt.none(Connection)
|
||||
|
||||
proc chooseDialAddr(
    self: AutonatV2, pid: PeerId, addrs: seq[MultiAddress]
): Future[(Opt[Connection], Opt[AddrIdx])] {.async: (raises: [CancelledError]).} =
  ## Walks `addrs` in order and dials the first one `canDial` accepts.
  ## Returns (conn, idx) on success, (none, idx) when that dial timed out,
  ## and (none, none) when no address was dialable or the dial failed.
  for idx, ma in addrs:
    if not self.canDial(ma):
      continue
    debug "Trying to dial", chosenAddrs = ma, addrIdx = idx
    let conn =
      try:
        (await (self.forceNewConnection(pid, @[ma]).wait(self.config.dialTimeout))).valueOr:
          return (Opt.none(Connection), Opt.none(AddrIdx))
      except AsyncTimeoutError:
        trace "Dial timed out"
        return (Opt.none(Connection), Opt.some(idx.AddrIdx))
    return (Opt.some(conn), Opt.some(idx.AddrIdx))
  (Opt.none(Connection), Opt.none(AddrIdx))
|
||||
|
||||
proc handleDialRequest(
    self: AutonatV2, conn: Connection, req: DialRequest
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Serves one DialRequest: picks a dialable address from `req.addrs`, runs
  ## amplification-attack prevention when the observed IP is not among the
  ## requested addresses, dials the client back with `req.nonce`, and reports
  ## the outcome in a DialResponse.
  let observedIPAddr = (await conn.findObservedIPAddr(req)).valueOr:
    trace "Could not find observed IP address"
    await conn.sendDialResponse(ResponseStatus.ERequestRejected)
    return

  let (dialBackConnOpt, addrIdxOpt) = await self.chooseDialAddr(conn.peerId, req.addrs)
  let addrIdx = addrIdxOpt.valueOr:
    trace "No dialable addresses found"
    await conn.sendDialResponse(ResponseStatus.EDialRefused)
    return
  let dialBackConn = dialBackConnOpt.valueOr:
    # An index without a connection means the chosen address timed out.
    trace "Dial failed"
    await conn.sendDialResponse(
      ResponseStatus.Ok,
      addrIdx = Opt.some(addrIdx),
      dialStatus = Opt.some(DialStatus.EDialError),
    )
    return
  defer:
    await dialBackConn.close()

  # if observed address for peer is not in address list to try
  # then we perform Amplification Attack Prevention
  if not ipAddrMatches(observedIPAddr, req.addrs):
    debug "Starting amplification attack prevention",
      observedIPAddr = observedIPAddr, testAddr = req.addrs[addrIdx]
    # send DialDataRequest and wait until dataReceived is enough
    if not await self.amplificationAttackPrevention(conn, addrIdx).withTimeout(
      self.config.amplificationAttackTimeout
    ):
      debug "Amplification attack prevention timeout",
        timeout = self.config.amplificationAttackTimeout, peer = conn.peerId
      await conn.sendDialResponse(ResponseStatus.EDialRefused)
      return

  debug "Sending DialBack",
    nonce = req.nonce, addrIdx = addrIdx, addr = req.addrs[addrIdx]

  try:
    let dialStatus =
      await dialBackConn.dialBack(req.nonce).wait(self.config.dialTimeout)
    await conn.sendDialResponse(
      ResponseStatus.Ok, addrIdx = Opt.some(addrIdx), dialStatus = Opt.some(dialStatus)
    )
  except DialFailedError as exc:
    debug "DialBack failed", description = exc.msg
    await conn.sendDialResponse(
      ResponseStatus.Ok,
      addrIdx = Opt.some(addrIdx),
      dialStatus = Opt.some(DialStatus.EDialBackError),
    )
  except AsyncTimeoutError:
    debug "DialBack timeout", timeout = self.config.dialTimeout
    await conn.sendDialResponse(
      ResponseStatus.Ok,
      addrIdx = Opt.some(addrIdx),
      dialStatus = Opt.some(DialStatus.EDialBackError),
    )
|
||||
|
||||
proc new*(
    T: typedesc[AutonatV2],
    switch: Switch,
    config: AutonatV2Config = AutonatV2Config.new(),
): T =
  ## Creates the AutonatV2 server protocol and installs its stream handler
  ## on the DialRequest codec.
  let autonatV2 = T(switch: switch, config: config)
  proc handleStream(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    defer:
      await conn.close()

    let msg =
      try:
        AutonatV2Msg.decode(initProtoBuffer(await conn.readLp(AutonatV2MsgLpSize))).valueOr:
          trace "Unable to decode AutonatV2Msg"
          return
      except LPStreamError as exc:
        debug "Could not receive AutonatV2Msg", description = exc.msg
        return

    debug "Received message", msgType = $msg.msgType
    if msg.msgType != MsgType.DialRequest:
      # Only DialRequests are valid openers on this codec.
      debug "Expecting DialRequest", receivedMsgType = msg.msgType
      return

    try:
      await autonatV2.handleDialRequest(conn, msg.dialReq)
    except CancelledError as exc:
      raise exc
    except LPStreamRemoteClosedError as exc:
      debug "Connection closed by peer", description = exc.msg, peer = conn.peerId
    except LPStreamError as exc:
      debug "Stream Error", description = exc.msg

  autonatV2.handler = handleStream
  autonatV2.codec = $AutonatV2Codec.DialRequest
  autonatV2
|
||||
278
libp2p/protocols/connectivity/autonatv2/service.nim
Normal file
278
libp2p/protocols/connectivity/autonatv2/service.nim
Normal file
@@ -0,0 +1,278 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2025 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[deques, sequtils]
|
||||
import chronos, chronicles, metrics, results
|
||||
import
|
||||
../../protocol,
|
||||
../../../switch,
|
||||
../../../multiaddress,
|
||||
../../../multicodec,
|
||||
../../../peerid,
|
||||
../../../protobuf/minprotobuf,
|
||||
../../../wire,
|
||||
../../../utils/heartbeat,
|
||||
../../../crypto/crypto,
|
||||
../autonat/types,
|
||||
./types,
|
||||
./client
|
||||
|
||||
declarePublicGauge(
  libp2p_autonat_v2_reachability_confidence,
  "autonat v2 reachability confidence",
  labels = ["reachability"],
)

logScope:
  topics = "libp2p autonatv2 service"

# needed because nim 2.0 can't do proper type assertions
const noneDuration: Opt[Duration] = Opt.none(Duration)

type
  AutonatV2ServiceConfig* = object
    ## Configuration for the periodic reachability-probing service.
    scheduleInterval: Opt[Duration] # how often to re-probe; none = never
    askNewConnectedPeers: bool # probe each newly connected peer
    numPeersToAsk: int # max useful answers collected per run
    maxQueueSize: int # size of the sliding window of answers
    minConfidence: float # fraction of answers needed to accept a status
    enableAddressMapper: bool # rewrite listen addrs once reachable

  AutonatV2Service* = ref object of Service
    ## Periodically asks connected peers to dial us back (autonat v2) and
    ## derives NetworkReachability plus a confidence score from the last
    ## `maxQueueSize` answers.
    config*: AutonatV2ServiceConfig
    confidence: Opt[float]
    newConnectedPeerHandler: PeerEventHandler
    statusAndConfidenceHandler: StatusAndConfidenceHandler
    addressMapper: AddressMapper
    scheduleHandle: Future[void]
    networkReachability*: NetworkReachability
    answers: Deque[NetworkReachability]
    client*: AutonatV2Client
    rng: ref HmacDrbgContext

  StatusAndConfidenceHandler* = proc(
    networkReachability: NetworkReachability, confidence: Opt[float]
  ): Future[void] {.gcsafe, async: (raises: [CancelledError]).}
|
||||
|
||||
proc new*(
    T: typedesc[AutonatV2ServiceConfig],
    scheduleInterval: Opt[Duration] = noneDuration,
    askNewConnectedPeers = true,
    numPeersToAsk: int = 5,
    maxQueueSize: int = 10,
    minConfidence: float = 0.3,
    enableAddressMapper = true,
): T =
  ## Builds a service configuration; by default new peers are probed, no
  ## periodic schedule is installed, and the address mapper is enabled.
  return T(
    scheduleInterval: scheduleInterval,
    askNewConnectedPeers: askNewConnectedPeers,
    numPeersToAsk: numPeersToAsk,
    maxQueueSize: maxQueueSize,
    minConfidence: minConfidence,
    enableAddressMapper: enableAddressMapper,
  )
|
||||
|
||||
proc new*(
    T: typedesc[AutonatV2Service],
    rng: ref HmacDrbgContext,
    client: AutonatV2Client = AutonatV2Client.new(),
    config: AutonatV2ServiceConfig = AutonatV2ServiceConfig.new(),
): T =
  ## Creates the service with an empty answer window and Unknown reachability.
  return T(
    config: config,
    confidence: Opt.none(float),
    networkReachability: Unknown,
    answers: initDeque[NetworkReachability](),
    client: client,
    rng: rng,
  )
|
||||
|
||||
proc callHandler(self: AutonatV2Service) {.async: (raises: [CancelledError]).} =
  ## Invokes the user-supplied status handler, if one is registered.
  if self.statusAndConfidenceHandler.isNil():
    return
  await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
|
||||
|
||||
proc hasEnoughIncomingSlots(switch: Switch): bool =
  ## True when at least two inbound connection slots are free.
  # we leave some margin instead of comparing to 0 as a peer could connect to us while we are asking for the dial back
  return switch.connManager.slotsAvailable(In) >= 2
|
||||
|
||||
proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =
  ## True when an inbound muxer to `peerId` already exists.
  not switch.connManager.selectMuxer(peerId, In).isNil()
|
||||
|
||||
proc handleAnswer(
    self: AutonatV2Service, ans: NetworkReachability
): Future[bool] {.async: (raises: [CancelledError]).} =
  ## Folds one peer's answer into the sliding window and recomputes the
  ## aggregate reachability plus its confidence.
  ## Returns true when either the reachability or the confidence changed.
  if ans == Unknown:
    # Unknown answers are not recorded; implicit result is false (no change).
    return

  let oldNetworkReachability = self.networkReachability
  let oldConfidence = self.confidence

  # Sliding window of the last `maxQueueSize` non-Unknown answers.
  if self.answers.len == self.config.maxQueueSize:
    self.answers.popFirst()
  self.answers.addLast(ans)

  self.networkReachability = Unknown
  self.confidence = Opt.none(float)
  # Reachable wins over NotReachable when both clear the confidence bar.
  const reachabilityPriority = [Reachable, NotReachable]
  for reachability in reachabilityPriority:
    # Divisor is maxQueueSize (not answers.len): confidence stays
    # deliberately conservative until the window has filled up.
    let confidence = self.answers.countIt(it == reachability) / self.config.maxQueueSize
    libp2p_autonat_v2_reachability_confidence.set(
      value = confidence, labelValues = [$reachability]
    )
    if self.confidence.isNone and confidence >= self.config.minConfidence:
      self.networkReachability = reachability
      self.confidence = Opt.some(confidence)

  debug "Current status",
    currentStats = $self.networkReachability,
    confidence = $self.confidence,
    answers = self.answers

  # Return whether anything has changed
  return
    self.networkReachability != oldNetworkReachability or
    self.confidence != oldConfidence
|
||||
|
||||
proc askPeer(
    self: AutonatV2Service, switch: Switch, peerId: PeerId
): Future[NetworkReachability] {.async: (raises: [CancelledError]).} =
  ## Asks a single peer to dial us back and folds the answer into our state,
  ## notifying the status handler and refreshing our advertised addresses on
  ## change. Returns Unknown when the peer was skipped or the request failed.
  logScope:
    peerId = $peerId

  # Skip peers that already hold an inbound connection to us.
  if doesPeerHaveIncomingConn(switch, peerId):
    return Unknown

  if not hasEnoughIncomingSlots(switch):
    debug "No incoming slots available, not asking peer",
      incomingSlotsAvailable = switch.connManager.slotsAvailable(In)
    return Unknown

  trace "Asking peer for reachability"
  let ans =
    try:
      let reqAddrs = switch.peerInfo.addrs
      let autonatV2Resp = await self.client.sendDialRequest(peerId, reqAddrs)
      debug "AutonatV2Response", autonatV2Resp = autonatV2Resp
      autonatV2Resp.reachability
    except CancelledError as exc:
      raise exc
    except LPStreamError as exc:
      debug "DialRequest stream error", description = exc.msg
      Unknown
    except DialFailedError as exc:
      debug "DialRequest dial failed", description = exc.msg
      Unknown
    except AutonatV2Error as exc:
      debug "DialRequest error", description = exc.msg
      Unknown
  let hasReachabilityOrConfidenceChanged = await self.handleAnswer(ans)
  if hasReachabilityOrConfidenceChanged:
    await self.callHandler()
    await switch.peerInfo.update()
  return ans
|
||||
|
||||
proc askConnectedPeers(
    self: AutonatV2Service, switch: Switch
) {.async: (raises: [CancelledError]).} =
  ## Probes randomly chosen outbound peers until `numPeersToAsk` useful
  ## (non-Unknown) answers are collected or inbound slots run low.
  trace "Asking peers for reachability"
  var candidates = switch.connectedPeers(Direction.Out)
  self.rng.shuffle(candidates)
  var usefulAnswers = 0
  for peer in candidates:
    if usefulAnswers >= self.config.numPeersToAsk:
      break
    if not hasEnoughIncomingSlots(switch):
      debug "No incoming slots available, not asking peers",
        incomingSlotsAvailable = switch.connManager.slotsAvailable(In)
      break
    if (await askPeer(self, switch, peer)) != Unknown:
      inc usefulAnswers
|
||||
|
||||
proc schedule(
    service: AutonatV2Service, switch: Switch, interval: Duration
) {.async: (raises: [CancelledError]).} =
  ## Runs the service every `interval` until the future is cancelled.
  heartbeat "Scheduling AutonatV2Service run", interval:
    await service.run(switch)
|
||||
|
||||
proc addressMapper(
    self: AutonatV2Service, peerStore: PeerStore, listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
  ## Maps listen addresses once we know we are reachable: public addresses
  ## pass through unchanged, others are replaced by the peer store's best
  ## guess of our dialable address. When reachability is not established,
  ## the addresses are returned untouched.
  if not self.networkReachability.isReachable():
    return listenAddrs

  var mapped = newSeq[MultiAddress]()
  for listenAddr in listenAddrs:
    # Fix: the original also re-tested `not self.networkReachability.isReachable()`
    # here, which is always false after the early return above — dead
    # condition removed; behavior is unchanged.
    if listenAddr.isPublicMA():
      mapped.add(listenAddr)
    else:
      mapped.add(peerStore.guessDialableAddr(listenAddr))
  return mapped
|
||||
|
||||
method setup*(
    self: AutonatV2Service, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
  ## Wires the service into `switch`: installs the address mapper, the
  ## new-peer probe handler, and (when configured) the periodic schedule.
  ## Returns the base Service's setup result (false when already set up).
  self.addressMapper = proc(
      listenAddrs: seq[MultiAddress]
  ): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
    return await addressMapper(self, switch.peerStore, listenAddrs)

  trace "Setting up AutonatV2Service"
  let hasBeenSetup = await procCall Service(self).setup(switch)
  if not hasBeenSetup:
    return hasBeenSetup

  if self.config.askNewConnectedPeers:
    self.newConnectedPeerHandler = proc(
        peerId: PeerId, event: PeerEvent
    ): Future[void] {.async: (raises: [CancelledError]).} =
      # NOTE(review): the probe future is discarded, so its outcome is
      # dropped silently — consider asyncSpawn for tracked fire-and-forget.
      discard askPeer(self, switch, peerId)

    switch.connManager.addPeerEventHandler(
      self.newConnectedPeerHandler, PeerEventKind.Joined
    )

  self.config.scheduleInterval.withValue(interval):
    self.scheduleHandle = schedule(self, switch, interval)

  if self.config.enableAddressMapper:
    switch.peerInfo.addressMappers.add(self.addressMapper)

  return hasBeenSetup
|
||||
|
||||
method run*(
    self: AutonatV2Service, switch: Switch
) {.public, async: (raises: [CancelledError]).} =
  ## One probing round: query currently connected outbound peers.
  trace "Running AutonatV2Service"
  await askConnectedPeers(self, switch)
|
||||
|
||||
method stop*(
    self: AutonatV2Service, switch: Switch
): Future[bool] {.public, async: (raises: [CancelledError]).} =
  ## Tears down what `setup` installed: the schedule, the peer-event handler
  ## and the address mapper. Returns the base Service's stop result.
  trace "Stopping AutonatV2Service"
  let hasBeenStopped = await procCall Service(self).stop(switch)
  if not hasBeenStopped:
    return hasBeenStopped
  if not isNil(self.scheduleHandle):
    self.scheduleHandle.cancelSoon()
    self.scheduleHandle = nil
  if not isNil(self.newConnectedPeerHandler):
    switch.connManager.removePeerEventHandler(
      self.newConnectedPeerHandler, PeerEventKind.Joined
    )
  if self.config.enableAddressMapper:
    switch.peerInfo.addressMappers.keepItIf(it != self.addressMapper)
    # re-publish our addresses without this mapper applied
    await switch.peerInfo.update()
  return hasBeenStopped
|
||||
|
||||
proc setStatusAndConfidenceHandler*(
    self: AutonatV2Service, statusAndConfidenceHandler: StatusAndConfidenceHandler
) =
  ## Registers the callback invoked whenever reachability or confidence change.
  self.statusAndConfidenceHandler = statusAndConfidenceHandler
|
||||
265
libp2p/protocols/connectivity/autonatv2/types.nim
Normal file
265
libp2p/protocols/connectivity/autonatv2/types.nim
Normal file
@@ -0,0 +1,265 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2025 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import results, chronos, chronicles
|
||||
import
|
||||
../../../multiaddress, ../../../peerid, ../../../protobuf/minprotobuf, ../../../switch
|
||||
from ../autonat/types import NetworkReachability
|
||||
|
||||
export NetworkReachability
|
||||
|
||||
const
  DefaultDialTimeout*: Duration = 15.seconds
  DefaultAmplificationAttackDialTimeout*: Duration = 3.seconds
  DefaultDialDataSize*: uint64 = 50 * 1024 # 50 KiB (binary), slightly more than 50 KB
  AutonatV2MsgLpSize*: int = 1024
  DialBackLpSize*: int = 1024
  # readLp needs to receive more than 4096 bytes (since it's a DialDataResponse) + overhead
  DialDataResponseLpSize*: int = 5000

type
  AutonatV2Codec* {.pure.} = enum
    ## Protocol IDs used by autonat v2.
    DialRequest = "/libp2p/autonat/2/dial-request"
    DialBack = "/libp2p/autonat/2/dial-back"

  AutonatV2Response* = object
    ## Client-side summary of a completed dial-request exchange.
    reachability*: NetworkReachability
    dialResp*: DialResponse
    addrs*: Opt[MultiAddress] # address confirmed reachable, if any

  AutonatV2Error* = object of LPError

  Nonce* = uint64

  AddrIdx* = uint32

  NumBytes* = uint64

  MsgType* {.pure.} = enum
    # DialBack and DialBackResponse are not defined as AutonatV2Msg as per the spec
    # likely because they are expected in response to some other message
    DialRequest
    DialResponse
    DialDataRequest
    DialDataResponse

  ResponseStatus* {.pure.} = enum
    ## Outcome of the request itself (not of the dial-back attempt).
    EInternalError = 0
    ERequestRejected = 100
    EDialRefused = 101
    Ok = 200

  DialBackStatus* {.pure.} = enum
    Ok = 0

  DialStatus* {.pure.} = enum
    ## Outcome of the server's dial-back attempt.
    Unused = 0
    EDialError = 100
    EDialBackError = 101
    Ok = 200

  DialRequest* = object
    addrs*: seq[MultiAddress]
    nonce*: Nonce

  DialResponse* = object
    status*: ResponseStatus
    addrIdx*: Opt[AddrIdx]
    dialStatus*: Opt[DialStatus]

  DialBack* = object
    nonce*: Nonce

  DialBackResponse* = object
    status*: DialBackStatus

  DialDataRequest* = object
    addrIdx*: AddrIdx
    numBytes*: NumBytes

  DialDataResponse* = object
    data*: seq[byte]

  AutonatV2Msg* = object
    ## Tagged union over the four autonat v2 wire messages.
    case msgType*: MsgType
    of MsgType.DialRequest:
      dialReq*: DialRequest
    of MsgType.DialResponse:
      dialResp*: DialResponse
    of MsgType.DialDataRequest:
      dialDataReq*: DialDataRequest
    of MsgType.DialDataResponse:
      dialDataResp*: DialDataResponse
|
||||
|
||||
# DialRequest
|
||||
proc encode*(dialReq: DialRequest): ProtoBuffer =
  ## Serializes a DialRequest: repeated address bytes (field 1), nonce (field 2).
  result = initProtoBuffer()
  for ma in dialReq.addrs:
    result.write(1, ma.data.buffer)
  result.write(2, dialReq.nonce)
  result.finish()
|
||||
|
||||
proc decode*(T: typedesc[DialRequest], pb: ProtoBuffer): Opt[T] =
|
||||
var
|
||||
addrs: seq[MultiAddress]
|
||||
nonce: Nonce
|
||||
if not ?pb.getRepeatedField(1, addrs).toOpt():
|
||||
return Opt.none(T)
|
||||
if not ?pb.getField(2, nonce).toOpt():
|
||||
return Opt.none(T)
|
||||
Opt.some(T(addrs: addrs, nonce: nonce))
|
||||
|
||||
# DialResponse
proc encode*(dialResp: DialResponse): ProtoBuffer =
  ## Serializes a DialResponse: field 1 status, optional field 2 addrIdx,
  ## optional field 3 dialStatus.
  var encoded = initProtoBuffer()
  encoded.write(1, dialResp.status.uint)
  # minprotobuf casts uses float64 for fixed64 fields
  dialResp.addrIdx.withValue(addrIdx):
    encoded.write(2, addrIdx)
  dialResp.dialStatus.withValue(dialStatus):
    encoded.write(3, dialStatus.uint)
  encoded.finish()
  encoded

proc decode*(T: typedesc[DialResponse], pb: ProtoBuffer): Opt[T] =
  ## Parses a DialResponse; returns none when the message is malformed or
  ## carries an enum value outside the protocol's defined set.
  ## Fix: previously unknown values were `cast` into ResponseStatus /
  ## DialStatus, which yields invalid enum values for these sparse enums.
  var
    status: uint
    addrIdx: AddrIdx
    dialStatus: uint

  if not ?pb.getField(1, status).toOpt():
    return Opt.none(T)
  # validate instead of blind cast: reject unknown status codes
  let parsedStatus =
    case status
    of 0'u: ResponseStatus.EInternalError
    of 100'u: ResponseStatus.ERequestRejected
    of 101'u: ResponseStatus.EDialRefused
    of 200'u: ResponseStatus.Ok
    else: return Opt.none(T)

  var optAddrIdx = Opt.none(AddrIdx)
  if ?pb.getField(2, addrIdx).toOpt():
    optAddrIdx = Opt.some(addrIdx)

  var optDialStatus = Opt.none(DialStatus)
  if ?pb.getField(3, dialStatus).toOpt():
    optDialStatus = Opt.some(
      case dialStatus
      of 0'u: DialStatus.Unused
      of 100'u: DialStatus.EDialError
      of 101'u: DialStatus.EDialBackError
      of 200'u: DialStatus.Ok
      else: return Opt.none(T)
    )

  Opt.some(
    T(status: parsedStatus, addrIdx: optAddrIdx, dialStatus: optDialStatus)
  )
|
||||
|
||||
# DialBack
proc encode*(dialBack: DialBack): ProtoBuffer =
  ## Serializes a DialBack: field 1 carries the nonce to be echoed.
  result = initProtoBuffer()
  result.write(1, dialBack.nonce)
  result.finish()

proc decode*(T: typedesc[DialBack], pb: ProtoBuffer): Opt[T] =
  ## Parses a DialBack; none when the nonce field is missing or malformed.
  var parsedNonce: Nonce
  if ?pb.getField(1, parsedNonce).toOpt():
    Opt.some(T(nonce: parsedNonce))
  else:
    Opt.none(T)
|
||||
|
||||
# DialBackResponse
proc encode*(dialBackResp: DialBackResponse): ProtoBuffer =
  ## Serializes a DialBackResponse: field 1 status.
  var encoded = initProtoBuffer()
  encoded.write(1, dialBackResp.status.uint)
  encoded.finish()
  encoded

proc decode*(T: typedesc[DialBackResponse], pb: ProtoBuffer): Opt[T] =
  ## Parses a DialBackResponse; none when the status field is missing or
  ## is not a defined DialBackStatus.
  ## Fix: previously any wire value was `cast` into DialBackStatus, which
  ## produces an invalid enum value for anything other than 0 (Ok).
  var status: uint
  if not ?pb.getField(1, status).toOpt():
    return Opt.none(T)
  if status != DialBackStatus.Ok.uint:
    return Opt.none(T)
  Opt.some(T(status: DialBackStatus.Ok))
|
||||
|
||||
# DialDataRequest
proc encode*(dialDataReq: DialDataRequest): ProtoBuffer =
  ## Serializes a DialDataRequest: field 1 addrIdx, field 2 numBytes.
  result = initProtoBuffer()
  result.write(1, dialDataReq.addrIdx)
  result.write(2, dialDataReq.numBytes)
  result.finish()

proc decode*(T: typedesc[DialDataRequest], pb: ProtoBuffer): Opt[T] =
  ## Parses a DialDataRequest; none when either field is missing or
  ## fails to parse.
  var
    parsedIdx: AddrIdx
    parsedBytes: NumBytes
  if not ?pb.getField(1, parsedIdx).toOpt():
    return Opt.none(T)
  if not ?pb.getField(2, parsedBytes).toOpt():
    return Opt.none(T)
  Opt.some(T(addrIdx: parsedIdx, numBytes: parsedBytes))
|
||||
|
||||
# DialDataResponse
proc encode*(dialDataResp: DialDataResponse): ProtoBuffer =
  ## Serializes a DialDataResponse: field 1 carries the raw cover data.
  result = initProtoBuffer()
  result.write(1, dialDataResp.data)
  result.finish()

proc decode*(T: typedesc[DialDataResponse], pb: ProtoBuffer): Opt[T] =
  ## Parses a DialDataResponse; none when the data field is missing or
  ## fails to parse.
  var payload: seq[byte]
  if ?pb.getField(1, payload).toOpt():
    Opt.some(T(data: payload))
  else:
    Opt.none(T)
|
||||
|
||||
proc protoField(msgType: MsgType): int =
  ## Protobuf field number under which each message kind is nested inside
  ## an AutonatV2Msg envelope.
  case msgType
  of MsgType.DialRequest: 1
  of MsgType.DialResponse: 2
  of MsgType.DialDataRequest: 3
  of MsgType.DialDataResponse: 4
|
||||
|
||||
# AutonatV2Msg
proc encode*(msg: AutonatV2Msg): ProtoBuffer =
  ## Serializes the envelope: the active variant is written as a nested
  ## message under the field number given by `protoField`.
  var encoded = initProtoBuffer()
  case msg.msgType
  of MsgType.DialRequest:
    encoded.write(MsgType.DialRequest.protoField, msg.dialReq.encode())
  of MsgType.DialResponse:
    encoded.write(MsgType.DialResponse.protoField, msg.dialResp.encode())
  of MsgType.DialDataRequest:
    encoded.write(MsgType.DialDataRequest.protoField, msg.dialDataReq.encode())
  of MsgType.DialDataResponse:
    encoded.write(MsgType.DialDataResponse.protoField, msg.dialDataResp.encode())
  encoded.finish()
  encoded
|
||||
|
||||
proc decode*(T: typedesc[AutonatV2Msg], pb: ProtoBuffer): Opt[T] =
  ## Probes the envelope's fields in declaration order and decodes the
  ## first one present; returns none when no known field is set or the
  ## nested payload fails to decode.
  var
    msgTypeOrd: uint32 # NOTE(review): appears unused — candidate for removal
    msg: ProtoBuffer

  if ?pb.getField(MsgType.DialRequest.protoField, msg).toOpt():
    let dialReq = DialRequest.decode(msg).valueOr:
      return Opt.none(AutonatV2Msg)
    Opt.some(AutonatV2Msg(msgType: MsgType.DialRequest, dialReq: dialReq))
  elif ?pb.getField(MsgType.DialResponse.protoField, msg).toOpt():
    let dialResp = DialResponse.decode(msg).valueOr:
      return Opt.none(AutonatV2Msg)
    Opt.some(AutonatV2Msg(msgType: MsgType.DialResponse, dialResp: dialResp))
  elif ?pb.getField(MsgType.DialDataRequest.protoField, msg).toOpt():
    let dialDataReq = DialDataRequest.decode(msg).valueOr:
      return Opt.none(AutonatV2Msg)
    Opt.some(AutonatV2Msg(msgType: MsgType.DialDataRequest, dialDataReq: dialDataReq))
  elif ?pb.getField(MsgType.DialDataResponse.protoField, msg).toOpt():
    let dialDataResp = DialDataResponse.decode(msg).valueOr:
      return Opt.none(AutonatV2Msg)
    Opt.some(
      AutonatV2Msg(msgType: MsgType.DialDataResponse, dialDataResp: dialDataResp)
    )
  else:
    Opt.none(AutonatV2Msg)
|
||||
|
||||
# Custom `==` is needed to compare since AutonatV2Msg is a case object
proc `==`*(a, b: AutonatV2Msg): bool =
  ## Equal when both the discriminator and the serialized bytes match;
  ## comparing encodings sidesteps per-branch field comparison.
  a.msgType == b.msgType and a.encode() == b.encode()
|
||||
46
libp2p/protocols/connectivity/autonatv2/utils.nim
Normal file
46
libp2p/protocols/connectivity/autonatv2/utils.nim
Normal file
@@ -0,0 +1,46 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import results
|
||||
import chronos
|
||||
import
|
||||
../../protocol,
|
||||
../../../switch,
|
||||
../../../multiaddress,
|
||||
../../../multicodec,
|
||||
../../../peerid,
|
||||
../../../protobuf/minprotobuf,
|
||||
./types
|
||||
|
||||
proc asNetworkReachability*(self: DialResponse): NetworkReachability =
  ## Collapses a DialResponse onto the coarse NetworkReachability scale:
  ## any non-Ok request status (or a missing/Unused dial status) is
  ## Unknown, a failed dial or dial-back is NotReachable, otherwise
  ## Reachable.
  if self.status != ResponseStatus.Ok:
    # request was rejected, refused, or errored — no dial was attempted
    return Unknown

  # if got here it means a dial was attempted
  let dialStatus = self.dialStatus.valueOr:
    return Unknown
  case dialStatus
  of Unused:
    Unknown
  of EDialError, EDialBackError:
    NotReachable
  of DialStatus.Ok:
    Reachable
|
||||
|
||||
proc asAutonatV2Response*(
    self: DialResponse, testAddrs: seq[MultiAddress]
): AutonatV2Response =
  ## Builds the user-facing response, resolving the server-reported
  ## `addrIdx` against `testAddrs` (the addresses sent in the DialRequest).
  ## Fix: the index is validated against `testAddrs.len` before indexing —
  ## previously an out-of-range index from a misbehaving server raised an
  ## IndexDefect (this module is `{.push raises: [].}`).
  var addrs = Opt.none(MultiAddress)
  self.addrIdx.withValue(idx):
    if int(idx) < testAddrs.len:
      addrs = Opt.some(testAddrs[idx])
  AutonatV2Response(
    reachability: self.asNetworkReachability(),
    dialResp: self,
    addrs: addrs,
  )
|
||||
@@ -422,6 +422,6 @@ method stop*(r: Relay): Future[void] {.async: (raises: [], raw: true).} =
|
||||
warn "Stopping relay without starting it"
|
||||
return fut
|
||||
r.started = false
|
||||
r.reservationLoop.cancel()
|
||||
r.reservationLoop.cancelSoon()
|
||||
r.reservationLoop = nil
|
||||
fut
|
||||
|
||||
@@ -31,7 +31,7 @@ type RelayTransport* = ref object of Transport
|
||||
|
||||
method start*(
|
||||
self: RelayTransport, ma: seq[MultiAddress]
|
||||
) {.async: (raises: [LPError, transport.TransportError]).} =
|
||||
) {.async: (raises: [LPError, transport.TransportError, CancelledError]).} =
|
||||
if self.selfRunning:
|
||||
trace "Relay transport already running"
|
||||
return
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
import chronos
|
||||
|
||||
const
|
||||
IdLength* = 32 # 256-bit IDs
|
||||
k* = 20 # replication parameter
|
||||
DefaultReplic* = 20 ## replication parameter, aka `k` in the spec
|
||||
alpha* = 10 # concurrency parameter
|
||||
ttl* = 24.hours
|
||||
maxBuckets* = 256
|
||||
|
||||
const KadCodec* = "/ipfs/kad/1.0.0"
|
||||
|
||||
@@ -1,53 +1,451 @@
|
||||
import chronos
|
||||
import chronicles
|
||||
import sequtils
|
||||
import sets
|
||||
import ../../peerid
|
||||
import ./consts
|
||||
import ./xordistance
|
||||
import ./routingtable
|
||||
import ./lookupstate
|
||||
import ./requests
|
||||
import ./keys
|
||||
import ../protocol
|
||||
import ../../switch
|
||||
import ./protobuf
|
||||
import ../../switch
|
||||
import ../../multihash
|
||||
import ../../utils/heartbeat
|
||||
import std/[times, options, tables]
|
||||
import results
|
||||
|
||||
logScope:
|
||||
topics = "kad-dht"
|
||||
|
||||
type EntryKey* = object
  data: seq[byte] # raw key bytes as seen on the wire

proc init*(T: typedesc[EntryKey], inner: seq[byte]): EntryKey {.gcsafe, raises: [].} =
  ## Wraps raw key bytes; no hashing or validation is performed here.
  EntryKey(data: inner)

type EntryValue* = object
  data*: seq[byte] # public because needed for tests

proc init*(
    T: typedesc[EntryValue], inner: seq[byte]
): EntryValue {.gcsafe, raises: [].} =
  ## Wraps raw value bytes.
  EntryValue(data: inner)

type TimeStamp* = object
  # Currently a string, because for some reason, that's what is chosen at the protobuf level
  # TODO: convert between RFC3339 strings and use of integers (i.e. the _correct_ way)
  ts*: string # only public because needed for tests

type EntryRecord* = object
  value*: EntryValue # only public because needed for tests
  time*: TimeStamp # only public because needed for tests

proc init*(
    T: typedesc[EntryRecord], value: EntryValue, time: Option[TimeStamp]
): EntryRecord {.gcsafe, raises: [].} =
  ## Builds a record; when `time` is absent the current UTC wall time is
  ## stamped in.
  EntryRecord(value: value, time: time.get(TimeStamp(ts: $times.now().utc)))

type LocalTable* = object
  entries*: Table[EntryKey, EntryRecord] # public because needed for tests

proc init(self: typedesc[LocalTable]): LocalTable {.raises: [].} =
  ## Empty local key/value store.
  LocalTable()

type EntryCandidate* = object
  # A key/value pair that has not yet passed validation.
  key*: EntryKey
  value*: EntryValue

type ValidatedEntry* = object
  # A key/value pair that has passed the application's EntryValidator.
  key: EntryKey
  value: EntryValue

proc init*(
    T: typedesc[ValidatedEntry], key: EntryKey, value: EntryValue
): ValidatedEntry {.gcsafe, raises: [].} =
  ## Marks a key/value pair as validated; callers are expected to have run
  ## the validator first.
  ValidatedEntry(key: key, value: value)

type EntryValidator* = ref object of RootObj
method isValid*(
    self: EntryValidator, key: EntryKey, val: EntryValue
): bool {.base, raises: [], gcsafe.} =
  ## Application hook: decides whether a key/value pair may be stored.
  ## Base method must be overridden.
  doAssert(false, "unimplimented base method")

type EntrySelector* = ref object of RootObj
method select*(
    self: EntrySelector, cand: EntryRecord, others: seq[EntryRecord]
): Result[EntryRecord, string] {.base, raises: [], gcsafe.} =
  ## Application hook: picks which record wins among the candidate and
  ## the records already held. Base method must be overridden.
  doAssert(false, "EntrySelection base not implemented")

type KadDHT* = ref object of LPProtocol
  switch: Switch
  rng: ref HmacDrbgContext
  rtable*: RoutingTable
  maintenanceLoop: Future[void]
  dataTable*: LocalTable
  entryValidator: EntryValidator
  entrySelector: EntrySelector

proc insert*(
    self: var LocalTable, value: sink ValidatedEntry, time: TimeStamp
) {.raises: [].} =
  ## Stores (or overwrites) a validated entry under its key; `sink` lets
  ## callers move the entry in without a copy.
  debug "local table insertion", key = value.key.data, value = value.value.data
  self.entries[value.key] = EntryRecord(value: value.value, time: time)
|
||||
|
||||
const MaxMsgSize = 4096
|
||||
# Forward declaration
|
||||
proc findNode*(
|
||||
kad: KadDHT, targetId: Key
|
||||
): Future[seq[PeerId]] {.async: (raises: [CancelledError]).}
|
||||
|
||||
proc sendFindNode(
    kad: KadDHT, peerId: PeerId, addrs: seq[MultiAddress], targetId: Key
): Future[Message] {.
    async: (raises: [CancelledError, DialFailedError, ValueError, LPStreamError])
.} =
  ## Opens a kad stream to `peerId` — via `addrs` when given, otherwise
  ## whatever the switch already knows — sends a findNode for `targetId`
  ## and returns the reply. Raises ValueError when the reply is not a
  ## findNode message.
  let conn =
    if addrs.len == 0:
      await kad.switch.dial(peerId, KadCodec)
    else:
      await kad.switch.dial(peerId, addrs, KadCodec)
  defer:
    # closed on all exits, including exceptions
    await conn.close()

  let msg = Message(msgType: MessageType.findNode, key: some(targetId.getBytes()))
  await conn.writeLp(msg.encode().buffer)

  # tryGet raises on a malformed reply, surfacing it to the caller
  let reply = Message.decode(await conn.readLp(MaxMsgSize)).tryGet()
  if reply.msgType != MessageType.findNode:
    raise newException(ValueError, "unexpected message type in reply: " & $reply)

  return reply
|
||||
|
||||
proc waitRepliesOrTimeouts(
    pendingFutures: Table[PeerId, Future[Message]]
): Future[(seq[Message], seq[PeerId])] {.async: (raises: [CancelledError]).} =
  ## Waits for every in-flight findNode future to settle, then partitions
  ## the outcome into (successful replies, peers whose request failed).
  # allFutures completes once all settle, regardless of failure; each
  # future is re-awaited below so per-peer errors can be collected.
  await allFutures(toSeq(pendingFutures.values))

  var receivedReplies: seq[Message] = @[]
  var failedPeers: seq[PeerId] = @[]

  for (peerId, replyFut) in pendingFutures.pairs:
    try:
      receivedReplies.add(await replyFut)
    except CatchableError:
      failedPeers.add(peerId)
      error "could not send find_node to peer", peerId, err = getCurrentExceptionMsg()

  return (receivedReplies, failedPeers)
|
||||
|
||||
proc dispatchPutVal(
    kad: KadDHT, peer: PeerId, entry: ValidatedEntry
): Future[void] {.async: (raises: [CancelledError, DialFailedError, LPStreamError]).} =
  ## Sends a putValue RPC for `entry` to a single peer and verifies that
  ## the peer echoes the message back unchanged; mismatches and decode
  ## failures are logged, not raised.
  let conn = await kad.switch.dial(peer, KadCodec)
  defer:
    await conn.close()
  let msg = Message(
    msgType: MessageType.putValue,
    record: some(Record(key: some(entry.key.data), value: some(entry.value.data))),
  )
  await conn.writeLp(msg.encode().buffer)

  let reply = Message.decode(await conn.readLp(MaxMsgSize)).valueOr:
    # todo log this more meaningfully
    error "putValue reply decode fail", error = error, conn = conn
    return
  if reply != msg:
    error "unexpected change between msg and reply: ",
      msg = msg, reply = reply, conn = conn
|
||||
proc putValue*(
    kad: KadDHT, entKey: EntryKey, value: EntryValue, timeout: Option[int]
): Future[Result[void, string]] {.async: (raises: [CancelledError]), gcsafe.} =
  ## Validates a key/value pair, runs the application's selector against
  ## any record already held locally, stores the winner, and replicates it
  ## to the closest peers found for the key. `timeout` bounds the
  ## replication phase in seconds (default 5); hitting it is not an error.
  if not kad.entryValidator.isValid(entKey, value):
    return err("invalid key/value pair")

  # existing record for this key (if any) is offered to the selector
  let others: seq[EntryRecord] =
    if entKey in kad.dataTable.entries:
      @[kad.dataTable.entries.getOrDefault(entKey)]
    else:
      @[]

  let candAsRec = EntryRecord.init(value, none(TimeStamp))
  let confirmedRec = kad.entrySelector.select(candAsRec, others).valueOr:
    error "application provided selector error (local)", msg = error
    return err(error)
  trace "local putval", candidate = candAsRec, others = others, selected = confirmedRec
  let validEnt = ValidatedEntry.init(entKey, confirmedRec.value)

  let peers = await kad.findNode(entKey.data.toKey())
  # We first prime the sends so the data is ready to go
  let rpcBatch = peers.mapIt(kad.dispatchPutVal(it, validEnt))
  # then we do the `move`, as insert takes the data as `sink`
  kad.dataTable.insert(validEnt, confirmedRec.time)
  try:
    # now that the all the data is where it needs to be in memory, we can dispatch the
    # RPCs
    await rpcBatch.allFutures().wait(chronos.seconds(timeout.get(5)))

    # It's quite normal for the dispatch to timeout, as it would require all calls to get
    # their response. Downstream users may desire some sort of functionality in the
    # future to get rpc telemetry, but in the meantime, we just move on...
  except AsyncTimeoutError:
    discard
  return results.ok()
|
||||
|
||||
# Helper function forward declaration
|
||||
proc checkConvergence(state: LookupState, me: PeerId): bool {.raises: [], gcsafe.}
|
||||
|
||||
proc findNode*(
    kad: KadDHT, targetId: Key
): Future[seq[PeerId]] {.async: (raises: [CancelledError]).} =
  ## Node lookup. Iteratively search for the k closest peers to a target ID.
  ## Not necessarily will return the target itself
  # Each round queries up to alpha unqueried peers in parallel, folds
  # their replies into the shortlist, and stops once no queries are in
  # flight and no new peers remain to select (see checkConvergence).

  #debug "findNode", target = target

  var initialPeers = kad.rtable.findClosestPeers(targetId, DefaultReplic)
  var state = LookupState.init(targetId, initialPeers, kad.rtable.hasher)
  # addresses learned from replies, used to dial peers discovered mid-lookup
  var addrTable: Table[PeerId, seq[MultiAddress]] =
    initTable[PeerId, seq[MultiAddress]]()

  while not state.done:
    let toQuery = state.selectAlphaPeers()
    debug "queries", list = toQuery.mapIt(it.shortLog()), addrTab = addrTable
    var pendingFutures = initTable[PeerId, Future[Message]]()

    # TODO: pending futures always empty here, no?
    # NOTE(review): pendingFutures is freshly created just above, so the
    # `hasKey` clause is always false and the filter reduces to "not
    # ourselves". Likely intended `and not pendingFutures.hasKey(it)` —
    # confirm before changing.
    for peer in toQuery.filterIt(
      kad.switch.peerInfo.peerId != it or pendingFutures.hasKey(it)
    ):
      state.markPending(peer)

      # each query is individually capped at 5 seconds
      pendingFutures[peer] = kad
      .sendFindNode(peer, addrTable.getOrDefault(peer, @[]), targetId)
      .wait(chronos.seconds(5))

      state.activeQueries.inc

    let (successfulReplies, timedOutPeers) = await waitRepliesOrTimeouts(pendingFutures)

    for msg in successfulReplies:
      for peer in msg.closerPeers:
        let pid = PeerId.init(peer.id)
        if not pid.isOk:
          error "PeerId init went bad. this is unusual", data = peer.id
          continue
        addrTable[pid.get()] = peer.addrs
      state.updateShortlist(
        msg,
        proc(p: PeerInfo) =
          discard kad.rtable.insert(p.peerId)
          # Nodes might return different addresses for a peer, so we append instead of replacing
          var existingAddresses =
            kad.switch.peerStore[AddressBook][p.peerId].toHashSet()
          for a in p.addrs:
            existingAddresses.incl(a)
          kad.switch.peerStore[AddressBook][p.peerId] = existingAddresses.toSeq()
          # TODO: add TTL to peerstore, otherwise we can spam it with junk
        ,
        kad.rtable.hasher,
      )

    for timedOut in timedOutPeers:
      state.markFailed(timedOut)

    # Check for covergence: no active queries, and no other peers to be selected
    state.done = checkConvergence(state, kad.switch.peerInfo.peerId)

  return state.selectClosestK()
|
||||
|
||||
proc findPeer*(
    kad: KadDHT, peer: PeerId
): Future[Result[PeerInfo, string]] {.async: (raises: [CancelledError]).} =
  ## Walks the key space until it finds candidate addresses for a peer Id
  ## Short-circuits for ourselves and for already-connected peers;
  ## otherwise runs a findNode lookup and reads the addresses that the
  ## lookup recorded in the peerstore.

  if kad.switch.peerInfo.peerId == peer:
    # Looking for yourself.
    return ok(kad.switch.peerInfo)

  if kad.switch.isConnected(peer):
    # Return known info about already connected peer
    return ok(PeerInfo(peerId: peer, addrs: kad.switch.peerStore[AddressBook][peer]))

  let foundNodes = await kad.findNode(peer.toKey())
  if not foundNodes.contains(peer):
    return err("peer not found")

  # lookup succeeded: findNode populated the peerstore with its addresses
  return ok(PeerInfo(peerId: peer, addrs: kad.switch.peerStore[AddressBook][peer]))
|
||||
|
||||
proc checkConvergence(state: LookupState, me: PeerId): bool {.raises: [], gcsafe.} =
  ## The lookup has converged when nothing is in flight and every peer
  ## still selectable for querying is ourselves.
  if state.activeQueries != 0:
    return false
  state.selectAlphaPeers().filterIt(it != me).len == 0
|
||||
|
||||
proc bootstrap*(
    kad: KadDHT, bootstrapNodes: seq[PeerInfo]
) {.async: (raises: [CancelledError]).} =
  ## Connects to each bootstrap node, seeds the routing table and
  ## peerstore from its findNode reply, then performs a lookup for a
  ## random key to populate the wider table. Per-node failures are
  ## logged and skipped.
  for b in bootstrapNodes:
    try:
      await kad.switch.connect(b.peerId, b.addrs)
      debug "connected to bootstrap peer", peerId = b.peerId
    except DialFailedError as e:
      # at some point will want to bubble up a Result[void, SomeErrorEnum]
      error "failed to dial to bootstrap peer", peerId = b.peerId, error = e.msg
      continue

    let msg =
      try:
        await kad.sendFindNode(b.peerId, b.addrs, kad.rtable.selfId).wait(
          chronos.seconds(5)
        )
      except CatchableError as e:
        debug "send find node exception during bootstrap",
          target = b.peerId, addrs = b.addrs, err = e.msg
        continue
    for peer in msg.closerPeers:
      let p = PeerId.init(peer.id).valueOr:
        debug "invalid peer id received", error = error
        continue
      discard kad.rtable.insert(p)
      try:
        kad.switch.peerStore[AddressBook][p] = peer.addrs
      # NOTE(review): bare `except` also swallows Defects and
      # CancelledError; consider narrowing to CatchableError.
      except:
        error "this is here because an ergonomic means of keying into a table without exceptions is unknown"

    # bootstrap node replied succesfully. Adding to routing table
    discard kad.rtable.insert(b.peerId)

  # random-key lookup to fill buckets beyond the bootstrap neighborhood
  let key = PeerId.random(kad.rng).valueOr:
    doAssert(false, "this should never happen")
    return
  discard await kad.findNode(key.toKey())
  info "bootstrap lookup complete"
|
||||
|
||||
proc refreshBuckets(kad: KadDHT) {.async: (raises: [CancelledError]).} =
  ## Runs a lookup for a random key in the range of every stale bucket,
  ## repopulating it with fresh peers.
  for i in 0 ..< kad.rtable.buckets.len:
    if kad.rtable.buckets[i].isStale():
      let randomKey = randomKeyInBucketRange(kad.rtable.selfId, i, kad.rng)
      discard await kad.findNode(randomKey)
||||
|
||||
proc maintainBuckets(kad: KadDHT) {.async: (raises: [CancelledError]).} =
|
||||
heartbeat "refresh buckets", 10.minutes:
|
||||
debug "TODO: implement bucket maintenance"
|
||||
heartbeat "refresh buckets", chronos.minutes(10):
|
||||
await kad.refreshBuckets()
|
||||
|
||||
proc new*(
|
||||
T: typedesc[KadDHT], switch: Switch, rng: ref HmacDrbgContext = newRng()
|
||||
T: typedesc[KadDHT],
|
||||
switch: Switch,
|
||||
validator: EntryValidator,
|
||||
entrySelector: EntrySelector,
|
||||
rng: ref HmacDrbgContext = newRng(),
|
||||
): T {.raises: [].} =
|
||||
var rtable = RoutingTable.init(switch.peerInfo.peerId)
|
||||
let kad = T(rng: rng, switch: switch, rtable: rtable)
|
||||
var rtable = RoutingTable.init(switch.peerInfo.peerId.toKey(), Opt.none(XorDHasher))
|
||||
let kad = T(
|
||||
rng: rng,
|
||||
switch: switch,
|
||||
rtable: rtable,
|
||||
dataTable: LocalTable.init(),
|
||||
entryValidator: validator,
|
||||
entrySelector: entrySelector,
|
||||
)
|
||||
|
||||
kad.codec = KadCodec
|
||||
kad.handler = proc(
|
||||
conn: Connection, proto: string
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
try:
|
||||
while not conn.atEof:
|
||||
let
|
||||
buf = await conn.readLp(4096)
|
||||
msg = Message.decode(buf).tryGet()
|
||||
|
||||
# TODO: handle msg.msgType
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError:
|
||||
error "could not handle request",
|
||||
peerId = conn.PeerId, err = getCurrentExceptionMsg()
|
||||
finally:
|
||||
defer:
|
||||
await conn.close()
|
||||
while not conn.atEof:
|
||||
let buf =
|
||||
try:
|
||||
await conn.readLp(MaxMsgSize)
|
||||
except LPStreamError as e:
|
||||
debug "Read error when handling kademlia RPC", conn = conn, err = e.msg
|
||||
return
|
||||
let msg = Message.decode(buf).valueOr:
|
||||
debug "msg decode error handling kademlia RPC", err = error
|
||||
return
|
||||
|
||||
case msg.msgType
|
||||
of MessageType.findNode:
|
||||
let targetIdBytes = msg.key.valueOr:
|
||||
error "findNode message without key data present", msg = msg, conn = conn
|
||||
return
|
||||
let targetId = PeerId.init(targetIdBytes).valueOr:
|
||||
error "findNode message without valid key data", msg = msg, conn = conn
|
||||
return
|
||||
let closerPeers = kad.rtable
|
||||
.findClosest(targetId.toKey(), DefaultReplic)
|
||||
# exclude the node requester because telling a peer about itself does not reduce the distance,
|
||||
.filterIt(it != conn.peerId.toKey())
|
||||
|
||||
let responsePb = encodeFindNodeReply(closerPeers, switch)
|
||||
try:
|
||||
await conn.writeLp(responsePb.buffer)
|
||||
except LPStreamError as e:
|
||||
debug "write error when writing kad find-node RPC reply",
|
||||
conn = conn, err = e.msg
|
||||
return
|
||||
|
||||
# Peer is useful. adding to rtable
|
||||
discard kad.rtable.insert(conn.peerId)
|
||||
of MessageType.putValue:
|
||||
let record = msg.record.valueOr:
|
||||
error "no record in message buffer", msg = msg, conn = conn
|
||||
return
|
||||
let (skey, svalue) =
|
||||
if record.key.isSome() and record.value.isSome():
|
||||
(record.key.unsafeGet(), record.value.unsafeGet())
|
||||
else:
|
||||
error "no key or no value in rpc buffer", msg = msg, conn = conn
|
||||
return
|
||||
let key = EntryKey.init(skey)
|
||||
let value = EntryValue.init(svalue)
|
||||
|
||||
# Value sanitisation done. Start insertion process
|
||||
if not kad.entryValidator.isValid(key, value):
|
||||
return
|
||||
|
||||
let others =
|
||||
if kad.dataTable.entries.contains(key):
|
||||
# need to do this shenans in order to avoid exceptions.
|
||||
@[kad.dataTable.entries.getOrDefault(key)]
|
||||
else:
|
||||
@[]
|
||||
let candRec = EntryRecord.init(value, none(TimeStamp))
|
||||
let selectedRec = kad.entrySelector.select(candRec, others).valueOr:
|
||||
error "application provided selector error", msg = error, conn = conn
|
||||
return
|
||||
trace "putval handler selection",
|
||||
cand = candRec, others = others, selected = selectedRec
|
||||
|
||||
# Assume that if selection goes with another value, that it is valid
|
||||
let validated = ValidatedEntry(key: key, value: selectedRec.value)
|
||||
|
||||
kad.dataTable.insert(validated, selectedRec.time)
|
||||
# consistent with following link, echo message without change
|
||||
# https://github.com/libp2p/js-libp2p/blob/cf9aab5c841ec08bc023b9f49083c95ad78a7a07/packages/kad-dht/src/rpc/handlers/put-value.ts#L22
|
||||
try:
|
||||
await conn.writeLp(buf)
|
||||
except LPStreamError as e:
|
||||
debug "write error when writing kad find-node RPC reply",
|
||||
conn = conn, err = e.msg
|
||||
return
|
||||
else:
|
||||
error "unhandled kad-dht message type", msg = msg
|
||||
return
|
||||
return kad
|
||||
|
||||
proc setSelector*(kad: KadDHT, selector: EntrySelector) =
  ## Swaps in a new application-provided record selector; must not be nil.
  doAssert(not selector.isNil)
  kad.entrySelector = selector

proc setValidator*(kad: KadDHT, validator: EntryValidator) =
  ## Swaps in a new application-provided record validator; must not be nil.
  doAssert(not validator.isNil)
  kad.entryValidator = validator
|
||||
|
||||
method start*(
|
||||
kad: KadDHT
|
||||
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
|
||||
@@ -65,10 +463,12 @@ method start*(
|
||||
fut
|
||||
|
||||
method stop*(kad: KadDHT): Future[void] {.async: (raises: [], raw: true).} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
if not kad.started:
|
||||
return
|
||||
return fut
|
||||
|
||||
kad.started = false
|
||||
kad.maintenanceLoop.cancelSoon()
|
||||
kad.maintenanceLoop = nil
|
||||
return
|
||||
return fut
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
import nimcrypto/sha2
|
||||
import ../../peerid
|
||||
import ./consts
|
||||
import chronicles
|
||||
import stew/byteutils
|
||||
|
||||
type
|
||||
KeyType* {.pure.} = enum
|
||||
Unhashed
|
||||
Raw
|
||||
PeerId
|
||||
|
||||
@@ -13,25 +12,26 @@ type
|
||||
case kind*: KeyType
|
||||
of KeyType.PeerId:
|
||||
peerId*: PeerId
|
||||
of KeyType.Raw, KeyType.Unhashed:
|
||||
data*: array[IdLength, byte]
|
||||
of KeyType.Raw:
|
||||
data*: seq[byte]
|
||||
|
||||
proc toKey*(s: seq[byte]): Key =
|
||||
doAssert s.len == IdLength
|
||||
var data: array[IdLength, byte]
|
||||
for i in 0 ..< IdLength:
|
||||
data[i] = s[i]
|
||||
return Key(kind: KeyType.Raw, data: data)
|
||||
return Key(kind: KeyType.Raw, data: s)
|
||||
|
||||
proc toKey*(p: PeerId): Key =
|
||||
return Key(kind: KeyType.PeerId, peerId: p)
|
||||
|
||||
proc toPeerId*(k: Key): PeerId {.raises: [ValueError].} =
|
||||
if k.kind != KeyType.PeerId:
|
||||
raise newException(ValueError, "not a peerId")
|
||||
k.peerId
|
||||
|
||||
proc getBytes*(k: Key): seq[byte] =
|
||||
return
|
||||
case k.kind
|
||||
of KeyType.PeerId:
|
||||
k.peerId.getBytes()
|
||||
of KeyType.Raw, KeyType.Unhashed:
|
||||
of KeyType.Raw:
|
||||
@(k.data)
|
||||
|
||||
template `==`*(a, b: Key): bool =
|
||||
@@ -41,7 +41,7 @@ proc shortLog*(k: Key): string =
|
||||
case k.kind
|
||||
of KeyType.PeerId:
|
||||
"PeerId:" & $k.peerId
|
||||
of KeyType.Raw, KeyType.Unhashed:
|
||||
of KeyType.Raw:
|
||||
$k.kind & ":" & toHex(k.data)
|
||||
|
||||
chronicles.formatIt(Key):
|
||||
|
||||
120
libp2p/protocols/kademlia/lookupstate.nim
Normal file
120
libp2p/protocols/kademlia/lookupstate.nim
Normal file
@@ -0,0 +1,120 @@
|
||||
import sequtils
|
||||
import ./consts
|
||||
import ./protobuf
|
||||
import ./xordistance
|
||||
import ./keys
|
||||
import ../../[peerid, peerinfo]
|
||||
import algorithm
|
||||
import chronicles
|
||||
|
||||
type
  LookupNode* = object
    # One candidate peer tracked during an iterative lookup.
    peerId: PeerId
    distance: XorDistance # XOR distance from the lookup target
    queried: bool # have we already queried this node?
    pending: bool # is there an active request rn?
    failed: bool # did the query timeout or error?

  LookupState* = object
    # Mutable state of one findNode lookup; shortlist stays sorted by
    # distance (nearest first).
    targetId: Key
    shortlist: seq[LookupNode] # current known closest node
    activeQueries*: int # how many queries in flight
    alpha: int # parallelism level
    repliCount: int ## aka `k` in the spec: number of closest nodes to find
    done*: bool # has lookup converged
|
||||
proc alreadyInShortlist(state: LookupState, peer: Peer): bool =
  ## True when `peer` (compared by raw id bytes) is already tracked.
  for node in state.shortlist:
    if node.peerId.getBytes() == peer.id:
      return true
  false
|
||||
|
||||
proc updateShortlist*(
    state: var LookupState,
    msg: Message,
    onInsert: proc(p: PeerInfo) {.gcsafe.},
    hasher: Opt[XorDHasher],
) =
  ## Folds the closerPeers of a findNode reply into the shortlist: each
  ## unseen peer is handed to `onInsert` (caller-side bookkeeping) and
  ## appended; the shortlist is then re-sorted by XOR distance and the
  ## in-flight counter decremented to account for this reply.
  for newPeer in msg.closerPeers.filterIt(not alreadyInShortlist(state, it)):
    # NOTE(review): PeerId.init(...).get() raises on a malformed id and is
    # only caught by the broad `except` below — confirm ids are
    # pre-validated by callers.
    let peerInfo = PeerInfo(peerId: PeerId.init(newPeer.id).get(), addrs: newPeer.addrs)
    try:
      onInsert(peerInfo)
      state.shortlist.add(
        LookupNode(
          peerId: peerInfo.peerId,
          distance: xorDistance(peerInfo.peerId, state.targetId, hasher),
          queried: false,
          pending: false,
          failed: false,
        )
      )
    # NOTE(review): catching Exception also swallows Defects; consider
    # narrowing to CatchableError once onInsert's raises are annotated.
    except Exception as exc:
      debug "could not update shortlist", err = exc.msg

  state.shortlist.sort(
    proc(a, b: LookupNode): int =
      cmp(a.distance, b.distance)
  )

  state.activeQueries.dec
||||
|
||||
proc markFailed*(state: var LookupState, peerId: PeerId) =
  ## Records that the query to `peerId` errored or timed out and releases
  ## its in-flight slot.
  for node in mitems(state.shortlist):
    if node.peerId != peerId:
      continue
    node.failed = true
    node.pending = false
    node.queried = true
    state.activeQueries.dec
    break
|
||||
|
||||
proc markPending*(state: var LookupState, peerId: PeerId) =
  ## Flags `peerId` as having a query in flight (and as queried, so it is
  ## never selected again).
  for node in mitems(state.shortlist):
    if node.peerId != peerId:
      continue
    node.pending = true
    node.queried = true
    break
|
||||
|
||||
proc selectAlphaPeers*(state: LookupState): seq[PeerId] =
  ## Up to `alpha` peers that have not been queried, are not in flight,
  ## and have not failed — nearest first, since the shortlist is kept
  ## sorted by distance.
  for node in state.shortlist:
    if node.queried or node.failed or node.pending:
      continue
    result.add(node.peerId)
    if result.len >= state.alpha:
      break
|
||||
|
||||
proc init*(
    T: type LookupState,
    targetId: Key,
    initialPeers: seq[PeerId],
    hasher: Opt[XorDHasher],
): T =
  ## Fresh lookup state for `targetId`, seeded with `initialPeers` and
  ## sorted nearest-first by XOR distance.
  var state = LookupState(
    targetId: targetId,
    shortlist: initialPeers.mapIt(
      LookupNode(
        peerId: it,
        distance: xorDistance(it, targetId, hasher),
        queried: false,
        pending: false,
        failed: false,
      )
    ),
    activeQueries: 0,
    alpha: alpha,
    repliCount: DefaultReplic,
    done: false,
  )

  state.shortlist.sort(
    proc(a, b: LookupNode): int =
      cmp(a.distance, b.distance)
  )
  return state
|
||||
|
||||
proc selectClosestK*(state: LookupState): seq[PeerId] =
  ## The closest `repliCount` (aka k) non-failed peers discovered so far,
  ## in distance order.
  for node in state.shortlist:
    if node.failed:
      continue
    result.add(node.peerId)
    if result.len >= state.repliCount:
      break
|
||||
@@ -39,9 +39,11 @@ type
|
||||
closerPeers*: seq[Peer]
|
||||
providerPeers*: seq[Peer]
|
||||
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].}
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [], gcsafe.}
|
||||
|
||||
proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].}
|
||||
proc writeOpt*[T](
|
||||
pb: var ProtoBuffer, field: int, opt: Option[T]
|
||||
) {.raises: [], gcsafe.}
|
||||
|
||||
proc encode*(record: Record): ProtoBuffer {.raises: [].} =
|
||||
var pb = initProtoBuffer()
|
||||
@@ -60,7 +62,7 @@ proc encode*(peer: Peer): ProtoBuffer {.raises: [].} =
|
||||
pb.finish()
|
||||
return pb
|
||||
|
||||
proc encode*(msg: Message): ProtoBuffer {.raises: [].} =
|
||||
proc encode*(msg: Message): ProtoBuffer {.raises: [], gcsafe.} =
|
||||
var pb = initProtoBuffer()
|
||||
|
||||
pb.write(1, uint32(ord(msg.msgType)))
|
||||
@@ -80,11 +82,13 @@ proc encode*(msg: Message): ProtoBuffer {.raises: [].} =
|
||||
|
||||
return pb
|
||||
|
||||
proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].} =
|
||||
proc writeOpt*[T](
|
||||
pb: var ProtoBuffer, field: int, opt: Option[T]
|
||||
) {.raises: [], gcsafe.} =
|
||||
opt.withValue(v):
|
||||
pb.write(field, v)
|
||||
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].} =
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [], gcsafe.} =
|
||||
pb.write(field, value.encode())
|
||||
|
||||
proc getOptionField[T: ProtoScalar | string | seq[byte]](
|
||||
@@ -120,7 +124,7 @@ proc decode*(T: type Peer, pb: ProtoBuffer): ProtoResult[Option[T]] =
|
||||
|
||||
return ok(some(p))
|
||||
|
||||
proc decode*(T: type Message, buf: seq[byte]): ProtoResult[Option[T]] =
|
||||
proc decode*(T: type Message, buf: seq[byte]): ProtoResult[T] =
|
||||
var
|
||||
m: Message
|
||||
key: seq[byte]
|
||||
@@ -156,4 +160,4 @@ proc decode*(T: type Message, buf: seq[byte]): ProtoResult[Option[T]] =
|
||||
peer.withValue(peer):
|
||||
m.providerPeers.add(peer)
|
||||
|
||||
return ok(some(m))
|
||||
return ok(m)
|
||||
|
||||
34
libp2p/protocols/kademlia/requests.nim
Normal file
34
libp2p/protocols/kademlia/requests.nim
Normal file
@@ -0,0 +1,34 @@
|
||||
import ../../peerid
|
||||
import ../../switch
|
||||
import ../../peerstore
|
||||
import ./protobuf
|
||||
import ../../protobuf/minprotobuf
|
||||
import ./keys
|
||||
|
||||
proc encodeFindNodeReply*(
|
||||
closerPeers: seq[Key], switch: Switch
|
||||
): ProtoBuffer {.raises: [].} =
|
||||
var msg: Message
|
||||
msg.msgType = MessageType.findNode
|
||||
for peer in closerPeers:
|
||||
let peer =
|
||||
try:
|
||||
peer.toPeerId()
|
||||
except ValueError:
|
||||
continue
|
||||
let addrs = switch.peerStore[AddressBook][peer]
|
||||
if addrs.len == 0:
|
||||
continue
|
||||
|
||||
let p = Peer(
|
||||
id: peer.getBytes(),
|
||||
addrs: addrs,
|
||||
connection:
|
||||
# TODO: this should likely be optional as it can reveal the network graph of a node
|
||||
if switch.isConnected(peer):
|
||||
ConnectionType.connected
|
||||
else:
|
||||
ConnectionType.notConnected,
|
||||
)
|
||||
msg.closerPeers.add(p)
|
||||
return msg.encode()
|
||||
@@ -8,6 +8,7 @@ import ./xordistance
|
||||
import ../../peerid
|
||||
import sequtils
|
||||
import ../../utils/sequninit
|
||||
import results
|
||||
|
||||
logScope:
|
||||
topics = "kad-dht rtable"
|
||||
@@ -23,12 +24,16 @@ type
|
||||
RoutingTable* = ref object
|
||||
selfId*: Key
|
||||
buckets*: seq[Bucket]
|
||||
hasher*: Opt[XorDHasher]
|
||||
|
||||
proc init*(T: typedesc[RoutingTable], selfId: Key): T =
|
||||
return RoutingTable(selfId: selfId, buckets: @[])
|
||||
proc `$`*(rt: RoutingTable): string =
|
||||
"selfId(" & $rt.selfId & ") buckets(" & $rt.buckets & ")"
|
||||
|
||||
proc bucketIndex*(selfId, key: Key): int =
|
||||
return xorDistance(selfId, key).leadingZeros
|
||||
proc init*(T: typedesc[RoutingTable], selfId: Key, hasher: Opt[XorDHasher]): T =
|
||||
return RoutingTable(selfId: selfId, buckets: @[], hasher: hasher)
|
||||
|
||||
proc bucketIndex*(selfId, key: Key, hasher: Opt[XorDHasher]): int =
|
||||
return xorDistance(selfId, key, hasher).leadingZeros
|
||||
|
||||
proc peerIndexInBucket(bucket: var Bucket, nodeId: Key): Opt[int] =
|
||||
for i, p in bucket.peers:
|
||||
@@ -40,7 +45,7 @@ proc insert*(rtable: var RoutingTable, nodeId: Key): bool =
|
||||
if nodeId == rtable.selfId:
|
||||
return false # No self insertion
|
||||
|
||||
let idx = bucketIndex(rtable.selfId, nodeId)
|
||||
let idx = bucketIndex(rtable.selfId, nodeId, rtable.hasher)
|
||||
if idx >= maxBuckets:
|
||||
trace "cannot insert node. max buckets have been reached",
|
||||
nodeId, bucketIdx = idx, maxBuckets
|
||||
@@ -54,12 +59,12 @@ proc insert*(rtable: var RoutingTable, nodeId: Key): bool =
|
||||
let keyx = peerIndexInBucket(bucket, nodeId)
|
||||
if keyx.isSome:
|
||||
bucket.peers[keyx.unsafeValue].lastSeen = Moment.now()
|
||||
elif bucket.peers.len < k:
|
||||
elif bucket.peers.len < DefaultReplic:
|
||||
bucket.peers.add(NodeEntry(nodeId: nodeId, lastSeen: Moment.now()))
|
||||
else:
|
||||
# TODO: eviction policy goes here, rn we drop the node
|
||||
trace "cannot insert node in bucket, dropping node",
|
||||
nodeId, bucket = k, bucketIdx = idx
|
||||
nodeId, bucket = DefaultReplic, bucketIdx = idx
|
||||
return false
|
||||
|
||||
rtable.buckets[idx] = bucket
|
||||
@@ -77,7 +82,9 @@ proc findClosest*(rtable: RoutingTable, targetId: Key, count: int): seq[Key] =
|
||||
|
||||
allNodes.sort(
|
||||
proc(a, b: Key): int =
|
||||
cmp(xorDistance(a, targetId), xorDistance(b, targetId))
|
||||
cmp(
|
||||
xorDistance(a, targetId, rtable.hasher), xorDistance(b, targetId, rtable.hasher)
|
||||
)
|
||||
)
|
||||
|
||||
return allNodes[0 ..< min(count, allNodes.len)]
|
||||
|
||||
@@ -1,9 +1,27 @@
|
||||
import ./consts
|
||||
import stew/arrayOps
|
||||
import ./keys
|
||||
import nimcrypto/sha2
|
||||
import ../../peerid
|
||||
import results
|
||||
|
||||
type XorDistance* = array[IdLength, byte]
|
||||
type XorDHasher* = proc(input: seq[byte]): array[IdLength, byte] {.
|
||||
raises: [], nimcall, noSideEffect, gcsafe
|
||||
.}
|
||||
|
||||
proc defaultHasher(
|
||||
input: seq[byte]
|
||||
): array[IdLength, byte] {.raises: [], nimcall, noSideEffect, gcsafe.} =
|
||||
return sha256.digest(input).data
|
||||
|
||||
# useful for testing purposes
|
||||
proc noOpHasher*(
|
||||
input: seq[byte]
|
||||
): array[IdLength, byte] {.raises: [], nimcall, noSideEffect, gcsafe.} =
|
||||
var data: array[IdLength, byte]
|
||||
discard data.copyFrom(input)
|
||||
return data
|
||||
|
||||
proc countLeadingZeroBits*(b: byte): int =
|
||||
for i in 0 .. 7:
|
||||
@@ -31,25 +49,23 @@ proc `<`*(a, b: XorDistance): bool =
|
||||
proc `<=`*(a, b: XorDistance): bool =
|
||||
cmp(a, b) <= 0
|
||||
|
||||
proc hashFor(k: Key): seq[byte] =
|
||||
proc hashFor(k: Key, hasher: Opt[XorDHasher]): seq[byte] =
|
||||
return
|
||||
@(
|
||||
case k.kind
|
||||
of KeyType.PeerId:
|
||||
sha256.digest(k.peerId.getBytes()).data
|
||||
hasher.get(defaultHasher)(k.peerId.getBytes())
|
||||
of KeyType.Raw:
|
||||
sha256.digest(k.data).data
|
||||
of KeyType.Unhashed:
|
||||
k.data
|
||||
hasher.get(defaultHasher)(k.data)
|
||||
)
|
||||
|
||||
proc xorDistance*(a, b: Key): XorDistance =
|
||||
let hashA = a.hashFor()
|
||||
let hashB = b.hashFor()
|
||||
proc xorDistance*(a, b: Key, hasher: Opt[XorDHasher]): XorDistance =
|
||||
let hashA = a.hashFor(hasher)
|
||||
let hashB = b.hashFor(hasher)
|
||||
var response: XorDistance
|
||||
for i in 0 ..< hashA.len:
|
||||
response[i] = hashA[i] xor hashB[i]
|
||||
return response
|
||||
|
||||
proc xorDistance*(a: PeerId, b: Key): XorDistance =
|
||||
xorDistance(a.toKey(), b)
|
||||
proc xorDistance*(a: PeerId, b: Key, hasher: Opt[XorDHasher]): XorDistance =
|
||||
xorDistance(a.toKey(), b, hasher)
|
||||
|
||||
53
libp2p/protocols/mix/crypto.nim
Normal file
53
libp2p/protocols/mix/crypto.nim
Normal file
@@ -0,0 +1,53 @@
|
||||
import endians, nimcrypto
|
||||
|
||||
proc aes_ctr*(key, iv, data: openArray[byte]): seq[byte] =
|
||||
## Processes 'data' using AES in CTR mode.
|
||||
## For CTR mode, the same function handles both encryption and decryption.
|
||||
doAssert key.len == 16, "Key must be 16 bytes for AES-128"
|
||||
doAssert iv.len == 16, "IV must be 16 bytes for AES-128"
|
||||
|
||||
var
|
||||
ctx: CTR[aes128]
|
||||
output = newSeq[byte](data.len)
|
||||
|
||||
ctx.init(key, iv)
|
||||
ctx.encrypt(data, output)
|
||||
ctx.clear()
|
||||
|
||||
output
|
||||
|
||||
proc advance_ctr*(iv: var openArray[byte], blocks: uint64) =
|
||||
## Advances the counter in the AES-CTR IV by a specified number of blocks.
|
||||
var counter: uint64
|
||||
bigEndian64(addr counter, addr iv[8])
|
||||
counter += blocks
|
||||
bigEndian64(addr iv[8], addr counter)
|
||||
|
||||
proc aes_ctr_start_index*(key, iv, data: openArray[byte], startIndex: int): seq[byte] =
|
||||
## Encrypts 'data' using AES in CTR mode from startIndex, without processing all preceding data.
|
||||
## For CTR mode, the same function handles both encryption and decryption.
|
||||
doAssert key.len == 16, "Key must be 16 bytes for AES-128"
|
||||
doAssert iv.len == 16, "IV must be 16 bytes for AES-128"
|
||||
doAssert startIndex mod 16 == 0, "Start index must be a multiple of 16"
|
||||
|
||||
var advIV = @iv
|
||||
|
||||
# Advance the counter to the start index
|
||||
let blocksToAdvance = startIndex div 16
|
||||
advance_ctr(advIV, blocksToAdvance.uint64)
|
||||
|
||||
return aes_ctr(key, advIV, data)
|
||||
|
||||
proc sha256_hash*(data: openArray[byte]): array[32, byte] =
|
||||
## hashes 'data' using SHA-256.
|
||||
return sha256.digest(data).data
|
||||
|
||||
proc kdf*(key: openArray[byte]): seq[byte] =
|
||||
## Returns the hash of 'key' truncated to 16 bytes.
|
||||
let hash = sha256_hash(key)
|
||||
return hash[0 .. 15]
|
||||
|
||||
proc hmac*(key, data: openArray[byte]): seq[byte] =
|
||||
## Computes a HMAC for 'data' using given 'key'.
|
||||
let hmac = sha256.hmac(key, data).data
|
||||
return hmac[0 .. 15]
|
||||
52
libp2p/protocols/mix/curve25519.nim
Normal file
52
libp2p/protocols/mix/curve25519.nim
Normal file
@@ -0,0 +1,52 @@
|
||||
import results
|
||||
import bearssl/rand
|
||||
import ../../crypto/curve25519
|
||||
|
||||
const FieldElementSize* = Curve25519KeySize
|
||||
|
||||
type FieldElement* = Curve25519Key
|
||||
|
||||
proc bytesToFieldElement*(bytes: openArray[byte]): Result[FieldElement, string] =
|
||||
## Convert bytes to FieldElement
|
||||
if bytes.len != FieldElementSize:
|
||||
return err("Field element size must be 32 bytes")
|
||||
ok(intoCurve25519Key(bytes))
|
||||
|
||||
proc fieldElementToBytes*(fe: FieldElement): seq[byte] =
|
||||
## Convert FieldElement to bytes
|
||||
fe.getBytes()
|
||||
|
||||
# Generate a random FieldElement
|
||||
proc generateRandomFieldElement*(): Result[FieldElement, string] =
|
||||
let rng = HmacDrbgContext.new()
|
||||
if rng.isNil:
|
||||
return err("Failed to create HmacDrbgContext with system randomness")
|
||||
ok(Curve25519Key.random(rng[]))
|
||||
|
||||
# Generate a key pair (private key and public key are both FieldElements)
|
||||
proc generateKeyPair*(): Result[tuple[privateKey, publicKey: FieldElement], string] =
|
||||
let privateKey = generateRandomFieldElement().valueOr:
|
||||
return err("Error in private key generation: " & error)
|
||||
|
||||
let publicKey = public(privateKey)
|
||||
ok((privateKey, publicKey))
|
||||
|
||||
proc multiplyPointWithScalars*(
|
||||
point: FieldElement, scalars: openArray[FieldElement]
|
||||
): FieldElement =
|
||||
## Multiply a given Curve25519 point with a set of scalars
|
||||
var res = point
|
||||
for scalar in scalars:
|
||||
Curve25519.mul(res, scalar)
|
||||
res
|
||||
|
||||
proc multiplyBasePointWithScalars*(
|
||||
scalars: openArray[FieldElement]
|
||||
): Result[FieldElement, string] =
|
||||
## Multiply the Curve25519 base point with a set of scalars
|
||||
if scalars.len <= 0:
|
||||
return err("Atleast one scalar must be provided")
|
||||
var res: FieldElement = public(scalars[0]) # Use the predefined base point
|
||||
for i in 1 ..< scalars.len:
|
||||
Curve25519.mul(res, scalars[i]) # Multiply with each scalar
|
||||
ok(res)
|
||||
133
libp2p/protocols/mix/entry_connection.nim
Normal file
133
libp2p/protocols/mix/entry_connection.nim
Normal file
@@ -0,0 +1,133 @@
|
||||
import hashes, chronos, stew/byteutils, results, chronicles
|
||||
import ../../stream/connection
|
||||
import ../../varint
|
||||
import ../../utils/sequninit
|
||||
import ./mix_protocol
|
||||
from fragmentation import DataSize
|
||||
|
||||
type MixDialer* = proc(
|
||||
msg: seq[byte], codec: string, destination: MixDestination
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).}
|
||||
|
||||
type MixEntryConnection* = ref object of Connection
|
||||
destination: MixDestination
|
||||
codec: string
|
||||
mixDialer: MixDialer
|
||||
|
||||
func shortLog*(conn: MixEntryConnection): string =
|
||||
if conn == nil:
|
||||
"MixEntryConnection(nil)"
|
||||
else:
|
||||
"MixEntryConnection(" & $conn.destination & ")"
|
||||
|
||||
chronicles.formatIt(MixEntryConnection):
|
||||
shortLog(it)
|
||||
|
||||
method readOnce*(
|
||||
s: MixEntryConnection, pbytes: pointer, nbytes: int
|
||||
): Future[int] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
# TODO: implement
|
||||
raise newLPStreamEOFError()
|
||||
|
||||
method readExactly*(
|
||||
s: MixEntryConnection, pbytes: pointer, nbytes: int
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
# TODO: implement
|
||||
raise newLPStreamEOFError()
|
||||
|
||||
method readLine*(
|
||||
s: MixEntryConnection, limit = 0, sep = "\r\n"
|
||||
): Future[string] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
# TODO: implement
|
||||
raise newLPStreamEOFError()
|
||||
|
||||
method readVarint*(
|
||||
conn: MixEntryConnection
|
||||
): Future[uint64] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
# TODO: implement
|
||||
raise newLPStreamEOFError()
|
||||
|
||||
method readLp*(
|
||||
s: MixEntryConnection, maxSize: int
|
||||
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
# TODO: implement
|
||||
raise newLPStreamEOFError()
|
||||
|
||||
method write*(
|
||||
self: MixEntryConnection, msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
|
||||
self.mixDialer(msg, self.codec, self.destination)
|
||||
|
||||
proc write*(
|
||||
self: MixEntryConnection, msg: string
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
|
||||
self.write(msg.toBytes())
|
||||
|
||||
method writeLp*(
|
||||
self: MixEntryConnection, msg: openArray[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
|
||||
if msg.len() > DataSize:
|
||||
let fut = newFuture[void]()
|
||||
fut.fail(
|
||||
newException(LPStreamError, "exceeds max msg size of " & $DataSize & " bytes")
|
||||
)
|
||||
return fut
|
||||
|
||||
## Write `msg` with a varint-encoded length prefix
|
||||
let vbytes = PB.toBytes(msg.len().uint64)
|
||||
var buf = newSeqUninit[byte](msg.len() + vbytes.len)
|
||||
buf[0 ..< vbytes.len] = vbytes.toOpenArray()
|
||||
buf[vbytes.len ..< buf.len] = msg
|
||||
|
||||
self.write(buf)
|
||||
|
||||
method writeLp*(
|
||||
self: MixEntryConnection, msg: string
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
|
||||
self.writeLp(msg.toOpenArrayByte(0, msg.high))
|
||||
|
||||
proc shortLog*(self: MixEntryConnection): string {.raises: [].} =
|
||||
"[MixEntryConnection] Destination: " & $self.destination
|
||||
|
||||
method closeImpl*(
|
||||
self: MixEntryConnection
|
||||
): Future[void] {.async: (raises: [], raw: true).} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
return fut
|
||||
|
||||
func hash*(self: MixEntryConnection): Hash =
|
||||
hash($self.destination)
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
proc setShortAgent*(self: MixEntryConnection, shortAgent: string) =
|
||||
discard
|
||||
|
||||
proc new*(
|
||||
T: typedesc[MixEntryConnection],
|
||||
srcMix: MixProtocol,
|
||||
destination: MixDestination,
|
||||
codec: string,
|
||||
): T {.raises: [].} =
|
||||
var instance = T()
|
||||
instance.destination = destination
|
||||
instance.codec = codec
|
||||
instance.mixDialer = proc(
|
||||
msg: seq[byte], codec: string, dest: MixDestination
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
await srcMix.anonymizeLocalProtocolSend(
|
||||
nil, msg, codec, dest, 0 # TODO: set incoming queue for replies and surbs
|
||||
)
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
instance.shortAgent = connection.shortAgent
|
||||
|
||||
instance
|
||||
|
||||
proc toConnection*(
|
||||
srcMix: MixProtocol, destination: MixDestination, codec: string
|
||||
): Result[Connection, string] {.gcsafe, raises: [].} =
|
||||
## Create a stream to send and optionally receive responses.
|
||||
## Under the hood it will wrap the message in a sphinx packet
|
||||
## and send it via a random mix path.
|
||||
ok(MixEntryConnection.new(srcMix, destination, codec))
|
||||
36
libp2p/protocols/mix/exit_layer.nim
Normal file
36
libp2p/protocols/mix/exit_layer.nim
Normal file
@@ -0,0 +1,36 @@
|
||||
import chronicles, chronos, metrics
|
||||
import ../../builders
|
||||
import ../../stream/connection
|
||||
import ./mix_metrics
|
||||
|
||||
type ExitLayer* = object
|
||||
switch: Switch
|
||||
|
||||
proc init*(T: typedesc[ExitLayer], switch: Switch): T =
|
||||
ExitLayer(switch: switch)
|
||||
|
||||
proc onMessage*(
|
||||
self: ExitLayer,
|
||||
codec: string,
|
||||
message: seq[byte],
|
||||
destAddr: MultiAddress,
|
||||
destPeerId: PeerId,
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
# If dialing destination fails, no response is returned to
|
||||
# the sender, so, flow can just end here. Only log errors
|
||||
# for now
|
||||
# https://github.com/vacp2p/mix/issues/86
|
||||
|
||||
try:
|
||||
let destConn = await self.switch.dial(destPeerId, @[destAddr], codec)
|
||||
defer:
|
||||
await destConn.close()
|
||||
await destConn.write(message)
|
||||
except LPStreamError as exc:
|
||||
error "Stream error while writing to next hop: ", err = exc.msg
|
||||
mix_messages_error.inc(labelValues = ["ExitLayer", "LPSTREAM_ERR"])
|
||||
except DialFailedError as exc:
|
||||
error "Failed to dial next hop: ", err = exc.msg
|
||||
mix_messages_error.inc(labelValues = ["ExitLayer", "DIAL_FAILED"])
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
95
libp2p/protocols/mix/fragmentation.nim
Normal file
95
libp2p/protocols/mix/fragmentation.nim
Normal file
@@ -0,0 +1,95 @@
|
||||
import ./[serialization, seqno_generator]
|
||||
import results, stew/endians2
|
||||
import ../../peerid
|
||||
|
||||
const PaddingLengthSize* = 2
|
||||
const SeqNoSize* = 4
|
||||
const DataSize* = MessageSize - PaddingLengthSize - SeqNoSize
|
||||
|
||||
# Unpadding and reassembling messages will be handled by the top-level applications.
|
||||
# Although padding and splitting messages could also be managed at that level, we
|
||||
# implement it here to clarify the sender's logic.
|
||||
# This is crucial as the sender is responsible for wrapping messages in Sphinx packets.
|
||||
|
||||
type MessageChunk* = object
|
||||
paddingLength: uint16
|
||||
data: seq[byte]
|
||||
seqNo: uint32
|
||||
|
||||
proc init*(
|
||||
T: typedesc[MessageChunk], paddingLength: uint16, data: seq[byte], seqNo: uint32
|
||||
): T =
|
||||
T(paddingLength: paddingLength, data: data, seqNo: seqNo)
|
||||
|
||||
proc get*(msgChunk: MessageChunk): (uint16, seq[byte], uint32) =
|
||||
(msgChunk.paddingLength, msgChunk.data, msgChunk.seqNo)
|
||||
|
||||
proc serialize*(msgChunk: MessageChunk): seq[byte] =
|
||||
let
|
||||
paddingBytes = msgChunk.paddingLength.toBytesBE()
|
||||
seqNoBytes = msgChunk.seqNo.toBytesBE()
|
||||
|
||||
doAssert msgChunk.data.len == DataSize,
|
||||
"Padded data must be exactly " & $DataSize & " bytes"
|
||||
|
||||
return @paddingBytes & msgChunk.data & @seqNoBytes
|
||||
|
||||
proc deserialize*(T: typedesc[MessageChunk], data: openArray[byte]): Result[T, string] =
|
||||
if data.len != MessageSize:
|
||||
return err("Data must be exactly " & $MessageSize & " bytes")
|
||||
|
||||
let
|
||||
paddingLength = uint16.fromBytesBE(data[0 .. PaddingLengthSize - 1])
|
||||
chunk = data[PaddingLengthSize .. (PaddingLengthSize + DataSize - 1)]
|
||||
seqNo = uint32.fromBytesBE(data[PaddingLengthSize + DataSize ..^ 1])
|
||||
|
||||
ok(T(paddingLength: paddingLength, data: chunk, seqNo: seqNo))
|
||||
|
||||
proc ceilDiv*(a, b: int): int =
|
||||
(a + b - 1) div b
|
||||
|
||||
proc addPadding*(messageBytes: seq[byte], seqNo: SeqNo): MessageChunk =
|
||||
## Pads messages smaller than DataSize
|
||||
let paddingLength = uint16(DataSize - messageBytes.len)
|
||||
let paddedData =
|
||||
if paddingLength > 0:
|
||||
let paddingBytes = newSeq[byte](paddingLength)
|
||||
paddingBytes & messageBytes
|
||||
else:
|
||||
messageBytes
|
||||
MessageChunk(paddingLength: paddingLength, data: paddedData, seqNo: seqNo)
|
||||
|
||||
proc addPadding*(messageBytes: seq[byte], peerId: PeerId): MessageChunk =
|
||||
## Pads messages smaller than DataSize
|
||||
var seqNoGen = SeqNo.init(peerId)
|
||||
seqNoGen.generate(messageBytes)
|
||||
messageBytes.addPadding(seqNoGen)
|
||||
|
||||
proc removePadding*(msgChunk: MessageChunk): Result[seq[byte], string] =
|
||||
let msgLength = len(msgChunk.data) - int(msgChunk.paddingLength)
|
||||
if msgLength < 0:
|
||||
return err("Invalid padding length")
|
||||
|
||||
ok(msgChunk.data[msgChunk.paddingLength ..^ 1])
|
||||
|
||||
proc padAndChunkMessage*(messageBytes: seq[byte], peerId: PeerId): seq[MessageChunk] =
|
||||
var seqNoGen = SeqNo.init(peerId)
|
||||
seqNoGen.generate(messageBytes)
|
||||
|
||||
var chunks: seq[MessageChunk] = @[]
|
||||
|
||||
# Split to chunks
|
||||
let totalChunks = max(1, ceilDiv(messageBytes.len, DataSize))
|
||||
# Ensure at least one chunk is generated
|
||||
for i in 0 ..< totalChunks:
|
||||
let
|
||||
startIdx = i * DataSize
|
||||
endIdx = min(startIdx + DataSize, messageBytes.len)
|
||||
chunkData = messageBytes[startIdx .. endIdx - 1]
|
||||
msgChunk = chunkData.addPadding(seqNoGen)
|
||||
|
||||
chunks.add(msgChunk)
|
||||
|
||||
seqNoGen.inc()
|
||||
|
||||
return chunks
|
||||
47
libp2p/protocols/mix/mix_message.nim
Normal file
47
libp2p/protocols/mix/mix_message.nim
Normal file
@@ -0,0 +1,47 @@
|
||||
import chronicles, results
|
||||
import stew/[byteutils, leb128]
|
||||
import ../../protobuf/minprotobuf
|
||||
import ../../utils/sequninit
|
||||
|
||||
type MixMessage* = object
|
||||
message*: seq[byte]
|
||||
codec*: string
|
||||
|
||||
proc init*(T: typedesc[MixMessage], message: openArray[byte], codec: string): T =
|
||||
return T(message: @message, codec: codec)
|
||||
|
||||
proc serialize*(mixMsg: MixMessage): seq[byte] =
|
||||
let vbytes = toBytes(mixMsg.codec.len.uint64, Leb128)
|
||||
doAssert vbytes.len <= 2, "serialization failed: codec length exceeds 2 bytes"
|
||||
|
||||
var buf = newSeqUninit[byte](vbytes.len + mixMsg.codec.len + mixMsg.message.len)
|
||||
buf[0 ..< vbytes.len] = vbytes.toOpenArray()
|
||||
buf[vbytes.len ..< mixMsg.codec.len] = mixMsg.codec.toBytes()
|
||||
buf[vbytes.len + mixMsg.codec.len ..< buf.len] = mixMsg.message
|
||||
buf
|
||||
|
||||
proc deserialize*(
|
||||
T: typedesc[MixMessage], data: openArray[byte]
|
||||
): Result[MixMessage, string] =
|
||||
if data.len == 0:
|
||||
return err("deserialization failed: data is empty")
|
||||
|
||||
var codecLen: int
|
||||
var varintLen: int
|
||||
for i in 0 ..< min(data.len, 2):
|
||||
let parsed = uint16.fromBytes(data[0 ..< i], Leb128)
|
||||
if parsed.len < 0 or (i == 1 and parsed.len == 0):
|
||||
return err("deserialization failed: invalid codec length")
|
||||
|
||||
varintLen = parsed.len
|
||||
codecLen = parsed.val.int
|
||||
|
||||
if data.len < varintLen + codecLen:
|
||||
return err("deserialization failed: not enough data")
|
||||
|
||||
ok(
|
||||
T(
|
||||
codec: string.fromBytes(data[varintLen ..< varintLen + codecLen]),
|
||||
message: data[varintLen + codecLen ..< data.len],
|
||||
)
|
||||
)
|
||||
13
libp2p/protocols/mix/mix_metrics.nim
Normal file
13
libp2p/protocols/mix/mix_metrics.nim
Normal file
@@ -0,0 +1,13 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import metrics
|
||||
|
||||
declarePublicCounter mix_messages_recvd, "number of mix messages received", ["type"]
|
||||
|
||||
declarePublicCounter mix_messages_forwarded,
|
||||
"number of mix messages forwarded", ["type"]
|
||||
|
||||
declarePublicCounter mix_messages_error,
|
||||
"number of mix messages failed processing", ["type", "error"]
|
||||
|
||||
declarePublicGauge mix_pool_size, "number of nodes in the pool"
|
||||
318
libp2p/protocols/mix/mix_node.nim
Normal file
318
libp2p/protocols/mix/mix_node.nim
Normal file
@@ -0,0 +1,318 @@
|
||||
import os, results, strformat, sugar, sequtils
|
||||
import std/streams
|
||||
import ../../crypto/[crypto, curve25519, secp]
|
||||
import ../../[multiaddress, multicodec, peerid, peerinfo]
|
||||
import ./[serialization, curve25519, multiaddr]
|
||||
|
||||
const MixNodeInfoSize* =
|
||||
AddrSize + (2 * FieldElementSize) + (SkRawPublicKeySize + SkRawPrivateKeySize)
|
||||
const MixPubInfoSize* = AddrSize + FieldElementSize + SkRawPublicKeySize
|
||||
|
||||
type MixNodeInfo* = object
|
||||
peerId*: PeerId
|
||||
multiAddr*: MultiAddress
|
||||
mixPubKey*: FieldElement
|
||||
mixPrivKey*: FieldElement
|
||||
libp2pPubKey*: SkPublicKey
|
||||
libp2pPrivKey*: SkPrivateKey
|
||||
|
||||
proc initMixNodeInfo*(
|
||||
peerId: PeerId,
|
||||
multiAddr: MultiAddress,
|
||||
mixPubKey, mixPrivKey: FieldElement,
|
||||
libp2pPubKey: SkPublicKey,
|
||||
libp2pPrivKey: SkPrivateKey,
|
||||
): MixNodeInfo =
|
||||
MixNodeInfo(
|
||||
peerId: peerId,
|
||||
multiAddr: multiAddr,
|
||||
mixPubKey: mixPubKey,
|
||||
mixPrivKey: mixPrivKey,
|
||||
libp2pPubKey: libp2pPubKey,
|
||||
libp2pPrivKey: libp2pPrivKey,
|
||||
)
|
||||
|
||||
proc get*(
|
||||
info: MixNodeInfo
|
||||
): (PeerId, MultiAddress, FieldElement, FieldElement, SkPublicKey, SkPrivateKey) =
|
||||
(
|
||||
info.peerId, info.multiAddr, info.mixPubKey, info.mixPrivKey, info.libp2pPubKey,
|
||||
info.libp2pPrivKey,
|
||||
)
|
||||
|
||||
proc serialize*(nodeInfo: MixNodeInfo): Result[seq[byte], string] =
|
||||
let addrBytes = multiAddrToBytes(nodeInfo.peerId, nodeInfo.multiAddr).valueOr:
|
||||
return err("Error in multiaddress conversion to bytes: " & error)
|
||||
|
||||
let
|
||||
mixPubKeyBytes = fieldElementToBytes(nodeInfo.mixPubKey)
|
||||
mixPrivKeyBytes = fieldElementToBytes(nodeInfo.mixPrivKey)
|
||||
libp2pPubKeyBytes = nodeInfo.libp2pPubKey.getBytes()
|
||||
libp2pPrivKeyBytes = nodeInfo.libp2pPrivKey.getBytes()
|
||||
|
||||
return ok(
|
||||
addrBytes & mixPubKeyBytes & mixPrivKeyBytes & libp2pPubKeyBytes & libp2pPrivKeyBytes
|
||||
)
|
||||
|
||||
proc deserialize*(T: typedesc[MixNodeInfo], data: openArray[byte]): Result[T, string] =
|
||||
if len(data) != MixNodeInfoSize:
|
||||
return
|
||||
err("Serialized Mix node info must be exactly " & $MixNodeInfoSize & " bytes")
|
||||
|
||||
let (peerId, multiAddr) = bytesToMultiAddr(data[0 .. AddrSize - 1]).valueOr:
|
||||
return err("Error in multiaddress conversion to bytes: " & error)
|
||||
|
||||
let mixPubKey = bytesToFieldElement(
|
||||
data[AddrSize .. (AddrSize + FieldElementSize - 1)]
|
||||
).valueOr:
|
||||
return err("Mix public key deserialize error: " & error)
|
||||
|
||||
let mixPrivKey = bytesToFieldElement(
|
||||
data[(AddrSize + FieldElementSize) .. (AddrSize + (2 * FieldElementSize) - 1)]
|
||||
).valueOr:
|
||||
return err("Mix private key deserialize error: " & error)
|
||||
|
||||
let libp2pPubKey = SkPublicKey.init(
|
||||
data[
|
||||
AddrSize + (2 * FieldElementSize) ..
|
||||
AddrSize + (2 * FieldElementSize) + SkRawPublicKeySize - 1
|
||||
]
|
||||
).valueOr:
|
||||
return err("Failed to initialize libp2p public key")
|
||||
|
||||
let libp2pPrivKey = SkPrivateKey.init(
|
||||
data[AddrSize + (2 * FieldElementSize) + SkRawPublicKeySize ..^ 1]
|
||||
).valueOr:
|
||||
return err("Failed to initialize libp2p private key")
|
||||
|
||||
ok(
|
||||
T(
|
||||
peerId: peerId,
|
||||
multiAddr: multiAddr,
|
||||
mixPubKey: mixPubKey,
|
||||
mixPrivKey: mixPrivKey,
|
||||
libp2pPubKey: libp2pPubKey,
|
||||
libp2pPrivKey: libp2pPrivKey,
|
||||
)
|
||||
)
|
||||
|
||||
proc writeToFile*(
|
||||
node: MixNodeInfo, index: int, nodeInfoFolderPath: string = "./nodeInfo"
|
||||
): Result[void, string] =
|
||||
if not dirExists(nodeInfoFolderPath):
|
||||
createDir(nodeInfoFolderPath)
|
||||
let filename = nodeInfoFolderPath / fmt"mixNode_{index}"
|
||||
var file = newFileStream(filename, fmWrite)
|
||||
if file == nil:
|
||||
return err("Failed to create file stream for " & filename)
|
||||
defer:
|
||||
file.close()
|
||||
|
||||
let serializedData = node.serialize().valueOr:
|
||||
return err("Failed to serialize mix node info: " & error)
|
||||
|
||||
file.writeData(addr serializedData[0], serializedData.len)
|
||||
return ok()
|
||||
|
||||
proc readFromFile*(
|
||||
T: typedesc[MixNodeInfo], index: int, nodeInfoFolderPath: string = "./nodeInfo"
|
||||
): Result[T, string] =
|
||||
let filename = nodeInfoFolderPath / fmt"mixNode_{index}"
|
||||
if not fileExists(filename):
|
||||
return err("File does not exist")
|
||||
var file = newFileStream(filename, fmRead)
|
||||
if file == nil:
|
||||
return err(
|
||||
"Failed to open file: " & filename &
|
||||
". Check permissions or if the path is correct."
|
||||
)
|
||||
defer:
|
||||
file.close()
|
||||
|
||||
let data = ?file.readAll().catch().mapErr(x => "File read error: " & x.msg)
|
||||
if data.len != MixNodeInfoSize:
|
||||
return err(
|
||||
"Invalid data size for MixNodeInfo: expected " & $MixNodeInfoSize &
|
||||
" bytes, but got " & $(data.len) & " bytes."
|
||||
)
|
||||
let dMixNodeInfo = MixNodeInfo.deserialize(cast[seq[byte]](data)).valueOr:
|
||||
return err("Mix node info deserialize error: " & error)
|
||||
return ok(dMixNodeInfo)
|
||||
|
||||
proc deleteNodeInfoFolder*(nodeInfoFolderPath: string = "./nodeInfo") =
|
||||
## Deletes the folder that stores serialized mix node info files
|
||||
## along with all its contents, if the folder exists.
|
||||
if dirExists(nodeInfoFolderPath):
|
||||
removeDir(nodeInfoFolderPath)
|
||||
|
||||
type MixPubInfo* = object
|
||||
peerId*: PeerId
|
||||
multiAddr*: MultiAddress
|
||||
mixPubKey*: FieldElement
|
||||
libp2pPubKey*: SkPublicKey
|
||||
|
||||
proc init*(
|
||||
T: typedesc[MixPubInfo],
|
||||
peerId: PeerId,
|
||||
multiAddr: MultiAddress,
|
||||
mixPubKey: FieldElement,
|
||||
libp2pPubKey: SkPublicKey,
|
||||
): T =
|
||||
T(
|
||||
peerId: PeerId,
|
||||
multiAddr: multiAddr,
|
||||
mixPubKey: mixPubKey,
|
||||
libp2pPubKey: libp2pPubKey,
|
||||
)
|
||||
|
||||
proc get*(info: MixPubInfo): (PeerId, MultiAddress, FieldElement, SkPublicKey) =
|
||||
(info.peerId, info.multiAddr, info.mixPubKey, info.libp2pPubKey)
|
||||
|
||||
proc serialize*(nodeInfo: MixPubInfo): Result[seq[byte], string] =
  ## Encodes public node info as: multiaddress bytes, then mix public key
  ## bytes, then libp2p public key bytes (concatenated in that order).
  let encodedAddr = multiAddrToBytes(nodeInfo.peerId, nodeInfo.multiAddr).valueOr:
    return err("Error in multiaddress conversion to bytes: " & error)

  ok(
    encodedAddr & fieldElementToBytes(nodeInfo.mixPubKey) &
      nodeInfo.libp2pPubKey.getBytes()
  )
|
||||
|
||||
proc deserialize*(T: typedesc[MixPubInfo], data: openArray[byte]): Result[T, string] =
  ## Parses a serialized `MixPubInfo` (inverse of `serialize`): address
  ## bytes, then mix public key, then libp2p public key. Requires exactly
  ## `MixPubInfoSize` input bytes.
  if len(data) != MixPubInfoSize:
    return
      err("Serialized mix public info must be exactly " & $MixPubInfoSize & " bytes")

  let (peerId, multiAddr) = bytesToMultiAddr(data[0 .. AddrSize - 1]).valueOr:
    return err("Error in bytes to multiaddress conversion: " & error)

  let mixPubKey = bytesToFieldElement(
    data[AddrSize .. (AddrSize + FieldElementSize - 1)]
  ).valueOr:
    return err("Mix public key deserialize error: " & error)

  # Fix: the previous message ended with a dangling ": " separator with no
  # error detail ever appended after it.
  let libp2pPubKey = SkPublicKey.init(data[(AddrSize + FieldElementSize) ..^ 1]).valueOr:
    return err("Failed to initialize libp2p public key")

  ok(
    MixPubInfo(
      peerId: peerId,
      multiAddr: multiAddr,
      mixPubKey: mixPubKey,
      libp2pPubKey: libp2pPubKey,
    )
  )
|
||||
|
||||
proc writeToFile*(
    node: MixPubInfo, index: int, pubInfoFolderPath: string = "./pubInfo"
): Result[void, string] =
  ## Serializes `node` and writes it to `<pubInfoFolderPath>/mixNode_<index>`,
  ## creating the folder if it does not exist yet.
  if not dirExists(pubInfoFolderPath):
    createDir(pubInfoFolderPath)
  let filename = pubInfoFolderPath / fmt"mixNode_{index}"
  var file = newFileStream(filename, fmWrite)
  if file == nil:
    return err("Failed to create file stream for " & filename)
  defer:
    file.close()

  let serializedData = node.serialize().valueOr:
    return err("Failed to serialize mix pub info: " & error)

  # Guard the address-of-first-element against an empty payload, which would
  # otherwise raise an IndexDefect before anything is written.
  if serializedData.len > 0:
    file.writeData(unsafeAddr serializedData[0], serializedData.len)
  return ok()
|
||||
|
||||
proc readFromFile*(
    T: typedesc[MixPubInfo], index: int, pubInfoFolderPath: string = "./pubInfo"
): Result[T, string] =
  ## Loads and deserializes the `MixPubInfo` stored for node `index`
  ## under `pubInfoFolderPath` (file name pattern: `mixNode_<index>`).
  let filename = pubInfoFolderPath / fmt"mixNode_{index}"
  if not fileExists(filename):
    return err("File does not exist")
  var file = newFileStream(filename, fmRead)
  if file == nil:
    return err(
      "Failed to open file: " & filename &
        ". Check permissions or if the path is correct."
    )
  defer:
    file.close()
  let data = ?file.readAll().catch().mapErr(x => "File read error: " & x.msg)
  if data.len != MixPubInfoSize:
    # Fix: the message previously reported MixNodeInfo / MixNodeInfoSize even
    # though this proc validates against MixPubInfoSize.
    return err(
      "Invalid data size for MixPubInfo: expected " & $MixPubInfoSize &
        " bytes, but got " & $(data.len) & " bytes."
    )
  let dMixPubInfo = MixPubInfo.deserialize(cast[seq[byte]](data)).valueOr:
    return err("Mix pub info deserialize error: " & error)
  return ok(dMixPubInfo)
|
||||
|
||||
proc deletePubInfoFolder*(pubInfoFolderPath: string = "./pubInfo") =
  ## Removes the folder of serialized public mix node info, together with
  ## every file inside it; a missing folder is silently ignored.
  if not dirExists(pubInfoFolderPath):
    return
  removeDir(pubInfoFolderPath)
|
||||
|
||||
type MixNodes* = seq[MixNodeInfo] ## Ordered collection of fully-initialized mix nodes.
|
||||
|
||||
proc getMixPubInfoByIndex*(self: MixNodes, index: int): Result[MixPubInfo, string] =
  ## Returns the public-only view (no private keys) of the node at `index`,
  ## or an error when `index` is outside `0 ..< self.len`.
  if index < 0 or index >= self.len:
    # Fix: the valid range tops out at len - 1, not len.
    return err("Index must be between 0 and " & $(self.len - 1))
  ok(
    MixPubInfo(
      peerId: self[index].peerId,
      multiAddr: self[index].multiAddr,
      mixPubKey: self[index].mixPubKey,
      libp2pPubKey: self[index].libp2pPubKey,
    )
  )
|
||||
|
||||
proc generateMixNodes(
    count: int, basePort: int = 4242, rng: ref HmacDrbgContext = newRng()
): Result[MixNodes, string] =
  ## Generates `count` mix nodes with fresh mix and libp2p key material,
  ## each listening on `/ip4/0.0.0.0/tcp/<basePort + i>`.
  ##
  ## Fix: the `rng` parameter was previously shadowed by a fresh `newRng()`
  ## created on every loop iteration, so the caller-supplied RNG was never
  ## used and a new DRBG was instantiated per node.
  var nodes = newSeq[MixNodeInfo](count)
  for i in 0 ..< count:
    let keyPairResult = generateKeyPair()
    if keyPairResult.isErr:
      return err("Generate key pair error: " & $keyPairResult.error)
    let (mixPrivKey, mixPubKey) = keyPairResult.get()

    let
      keyPair = SkKeyPair.random(rng[])
      pubKeyProto = PublicKey(scheme: Secp256k1, skkey: keyPair.pubkey)
      peerId = PeerId.init(pubKeyProto).get()
      multiAddr =
        ?MultiAddress.init(fmt"/ip4/0.0.0.0/tcp/{basePort + i}").tryGet().catch().mapErr(
          x => x.msg
        )

    nodes[i] = MixNodeInfo(
      peerId: peerId,
      multiAddr: multiAddr,
      mixPubKey: mixPubKey,
      mixPrivKey: mixPrivKey,
      libp2pPubKey: keyPair.pubkey,
      libp2pPrivKey: keyPair.seckey,
    )

  ok(nodes)
|
||||
|
||||
proc initializeMixNodes*(count: int, basePort: int = 4242): Result[MixNodes, string] =
  ## Creates and initializes a set of `count` mix nodes listening on
  ## consecutive ports starting at `basePort`.
  let generated = generateMixNodes(count, basePort).valueOr:
    return err("Mix node initialization error: " & error)
  ok(generated)
|
||||
|
||||
proc findByPeerId*(self: MixNodes, peerId: PeerId): Result[MixNodeInfo, string] =
  ## Finds the node whose peer id matches `peerId`; errors when absent.
  ##
  ## Improvement: scan with an early return instead of materializing the
  ## entire filtered sequence just to take its first element.
  for node in self:
    if node.peerId == peerId:
      return ok(node)
  err("No node with peer id: " & $peerId)
|
||||
|
||||
proc initMixMultiAddrByIndex*(
    self: var MixNodes, index: int, peerId: PeerId, multiAddr: MultiAddress
): Result[void, string] =
  ## Overwrites the peer id and multiaddress of the node at `index` in place.
  if index < 0 or index >= self.len:
    # Fix: the valid range tops out at len - 1, not len.
    return err("Index must be between 0 and " & $(self.len - 1))
  self[index].multiAddr = multiAddr
  self[index].peerId = peerId
  ok()
|
||||
392
libp2p/protocols/mix/mix_protocol.nim
Normal file
392
libp2p/protocols/mix/mix_protocol.nim
Normal file
@@ -0,0 +1,392 @@
|
||||
import chronicles, chronos, sequtils, strutils, os, results
|
||||
import std/[strformat, tables], metrics
|
||||
import
|
||||
./[
|
||||
curve25519, fragmentation, mix_message, mix_node, sphinx, serialization,
|
||||
tag_manager, mix_metrics, exit_layer, multiaddr,
|
||||
]
|
||||
import stew/endians2
|
||||
import ../protocol
|
||||
import ../../stream/[connection, lpstream]
|
||||
import ../../[switch, multicodec, peerinfo]
|
||||
|
||||
const MixProtocolID* = "/mix/1.0.0"
|
||||
|
||||
## Mix Protocol defines a decentralized anonymous message routing layer for libp2p networks.
|
||||
## It enables sender anonymity by routing each message through a decentralized mix overlay
|
||||
## network composed of participating libp2p nodes, known as mix nodes. Each message is
|
||||
## routed independently in a stateless manner, allowing other libp2p protocols to selectively
|
||||
## anonymize messages without modifying their core protocol behavior.
|
||||
type MixProtocol* = ref object of LPProtocol
  mixNodeInfo: MixNodeInfo # this node's own (private) mix identity and keys
  pubNodeInfo: Table[PeerId, MixPubInfo] # pool of known remote mix nodes
  switch: Switch # used to dial other mix nodes and destinations
  tagManager: TagManager # passed to Sphinx processing (duplicate detection)
  exitLayer: ExitLayer # delivers unwrapped messages at the exit node
  rng: ref HmacDrbgContext # randomness for path selection and hop delays
|
||||
|
||||
proc loadAllButIndexMixPubInfo*(
    index, numNodes: int, pubInfoFolderPath: string = "./pubInfo"
): Result[Table[PeerId, MixPubInfo], string] =
  ## Reads the public info of every node except the one at `index` from
  ## `pubInfoFolderPath` and returns it keyed by peer id.
  var loaded = initTable[PeerId, MixPubInfo]()
  for nodeIdx in 0 ..< numNodes:
    if nodeIdx == index:
      continue
    let info = MixPubInfo.readFromFile(nodeIdx, pubInfoFolderPath).valueOr:
      return err("Failed to load pub info from file: " & error)
    loaded[info.peerId] = info
  ok(loaded)
|
||||
|
||||
proc cryptoRandomInt(rng: ref HmacDrbgContext, max: int): Result[int, string] =
  ## Uniform random integer in `0 ..< max` drawn from `rng`.
  ##
  ## Fix: a plain `mod max` on a 64-bit draw is biased towards smaller
  ## values whenever 2^64 is not a multiple of `max`; use rejection
  ## sampling so every result is equally likely.
  if max <= 0:
    return err("Max cannot be zero.")
  let bound = uint64(max)
  # Largest multiple of `bound` representable; draws at or above it would
  # introduce modulo bias, so redraw them. The rejection probability is at
  # most bound / 2^64, i.e. negligible for any practical `max`.
  let limit = high(uint64) - (high(uint64) mod bound)
  var sample = rng[].generate(uint64)
  while sample >= limit:
    sample = rng[].generate(uint64)
  ok(int(sample mod bound))
|
||||
|
||||
proc handleMixNodeConnection(
    mixProto: MixProtocol, conn: Connection
) {.async: (raises: [LPStreamError, CancelledError]).} =
  ## Handles one inbound mix connection: reads a single length-prefixed
  ## Sphinx packet, processes it with this node's mix private key, and then
  ## acts on the processing status — deliver at the exit, forward to the
  ## next hop after the embedded delay, or drop (duplicate / bad MAC).
  ## Every failure path increments a labelled `mix_messages_error` counter.

  # Read exactly one packet; the connection is closed in all cases
  # (success, failure, or cancellation) via `finally`.
  let receivedBytes =
    try:
      await conn.readLp(PacketSize)
    except CancelledError as exc:
      raise exc
    finally:
      await conn.close()

  if receivedBytes.len == 0:
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "NO_DATA"])
    return # No data, end of stream

  # Process the packet using this node's own identity and mix private key.
  let (peerId, multiAddr, _, mixPrivKey, _, _) = mixProto.mixNodeInfo.get()

  let sphinxPacket = SphinxPacket.deserialize(receivedBytes).valueOr:
    error "Sphinx packet deserialization error", err = error
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "INVALID_SPHINX"])
    return

  # Unwraps one onion layer; the tag manager is consulted during processing
  # (see the Duplicate status below).
  let processedSP = processSphinxPacket(sphinxPacket, mixPrivKey, mixProto.tagManager).valueOr:
    error "Failed to process Sphinx packet", err = error
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "INVALID_SPHINX"])
    return

  case processedSP.status
  of Exit:
    mix_messages_recvd.inc(labelValues = ["Exit"])

    # This is the exit node, forward to destination:
    # message chunk -> unpadded bytes -> MixMessage (codec + payload).
    let msgChunk = MessageChunk.deserialize(processedSP.messageChunk).valueOr:
      error "Deserialization failed", err = error
      mix_messages_error.inc(labelValues = ["Exit", "INVALID_SPHINX"])
      return

    let unpaddedMsg = msgChunk.removePadding().valueOr:
      error "Unpadding message failed", err = error
      mix_messages_error.inc(labelValues = ["Exit", "INVALID_SPHINX"])
      return

    let deserialized = MixMessage.deserialize(unpaddedMsg).valueOr:
      error "Deserialization failed", err = error
      mix_messages_error.inc(labelValues = ["Exit", "INVALID_SPHINX"])
      return

    # A default-valued Hop means the sender never encoded a destination.
    if processedSP.destination == Hop():
      error "no destination available"
      mix_messages_error.inc(labelValues = ["Exit", "NO_DESTINATION"])
      return

    let destBytes = processedSP.destination.get()

    let (destPeerId, destAddr) = bytesToMultiAddr(destBytes).valueOr:
      error "Failed to convert bytes to multiaddress", err = error
      mix_messages_error.inc(labelValues = ["Exit", "INVALID_DEST"])
      return

    trace "Exit node - Received mix message",
      peerId,
      message = deserialized.message,
      codec = deserialized.codec,
      to = destPeerId

    # Hand the decoded payload to the exit layer for final delivery.
    await mixProto.exitLayer.onMessage(
      deserialized.codec, deserialized.message, destAddr, destPeerId
    )

    mix_messages_forwarded.inc(labelValues = ["Exit"])
  of Reply:
    # TODO: implement
    discard
  of Intermediate:
    trace "# Intermediate: ", peerId, multiAddr
    # Add delay prescribed by the packet before relaying (traffic shaping).
    mix_messages_recvd.inc(labelValues = ["Intermediate"])
    await sleepAsync(milliseconds(processedSP.delayMs))

    # Forward to next hop
    let nextHopBytes = processedSP.nextHop.get()

    let (nextPeerId, nextAddr) = bytesToMultiAddr(nextHopBytes).valueOr:
      error "Failed to convert bytes to multiaddress", err = error
      mix_messages_error.inc(labelValues = ["Intermediate", "INVALID_DEST"])
      return

    try:
      let nextHopConn =
        await mixProto.switch.dial(nextPeerId, @[nextAddr], MixProtocolID)
      defer:
        await nextHopConn.close()

      await nextHopConn.writeLp(processedSP.serializedSphinxPacket)
      mix_messages_forwarded.inc(labelValues = ["Intermediate"])
    except CancelledError as exc:
      raise exc
    except DialFailedError as exc:
      error "Failed to dial next hop: ", err = exc.msg
      mix_messages_error.inc(labelValues = ["Intermediate", "DIAL_FAILED"])
    except LPStreamError as exc:
      error "Failed to write to next hop: ", err = exc.msg
      # NOTE(review): a write failure is counted as DIAL_FAILED — confirm
      # whether a distinct label is wanted here.
      mix_messages_error.inc(labelValues = ["Intermediate", "DIAL_FAILED"])
  of Duplicate:
    # Packet tag was seen before — drop silently apart from the metric.
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "DUPLICATE"])
  of InvalidMAC:
    mix_messages_error.inc(labelValues = ["Intermediate/Exit", "INVALID_MAC"])
|
||||
|
||||
proc getMaxMessageSizeForCodec*(
    codec: string, numberOfSurbs: uint8 = 0
): Result[int, string] =
  ## Computes the maximum payload size (in bytes) available for a message when encoded
  ## with the given `codec`: the fixed `DataSize` capacity minus the overhead
  ## of an empty `MixMessage` carrying that codec string.
  ## Returns an error if the codec length alone would exceed the data capacity.
  # NOTE(review): `numberOfSurbs` is currently unused — the result does not
  # account for any SURB overhead. Confirm whether SURB space should be
  # subtracted here.
  let serializedMsg = MixMessage.init(@[], codec).serialize()
  if serializedMsg.len > DataSize:
    return err("cannot encode messages for this codec")
  return ok(DataSize - serializedMsg.len)
|
||||
|
||||
proc sendPacket(
    mixProto: MixProtocol,
    multiAddrs: MultiAddress,
    sphinxPacket: seq[byte],
    label: string,
) {.async: (raises: [CancelledError]).} =
  ## Send the wrapped message to the first mix node in the selected path.
  ## `label` tags the error metrics (e.g. "Entry"). Failures are logged and
  ## counted; they are not raised to the caller (except cancellation).

  let (firstMixPeerId, firstMixAddr) = parseFullAddress(multiAddrs).valueOr:
    error "Invalid multiaddress", err = error
    mix_messages_error.inc(labelValues = [label, "NON_RECOVERABLE"])
    return

  try:
    let nextHopConn =
      await mixProto.switch.dial(firstMixPeerId, @[firstMixAddr], @[MixProtocolID])
    defer:
      await nextHopConn.close()
    await nextHopConn.writeLp(sphinxPacket)
  except DialFailedError as exc:
    error "Failed to dial next hop: ",
      peerId = firstMixPeerId, address = firstMixAddr, err = exc.msg
    mix_messages_error.inc(labelValues = [label, "SEND_FAILED"])
    # Fix: return here so the forwarded counter below is not incremented
    # for a packet that was never sent.
    return
  except LPStreamError as exc:
    error "Failed to write to next hop: ",
      peerId = firstMixPeerId, address = firstMixAddr, err = exc.msg
    mix_messages_error.inc(labelValues = [label, "SEND_FAILED"])
    # Fix: same as above — previously execution fell through and counted
    # the failed send as forwarded.
    return
  except CancelledError as exc:
    raise exc

  # NOTE(review): this counter is hard-coded to "Entry" while the error
  # counters use `label`; confirm whether it should follow `label` too.
  mix_messages_forwarded.inc(labelValues = ["Entry"])
|
||||
|
||||
proc buildMessage(
    msg: seq[byte], codec: string, peerId: PeerId
): Result[Message, (string, string)] =
  ## Serializes `msg` under `codec`, rejects payloads larger than `DataSize`,
  ## then pads the chunk for `peerId` and returns the serialized result.
  ## Errors carry a (description, metrics-label) pair.
  let serializedMixMsg = MixMessage.init(msg, codec).serialize()

  if serializedMixMsg.len > DataSize:
    return err(("message size exceeds maximum payload size", "INVALID_SIZE"))

  ok(addPadding(serializedMixMsg, peerId).serialize())
|
||||
|
||||
## Represents the final target of a mixnet message.
## Contains the peer id and multiaddress of the destination node.
type MixDestination* = object
  peerId: PeerId # identity of the final recipient
  address: MultiAddress # address the exit node will forward the message to
|
||||
|
||||
proc init*(T: typedesc[MixDestination], peerId: PeerId, address: MultiAddress): T =
  ## Initializes a destination object with the given peer id and multiaddress.
  T(peerId: peerId, address: address)
|
||||
|
||||
proc `$`*(d: MixDestination): string =
  ## Renders the destination as "<multiaddress>/p2p/<peer id>".
  result = $d.address
  result.add "/p2p/"
  result.add $d.peerId
|
||||
|
||||
proc anonymizeLocalProtocolSend*(
    mixProto: MixProtocol,
    incoming: AsyncQueue[seq[byte]],
    msg: seq[byte],
    codec: string,
    destination: MixDestination,
    numSurbs: uint8,
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Entry-node send path: selects `PathLength` distinct mix nodes at random
  ## (never the destination itself), assigns each intermediate hop a small
  ## random delay, wraps `msg` in a Sphinx packet, and dials the first hop.
  ## All failure paths log, bump a labelled error metric, and return.
  # NOTE(review): `incoming` and `numSurbs` are not used in this body —
  # presumably reserved for reply (SURB) handling; confirm.
  mix_messages_recvd.inc(labelValues = ["Entry"])

  var
    multiAddrs: seq[MultiAddress] = @[] # addresses of the selected hops, in order
    publicKeys: seq[FieldElement] = @[] # mix public keys for Sphinx wrapping
    hop: seq[Hop] = @[] # encoded hop addresses for the packet header
    delay: seq[seq[byte]] = @[] # per-hop delay, big-endian uint16 bytes
    exitPeerId: PeerId

  # Select L mix nodes at random
  let numMixNodes = mixProto.pubNodeInfo.len
  var numAvailableNodes = numMixNodes

  debug "Destination data", destination

  # The destination must not appear in the path, so it does not count
  # towards the available pool.
  if mixProto.pubNodeInfo.hasKey(destination.peerId):
    numAvailableNodes = numMixNodes - 1

  if numAvailableNodes < PathLength:
    error "No. of public mix nodes less than path length.",
      numMixNodes = numAvailableNodes, pathLength = PathLength
    mix_messages_error.inc(labelValues = ["Entry", "LOW_MIX_POOL"])
    return

  # Skip the destination peer
  var pubNodeInfoKeys =
    mixProto.pubNodeInfo.keys.toSeq().filterIt(it != destination.peerId)
  var availableIndices = toSeq(0 ..< pubNodeInfoKeys.len)

  var i = 0
  while i < PathLength:
    # Draw without replacement: pick a position, then remove it.
    let randomIndexPosition = cryptoRandomInt(mixProto.rng, availableIndices.len).valueOr:
      error "Failed to generate random number", err = error
      mix_messages_error.inc(labelValues = ["Entry", "NON_RECOVERABLE"])
      return
    let selectedIndex = availableIndices[randomIndexPosition]
    let randPeerId = pubNodeInfoKeys[selectedIndex]
    availableIndices.del(randomIndexPosition)

    # Last hop will be the exit node that will forward the request
    # NOTE(review): `exitPeerId` is assigned but never read afterwards.
    if i == PathLength - 1:
      exitPeerId = randPeerId

    debug "Selected mix node: ", indexInPath = i, peerId = randPeerId

    # Extract multiaddress, mix public key, and hop
    let (peerId, multiAddr, mixPubKey, _) =
      mixProto.pubNodeInfo.getOrDefault(randPeerId).get()
    multiAddrs.add(multiAddr)
    publicKeys.add(mixPubKey)

    let multiAddrBytes = multiAddrToBytes(peerId, multiAddr).valueOr:
      error "Failed to convert multiaddress to bytes", err = error
      mix_messages_error.inc(labelValues = ["Entry", "INVALID_MIX_INFO"])
      #TODO: should we skip and pick a different node here??
      return

    hop.add(Hop.init(multiAddrBytes))

    # Compute delay
    let delayMillisec =
      if i != PathLength - 1:
        cryptoRandomInt(mixProto.rng, 3).valueOr:
          error "Failed to generate random number", err = error
          mix_messages_error.inc(labelValues = ["Entry", "NON_RECOVERABLE"])
          return
      else:
        0 # Last hop does not require a delay

    delay.add(@(delayMillisec.uint16.toBytesBE()))

    i = i + 1

  #Encode destination
  let destAddrBytes = multiAddrToBytes(destination.peerId, destination.address).valueOr:
    error "Failed to convert multiaddress to bytes", err = error
    mix_messages_error.inc(labelValues = ["Entry", "INVALID_DEST"])
    return
  let destHop = Hop.init(destAddrBytes)
  let message = buildMessage(msg, codec, mixProto.mixNodeInfo.peerId).valueOr:
    error "Error building message", err = error[0]
    mix_messages_error.inc(labelValues = ["Entry", error[1]])
    return

  # Wrap in Sphinx packet
  let sphinxPacket = wrapInSphinxPacket(message, publicKeys, delay, hop, destHop).valueOr:
    error "Failed to wrap in sphinx packet", err = error
    mix_messages_error.inc(labelValues = ["Entry", "NON_RECOVERABLE"])
    return

  # Send the wrapped message to the first mix node in the selected path
  await mixProto.sendPacket(multiAddrs[0], sphinxPacket, "Entry")
|
||||
|
||||
proc init*(
    mixProto: MixProtocol,
    mixNodeInfo: MixNodeInfo,
    pubNodeInfo: Table[PeerId, MixPubInfo],
    switch: Switch,
    tagManager: TagManager = TagManager.new(),
    rng: ref HmacDrbgContext = newRng(),
) =
  ## Initializes an already-allocated `MixProtocol`: stores the node's own
  ## info, the public-node pool, the switch, tag manager and RNG; sets up
  ## the exit layer, registers the mix codec, and installs the stream
  ## handler that feeds inbound connections to `handleMixNodeConnection`.
  mixProto.mixNodeInfo = mixNodeInfo
  mixProto.pubNodeInfo = pubNodeInfo
  mixProto.switch = switch
  mixProto.tagManager = tagManager

  mixProto.exitLayer = ExitLayer.init(switch)
  mixProto.codecs = @[MixProtocolID]
  mixProto.rng = rng
  mixProto.handler = proc(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    try:
      await mixProto.handleMixNodeConnection(conn)
    except LPStreamError as e:
      # Stream-level failures are logged and swallowed; the handler itself
      # only propagates cancellation.
      debug "Stream error", conn = conn, err = e.msg
|
||||
|
||||
proc new*(
    T: typedesc[MixProtocol],
    mixNodeInfo: MixNodeInfo,
    pubNodeInfo: Table[PeerId, MixPubInfo],
    switch: Switch,
    tagManager: TagManager = TagManager.new(),
    rng: ref HmacDrbgContext = newRng(),
): T =
  ## Allocates and initializes a `MixProtocol`.
  ##
  ## Fix: `tagManager` and `rng` were previously accepted but silently
  ## dropped — `init` was called without them, so callers supplying a
  ## custom tag manager or RNG (e.g. the file-based `new` overload) had
  ## no effect and the defaults were used instead.
  let mixProto = new(T)
  mixProto.init(mixNodeInfo, pubNodeInfo, switch, tagManager, rng)
  mixProto
|
||||
|
||||
proc new*(
    T: typedesc[MixProtocol],
    index, numNodes: int,
    switch: Switch,
    nodeFolderInfoPath: string = ".",
    rng: ref HmacDrbgContext = newRng(),
): Result[T, string] =
  ## Constructs a new `MixProtocol` instance for the mix node at `index`,
  ## loading its private info from `nodeInfo` and the public info of all
  ## other nodes (of `numNodes` total) from `pubInfo`.
  let ownInfo = MixNodeInfo.readFromFile(index, nodeFolderInfoPath / "nodeInfo").valueOr:
    return err("Failed to load mix node info for index " & $index & " - err: " & error)

  let otherNodes = loadAllButIndexMixPubInfo(
    index, numNodes, nodeFolderInfoPath / "pubInfo"
  ).valueOr:
    return err("Failed to load mix pub info for index " & $index & " - err: " & error)

  ok(MixProtocol.new(ownInfo, otherNodes, switch, TagManager.new(), rng))
|
||||
|
||||
proc setNodePool*(
    mixProtocol: MixProtocol, mixNodeTable: Table[PeerId, MixPubInfo]
) {.gcsafe, raises: [].} =
  ## Replaces the entire pool of known mix nodes with `mixNodeTable`.
  mixProtocol.pubNodeInfo = mixNodeTable
|
||||
|
||||
proc getNodePoolSize*(mixProtocol: MixProtocol): int {.gcsafe, raises: [].} =
  ## Returns the number of mix nodes currently known in the pool.
  mixProtocol.pubNodeInfo.len
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user