Compare commits

1 commit

Author   SHA1         Message                 Date
Tanguy   ae67a9b3f6   Add queued send bytes   2023-02-13 17:32:07 +01:00
358 changed files with 18573 additions and 44781 deletions

.appveyor.yml | 52 (new file)

@@ -0,0 +1,52 @@
version: '{build}'
image: Visual Studio 2015
cache:
- NimBinaries
- p2pdCache
matrix:
# We always want 32 and 64-bit compilation
fast_finish: false
platform:
- x86
- x64
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
clone_depth: 10
install:
- git submodule update --init --recursive
# use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
- IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
- IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%
# build nim from our own branch - this to avoid the day-to-day churn and
# regressions of the fast-paced Nim development while maintaining the
# flexibility to apply patches
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
- env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries
- SET PATH=%CD%\Nim\bin;%PATH%
# set path for produced Go binaries
- MKDIR goblin
- CD goblin
- SET GOPATH=%CD%
- SET PATH=%GOPATH%\bin;%PATH%
- CD ..
# install and build go-libp2p-daemon
- bash scripts/build_p2pd.sh p2pdCache v0.3.0
build_script:
- nimble install -y --depsOnly
test_script:
- nimble test
- nimble examples_build
deploy: off


@@ -1,2 +0,0 @@
# Formatted with nph 0.5.1
dc83a1e9b68f00b3be7e09febdb1a3f877321b9a

.github/CODEOWNERS | 1 (vendored)

@@ -1 +0,0 @@
* @vacp2p/p2p


@@ -1,34 +0,0 @@
name: Add Comment
description: "Add or update comment in the PR"
runs:
using: "composite"
steps:
- name: Add/Update Comment
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const marker = "${{ env.MARKER }}";
const body = fs.readFileSync("${{ env.COMMENT_SUMMARY_PATH }}", 'utf8');
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const existing = comments.find(c => c.body && c.body.startsWith(marker));
if (existing) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: existing.id,
body,
});
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body,
});
}


@@ -1,49 +0,0 @@
name: Discord Failure Notification
description: "Send Discord notification when CI jobs fail"
inputs:
webhook_url:
description: "Discord webhook URL"
required: true
workflow_name:
description: "Name of the workflow that failed"
required: false
default: ${{ github.workflow }}
branch:
description: "Branch name"
required: false
default: ${{ github.ref_name }}
repository:
description: "Repository name"
required: false
default: ${{ github.repository }}
run_id:
description: "GitHub run ID"
required: false
default: ${{ github.run_id }}
server_url:
description: "GitHub server URL"
required: false
default: ${{ github.server_url }}
runs:
using: "composite"
steps:
- name: Send Discord notification
shell: bash
run: |
curl -H "Content-Type: application/json" \
-X POST \
-d "{
\"embeds\": [{
\"title\": \"${{ inputs.workflow_name }} Job Failed\",
\"url\": \"${{ inputs.server_url }}/${{ inputs.repository }}/actions/runs/${{ inputs.run_id }}\",
\"description\": \"The workflow has failed on branch \`${{ inputs.branch }}\`\",
\"color\": 15158332,
\"fields\": [
{\"name\": \"Repository\", \"value\": \"${{ inputs.repository }}\", \"inline\": true},
{\"name\": \"Branch\", \"value\": \"${{ inputs.branch }}\", \"inline\": true}
],
\"timestamp\": \"$(date -u +%Y-%m-%dT%H:%M:%S.000Z)\"
}]
}" \
"${{ inputs.webhook_url }}"


@@ -1,24 +0,0 @@
name: Generate Plots
description: "Set up Python and run script to generate plots with Docker Stats"
runs:
using: "composite"
steps:
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install Python dependencies
shell: bash
run: |
python -m pip install --upgrade pip
pip install matplotlib
- name: Plot Docker Stats
shell: bash
run: python performance/scripts/plot_docker_stats.py
- name: Plot Latency History
shell: bash
run: python performance/scripts/plot_latency_history.py


@@ -6,9 +6,9 @@ inputs:
cpu:
description: "CPU to build for"
default: "amd64"
nim_ref:
nim_branch:
description: "Nim version"
default: "version-2-0"
default: "version-1-6"
shell:
description: "Shell to run commands in"
default: "bash --noprofile --norc -e -o pipefail"
@@ -61,7 +61,7 @@ runs:
- name: Restore Nim DLLs dependencies (Windows) from cache
if: inputs.os == 'Windows'
id: windows-dlls-cache
uses: actions/cache@v4
uses: actions/cache@v3
with:
path: external/dlls
key: 'dlls'
@@ -88,8 +88,6 @@ runs:
run: |
if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
PLATFORM=x64
elif [[ '${{ inputs.cpu }}' == 'arm64' ]]; then
PLATFORM=arm64
else
PLATFORM=x86
fi
@@ -116,10 +114,10 @@ runs:
- name: Restore Nim from cache
id: nim-cache
uses: actions/cache@v4
uses: actions/cache@v3
with:
path: '${{ github.workspace }}/nim'
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_ref }}-cache-${{ env.cache_nonce }}
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}
- name: Build Nim and Nimble
shell: ${{ inputs.shell }}
@@ -128,6 +126,6 @@ runs:
# We don't want partial matches of the cache restored
rm -rf nim
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_ref }} \
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_branch }} \
QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
bash build_nim.sh nim csources dist/nimble NimBinaries


@@ -1,21 +0,0 @@
name: Process Stats
description: "Set up Nim and run scripts to aggregate latency and process raw docker stats"
runs:
using: "composite"
steps:
- name: Set up Nim
uses: jiro4989/setup-nim-action@v2
with:
nim-version: "2.x"
repo-token: ${{ env.GITHUB_TOKEN }}
- name: Aggregate latency stats and prepare markdown for comment and summary
shell: bash
run: |
nim c -r -d:release -o:/tmp/process_latency_stats ./performance/scripts/process_latency_stats.nim
- name: Process raw docker stats to csv files
shell: bash
run: |
nim c -r -d:release -o:/tmp/process_docker_stats ./performance/scripts/process_docker_stats.nim


@@ -1,36 +0,0 @@
name: Publish Latency History
description: "Publish latency history CSVs in a configurable branch and folder"
runs:
using: "composite"
steps:
- name: Clone the branch
uses: actions/checkout@v4
with:
repository: ${{ github.repository }}
ref: ${{ env.PUBLISH_BRANCH_NAME }}
path: ${{ env.CHECKOUT_SUBFOLDER_HISTORY }}
fetch-depth: 0
- name: Commit & push latency history CSVs
shell: bash
run: |
cd "$CHECKOUT_SUBFOLDER_HISTORY"
git fetch origin "$PUBLISH_BRANCH_NAME"
git reset --hard "origin/$PUBLISH_BRANCH_NAME"
mkdir -p "$PUBLISH_DIR_LATENCY_HISTORY"
cp ../$SHARED_VOLUME_PATH/$LATENCY_HISTORY_PREFIX*.csv "$PUBLISH_DIR_LATENCY_HISTORY/"
git add "$PUBLISH_DIR_LATENCY_HISTORY"
if git diff-index --quiet HEAD --; then
echo "No changes to commit"
else
git config user.email "github-actions[bot]@users.noreply.github.com"
git config user.name "github-actions[bot]"
git commit -m "Update latency history CSVs"
git push origin "$PUBLISH_BRANCH_NAME"
fi
cd ..


@@ -1,56 +0,0 @@
name: Publish Plots
description: "Publish plots in performance_plots branch and add to the workflow summary"
runs:
using: "composite"
steps:
- name: Clone the performance_plots branch
uses: actions/checkout@v4
with:
repository: ${{ github.repository }}
ref: ${{ env.PUBLISH_BRANCH_NAME }}
path: ${{ env.CHECKOUT_SUBFOLDER_SUBPLOTS }}
fetch-depth: 0
- name: Commit & push plots
shell: bash
run: |
cd $CHECKOUT_SUBFOLDER_SUBPLOTS
git fetch origin "$PUBLISH_BRANCH_NAME"
git reset --hard "origin/$PUBLISH_BRANCH_NAME"
# Remove any branch folder older than 7 days
DAYS=7
cutoff=$(( $(date +%s) - DAYS*24*3600 ))
scan_dir="${PUBLISH_DIR_PLOTS%/}"
find "$scan_dir" -mindepth 1 -maxdepth 1 -type d -print0 \
| while IFS= read -r -d $'\0' d; do \
ts=$(git log -1 --format=%ct -- "$d" 2>/dev/null || true); \
if [ -n "$ts" ] && [ "$ts" -le "$cutoff" ]; then \
echo "[cleanup] Deleting: $d"; \
rm -rf -- "$d"; \
fi; \
done
rm -rf $PUBLISH_DIR_PLOTS/$BRANCH_NAME
mkdir -p $PUBLISH_DIR_PLOTS/$BRANCH_NAME
cp ../$SHARED_VOLUME_PATH/*.png $PUBLISH_DIR_PLOTS/$BRANCH_NAME/ 2>/dev/null || true
cp ../$LATENCY_HISTORY_PATH/*.png $PUBLISH_DIR_PLOTS/ 2>/dev/null || true
git add -A "$PUBLISH_DIR_PLOTS/"
git status
if git diff-index --quiet HEAD --; then
echo "No changes to commit"
else
git config user.email "github-actions[bot]@users.noreply.github.com"
git config user.name "github-actions[bot]"
git commit -m "Update performance plots for $BRANCH_NAME"
git push origin $PUBLISH_BRANCH_NAME
fi
- name: Add plots to GitHub Actions summary
shell: bash
run: |
nim c -r -d:release -o:/tmp/add_plots_to_summary ./performance/scripts/add_plots_to_summary.nim


@@ -1,12 +0,0 @@
name: Auto Assign PR to Creator
on:
pull_request:
types:
- opened
jobs:
assign_creator:
runs-on: ubuntu-latest
steps:
- uses: toshimaru/auto-author-assign@v1.6.2

.github/workflows/bumper.yml | 43 (vendored, new file)

@@ -0,0 +1,43 @@
name: Bumper
on:
push:
branches:
- unstable
- bumper
workflow_dispatch:
jobs:
bumpProjects:
runs-on: ubuntu-latest
strategy:
matrix:
target: [
{ repo: status-im/nimbus-eth2, branch: unstable },
{ repo: status-im/nwaku, branch: master },
{ repo: status-im/nim-codex, branch: main }
]
steps:
- name: Clone repo
uses: actions/checkout@v2
with:
repository: ${{ matrix.target.repo }}
ref: ${{ matrix.target.branch }}
path: nbc
submodules: true
fetch-depth: 0
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
- name: Checkout this ref
run: |
cd nbc/vendor/nim-libp2p
git checkout $GITHUB_SHA
- name: Commit this bump
run: |
cd nbc
git config --global user.email "${{ github.actor }}@users.noreply.github.com"
git config --global user.name = "${{ github.actor }}"
git commit -a -m "auto-bump nim-libp2p"
git branch -D nim-libp2p-auto-bump-${GITHUB_REF##*/} || true
git switch -c nim-libp2p-auto-bump-${GITHUB_REF##*/}
git push -f origin nim-libp2p-auto-bump-${GITHUB_REF##*/}


@@ -1,11 +1,10 @@
name: Continuous Integration
name: CI
on:
push:
branches:
- master
- unstable
pull_request:
merge_group:
workflow_dispatch:
concurrency:
@@ -13,69 +12,62 @@ concurrency:
cancel-in-progress: true
jobs:
test:
timeout-minutes: 40
build:
timeout-minutes: 90
strategy:
fail-fast: false
matrix:
platform:
target:
- os: linux
cpu: amd64
- os: linux
cpu: i386
- os: linux-gcc-14
- os: macos
cpu: amd64
- os: macos-14
cpu: arm64
- os: windows
cpu: amd64
nim:
- ref: version-2-0
memory_management: refc
- ref: version-2-2
memory_management: refc
#- os: windows
#cpu: i386
branch: [version-1-2, version-1-6]
include:
- platform:
- target:
os: linux
builder: ubuntu-22.04
builder: ubuntu-20.04
shell: bash
- platform:
os: linux-gcc-14
builder: ubuntu-24.04
- target:
os: macos
builder: macos-12
shell: bash
- platform:
os: macos-14
builder: macos-14
shell: bash
- platform:
- target:
os: windows
builder: windows-2022
builder: windows-2019
shell: msys2 {0}
defaults:
run:
shell: ${{ matrix.shell }}
name: '${{ matrix.platform.os }}-${{ matrix.platform.cpu }} (Nim ${{ matrix.nim.ref }})'
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'devel' }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
submodules: true
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: ${{ matrix.platform.os }}
cpu: ${{ matrix.platform.cpu }}
os: ${{ matrix.target.os }}
cpu: ${{ matrix.target.cpu }}
shell: ${{ matrix.shell }}
nim_ref: ${{ matrix.nim.ref }}
nim_branch: ${{ matrix.branch }}
- name: Setup Go
uses: actions/setup-go@v5
uses: actions/setup-go@v2
with:
go-version: '~1.16.0' # That's the minimum Go version that works with arm.
go-version: '~1.15.5'
- name: Install p2pd
run: |
@@ -86,29 +78,15 @@ jobs:
uses: actions/cache@v3
with:
path: nimbledeps
# Using nim.ref as a simple way to differentiate between nimble using the "pkgs" or "pkgs2" directories.
# The change happened on Nimble v0.14.0. Also forcing the deps to be reinstalled on each os and cpu.
key: nimbledeps-${{ matrix.nim.ref }}-${{ matrix.builder }}-${{ matrix.platform.cpu }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows
key: nimbledeps-${{ hashFiles('.pinned') }}
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
run: |
nimble install_pinned
- name: Use gcc 14
if : ${{ matrix.platform.os == 'linux-gcc-14'}}
run: |
# Add GCC-14 to alternatives
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14
# Set GCC-14 as the default
sudo update-alternatives --set gcc /usr/bin/gcc-14
- name: Run tests
run: |
nim --version
nimble --version
gcc --version
export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble test


@@ -1,12 +1,12 @@
name: Coverage
name: nim-libp2p codecov builds
on:
# On push to common branches, this computes the coverage that PRs will use for diff
#On push to common branches, this computes the "bases stats" for PRs
push:
branches:
- master
- unstable
pull_request:
merge_group:
workflow_dispatch:
concurrency:
@@ -14,13 +14,12 @@ concurrency:
cancel-in-progress: true
jobs:
codecov:
name: Run coverage and upload to codecov
runs-on: ubuntu-22.04
Coverage:
runs-on: ubuntu-20.04
env:
CICOV: YES
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v2
with:
fetch-depth: 0
@@ -33,7 +32,7 @@ jobs:
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v4
uses: actions/cache@v3
with:
path: nimbledeps
key: nimbledeps-${{ hashFiles('.pinned') }}
@@ -43,28 +42,24 @@ jobs:
run: |
nimble install_pinned
- name: Setup coverage
- name: Run
run: |
sudo apt-get update
sudo apt-get install -y lcov build-essential git curl
mkdir coverage
- name: Run test suite with coverage flags
run: |
export NIMFLAGS="--lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage"
nimble testnative
nimble testpubsub
nimble testfilter
- name: Run coverage
run: |
find nimcache -name *.c -delete
lcov --capture --directory nimcache --output-file coverage/coverage.info
shopt -s globstar
ls `pwd`/libp2p/{*,**/*}.nim
lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
genhtml coverage/coverage.f.info --output-directory coverage/output
- name: Upload coverage to codecov
run: |
bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
#- uses: actions/upload-artifact@master
# with:
# name: coverage
# path: coverage


@@ -1,42 +0,0 @@
name: Daily amd64
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
test_amd64_latest:
name: Daily test amd64 (latest dependencies)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['amd64']"
test_amd64_pinned:
name: Daily test amd64 (pinned dependencies)
uses: ./.github/workflows/daily_common.yml
with:
pinned_deps: true
nim: "[
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['amd64']"
notify-on-failure:
name: Notify Discord on Failure
needs: [test_amd64_latest, test_amd64_pinned]
if: failure()
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Discord notification
uses: ./.github/actions/discord_notify
with:
webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}


@@ -1,107 +0,0 @@
name: Daily Common
# Serves as base workflow for daily tasks, it's not run by itself.
on:
workflow_call:
inputs:
pinned_deps:
description: 'Should dependencies be installed from pinned file or use latest versions'
required: false
type: boolean
default: false
nim:
description: 'Nim Configuration'
required: true
type: string # Following this format: [{"ref": ..., "memory_management": ...}, ...]
cpu:
description: 'CPU'
required: true
type: string
exclude:
description: 'Exclude matrix configurations'
required: false
type: string
default: "[]"
jobs:
delete_cache:
name: Delete github action's branch cache
runs-on: ubuntu-latest
continue-on-error: true
steps:
- uses: snnaplab/delete-branch-cache-action@v1
test:
needs: delete_cache
timeout-minutes: 40
strategy:
fail-fast: false
matrix:
platform:
- os: linux
builder: ubuntu-22.04
shell: bash
- os: macos
builder: macos-13
shell: bash
- os: windows
builder: windows-2022
shell: msys2 {0}
nim: ${{ fromJSON(inputs.nim) }}
cpu: ${{ fromJSON(inputs.cpu) }}
exclude: ${{ fromJSON(inputs.exclude) }}
defaults:
run:
shell: ${{ matrix.platform.shell }}
name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.nim.ref }})'
runs-on: ${{ matrix.platform.builder }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: ${{ matrix.platform.os }}
shell: ${{ matrix.platform.shell }}
nim_ref: ${{ matrix.nim.ref }}
cpu: ${{ matrix.cpu }}
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '~1.16.0'
cache: false
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Install dependencies (pinned)
if: ${{ inputs.pinned_deps }}
run: |
nimble install_pinned
- name: Install dependencies (latest)
if: ${{ inputs.pinned_deps == false }}
run: |
nimble install -y --depsOnly
- name: Run tests
run: |
nim --version
nimble --version
export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble test
- name: Run integration tests
if: ${{ matrix.platform.os == 'linux' && matrix.cpu == 'amd64' }}
run: |
nim --version
nimble --version
export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble testintegration


@@ -1,50 +0,0 @@
name: Daily i386
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
test_i386_latest:
name: Daily i386 (latest dependencies)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['i386']"
exclude: "[
{'platform': {'os':'macos'}},
{'platform': {'os':'windows'}},
]"
test_i386_pinned:
name: Daily i386 (pinned dependencies)
uses: ./.github/workflows/daily_common.yml
with:
pinned_deps: true
nim: "[
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['i386']"
exclude: "[
{'platform': {'os':'macos'}},
{'platform': {'os':'windows'}},
]"
notify-on-failure:
name: Notify Discord on Failure
needs: [test_i386_latest, test_i386_pinned]
if: failure()
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Discord notification
uses: ./.github/actions/discord_notify
with:
webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}


@@ -1,39 +0,0 @@
name: Daily Nimbus
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
compile_nimbus:
timeout-minutes: 80
name: 'Compile Nimbus (linux-amd64)'
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Compile nimbus using nim-libp2p
run: |
git clone --branch unstable --single-branch https://github.com/status-im/nimbus-eth2.git
cd nimbus-eth2
git submodule set-branch --branch ${{ github.sha }} vendor/nim-libp2p
make -j"$(nproc)"
make -j"$(nproc)" nimbus_beacon_node
notify-on-failure:
name: Notify Discord on Failure
needs: compile_nimbus
if: failure()
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Discord notification
uses: ./.github/actions/discord_notify
with:
webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}


@@ -1,65 +0,0 @@
name: Dependencies
on:
push:
branches:
- master
workflow_dispatch:
jobs:
bumper:
# Pushes new refs to interested external repositories, so they can do early testing against libp2p's newer versions
runs-on: ubuntu-latest
name: Bump libp2p's version for ${{ matrix.target.repository }}:${{ matrix.target.ref }}
strategy:
fail-fast: false
matrix:
target:
- repository: status-im/nimbus-eth2
ref: unstable
secret: ACTIONS_GITHUB_TOKEN_NIMBUS_ETH2
- repository: waku-org/nwaku
ref: master
secret: ACTIONS_GITHUB_TOKEN_NWAKU
- repository: codex-storage/nim-codex
ref: master
secret: ACTIONS_GITHUB_TOKEN_NIM_CODEX
steps:
- name: Clone target repository
uses: actions/checkout@v4
with:
repository: ${{ matrix.target.repository }}
ref: ${{ matrix.target.ref}}
path: nbc
fetch-depth: 0
token: ${{ secrets[matrix.target.secret] }}
- name: Checkout this ref in target repository
run: |
cd nbc
git submodule update --init vendor/nim-libp2p
cd vendor/nim-libp2p
git checkout $GITHUB_SHA
- name: Push this ref to target repository
run: |
cd nbc
git config --global user.email "${{ github.actor }}@users.noreply.github.com"
git config --global user.name = "${{ github.actor }}"
git commit --allow-empty -a -m "auto-bump nim-libp2p"
git branch -D nim-libp2p-auto-bump-${{ matrix.target.ref }} || true
git switch -c nim-libp2p-auto-bump-${{ matrix.target.ref }}
git push -f origin nim-libp2p-auto-bump-${{ matrix.target.ref }}
notify-on-failure:
name: Notify Discord on Failure
needs: [bumper]
if: failure()
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Discord notification
uses: ./.github/actions/discord_notify
with:
webhook_url: ${{ secrets.DISCORD_WEBHOOK_URL }}


@@ -1,27 +1,25 @@
name: Documentation Generation And Publishing
name: Docgen
on:
push:
branches:
- master
workflow_dispatch:
jobs:
build:
timeout-minutes: 20
name: 'Generate & upload documentation'
runs-on: ubuntu-latest
runs-on: 'ubuntu-20.04'
continue-on-error: true
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
submodules: true
- uses: jiro4989/setup-nim-action@v1
with:
nim-version: '2.2.x'
nim-version: 'stable'
- name: Generate doc
run: |
@@ -29,15 +27,15 @@ jobs:
nimble --version
nimble install_pinned
# nim doc can "fail", but the doc is still generated
nim doc --git.url:https://github.com/vacp2p/nim-libp2p --git.commit:${GITHUB_REF##*/} --outdir:${GITHUB_REF##*/} --project libp2p || true
nim doc --git.url:https://github.com/status-im/nim-libp2p --git.commit:${GITHUB_REF##*/} --outdir:${GITHUB_REF##*/} --project libp2p || true
# check that the folder exists
ls ${GITHUB_REF##*/}
- name: Clone the gh-pages branch
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
repository: vacp2p/nim-libp2p
repository: status-im/nim-libp2p
ref: gh-pages
path: subdoc
submodules: true
@@ -47,6 +45,9 @@ jobs:
run: |
cd subdoc
# Delete merged branches doc's
for branch in $(git branch -vv | grep ': gone]' | awk '{print $1}'); do rm -rf $branch; done
# Update / create this branch doc
rm -rf ${GITHUB_REF##*/}
mv ../${GITHUB_REF##*/} .
@@ -62,11 +63,12 @@ jobs:
git push origin gh-pages
update_site:
if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/docs'
name: 'Rebuild website'
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
@@ -77,12 +79,12 @@ jobs:
nim-version: 'stable'
- name: Generate website
run: pip install mkdocs-material && nimble -y website
run: pip install mkdocs-material && nimble website
- name: Clone the gh-pages branch
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
repository: vacp2p/nim-libp2p
repository: status-im/nim-libp2p
ref: gh-pages
path: subdoc
fetch-depth: 0
@@ -91,21 +93,11 @@ jobs:
run: |
cd subdoc
# Ensure the latest changes are fetched and reset to the remote branch
git fetch origin gh-pages
git reset --hard origin/gh-pages
rm -rf docs
mv ../site docs
git add .
if git diff-index --quiet HEAD --; then
echo "No changes to commit"
else
git config --global user.email "${{ github.actor }}@users.noreply.github.com"
git config --global user.name "${{ github.actor }}"
git commit -m "update website"
git push origin gh-pages
fi
git config --global user.email "${{ github.actor }}@users.noreply.github.com"
git config --global user.name = "${{ github.actor }}"
git commit -a -m "update website"
git push origin gh-pages


@@ -1,60 +0,0 @@
name: Examples
on:
push:
branches:
- master
pull_request:
merge_group:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
examples:
timeout-minutes: 30
strategy:
fail-fast: false
defaults:
run:
shell: bash
name: "Build Examples"
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: true
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
shell: bash
os: linux
cpu: amd64
nim_ref: version-2-2
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v3
with:
path: nimbledeps
key: nimbledeps-${{ hashFiles('.pinned') }}
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
run: |
nimble install_pinned
- name: Build and run examples
run: |
nim --version
nimble --version
gcc --version
NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble examples


@@ -1,62 +0,0 @@
name: Interoperability Tests
on:
pull_request:
merge_group:
push:
branches:
- master
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
run-transport-interop:
name: Run transport interoperability tests
runs-on: ubuntu-22.04
steps:
- name: Free Disk Space
# For some reason we have space issues while running this action. Likely while building the image.
# This action will free up some space to avoid the issue.
uses: jlumbroso/free-disk-space@v1.3.1
with:
tool-cache: true
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f interop/transport/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
with:
test-filter: nim-libp2p-head
# without suffix action fails because "hole-punching-interop" artifacts have
# the same name as "transport-interop" artifacts
test-results-suffix: transport-interop
extra-versions: ${{ github.workspace }}/interop/transport/version.json
s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
# nim-libp2p#1367: hole punching tests are temporary disabled as they keep failing
# and issue does not seem to be on nim-libp2p side
# run-hole-punching-interop:
# name: Run hole-punching interoperability tests
# runs-on: ubuntu-22.04
# steps:
# - uses: actions/checkout@v4
# - uses: docker/setup-buildx-action@v3
# - name: Build image
# run: docker buildx build --load -t nim-libp2p-head -f interop/hole-punching/Dockerfile .
# - name: Run tests
# uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
# with:
# test-filter: nim-libp2p-head
# extra-versions: ${{ github.workspace }}/interop/hole-punching/version.json
# s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
# s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
# s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
# aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}


@@ -1,27 +0,0 @@
name: Linters
on:
pull_request:
merge_group:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
nph:
name: NPH
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 2 # In PR, has extra merge commit: ^1 = PR, ^2 = base
- name: Check `nph` formatting
uses: arnetheduck/nph-action@v1
with:
version: 0.6.1
options: "examples libp2p tests interop tools *.nim*"
fail: true
suggest: true

.github/workflows/multi_nim.yml | 82 (vendored, new file)

@@ -0,0 +1,82 @@
name: Daily
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1
build:
needs: delete-cache
timeout-minutes: 120
strategy:
fail-fast: false
matrix:
target:
- os: linux
cpu: amd64
- os: linux
cpu: i386
- os: macos
cpu: amd64
- os: windows
cpu: amd64
#- os: windows
#cpu: i386
branch: [version-1-2, version-1-6, devel]
include:
- target:
os: linux
builder: ubuntu-20.04
shell: bash
- target:
os: macos
builder: macos-12
shell: bash
- target:
os: windows
builder: windows-2019
shell: msys2 {0}
defaults:
run:
shell: ${{ matrix.shell }}
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'devel' }}
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: ${{ matrix.target.os }}
shell: ${{ matrix.shell }}
nim_branch: ${{ matrix.branch }}
cpu: ${{ matrix.target.cpu }}
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: '~1.15.5'
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Run tests
run: |
nim --version
nimble --version
nimble install -y --depsOnly
NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
if [[ "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--gc:orc':\n"
NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
fi


@@ -1,94 +0,0 @@
name: Performance
on:
push:
branches:
- master
pull_request:
merge_group:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
performance:
timeout-minutes: 20
strategy:
fail-fast: false
defaults:
run:
shell: bash
env:
VACP2P: "vacp2p"
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
PR_NUMBER: ${{ github.event.number }}
BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
MARKER: "<!-- perf-summary-marker -->"
COMMENT_SUMMARY_PATH: "/tmp/perf-summary.md"
SHARED_VOLUME_PATH: "performance/output"
DOCKER_STATS_PREFIX: "docker_stats_"
PUBLISH_BRANCH_NAME: "performance_plots"
CHECKOUT_SUBFOLDER_SUBPLOTS: "subplots"
PUBLISH_DIR_PLOTS: "plots"
CHECKOUT_SUBFOLDER_HISTORY: "history"
PUBLISH_DIR_LATENCY_HISTORY: "latency_history"
LATENCY_HISTORY_PATH: "history/latency_history"
LATENCY_HISTORY_PREFIX: "pr"
LATENCY_HISTORY_PLOT_FILENAME: "latency_history_all_scenarios.png"
name: "Performance"
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build Docker Image with cache
uses: docker/build-push-action@v6
with:
context: .
file: performance/Dockerfile
tags: test-node:latest
load: true
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Run
run: |
./performance/runner.sh
- name: Process latency and docker stats
uses: ./.github/actions/process_stats
- name: Publish history
if: github.repository_owner == env.VACP2P
uses: ./.github/actions/publish_history
- name: Generate plots
if: github.repository_owner == env.VACP2P
uses: ./.github/actions/generate_plots
- name: Post/Update PR comment
if: github.event_name == 'pull_request'
uses: ./.github/actions/add_comment
- name: Upload performance artifacts
if: success() || failure()
uses: actions/upload-artifact@v4
with:
name: performance-artifacts
path: |
performance/output/pr*_latency.csv
performance/output/*.png
history/latency_history/*.png
if-no-files-found: ignore
retention-days: 7


@@ -1,35 +0,0 @@
name: "Conventional Commits"
on:
pull_request:
types:
- opened
- edited
- reopened
- synchronize
jobs:
main:
name: Validate PR title
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- uses: amannn/action-semantic-pull-request@v5
id: lint_pr_title
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- uses: marocchino/sticky-pull-request-comment@v2
# When the previous steps fails, the workflow would stop. By adding this
# condition you can continue the execution with the populated error message.
if: always() && (steps.lint_pr_title.outputs.error_message != null)
with:
header: pr-title-lint-error
message: |
Pull requests titles must follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/)
# Delete a previous comment when the issue has been resolved
- if: ${{ steps.lint_pr_title.outputs.error_message == null }}
uses: marocchino/sticky-pull-request-comment@v2
with:
header: pr-title-lint-error
delete: true

.gitignore | 9 (vendored)

@@ -16,12 +16,3 @@ tests/pubsub/testgossipsub
examples/*.md
nimble.develop
nimble.paths
go-libp2p-daemon/
# Ignore all test build files in tests folder (auto generated when running tests).
# First rule (`tests/**/test*[^.]*`) ignores all binaries: files prefixed with test and without a dot in the name.
# Second and third rules un-ignore all files with an extension as well as Dockerfiles,
# because VS Code appears to skip text search in some test files without these rules.
tests/**/test*[^.]*
!tests/**/*.*
!tests/**/Dockerfile

.pinned | 37

@@ -1,21 +1,16 @@
bearssl;https://github.com/status-im/nim-bearssl@#34d712933a4e0f91f5e66bc848594a581504a215
chronicles;https://github.com/status-im/nim-chronicles@#61759a5e8df8f4d68bcd1b4b8c1adab3e72bbd8d
chronos;https://github.com/status-im/nim-chronos@#b55e2816eb45f698ddaca8d8473e401502562db2
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
faststreams;https://github.com/status-im/nim-faststreams@#c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d
httputils;https://github.com/status-im/nim-http-utils@#79cbab1460f4c0cdde2084589d017c43a3d7b4f1
json_serialization;https://github.com/status-im/nim-json-serialization@#2b1c5eb11df3647a2cee107cd4cce3593cbb8bcf
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
nimcrypto;https://github.com/cheatfate/nimcrypto@#19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
quic;https://github.com/vacp2p/nim-quic@#9370190ded18d78a5a9990f57aa8cbbf947f3891
results;https://github.com/arnetheduck/nim-results@#df8113dda4c2d74d460a8fa98252b0b771bf1f27
secp256k1;https://github.com/status-im/nim-secp256k1@#f808ed5e7a7bfc42204ec7830f14b7a42b63c284
serialization;https://github.com/status-im/nim-serialization@#548d0adc9797a10b2db7f788b804330306293088
stew;https://github.com/status-im/nim-stew@#0db179256cf98eb9ce9ee7b9bc939f219e621f77
testutils;https://github.com/status-im/nim-testutils@#9e842bd58420d23044bc55e16088e8abbe93ce51
unittest2;https://github.com/status-im/nim-unittest2@#8b51e99b4a57fcfb31689230e75595f024543024
websock;https://github.com/status-im/nim-websock@#d5cd89062cd2d168ef35193c7d29d2102921d97e
zlib;https://github.com/status-im/nim-zlib@#daa8723fd32299d4ca621c837430c29a5a11e19a
jwt;https://github.com/vacp2p/nim-jwt@#18f8378de52b241f321c1f9ea905456e89b95c6f
bearssl_pkey_decoder;https://github.com/vacp2p/bearssl_pkey_decoder@#21dd3710df9345ed2ad8bf8f882761e07863b8e0
bearssl;https://github.com/status-im/nim-bearssl@#a647994910904b0103a05db3a5ec1ecfc4d91a88
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#75d030ff71264513fb9701c75a326cd36fcb4692
dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823
faststreams;https://github.com/status-im/nim-faststreams@#b42daf41d8eb4fbce40add6836bed838f8d85b6f
httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f
json_serialization;https://github.com/status-im/nim-json-serialization@#a7d815ed92f200f490c95d3cfd722089cc923ce6
metrics;https://github.com/status-im/nim-metrics@#21e99a2e9d9f80e68bef65c80ef781613005fccb
nimcrypto;https://github.com/cheatfate/nimcrypto@#24e006df85927f64916e60511620583b11403178
secp256k1;https://github.com/status-im/nim-secp256k1@#fd173fdff863ce2e211cf64c9a03bc7539fe40b0
serialization;https://github.com/status-im/nim-serialization@#d77417cba6896c26287a68e6a95762e45a1b87e5
stew;https://github.com/status-im/nim-stew@#7184d2424dc3945657884646a72715d494917aad
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
unittest2;https://github.com/status-im/nim-unittest2@#da8398c45cafd5bd7772da1fc96e3924a18d3823
websock;https://github.com/status-im/nim-websock@#691f069b209d372b1240d5ae1f57fb7bbafeaba7
zlib;https://github.com/status-im/nim-zlib@#6a6670afba6b97b29b920340e2641978c05ab4d8

README.md | 192

@@ -5,8 +5,8 @@
<h3 align="center">The <a href="https://nim-lang.org/">Nim</a> implementation of the <a href="https://libp2p.io/">libp2p</a> Networking Stack.</h3>
<p align="center">
<a href="https://github.com/vacp2p/nim-libp2p/actions"><img src="https://github.com/vacp2p/nim-libp2p/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://codecov.io/gh/vacp2p/nim-libp2p"><img src="https://codecov.io/gh/vacp2p/nim-libp2p/branch/master/graph/badge.svg?token=UR5JRQ249W"/></a>
<a href="https://github.com/status-im/nim-libp2p/actions"><img src="https://github.com/status-im/nim-libp2p/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://codecov.io/gh/status-im/nim-libp2p"><img src="https://codecov.io/gh/status-im/nim-libp2p/branch/master/graph/badge.svg?token=UR5JRQ249W"/></a>
</p>
@@ -20,78 +20,111 @@
- [Background](#background)
- [Install](#install)
- [Getting Started](#getting-started)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
- [Development](#development)
- [Contribute](#contribute)
- [Contributors](#contributors)
- [Core Maintainers](#core-maintainers)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
- [License](#license)
## Background
libp2p is a [Peer-to-Peer](https://en.wikipedia.org/wiki/Peer-to-peer) networking stack, with [implementations](https://github.com/libp2p/libp2p#implementations) in multiple languages derived from the same [specifications.](https://github.com/libp2p/specs)
Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It strives to be a modular stack with secure defaults and useful protocols, while remaining open and extensible.
This is a native Nim implementation, using [chronos](https://github.com/status-im/nim-chronos) for asynchronous execution. It's used in production by a few [projects](#users)
Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It's striving to be a modular stack, with sane and secure defaults, useful protocols, while remain open and extensible.
This implementation in native Nim, relying on [chronos](https://github.com/status-im/nim-chronos) for async. It's used in production by a few [projects](#users)
Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p's documentation [**docs.libp2p.io**](https://docs.libp2p.io).
## Install
> The currently supported Nim versions are 2.0 and 2.2.
**Prerequisite**
- [Nim](https://nim-lang.org/install.html)
```
nimble install libp2p
```
You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/). See [examples](./examples) for simple usage patterns.
## Getting Started
Try out the chat example. For this you'll need to have [`go-libp2p-daemon`](examples/go-daemon/daemonapi.md) running. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
You'll find the nim-libp2p documentation [here](https://status-im.github.io/nim-libp2p/docs/).
```bash
nim c -r --threads:on examples/directchat.nim
```
**Go Daemon:**
Please find the installation and usage instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.
## Modules
```bash
./examples/directchat
/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu # replace this hash with the one you were given
```
List of packages modules implemented in nim-libp2p:
You can now chat between the instances!
| Name | Description |
| ---------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
| **Libp2p** | |
| [libp2p](libp2p/switch.nim) | The core of the project |
| [connmanager](libp2p/connmanager.nim) | Connection manager |
| [identify / push identify](libp2p/protocols/identify.nim) | [Identify](https://docs.libp2p.io/concepts/fundamentals/protocols/#identify) protocol |
| [ping](libp2p/protocols/ping.nim) | [Ping](https://docs.libp2p.io/concepts/fundamentals/protocols/#ping) protocol |
| [libp2p-daemon-client](libp2p/daemon/daemonapi.nim) | [go-daemon](https://github.com/libp2p/go-libp2p-daemon) nim wrapper |
| [interop-libp2p](tests/testinterop.nim) | Interop tests |
| **Transports** | |
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
| [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
| **Secure Channels** | |
| [libp2p-secio](libp2p/protocols/secure/secio.nim) | Secio secure channel |
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
| **Stream Multiplexers** | |
| [libp2p-mplex](libp2p/muxers/mplex/mplex.nim) | [MPlex](https://github.com/libp2p/specs/tree/master/mplex) multiplexer |
| [libp2p-yamux](libp2p/muxers/yamux/yamux.nim) | [Yamux](https://docs.libp2p.io/concepts/multiplex/yamux/) multiplexer |
| **Data Types** | |
| [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id) |
| [peer-store](libp2p/peerstore.nim) | ["Address book" of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
| [multiaddress](libp2p/multiaddress.nim) | [Composable network addresses](https://github.com/multiformats/multiaddr) |
| [signed envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
| [routing record](libp2p/routing_record.nim) | [Signed peer dialing informations](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
| [discovery manager](libp2p/discovery/discoverymngr.nim) | Discovery Manager |
| **Utilities** | |
| [libp2p-crypto](libp2p/crypto) | Cryptographic backend |
| [libp2p-crypto-secp256k1](libp2p/crypto/secp.nim) | |
| **Pubsub** | |
| [libp2p-pubsub](libp2p/protocols/pubsub/pubsub.nim) | Pub-Sub generic interface |
| [libp2p-floodsub](libp2p/protocols/pubsub/floodsub.nim) | FloodSub implementation |
| [libp2p-gossipsub](libp2p/protocols/pubsub/gossipsub.nim) | [GossipSub](https://docs.libp2p.io/concepts/publish-subscribe/) implementation |
![Chat example](https://imgur.com/caYRu8K.gif)
## Users
nim-libp2p is used by:
- [Nimbus](https://github.com/status-im/nimbus-eth2), an Ethereum client
- [nwaku](https://github.com/status-im/nwaku), a decentralized messaging application
- [nim-codex](https://github.com/status-im/nim-codex), a decentralized storage application
- (open a pull request if you want to be included here)
## Stability
nim-libp2p has been used in production for over a year in high-stake scenarios, so its core is considered stable.
Some modules are more recent and less stable.
The versioning follows [semver](https://semver.org/), with some additions:
- Some of libp2p procedures are marked as `.public.`, they will remain compatible during each `MAJOR` version
- The rest of the procedures are considered internal, and can change at any `MINOR` version (but remain compatible for each new `PATCH`)
We aim to be compatible at all time with at least 2 Nim `MINOR` versions, currently `1.2 & 1.6`
## Development
Clone the repository and install the dependencies:
Clone and Install dependencies:
```sh
git clone https://github.com/vacp2p/nim-libp2p
git clone https://github.com/status-im/nim-libp2p
cd nim-libp2p
# to use dependencies computed by nimble
nimble install -dy
# OR to install the dependencies versions used in CI
nimble install_pinned
```
You can use `nix develop` to start a shell with Nim and Nimble.
nimble 0.20.1 is required for running `testnative`. At the time of writing, it is not available in nixpkgs: if using `nix develop`, follow up with `nimble install nimble` and use that binary (typically `~/.nimble/bin/nimble`).
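A minimal sketch of that setup, assuming a flake-based dev shell is available in the repository (nixpkgs contents may have changed since):
```sh
# enter the dev shell providing Nim and Nimble
nix develop
# install a newer Nimble (>= 0.20.1) into ~/.nimble/bin
nimble install nimble
# run the native test suite with the freshly installed Nimble
~/.nimble/bin/nimble testnative
```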
### Testing
Run unit tests:
```sh
# run all the unit tests
nimble test
```
**Obs:** Running all tests requires the [`go-libp2p-daemon` to be installed and running](examples/go-daemon/daemonapi.md).
If you only want to run tests that don't require `go-libp2p-daemon`, use:
```
nimble testnative
```
For a list of all available test suites, use:
```
nimble tasks
```
This requires the go daemon to be available. To only run native tests, use `nimble testnative`.
Or use `nimble tasks` to show all available tasks.
### Contribute
@@ -99,36 +132,25 @@ The libp2p implementation in Nim is a work in progress. We welcome contributors
- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
- **Code format**. Code should be formatted with [nph](https://github.com/arnetheduck/nph) and follow the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
- **Join the Conversation**. Connect with other contributors in our [community channel](https://discord.com/channels/1204447718093750272/1351621032263417946). Ask questions, share ideas, get support, and stay informed about the latest updates from the maintainers.
The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
### Contributors
<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
<a href="https://github.com/status-im/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=status-im/nim-libp2p" alt="nim-libp2p contributors"></a>
### Core Maintainers
<table>
<tbody>
<tr>
<td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
<td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
<td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
<td align="center"><a href="https://github.com/Menduist"><img src="https://avatars.githubusercontent.com/u/13471753?v=4?s=100" width="100px;" alt="Tanguy"/><br /><sub><b>Tanguy (Menduist)</b></sub></a></td>
<td align="center"><a href="https://github.com/lchenut"><img src="https://avatars.githubusercontent.com/u/11214565?v=4?s=100" width="100px;" alt="Ludovic"/><br /><sub><b>Ludovic</b></sub></a></td>
<td align="center"><a href="https://github.com/diegomrsantos"><img src="https://avatars.githubusercontent.com/u/7316595?v=4?s=100" width="100px;" alt="Diego"/><br /><sub><b>Diego</b></sub></a></td>
</tr>
</tbody>
</table>
### Compile time flags
Enable quic transport support
```bash
nim c -d:libp2p_quic_support some_file.nim
```
Enable autotls support
```bash
nim c -d:libp2p_autotls_support some_file.nim
```
Enable expensive metrics (ie, metrics with per-peer cardinality):
```bash
nim c -d:libp2p_expensive_metrics some_file.nim
@@ -144,64 +166,6 @@ Specify gossipsub specific topics to measure in the metrics:
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
```
## Modules
List of packages modules implemented in nim-libp2p:
| Name | Description |
| ---------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
| **Libp2p** | |
| [libp2p](libp2p/switch.nim) | The core of the project |
| [connmanager](libp2p/connmanager.nim) | Connection manager |
| [identify / push identify](libp2p/protocols/identify.nim) | [Identify](https://docs.libp2p.io/concepts/fundamentals/protocols/#identify) protocol |
| [ping](libp2p/protocols/ping.nim) | [Ping](https://docs.libp2p.io/concepts/fundamentals/protocols/#ping) protocol |
| [libp2p-daemon-client](libp2p/daemon/daemonapi.nim) | [go-daemon](https://github.com/libp2p/go-libp2p-daemon) nim wrapper |
| [interop-libp2p](tests/testinterop.nim) | Interop tests |
| **Transports** | |
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
| [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
| [libp2p-quic](libp2p/transports/quictransport.nim) | Quic Transport |
| [libp2p-memory](libp2p/transports/memorytransport.nim) | Memory Transport |
| **Secure Channels** | |
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
| **Stream Multiplexers** | |
| [libp2p-mplex](libp2p/muxers/mplex/mplex.nim) | [MPlex](https://github.com/libp2p/specs/tree/master/mplex) multiplexer |
| [libp2p-yamux](libp2p/muxers/yamux/yamux.nim) | [Yamux](https://docs.libp2p.io/concepts/multiplex/yamux/) multiplexer |
| **Data Types** | |
| [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id) |
| [peer-store](libp2p/peerstore.nim) | [Address book of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
| [multiaddress](libp2p/multiaddress.nim) | [Composable network addresses](https://github.com/multiformats/multiaddr) |
| [signed-envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
| [routing-record](libp2p/routing_record.nim) | [Signed peer dialing informations](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
| [discovery manager](libp2p/discovery/discoverymngr.nim) | Discovery Manager |
| **Utilities** | |
| [libp2p-crypto](libp2p/crypto) | Cryptographic backend |
| [libp2p-crypto-secp256k1](libp2p/crypto/secp.nim) | |
| **Pubsub** | |
| [libp2p-pubsub](libp2p/protocols/pubsub/pubsub.nim) | Pub-Sub generic interface |
| [libp2p-floodsub](libp2p/protocols/pubsub/floodsub.nim) | FloodSub implementation |
| [libp2p-gossipsub](libp2p/protocols/pubsub/gossipsub.nim) | [GossipSub](https://docs.libp2p.io/concepts/publish-subscribe/) implementation |
## Users
nim-libp2p is used by:
- [Nimbus](https://github.com/status-im/nimbus-eth2), an Ethereum client
- [nwaku](https://github.com/waku-org/nwaku), a decentralized messaging application
- [nim-codex](https://github.com/codex-storage/nim-codex), a decentralized storage application
- (open a pull request if you want to be included here)
## Stability
nim-libp2p has been used in production for over a year in high-stake scenarios, so its core is considered stable.
Some modules are more recent and less stable.
The versioning follows [semver](https://semver.org/), with some additions:
- Some of libp2p procedures are marked as `.public.`, they will remain compatible during each `MAJOR` version
- The rest of the procedures are considered internal, and can change at any `MINOR` version (but remain compatible for each new `PATCH`)
We aim to be compatible at all time with at least 2 Nim `MINOR` versions, currently `2.0 & 2.2`
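As a rough illustration of that convention (the stand-in `public` pragma below is declared locally for the example and is not the actual nim-libp2p definition):
```nim
# Illustrative sketch only: declare a stand-in annotation pragma named `public`.
template public() {.pragma.}

proc dialExample*() {.public.} =
  ## Procs tagged `.public.` keep their signature for a whole MAJOR version;
  ## everything untagged may change in any MINOR release.
  discard
```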
## License
Licensed and distributed under either of


@@ -4,24 +4,20 @@ if dirExists("nimbledeps/pkgs"):
if dirExists("nimbledeps/pkgs2"):
switch("NimblePath", "nimbledeps/pkgs2")
switch("warningAsError", "UnusedImport:on")
switch("warning", "CaseTransition:off")
switch("warning", "ObservableStores:off")
switch("warning", "LockLevel:off")
--styleCheck:
usages
switch("warningAsError", "UseBase:on")
--styleCheck:
error
--mm:
refc
# reconsider when there's a version-2-2 branch worth testing with as we might switch to orc
--define:chronosStrictException
--styleCheck:usages
if (NimMajor, NimMinor) < (1, 6):
--styleCheck:hint
else:
--styleCheck:error
# Avoid some rare stack corruption while using exceptions with a SEH-enabled
# toolchain: https://github.com/status-im/nimbus-eth2/issues/3121
if defined(windows) and not defined(vcc):
--define:
nimRawSetjmp
--define:nimRawSetjmp
# begin Nimble config (version 1)
when fileExists("nimble.paths"):


@@ -1,5 +1,5 @@
# nim-libp2p examples
In this folder, you'll find the sources of the [nim-libp2p website](https://vacp2p.github.io/nim-libp2p/docs/)
In this folder, you'll find the sources of the [nim-libp2p website](https://status-im.github.io/nim-libp2p/docs/)
We recommend following the tutorials on the website, but feel free to grok the sources here!


@@ -1,48 +1,40 @@
{.used.}
## # Circuit Relay example
##
## Circuit Relay can be used when a node cannot reach another node
## directly, but can reach it through another node (the Relay).
## directly, but can reach it through a another node (the Relay).
##
## That may happen because of NAT, Firewalls, or incompatible transports.
##
## More information [here](https://docs.libp2p.io/concepts/circuit-relay/).
import chronos, stew/byteutils
import libp2p, libp2p/protocols/connectivity/relay/[relay, client]
import libp2p,
libp2p/protocols/connectivity/relay/[relay, client]
# Helper to create a circuit relay node
proc createCircuitRelaySwitch(r: Relay): Switch =
SwitchBuilder
.new()
.withRng(newRng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withTcpTransport()
.withMplex()
.withNoise()
.withCircuitRelay(r)
.build()
SwitchBuilder.new()
.withRng(newRng())
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withTcpTransport()
.withMplex()
.withNoise()
.withCircuitRelay(r)
.build()
proc main() {.async.} =
# Create a custom protocol
let customProtoCodec = "/test"
var proto = new LPProtocol
proto.codec = customProtoCodec
proto.handler = proc(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
var msg = string.fromBytes(await conn.readLp(1024))
echo "1 - Dst Received: ", msg
assert "test1" == msg
await conn.writeLp("test2")
msg = string.fromBytes(await conn.readLp(1024))
echo "2 - Dst Received: ", msg
assert "test3" == msg
await conn.writeLp("test4")
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
proto.handler = proc(conn: Connection, proto: string) {.async.} =
var msg = string.fromBytes(await conn.readLp(1024))
echo "1 - Dst Received: ", msg
assert "test1" == msg
await conn.writeLp("test2")
msg = string.fromBytes(await conn.readLp(1024))
echo "2 - Dst Received: ", msg
assert "test3" == msg
await conn.writeLp("test4")
let
relay = Relay.new()
@@ -64,11 +56,8 @@ proc main() {.async.} =
let
# Create a relay address to swDst using swRel as the relay
addrs = MultiAddress
.init(
$swRel.peerInfo.addrs[0] & "/p2p/" & $swRel.peerInfo.peerId & "/p2p-circuit"
)
.get()
addrs = MultiAddress.init($swRel.peerInfo.addrs[0] & "/p2p/" &
$swRel.peerInfo.peerId & "/p2p-circuit").get()
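# The resulting relay address has the shape (placeholders shown for illustration):
#   /ip4/<relay ip>/tcp/<relay port>/p2p/<relay peer id>/p2p-circuit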
# Connect Dst to the relay
await swDst.connect(swRel.peerInfo.peerId, swRel.peerInfo.addrs)
@@ -77,7 +66,7 @@ proc main() {.async.} =
let rsvp = await clDst.reserve(swRel.peerInfo.peerId, swRel.peerInfo.addrs)
# Src dial Dst using the relay
let conn = await swSrc.dial(swDst.peerInfo.peerId, @[addrs], customProtoCodec)
let conn = await swSrc.dial(swDst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("test1")
var msg = string.fromBytes(await conn.readLp(1024))

View File

@@ -1,13 +1,15 @@
{.used.}
when not (compileOption("threads")):
when not(compileOption("threads")):
{.fatal: "Please, compile this program with the --threads:on option!".}
import strformat, strutils, stew/byteutils, chronos, libp2p
import
strformat, strutils,
stew/byteutils,
chronos,
libp2p
const DefaultAddr = "/ip4/127.0.0.1/tcp/0"
const Help =
"""
const Help = """
Commands: /[?|help|connect|disconnect|exit]
help: Prints this help
connect: dials a remote peer
@@ -15,11 +17,12 @@ const Help =
exit: closes the chat
"""
type Chat = ref object
switch: Switch # a single entry point for dialing and listening to peer
stdinReader: StreamTransport # transport streams between read & write file descriptor
conn: Connection # connection to the other peer
connected: bool # if the node is connected to another peer
type
Chat = ref object
switch: Switch # a single entry point for dialing and listening to peer
stdinReader: StreamTransport # transport streams between read & write file descriptor
conn: Connection # connection to the other peer
connected: bool # if the node is connected to another peer
##
# Stdout helpers, to write the prompt
@@ -38,23 +41,19 @@ proc writeStdout(c: Chat, str: string) =
##
const ChatCodec = "/nim-libp2p/chat/1.0.0"
type ChatProto = ref object of LPProtocol
type
ChatProto = ref object of LPProtocol
proc new(T: typedesc[ChatProto], c: Chat): T =
let chatproto = T()
# create handler for incoming connection
proc handle(stream: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
if c.connected and not c.conn.closed:
c.writeStdout "a chat session is already in progress - refusing incoming peer!"
else:
await c.handlePeer(stream)
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
proc handle(stream: Connection, proto: string) {.async.} =
if c.connected and not c.conn.closed:
c.writeStdout "a chat session is already in progress - refusing incoming peer!"
await stream.close()
else:
await c.handlePeer(stream)
await stream.close()
# assign the new handler
@@ -78,9 +77,9 @@ proc handlePeer(c: Chat, conn: Connection) {.async.} =
strData = await conn.readLp(1024)
str = string.fromBytes(strData)
c.writeStdout $conn.peerId & ": " & $str
except LPStreamEOFError:
defer:
c.writeStdout $conn.peerId & " disconnected"
defer: c.writeStdout $conn.peerId & " disconnected"
await c.conn.close()
c.connected = false
@@ -89,7 +88,10 @@ proc dialPeer(c: Chat, address: string) {.async.} =
let
multiAddr = MultiAddress.init(address).tryGet()
# split the peerId part /p2p/...
peerIdBytes = multiAddr[multiCodec("p2p")].tryGet().protoAddress().tryGet()
peerIdBytes = multiAddr[multiCodec("p2p")]
.tryGet()
.protoAddress()
.tryGet()
remotePeer = PeerId.init(peerIdBytes).tryGet()
# split the wire address
ip4Addr = multiAddr[multiCodec("ip4")].tryGet()
@@ -122,6 +124,7 @@ proc readLoop(c: Chat) {.async.} =
let address = await c.stdinReader.readLine()
if address.len > 0:
await c.dialPeer(address)
elif line.startsWith("/exit"):
if c.connected and c.conn.closed.not:
await c.conn.close()
@@ -168,18 +171,16 @@ proc main() {.async.} =
var switch = SwitchBuilder
.new()
.withRng(rng)
# Give the application RNG
.withRng(rng) # Give the application RNG
.withAddress(localAddress)
.withTcpTransport()
# Use TCP as transport
.withMplex()
# Use Mplex as muxer
.withNoise()
# Use Noise as secure manager
.withTcpTransport() # Use TCP as transport
.withMplex() # Use Mplex as muxer
.withNoise() # Use Noise as secure manager
.build()
let chat = Chat(switch: switch, stdinReader: stdinReader)
let chat = Chat(
switch: switch,
stdinReader: stdinReader)
switch.mount(ChatProto.new(chat))

View File

@@ -1,3 +0,0 @@
{.used.}
import directchat, tutorial_6_game

View File

@@ -1,5 +0,0 @@
{.used.}
import
helloworld, circuitrelay, tutorial_1_connect, tutorial_2_customproto,
tutorial_3_protobuf, tutorial_4_gossipsub, tutorial_5_discovery

View File

@@ -2,7 +2,8 @@ import chronos, nimcrypto, strutils
import ../../libp2p/daemon/daemonapi
import ../hexdump
const PubSubTopic = "test-net"
const
PubSubTopic = "test-net"
proc dumpSubscribedPeers(api: DaemonAPI) {.async.} =
var peers = await api.pubsubListPeers(PubSubTopic)
@@ -36,12 +37,12 @@ proc main() {.async.} =
asyncSpawn monitor(api)
proc pubsubLogger(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async.} =
proc pubsubLogger(api: DaemonAPI,
ticket: PubsubTicket,
message: PubSubMessage): Future[bool] {.async.} =
let msglen = len(message.data)
echo "= Recieved pubsub message with length ",
msglen, " bytes from peer ", message.peer.pretty()
echo "= Recieved pubsub message with length ", msglen,
" bytes from peer ", message.peer.pretty()
echo dumpHex(message.data)
await api.dumpSubscribedPeers()
result = true

View File

@@ -2,16 +2,18 @@ import chronos, nimcrypto, strutils
import ../../libp2p/daemon/daemonapi
## nim c -r --threads:on chat.nim
when not (compileOption("threads")):
when not(compileOption("threads")):
{.fatal: "Please, compile this program with the --threads:on option!".}
const ServerProtocols = @["/test-chat-stream"]
const
ServerProtocols = @["/test-chat-stream"]
type CustomData = ref object
api: DaemonAPI
remotes: seq[StreamTransport]
consoleFd: AsyncFD
serveFut: Future[void]
type
CustomData = ref object
api: DaemonAPI
remotes: seq[StreamTransport]
consoleFd: AsyncFD
serveFut: Future[void]
proc threadMain(wfd: AsyncFD) {.thread.} =
## This procedure performs reading from `stdin` and sends data over
@@ -80,7 +82,7 @@ proc serveThread(udata: CustomData) {.async.} =
relay = true
break
if relay:
echo peer.pretty(), " * ", " [", addresses.join(", "), "]"
echo peer.pretty(), " * ", " [", addresses.join(", "), "]"
else:
echo peer.pretty(), " [", addresses.join(", "), "]"
elif line.startsWith("/exit"):
@@ -93,8 +95,8 @@ proc serveThread(udata: CustomData) {.async.} =
pending.add(item.write(msg))
if len(pending) > 0:
var results = await all(pending)
except CatchableError as err:
echo err.msg
except:
echo getCurrentException().msg
proc main() {.async.} =
var data = new CustomData

View File

@@ -1,43 +1,56 @@
# Table of Contents
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Script](#script)
- [Examples](#examples)
- [Usage](#usage)
- [Example](#example)
- [Getting Started](#getting-started)
# Introduction
This is a libp2p-backed daemon wrapping the functionality of go-libp2p for use in Nim. <br>
For more information about the go daemon, check out [this repository](https://github.com/libp2p/go-libp2p-daemon).
> **Required only** for running the tests.
# Prerequisites
Go version `1.16.0`
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.
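You can check which Go toolchain is currently active before running the build:
```sh
go version
```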
# Installation
Run the build script while having the `go` command pointing to the correct Go version.
```sh
./scripts/build_p2pd.sh
```
`build_p2pd.sh` will not rebuild unless needed. If you already have the newest binary and you want to force the rebuild, use:
```sh
./scripts/build_p2pd.sh -f
```
Or:
```sh
./scripts/build_p2pd.sh --force
# clone and install dependencies
git clone https://github.com/status-im/nim-libp2p
cd nim-libp2p
nimble install
# perform unit tests
nimble test
# update the git submodule to install the go daemon
git submodule update --init --recursive
go version
git clone https://github.com/libp2p/go-libp2p-daemon
cd go-libp2p-daemon
git checkout v0.0.1
go install ./...
cd ..
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the `$GOPATH/bin` directory.
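A quick way to verify the build output (assuming the default `GOPATH` layout):
```sh
ls "$(go env GOPATH)/bin/p2pd"
```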
If you're having issues, head into [our discord](https://discord.com/channels/864066763682218004/1115526869769535629) and ask for assistance.
# Usage
After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
```sh
export PATH="$PATH:$HOME/go/bin"
```
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.
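For example, with bash (adapt the file name if you use a different shell):
```sh
echo 'export PATH="$PATH:$HOME/go/bin"' >> ~/.bashrc
source ~/.bashrc
```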
# Examples
## Example
Examples can be found in the [examples folder](https://github.com/status-im/nim-libp2p/tree/readme/examples/go-daemon)
## Getting Started
Try out the chat example. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
```bash
nim c -r --threads:on examples/directchat.nim
```
This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.
```bash
./examples/directchat
/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu
```
You can now chat between the instances!
![Chat example](https://imgur.com/caYRu8K.gif)

View File

@@ -1,25 +1,26 @@
import chronos, nimcrypto, strutils, os
import ../../libp2p/daemon/daemonapi
const PubSubTopic = "test-net"
const
PubSubTopic = "test-net"
proc main(bn: string) {.async.} =
echo "= Starting P2P node"
var bootnodes = bn.split(",")
var api = await newDaemonApi(
{DHTFull, PSGossipSub, WaitBootstrap}, bootstrapNodes = bootnodes, peersRequired = 1
)
var api = await newDaemonApi({DHTFull, PSGossipSub, WaitBootstrap},
bootstrapNodes = bootnodes,
peersRequired = 1)
var id = await api.identity()
echo "= P2P node ", id.peer.pretty(), " started:"
for item in id.addresses:
echo item
proc pubsubLogger(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async.} =
proc pubsubLogger(api: DaemonAPI,
ticket: PubsubTicket,
message: PubSubMessage): Future[bool] {.async.} =
let msglen = len(message.data)
echo "= Recieved pubsub message with length ",
msglen, " bytes from peer ", message.peer.pretty(), ": "
echo "= Recieved pubsub message with length ", msglen,
" bytes from peer ", message.peer.pretty(), ": "
var strdata = cast[string](message.data)
echo strdata
result = true

View File

@@ -1,7 +1,5 @@
{.used.}
import chronos # an efficient library for async
import stew/byteutils # various utils
import chronos # an efficient library for async
import stew/byteutils # various utils
import libp2p
##
@@ -9,23 +7,20 @@ import libp2p
##
const TestCodec = "/test/proto/1.0.0" # custom protocol string identifier
type TestProto = ref object of LPProtocol # declare a custom protocol
type
TestProto = ref object of LPProtocol # declare a custom protocol
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
await conn.writeLp("Roger p2p!")
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
# We must close the connections ourselves when we're done with it
await conn.close()
return T.new(codecs = @[TestCodec], handler = handle)
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
await conn.writeLp("Roger p2p!")
# We must close the connections ourselves when we're done with it
await conn.close()
return T(codecs: @[TestCodec], handler: handle)
##
# Helper to create a switch/node
@@ -33,16 +28,11 @@ proc new(T: typedesc[TestProto]): T =
proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
var switch = SwitchBuilder
.new()
.withRng(rng)
# Give the application RNG
.withAddress(ma)
# Our local address(es)
.withTcpTransport()
# Use TCP as transport
.withMplex()
# Use Mplex as muxer
.withNoise()
# Use Noise as secure manager
.withRng(rng) # Give the application RNG
.withAddress(ma) # Our local address(es)
.withTcpTransport() # Use TCP as transport
.withMplex() # Use Mplex as muxer
.withNoise() # Use Noise as secure manager
.build()
result = switch
@@ -50,7 +40,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
# The actual application
##
proc main() {.async.} =
proc main() {.async, gcsafe.} =
let
rng = newRng() # Single random number source for the whole application
# port 0 will take a random available port
@@ -83,8 +73,7 @@ proc main() {.async.} =
# use the second node to dial the first node
# using the first node peerid and address
# and specify our custom protocol codec
let conn =
await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
# conn is now a fully setup connection, we talk directly to the node1 custom protocol handler
await conn.writeLp("Hello p2p!") # writeLp send a length prefixed buffer over the wire
@@ -95,7 +84,6 @@ proc main() {.async.} =
# We must close the connection ourselves when we're done with it
await conn.close()
await allFutures(switch1.stop(), switch2.stop())
# close connections and shutdown all transports
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
waitFor(main())

View File

@@ -35,24 +35,26 @@ proc dumpHex*(pbytes: pointer, nbytes: int, items = 1, ascii = true): string =
var asciiText = ""
while i < nbytes:
if i %% 16 == 0:
result = result & toHex(cast[BiggestInt](slider), sizeof(BiggestInt) * 2) & ": "
result = result & toHex(cast[BiggestInt](slider),
sizeof(BiggestInt) * 2) & ": "
var k = 0
while k < items:
var ch = cast[ptr char](cast[uint](slider) + k.uint)[]
if ord(ch) > 31 and ord(ch) < 127:
asciiText &= ch
else:
asciiText &= "."
if ord(ch) > 31 and ord(ch) < 127: asciiText &= ch else: asciiText &= "."
inc(k)
case items
case items:
of 1:
result = result & toHex(cast[BiggestInt](cast[ptr uint8](slider)[]), hexSize)
result = result & toHex(cast[BiggestInt](cast[ptr uint8](slider)[]),
hexSize)
of 2:
result = result & toHex(cast[BiggestInt](cast[ptr uint16](slider)[]), hexSize)
result = result & toHex(cast[BiggestInt](cast[ptr uint16](slider)[]),
hexSize)
of 4:
result = result & toHex(cast[BiggestInt](cast[ptr uint32](slider)[]), hexSize)
result = result & toHex(cast[BiggestInt](cast[ptr uint32](slider)[]),
hexSize)
of 8:
result = result & toHex(cast[BiggestInt](cast[ptr uint64](slider)[]), hexSize)
result = result & toHex(cast[BiggestInt](cast[ptr uint64](slider)[]),
hexSize)
else:
raise newException(ValueError, "Wrong items size!")
result = result & " "

View File

@@ -3,4 +3,4 @@
Welcome to the nim-libp2p documentation!
Here, you'll find [tutorials](tutorial_1_connect.md) to help you get started, as well as
the [full reference](https://vacp2p.github.io/nim-libp2p/master/libp2p.html).
the [full reference](https://status-im.github.io/nim-libp2p/master/libp2p.html).

View File

@@ -1,4 +1,3 @@
{.used.}
## # Simple ping tutorial
##
## Hi all, welcome to the first nim-libp2p tutorial!
@@ -35,20 +34,15 @@ import libp2p/protocols/ping
## [chronos](https://github.com/status-im/nim-chronos) the asynchronous framework used by `nim-libp2p`
##
## Next, we'll create a helper procedure to create our switches. A switch needs a bit of configuration, and it will be easier to do this configuration only once:
## Next, we'll create an helper procedure to create our switches. A switch needs a bit of configuration, and it will be easier to do this configuration only once:
proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
var switch = SwitchBuilder
.new()
.withRng(rng)
# Give the application RNG
.withAddress(ma)
# Our local address(es)
.withTcpTransport()
# Use TCP as transport
.withMplex()
# Use Mplex as muxer
.withNoise()
# Use Noise as secure manager
.withRng(rng) # Give the application RNG
.withAddress(ma) # Our local address(es)
.withTcpTransport() # Use TCP as transport
.withMplex() # Use Mplex as muxer
.withNoise() # Use Noise as secure manager
.build()
return switch
@@ -59,11 +53,11 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
##
## Let's now start to create our main procedure:
proc main() {.async.} =
proc main() {.async, gcsafe.} =
let
rng = newRng()
localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
pingProtocol = Ping.new(rng = rng)
pingProtocol = Ping.new(rng=rng)
## We created some variables that we'll need for the rest of the application: the global `rng` instance, our `localAddress`, and an instance of the `Ping` protocol.
## The address is in the [MultiAddress](https://github.com/multiformats/multiaddr) format. The port `0` means "take any port available".
##
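## For instance, once the switches are started, the `0` port is replaced by a
## concrete one, e.g. `/ip4/0.0.0.0/tcp/55123` (an illustrative value; the OS
## picks the actual port).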
@@ -83,9 +77,8 @@ proc main() {.async.} =
## Now that we've started the nodes, they are listening for incoming peers.
## We can find out which port was attributed, and the resulting local addresses, by using `switch1.peerInfo.addrs`.
##
## We'll **dial** the first switch from the second one, by specifying its **Peer ID**, its **MultiAddress** and the **`Ping` protocol codec**:
let conn =
await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, PingCodec)
## We'll **dial** the first switch from the second one, by specifying it's **Peer ID**, it's **MultiAddress** and the **`Ping` protocol codec**:
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, PingCodec)
## We now have a `Ping` connection setup between the second and the first switch, we can use it to actually ping the node:
# ping the other node and echo the ping duration
echo "ping: ", await pingProtocol.ping(conn)
@@ -93,8 +86,7 @@ proc main() {.async.} =
# We must close the connection ourselves when we're done with it
await conn.close()
## And that's it! Just a little bit of cleanup: shutting down the switches, waiting for them to stop, and we'll call our `main` procedure:
await allFutures(switch1.stop(), switch2.stop())
# close connections and shutdown all transports
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
waitFor(main())

View File

@@ -1,4 +1,3 @@
{.used.}
## # Custom protocol in libp2p
##
## In the [previous tutorial](tutorial_1_connect.md), we've looked at how to create a simple ping program using the `nim-libp2p`.
@@ -19,24 +18,19 @@ type TestProto = ref object of LPProtocol
## We've set a [protocol ID](https://docs.libp2p.io/concepts/protocols/#protocol-ids), and created a custom `LPProtocol`. In a more complex protocol, we could use this structure to store interesting variables.
##
## A protocol generally has two parts: a handling/server part, and a dialing/client part.
## These two parts can be identical, but in our trivial protocol, the server will wait for a message from the client, and the client will send a message, so we have to handle the two cases separately.
## A protocol generally has two part: and handling/server part, and a dialing/client part.
## Theses two parts can be identical, but in our trivial protocol, the server will wait for a message from the client, and the client will send a message, so we have to handle the two cases separately.
##
## Let's start with the server part:
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
# Read up to 1024 bytes from this connection, and transform them into
# a string
try:
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await conn.close()
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
# We must close the connections ourselves when we're done with it
await conn.close()
return T.new(codecs = @[TestCodec], handler = handle)
@@ -47,31 +41,29 @@ proc new(T: typedesc[TestProto]): T =
proc hello(p: TestProto, conn: Connection) {.async.} =
await conn.writeLp("Hello p2p!")
## Again, pretty straightforward, we just send a message on the connection.
## Again, pretty straight-forward, we just send a message on the connection.
##
## We can now create our main procedure:
proc main() {.async.} =
proc main() {.async, gcsafe.} =
let
rng = newRng()
testProto = TestProto.new()
switch1 = newStandardSwitch(rng = rng)
switch2 = newStandardSwitch(rng = rng)
switch1 = newStandardSwitch(rng=rng)
switch2 = newStandardSwitch(rng=rng)
switch1.mount(testProto)
await switch1.start()
await switch2.start()
let conn =
await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
await testProto.hello(conn)
# We must close the connection ourselves when we're done with it
await conn.close()
await allFutures(switch1.stop(), switch2.stop())
# close connections and shutdown all transports
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
## This is very similar to the first tutorial's `main`, the only noteworthy difference is that we use `newStandardSwitch`, which is similar to the `createSwitch` of the first tutorial, but is bundled directly in libp2p
##

View File

@@ -1,4 +1,3 @@
{.used.}
## # Protobuf usage
##
## In the [previous tutorial](tutorial_2_customproto.md), we created a simple "ping" protocol.
@@ -58,8 +57,8 @@ proc decode(_: type Metric, buf: seq[byte]): Result[Metric, ProtoError] =
#
# We are just checking the error, and ignoring whether the value
# is present or not (default values are valid).
discard ?pb.getField(1, res.name)
discard ?pb.getField(2, res.value)
discard ? pb.getField(1, res.name)
discard ? pb.getField(2, res.value)
ok(res)
proc encode(m: MetricList): ProtoBuffer =
@@ -73,10 +72,10 @@ proc decode(_: type MetricList, buf: seq[byte]): Result[MetricList, ProtoError]
res: MetricList
metrics: seq[seq[byte]]
let pb = initProtoBuffer(buf)
discard ?pb.getRepeatedField(1, metrics)
discard ? pb.getRepeatedField(1, metrics)
for metric in metrics:
res.metrics &= ?Metric.decode(metric)
res.metrics &= ? Metric.decode(metric)
ok(res)
## ## Results instead of exceptions
@@ -103,24 +102,18 @@ proc decode(_: type MetricList, buf: seq[byte]): Result[MetricList, ProtoError]
## ## Creating the protocol
## We'll next create a protocol, like in the last tutorial, to request these metrics from our host
type
MetricCallback = proc(): Future[MetricList] {.raises: [], gcsafe.}
MetricCallback = proc: Future[MetricList] {.raises: [], gcsafe.}
MetricProto = ref object of LPProtocol
metricGetter: MetricCallback
proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
var res: MetricProto
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
await conn.writeLp(asProtobuf.buffer)
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await conn.close()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
await conn.writeLp(asProtobuf.buffer)
await conn.close()
res = MetricProto.new(@["/metric-getter/1.0.0"], handle)
res.metricGetter = cb
@@ -133,21 +126,21 @@ proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
return MetricList.decode(protobuf).tryGet()
## We can now create our main procedure:
proc main() {.async.} =
proc main() {.async, gcsafe.} =
let rng = newRng()
proc randomMetricGenerator(): Future[MetricList] {.async.} =
proc randomMetricGenerator: Future[MetricList] {.async.} =
let metricCount = rng[].generate(uint32) mod 16
for i in 0 ..< metricCount + 1:
result.metrics.add(
Metric(name: "metric_" & $i, value: float(rng[].generate(uint16)) / 1000.0)
)
result.metrics.add(Metric(
name: "metric_" & $i,
value: float(rng[].generate(uint16)) / 1000.0
))
return result
let
metricProto1 = MetricProto.new(randomMetricGenerator)
metricProto2 = MetricProto.new(randomMetricGenerator)
switch1 = newStandardSwitch(rng = rng)
switch2 = newStandardSwitch(rng = rng)
switch1 = newStandardSwitch(rng=rng)
switch2 = newStandardSwitch(rng=rng)
switch1.mount(metricProto1)
@@ -155,17 +148,14 @@ proc main() {.async.} =
await switch2.start()
let
conn = await switch2.dial(
switch1.peerInfo.peerId, switch1.peerInfo.addrs, metricProto2.codecs
)
conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, metricProto2.codecs)
metrics = await metricProto2.fetch(conn)
await conn.close()
for metric in metrics.metrics:
echo metric.name, " = ", metric.value
await allFutures(switch1.stop(), switch2.stop())
# close connections and shutdown all transports
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
waitFor(main())

View File

@@ -1,4 +1,3 @@
{.used.}
## # GossipSub
##
## In this tutorial, we'll build a simple GossipSub network
@@ -8,7 +7,7 @@
## and allows balancing between latency, bandwidth usage,
## privacy and attack resistance.
##
## You'll find a good explanation of how GossipSub works
## You'll find a good explanation on how GossipSub works
## [here.](https://docs.libp2p.io/concepts/publish-subscribe/) There are a lot
## of parameters you can tweak to adjust how GossipSub behaves but here we'll
## use the sane defaults shipped with libp2p.
@@ -41,8 +40,8 @@ proc encode(m: Metric): ProtoBuffer =
proc decode(_: type Metric, buf: seq[byte]): Result[Metric, ProtoError] =
var res: Metric
let pb = initProtoBuffer(buf)
discard ?pb.getField(1, res.name)
discard ?pb.getField(2, res.value)
discard ? pb.getField(1, res.name)
discard ? pb.getField(2, res.value)
ok(res)
proc encode(m: MetricList): ProtoBuffer =
@@ -57,11 +56,11 @@ proc decode(_: type MetricList, buf: seq[byte]): Result[MetricList, ProtoError]
res: MetricList
metrics: seq[seq[byte]]
let pb = initProtoBuffer(buf)
discard ?pb.getRepeatedField(1, metrics)
discard ? pb.getRepeatedField(1, metrics)
for metric in metrics:
res.metrics &= ?Metric.decode(metric)
?pb.getRequiredField(2, res.hostname)
res.metrics &= ? Metric.decode(metric)
? pb.getRequiredField(2, res.hostname)
ok(res)
## This is exactly like the previous structure, except that we added
@@ -74,13 +73,11 @@ type Node = tuple[switch: Switch, gossip: GossipSub, hostname: string]
proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
# This procedure will handle one of the nodes of the network
node.gossip.addValidator(
["metrics"],
node.gossip.addValidator(["metrics"],
proc(topic: string, message: Message): Future[ValidationResult] {.async.} =
let decoded = MetricList.decode(message.data)
if decoded.isErr:
return ValidationResult.Reject
return ValidationResult.Accept,
if decoded.isErr: return ValidationResult.Reject
return ValidationResult.Accept
)
# This "validator" will attach to the `metrics` topic and make sure
# that every message in this topic is valid. This allows us to stop
@@ -90,25 +87,23 @@ proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
# `John` will be responsible for logging the metrics; the rest of the nodes
# will just forward them in the network
if node.hostname == "John":
node.gossip.subscribe(
"metrics",
proc(topic: string, data: seq[byte]) {.async.} =
let m = MetricList.decode(data).expect("metric can be decoded")
echo m
,
node.gossip.subscribe("metrics",
proc (topic: string, data: seq[byte]) {.async.} =
echo MetricList.decode(data).tryGet()
)
else:
node.gossip.subscribe("metrics", nil)
# Create random metrics 10 times and broadcast them
for _ in 0 ..< 10:
for _ in 0..<10:
await sleepAsync(500.milliseconds)
var metricList = MetricList(hostname: node.hostname)
let metricCount = rng[].generate(uint32) mod 4
for i in 0 ..< metricCount + 1:
metricList.metrics.add(
Metric(name: "metric_" & $i, value: float(rng[].generate(uint16)) / 1000.0)
)
metricList.metrics.add(Metric(
name: "metric_" & $i,
value: float(rng[].generate(uint16)) / 1000.0
))
discard await node.gossip.publish("metrics", encode(metricList).buffer)
await node.switch.stop()
@@ -116,13 +111,13 @@ proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
## For our main procedure, we'll create a few nodes, and connect them together.
## Note that they are not all interconnected, but GossipSub will take care of
## broadcasting to the full network nonetheless.
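## (Concretely, in the loop below each node only dials the neighbours at
## indices i - 1, i + 1 and i + 2, when those exist.)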
proc main() {.async.} =
proc main {.async.} =
let rng = newRng()
var nodes: seq[Node]
for hostname in ["John", "Walter", "David", "Thuy", "Amy"]:
let
switch = newStandardSwitch(rng = rng)
switch = newStandardSwitch(rng=rng)
gossip = GossipSub.init(switch = switch, triggerSelf = true)
switch.mount(gossip)
await switch.start()
@@ -132,12 +127,11 @@ proc main() {.async.} =
for index, node in nodes:
# Connect to a few neighbors
for otherNodeIdx in index - 1 .. index + 2:
if otherNodeIdx notin 0 ..< nodes.len or otherNodeIdx == index:
continue
if otherNodeIdx notin 0 ..< nodes.len or otherNodeIdx == index: continue
let otherNode = nodes[otherNodeIdx]
await node.switch.connect(
otherNode.switch.peerInfo.peerId, otherNode.switch.peerInfo.addrs
)
otherNode.switch.peerInfo.peerId,
otherNode.switch.peerInfo.addrs)
var allFuts: seq[Future[void]]
for node in nodes:
@@ -159,8 +153,8 @@ waitFor(main())
## This is John receiving & logging everyone's metrics.
##
## ## Going further
## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
## and [topic params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
## and [topic params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
## you can achieve very different properties.
##
## Also see reports for [GossipSub v1.1](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4)

View File

@@ -1,9 +1,8 @@
{.used.}
## # Discovery Manager
##
## In the [previous tutorial](tutorial_4_gossipsub.md), we built a custom protocol using [protobuf](https://developers.google.com/protocol-buffers) and
## spread information (some metrics) on the network using gossipsub.
## For this tutorial, on the other hand, we'll go back to a simple example
## For this tutorial, on the other hand, we'll go back on a simple example
## we'll try to discover specific peers to greet on the network.
##
## First, as usual, we import the dependencies:
@@ -21,30 +20,22 @@ import libp2p/discovery/discoverymngr
##
## Note that other discovery methods such as [Kademlia](https://github.com/libp2p/specs/blob/master/kad-dht/README.md) or [discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md) exist.
proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
SwitchBuilder
.new()
.withRng(newRng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withTcpTransport()
.withYamux()
.withNoise()
.withRendezVous(rdv)
.build()
SwitchBuilder.new()
.withRng(newRng())
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withTcpTransport()
.withYamux()
.withNoise()
.withRendezVous(rdv)
.build()
# Create a really simple protocol to log one message received then close the stream
const DumbCodec = "/dumb/proto/1.0.0"
type DumbProto = ref object of LPProtocol
proc new(T: typedesc[DumbProto], nodeNumber: int): T =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await conn.close()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
await conn.close()
return T.new(codecs = @[DumbCodec], handler = handle)
## ## Bootnodes
@@ -58,7 +49,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
## (rendezvous in this case) as a bootnode. For this example, we'll
## create a bootnode, and then every peer will advertise itself on the
## bootnode, and use it to find other peers
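## (In the code below, the bootnode is simply another switch built with the
## same `createSwitch` helper, so it also runs the rendezvous protocol.)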
proc main() {.async.} =
proc main() {.async, gcsafe.} =
let bootNode = createSwitch()
await bootNode.start()
@@ -67,7 +58,7 @@ proc main() {.async.} =
switches: seq[Switch] = @[]
discManagers: seq[DiscoveryManager] = @[]
for i in 0 .. 5:
for i in 0..5:
let rdv = RendezVous.new()
# Create a remote future to await at the end of the program
let switch = createSwitch(rdv)
@@ -102,7 +93,7 @@ proc main() {.async.} =
# Use the discovery manager to find peers on the OddClub topic to greet them
let queryOddClub = dm.request(RdvNamespace("OddClub"))
for _ in 0 .. 2:
for _ in 0..2:
let
# getPeer gives you a PeerAttribute containing information about the peer.
res = await queryOddClub.getPeer()
@@ -118,7 +109,7 @@ proc main() {.async.} =
# Maybe it was because he wanted to join the EvenGang
let queryEvenGang = dm.request(RdvNamespace("EvenGang"))
for _ in 0 .. 2:
for _ in 0..2:
let
res = await queryEvenGang.getPeer()
conn = await newcomer.dial(res[PeerId], res.getAll(MultiAddress), DumbCodec)

View File

@@ -1,4 +1,3 @@
{.used.}
## # Tron example
##
## In this tutorial, we will create a video game based on libp2p, using
@@ -52,7 +51,7 @@ proc new(_: type[Game]): Game =
tickTime: -3.0, # 3 seconds of "warm-up" time
localPlayer: Player(x: 4, y: 16, currentDir: 3, nextDir: 3, color: 8),
remotePlayer: Player(x: 27, y: 16, currentDir: 1, nextDir: 1, color: 12),
peerFound: newFuture[Connection](),
peerFound: newFuture[Connection]()
)
for pos in 0 .. result.gameMap.high:
if pos mod mapSize in [0, mapSize - 1] or pos div mapSize in [0, mapSize - 1]:
@@ -82,8 +81,7 @@ proc update(g: Game, dt: float32) =
# This is a hacky way to make this happen
waitFor(sleepAsync(1.milliseconds))
# Don't do anything if we are still waiting for an opponent
if not (g.peerFound.finished()) or isNil(g.tickFinished):
return
if not(g.peerFound.finished()) or isNil(g.tickFinished): return
g.tickTime += dt
# Update the wanted direction, making sure we can't go backward
@@ -100,8 +98,7 @@ proc tick(g: Game, p: Player) =
# Move player and check if he lost
p.x += directions[p.currentDir][1]
p.y += directions[p.currentDir][2]
if g.gameMap[p.y * mapSize + p.x] != 0:
p.lost = true
if g.gameMap[p.y * mapSize + p.x] != 0: p.lost = true
g.gameMap[p.y * mapSize + p.x] = p.color
proc mainLoop(g: Game, peer: Connection) {.async.} =
@@ -126,23 +123,16 @@ proc draw(g: Game) =
for pos, color in g.gameMap:
setColor(color)
boxFill(pos mod 32 * 4, pos div 32 * 4, 4, 4)
let text =
if not (g.peerFound.finished()):
"Matchmaking.."
elif g.tickTime < -1.5:
"Welcome to Etron"
elif g.tickTime < 0.0:
"- " & $(int(abs(g.tickTime) / 0.5) + 1) & " -"
elif g.remotePlayer.lost and g.localPlayer.lost:
"DEUCE"
elif g.localPlayer.lost:
"YOU LOOSE"
elif g.remotePlayer.lost:
"YOU WON"
else:
""
let text = if not(g.peerFound.finished()): "Matchmaking.."
elif g.tickTime < -1.5: "Welcome to Etron"
elif g.tickTime < 0.0: "- " & $(int(abs(g.tickTime) / 0.5) + 1) & " -"
elif g.remotePlayer.lost and g.localPlayer.lost: "DEUCE"
elif g.localPlayer.lost: "YOU LOOSE"
elif g.remotePlayer.lost: "YOU WON"
else: ""
printc(text, screenWidth div 2, screenHeight div 2)
## ## Matchmaking
## To find an opponent, we will broadcast our address on a
## GossipSub topic, and wait for someone to connect to us.
@@ -153,27 +143,20 @@ proc draw(g: Game) =
## peer know that we are available, check that he is also available,
## and launch the game.
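## (In the handler below, each side sends a literal "ok" and expects one back;
## the first stream where both acknowledgements succeed becomes the game
## connection.)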
proc new(T: typedesc[GameProto], g: Game): T =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
defer:
await conn.closeWithEof()
try:
if g.peerFound.finished or g.hasCandidate:
await conn.close()
return
g.hasCandidate = true
await conn.writeLp("ok")
if "ok" != string.fromBytes(await conn.readLp(1024)):
g.hasCandidate = false
return
g.peerFound.complete(conn)
# The handler of a protocol must wait for the stream to
# be finished before returning
await conn.join()
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
defer: await conn.closeWithEof()
if g.peerFound.finished or g.hasCandidate:
await conn.close()
return
g.hasCandidate = true
await conn.writeLp("ok")
if "ok" != string.fromBytes(await conn.readLp(1024)):
g.hasCandidate = false
return
g.peerFound.complete(conn)
# The handler of a protocol must wait for the stream to
# be finished before returning
await conn.join()
return T.new(codecs = @["/tron/1.0.0"], handler = handle)
proc networking(g: Game) {.async.} =
@@ -181,10 +164,9 @@ proc networking(g: Game) {.async.} =
# the Discovery examples combined
let
rdv = RendezVous.new()
switch = SwitchBuilder
.new()
switch = SwitchBuilder.new()
.withRng(newRng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withTcpTransport()
.withYamux()
.withNoise()
@@ -192,7 +174,9 @@ proc networking(g: Game) {.async.} =
.build()
dm = DiscoveryManager()
gameProto = GameProto.new(g)
gossip = GossipSub.init(switch = switch, triggerSelf = false)
gossip = GossipSub.init(
switch = switch,
triggerSelf = false)
dm.add(RendezVousInterface.new(rdv))
switch.mount(gossip)
@@ -200,11 +184,10 @@ proc networking(g: Game) {.async.} =
gossip.subscribe(
"/tron/matchmaking",
proc(topic: string, data: seq[byte]) {.async.} =
proc (topic: string, data: seq[byte]) {.async.} =
# If we are still looking for an opponent,
# try to match anyone broadcasting its address
if g.peerFound.finished or g.hasCandidate:
return
# try to match anyone broadcasting it's address
if g.peerFound.finished or g.hasCandidate: return
g.hasCandidate = true
try:
@@ -220,12 +203,11 @@ proc networking(g: Game) {.async.} =
# We are "player 2"
swap(g.localPlayer, g.remotePlayer)
except CatchableError as exc:
discard,
discard
)
await switch.start()
defer:
await switch.stop()
defer: await switch.stop()
# As explained in the last tutorial, we need a bootnode to be able
# to find peers. We could use any libp2p running rendezvous (or any
@@ -261,8 +243,7 @@ proc networking(g: Game) {.async.} =
# We now wait for someone to connect to us (or for us to connect to someone)
let peerConn = await g.peerFound
defer:
await peerConn.closeWithEof()
defer: await peerConn.closeWithEof()
await g.mainLoop(peerConn)
@@ -271,14 +252,7 @@ let
netFut = networking(game)
nico.init("Status", "Tron")
nico.createWindow("Tron", mapSize * 4, mapSize * 4, 4, false)
nico.run(
proc() =
discard,
proc(dt: float32) =
game.update(dt),
proc() =
game.draw(),
)
nico.run(proc = discard, proc(dt: float32) = game.update(dt), proc = game.draw())
waitFor(netFut.cancelAndWait())
## And that's it! If you want to run this code locally, the simplest way is to use the

flake.lock generated
View File

@@ -1,27 +0,0 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1752620740,
"narHash": "sha256-f3pO+9lg66mV7IMmmIqG4PL3223TYMlnlw+pnpelbss=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "32a4e87942101f1c9f9865e04dc3ddb175f5f32e",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-25.05",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

View File

@@ -1,34 +0,0 @@
{
description = "nim-libp2p dev shell flake";
nixConfig = {
extra-substituters = [ "https://nix-cache.status.im/" ];
extra-trusted-public-keys = [ "nix-cache.status.im-1:x/93lOfLU+duPplwMSBR+OlY4+mo+dCN7n0mr4oPwgY=" ];
};
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
};
outputs = { self, nixpkgs }:
let
stableSystems = [
"x86_64-linux" "aarch64-linux" "armv7a-linux"
"x86_64-darwin" "aarch64-darwin"
"x86_64-windows"
];
forEach = nixpkgs.lib.genAttrs;
forAllSystems = forEach stableSystems;
pkgsFor = forEach stableSystems (
system: import nixpkgs { inherit system; }
);
in rec {
devShells = forAllSystems (system: {
default = pkgsFor.${system}.mkShell {
nativeBuildInputs = with pkgsFor.${system}; [
nim-2_2 nimble openssl.dev
];
};
});
};
}

View File

@@ -1,5 +0,0 @@
{
"opRetro": {
"projectId": "0xc9561ba3e4eca5483b40f8b1a254a73c91fefe4f8aee32dc20c0d96dcf33fe80"
}
}

View File

@@ -1,19 +0,0 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:latest as builder
WORKDIR /workspace
COPY .pinned libp2p.nimble nim-libp2p/
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev
RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
COPY . nim-libp2p/
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs2 --mm:refc -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./interop/hole-punching/hole_punching.nim
FROM --platform=linux/amd64 debian:bullseye-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2 libssl-dev
COPY --from=builder /workspace/nim-libp2p/hole-punching-tests /usr/bin/hole-punch-client
ENV RUST_BACKTRACE=1

View File

@@ -1,138 +0,0 @@
import std/[os, options, strformat, sequtils]
import redis
import chronos, chronicles
import
../../libp2p/[
builders,
switch,
multicodec,
observedaddrmanager,
services/hpservice,
services/autorelayservice,
protocols/connectivity/autonat/client as aclient,
protocols/connectivity/relay/client as rclient,
protocols/connectivity/relay/relay,
protocols/connectivity/autonat/service,
protocols/ping,
]
import ../../tests/[stubs/autonatclientstub, errorhelpers]
logScope:
topics = "hp interop node"
proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
let rng = newRng()
var builder = SwitchBuilder
.new()
.withRng(rng)
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
.withTcpTransport({ServerFlags.TcpNoDelay})
.withYamux()
.withAutonat()
.withNoise()
if hpService != nil:
builder = builder.withServices(@[hpService])
if r != nil:
builder = builder.withCircuitRelay(r)
let s = builder.build()
s.mount(Ping.new(rng = rng))
return s
proc main() {.async.} =
let relayClient = RelayClient.new()
let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
autonatClientStub.answer = NotReachable
let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
let hpservice = HPService.new(autonatService, autoRelayService)
let
isListener = getEnv("MODE") == "listen"
switch = createSwitch(relayClient, hpservice)
auxSwitch = createSwitch()
redisClient = open("redis", 6379.Port)
debug "Connected to redis"
await switch.start()
await auxSwitch.start()
let relayAddr =
try:
redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
except Exception as e:
raise newException(CatchableError, e.msg)
debug "All relay addresses", relayAddr
# This is necessary to make the autonat service work. It will ask this peer for our reachability, which the autonat
# client stub will answer with NotReachable.
await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)
# Wait for autonat to be NotReachable
while autonatService.networkReachability != NetworkReachability.NotReachable:
await sleepAsync(100.milliseconds)
# This will trigger the autonat relay service to make a reservation.
let relayMA = MultiAddress.init(relayAddr[1]).tryGet()
try:
debug "Dialing relay...", relayMA
let relayId = await switch.connect(relayMA).wait(30.seconds)
debug "Connected to relay", relayId
except AsyncTimeoutError as e:
raise newException(CatchableError, "Connection to relay timed out: " & e.msg, e)
# Wait for our relay address to be published
while not switch.peerInfo.addrs.anyIt(it.contains(multiCodec("p2p-circuit")).tryGet()):
await sleepAsync(100.milliseconds)
if isListener:
let listenerPeerId = switch.peerInfo.peerId
discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
debug "Pushed listener client peer id to redis", listenerPeerId
# Nothing to do anymore, wait to be killed
await sleepAsync(2.minutes)
else:
let listenerId =
try:
PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
except Exception as e:
raise newException(CatchableError, "Exception init peer: " & e.msg, e)
debug "Got listener peer id", listenerId
let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
debug "Dialing listener relay address", listenerRelayAddr
await switch.connect(listenerId, @[listenerRelayAddr])
# wait for hole-punching to complete in the background
await sleepAsync(5000.milliseconds)
let conn = switch.connManager.selectMuxer(listenerId).connection
let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
let delay = await Ping.new().ping(channel)
await allFuturesThrowing(
channel.close(), conn.close(), switch.stop(), auxSwitch.stop()
)
echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
try:
proc mainAsync(): Future[string] {.async.} =
# mainAsync wraps main and returns some value, as otherwise
# 'waitFor(fut)' has no type (or is ambiguous)
await main()
return "done"
discard waitFor(mainAsync().wait(4.minutes))
except AsyncTimeoutError as e:
error "Program execution timed out", description = e.msg
quit(-1)
except CatchableError as e:
error "Unexpected error", description = e.msg
quit(-1)

View File

@@ -1,7 +0,0 @@
{
"id": "nim-libp2p-head",
"containerImageID": "nim-libp2p-head",
"transports": [
"tcp"
]
}

View File

@@ -1,18 +0,0 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:latest as builder
WORKDIR /app
COPY .pinned libp2p.nimble nim-libp2p/
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev
RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
COPY . nim-libp2p/
RUN \
cd nim-libp2p && \
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs2 -p:nim-libp2p --mm:refc -d:libp2p_quic_support -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./interop/transport/main.nim
ENTRYPOINT ["/app/nim-libp2p/interop/transport/main"]

View File

@@ -1,113 +0,0 @@
import std/[os, strutils, sequtils], chronos, redis, serialization, json_serialization
import ../../libp2p/[builders, protocols/ping, transports/wstransport]
type ResultJson = object
handshakePlusOneRTTMillis: float
pingRTTMilllis: float
let testTimeout =
try:
seconds(parseInt(getEnv("test_timeout_seconds")))
except CatchableError:
3.minutes
proc main() {.async.} =
let
transport = getEnv("transport")
muxer = getEnv("muxer")
secureChannel = getEnv("security")
isDialer = getEnv("is_dialer") == "true"
envIp = getEnv("ip", "0.0.0.0")
ip =
# nim-libp2p doesn't do snazzy ip expansion
if envIp == "0.0.0.0":
block:
let addresses =
getInterfaces().filterIt(it.name == "eth0").mapIt(it.addresses)
if addresses.len < 1 or addresses[0].len < 1:
quit "Can't find local ip!"
($addresses[0][0].host).split(":")[0]
else:
envIp
redisAddr = getEnv("redis_addr", "redis:6379").split(":")
# using synchronous redis because async redis is based on
# asyncdispatch instead of chronos
redisClient = open(redisAddr[0], Port(parseInt(redisAddr[1])))
switchBuilder = SwitchBuilder.new()
case transport
of "tcp":
discard switchBuilder.withTcpTransport().withAddress(
MultiAddress.init("/ip4/" & ip & "/tcp/0").tryGet()
)
of "quic-v1":
discard switchBuilder.withQuicTransport().withAddress(
MultiAddress.init("/ip4/" & ip & "/udp/0/quic-v1").tryGet()
)
of "ws":
discard switchBuilder.withWsTransport().withAddress(
MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet()
)
else:
doAssert false
case secureChannel
of "noise":
discard switchBuilder.withNoise()
case muxer
of "yamux":
discard switchBuilder.withYamux()
of "mplex":
discard switchBuilder.withMplex()
let
rng = newRng()
switch = switchBuilder.withRng(rng).build()
pingProtocol = Ping.new(rng = rng)
switch.mount(pingProtocol)
await switch.start()
defer:
await switch.stop()
if not isDialer:
discard redisClient.rPush("listenerAddr", $switch.peerInfo.fullAddrs.tryGet()[0])
await sleepAsync(100.hours) # will get cancelled
else:
let listenerAddr =
try:
redisClient.bLPop(@["listenerAddr"], testTimeout.seconds.int)[1]
except Exception as e:
raise newException(CatchableError, "Exception calling bLPop: " & e.msg, e)
let
remoteAddr = MultiAddress.init(listenerAddr).tryGet()
dialingStart = Moment.now()
remotePeerId = await switch.connect(remoteAddr)
stream = await switch.dial(remotePeerId, PingCodec)
pingDelay = await pingProtocol.ping(stream)
totalDelay = Moment.now() - dialingStart
await stream.close()
echo Json.encode(
ResultJson(
handshakePlusOneRTTMillis: float(totalDelay.milliseconds),
pingRTTMilllis: float(pingDelay.milliseconds),
)
)
try:
proc mainAsync(): Future[string] {.async.} =
# mainAsync wraps main and returns some value, as otherwise
# 'waitFor(fut)' has no type (or is ambiguous)
await main()
return "done"
discard waitFor(mainAsync().wait(testTimeout))
except AsyncTimeoutError as e:
error "Program execution timed out", description = e.msg
quit(-1)
except CatchableError as e:
error "Unexpected error", description = e.msg
quit(-1)

View File

@@ -1,16 +0,0 @@
{
"id": "nim-libp2p-head",
"containerImageID": "nim-libp2p-head",
"transports": [
"tcp",
"ws",
"quic-v1"
],
"secureChannels": [
"noise"
],
"muxers": [
"mplex",
"yamux"
]
}

View File

@@ -17,12 +17,11 @@ when defined(nimdoc):
## stay backward compatible during the Major version, whereas private ones can
## change at each new Minor version.
##
## If you're new to nim-libp2p, you can find a tutorial `here<https://vacp2p.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
## If you're new to nim-libp2p, you can find a tutorial `here<https://status-im.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
## that can help you get started.
# Import stuff for doc
import
libp2p/[
import libp2p/[
protobuf/minprotobuf,
switch,
stream/lpstream,
@@ -34,43 +33,40 @@ when defined(nimdoc):
peerid,
peerinfo,
peerstore,
multiaddress,
]
multiaddress]
proc dummyPrivateProc*() =
## A private proc example
discard
else:
import
libp2p/[
protobuf/minprotobuf,
muxers/muxer,
muxers/mplex/mplex,
stream/lpstream,
stream/bufferstream,
stream/connection,
transports/transport,
transports/tcptransport,
protocols/secure/noise,
cid,
multihash,
multicodec,
errors,
switch,
peerid,
peerinfo,
multiaddress,
builders,
crypto/crypto,
protocols/pubsub,
]
libp2p/[protobuf/minprotobuf,
muxers/muxer,
muxers/mplex/mplex,
stream/lpstream,
stream/bufferstream,
stream/connection,
transports/transport,
transports/tcptransport,
transports/wstransport,
protocols/secure/noise,
protocols/ping,
cid,
multihash,
multibase,
multicodec,
errors,
switch,
peerid,
peerinfo,
multiaddress,
builders,
crypto/crypto,
protocols/pubsub]
export
minprotobuf, switch, peerid, peerinfo, connection, multiaddress, crypto, lpstream,
bufferstream, muxer, mplex, transport, tcptransport, noise, errors, cid, multihash,
minprotobuf, switch, peerid, peerinfo,
connection, multiaddress, crypto, lpstream,
bufferstream, muxer, mplex, transport,
tcptransport, noise, errors, cid, multihash,
multicodec, builders, pubsub
when defined(libp2p_quic_support):
import libp2p/transports/quictransport
export quictransport

View File

@@ -1,40 +1,40 @@
mode = ScriptMode.Verbose
packageName = "libp2p"
version = "1.13.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
packageName = "libp2p"
version = "1.0.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
requires "nim >= 2.0.0",
"nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
"chronicles >= 0.11.0 & < 0.12.0", "chronos >= 4.0.4", "metrics", "secp256k1",
"stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.16",
"https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"
requires "nim >= 1.2.0",
"nimcrypto >= 0.4.1",
"dnsclient >= 0.3.0 & < 0.4.0",
"bearssl >= 0.1.4",
"chronicles >= 0.10.2",
"chronos >= 3.0.6",
"metrics",
"secp256k1",
"stew#head",
"websock",
"unittest2 >= 0.0.5 & < 0.1.0"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
let flags = getEnv("NIMFLAGS", "") # Extra flags for the compiler
let verbose = getEnv("V", "") notin ["", "0"]
let cfg =
" --styleCheck:usages --styleCheck:error" &
(if verbose: "" else: " --verbosity:0 --hints:off") & " --skipUserCfg -f" &
" --threads:on --opt:speed"
import hashes, strutils
proc runTest(filename: string, moreoptions: string = "") =
var excstr = nimc & " " & lang & " -d:debug " & cfg & " " & flags
import hashes
proc runTest(filename: string, verify: bool = true, sign: bool = true,
moreoptions: string = "") =
var excstr = "nim c --skipParentCfg --opt:speed -d:debug "
excstr.add(" " & getEnv("NIMFLAGS") & " ")
excstr.add(" --verbosity:0 --hints:off ")
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
excstr.add(" " & moreoptions & " ")
if getEnv("CICOV").len > 0:
excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
exec excstr & " -r -d:libp2p_quic_support -d:libp2p_autotls_support tests/" & filename
exec excstr & " -r " & " tests/" & filename
rmFile "tests/" & filename.toExe
proc buildSample(filename: string, run = false, extraFlags = "") =
var excstr = nimc & " " & lang & " " & cfg & " " & flags & " -p:. " & extraFlags
var excstr = "nim c --opt:speed --threads:on -d:debug --verbosity:0 --hints:off -p:. " & extraFlags
excstr.add(" examples/" & filename)
exec excstr
if run:
@@ -42,8 +42,7 @@ proc buildSample(filename: string, run = false, extraFlags = "") =
rmFile "examples/" & filename.toExe
proc tutorialToMd(filename: string) =
let markdown = gorge "cat " & filename & " | " & nimc & " " & lang &
" -r --verbosity:0 --hints:off tools/markdown_builder.nim "
let markdown = gorge "cat " & filename & " | nim c -r --verbosity:0 --hints:off tools/markdown_builder.nim "
writeFile(filename.replace(".nim", ".md"), markdown)
task testnative, "Runs libp2p native tests":
@@ -56,18 +55,38 @@ task testinterop, "Runs interop tests":
runTest("testinterop")
task testpubsub, "Runs pubsub tests":
runTest("pubsub/testpubsub", "-d:libp2p_gossipsub_1_4")
runTest("pubsub/testgossipinternal", sign = false, verify = false, moreoptions = "-d:pubsub_internal_testing")
runTest("pubsub/testpubsub")
runTest("pubsub/testpubsub", sign = false, verify = false)
runTest("pubsub/testpubsub", sign = false, verify = false, moreoptions = "-d:libp2p_pubsub_anonymize=true")
task testpubsub_slim, "Runs pubsub tests":
runTest("pubsub/testgossipinternal", sign = false, verify = false, moreoptions = "-d:pubsub_internal_testing")
runTest("pubsub/testpubsub")
task testfilter, "Run PKI filter test":
runTest("testpkifilter")
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=")
task testintegration, "Runs integration tests":
runTest("testintegration")
runTest("testpkifilter",
moreoptions = "-d:libp2p_pki_schemes=\"secp256k1\"")
runTest("testpkifilter",
moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519\"")
runTest("testpkifilter",
moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519;ecnist\"")
runTest("testpkifilter",
moreoptions = "-d:libp2p_pki_schemes=")
task test, "Runs the test suite":
runTest("testall")
exec "nimble testnative"
exec "nimble testpubsub"
exec "nimble testdaemon"
exec "nimble testinterop"
exec "nimble testfilter"
exec "nimble examples_build"
task test_slim, "Runs the (slimmed down) test suite":
exec "nimble testnative"
exec "nimble testpubsub_slim"
exec "nimble testfilter"
exec "nimble examples_build"
task website, "Build the website":
tutorialToMd("examples/tutorial_1_connect.nim")
@@ -79,12 +98,21 @@ task website, "Build the website":
tutorialToMd("examples/circuitrelay.nim")
exec "mkdocs build"
task examples, "Build and run examples":
exec "nimble install -y nimpng"
exec "nimble install -y nico --passNim=--skipParentCfg"
buildSample("examples_build", false, "--styleCheck:off") # build only
buildSample("examples_run", true)
task examples_build, "Build the samples":
buildSample("directchat")
buildSample("helloworld", true)
buildSample("circuitrelay", true)
buildSample("tutorial_1_connect", true)
buildSample("tutorial_2_customproto", true)
if (NimMajor, NimMinor) > (1, 2):
# These tutorials rely on post-1.4 exception tracking
buildSample("tutorial_3_protobuf", true)
buildSample("tutorial_4_gossipsub", true)
buildSample("tutorial_5_discovery", true)
# Nico doesn't work in 1.2
exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
exec "nimble install -y nico"
buildSample("tutorial_6_game", false, "--styleCheck:off")
# pin system
# while nimble lockfile
@@ -95,14 +123,12 @@ task pin, "Create a lockfile":
# pinner.nim was originally here
# but you can't read output from
# a command in a nimscript
exec nimc & " c -r tools/pinner.nim"
exec "nim c -r tools/pinner.nim"
import sequtils
import os
task install_pinned, "Reads the lockfile":
let toInstall = readFile(PinFile).splitWhitespace().mapIt(
(it.split(";", 1)[0], it.split(";", 1)[1])
)
let toInstall = readFile(PinFile).splitWhitespace().mapIt((it.split(";", 1)[0], it.split(";", 1)[1]))
# [('packageName', 'packageFullUri')]
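# Hedged illustration of a PinFile entry (hypothetical package name and URL),
# split on the first ';' into (name, fullUri); the part after '#' is the pinned commit:
#   "libp2p;https://github.com/vacp2p/nim-libp2p@#0123abcd" -> ("libp2p", "https://github.com/vacp2p/nim-libp2p@#0123abcd")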
rmDir("nimbledeps")
@@ -112,7 +138,8 @@ task install_pinned, "Reads the lockfile":
# Remove the automatically installed deps
# (inefficient you say?)
let nimblePkgs =
if system.dirExists("nimbledeps/pkgs"): "nimbledeps/pkgs" else: "nimbledeps/pkgs2"
if system.dirExists("nimbledeps/pkgs"): "nimbledeps/pkgs"
else: "nimbledeps/pkgs2"
for dependency in listDirs(nimblePkgs):
let
fileName = dependency.extractFilename
@@ -120,12 +147,14 @@ task install_pinned, "Reads the lockfile":
packageName = fileName.split('-')[0]
if toInstall.anyIt(
it[0] == packageName and (
it[1].split('#')[^1] in fileContent or # nimble for nim 2.X
fileName.endsWith(it[1].split('#')[^1]) # nimble for nim 1.X
)
) == false or fileName.split('-')[^1].len < 20: # safeguard for nimble for nim 1.X
rmDir(dependency)
it[0] == packageName and
(
it[1].split('#')[^1] in fileContent or # nimble for nim 2.X
fileName.endsWith(it[1].split('#')[^1]) # nimble for nim 1.X
)
) == false or
fileName.split('-')[^1].len < 20: # safeguard for nimble for nim 1.X
rmDir(dependency)
task unpin, "Restore global package use":
rmDir("nimbledeps")

View File

@@ -1,533 +0,0 @@
import json, uri
from times import DateTime, parse
import chronos/apps/http/httpclient, results, chronicles
import ./utils
import ../../crypto/crypto
import ../../crypto/rsa
export ACMEError
logScope:
topics = "libp2p acme api"
const
LetsEncryptURL* = "https://acme-v02.api.letsencrypt.org"
LetsEncryptURLStaging* = "https://acme-staging-v02.api.letsencrypt.org"
Alg = "RS256"
DefaultChalCompletedRetries = 10
DefaultChalCompletedRetryTime = 1.seconds
DefaultFinalizeRetries = 10
DefaultFinalizeRetryTime = 1.seconds
DefaultRandStringSize = 256
ACMEHttpHeaders = [("Content-Type", "application/jose+json")]
type Authorization* = string
type Domain* = string
type Kid* = string
type Nonce* = string
type ACMEDirectory* = object
newNonce*: string
newOrder*: string
newAccount*: string
type ACMEApi* = ref object of RootObj
directory: Opt[ACMEDirectory]
session: HttpSessionRef
acmeServerURL*: Uri
type HTTPResponse* = object
body*: JsonNode
headers*: HttpTable
type JWK = object
kty: string
n: string
e: string
# whether the request uses Kid or not
type ACMERequestType = enum
ACMEJwkRequest
ACMEKidRequest
type ACMERequestHeader = object
alg: string
typ: string
nonce: Nonce
url: string
case kind: ACMERequestType
of ACMEJwkRequest:
jwk: JWK
of ACMEKidRequest:
kid: Kid
type Email = string
type ACMERegisterRequest* = object
termsOfServiceAgreed: bool
contact: seq[Email]
type ACMEAccountStatus = enum
valid = "valid"
deactivated = "deactivated"
revoked = "revoked"
type ACMERegisterResponseBody = object
status*: ACMEAccountStatus
type ACMERegisterResponse* = object
kid*: Kid
status*: ACMEAccountStatus
type ACMEChallengeStatus* {.pure.} = enum
PENDING = "pending"
PROCESSING = "processing"
VALID = "valid"
INVALID = "invalid"
type ACMEOrderStatus* {.pure.} = enum
PENDING = "pending"
READY = "ready"
PROCESSING = "processing"
VALID = "valid"
INVALID = "invalid"
type ACMEChallengeType* {.pure.} = enum
DNS01 = "dns-01"
HTTP01 = "http-01"
TLSALPN01 = "tls-alpn-01"
type ACMEChallengeToken* = string
type ACMEChallenge* = object
url*: string
`type`*: ACMEChallengeType
status*: ACMEChallengeStatus
token*: ACMEChallengeToken
type ACMEChallengeIdentifier = object
`type`: string
value: string
type ACMEChallengeRequest = object
identifiers: seq[ACMEChallengeIdentifier]
type ACMEChallengeResponseBody = object
status: ACMEOrderStatus
authorizations: seq[Authorization]
finalize: string
type ACMEChallengeResponse* = object
status*: ACMEOrderStatus
authorizations*: seq[Authorization]
finalize*: string
order*: string
type ACMEChallengeResponseWrapper* = object
finalize*: string
order*: string
dns01*: ACMEChallenge
type ACMEAuthorizationsResponse* = object
challenges*: seq[ACMEChallenge]
type ACMECompletedResponse* = object
url: string
type ACMECheckKind* = enum
ACMEOrderCheck
ACMEChallengeCheck
type ACMECheckResponse* = object
case kind: ACMECheckKind
of ACMEOrderCheck:
orderStatus: ACMEOrderStatus
of ACMEChallengeCheck:
chalStatus: ACMEChallengeStatus
retryAfter: Duration
type ACMEFinalizeResponse* = object
status: ACMEOrderStatus
type ACMEOrderResponse* = object
certificate: string
expires: string
type ACMECertificateResponse* = object
rawCertificate*: string
certificateExpiry*: DateTime
type ACMECertificate* = object
rawCertificate*: string
certificateExpiry*: DateTime
certKeyPair*: KeyPair
when defined(libp2p_autotls_support):
import options, sequtils, strutils, jwt, bearssl/pem
template handleError*(msg: string, body: untyped): untyped =
try:
body
except ACMEError as exc:
raise exc
except CancelledError as exc:
raise exc
except JsonKindError as exc:
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
except ValueError as exc:
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
except HttpError as exc:
raise newException(ACMEError, msg & ": Failed to connect to ACME server", exc)
except CatchableError as exc:
raise newException(ACMEError, msg & ": Unexpected error", exc)
method post*(
self: ACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.}
method get*(
self: ACMEApi, uri: Uri
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.}
proc new*(
T: typedesc[ACMEApi], acmeServerURL: Uri = parseUri(LetsEncryptURL)
): ACMEApi =
let session = HttpSessionRef.new()
ACMEApi(
session: session, directory: Opt.none(ACMEDirectory), acmeServerURL: acmeServerURL
)
proc getDirectory(
self: ACMEApi
): Future[ACMEDirectory] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("getDirectory"):
self.directory.valueOr:
let acmeResponse = await self.get(self.acmeServerURL / "directory")
let directory = acmeResponse.body.to(ACMEDirectory)
self.directory = Opt.some(directory)
directory
method requestNonce*(
self: ACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]), base.} =
handleError("requestNonce"):
let acmeResponse = await self.get(parseUri((await self.getDirectory()).newNonce))
Nonce(acmeResponse.headers.keyOrError("Replay-Nonce"))
# TODO: save n and e in account so we don't have to recalculate every time
proc acmeHeader(
self: ACMEApi, uri: Uri, key: KeyPair, needsJwk: bool, kid: Opt[Kid]
): Future[ACMERequestHeader] {.async: (raises: [ACMEError, CancelledError]).} =
if not needsJwk and kid.isNone():
raise newException(ACMEError, "kid not set")
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
raise newException(ACMEError, "Unsupported signing key type")
let newNonce = await self.requestNonce()
if needsJwk:
let pubkey = key.pubkey.rsakey
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
ACMERequestHeader(
kind: ACMEJwkRequest,
alg: Alg,
typ: "JWT",
nonce: newNonce,
url: $uri,
jwk: JWK(kty: "RSA", n: base64UrlEncode(nArray), e: base64UrlEncode(eArray)),
)
else:
ACMERequestHeader(
kind: ACMEKidRequest,
alg: Alg,
typ: "JWT",
nonce: newNonce,
url: $uri,
kid: kid.get(),
)
method post*(
self: ACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.} =
let rawResponse = await HttpClientRequestRef
.post(self.session, $uri, body = payload, headers = ACMEHttpHeaders)
.get()
.send()
let body = await rawResponse.getResponseBody()
HTTPResponse(body: body, headers: rawResponse.headers)
method get*(
self: ACMEApi, uri: Uri
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.} =
let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
let body = await rawResponse.getResponseBody()
HTTPResponse(body: body, headers: rawResponse.headers)
proc createSignedAcmeRequest(
self: ACMEApi,
uri: Uri,
payload: auto,
key: KeyPair,
needsJwk: bool = false,
kid: Opt[Kid] = Opt.none(Kid),
): Future[string] {.async: (raises: [ACMEError, CancelledError]).} =
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
raise newException(ACMEError, "Unsupported signing key type")
let acmeHeader = await self.acmeHeader(uri, key, needsJwk, kid)
handleError("createSignedAcmeRequest"):
var token = toJWT(%*{"header": acmeHeader, "claims": payload})
let derPrivKey = key.seckey.rsakey.getBytes.get
let pemPrivKey: string = pemEncode(derPrivKey, "PRIVATE KEY")
token.sign(pemPrivKey)
$token.toFlattenedJson()
proc requestRegister*(
self: ACMEApi, key: KeyPair
): Future[ACMERegisterResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let registerRequest = ACMERegisterRequest(termsOfServiceAgreed: true)
handleError("acmeRegister"):
let payload = await self.createSignedAcmeRequest(
parseUri((await self.getDirectory()).newAccount),
registerRequest,
key,
needsJwk = true,
)
let acmeResponse =
await self.post(parseUri((await self.getDirectory()).newAccount), payload)
let acmeResponseBody = acmeResponse.body.to(ACMERegisterResponseBody)
ACMERegisterResponse(
status: acmeResponseBody.status,
kid: acmeResponse.headers.keyOrError("location"),
)
proc requestNewOrder*(
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
# request challenge from ACME server
let orderRequest = ACMEChallengeRequest(
identifiers: domains.mapIt(ACMEChallengeIdentifier(`type`: "dns", value: it))
)
handleError("requestNewOrder"):
let payload = await self.createSignedAcmeRequest(
parseUri((await self.getDirectory()).newOrder),
orderRequest,
key,
kid = Opt.some(kid),
)
let acmeResponse =
await self.post(parseUri((await self.getDirectory()).newOrder), payload)
let challengeResponseBody = acmeResponse.body.to(ACMEChallengeResponseBody)
if challengeResponseBody.authorizations.len == 0:
raise newException(ACMEError, "Authorizations field is empty")
ACMEChallengeResponse(
status: challengeResponseBody.status,
authorizations: challengeResponseBody.authorizations,
finalize: challengeResponseBody.finalize,
order: acmeResponse.headers.keyOrError("location"),
)
proc requestAuthorizations*(
self: ACMEApi, authorizations: seq[Authorization], key: KeyPair, kid: Kid
): Future[ACMEAuthorizationsResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestAuthorizations"):
doAssert authorizations.len > 0
let acmeResponse = await self.get(parseUri(authorizations[0]))
acmeResponse.body.to(ACMEAuthorizationsResponse)
proc requestChallenge*(
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponseWrapper] {.
async: (raises: [ACMEError, CancelledError])
.} =
let orderResponse = await self.requestNewOrder(domains, key, kid)
if orderResponse.status != ACMEOrderStatus.PENDING and
orderResponse.status != ACMEOrderStatus.READY:
# ready is a valid status when renewing certs before expiry
raise
newException(ACMEError, "Invalid new order status: " & $orderResponse.status)
let authorizationsResponse =
await self.requestAuthorizations(orderResponse.authorizations, key, kid)
if authorizationsResponse.challenges.len == 0:
raise newException(ACMEError, "No challenges received")
return ACMEChallengeResponseWrapper(
finalize: orderResponse.finalize,
order: orderResponse.order,
dns01: authorizationsResponse.challenges.filterIt(
it.`type` == ACMEChallengeType.DNS01
)[0],
# getting the first element is safe since we checked that authorizationsResponse.challenges.len != 0
)
proc requestCheck*(
self: ACMEApi, checkURL: Uri, checkKind: ACMECheckKind, key: KeyPair, kid: Kid
): Future[ACMECheckResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestCheck"):
let acmeResponse = await self.get(checkURL)
let retryAfter =
try:
parseInt(acmeResponse.headers.keyOrError("Retry-After")).seconds
except ValueError:
DefaultChalCompletedRetryTime
case checkKind
of ACMEOrderCheck:
try:
ACMECheckResponse(
kind: checkKind,
orderStatus: parseEnum[ACMEOrderStatus](acmeResponse.body["status"].getStr),
retryAfter: retryAfter,
)
except ValueError:
raise newException(
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
)
of ACMEChallengeCheck:
try:
ACMECheckResponse(
kind: checkKind,
chalStatus:
parseEnum[ACMEChallengeStatus](acmeResponse.body["status"].getStr),
retryAfter: retryAfter,
)
except ValueError:
raise newException(
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
)
proc sendChallengeCompleted*(
self: ACMEApi, chalURL: Uri, key: KeyPair, kid: Kid
): Future[ACMECompletedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("sendChallengeCompleted"):
let payload =
await self.createSignedAcmeRequest(chalURL, %*{}, key, kid = Opt.some(kid))
let acmeResponse = await self.post(chalURL, payload)
acmeResponse.body.to(ACMECompletedResponse)
proc checkChallengeCompleted*(
self: ACMEApi,
checkURL: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
for i in 0 .. retries:
let checkResponse =
await self.requestCheck(checkURL, ACMEChallengeCheck, key, kid)
case checkResponse.chalStatus
of ACMEChallengeStatus.PENDING:
await sleepAsync(checkResponse.retryAfter) # try again after some delay
of ACMEChallengeStatus.VALID:
return true
else:
raise newException(
ACMEError,
"Failed challenge completion: expected 'valid', got '" &
$checkResponse.chalStatus & "'",
)
return false
proc completeChallenge*(
self: ACMEApi,
chalURL: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
let completedResponse = await self.sendChallengeCompleted(chalURL, key, kid)
# check until acme server is done (poll validation)
return await self.checkChallengeCompleted(chalURL, key, kid, retries = retries)
proc requestFinalize*(
self: ACMEApi,
domain: Domain,
finalize: Uri,
certKeyPair: KeyPair,
key: KeyPair,
kid: Kid,
): Future[ACMEFinalizeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestFinalize"):
let payload = await self.createSignedAcmeRequest(
finalize, %*{"csr": createCSR(domain, certKeyPair)}, key, kid = Opt.some(kid)
)
let acmeResponse = await self.post(finalize, payload)
# server responds with updated order response
acmeResponse.body.to(ACMEFinalizeResponse)
proc checkCertFinalized*(
self: ACMEApi,
order: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
for i in 0 .. retries:
let checkResponse = await self.requestCheck(order, ACMEOrderCheck, key, kid)
case checkResponse.orderStatus
of ACMEOrderStatus.VALID:
return true
of ACMEOrderStatus.PROCESSING:
await sleepAsync(checkResponse.retryAfter) # try again after some delay
else:
error "Failed certificate finalization",
description = "expected 'valid', got '" & $checkResponse.orderStatus & "'"
return false # do not try again
return false
proc certificateFinalized*(
self: ACMEApi,
domain: Domain,
finalize: Uri,
order: Uri,
certKeyPair: KeyPair,
key: KeyPair,
kid: Kid,
retries: int = DefaultFinalizeRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
let finalizeResponse =
await self.requestFinalize(domain, finalize, certKeyPair, key, kid)
# keep checking order until cert is valid (done)
return await self.checkCertFinalized(order, key, kid, retries = retries)
proc requestGetOrder*(
self: ACMEApi, order: Uri
): Future[ACMEOrderResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestGetOrder"):
let acmeResponse = await self.get(order)
acmeResponse.body.to(ACMEOrderResponse)
proc downloadCertificate*(
self: ACMEApi, order: Uri
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let orderResponse = await self.requestGetOrder(order)
handleError("downloadCertificate"):
let rawResponse = await HttpClientRequestRef
.get(self.session, orderResponse.certificate)
.get()
.send()
ACMECertificateResponse(
rawCertificate: bytesToString(await rawResponse.getBodyBytes()),
certificateExpiry: parse(orderResponse.expires, "yyyy-MM-dd'T'HH:mm:ss'Z'"),
)
proc close*(self: ACMEApi) {.async: (raises: [CancelledError]).} =
await self.session.closeWait()
else:
{.hint: "autotls disabled. Use -d:libp2p_autotls_support".}

View File

@@ -1,98 +0,0 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import chronicles
import ../../crypto/crypto
import ./api
export api
type KeyAuthorization* = string
type ACMEClient* = ref object
api: ACMEApi
key*: KeyPair
kid*: Kid
logScope:
topics = "libp2p acme client"
when defined(libp2p_autotls_support):
import uri
import chronos, results, stew/byteutils
import ../../crypto/rsa
import ./utils
proc new*(
T: typedesc[ACMEClient],
rng: ref HmacDrbgContext = newRng(),
api: ACMEApi = ACMEApi.new(acmeServerURL = parseUri(LetsEncryptURL)),
key: Opt[KeyPair] = Opt.none(KeyPair),
kid: Kid = Kid(""),
): T {.raises: [].} =
let key = key.valueOr:
KeyPair.random(PKScheme.RSA, rng[]).get()
T(api: api, key: key, kid: kid)
proc getOrInitKid*(
self: ACMEClient
): Future[Kid] {.async: (raises: [ACMEError, CancelledError]).} =
if self.kid.len == 0:
let registerResponse = await self.api.requestRegister(self.key)
self.kid = registerResponse.kid
return self.kid
proc genKeyAuthorization*(self: ACMEClient, token: string): KeyAuthorization =
base64UrlEncode(@(sha256.digest((token & "." & thumbprint(self.key)).toBytes).data))
proc getChallenge*(
self: ACMEClient, domains: seq[api.Domain]
): Future[ACMEChallengeResponseWrapper] {.
async: (raises: [ACMEError, CancelledError])
.} =
await self.api.requestChallenge(domains, self.key, await self.getOrInitKid())
proc getCertificate*(
self: ACMEClient,
domain: api.Domain,
certKeyPair: KeyPair,
challenge: ACMEChallengeResponseWrapper,
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let chalURL = parseUri(challenge.dns01.url)
let orderURL = parseUri(challenge.order)
let finalizeURL = parseUri(challenge.finalize)
trace "Sending challenge completed notification"
discard await self.api.sendChallengeCompleted(
chalURL, self.key, await self.getOrInitKid()
)
trace "Checking for completed challenge"
let completed = await self.api.checkChallengeCompleted(
chalURL, self.key, await self.getOrInitKid()
)
if not completed:
raise newException(
ACMEError, "Failed to signal ACME server about challenge completion"
)
trace "Waiting for certificate to be finalized"
let finalized = await self.api.certificateFinalized(
domain, finalizeURL, orderURL, certKeyPair, self.key, await self.getOrInitKid()
)
if not finalized:
raise
newException(ACMEError, "Failed to finalize certificate for domain " & domain)
trace "Downloading certificate"
await self.api.downloadCertificate(orderURL)
proc close*(self: ACMEClient) {.async: (raises: [CancelledError]).} =
await self.api.close()

View File

@@ -1,40 +0,0 @@
import uri
import chronos, chronos/apps/http/httpclient, json
import ./api, ./utils
export api
type MockACMEApi* = ref object of ACMEApi
mockedResponses*: seq[HTTPResponse]
proc new*(
T: typedesc[MockACMEApi]
): Future[T] {.async: (raises: [ACMEError, CancelledError]).} =
let directory = ACMEDirectory(
newNonce: LetsEncryptURL & "/new-nonce",
newOrder: LetsEncryptURL & "/new-order",
newAccount: LetsEncryptURL & "/new-account",
)
MockACMEApi(
session: HttpSessionRef.new(),
directory: Opt.some(directory),
acmeServerURL: parseUri(LetsEncryptURL),
)
when defined(libp2p_autotls_support):
method requestNonce*(
self: MockACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
return $self.acmeServerURL & "/acme/1234"
method post*(
self: MockACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
result = self.mockedResponses[0]
self.mockedResponses.delete(0)
method get*(
self: MockACMEApi, uri: Uri
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
result = self.mockedResponses[0]
self.mockedResponses.delete(0)

View File

@@ -1,73 +0,0 @@
import ../../errors
type ACMEError* = object of LPError
when defined(libp2p_autotls_support):
import base64, strutils, chronos/apps/http/httpclient, json
import ../../transports/tls/certificate_ffi
import ../../transports/tls/certificate
import ../../crypto/crypto
import ../../crypto/rsa
proc keyOrError*(table: HttpTable, key: string): string {.raises: [ValueError].} =
if not table.contains(key):
raise newException(ValueError, "key " & key & " not present in headers")
table.getString(key)
proc base64UrlEncode*(data: seq[byte]): string =
## Encodes data using base64url (RFC 4648 §5) — no padding, URL-safe
var encoded = base64.encode(data, safe = true)
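# base64 appends at most two '=' padding characters, so stripping the suffix
# twice removes all padding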
encoded.removeSuffix("=")
encoded.removeSuffix("=")
return encoded
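# Minimal usage sketch (hypothetical input) showing the URL-safe alphabet and
# the stripped padding:
#   base64UrlEncode(@[0xFB'u8, 0xEF]) -> "--8" (plain base64 would yield "++8=")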
proc thumbprint*(key: KeyPair): string =
doAssert key.seckey.scheme == PKScheme.RSA, "unsupported keytype"
let pubkey = key.pubkey.rsakey
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
let n = base64UrlEncode(nArray)
let e = base64UrlEncode(eArray)
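# Per RFC 7638, the thumbprint hashes the JWK with its required members in
# lexicographic key order ("e", "kty", "n"), which the literal below follows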
let keyJson = %*{"e": e, "kty": "RSA", "n": n}
let digest = sha256.digest($keyJson)
return base64UrlEncode(@(digest.data))
proc getResponseBody*(
response: HttpClientResponseRef
): Future[JsonNode] {.async: (raises: [ACMEError, CancelledError]).} =
try:
let bodyBytes = await response.getBodyBytes()
if bodyBytes.len > 0:
return bytesToString(bodyBytes).parseJson()
return %*{} # empty body
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise newException(
ACMEError, "Unexpected error occurred while getting body bytes", exc
)
except Exception as exc: # this is required for nim 1.6
raise newException(
ACMEError, "Unexpected error occurred while getting body bytes", exc
)
proc createCSR*(
domain: string, certKeyPair: KeyPair
): string {.raises: [ACMEError].} =
var certKey: cert_key_t
var certCtx: cert_context_t
var derCSR: ptr cert_buffer = nil
# convert KeyPair to cert_key_t
let rawSeckey: seq[byte] = certKeyPair.seckey.getRawBytes.valueOr:
raise newException(ACMEError, "Failed to get seckey raw bytes (DER)")
let seckeyBuffer = rawSeckey.toCertBuffer()
if cert_new_key_t(seckeyBuffer.unsafeAddr, certKey.addr) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to convert key pair to cert_key_t")
# create CSR
if cert_signing_req(domain.cstring, certKey, derCSR.addr) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to create CSR")
base64.encode(derCSR.toSeq, safe = true)

View File

@@ -1,33 +0,0 @@
when defined(libp2p_autotls_support):
import ./service, ./acme/client, ../peeridauth/client
import ../crypto/crypto, ../crypto/rsa, websock/websock
type MockAutotlsService* = ref object of AutotlsService
mockedCert*: TLSCertificate
mockedKey*: TLSPrivateKey
proc new*(
T: typedesc[MockAutotlsService],
rng: ref HmacDrbgContext = newRng(),
config: AutotlsConfig = AutotlsConfig.new(),
): T =
T(
acmeClient:
ACMEClient.new(api = ACMEApi.new(acmeServerURL = config.acmeServerURL)),
brokerClient: PeerIDAuthClient.new(),
bearer: Opt.none(BearerToken),
cert: Opt.none(AutotlsCert),
certReady: newAsyncEvent(),
running: newAsyncEvent(),
config: config,
rng: rng,
)
method getCertWhenReady*(
self: MockAutotlsService
): Future[AutotlsCert] {.async: (raises: [AutoTLSError, CancelledError]).} =
AutotlsCert.new(self.mockedCert, self.mockedKey, Moment.now)
method setup*(self: MockAutotlsService) {.base, async.} =
self.running.fire()

View File

@@ -1,295 +0,0 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
{.push public.}
import chronos, chronicles, net, results
import chronos/apps/http/httpclient, bearssl/rand
import
./acme/client,
./utils,
../crypto/crypto,
../nameresolving/nameresolver,
../peeridauth/client,
../switch,
../peerinfo,
../wire
logScope:
topics = "libp2p autotls"
export LetsEncryptURL, AutoTLSError
const
DefaultDnsServers* =
@[
initTAddress("1.1.1.1:53"),
initTAddress("1.0.0.1:53"),
initTAddress("[2606:4700:4700::1111]:53"),
]
DefaultRenewCheckTime* = 1.hours
DefaultRenewBufferTime = 1.hours
DefaultIssueRetries = 3
DefaultIssueRetryTime = 1.seconds
AutoTLSBroker* = "registration.libp2p.direct"
AutoTLSDNSServer* = "libp2p.direct"
HttpOk* = 200
HttpCreated* = 201
# NoneIp is needed because nim 1.6.16 can't do proper generic inference
NoneIp = Opt.none(IpAddress)
type SigParam = object
k: string
v: seq[byte]
type AutotlsCert* = ref object
cert*: TLSCertificate
privkey*: TLSPrivateKey
expiry*: Moment
type AutotlsConfig* = ref object
acmeServerURL*: Uri
nameResolver*: NameResolver
ipAddress: Opt[IpAddress]
renewCheckTime*: Duration
renewBufferTime*: Duration
issueRetries*: int
issueRetryTime*: Duration
type AutotlsService* = ref object of Service
acmeClient*: ACMEClient
brokerClient*: PeerIDAuthClient
bearer*: Opt[BearerToken]
cert*: Opt[AutotlsCert]
certReady*: AsyncEvent
running*: AsyncEvent
config*: AutotlsConfig
managerFut: Future[void]
peerInfo: PeerInfo
rng*: ref HmacDrbgContext
when defined(libp2p_autotls_support):
import json, sequtils, bearssl/pem
import
../crypto/rsa,
../utils/heartbeat,
../transports/transport,
../utils/ipaddr,
../transports/tcptransport,
../nameresolving/dnsresolver
proc new*(
T: typedesc[AutotlsCert],
cert: TLSCertificate,
privkey: TLSPrivateKey,
expiry: Moment,
): T =
T(cert: cert, privkey: privkey, expiry: expiry)
method getCertWhenReady*(
self: AutotlsService
): Future[AutotlsCert] {.base, async: (raises: [AutoTLSError, CancelledError]).} =
await self.certReady.wait()
return self.cert.get
proc new*(
T: typedesc[AutotlsConfig],
ipAddress: Opt[IpAddress] = NoneIp,
nameServers: seq[TransportAddress] = DefaultDnsServers,
acmeServerURL: Uri = parseUri(LetsEncryptURL),
renewCheckTime: Duration = DefaultRenewCheckTime,
renewBufferTime: Duration = DefaultRenewBufferTime,
issueRetries: int = DefaultIssueRetries,
issueRetryTime: Duration = DefaultIssueRetryTime,
): T =
T(
nameResolver: DnsResolver.new(nameServers),
acmeServerURL: acmeServerURL,
ipAddress: ipAddress,
renewCheckTime: renewCheckTime,
renewBufferTime: renewBufferTime,
issueRetries: issueRetries,
issueRetryTime: issueRetryTime,
)
proc new*(
T: typedesc[AutotlsService],
rng: ref HmacDrbgContext = newRng(),
config: AutotlsConfig = AutotlsConfig.new(),
): T =
T(
acmeClient:
ACMEClient.new(api = ACMEApi.new(acmeServerURL = config.acmeServerURL)),
brokerClient: PeerIDAuthClient.new(),
bearer: Opt.none(BearerToken),
cert: Opt.none(AutotlsCert),
certReady: newAsyncEvent(),
running: newAsyncEvent(),
config: config,
managerFut: nil,
peerInfo: nil,
rng: rng,
)
method setup*(
self: AutotlsService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
trace "Setting up AutotlsService"
let hasBeenSetup = await procCall Service(self).setup(switch)
if hasBeenSetup:
if self.config.ipAddress.isNone():
try:
self.config.ipAddress = Opt.some(getPublicIPAddress())
except ValueError as exc:
error "Failed to get public IP address", err = exc.msg
return false
except OSError as exc:
error "Failed to get public IP address", err = exc.msg
return false
self.managerFut = self.run(switch)
return hasBeenSetup
method issueCertificate(
self: AutotlsService
): Future[bool] {.
base, async: (raises: [AutoTLSError, ACMEError, PeerIDAuthError, CancelledError])
.} =
trace "Issuing certificate"
if self.peerInfo.isNil():
error "Cannot issue new certificate: peerInfo not set"
return false
# generate autotls domain string: "*.{peerID}.libp2p.direct"
let baseDomain =
api.Domain(encodePeerId(self.peerInfo.peerId) & "." & AutoTLSDNSServer)
let domain = api.Domain("*." & baseDomain)
let acmeClient = self.acmeClient
trace "Requesting ACME challenge"
let dns01Challenge = await acmeClient.getChallenge(@[domain])
trace "Generating key authorization"
let keyAuth = acmeClient.genKeyAuthorization(dns01Challenge.dns01.token)
let addrs = await self.peerInfo.expandAddrs()
if addrs.len == 0:
error "Unable to authenticate with broker: no addresses"
return false
let strMultiaddresses: seq[string] = addrs.mapIt($it)
let payload = %*{"value": keyAuth, "addresses": strMultiaddresses}
let registrationURL = parseUri("https://" & AutoTLSBroker & "/v1/_acme-challenge")
trace "Sending challenge to AutoTLS broker"
let (bearer, response) =
await self.brokerClient.send(registrationURL, self.peerInfo, payload, self.bearer)
if self.bearer.isNone():
# save bearer token for future
self.bearer = Opt.some(bearer)
if response.status != HttpOk:
error "Failed to authenticate with AutoTLS Broker at " & AutoTLSBroker
debug "Broker message",
body = bytesToString(response.body), peerinfo = self.peerInfo
return false
let dashedIpAddr = ($self.config.ipAddress.get()).replace(".", "-")
let acmeChalDomain = api.Domain("_acme-challenge." & baseDomain)
let ip4Domain = api.Domain(dashedIpAddr & "." & baseDomain)
debug "Waiting for DNS record to be set", ip = ip4Domain, acme = acmeChalDomain
let dnsSet = await checkDNSRecords(
self.config.nameResolver, self.config.ipAddress.get(), baseDomain, keyAuth
)
if not dnsSet:
error "DNS records not set"
return false
trace "Notifying challenge completion to ACME and downloading cert"
let certKeyPair = KeyPair.random(PKScheme.RSA, self.rng[]).get()
let certificate =
await acmeClient.getCertificate(domain, certKeyPair, dns01Challenge)
let derPrivKey = certKeyPair.seckey.rsakey.getBytes.valueOr:
raise newException(AutoTLSError, "Unable to get TLS private key")
let pemPrivKey: string = derPrivKey.pemEncode("PRIVATE KEY")
debug "autotls cert", pemPrivKey = pemPrivKey, cert = certificate.rawCertificate
trace "Installing certificate"
let newCert =
try:
AutotlsCert.new(
TLSCertificate.init(certificate.rawCertificate),
TLSPrivateKey.init(pemPrivKey),
asMoment(certificate.certificateExpiry),
)
except TLSStreamProtocolError:
error "Could not parse downloaded certificates"
return false
self.cert = Opt.some(newCert)
self.certReady.fire()
trace "Certificate installed"
true
proc hasTcpStarted(switch: Switch): bool =
switch.transports.filterIt(it of TcpTransport and it.running).len == 0
proc tryIssueCertificate(self: AutotlsService) {.async: (raises: [CancelledError]).} =
for _ in 0 ..< self.config.issueRetries:
try:
if await self.issueCertificate():
return
except CancelledError as exc:
raise exc
except CatchableError as exc:
error "Failed to issue certificate", err = exc.msg
await sleepAsync(self.config.issueRetryTime)
error "Failed to issue certificate"
method run*(
self: AutotlsService, switch: Switch
) {.async: (raises: [CancelledError]).} =
trace "Starting Autotls management"
self.running.fire()
self.peerInfo = switch.peerInfo
# ensure that there's at least one TcpTransport running
# for communicating with autotls broker
if switch.hasTcpStarted():
error "Could not find a running TcpTransport in switch"
return
heartbeat "Certificate Management", self.config.renewCheckTime:
if self.cert.isNone():
await self.tryIssueCertificate()
# AutotlsService will renew the cert 1h before it expires
let cert = self.cert.get
let waitTime = cert.expiry - Moment.now - self.config.renewBufferTime
if waitTime <= self.config.renewBufferTime:
await self.tryIssueCertificate()
method stop*(
self: AutotlsService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
if not self.acmeClient.isNil():
await self.acmeClient.close()
if not self.brokerClient.isNil():
await self.brokerClient.close()
if not self.managerFut.isNil():
await self.managerFut.cancelAndWait()
self.managerFut = nil
return hasBeenStopped

View File

@@ -1,82 +0,0 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
{.push public.}
import chronos, chronicles
import ../errors
logScope:
topics = "libp2p utils"
const
DefaultDnsRetries = 3
DefaultDnsRetryTime = 1.seconds
type AutoTLSError* = object of LPError
when defined(libp2p_autotls_support):
import strutils
from times import DateTime, toTime, toUnix
import stew/base36
import
../peerid,
../multihash,
../cid,
../multicodec,
../nameresolving/nameresolver,
./acme/client
proc asMoment*(dt: DateTime): Moment =
let unixTime: int64 = dt.toTime.toUnix
return Moment.init(unixTime, Second)
proc encodePeerId*(peerId: PeerId): string {.raises: [AutoTLSError].} =
var mh: MultiHash
let decodeResult = MultiHash.decode(peerId.data, mh)
if decodeResult.isErr() or decodeResult.get() == -1:
raise
newException(AutoTLSError, "Failed to decode PeerId: invalid multihash format")
let cidResult = Cid.init(CIDv1, multiCodec("libp2p-key"), mh)
if cidResult.isErr():
raise newException(AutoTLSError, "Failed to initialize CID from multihash")
return Base36.encode(cidResult.get().data.buffer)
proc checkDNSRecords*(
nameResolver: NameResolver,
ipAddress: IpAddress,
baseDomain: api.Domain,
keyAuth: KeyAuthorization,
retries: int = DefaultDnsRetries,
): Future[bool] {.async: (raises: [AutoTLSError, CancelledError]).} =
# if my ip address is 100.10.10.3 then the ip4Domain will be:
# 100-10-10-3.{peerIdBase36}.libp2p.direct
# and acme challenge TXT domain will be:
# _acme-challenge.{peerIdBase36}.libp2p.direct
let dashedIpAddr = ($ipAddress).replace(".", "-")
let acmeChalDomain = api.Domain("_acme-challenge." & baseDomain)
let ip4Domain = api.Domain(dashedIpAddr & "." & baseDomain)
var txt: seq[string]
var ip4: seq[TransportAddress]
for _ in 0 .. retries:
txt = await nameResolver.resolveTxt(acmeChalDomain)
try:
ip4 = await nameResolver.resolveIp(ip4Domain, 0.Port)
except CancelledError as exc:
raise exc
except CatchableError as exc:
error "Failed to resolve IP", description = exc.msg # retry
if txt.len > 0 and txt[0] == keyAuth and ip4.len > 0:
return true
await sleepAsync(DefaultDnsRetryTime)
return false
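# Hedged illustration (hypothetical peer id) of the records polled above:
#   TXT  _acme-challenge.<peerIdBase36>.libp2p.direct -> first record must equal keyAuth
#   A    100-10-10-3.<peerIdBase36>.libp2p.direct     -> must resolve to at least one address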

View File

@@ -9,68 +9,45 @@
## This module contains a Switch Building helper.
runnableExamples:
let switch = SwitchBuilder.new().withRng(rng).withAddresses(multiaddress)
# etc
.build()
let switch =
SwitchBuilder.new()
.withRng(rng)
.withAddresses(multiaddress)
# etc
.build()
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import options, tables, chronos, chronicles, sequtils
import
switch,
peerid,
peerinfo,
stream/connection,
multiaddress,
crypto/crypto,
transports/[transport, tcptransport, wstransport, memorytransport],
options, tables, chronos, chronicles, sequtils,
switch, peerid, peerinfo, stream/connection, multiaddress,
crypto/crypto, transports/[transport, tcptransport],
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[
autonat/server,
autonatv2/server,
autonatv2/service,
autonatv2/client,
relay/relay,
relay/client,
relay/rtransport,
],
connmanager,
upgrademngrs/muxedupgrade,
observedaddrmanager,
autotls/service,
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
connmanager, upgrademngrs/muxedupgrade,
nameresolving/nameresolver,
errors,
utility
import services/wildcardresolverservice
errors, utility
export
switch, peerid, peerinfo, connection, multiaddress, crypto, errors, TLSPrivateKey,
TLSCertificate, TLSFlags, ServerFlags
const MemoryAutoAddress* = memorytransport.MemoryAutoAddress
switch, peerid, peerinfo, connection, multiaddress, crypto, errors
type
TransportProvider* {.deprecated: "Use TransportBuilder instead".} =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport {.gcsafe, raises: [].}
TransportBuilder* {.public.} =
proc(config: TransportConfig): Transport {.gcsafe, raises: [].}
TransportConfig* = ref object
upgr*: Upgrade
privateKey*: PrivateKey
autotls*: AutotlsService
TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [Defect].}
SecureProtocol* {.pure.} = enum
Noise
Noise,
Secio {.deprecated.}
SwitchBuilder* = ref object
privKey: Option[PrivateKey]
addresses: seq[MultiAddress]
secureManagers: seq[SecureProtocol]
muxers: seq[MuxerProvider]
transports: seq[TransportBuilder]
transports: seq[TransportProvider]
rng: ref HmacDrbgContext
maxConnections: int
maxIn: int
@@ -80,23 +57,18 @@ type
protoVersion: string
agentVersion: string
nameResolver: NameResolver
peerStoreCapacity: Opt[int]
peerStoreCapacity: Option[int]
autonat: bool
autonatV2ServerConfig: Opt[AutonatV2Config]
autonatV2Client: AutonatV2Client
autonatV2ServiceConfig: AutonatV2ServiceConfig
autotls: AutotlsService
circuitRelay: Relay
rdv: RendezVous
services: seq[Service]
observedAddrManager: ObservedAddrManager
enableWildcardResolver: bool
proc new*(T: type[SwitchBuilder]): T {.public.} =
## Creates a SwitchBuilder
let address =
MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("Should initialize to default")
let address = MultiAddress
.init("/ip4/127.0.0.1/tcp/0")
.expect("Should initialize to default")
SwitchBuilder(
privKey: none(PrivateKey),
@@ -107,59 +79,53 @@ proc new*(T: type[SwitchBuilder]): T {.public.} =
maxOut: -1,
maxConnsPerPeer: MaxConnectionsPerPeer,
protoVersion: ProtoVersion,
agentVersion: AgentVersion,
enableWildcardResolver: true,
)
agentVersion: AgentVersion)
proc withPrivateKey*(
b: SwitchBuilder, privateKey: PrivateKey
): SwitchBuilder {.public.} =
proc withPrivateKey*(b: SwitchBuilder, privateKey: PrivateKey): SwitchBuilder {.public.} =
## Set the private key of the switch. Will be used to
## generate a PeerId
b.privKey = some(privateKey)
b
proc withAddresses*(
b: SwitchBuilder, addresses: seq[MultiAddress], enableWildcardResolver: bool = true
): SwitchBuilder {.public.} =
## | Set the listening addresses of the switch
## | Calling it multiple time will override the value
b.addresses = addresses
b.enableWildcardResolver = enableWildcardResolver
b
proc withAddress*(
b: SwitchBuilder, address: MultiAddress, enableWildcardResolver: bool = true
): SwitchBuilder {.public.} =
proc withAddress*(b: SwitchBuilder, address: MultiAddress): SwitchBuilder {.public.} =
## | Set the listening address of the switch
## | Calling it multiple time will override the value
b.withAddresses(@[address], enableWildcardResolver)
b.addresses = @[address]
b
proc withAddresses*(b: SwitchBuilder, addresses: seq[MultiAddress]): SwitchBuilder {.public.} =
## | Set the listening addresses of the switch
## | Calling it multiple time will override the value
b.addresses = addresses
b
proc withSignedPeerRecord*(b: SwitchBuilder, sendIt = true): SwitchBuilder {.public.} =
b.sendSignedPeerRecord = sendIt
b
proc withMplex*(
b: SwitchBuilder, inTimeout = 5.minutes, outTimeout = 5.minutes, maxChannCount = 200
): SwitchBuilder {.public.} =
b: SwitchBuilder,
inTimeout = 5.minutes,
outTimeout = 5.minutes,
maxChannCount = 200): SwitchBuilder {.public.} =
## | Uses `Mplex <https://docs.libp2p.io/concepts/stream-multiplexing/#mplex>`_ as a multiplexer
## | `Timeout` is the duration after which a inactive connection will be closed
proc newMuxer(conn: Connection): Muxer =
Mplex.new(conn, inTimeout, outTimeout, maxChannCount)
Mplex.new(
conn,
inTimeout,
outTimeout,
maxChannCount)
assert b.muxers.countIt(it.codec == MplexCodec) == 0, "Mplex build multiple times"
b.muxers.add(MuxerProvider.new(newMuxer, MplexCodec))
b
proc withYamux*(
b: SwitchBuilder,
windowSize: int = YamuxDefaultWindowSize,
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes,
): SwitchBuilder =
proc newMuxer(conn: Connection): Muxer =
Yamux.new(conn, windowSize, inTimeout = inTimeout, outTimeout = outTimeout)
proc withYamux*(b: SwitchBuilder): SwitchBuilder =
proc newMuxer(conn: Connection): Muxer = Yamux.new(conn)
assert b.muxers.countIt(it.codec == YamuxCodec) == 0, "Yamux build multiple times"
b.muxers.add(MuxerProvider.new(newMuxer, YamuxCodec))
@@ -169,81 +135,24 @@ proc withNoise*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.secureManagers.add(SecureProtocol.Noise)
b
proc withTransport*(
b: SwitchBuilder, prov: TransportBuilder
): SwitchBuilder {.public.} =
proc withTransport*(b: SwitchBuilder, prov: TransportProvider): SwitchBuilder {.public.} =
## Use a custom transport
runnableExamples:
let switch = SwitchBuilder
.new()
.withTransport(
proc(config: TransportConfig): Transport =
TcpTransport.new(flags, config.upgr)
)
let switch =
SwitchBuilder.new()
.withTransport(proc(upgr: Upgrade): Transport = TcpTransport.new(flags, upgr))
.build()
b.transports.add(prov)
b
proc withTransport*(
b: SwitchBuilder, prov: TransportProvider
): SwitchBuilder {.deprecated: "Use TransportBuilder instead".} =
## Use a custom transport
runnableExamples:
let switch = SwitchBuilder
.new()
.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TcpTransport.new(flags, upgr)
)
.build()
let tBuilder: TransportBuilder = proc(config: TransportConfig): Transport =
prov(config.upgr, config.privateKey)
b.withTransport(tBuilder)
proc withTcpTransport*(
b: SwitchBuilder, flags: set[ServerFlags] = {}
): SwitchBuilder {.public.} =
b.withTransport(
proc(config: TransportConfig): Transport =
TcpTransport.new(flags, config.upgr)
)
proc withWsTransport*(
b: SwitchBuilder,
tlsPrivateKey: TLSPrivateKey = nil,
tlsCertificate: TLSCertificate = nil,
tlsFlags: set[TLSFlags] = {},
flags: set[ServerFlags] = {},
): SwitchBuilder =
b.withTransport(
proc(config: TransportConfig): Transport =
WsTransport.new(
config.upgr, tlsPrivateKey, tlsCertificate, config.autotls, tlsFlags, flags
)
)
when defined(libp2p_quic_support):
import transports/quictransport
proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(config: TransportConfig): Transport =
QuicTransport.new(config.upgr, config.privateKey)
)
proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(config: TransportConfig): Transport =
MemoryTransport.new(config.upgr)
)
proc withTcpTransport*(b: SwitchBuilder, flags: set[ServerFlags] = {}): SwitchBuilder {.public.} =
b.withTransport(proc(upgr: Upgrade): Transport = TcpTransport.new(flags, upgr))
proc withRng*(b: SwitchBuilder, rng: ref HmacDrbgContext): SwitchBuilder {.public.} =
b.rng = rng
b
proc withMaxConnections*(
b: SwitchBuilder, maxConnections: int
): SwitchBuilder {.public.} =
proc withMaxConnections*(b: SwitchBuilder, maxConnections: int): SwitchBuilder {.public.} =
## Maximum concurrent connections of the switch. You should either use this, or
## `withMaxIn <#withMaxIn,SwitchBuilder,int>`_ & `withMaxOut<#withMaxOut,SwitchBuilder,int>`_
b.maxConnections = maxConnections
@@ -259,31 +168,23 @@ proc withMaxOut*(b: SwitchBuilder, maxOut: int): SwitchBuilder {.public.} =
b.maxOut = maxOut
b
proc withMaxConnsPerPeer*(
b: SwitchBuilder, maxConnsPerPeer: int
): SwitchBuilder {.public.} =
proc withMaxConnsPerPeer*(b: SwitchBuilder, maxConnsPerPeer: int): SwitchBuilder {.public.} =
b.maxConnsPerPeer = maxConnsPerPeer
b
proc withPeerStore*(b: SwitchBuilder, capacity: int): SwitchBuilder {.public.} =
b.peerStoreCapacity = Opt.some(capacity)
b.peerStoreCapacity = some(capacity)
b
proc withProtoVersion*(
b: SwitchBuilder, protoVersion: string
): SwitchBuilder {.public.} =
proc withProtoVersion*(b: SwitchBuilder, protoVersion: string): SwitchBuilder {.public.} =
b.protoVersion = protoVersion
b
proc withAgentVersion*(
b: SwitchBuilder, agentVersion: string
): SwitchBuilder {.public.} =
proc withAgentVersion*(b: SwitchBuilder, agentVersion: string): SwitchBuilder {.public.} =
b.agentVersion = agentVersion
b
proc withNameResolver*(
b: SwitchBuilder, nameResolver: NameResolver
): SwitchBuilder {.public.} =
proc withNameResolver*(b: SwitchBuilder, nameResolver: NameResolver): SwitchBuilder {.public.} =
b.nameResolver = nameResolver
b
@@ -291,33 +192,11 @@ proc withAutonat*(b: SwitchBuilder): SwitchBuilder =
b.autonat = true
b
proc withAutonatV2Server*(
b: SwitchBuilder, config: AutonatV2Config = AutonatV2Config.new()
): SwitchBuilder =
b.autonatV2ServerConfig = Opt.some(config)
b
proc withAutonatV2*(
b: SwitchBuilder, serviceConfig = AutonatV2ServiceConfig.new()
): SwitchBuilder =
b.autonatV2Client = AutonatV2Client.new(b.rng)
b.autonatV2ServiceConfig = serviceConfig
b
when defined(libp2p_autotls_support):
proc withAutotls*(
b: SwitchBuilder, config: AutotlsConfig = AutotlsConfig.new()
): SwitchBuilder {.public.} =
b.autotls = AutotlsService.new(config = config)
b
proc withCircuitRelay*(b: SwitchBuilder, r: Relay = Relay.new()): SwitchBuilder =
b.circuitRelay = r
b
proc withRendezVous*(
b: SwitchBuilder, rdv: RendezVous = RendezVous.new()
): SwitchBuilder =
proc withRendezVous*(b: SwitchBuilder, rdv: RendezVous = RendezVous.new()): SwitchBuilder =
b.rdv = rdv
b
@@ -325,55 +204,40 @@ proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
b.services = services
b
proc withObservedAddrManager*(
b: SwitchBuilder, observedAddrManager: ObservedAddrManager
): SwitchBuilder =
b.observedAddrManager = observedAddrManager
b
proc build*(b: SwitchBuilder): Switch
{.raises: [Defect, LPError], public.} =
proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
if b.rng == nil: # newRng could fail
raise newException(Defect, "Cannot initialize RNG")
let pkRes = PrivateKey.random(b.rng[])
let seckey = b.privKey.get(otherwise = pkRes.expect("Expected default Private Key"))
let
seckey = b.privKey.get(otherwise = pkRes.expect("Expected default Private Key"))
if b.secureManagers.len == 0:
debug "no secure managers defined. Adding noise by default"
b.secureManagers.add(SecureProtocol.Noise)
var secureManagerInstances: seq[Secure]
var
secureManagerInstances: seq[Secure]
if SecureProtocol.Noise in b.secureManagers:
secureManagerInstances.add(Noise.new(b.rng, seckey).Secure)
let peerInfo = PeerInfo.new(
seckey, b.addresses, protoVersion = b.protoVersion, agentVersion = b.agentVersion
)
let identify =
if b.observedAddrManager != nil:
Identify.new(peerInfo, b.sendSignedPeerRecord, b.observedAddrManager)
else:
Identify.new(peerInfo, b.sendSignedPeerRecord)
let
peerInfo = PeerInfo.new(
seckey,
b.addresses,
protoVersion = b.protoVersion,
agentVersion = b.agentVersion)
let
connManager =
ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
ms = MultistreamSelect.new()
muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)
muxedUpgrade = MuxedUpgrade.new(identify, b.muxers, secureManagerInstances, connManager, ms)
if not b.autotls.isNil():
b.services.insert(b.autotls, 0)
let transports = block:
var transports: seq[Transport]
for tProvider in b.transports:
transports.add(
tProvider(
TransportConfig(upgr: muxedUpgrade, privateKey: seckey, autotls: b.autotls)
)
)
transports
let
transports = block:
var transports: seq[Transport]
for tProvider in b.transports:
transports.add(tProvider(muxedUpgrade))
transports
if b.secureManagers.len == 0:
b.secureManagers &= SecureProtocol.Noise
@@ -381,44 +245,26 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
if isNil(b.rng):
b.rng = newRng()
let peerStore = block:
b.peerStoreCapacity.withValue(capacity):
PeerStore.new(identify, capacity)
let peerStore =
if isSome(b.peerStoreCapacity):
PeerStore.new(b.peerStoreCapacity.get())
else:
PeerStore.new(identify)
if b.enableWildcardResolver:
b.services.insert(WildcardAddressResolverService.new(), 0)
if not isNil(b.autonatV2Client):
b.services.add(
AutonatV2Service.new(
b.rng, client = b.autonatV2Client, config = b.autonatV2ServiceConfig
)
)
PeerStore.new()
let switch = newSwitch(
peerInfo = peerInfo,
transports = transports,
identity = identify,
secureManagers = secureManagerInstances,
connManager = connManager,
ms = ms,
nameResolver = b.nameResolver,
peerStore = peerStore,
services = b.services,
)
switch.mount(identify)
if not isNil(b.autonatV2Client):
b.autonatV2Client.setup(switch)
switch.mount(b.autonatV2Client)
b.autonatV2ServerConfig.withValue(config):
switch.mount(AutonatV2.new(switch, config = config))
services = b.services)
if b.autonat:
switch.mount(Autonat.new(switch))
let autonat = Autonat.new(switch)
switch.mount(autonat)
if not isNil(b.circuitRelay):
if b.circuitRelay of RelayClient:
@@ -432,103 +278,46 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
return switch
type TransportType* {.pure.} = enum
QUIC
TCP
Memory
proc newStandardSwitchBuilder*(
privKey = none(PrivateKey),
addrs: MultiAddress | seq[MultiAddress] = newSeq[MultiAddress](),
transport: TransportType = TransportType.TCP,
transportFlags: set[ServerFlags] = {},
rng = newRng(),
secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes,
maxConnections = MaxConnections,
maxIn = -1,
maxOut = -1,
maxConnsPerPeer = MaxConnectionsPerPeer,
nameResolver: NameResolver = nil,
sendSignedPeerRecord = false,
peerStoreCapacity = 1000,
): SwitchBuilder {.raises: [LPError], public.} =
proc newStandardSwitch*(
privKey = none(PrivateKey),
addrs: MultiAddress | seq[MultiAddress] = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
secureManagers: openArray[SecureProtocol] = [
SecureProtocol.Noise,
],
transportFlags: set[ServerFlags] = {},
rng = newRng(),
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes,
maxConnections = MaxConnections,
maxIn = -1,
maxOut = -1,
maxConnsPerPeer = MaxConnectionsPerPeer,
nameResolver: NameResolver = nil,
sendSignedPeerRecord = false,
peerStoreCapacity = 1000): Switch
{.raises: [Defect, LPError], public.} =
## Helper for common switch configurations.
if SecureProtocol.Secio in secureManagers:
quit("Secio is deprecated!") # use of secio is unsafe
let addrs = when addrs is MultiAddress: @[addrs] else: addrs
var b = SwitchBuilder
.new()
.withAddresses(addrs)
.withRng(rng)
.withSignedPeerRecord(sendSignedPeerRecord)
.withMaxConnections(maxConnections)
.withMaxIn(maxIn)
.withMaxOut(maxOut)
.withMaxConnsPerPeer(maxConnsPerPeer)
.withPeerStore(capacity = peerStoreCapacity)
.withPeerStore(capacity=peerStoreCapacity)
.withMplex(inTimeout, outTimeout)
.withTcpTransport(transportFlags)
.withNameResolver(nameResolver)
.withNoise()
var addrs =
when addrs is MultiAddress:
@[addrs]
else:
addrs
if privKey.isSome():
b = b.withPrivateKey(privKey.get())
case transport
of TransportType.QUIC:
when defined(libp2p_quic_support):
if addrs.len == 0:
addrs = @[MultiAddress.init("/ip4/0.0.0.0/udp/0/quic-v1").tryGet()]
b = b.withQuicTransport().withAddresses(addrs)
else:
raiseAssert "QUIC not supported in this build"
of TransportType.TCP:
if addrs.len == 0:
addrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()]
b = b.withTcpTransport(transportFlags).withAddresses(addrs).withMplex(
inTimeout, outTimeout
)
of TransportType.Memory:
if addrs.len == 0:
addrs = @[MultiAddress.init(MemoryAutoAddress).tryGet()]
b = b.withMemoryTransport().withAddresses(addrs).withMplex(inTimeout, outTimeout)
privKey.withValue(pkey):
b = b.withPrivateKey(pkey)
b
proc newStandardSwitch*(
privKey = none(PrivateKey),
addrs: MultiAddress | seq[MultiAddress] = newSeq[MultiAddress](),
transport: TransportType = TransportType.TCP,
transportFlags: set[ServerFlags] = {},
rng = newRng(),
secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes,
maxConnections = MaxConnections,
maxIn = -1,
maxOut = -1,
maxConnsPerPeer = MaxConnectionsPerPeer,
nameResolver: NameResolver = nil,
sendSignedPeerRecord = false,
peerStoreCapacity = 1000,
): Switch {.raises: [LPError], public.} =
newStandardSwitchBuilder(
privKey = privKey,
addrs = addrs,
transport = transport,
transportFlags = transportFlags,
rng = rng,
secureManagers = secureManagers,
inTimeout = inTimeout,
outTimeout = outTimeout,
maxConnections = maxConnections,
maxIn = maxIn,
maxOut = maxOut,
maxConnsPerPeer = maxConnsPerPeer,
nameResolver = nameResolver,
sendSignedPeerRecord = sendSignedPeerRecord,
peerStoreCapacity = peerStoreCapacity,
)
.build()
b.build()

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -9,28 +9,23 @@
## This module implements CID (Content IDentifier).
{.push raises: [].}
{.used.}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import tables, hashes
import multibase, multicodec, multihash, vbuffer, varint, results
import stew/base58
import ./utils/sequninit
import multibase, multicodec, multihash, vbuffer, varint
import stew/[base58, results]
export results
type
CidError* {.pure.} = enum
Error
Incorrect
Unsupported
Overrun
Error, Incorrect, Unsupported, Overrun
CidVersion* = enum
CIDvIncorrect
CIDv0
CIDv1
CIDvReserved
CIDvIncorrect, CIDv0, CIDv1, CIDvReserved
Cid* = object
cidver*: CidVersion
@@ -38,52 +33,54 @@ type
hpos*: int
data*: VBuffer
const ContentIdsList = [
multiCodec("raw"),
multiCodec("dag-pb"),
multiCodec("dag-cbor"),
multiCodec("dag-json"),
multiCodec("libp2p-key"),
multiCodec("git-raw"),
multiCodec("eth-block"),
multiCodec("eth-block-list"),
multiCodec("eth-tx-trie"),
multiCodec("eth-tx"),
multiCodec("eth-tx-receipt-trie"),
multiCodec("eth-tx-receipt"),
multiCodec("eth-state-trie"),
multiCodec("eth-account-snapshot"),
multiCodec("eth-storage-trie"),
multiCodec("bitcoin-block"),
multiCodec("bitcoin-tx"),
multiCodec("zcash-block"),
multiCodec("zcash-tx"),
multiCodec("stellar-block"),
multiCodec("stellar-tx"),
multiCodec("decred-block"),
multiCodec("decred-tx"),
multiCodec("dash-block"),
multiCodec("dash-tx"),
multiCodec("torrent-info"),
multiCodec("torrent-file"),
multiCodec("ed25519-pub"),
]
const
ContentIdsList = [
multiCodec("raw"),
multiCodec("dag-pb"),
multiCodec("dag-cbor"),
multiCodec("dag-json"),
multiCodec("git-raw"),
multiCodec("eth-block"),
multiCodec("eth-block-list"),
multiCodec("eth-tx-trie"),
multiCodec("eth-tx"),
multiCodec("eth-tx-receipt-trie"),
multiCodec("eth-tx-receipt"),
multiCodec("eth-state-trie"),
multiCodec("eth-account-snapshot"),
multiCodec("eth-storage-trie"),
multiCodec("bitcoin-block"),
multiCodec("bitcoin-tx"),
multiCodec("zcash-block"),
multiCodec("zcash-tx"),
multiCodec("stellar-block"),
multiCodec("stellar-tx"),
multiCodec("decred-block"),
multiCodec("decred-tx"),
multiCodec("dash-block"),
multiCodec("dash-tx"),
multiCodec("torrent-info"),
multiCodec("torrent-file"),
multiCodec("ed25519-pub")
]
proc initCidCodeTable(): Table[int, MultiCodec] {.compileTime.} =
for item in ContentIdsList:
result[int(item)] = item
const CodeContentIds = initCidCodeTable()
const
CodeContentIds = initCidCodeTable()
template orError*(exp: untyped, err: untyped): untyped =
exp.mapErr do(_: auto) -> auto:
err
(exp.mapErr do (_: auto) -> auto: err)
proc decode(data: openArray[byte]): Result[Cid, CidError] =
if len(data) == 34 and data[0] == 0x12'u8 and data[1] == 0x20'u8:
ok(
Cid(cidver: CIDv0, mcodec: multiCodec("dag-pb"), hpos: 0, data: initVBuffer(data))
)
ok(Cid(
cidver: CIDv0,
mcodec: multiCodec("dag-pb"),
hpos: 0,
data: initVBuffer(data)))
else:
var version, codec: uint64
var res, offset: int
@@ -104,18 +101,21 @@ proc decode(data: openArray[byte]): Result[Cid, CidError] =
err(CidError.Incorrect)
else:
offset += res
var mcodec =
CodeContentIds.getOrDefault(cast[int](codec), InvalidMultiCodec)
var mcodec = CodeContentIds.getOrDefault(cast[int](codec),
InvalidMultiCodec)
if mcodec == InvalidMultiCodec:
err(CidError.Incorrect)
else:
if not MultiHash.validate(
vb.buffer.toOpenArray(vb.offset, vb.buffer.high)
):
if not MultiHash.validate(vb.buffer.toOpenArray(vb.offset,
vb.buffer.high)):
err(CidError.Incorrect)
else:
vb.finish()
ok(Cid(cidver: CIDv1, mcodec: mcodec, hpos: offset, data: vb))
ok(Cid(
cidver: CIDv1,
mcodec: mcodec,
hpos: offset,
data: vb))
proc decode(data: openArray[char]): Result[Cid, CidError] =
var buffer: seq[byte]
@@ -124,7 +124,7 @@ proc decode(data: openArray[char]): Result[Cid, CidError] =
return err(CidError.Incorrect)
if len(data) == 46:
if data[0] == 'Q' and data[1] == 'm':
buffer = newSeqUninit[byte](BTCBase58.decodedLength(len(data)))
buffer = newSeq[byte](BTCBase58.decodedLength(len(data)))
if BTCBase58.decode(data, buffer, plen) != Base58Status.Success:
return err(CidError.Incorrect)
buffer.setLen(plen)
@@ -132,7 +132,7 @@ proc decode(data: openArray[char]): Result[Cid, CidError] =
let length = MultiBase.decodedLength(data[0], len(data))
if length == -1:
return err(CidError.Incorrect)
buffer = newSeqUninit[byte](length)
buffer = newSeq[byte](length)
if MultiBase.decode(data, buffer, plen) != MultiBaseStatus.Success:
return err(CidError.Incorrect)
buffer.setLen(plen)
@@ -175,9 +175,7 @@ proc mhash*(cid: Cid): Result[MultiHash, CidError] =
if cid.cidver notin {CIDv0, CIDv1}:
err(CidError.Incorrect)
else:
MultiHash.init(cid.data.buffer.toOpenArray(cid.hpos, cid.data.high)).orError(
CidError.Incorrect
)
MultiHash.init(cid.data.buffer.toOpenArray(cid.hpos, cid.data.high)).orError(CidError.Incorrect)
proc contentType*(cid: Cid): Result[MultiCodec, CidError] =
## Returns content type part of CID
@@ -190,15 +188,12 @@ proc version*(cid: Cid): CidVersion =
## Returns CID version
result = cid.cidver
proc init*[T: char | byte](
ctype: typedesc[Cid], data: openArray[T]
): Result[Cid, CidError] =
proc init*[T: char|byte](ctype: typedesc[Cid], data: openArray[T]): Result[Cid, CidError] =
## Create new content identifier using array of bytes or string ``data``.
decode(data)
proc init*(
ctype: typedesc[Cid], version: CidVersion, content: MultiCodec, hash: MultiHash
): Result[Cid, CidError] =
proc init*(ctype: typedesc[Cid], version: CidVersion, content: MultiCodec,
hash: MultiHash): Result[Cid, CidError] =
## Create new content identifier using content type ``content`` and
## MultiHash ``hash`` using version ``version``.
##
@@ -221,7 +216,8 @@ proc init*(
res.data.finish()
return ok(res)
elif version == CIDv1:
let mcodec = CodeContentIds.getOrDefault(cast[int](content), InvalidMultiCodec)
let mcodec = CodeContentIds.getOrDefault(cast[int](content),
InvalidMultiCodec)
if mcodec == InvalidMultiCodec:
return err(CidError.Incorrect)
res.mcodec = mcodec
@@ -240,9 +236,11 @@ proc `==`*(a: Cid, b: Cid): bool =
## are equal, ``false`` otherwise.
if a.mcodec == b.mcodec:
var ah, bh: MultiHash
if MultiHash.decode(a.data.buffer.toOpenArray(a.hpos, a.data.high), ah).isErr:
if MultiHash.decode(
a.data.buffer.toOpenArray(a.hpos, a.data.high), ah).isErr:
return false
if MultiHash.decode(b.data.buffer.toOpenArray(b.hpos, b.data.high), bh).isErr:
if MultiHash.decode(
b.data.buffer.toOpenArray(b.hpos, b.data.high), bh).isErr:
return false
result = (ah == bh)
@@ -266,6 +264,12 @@ proc write*(vb: var VBuffer, cid: Cid) {.inline.} =
## Write CID value ``cid`` to buffer ``vb``.
vb.writeArray(cid.data.buffer)
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
cid: Cid): string {.inline.} =
## Get MultiBase encoded representation of ``cid`` using encoding
## ``encoding``.
result = MultiBase.encode(encoding, cid.data.buffer).tryGet()
proc hash*(cid: Cid): Hash {.inline.} =
hash(cid.data.buffer)
@@ -275,6 +279,9 @@ proc `$`*(cid: Cid): string =
BTCBase58.encode(cid.data.buffer)
elif cid.cidver == CIDv1:
let res = MultiBase.encode("base58btc", cid.data.buffer)
res.get("")
if res.isOk():
res.get()
else:
""
else:
""

View File

@@ -7,11 +7,19 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/[tables, sequtils, sets]
import std/[options, tables, sequtils, sets]
import pkg/[chronos, chronicles, metrics]
import peerinfo, peerstore, stream/connection, muxers/muxer, utils/semaphore, errors
import peerinfo,
peerstore,
stream/connection,
muxers/muxer,
utils/semaphore,
errors
logScope:
topics = "libp2p connmanager"
@@ -24,16 +32,14 @@ const
type
TooManyConnectionsError* = object of LPError
AlreadyExpectingConnectionError* = object of LPError
ConnEventKind* {.pure.} = enum
Connected
# A connection was made and securely upgraded - there may be
# more than one concurrent connection thus more than one upgrade
# event per peer.
Disconnected
# Peer disconnected - this event is fired once per upgrade
# when the associated connection is terminated.
Connected, # A connection was made and securely upgraded - there may be
# more than one concurrent connection thus more than one upgrade
# event per peer.
Disconnected # Peer disconnected - this event is fired once per upgrade
# when the associated connection is terminated.
ConnEvent* = object
case kind*: ConnEventKind
@@ -42,34 +48,38 @@ type
else:
discard
ConnEventHandler* = proc(peerId: PeerId, event: ConnEvent): Future[void] {.
gcsafe, async: (raises: [CancelledError])
.}
ConnEventHandler* =
proc(peerId: PeerId, event: ConnEvent): Future[void]
{.gcsafe, raises: [Defect].}
PeerEventKind* {.pure.} = enum
Left
Left,
Identified,
Joined
Identified
PeerEvent* = object
case kind*: PeerEventKind
of PeerEventKind.Joined, PeerEventKind.Identified:
initiator*: bool
else:
discard
of PeerEventKind.Joined:
initiator*: bool
else:
discard
PeerEventHandler* = proc(peerId: PeerId, event: PeerEvent): Future[void] {.
gcsafe, async: (raises: [CancelledError])
.}
PeerEventHandler* =
proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [Defect].}
MuxerHolder = object
muxer: Muxer
handle: Future[void]
ConnManager* = ref object of RootObj
maxConnsPerPeer: int
inSema*: AsyncSemaphore
outSema*: AsyncSemaphore
muxed: Table[PeerId, seq[Muxer]]
conns: Table[PeerId, HashSet[Connection]]
muxed: Table[Connection, MuxerHolder]
connEvents: array[ConnEventKind, OrderedSet[ConnEventHandler]]
peerEvents: array[PeerEventKind, OrderedSet[PeerEventHandler]]
expectedConnectionsOverLimit*: Table[(PeerId, Direction), Future[Muxer]]
expectedConnections: Table[PeerId, Future[Connection]]
peerStore*: PeerStore
ConnectionSlot* = object
@@ -79,13 +89,11 @@ type
proc newTooManyConnectionsError(): ref TooManyConnectionsError {.inline.} =
result = newException(TooManyConnectionsError, "Too many connections")
proc new*(
C: type ConnManager,
maxConnsPerPeer = MaxConnectionsPerPeer,
maxConnections = MaxConnections,
maxIn = -1,
maxOut = -1,
): ConnManager =
proc new*(C: type ConnManager,
maxConnsPerPeer = MaxConnectionsPerPeer,
maxConnections = MaxConnections,
maxIn = -1,
maxOut = -1): ConnManager =
var inSema, outSema: AsyncSemaphore
if maxIn > 0 or maxOut > 0:
inSema = newAsyncSemaphore(maxIn)
@@ -96,38 +104,51 @@ proc new*(
else:
raiseAssert "Invalid connection counts!"
C(maxConnsPerPeer: maxConnsPerPeer, inSema: inSema, outSema: outSema)
C(maxConnsPerPeer: maxConnsPerPeer,
inSema: inSema,
outSema: outSema)
proc connCount*(c: ConnManager, peerId: PeerId): int =
c.muxed.getOrDefault(peerId).len
c.conns.getOrDefault(peerId).len
proc connectedPeers*(c: ConnManager, dir: Direction): seq[PeerId] =
var peers = newSeq[PeerId]()
for peerId, mux in c.muxed:
if mux.anyIt(it.connection.dir == dir):
for peerId, conns in c.conns:
if conns.anyIt(it.dir == dir):
peers.add(peerId)
return peers
proc getConnections*(c: ConnManager): Table[PeerId, seq[Muxer]] =
return c.muxed
proc addConnEventHandler*(
c: ConnManager, handler: ConnEventHandler, kind: ConnEventKind
) =
proc addConnEventHandler*(c: ConnManager,
handler: ConnEventHandler,
kind: ConnEventKind) =
## Add connection event handler - handlers must not raise exceptions!
##
if isNil(handler):
return
c.connEvents[kind].incl(handler)
proc removeConnEventHandler*(
c: ConnManager, handler: ConnEventHandler, kind: ConnEventKind
) =
c.connEvents[kind].excl(handler)
try:
if isNil(handler): return
c.connEvents[kind].incl(handler)
except Exception as exc:
# TODO: there is an Exception being raised
# somewhere in the depths of the std.
# Might be related to https://github.com/nim-lang/Nim/issues/17382
proc triggerConnEvent*(
c: ConnManager, peerId: PeerId, event: ConnEvent
) {.async: (raises: [CancelledError]).} =
raiseAssert exc.msg
proc removeConnEventHandler*(c: ConnManager,
handler: ConnEventHandler,
kind: ConnEventKind) =
try:
c.connEvents[kind].excl(handler)
except Exception as exc:
# TODO: there is an Exception being raised
# somewhere in the depths of the std.
# Might be related to https://github.com/nim-lang/Nim/issues/17382
raiseAssert exc.msg
proc triggerConnEvent*(c: ConnManager,
peerId: PeerId,
event: ConnEvent) {.async, gcsafe.} =
try:
trace "About to trigger connection events", peer = peerId
if c.connEvents[event.kind].len() > 0:
@@ -140,32 +161,54 @@ proc triggerConnEvent*(
except CancelledError as exc:
raise exc
except CatchableError as exc:
warn "Exception in triggerConnEvent",
description = exc.msg, peer = peerId, event = $event
warn "Exception in triggerConnEvents",
msg = exc.msg, peer = peerId, event = $event
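A registration sketch for these event handlers (connManager is assumed to be an existing ConnManager; the handler signature follows the newer async declaration shown above):

proc onConnEvent(peerId: PeerId, event: ConnEvent) {.async: (raises: [CancelledError]).} =
  trace "connection event", peerId, kind = $event.kind

connManager.addConnEventHandler(onConnEvent, ConnEventKind.Connected)
connManager.addConnEventHandler(onConnEvent, ConnEventKind.Disconnected)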
proc addPeerEventHandler*(
c: ConnManager, handler: PeerEventHandler, kind: PeerEventKind
) =
proc addPeerEventHandler*(c: ConnManager,
handler: PeerEventHandler,
kind: PeerEventKind) =
## Add peer event handler - handlers must not raise exceptions!
##
if isNil(handler):
return
c.peerEvents[kind].incl(handler)
if isNil(handler): return
try:
c.peerEvents[kind].incl(handler)
except Exception as exc:
# TODO: there is an Exception being raised
# somewhere in the depths of the std.
# Might be related to https://github.com/nim-lang/Nim/issues/17382
proc removePeerEventHandler*(
c: ConnManager, handler: PeerEventHandler, kind: PeerEventKind
) =
c.peerEvents[kind].excl(handler)
raiseAssert exc.msg
proc removePeerEventHandler*(c: ConnManager,
handler: PeerEventHandler,
kind: PeerEventKind) =
try:
c.peerEvents[kind].excl(handler)
except Exception as exc:
# TODO: there is an Exception being raised
# somewhere in the depths of the std.
# Might be related to https://github.com/nim-lang/Nim/issues/17382
raiseAssert exc.msg
proc triggerPeerEvents*(c: ConnManager,
peerId: PeerId,
event: PeerEvent) {.async, gcsafe.} =
proc triggerPeerEvents*(
c: ConnManager, peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
trace "About to trigger peer events", peer = peerId
if c.peerEvents[event.kind].len == 0:
return
try:
let count = c.connCount(peerId)
if event.kind == PeerEventKind.Joined and count != 1:
trace "peer already joined", peer = peerId, event = $event
return
elif event.kind == PeerEventKind.Left and count != 0:
trace "peer still connected or already left", peer = peerId, event = $event
return
trace "triggering peer events", peer = peerId, event = $event
var peerEvents: seq[Future[void]]
@@ -176,29 +219,33 @@ proc triggerPeerEvents*(
except CancelledError as exc:
raise exc
except CatchableError as exc: # handlers should not raise!
warn "Exception in triggerPeerEvents", description = exc.msg, peer = peerId
warn "Exception in triggerPeerEvents", exc = exc.msg, peer = peerId
proc expectConnection*(
c: ConnManager, p: PeerId, dir: Direction
): Future[Muxer] {.async: (raises: [AlreadyExpectingConnectionError, CancelledError]).} =
proc expectConnection*(c: ConnManager, p: PeerId): Future[Connection] {.async.} =
## Wait for a peer to connect to us. This will bypass the `MaxConnectionsPerPeer` limit.
let key = (p, dir)
if key in c.expectedConnectionsOverLimit:
raise newException(
AlreadyExpectingConnectionError,
"Already expecting an incoming connection from that peer: " & shortLog(p),
)
if p in c.expectedConnections:
raise LPError.newException("Already expecting a connection from that peer")
let future = Future[Muxer].Raising([CancelledError]).init()
c.expectedConnectionsOverLimit[key] = future
let future = newFuture[Connection]()
c.expectedConnections[p] = future
try:
return await future
finally:
c.expectedConnectionsOverLimit.del(key)
c.expectedConnections.del(p)
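A sketch of how the over-limit expectation is used (somePeer and the direction are placeholders; the newer API shown in this diff resolves with the upgraded Muxer):

# Accept one extra inbound connection from `somePeer` even if the per-peer
# limit is already reached; resolves once the peer dials and upgrades.
let mux = await connManager.expectConnection(somePeer, Direction.In)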
proc contains*(c: ConnManager, conn: Connection): bool =
## checks if a connection is being tracked by the
## connection manager
##
if isNil(conn):
return
return conn in c.conns.getOrDefault(conn.peerId)
proc contains*(c: ConnManager, peerId: PeerId): bool =
peerId in c.muxed
peerId in c.conns
proc contains*(c: ConnManager, muxer: Muxer): bool =
## checks if a muxer is being tracked by the connection
@@ -206,230 +253,327 @@ proc contains*(c: ConnManager, muxer: Muxer): bool =
##
if isNil(muxer):
return false
return
let conn = muxer.connection
return muxer in c.muxed.getOrDefault(conn.peerId)
if conn notin c:
return
proc closeMuxer(muxer: Muxer) {.async: (raises: [CancelledError]).} =
trace "Cleaning up muxer", m = muxer
if conn notin c.muxed:
return
await muxer.close()
if not (isNil(muxer.handler)):
return muxer == c.muxed.getOrDefault(conn).muxer
proc closeMuxerHolder(muxerHolder: MuxerHolder) {.async.} =
trace "Cleaning up muxer", m = muxerHolder.muxer
await muxerHolder.muxer.close()
if not(isNil(muxerHolder.handle)):
try:
await muxer.handler
await muxerHolder.handle # TODO noraises?
except CatchableError as exc:
trace "Exception in close muxer handler", description = exc.msg
trace "Cleaned up muxer", m = muxer
trace "Exception in close muxer handler", exc = exc.msg
trace "Cleaned up muxer", m = muxerHolder.muxer
proc delConn(c: ConnManager, conn: Connection) =
let peerId = conn.peerId
c.conns.withValue(peerId, peerConns):
peerConns[].excl(conn)
if peerConns[].len == 0:
c.conns.del(peerId) # invalidates `peerConns`
libp2p_peers.set(c.conns.len.int64)
trace "Removed connection", conn
proc cleanupConn(c: ConnManager, conn: Connection) {.async.} =
## clean connection's resources such as muxers and streams
if isNil(conn):
trace "Wont cleanup a nil connection"
return
# Remove connection from all tables without async breaks
var muxer = some(MuxerHolder())
if not c.muxed.pop(conn, muxer.get()):
muxer = none(MuxerHolder)
delConn(c, conn)
proc muxCleanup(c: ConnManager, mux: Muxer) {.async: (raises: []).} =
try:
trace "Triggering disconnect events", mux
let peerId = mux.connection.peerId
if muxer.isSome:
await closeMuxerHolder(muxer.get())
finally:
await conn.close()
let muxers = c.muxed.getOrDefault(peerId).filterIt(it != mux)
if muxers.len > 0:
c.muxed[peerId] = muxers
else:
c.muxed.del(peerId)
libp2p_peers.set(c.muxed.len.int64)
await c.triggerPeerEvents(peerId, PeerEvent(kind: PeerEventKind.Left))
trace "Connection cleaned up", conn
if not (c.peerStore.isNil):
c.peerStore.cleanup(peerId)
proc onConnUpgraded(c: ConnManager, conn: Connection) {.async.} =
try:
trace "Triggering connect events", conn
conn.upgrade()
await c.triggerConnEvent(peerId, ConnEvent(kind: ConnEventKind.Disconnected))
let peerId = conn.peerId
await c.triggerPeerEvents(
peerId, PeerEvent(kind: PeerEventKind.Joined, initiator: conn.dir == Direction.Out))
await c.triggerConnEvent(
peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: conn.dir == Direction.In))
except CatchableError as exc:
# This is a top-level procedure which will run as a separate task, so it
# does not need to propagate CancelledError and should handle other errors
warn "Unexpected exception peer cleanup handler", mux, description = exc.msg
warn "Unexpected exception in switch peer connection cleanup",
conn, msg = exc.msg
proc onClose(c: ConnManager, mux: Muxer) {.async: (raises: []).} =
proc peerCleanup(c: ConnManager, conn: Connection) {.async.} =
try:
trace "Triggering disconnect events", conn
let peerId = conn.peerId
await c.triggerConnEvent(
peerId, ConnEvent(kind: ConnEventKind.Disconnected))
await c.triggerPeerEvents(peerId, PeerEvent(kind: PeerEventKind.Left))
if not(c.peerStore.isNil):
c.peerStore.cleanup(peerId)
except CatchableError as exc:
# This is a top-level procedure which will run as a separate task, so it
# does not need to propagate CancelledError and should handle other errors
warn "Unexpected exception peer cleanup handler",
conn, msg = exc.msg
proc onClose(c: ConnManager, conn: Connection) {.async.} =
## connection close event handler
##
## triggers the connections resource cleanup
##
try:
await mux.connection.join()
trace "Connection closed, cleaning up", mux
await conn.join()
trace "Connection closed, cleaning up", conn
await c.cleanupConn(conn)
except CancelledError:
# This is a top-level procedure which will run as a separate task, so it
# does not need to propagate CancelledError.
debug "Unexpected cancellation in connection manager's cleanup", conn
except CatchableError as exc:
debug "Unexpected exception in connection manager's cleanup",
description = exc.msg, mux
errMsg = exc.msg, conn
finally:
await c.muxCleanup(mux)
trace "Triggering peerCleanup", conn
asyncSpawn c.peerCleanup(conn)
proc selectMuxer*(c: ConnManager, peerId: PeerId, dir: Direction): Muxer =
proc selectConn*(c: ConnManager,
peerId: PeerId,
dir: Direction): Connection =
## Select a connection for the provided peer and direction
##
let conns = toSeq(c.muxed.getOrDefault(peerId)).filterIt(it.connection.dir == dir)
let conns = toSeq(
c.conns.getOrDefault(peerId))
.filterIt( it.dir == dir )
if conns.len > 0:
return conns[0]
proc selectMuxer*(c: ConnManager, peerId: PeerId): Muxer =
proc selectConn*(c: ConnManager, peerId: PeerId): Connection =
## Select a connection for the provided peer, giving priority
## to outgoing connections
##
var mux = c.selectMuxer(peerId, Direction.Out)
if isNil(mux):
mux = c.selectMuxer(peerId, Direction.In)
if isNil(mux):
var conn = c.selectConn(peerId, Direction.Out)
if isNil(conn):
conn = c.selectConn(peerId, Direction.In)
if isNil(conn):
trace "connection not found", peerId
return mux
proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [LPError].} =
## store the connection and muxer
return conn
proc selectMuxer*(c: ConnManager, conn: Connection): Muxer =
## select the muxer for the provided connection
##
if isNil(muxer):
raise newException(LPError, "muxer cannot be nil")
if isNil(conn):
return
if isNil(muxer.connection):
raise newException(LPError, "muxer's connection cannot be nil")
if conn in c.muxed:
return c.muxed.getOrDefault(conn).muxer
else:
debug "no muxer for connection", conn
if muxer.connection.closed or muxer.connection.atEof:
proc storeConn*(c: ConnManager, conn: Connection)
{.raises: [Defect, LPError].} =
## store a connection
##
if isNil(conn):
raise newException(LPError, "Connection cannot be nil")
if conn.closed or conn.atEof:
raise newException(LPError, "Connection closed or EOF")
let
peerId = muxer.connection.peerId
dir = muxer.connection.dir
let peerId = conn.peerId
# we use getOrDefault in the if below instead of [] to avoid the KeyError
if c.muxed.getOrDefault(peerId).len > c.maxConnsPerPeer:
let key = (peerId, dir)
let expectedConn = c.expectedConnectionsOverLimit.getOrDefault(key)
if expectedConn != nil and not expectedConn.finished:
expectedConn.complete(muxer)
else:
debug "Too many connections for peer",
conns = c.muxed.getOrDefault(peerId).len, peerId, dir
if peerId in c.expectedConnections and
not(c.expectedConnections.getOrDefault(peerId).finished):
c.expectedConnections.getOrDefault(peerId).complete(conn)
elif c.conns.getOrDefault(peerId).len > c.maxConnsPerPeer:
debug "Too many connections for peer",
conn, conns = c.conns.getOrDefault(peerId).len
raise newTooManyConnectionsError()
raise newTooManyConnectionsError()
var newPeer = false
c.muxed.withValue(peerId, muxers):
doAssert muxers[].len > 0
doAssert muxer notin muxers[]
muxers[].add(muxer)
do:
c.muxed[peerId] = @[muxer]
newPeer = true
libp2p_peers.set(c.muxed.len.int64)
c.conns.mgetOrPut(peerId, HashSet[Connection]()).incl(conn)
libp2p_peers.set(c.conns.len.int64)
asyncSpawn c.triggerConnEvent(
peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: dir == Direction.In)
)
# Launch on close listener
# All the errors are handled inside `onClose()` procedure.
asyncSpawn c.onClose(conn)
if newPeer:
asyncSpawn c.triggerPeerEvents(
peerId, PeerEvent(kind: PeerEventKind.Joined, initiator: dir == Direction.Out)
)
trace "Stored connection",
conn, direction = $conn.dir, connections = c.conns.len
asyncSpawn c.onClose(muxer)
trace "Stored muxer", muxer, direction = $muxer.connection.dir, peers = c.muxed.len
proc getIncomingSlot*(
c: ConnManager
): Future[ConnectionSlot] {.async: (raises: [CancelledError]).} =
proc getIncomingSlot*(c: ConnManager): Future[ConnectionSlot] {.async.} =
await c.inSema.acquire()
return ConnectionSlot(connManager: c, direction: In)
proc getOutgoingSlot*(
c: ConnManager, forceDial = false
): ConnectionSlot {.raises: [TooManyConnectionsError].} =
proc getOutgoingSlot*(c: ConnManager, forceDial = false): ConnectionSlot {.raises: [Defect, TooManyConnectionsError].} =
if forceDial:
c.outSema.forceAcquire()
elif not c.outSema.tryAcquire():
trace "Too many outgoing connections!",
available = c.outSema.count, max = c.outSema.size
trace "Too many outgoing connections!", count = c.outSema.count,
max = c.outSema.size
raise newTooManyConnectionsError()
return ConnectionSlot(connManager: c, direction: Out)
func semaphore(c: ConnManager, dir: Direction): AsyncSemaphore {.inline.} =
return if dir == In: c.inSema else: c.outSema
proc slotsAvailable*(c: ConnManager, dir: Direction): int =
return semaphore(c, dir).count
case dir:
of Direction.In:
return c.inSema.count
of Direction.Out:
return c.outSema.count
proc release*(cs: ConnectionSlot) =
semaphore(cs.connManager, cs.direction).release()
if cs.direction == In:
cs.connManager.inSema.release()
else:
cs.connManager.outSema.release()
proc trackConnection*(cs: ConnectionSlot, conn: Connection) =
if isNil(conn):
cs.release()
return
proc semaphoreMonitor() {.async: (raises: [CancelledError]).} =
proc semaphoreMonitor() {.async.} =
try:
await conn.join()
except CatchableError as exc:
trace "Exception in semaphore monitor, ignoring", description = exc.msg
trace "Exception in semaphore monitor, ignoring", exc = exc.msg
cs.release()
asyncSpawn semaphoreMonitor()
proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
if isNil(mux):
cs.release()
return
cs.trackConnection(mux.connection)
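A sketch of the slot pattern used by dialers: reserve an outgoing slot, then let its release follow the lifetime of whatever connection fills it (`dialAndUpgrade` is a placeholder for the actual dial logic):

let slot = connManager.getOutgoingSlot(forceDial = false)
try:
  let muxed = await dialAndUpgrade(peerId)   # placeholder dial + upgrade
  slot.trackMuxer(muxed)   # slot is released when the connection closes
except CatchableError:
  slot.release()           # nothing to track, free the slot immediately
  raise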
proc getStream*(
c: ConnManager, muxer: Muxer
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
## get a muxed stream for the passed muxer
proc storeMuxer*(c: ConnManager,
muxer: Muxer,
handle: Future[void] = nil)
{.raises: [Defect, CatchableError].} =
## store the connection and muxer
##
if not (isNil(muxer)):
if isNil(muxer):
raise newException(CatchableError, "muxer cannot be nil")
if isNil(muxer.connection):
raise newException(CatchableError, "muxer's connection cannot be nil")
if muxer.connection notin c:
raise newException(CatchableError, "can't add muxer for untracked connection")
c.muxed[muxer.connection] = MuxerHolder(
muxer: muxer,
handle: handle)
trace "Stored muxer",
muxer, handle = not handle.isNil, connections = c.conns.len
asyncSpawn c.onConnUpgraded(muxer.connection)
proc getStream*(c: ConnManager,
peerId: PeerId,
dir: Direction): Future[Connection] {.async, gcsafe.} =
## get a muxed stream for the provided peer
## with the given direction
##
let muxer = c.selectMuxer(c.selectConn(peerId, dir))
if not(isNil(muxer)):
return await muxer.newStream()
proc getStream*(
c: ConnManager, peerId: PeerId
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
proc getStream*(c: ConnManager,
peerId: PeerId): Future[Connection] {.async, gcsafe.} =
## get a muxed stream for the passed peer from any connection
##
return await c.getStream(c.selectMuxer(peerId))
let muxer = c.selectMuxer(c.selectConn(peerId))
if not(isNil(muxer)):
return await muxer.newStream()
proc getStream*(
c: ConnManager, peerId: PeerId, dir: Direction
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
## get a muxed stream for the passed peer from a connection with `dir`
proc getStream*(c: ConnManager,
conn: Connection): Future[Connection] {.async, gcsafe.} =
## get a muxed stream for the passed connection
##
return await c.getStream(c.selectMuxer(peerId, dir))
let muxer = c.selectMuxer(conn)
if not(isNil(muxer)):
return await muxer.newStream()
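A usage sketch for the stream helpers above (peerId is assumed to belong to an already-connected peer):

let stream = await connManager.getStream(peerId, Direction.Out)
if not isNil(stream):
  # read/write on the muxed stream, then close it
  await stream.close()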
proc dropPeer*(c: ConnManager, peerId: PeerId) {.async: (raises: [CancelledError]).} =
proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
## drop connections and cleanup resources for peer
##
trace "Dropping peer", peerId
let muxers = c.muxed.getOrDefault(peerId)
let conns = c.conns.getOrDefault(peerId)
for conn in conns:
trace "Removing connection", conn
delConn(c, conn)
var muxers: seq[MuxerHolder]
for conn in conns:
if conn in c.muxed:
muxers.add c.muxed[conn]
c.muxed.del(conn)
for muxer in muxers:
await closeMuxer(muxer)
await closeMuxerHolder(muxer)
for conn in conns:
await conn.close()
trace "Dropped peer", peerId
trace "Peer dropped", peerId
proc close*(c: ConnManager) {.async: (raises: [CancelledError]).} =
proc close*(c: ConnManager) {.async.} =
## cleanup resources for the connection
## manager
##
trace "Closing ConnManager"
let conns = c.conns
c.conns.clear()
let muxed = c.muxed
c.muxed.clear()
let expected = c.expectedConnectionsOverLimit
c.expectedConnectionsOverLimit.clear()
let expected = c.expectedConnections
c.expectedConnections.clear()
for _, fut in expected:
await fut.cancelAndWait()
for _, muxers in muxed:
for mux in muxers:
await closeMuxer(mux)
for _, muxer in muxed:
await closeMuxerHolder(muxer)
for _, conns2 in conns:
for conn in conns2:
await conn.close()
trace "Closed ConnManager"

View File

@@ -15,11 +15,14 @@
# RFC @ https://tools.ietf.org/html/rfc7539
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import bearssl/blockx
from stew/assign2 import assign
from stew/ptrops import baseAddr
from stew/ranges/ptr_arith import baseAddr
const
ChaChaPolyKeySize = 32
@@ -48,19 +51,17 @@ proc intoChaChaPolyTag*(s: openArray[byte]): ChaChaPolyTag =
# this is reconciled at runtime
# we do this in the global scope / module init
proc encrypt*(
_: type[ChaChaPoly],
key: ChaChaPolyKey,
nonce: ChaChaPolyNonce,
tag: var ChaChaPolyTag,
data: var openArray[byte],
aad: openArray[byte],
) =
let ad =
if aad.len > 0:
unsafeAddr aad[0]
else:
nil
proc encrypt*(_: type[ChaChaPoly],
key: ChaChaPolyKey,
nonce: ChaChaPolyNonce,
tag: var ChaChaPolyTag,
data: var openArray[byte],
aad: openArray[byte]) =
let
ad = if aad.len > 0:
unsafeAddr aad[0]
else:
nil
poly1305CtmulRun(
unsafeAddr key[0],
@@ -71,23 +72,20 @@ proc encrypt*(
uint(aad.len),
baseAddr(tag),
# cast is required to workaround https://github.com/nim-lang/Nim/issues/13905
cast[Chacha20Run](chacha20CtRun), #[encrypt]#
1.cint,
)
cast[Chacha20Run](chacha20CtRun),
#[encrypt]# 1.cint)
proc decrypt*(
_: type[ChaChaPoly],
key: ChaChaPolyKey,
nonce: ChaChaPolyNonce,
tag: var ChaChaPolyTag,
data: var openArray[byte],
aad: openArray[byte],
) =
let ad =
if aad.len > 0:
unsafeAddr aad[0]
else:
nil
proc decrypt*(_: type[ChaChaPoly],
key: ChaChaPolyKey,
nonce: ChaChaPolyNonce,
tag: var ChaChaPolyTag,
data: var openArray[byte],
aad: openArray[byte]) =
let
ad = if aad.len > 0:
unsafeAddr aad[0]
else:
nil
poly1305CtmulRun(
unsafeAddr key[0],
@@ -98,6 +96,5 @@ proc decrypt*(
uint(aad.len),
baseAddr(tag),
# cast is required to workaround https://github.com/nim-lang/Nim/issues/13905
cast[Chacha20Run](chacha20CtRun), #[decrypt]#
0.cint,
)
cast[Chacha20Run](chacha20CtRun),
#[decrypt]# 0.cint)
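An in-place round-trip sketch of the AEAD helpers above (key, nonce and associated data are zero/empty placeholders; a real caller derives them from the handshake):

var
  key: ChaChaPolyKey                 # 32-byte key (placeholder, all zeroes)
  nonce: ChaChaPolyNonce             # 12-byte nonce (placeholder)
  tag: ChaChaPolyTag                 # receives the 16-byte Poly1305 tag
  data = @[1'u8, 2, 3, 4]
  aad = newSeq[byte]()               # no associated data

ChaChaPoly.encrypt(key, nonce, tag, data, aad)   # encrypts `data` in place, fills `tag`
ChaChaPoly.decrypt(key, nonce, tag, data, aad)   # decrypts `data` in place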

View File

@@ -8,18 +8,21 @@
# those terms.
## This module implements Public Key and Private Key interface for libp2p.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
from strutils import split, strip, cmpIgnoreCase
import ../utils/sequninit
const libp2p_pki_schemes* {.strdefine.} = "rsa,ed25519,secp256k1,ecnist"
type PKScheme* = enum
RSA = 0
Ed25519
Secp256k1
ECDSA
type
PKScheme* = enum
RSA = 0,
Ed25519,
Secp256k1,
ECDSA
proc initSupportedSchemes(list: static string): set[PKScheme] =
var res: set[PKScheme]
@@ -65,19 +68,17 @@ when supported(PKScheme.Ed25519):
import ed25519/ed25519
when supported(PKScheme.Secp256k1):
import secp
when supported(PKScheme.ECDSA):
import ecnist
# These used to be declared in `crypto` itself
export ecnist.ephemeral, ecnist.ECDHEScheme
# We are still importing `ecnist` because it is used for the SECIO handshake,
# but it will be impossible to create ECNIST keys or import ECNIST keys.
import bearssl/rand, bearssl/hash as bhash
import ecnist, bearssl/rand, bearssl/hash as bhash
import ../protobuf/minprotobuf, ../vbuffer, ../multihash, ../multicodec
import nimcrypto/[rijndael, twofish, sha2, hash, hmac]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import ../utility
import results
import stew/results
export results, utility
# This is workaround for Nim's `import` bug
@@ -85,9 +86,11 @@ export rijndael, twofish, sha2, hash, hmac, ncrutils, rand
type
DigestSheme* = enum
Sha256
Sha256,
Sha512
ECDHEScheme* = EcCurveKind
PublicKey* = object
case scheme*: PKScheme
of PKScheme.RSA:
@@ -148,16 +151,15 @@ type
data*: seq[byte]
CryptoError* = enum
KeyError
SigError
HashError
KeyError,
SigError,
HashError,
SchemeError
CryptoResult*[T] = Result[T, CryptoError]
template orError*(exp: untyped, err: untyped): untyped =
exp.mapErr do(_: auto) -> auto:
err
(exp.mapErr do (_: auto) -> auto: err)
proc newRng*(): ref HmacDrbgContext =
# You should only create one instance of the RNG per application / library
@@ -173,11 +175,13 @@ proc newRng*(): ref HmacDrbgContext =
return nil
rng
proc shuffle*[T](rng: ref HmacDrbgContext, x: var openArray[T]) =
if x.len == 0:
return
proc shuffle*[T](
rng: ref HmacDrbgContext,
x: var openArray[T]) =
var randValues = newSeqUninit[byte](len(x) * 2)
if x.len == 0: return
var randValues = newSeqUninitialized[byte](len(x) * 2)
hmacDrbgGenerate(rng[], randValues)
for i in countdown(x.high, 1):
@@ -186,12 +190,9 @@ proc shuffle*[T](rng: ref HmacDrbgContext, x: var openArray[T]) =
y = rand mod i
swap(x[i], x[y])
proc random*(
T: typedesc[PrivateKey],
scheme: PKScheme,
rng: var HmacDrbgContext,
bits = RsaDefaultKeySize,
): CryptoResult[PrivateKey] =
proc random*(T: typedesc[PrivateKey], scheme: PKScheme,
rng: var HmacDrbgContext,
bits = RsaDefaultKeySize): CryptoResult[PrivateKey] =
## Generate random private key for scheme ``scheme``.
##
## ``bits`` is number of bits for RSA key, ``bits`` value must be in
@@ -199,7 +200,7 @@ proc random*(
case scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
let rsakey = ?RsaPrivateKey.random(rng, bits).orError(CryptoError.KeyError)
let rsakey = ? RsaPrivateKey.random(rng, bits).orError(KeyError)
ok(PrivateKey(scheme: scheme, rsakey: rsakey))
else:
err(SchemeError)
@@ -211,8 +212,7 @@ proc random*(
err(SchemeError)
of PKScheme.ECDSA:
when supported(PKScheme.ECDSA):
let eckey =
?ecnist.EcPrivateKey.random(Secp256r1, rng).orError(CryptoError.KeyError)
let eckey = ? ecnist.EcPrivateKey.random(Secp256r1, rng).orError(KeyError)
ok(PrivateKey(scheme: scheme, eckey: eckey))
else:
err(SchemeError)
@@ -223,9 +223,8 @@ proc random*(
else:
err(SchemeError)
proc random*(
T: typedesc[PrivateKey], rng: var HmacDrbgContext, bits = RsaDefaultKeySize
): CryptoResult[PrivateKey] =
proc random*(T: typedesc[PrivateKey], rng: var HmacDrbgContext,
bits = RsaDefaultKeySize): CryptoResult[PrivateKey] =
## Generate random private key using default public-key cryptography scheme.
##
## Default public-key cryptography schemes are tried in the following order:
@@ -239,21 +238,17 @@ proc random*(
let skkey = SkPrivateKey.random(rng)
ok(PrivateKey(scheme: PKScheme.Secp256k1, skkey: skkey))
elif supported(PKScheme.RSA):
let rsakey = ?RsaPrivateKey.random(rng, bits).orError(CryptoError.KeyError)
let rsakey = ? RsaPrivateKey.random(rng, bits).orError(KeyError)
ok(PrivateKey(scheme: PKScheme.RSA, rsakey: rsakey))
elif supported(PKScheme.ECDSA):
let eckey =
?ecnist.EcPrivateKey.random(Secp256r1, rng).orError(CryptoError.KeyError)
let eckey = ? ecnist.EcPrivateKey.random(Secp256r1, rng).orError(KeyError)
ok(PrivateKey(scheme: PKScheme.ECDSA, eckey: eckey))
else:
err(SchemeError)
proc random*(
T: typedesc[KeyPair],
scheme: PKScheme,
rng: var HmacDrbgContext,
bits = RsaDefaultKeySize,
): CryptoResult[KeyPair] =
proc random*(T: typedesc[KeyPair], scheme: PKScheme,
rng: var HmacDrbgContext,
bits = RsaDefaultKeySize): CryptoResult[KeyPair] =
## Generate random key pair for scheme ``scheme``.
##
## ``bits`` is number of bits for RSA key, ``bits`` value must be in
@@ -261,52 +256,39 @@ proc random*(
case scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
let pair = ?RsaKeyPair.random(rng, bits).orError(CryptoError.KeyError)
ok(
KeyPair(
seckey: PrivateKey(scheme: scheme, rsakey: pair.seckey),
pubkey: PublicKey(scheme: scheme, rsakey: pair.pubkey),
)
)
let pair = ? RsaKeyPair.random(rng, bits).orError(KeyError)
ok(KeyPair(
seckey: PrivateKey(scheme: scheme, rsakey: pair.seckey),
pubkey: PublicKey(scheme: scheme, rsakey: pair.pubkey)))
else:
err(SchemeError)
of PKScheme.Ed25519:
when supported(PKScheme.Ed25519):
let pair = EdKeyPair.random(rng)
ok(
KeyPair(
seckey: PrivateKey(scheme: scheme, edkey: pair.seckey),
pubkey: PublicKey(scheme: scheme, edkey: pair.pubkey),
)
)
ok(KeyPair(
seckey: PrivateKey(scheme: scheme, edkey: pair.seckey),
pubkey: PublicKey(scheme: scheme, edkey: pair.pubkey)))
else:
err(SchemeError)
of PKScheme.ECDSA:
when supported(PKScheme.ECDSA):
let pair = ?EcKeyPair.random(Secp256r1, rng).orError(CryptoError.KeyError)
ok(
KeyPair(
seckey: PrivateKey(scheme: scheme, eckey: pair.seckey),
pubkey: PublicKey(scheme: scheme, eckey: pair.pubkey),
)
)
let pair = ? EcKeyPair.random(Secp256r1, rng).orError(KeyError)
ok(KeyPair(
seckey: PrivateKey(scheme: scheme, eckey: pair.seckey),
pubkey: PublicKey(scheme: scheme, eckey: pair.pubkey)))
else:
err(SchemeError)
of PKScheme.Secp256k1:
when supported(PKScheme.Secp256k1):
let pair = SkKeyPair.random(rng)
ok(
KeyPair(
seckey: PrivateKey(scheme: scheme, skkey: pair.seckey),
pubkey: PublicKey(scheme: scheme, skkey: pair.pubkey),
)
)
ok(KeyPair(
seckey: PrivateKey(scheme: scheme, skkey: pair.seckey),
pubkey: PublicKey(scheme: scheme, skkey: pair.pubkey)))
else:
err(SchemeError)
proc random*(
T: typedesc[KeyPair], rng: var HmacDrbgContext, bits = RsaDefaultKeySize
): CryptoResult[KeyPair] =
proc random*(T: typedesc[KeyPair], rng: var HmacDrbgContext,
bits = RsaDefaultKeySize): CryptoResult[KeyPair] =
## Generate a random key pair using the default public-key cryptography
## scheme.
##
@@ -316,36 +298,24 @@ proc random*(
## The first available (supported) scheme will be used.
when supported(PKScheme.Ed25519):
let pair = EdKeyPair.random(rng)
ok(
KeyPair(
seckey: PrivateKey(scheme: PKScheme.Ed25519, edkey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.Ed25519, edkey: pair.pubkey),
)
)
ok(KeyPair(
seckey: PrivateKey(scheme: PKScheme.Ed25519, edkey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.Ed25519, edkey: pair.pubkey)))
elif supported(PKScheme.Secp256k1):
let pair = SkKeyPair.random(rng)
ok(
KeyPair(
seckey: PrivateKey(scheme: PKScheme.Secp256k1, skkey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.Secp256k1, skkey: pair.pubkey),
)
)
ok(KeyPair(
seckey: PrivateKey(scheme: PKScheme.Secp256k1, skkey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.Secp256k1, skkey: pair.pubkey)))
elif supported(PKScheme.RSA):
let pair = ?RsaKeyPair.random(rng, bits).orError(KeyError)
ok(
KeyPair(
seckey: PrivateKey(scheme: PKScheme.RSA, rsakey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.RSA, rsakey: pair.pubkey),
)
)
let pair = ? RsaKeyPair.random(rng, bits).orError(KeyError)
ok(KeyPair(
seckey: PrivateKey(scheme: PKScheme.RSA, rsakey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.RSA, rsakey: pair.pubkey)))
elif supported(PKScheme.ECDSA):
let pair = ?EcKeyPair.random(Secp256r1, rng).orError(KeyError)
ok(
KeyPair(
seckey: PrivateKey(scheme: PKScheme.ECDSA, eckey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.ECDSA, eckey: pair.pubkey),
)
)
let pair = ? EcKeyPair.random(Secp256r1, rng).orError(KeyError)
ok(KeyPair(
seckey: PrivateKey(scheme: PKScheme.ECDSA, eckey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.ECDSA, eckey: pair.pubkey)))
else:
err(SchemeError)
@@ -366,7 +336,7 @@ proc getPublicKey*(key: PrivateKey): CryptoResult[PublicKey] =
err(SchemeError)
of PKScheme.ECDSA:
when supported(PKScheme.ECDSA):
let eckey = ?key.eckey.getPublicKey().orError(KeyError)
let eckey = ? key.eckey.getPublicKey().orError(KeyError)
ok(PublicKey(scheme: ECDSA, eckey: eckey))
else:
err(SchemeError)
@@ -377,9 +347,8 @@ proc getPublicKey*(key: PrivateKey): CryptoResult[PublicKey] =
else:
err(SchemeError)
proc toRawBytes*(
key: PrivateKey | PublicKey, data: var openArray[byte]
): CryptoResult[int] =
proc toRawBytes*(key: PrivateKey | PublicKey,
data: var openArray[byte]): CryptoResult[int] =
## Serialize private key ``key`` (using scheme's own serialization) and store
## it to ``data``.
##
@@ -438,7 +407,7 @@ proc toBytes*(key: PrivateKey, data: var openArray[byte]): CryptoResult[int] =
## Returns number of bytes (octets) needed to store private key ``key``.
var msg = initProtoBuffer()
msg.write(1, uint64(key.scheme))
msg.write(2, ?key.getRawBytes())
msg.write(2, ? key.getRawBytes())
msg.finish()
var blen = len(msg.buffer)
if len(data) >= blen:
@@ -452,7 +421,7 @@ proc toBytes*(key: PublicKey, data: var openArray[byte]): CryptoResult[int] =
## Returns number of bytes (octets) needed to store public key ``key``.
var msg = initProtoBuffer()
msg.write(1, uint64(key.scheme))
msg.write(2, ?key.getRawBytes())
msg.write(2, ? key.getRawBytes())
msg.finish()
var blen = len(msg.buffer)
if len(data) >= blen and blen > 0:
@@ -472,7 +441,7 @@ proc getBytes*(key: PrivateKey): CryptoResult[seq[byte]] =
## serialization).
var msg = initProtoBuffer()
msg.write(1, uint64(key.scheme))
msg.write(2, ?key.getRawBytes())
msg.write(2, ? key.getRawBytes())
msg.finish()
ok(msg.buffer)
@@ -481,7 +450,7 @@ proc getBytes*(key: PublicKey): CryptoResult[seq[byte]] =
## serialization).
var msg = initProtoBuffer()
msg.write(1, uint64(key.scheme))
msg.write(2, ?key.getRawBytes())
msg.write(2, ? key.getRawBytes())
msg.finish()
ok(msg.buffer)
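A round-trip sketch for the protobuf (de)serialization helpers above (rng is assumed to come from newRng()):

let seckey = PrivateKey.random(rng[]).tryGet()   # default scheme
let raw = seckey.getBytes().tryGet()             # protobuf-encoded bytes
let restored = PrivateKey.init(raw).tryGet()
assert restored == seckey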
@@ -489,7 +458,7 @@ proc getBytes*(sig: Signature): seq[byte] =
## Return signature ``sig`` in binary form.
result = sig.data
template initImpl[T: PrivateKey | PublicKey](key: var T, data: openArray[byte]): bool =
proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
## Initialize private key ``key`` from libp2p's protobuf serialized raw
## binary form.
##
@@ -502,7 +471,7 @@ template initImpl[T: PrivateKey | PublicKey](key: var T, data: openArray[byte]):
var pb = initProtoBuffer(@data)
let r1 = pb.getField(1, id)
let r2 = pb.getField(2, buffer)
if not (r1.get(false) and r2.get(false)):
if not(r1.isOk() and r1.get() and r2.isOk() and r2.get()):
false
else:
if cast[int8](id) notin SupportedSchemesInt or len(buffer) <= 0:
@@ -513,7 +482,7 @@ template initImpl[T: PrivateKey | PublicKey](key: var T, data: openArray[byte]):
var nkey = PrivateKey(scheme: scheme)
else:
var nkey = PublicKey(scheme: scheme)
case scheme
case scheme:
of PKScheme.RSA:
when supported(PKScheme.RSA):
if init(nkey.rsakey, buffer).isOk:
@@ -551,15 +520,6 @@ template initImpl[T: PrivateKey | PublicKey](key: var T, data: openArray[byte]):
else:
false
{.push warning[ProveField]: off.} # https://github.com/nim-lang/Nim/issues/22060
proc init*(key: var PrivateKey, data: openArray[byte]): bool =
initImpl(key, data)
proc init*(key: var PublicKey, data: openArray[byte]): bool =
initImpl(key, data)
{.pop.}
proc init*(sig: var Signature, data: openArray[byte]): bool =
## Initialize signature ``sig`` from raw binary form.
##
@@ -568,7 +528,7 @@ proc init*(sig: var Signature, data: openArray[byte]): bool =
sig.data = @data
result = true
proc init*[T: PrivateKey | PublicKey](key: var T, data: string): bool =
proc init*[T: PrivateKey|PublicKey](key: var T, data: string): bool =
## Initialize private/public key ``key`` from libp2p's protobuf serialized
## hexadecimal string representation.
##
@@ -582,23 +542,26 @@ proc init*(sig: var Signature, data: string): bool =
## Returns ``true`` on success.
sig.init(ncrutils.fromHex(data))
proc init*(t: typedesc[PrivateKey], data: openArray[byte]): CryptoResult[PrivateKey] =
proc init*(t: typedesc[PrivateKey],
data: openArray[byte]): CryptoResult[PrivateKey] =
## Create new private key from libp2p's protobuf serialized binary form.
var res: t
if not res.init(data):
err(CryptoError.KeyError)
err(KeyError)
else:
ok(res)
proc init*(t: typedesc[PublicKey], data: openArray[byte]): CryptoResult[PublicKey] =
proc init*(t: typedesc[PublicKey],
data: openArray[byte]): CryptoResult[PublicKey] =
## Create new public key from libp2p's protobuf serialized binary form.
var res: t
if not res.init(data):
err(CryptoError.KeyError)
err(KeyError)
else:
ok(res)
proc init*(t: typedesc[Signature], data: openArray[byte]): CryptoResult[Signature] =
proc init*(t: typedesc[Signature],
data: openArray[byte]): CryptoResult[Signature] =
## Create new signature from libp2p's protobuf serialized binary form.
var res: t
if not res.init(data):
@@ -614,28 +577,24 @@ proc init*(t: typedesc[PrivateKey], data: string): CryptoResult[PrivateKey] =
when supported(PKScheme.RSA):
proc init*(t: typedesc[PrivateKey], key: rsa.RsaPrivateKey): PrivateKey =
PrivateKey(scheme: RSA, rsakey: key)
proc init*(t: typedesc[PublicKey], key: rsa.RsaPublicKey): PublicKey =
PublicKey(scheme: RSA, rsakey: key)
when supported(PKScheme.Ed25519):
proc init*(t: typedesc[PrivateKey], key: EdPrivateKey): PrivateKey =
PrivateKey(scheme: Ed25519, edkey: key)
proc init*(t: typedesc[PublicKey], key: EdPublicKey): PublicKey =
PublicKey(scheme: Ed25519, edkey: key)
when supported(PKScheme.Secp256k1):
proc init*(t: typedesc[PrivateKey], key: SkPrivateKey): PrivateKey =
PrivateKey(scheme: Secp256k1, skkey: key)
proc init*(t: typedesc[PublicKey], key: SkPublicKey): PublicKey =
PublicKey(scheme: Secp256k1, skkey: key)
when supported(PKScheme.ECDSA):
proc init*(t: typedesc[PrivateKey], key: ecnist.EcPrivateKey): PrivateKey =
PrivateKey(scheme: ECDSA, eckey: key)
proc init*(t: typedesc[PublicKey], key: ecnist.EcPublicKey): PublicKey =
PublicKey(scheme: ECDSA, eckey: key)
@@ -704,9 +663,9 @@ proc `==`*(key1, key2: PrivateKey): bool =
else:
false
proc `$`*(key: PrivateKey | PublicKey): string =
proc `$`*(key: PrivateKey|PublicKey): string =
## Get string representation of private/public key ``key``.
case key.scheme
case key.scheme:
of PKScheme.RSA:
when supported(PKScheme.RSA):
$(key.rsakey)
@@ -728,9 +687,9 @@ proc `$`*(key: PrivateKey | PublicKey): string =
else:
"unsupported secp256k1 key"
func shortLog*(key: PrivateKey | PublicKey): string =
func shortLog*(key: PrivateKey|PublicKey): string =
## Get short string representation of private/public key ``key``.
case key.scheme
case key.scheme:
of PKScheme.RSA:
when supported(PKScheme.RSA):
($key.rsakey).shortLog
@@ -756,15 +715,16 @@ proc `$`*(sig: Signature): string =
## Get string representation of signature ``sig``.
result = ncrutils.toHex(sig.data)
proc sign*(key: PrivateKey, data: openArray[byte]): CryptoResult[Signature] {.gcsafe.} =
proc sign*(key: PrivateKey,
data: openArray[byte]): CryptoResult[Signature] {.gcsafe.} =
## Sign message ``data`` using private key ``key`` and return generated
## signature in raw binary form.
var res: Signature
case key.scheme
case key.scheme:
of PKScheme.RSA:
when supported(PKScheme.RSA):
let sig = ?key.rsakey.sign(data).orError(SigError)
res.data = ?sig.getBytes().orError(SigError)
let sig = ? key.rsakey.sign(data).orError(SigError)
res.data = ? sig.getBytes().orError(SigError)
ok(res)
else:
err(SchemeError)
@@ -777,8 +737,8 @@ proc sign*(key: PrivateKey, data: openArray[byte]): CryptoResult[Signature] {.gc
err(SchemeError)
of PKScheme.ECDSA:
when supported(PKScheme.ECDSA):
let sig = ?key.eckey.sign(data).orError(SigError)
res.data = ?sig.getBytes().orError(SigError)
let sig = ? key.eckey.sign(data).orError(SigError)
res.data = ? sig.getBytes().orError(SigError)
ok(res)
else:
err(SchemeError)
@@ -793,7 +753,7 @@ proc sign*(key: PrivateKey, data: openArray[byte]): CryptoResult[Signature] {.gc
proc verify*(sig: Signature, message: openArray[byte], key: PublicKey): bool =
## Verify signature ``sig`` using message ``message`` and public key ``key``.
## Return ``true`` if message signature is valid.
case key.scheme
case key.scheme:
of PKScheme.RSA:
when supported(PKScheme.RSA):
var signature: RsaSignature
@@ -831,12 +791,12 @@ proc verify*(sig: Signature, message: openArray[byte], key: PublicKey): bool =
else:
false
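A short end-to-end sketch of key generation, signing and verification with the procs above (Ed25519 is used since it is the default when enabled; rng is assumed from newRng()):

let
  keypair = KeyPair.random(PKScheme.Ed25519, rng[]).tryGet()
  message = @[byte 1, 2, 3]
  signature = keypair.seckey.sign(message).tryGet()
assert signature.verify(message, keypair.pubkey)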
template makeSecret(buffer, hmactype, secret, seed: untyped) {.dirty.} =
template makeSecret(buffer, hmactype, secret, seed: untyped) {.dirty.}=
var ctx: hmactype
var j = 0
# We need to strip leading zeros, because Go's bigint serialization does it.
var offset = 0
for i in 0 ..< len(secret):
for i in 0..<len(secret):
if secret[i] != 0x00'u8:
break
inc(offset)
@@ -857,9 +817,8 @@ template makeSecret(buffer, hmactype, secret, seed: untyped) {.dirty.} =
ctx.update(a.data)
a = ctx.finish()
proc stretchKeys*(
cipherType: string, hashType: string, sharedSecret: seq[byte]
): Secret =
proc stretchKeys*(cipherType: string, hashType: string,
sharedSecret: seq[byte]): Secret =
## Expand shared secret to cryptographic keys.
if cipherType == "AES-128":
result.ivsize = aes128.sizeBlock
@@ -874,7 +833,7 @@ proc stretchKeys*(
var seed = "key expansion"
result.macsize = 20
let length = result.ivsize + result.keysize + result.macsize
result.data = newSeqUninit[byte](2 * length)
result.data = newSeq[byte](2 * length)
if hashType == "SHA256":
makeSecret(result.data, HMAC[sha256], sharedSecret, seed)
@@ -885,57 +844,65 @@ template goffset*(secret, id, o: untyped): untyped =
id * (len(secret.data) shr 1) + o
template ivOpenArray*(secret: Secret, id: int): untyped =
toOpenArray(
secret.data, goffset(secret, id, 0), goffset(secret, id, secret.ivsize - 1)
)
toOpenArray(secret.data, goffset(secret, id, 0),
goffset(secret, id, secret.ivsize - 1))
template keyOpenArray*(secret: Secret, id: int): untyped =
toOpenArray(
secret.data,
goffset(secret, id, secret.ivsize),
goffset(secret, id, secret.ivsize + secret.keysize - 1),
)
toOpenArray(secret.data, goffset(secret, id, secret.ivsize),
goffset(secret, id, secret.ivsize + secret.keysize - 1))
template macOpenArray*(secret: Secret, id: int): untyped =
toOpenArray(
secret.data,
goffset(secret, id, secret.ivsize + secret.keysize),
goffset(secret, id, secret.ivsize + secret.keysize + secret.macsize - 1),
)
toOpenArray(secret.data, goffset(secret, id, secret.ivsize + secret.keysize),
goffset(secret, id, secret.ivsize + secret.keysize + secret.macsize - 1))
proc iv*(secret: Secret, id: int): seq[byte] {.inline.} =
## Get array of bytes with the initial vector.
result = newSeqUninit[byte](secret.ivsize)
var offset =
if id == 0:
0
else:
(len(secret.data) div 2)
result = newSeq[byte](secret.ivsize)
var offset = if id == 0: 0 else: (len(secret.data) div 2)
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.ivsize)
proc key*(secret: Secret, id: int): seq[byte] {.inline.} =
result = newSeqUninit[byte](secret.keysize)
var offset =
if id == 0:
0
else:
(len(secret.data) div 2)
result = newSeq[byte](secret.keysize)
var offset = if id == 0: 0 else: (len(secret.data) div 2)
offset += secret.ivsize
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.keysize)
proc mac*(secret: Secret, id: int): seq[byte] {.inline.} =
result = newSeqUninit[byte](secret.macsize)
var offset =
if id == 0:
0
else:
(len(secret.data) div 2)
result = newSeq[byte](secret.macsize)
var offset = if id == 0: 0 else: (len(secret.data) div 2)
offset += secret.ivsize + secret.keysize
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.macsize)
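A sketch tying stretchKeys to the iv/key/mac accessors above (sharedSecret is a placeholder for the ECDHE-derived secret; indices 0 and 1 select the two directions):

let secret = stretchKeys("AES-128", "SHA256", sharedSecret)
let
  localIv   = secret.iv(0)
  localKey  = secret.key(0)
  localMac  = secret.mac(0)
  remoteKey = secret.key(1)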
proc getOrder*(
remotePubkey, localNonce: openArray[byte], localPubkey, remoteNonce: openArray[byte]
): CryptoResult[int] =
proc ephemeral*(
scheme: ECDHEScheme,
rng: var HmacDrbgContext): CryptoResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE.
var keypair: EcKeyPair
if scheme == Secp256r1:
keypair = ? EcKeyPair.random(Secp256r1, rng).orError(KeyError)
elif scheme == Secp384r1:
keypair = ? EcKeyPair.random(Secp384r1, rng).orError(KeyError)
elif scheme == Secp521r1:
keypair = ? EcKeyPair.random(Secp521r1, rng).orError(KeyError)
ok(keypair)
proc ephemeral*(
scheme: string, rng: var HmacDrbgContext): CryptoResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE using string encoding.
##
## Currently supported encoding strings are P-256, P-384 and P-521; if the
## encoding string is not supported, a P-521 key will be generated.
if scheme == "P-256":
ephemeral(Secp256r1, rng)
elif scheme == "P-384":
ephemeral(Secp384r1, rng)
elif scheme == "P-521":
ephemeral(Secp521r1, rng)
else:
ephemeral(Secp521r1, rng)
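A usage sketch for the ephemeral helpers (rng assumed from newRng(); unknown curve names fall back to P-521 as documented above):

let ecdheKeys = ephemeral("P-256", rng[]).tryGet()
# ecdheKeys.seckey / ecdheKeys.pubkey form the ephemeral EC key pair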
proc getOrder*(remotePubkey, localNonce: openArray[byte],
localPubkey, remoteNonce: openArray[byte]): CryptoResult[int] =
## Compare values and calculate `order` parameter.
var ctx: sha256
ctx.init()
@@ -946,9 +913,9 @@ proc getOrder*(
ctx.update(localPubkey)
ctx.update(remoteNonce)
var digest2 = ctx.finish()
var mh1 = ?MultiHash.init(multiCodec("sha2-256"), digest1).orError(HashError)
var mh2 = ?MultiHash.init(multiCodec("sha2-256"), digest2).orError(HashError)
var res = 0
var mh1 = ? MultiHash.init(multiCodec("sha2-256"), digest1).orError(HashError)
var mh2 = ? MultiHash.init(multiCodec("sha2-256"), digest2).orError(HashError)
var res = 0;
for i in 0 ..< len(mh1.data.buffer):
res = int(mh1.data.buffer[i]) - int(mh2.data.buffer[i])
if res != 0:
@@ -979,45 +946,96 @@ proc selectBest*(order: int, p1, p2: string): string =
if felement == selement:
return felement
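A sketch of how getOrder and selectBest combine during negotiation (the public key and nonce byte sequences are placeholders exchanged in the proposals):

let order = getOrder(remotePubBytes, localNonce, localPubBytes, remoteNonce).tryGet()
let cipher = selectBest(order, "AES-256,AES-128", "AES-128,AES-256")
# `cipher` holds the mutually supported choice preferred by the computed order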
proc createProposal*(nonce, pubkey: openArray[byte],
exchanges, ciphers, hashes: string): seq[byte] =
## Create SecIO proposal message using random ``nonce``, local public key
## ``pubkey``, comma-delimited list of supported exchange schemes
## ``exchanges``, comma-delimited list of supported ciphers ``ciphers`` and
## comma-delimited list of supported hashes ``hashes``.
var msg = initProtoBuffer({WithUint32BeLength})
msg.write(1, nonce)
msg.write(2, pubkey)
msg.write(3, exchanges)
msg.write(4, ciphers)
msg.write(5, hashes)
msg.finish()
msg.buffer
proc decodeProposal*(message: seq[byte], nonce, pubkey: var seq[byte],
exchanges, ciphers, hashes: var string): bool =
## Parse incoming proposal message and decode remote random nonce ``nonce``,
## remote public key ``pubkey``, comma-delimited list of supported exchange
## schemes ``exchanges``, comma-delimited list of supported ciphers
## ``ciphers`` and comma-delimited list of supported hashes ``hashes``.
##
## Procedure returns ``true`` on success and ``false`` on error.
var pb = initProtoBuffer(message)
let r1 = pb.getField(1, nonce)
let r2 = pb.getField(2, pubkey)
let r3 = pb.getField(3, exchanges)
let r4 = pb.getField(4, ciphers)
let r5 = pb.getField(5, hashes)
r1.isOk() and r1.get() and r2.isOk() and r2.get() and
r3.isOk() and r3.get() and r4.isOk() and r4.get() and
r5.isOk() and r5.get()
proc createExchange*(epubkey, signature: openArray[byte]): seq[byte] =
## Create SecIO exchange message using ephemeral public key ``epubkey`` and
## signature of proposal blocks ``signature``.
var msg = initProtoBuffer({WithUint32BeLength})
msg.write(1, epubkey)
msg.write(2, signature)
msg.finish()
msg.buffer
proc decodeExchange*(message: seq[byte],
pubkey, signature: var seq[byte]): bool =
## Parse incoming exchange message and decode remote ephemeral public key
## ``pubkey`` and signature ``signature``.
##
## Procedure returns ``true`` on success and ``false`` on error.
var pb = initProtoBuffer(message)
let r1 = pb.getField(1, pubkey)
let r2 = pb.getField(2, signature)
r1.isOk() and r1.get() and r2.isOk() and r2.get()
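A sketch of building the SecIO handshake messages with the helpers above (nonce, pubkeyBytes, epubkey and signatureBytes are placeholders produced earlier in the handshake):

let proposal = createProposal(nonce, pubkeyBytes,
                              "P-256,P-384", "AES-128,AES-256", "SHA256,SHA512")
let exchangeMsg = createExchange(epubkey, signatureBytes)
# the {WithUint32BeLength} option used above adds the 4-byte big-endian
# length prefix on finish(), so the buffers are framed for the wire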
## Serialization/Deserialization helpers
proc write*(
vb: var VBuffer, pubkey: PublicKey
) {.inline, raises: [ResultError[CryptoError]].} =
proc write*(vb: var VBuffer, pubkey: PublicKey) {.
inline, raises: [Defect, ResultError[CryptoError]].} =
## Write PublicKey value ``pubkey`` to buffer ``vb``.
vb.writeSeq(pubkey.getBytes().tryGet())
proc write*(
vb: var VBuffer, seckey: PrivateKey
) {.inline, raises: [ResultError[CryptoError]].} =
proc write*(vb: var VBuffer, seckey: PrivateKey) {.
inline, raises: [Defect, ResultError[CryptoError]].} =
## Write PrivateKey value ``seckey`` to buffer ``vb``.
vb.writeSeq(seckey.getBytes().tryGet())
proc write*(
vb: var VBuffer, sig: PrivateKey
) {.inline, raises: [ResultError[CryptoError]].} =
proc write*(vb: var VBuffer, sig: PrivateKey) {.
inline, raises: [Defect, ResultError[CryptoError]].} =
## Write Signature value ``sig`` to buffer ``vb``.
vb.writeSeq(sig.getBytes().tryGet())
proc write*[T: PublicKey | PrivateKey](
pb: var ProtoBuffer, field: int, key: T
) {.inline, raises: [ResultError[CryptoError]].} =
proc write*[T: PublicKey|PrivateKey](pb: var ProtoBuffer, field: int,
key: T) {.
inline, raises: [Defect, ResultError[CryptoError]].} =
write(pb, field, key.getBytes().tryGet())
proc write*(pb: var ProtoBuffer, field: int, sig: Signature) {.inline, raises: [].} =
proc write*(pb: var ProtoBuffer, field: int, sig: Signature) {.
inline, raises: [Defect].} =
write(pb, field, sig.getBytes())
proc getField*[T: PublicKey | PrivateKey](
pb: ProtoBuffer, field: int, value: var T
): ProtoResult[bool] =
proc getField*[T: PublicKey|PrivateKey](pb: ProtoBuffer, field: int,
value: var T): ProtoResult[bool] =
## Deserialize public/private key from protobuf's message ``pb`` using field
## index ``field``.
##
## On success deserialized key will be stored in ``value``.
var buffer: seq[byte]
var key: T
let res = ?pb.getField(field, buffer)
if not (res):
let res = ? pb.getField(field, buffer)
if not(res):
ok(false)
else:
if key.init(buffer):
@@ -1026,15 +1044,16 @@ proc getField*[T: PublicKey | PrivateKey](
else:
err(ProtoError.IncorrectBlob)
proc getField*(pb: ProtoBuffer, field: int, value: var Signature): ProtoResult[bool] =
proc getField*(pb: ProtoBuffer, field: int,
value: var Signature): ProtoResult[bool] =
## Deserialize signature from protobuf's message ``pb`` using field index
## ``field``.
##
## On success deserialized signature will be stored in ``value``.
var buffer: seq[byte]
var sig: Signature
let res = ?pb.getField(field, buffer)
if not (res):
let res = ? pb.getField(field, buffer)
if not(res):
ok(false)
else:
if sig.init(buffer):

View File

@@ -15,14 +15,18 @@
# RFC @ https://tools.ietf.org/html/rfc7748
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import bearssl/[ec, rand]
import results
import bearssl/[ec, rand, hash]
import stew/results
from stew/assign2 import assign
export results
const Curve25519KeySize* = 32
const
Curve25519KeySize* = 32
type
Curve25519* = object
@@ -34,12 +38,12 @@ proc intoCurve25519Key*(s: openArray[byte]): Curve25519Key =
assert s.len == Curve25519KeySize
assign(result, s)
proc getBytes*(key: Curve25519Key): seq[byte] =
@key
proc getBytes*(key: Curve25519Key): seq[byte] = @key
proc byteswap(buf: var Curve25519Key) {.inline.} =
for i in 0 ..< 16:
let x = buf[i]
for i in 0..<16:
let
x = buf[i]
buf[i] = buf[31 - i]
buf[31 - i] = x
@@ -47,25 +51,31 @@ proc mul*(_: type[Curve25519], point: var Curve25519Key, multiplier: Curve25519K
let defaultBrEc = ecGetDefault()
# multiplier needs to be big-endian
var multiplierBs = multiplier
var
multiplierBs = multiplier
multiplierBs.byteswap()
let res = defaultBrEc.mul(
addr point[0],
Curve25519KeySize,
addr multiplierBs[0],
Curve25519KeySize,
EC_curve25519,
)
let
res = defaultBrEc.mul(
addr point[0],
Curve25519KeySize,
addr multiplierBs[0],
Curve25519KeySize,
EC_curve25519)
assert res == 1
proc mulgen(_: type[Curve25519], dst: var Curve25519Key, point: Curve25519Key) =
let defaultBrEc = ecGetDefault()
var rpoint = point
var
rpoint = point
rpoint.byteswap()
let size =
defaultBrEc.mulgen(addr dst[0], addr rpoint[0], Curve25519KeySize, EC_curve25519)
let
size = defaultBrEc.mulgen(
addr dst[0],
addr rpoint[0],
Curve25519KeySize,
EC_curve25519)
assert size == Curve25519KeySize
@@ -76,8 +86,7 @@ proc random*(_: type[Curve25519Key], rng: var HmacDrbgContext): Curve25519Key =
var res: Curve25519Key
let defaultBrEc = ecGetDefault()
let len = ecKeygen(
PrngClassPointerConst(addr rng.vtable), defaultBrEc, nil, addr res[0], EC_curve25519
)
addr rng.vtable, defaultBrEc, nil, addr res[0], EC_curve25519)
# Per bearssl documentation, the keygen only fails if the curve is
# unrecognised -
doAssert len == Curve25519KeySize, "Could not generate curve"

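A rough sketch of how `Curve25519Key.random` and `Curve25519.mul` above compose into an X25519-style exchange. `newRng` is assumed from crypto.nim; the standard base point (u = 9) is written out by hand:

import libp2p/crypto/crypto
import libp2p/crypto/curve25519

let rng = newRng()

# Standard X25519 generator: u-coordinate 9, remaining bytes zero.
var basePoint: Curve25519Key
basePoint[0] = 9'u8

let
  alicePriv = Curve25519Key.random(rng[])
  bobPriv = Curve25519Key.random(rng[])

# Public keys: multiply the base point in place by each private scalar.
var alicePub = basePoint
Curve25519.mul(alicePub, alicePriv)
var bobPub = basePoint
Curve25519.mul(bobPub, bobPriv)

# Both sides land on the same shared point: a*(b*G) == b*(a*G).
var aliceShared = bobPub
Curve25519.mul(aliceShared, alicePriv)
var bobShared = alicePub
Curve25519.mul(bobShared, bobPriv)
doAssert aliceShared == bobShared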

@@ -14,19 +14,17 @@
## BearSSL library <https://bearssl.org/>
## Copyright(C) 2018 Thomas Pornin <pornin@bolet.org>.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import bearssl/[ec, rand, hash]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import minasn1
export minasn1.Asn1Error
import stew/ctops
import results
import ../utils/sequninit
import ../utility
import stew/[results, ctops]
export results
const
@@ -60,22 +58,23 @@ type
buffer*: seq[byte]
EcCurveKind* = enum
Secp256r1 = EC_secp256r1
Secp384r1 = EC_secp384r1
Secp256r1 = EC_secp256r1,
Secp384r1 = EC_secp384r1,
Secp521r1 = EC_secp521r1
EcPKI* = EcPrivateKey | EcPublicKey | EcSignature
EcError* = enum
EcRngError
EcKeyGenError
EcPublicKeyError
EcKeyIncorrectError
EcRngError,
EcKeyGenError,
EcPublicKeyError,
EcKeyIncorrectError,
EcSignatureError
EcResult*[T] = Result[T, EcError]
const EcSupportedCurvesCint* = @[cint(Secp256r1), cint(Secp384r1), cint(Secp521r1)]
const
EcSupportedCurvesCint* = {cint(Secp256r1), cint(Secp384r1), cint(Secp521r1)}
proc `-`(x: uint32): uint32 {.inline.} =
result = (0xFFFF_FFFF'u32 - x) + 1'u32
@@ -89,7 +88,7 @@ proc CMP(x, y: uint32): int32 {.inline.} =
proc EQ0(x: int32): uint32 {.inline.} =
var q = cast[uint32](x)
result = not (q or -q) shr 31
result = not(q or -q) shr 31
proc NEQ(x, y: uint32): uint32 {.inline.} =
var q = cast[uint32](x xor y)
@@ -114,7 +113,7 @@ proc checkScalar(scalar: openArray[byte], curve: cint): uint32 =
for u in scalar:
z = z or u
if len(scalar) == int(orderlen):
for i in 0 ..< len(scalar):
for i in 0..<len(scalar):
c = c or (-(cast[int32](EQ0(c))) and CMP(scalar[i], order[i]))
else:
c = -1
@@ -127,7 +126,8 @@ proc checkPublic(key: openArray[byte], curve: cint): uint32 =
var impl = ecGetDefault()
var orderlen: uint = 0
discard impl.order(curve, orderlen)
result = impl.mul(unsafeAddr ckey[0], uint(len(ckey)), addr x[0], uint(len(x)), curve)
result = impl.mul(unsafeAddr ckey[0], uint(len(ckey)),
addr x[0], uint(len(x)), curve)
proc getOffset(pubkey: EcPublicKey): int {.inline.} =
let o = cast[uint](pubkey.key.q) - cast[uint](unsafeAddr pubkey.buffer[0])
@@ -145,15 +145,21 @@ proc getOffset(seckey: EcPrivateKey): int {.inline.} =
template getPublicKeyLength*(curve: EcCurveKind): int =
case curve
of Secp256r1: PubKey256Length
of Secp384r1: PubKey384Length
of Secp521r1: PubKey521Length
of Secp256r1:
PubKey256Length
of Secp384r1:
PubKey384Length
of Secp521r1:
PubKey521Length
template getPrivateKeyLength*(curve: EcCurveKind): int =
case curve
of Secp256r1: SecKey256Length
of Secp384r1: SecKey384Length
of Secp521r1: SecKey521Length
of Secp256r1:
SecKey256Length
of Secp384r1:
SecKey384Length
of Secp521r1:
SecKey521Length
proc copy*[T: EcPKI](dst: var T, src: T): bool =
## Copy EC `private key`, `public key` or `signature` ``src`` to ``dst``.
@@ -195,7 +201,7 @@ proc copy*[T: EcPKI](src: T): T {.inline.} =
if not copy(result, src):
raise newException(EcKeyIncorrectError, "Incorrect key or signature")
proc clear*[T: EcPKI | EcKeyPair](pki: var T) =
proc clear*[T: EcPKI|EcKeyPair](pki: var T) =
## Wipe and clear EC `private key`, `public key` or `signature` object.
doAssert(not isNil(pki))
when T is EcPrivateKey:
@@ -226,8 +232,8 @@ proc clear*[T: EcPKI | EcKeyPair](pki: var T) =
pki.pubkey.key.curve = 0
proc random*(
T: typedesc[EcPrivateKey], kind: EcCurveKind, rng: var HmacDrbgContext
): EcResult[EcPrivateKey] =
T: typedesc[EcPrivateKey], kind: EcCurveKind,
rng: var HmacDrbgContext): EcResult[EcPrivateKey] =
## Generate new random EC private key using BearSSL's HMAC-SHA256-DRBG
## algorithm.
##
@@ -235,13 +241,9 @@ proc random*(
## secp521r1).
var ecimp = ecGetDefault()
var res = new EcPrivateKey
if ecKeygen(
PrngClassPointerConst(addr rng.vtable),
ecimp,
addr res.key,
addr res.buffer[0],
safeConvert[cint](kind),
) == 0:
if ecKeygen(addr rng.vtable, ecimp,
addr res.key, addr res.buffer[0],
cast[cint](kind)) == 0:
err(EcKeyGenError)
else:
ok(res)
@@ -255,7 +257,8 @@ proc getPublicKey*(seckey: EcPrivateKey): EcResult[EcPublicKey] =
if seckey.key.curve in EcSupportedCurvesCint:
var res = new EcPublicKey
assert res.buffer.len > getPublicKeyLength(cast[EcCurveKind](seckey.key.curve))
if ecComputePub(ecimp, addr res.key, addr res.buffer[0], unsafeAddr seckey.key) == 0:
if ecComputePub(ecimp, addr res.key,
addr res.buffer[0], unsafeAddr seckey.key) == 0:
err(EcKeyIncorrectError)
else:
ok(res)
@@ -263,23 +266,23 @@ proc getPublicKey*(seckey: EcPrivateKey): EcResult[EcPublicKey] =
err(EcKeyIncorrectError)
proc random*(
T: typedesc[EcKeyPair], kind: EcCurveKind, rng: var HmacDrbgContext
): EcResult[T] =
T: typedesc[EcKeyPair], kind: EcCurveKind,
rng: var HmacDrbgContext): EcResult[T] =
## Generate new random EC private and public keypair using BearSSL's
## HMAC-SHA256-DRBG algorithm.
##
## ``kind`` elliptic curve kind of your choice (secp256r1, secp384r1 or
## secp521r1).
let
seckey = ?EcPrivateKey.random(kind, rng)
pubkey = ?seckey.getPublicKey()
seckey = ? EcPrivateKey.random(kind, rng)
pubkey = ? seckey.getPublicKey()
key = EcKeyPair(seckey: seckey, pubkey: pubkey)
ok(key)
proc `$`*(seckey: EcPrivateKey): string =
## Return string representation of EC private key.
if isNil(seckey) or seckey.key.curve == 0 or seckey.key.xlen == 0 or
len(seckey.buffer) == 0:
len(seckey.buffer) == 0:
result = "Empty or uninitialized ECNIST key"
else:
if seckey.key.curve notin EcSupportedCurvesCint:
@@ -295,7 +298,7 @@ proc `$`*(seckey: EcPrivateKey): string =
proc `$`*(pubkey: EcPublicKey): string =
## Return string representation of EC public key.
if isNil(pubkey) or pubkey.key.curve == 0 or pubkey.key.qlen == 0 or
len(pubkey.buffer) == 0:
len(pubkey.buffer) == 0:
result = "Empty or uninitialized ECNIST key"
else:
if pubkey.key.curve notin EcSupportedCurvesCint:
@@ -368,7 +371,7 @@ proc toBytes*(seckey: EcPrivateKey, data: var openArray[byte]): EcResult[int] =
return err(EcKeyIncorrectError)
if seckey.key.curve in EcSupportedCurvesCint:
var offset, length: int
var pubkey = ?seckey.getPublicKey()
var pubkey = ? seckey.getPublicKey()
var b = Asn1Buffer.init()
var p = Asn1Composite.init(Asn1Tag.Sequence)
var c0 = Asn1Composite.init(0)
@@ -384,14 +387,16 @@ proc toBytes*(seckey: EcPrivateKey, data: var openArray[byte]): EcResult[int] =
if offset < 0:
return err(EcKeyIncorrectError)
length = int(pubkey.key.qlen)
c1.write(Asn1Tag.BitString, pubkey.buffer.toOpenArray(offset, offset + length - 1))
c1.write(Asn1Tag.BitString,
pubkey.buffer.toOpenArray(offset, offset + length - 1))
c1.finish()
offset = seckey.getOffset()
if offset < 0:
return err(EcKeyIncorrectError)
length = int(seckey.key.xlen)
p.write(1'u64)
p.write(Asn1Tag.OctetString, seckey.buffer.toOpenArray(offset, offset + length - 1))
p.write(Asn1Tag.OctetString,
seckey.buffer.toOpenArray(offset, offset + length - 1))
p.write(c0)
p.write(c1)
p.finish()
@@ -405,6 +410,7 @@ proc toBytes*(seckey: EcPrivateKey, data: var openArray[byte]): EcResult[int] =
else:
err(EcKeyIncorrectError)
proc toBytes*(pubkey: EcPublicKey, data: var openArray[byte]): EcResult[int] =
## Serialize EC public key ``pubkey`` to ASN.1 DER binary form and store it
## to ``data``.
@@ -430,7 +436,8 @@ proc toBytes*(pubkey: EcPublicKey, data: var openArray[byte]): EcResult[int] =
if offset < 0:
return err(EcKeyIncorrectError)
let length = int(pubkey.key.qlen)
p.write(Asn1Tag.BitString, pubkey.buffer.toOpenArray(offset, offset + length - 1))
p.write(Asn1Tag.BitString,
pubkey.buffer.toOpenArray(offset, offset + length - 1))
p.finish()
b.write(p)
b.finish()
@@ -459,10 +466,10 @@ proc getBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
if isNil(seckey):
return err(EcKeyIncorrectError)
if seckey.key.curve in EcSupportedCurvesCint:
var res = newSeqUninit[byte](0)
let length = ?seckey.toBytes(res)
var res = newSeq[byte]()
let length = ? seckey.toBytes(res)
res.setLen(length)
discard ?seckey.toBytes(res)
discard ? seckey.toBytes(res)
ok(res)
else:
err(EcKeyIncorrectError)
@@ -472,10 +479,10 @@ proc getBytes*(pubkey: EcPublicKey): EcResult[seq[byte]] =
if isNil(pubkey):
return err(EcKeyIncorrectError)
if pubkey.key.curve in EcSupportedCurvesCint:
var res = newSeqUninit[byte](0)
let length = ?pubkey.toBytes(res)
var res = newSeq[byte]()
let length = ? pubkey.toBytes(res)
res.setLen(length)
discard ?pubkey.toBytes(res)
discard ? pubkey.toBytes(res)
ok(res)
else:
err(EcKeyIncorrectError)
@@ -484,10 +491,10 @@ proc getBytes*(sig: EcSignature): EcResult[seq[byte]] =
## Serialize EC signature ``sig`` to ASN.1 DER binary form and return it.
if isNil(sig):
return err(EcSignatureError)
var res = newSeqUninit[byte](0)
let length = ?sig.toBytes(res)
var res = newSeq[byte]()
let length = ? sig.toBytes(res)
res.setLen(length)
discard ?sig.toBytes(res)
discard ? sig.toBytes(res)
ok(res)
proc getRawBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
@@ -495,10 +502,10 @@ proc getRawBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
if isNil(seckey):
return err(EcKeyIncorrectError)
if seckey.key.curve in EcSupportedCurvesCint:
var res = newSeqUninit[byte](0)
let length = ?seckey.toRawBytes(res)
var res = newSeq[byte]()
let length = ? seckey.toRawBytes(res)
res.setLen(length)
discard ?seckey.toRawBytes(res)
discard ? seckey.toRawBytes(res)
ok(res)
else:
err(EcKeyIncorrectError)
@@ -508,10 +515,10 @@ proc getRawBytes*(pubkey: EcPublicKey): EcResult[seq[byte]] =
if isNil(pubkey):
return err(EcKeyIncorrectError)
if pubkey.key.curve in EcSupportedCurvesCint:
var res = newSeqUninit[byte](0)
let length = ?pubkey.toRawBytes(res)
var res = newSeq[byte]()
let length = ? pubkey.toRawBytes(res)
res.setLen(length)
discard ?pubkey.toRawBytes(res)
discard ? pubkey.toRawBytes(res)
return ok(res)
else:
return err(EcKeyIncorrectError)
@@ -520,10 +527,10 @@ proc getRawBytes*(sig: EcSignature): EcResult[seq[byte]] =
## Serialize EC signature ``sig`` to raw binary form and return it.
if isNil(sig):
return err(EcSignatureError)
var res = newSeqUninit[byte](0)
let length = ?sig.toBytes(res)
var res = newSeq[byte]()
let length = ? sig.toBytes(res)
res.setLen(length)
discard ?sig.toBytes(res)
discard ? sig.toBytes(res)
ok(res)
proc `==`*(pubkey1, pubkey2: EcPublicKey): bool =
@@ -543,10 +550,8 @@ proc `==`*(pubkey1, pubkey2: EcPublicKey): bool =
let op2 = pubkey2.getOffset()
if op1 == -1 or op2 == -1:
return false
return CT.isEqual(
pubkey1.buffer.toOpenArray(op1, pubkey1.key.qlen - 1),
pubkey2.buffer.toOpenArray(op2, pubkey2.key.qlen - 1),
)
return CT.isEqual(pubkey1.buffer.toOpenArray(op1, pubkey1.key.qlen - 1),
pubkey2.buffer.toOpenArray(op2, pubkey2.key.qlen - 1))
proc `==`*(seckey1, seckey2: EcPrivateKey): bool =
## Returns ``true`` if both keys ``seckey1`` and ``seckey2`` are equal.
@@ -565,10 +570,8 @@ proc `==`*(seckey1, seckey2: EcPrivateKey): bool =
let op2 = seckey2.getOffset()
if op1 == -1 or op2 == -1:
return false
return CT.isEqual(
seckey1.buffer.toOpenArray(op1, seckey1.key.xlen - 1),
seckey2.buffer.toOpenArray(op2, seckey2.key.xlen - 1),
)
return CT.isEqual(seckey1.buffer.toOpenArray(op1, seckey1.key.xlen - 1),
seckey2.buffer.toOpenArray(op2, seckey2.key.xlen - 1))
proc `==`*(a, b: EcSignature): bool =
## Return ``true`` if both signatures ``sig1`` and ``sig2`` are equal.
@@ -602,36 +605,36 @@ proc init*(key: var EcPrivateKey, data: openArray[byte]): Result[void, Asn1Error
var ab = Asn1Buffer.init(data)
field = ?ab.read()
field = ? ab.read()
if field.kind != Asn1Tag.Sequence:
return err(Asn1Error.Incorrect)
var ib = field.getBuffer()
field = ?ib.read()
field = ? ib.read()
if field.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
if field.vint != 1'u64:
return err(Asn1Error.Incorrect)
raw = ?ib.read()
raw = ? ib.read()
if raw.kind != Asn1Tag.OctetString:
return err(Asn1Error.Incorrect)
oid = ?ib.read()
oid = ? ib.read()
if oid.kind != Asn1Tag.Oid:
return err(Asn1Error.Incorrect)
if oid == Asn1OidSecp256r1:
curve = safeConvert[cint](Secp256r1)
curve = cast[cint](Secp256r1)
elif oid == Asn1OidSecp384r1:
curve = safeConvert[cint](Secp384r1)
curve = cast[cint](Secp384r1)
elif oid == Asn1OidSecp521r1:
curve = safeConvert[cint](Secp521r1)
curve = cast[cint](Secp521r1)
else:
return err(Asn1Error.Incorrect)
@@ -655,19 +658,19 @@ proc init*(pubkey: var EcPublicKey, data: openArray[byte]): Result[void, Asn1Err
var ab = Asn1Buffer.init(data)
field = ?ab.read()
field = ? ab.read()
if field.kind != Asn1Tag.Sequence:
return err(Asn1Error.Incorrect)
var ib = field.getBuffer()
field = ?ib.read()
field = ? ib.read()
if field.kind != Asn1Tag.Sequence:
return err(Asn1Error.Incorrect)
var ob = field.getBuffer()
oid = ?ob.read()
oid = ? ob.read()
if oid.kind != Asn1Tag.Oid:
return err(Asn1Error.Incorrect)
@@ -675,21 +678,21 @@ proc init*(pubkey: var EcPublicKey, data: openArray[byte]): Result[void, Asn1Err
if oid != Asn1OidEcPublicKey:
return err(Asn1Error.Incorrect)
oid = ?ob.read()
oid = ? ob.read()
if oid.kind != Asn1Tag.Oid:
return err(Asn1Error.Incorrect)
if oid == Asn1OidSecp256r1:
curve = safeConvert[cint](Secp256r1)
curve = cast[cint](Secp256r1)
elif oid == Asn1OidSecp384r1:
curve = safeConvert[cint](Secp384r1)
curve = cast[cint](Secp384r1)
elif oid == Asn1OidSecp521r1:
curve = safeConvert[cint](Secp521r1)
curve = cast[cint](Secp521r1)
else:
return err(Asn1Error.Incorrect)
raw = ?ib.read()
raw = ? ib.read()
if raw.kind != Asn1Tag.BitString:
return err(Asn1Error.Incorrect)
@@ -715,14 +718,16 @@ proc init*(sig: var EcSignature, data: openArray[byte]): Result[void, Asn1Error]
else:
err(Asn1Error.Incorrect)
proc init*[T: EcPKI](sospk: var T, data: string): Result[void, Asn1Error] {.inline.} =
proc init*[T: EcPKI](sospk: var T,
data: string): Result[void, Asn1Error] {.inline.} =
## Initialize EC `private key`, `public key` or `signature` ``sospk`` from
## ASN.1 DER hexadecimal string representation ``data``.
##
## Procedure returns ``Asn1Status``.
sospk.init(ncrutils.fromHex(data))
proc init*(t: typedesc[EcPrivateKey], data: openArray[byte]): EcResult[EcPrivateKey] =
proc init*(t: typedesc[EcPrivateKey],
data: openArray[byte]): EcResult[EcPrivateKey] =
## Initialize EC private key from ASN.1 DER binary representation ``data`` and
## return constructed object.
var key: EcPrivateKey
@@ -732,7 +737,8 @@ proc init*(t: typedesc[EcPrivateKey], data: openArray[byte]): EcResult[EcPrivate
else:
ok(key)
proc init*(t: typedesc[EcPublicKey], data: openArray[byte]): EcResult[EcPublicKey] =
proc init*(t: typedesc[EcPublicKey],
data: openArray[byte]): EcResult[EcPublicKey] =
## Initialize EC public key from ASN.1 DER binary representation ``data`` and
## return constructed object.
var key: EcPublicKey
@@ -742,7 +748,8 @@ proc init*(t: typedesc[EcPublicKey], data: openArray[byte]): EcResult[EcPublicKe
else:
ok(key)
proc init*(t: typedesc[EcSignature], data: openArray[byte]): EcResult[EcSignature] =
proc init*(t: typedesc[EcSignature],
data: openArray[byte]): EcResult[EcSignature] =
## Initialize EC signature from raw binary representation ``data`` and
## return constructed object.
var sig: EcSignature
@@ -767,13 +774,13 @@ proc initRaw*(key: var EcPrivateKey, data: openArray[byte]): bool =
## Procedure returns ``true`` on success, ``false`` otherwise.
var curve: cint
if len(data) == SecKey256Length:
curve = safeConvert[cint](Secp256r1)
curve = cast[cint](Secp256r1)
result = true
elif len(data) == SecKey384Length:
curve = safeConvert[cint](Secp384r1)
curve = cast[cint](Secp384r1)
result = true
elif len(data) == SecKey521Length:
curve = safeConvert[cint](Secp521r1)
curve = cast[cint](Secp521r1)
result = true
if result:
result = false
@@ -798,13 +805,13 @@ proc initRaw*(pubkey: var EcPublicKey, data: openArray[byte]): bool =
if len(data) > 0:
if data[0] == 0x04'u8:
if len(data) == PubKey256Length:
curve = safeConvert[cint](Secp256r1)
curve = cast[cint](Secp256r1)
result = true
elif len(data) == PubKey384Length:
curve = safeConvert[cint](Secp384r1)
curve = cast[cint](Secp384r1)
result = true
elif len(data) == PubKey521Length:
curve = safeConvert[cint](Secp521r1)
curve = cast[cint](Secp521r1)
result = true
if result:
result = false
@@ -825,7 +832,8 @@ proc initRaw*(sig: var EcSignature, data: openArray[byte]): bool =
##
## Procedure returns ``true`` on success, ``false`` otherwise.
let length = len(data)
if (length == Sig256Length) or (length == Sig384Length) or (length == Sig521Length):
if (length == Sig256Length) or (length == Sig384Length) or
(length == Sig521Length):
result = true
if result:
sig = new EcSignature
@@ -838,9 +846,8 @@ proc initRaw*[T: EcPKI](sospk: var T, data: string): bool {.inline.} =
## Procedure returns ``true`` on success, ``false`` otherwise.
result = sospk.initRaw(ncrutils.fromHex(data))
proc initRaw*(
t: typedesc[EcPrivateKey], data: openArray[byte]
): EcResult[EcPrivateKey] =
proc initRaw*(t: typedesc[EcPrivateKey],
data: openArray[byte]): EcResult[EcPrivateKey] =
## Initialize EC private key from raw binary representation ``data`` and
## return constructed object.
var res: EcPrivateKey
@@ -849,7 +856,8 @@ proc initRaw*(
else:
ok(res)
proc initRaw*(t: typedesc[EcPublicKey], data: openArray[byte]): EcResult[EcPublicKey] =
proc initRaw*(t: typedesc[EcPublicKey],
data: openArray[byte]): EcResult[EcPublicKey] =
## Initialize EC public key from raw binary representation ``data`` and
## return constructed object.
var res: EcPublicKey
@@ -858,7 +866,8 @@ proc initRaw*(t: typedesc[EcPublicKey], data: openArray[byte]): EcResult[EcPubli
else:
ok(res)
proc initRaw*(t: typedesc[EcSignature], data: openArray[byte]): EcResult[EcSignature] =
proc initRaw*(t: typedesc[EcSignature],
data: openArray[byte]): EcResult[EcSignature] =
## Initialize EC signature from raw binary representation ``data`` and
## return constructed object.
var res: EcSignature
@@ -885,19 +894,16 @@ proc scalarMul*(pub: EcPublicKey, sec: EcPrivateKey): EcPublicKey =
let poffset = key.getOffset()
let soffset = sec.getOffset()
if poffset >= 0 and soffset >= 0:
let res = impl.mul(
addr key.buffer[poffset],
key.key.qlen,
unsafeAddr sec.buffer[soffset],
sec.key.xlen,
key.key.curve,
)
let res = impl.mul(addr key.buffer[poffset],
key.key.qlen,
unsafeAddr sec.buffer[soffset],
sec.key.xlen,
key.key.curve)
if res != 0:
result = key
proc toSecret*(
pubkey: EcPublicKey, seckey: EcPrivateKey, data: var openArray[byte]
): int =
proc toSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey,
data: var openArray[byte]): int =
## Calculate ECDHE shared secret using Go's elliptic/curve approach, using
## remote public key ``pubkey`` and local private key ``seckey`` and store
## shared secret to ``data``.
@@ -930,12 +936,11 @@ proc getSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey): seq[byte] =
var data: array[Secret521Length, byte]
let res = toSecret(pubkey, seckey, data)
if res > 0:
result = newSeqUninit[byte](res)
result = newSeq[byte](res)
copyMem(addr result[0], addr data[0], res)
proc sign*[T: byte | char](
seckey: EcPrivateKey, message: openArray[T]
): EcResult[EcSignature] {.gcsafe.} =
proc sign*[T: byte|char](seckey: EcPrivateKey,
message: openArray[T]): EcResult[EcSignature] {.gcsafe.} =
## Get ECDSA signature of data ``message`` using private key ``seckey``.
if isNil(seckey):
return err(EcKeyIncorrectError)
@@ -944,7 +949,7 @@ proc sign*[T: byte | char](
var impl = ecGetDefault()
if seckey.key.curve in EcSupportedCurvesCint:
var sig = new EcSignature
sig.buffer = newSeqUninit[byte](256)
sig.buffer = newSeq[byte](256)
var kv = addr sha256Vtable
kv.init(addr hc.vtable)
if len(message) > 0:
@@ -952,8 +957,8 @@ proc sign*[T: byte | char](
else:
kv.update(addr hc.vtable, nil, 0)
kv.out(addr hc.vtable, addr hash[0])
let res =
ecdsaI31SignAsn1(impl, kv, addr hash[0], addr seckey.key, addr sig.buffer[0])
let res = ecdsaI31SignAsn1(impl, kv, addr hash[0], addr seckey.key,
addr sig.buffer[0])
# Clear context with initial value
kv.init(addr hc.vtable)
if res != 0:
@@ -964,9 +969,8 @@ proc sign*[T: byte | char](
else:
err(EcKeyIncorrectError)
proc verify*[T: byte | char](
sig: EcSignature, message: openArray[T], pubkey: EcPublicKey
): bool {.inline.} =
proc verify*[T: byte|char](sig: EcSignature, message: openArray[T],
pubkey: EcPublicKey): bool {.inline.} =
## Verify ECDSA signature ``sig`` using public key ``pubkey`` and data
## ``message``.
##
@@ -984,41 +988,9 @@ proc verify*[T: byte | char](
else:
kv.update(addr hc.vtable, nil, 0)
kv.out(addr hc.vtable, addr hash[0])
let res = ecdsaI31VrfyAsn1(
impl,
addr hash[0],
uint(len(hash)),
unsafeAddr pubkey.key,
addr sig.buffer[0],
uint(len(sig.buffer)),
)
let res = ecdsaI31VrfyAsn1(impl, addr hash[0], uint(len(hash)),
unsafeAddr pubkey.key,
addr sig.buffer[0], uint(len(sig.buffer)))
# Clear context with initial value
kv.init(addr hc.vtable)
result = (res == 1)
type ECDHEScheme* = EcCurveKind
proc ephemeral*(scheme: ECDHEScheme, rng: var HmacDrbgContext): EcResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE.
var keypair: EcKeyPair
if scheme == Secp256r1:
keypair = ?EcKeyPair.random(Secp256r1, rng)
elif scheme == Secp384r1:
keypair = ?EcKeyPair.random(Secp384r1, rng)
elif scheme == Secp521r1:
keypair = ?EcKeyPair.random(Secp521r1, rng)
ok(keypair)
proc ephemeral*(scheme: string, rng: var HmacDrbgContext): EcResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE using string encoding.
##
## Currently supported encoding strings are P-256, P-384, P-521, if encoding
## string is not supported P-521 key will be generated.
if scheme == "P-256":
ephemeral(Secp256r1, rng)
elif scheme == "P-384":
ephemeral(Secp384r1, rng)
elif scheme == "P-521":
ephemeral(Secp521r1, rng)
else:
ephemeral(Secp521r1, rng)

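A minimal sketch of the ECDSA and ECDHE entry points defined in this file; only `newRng` is assumed from crypto.nim:

import libp2p/crypto/crypto
import libp2p/crypto/ecnist

let rng = newRng()

# ECDSA sign/verify with a P-256 key.
let seckey = EcPrivateKey.random(Secp256r1, rng[]).expect("keygen")
let pubkey = seckey.getPublicKey().expect("pubkey")
let msg = "sample message"
let sig = seckey.sign(msg).expect("sign")
doAssert sig.verify(msg, pubkey)

# Ephemeral ECDHE: both parties derive the same shared secret.
let alice = ephemeral("P-256", rng[]).expect("alice keypair")
let bob = ephemeral("P-256", rng[]).expect("bob keypair")
doAssert getSecret(bob.pubkey, alice.seckey) == getSecret(alice.pubkey, bob.seckey)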
File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -9,45 +9,28 @@
# https://tools.ietf.org/html/rfc5869
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import nimcrypto
import bearssl/[kdf, hash]
import bearssl/[kdf, rand, hash]
type HkdfResult*[len: static int] = array[len, byte]
proc hkdf*[T: sha256, len: static int](
_: type[T],
salt, ikm, info: openArray[byte],
outputs: var openArray[HkdfResult[len]],
) =
var ctx: HkdfContext
proc hkdf*[T: sha256; len: static int](_: type[T]; salt, ikm, info: openArray[byte]; outputs: var openArray[HkdfResult[len]]) =
var
ctx: HkdfContext
hkdfInit(
ctx,
addr sha256Vtable,
if salt.len > 0:
unsafeAddr salt[0]
else:
nil,
csize_t(salt.len),
)
ctx, addr sha256Vtable,
if salt.len > 0: unsafeAddr salt[0] else: nil, csize_t(salt.len))
hkdfInject(
ctx,
if ikm.len > 0:
unsafeAddr ikm[0]
else:
nil,
csize_t(ikm.len),
)
ctx, if ikm.len > 0: unsafeAddr ikm[0] else: nil, csize_t(ikm.len))
hkdfFlip(ctx)
for i in 0 .. outputs.high:
for i in 0..outputs.high:
discard hkdfProduce(
ctx,
if info.len > 0:
unsafeAddr info[0]
else:
nil,
csize_t(info.len),
addr outputs[i][0],
csize_t(outputs[i].len),
)
if info.len > 0: unsafeAddr info[0]
else: nil, csize_t(info.len),
addr outputs[i][0], csize_t(outputs[i].len))

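A short usage sketch for the HKDF helper above, expanding one input keying material into two distinct 32-byte outputs (the salt/ikm/info values are arbitrary placeholders):

import nimcrypto
import libp2p/crypto/hkdf

let
  salt = [byte 0x00, 0x01, 0x02, 0x03]
  ikm = [byte 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b]
  info = [byte 0xf0, 0xf1, 0xf2]
var outputs: array[2, HkdfResult[32]]
sha256.hkdf(salt, ikm, info, outputs)
doAssert outputs[0] != outputs[1]   # each expanded block differs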

@@ -9,46 +9,47 @@
## This module implements minimal ASN.1 encoding/decoding primitives.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import stew/[endians2, ctops]
import results
import stew/[endians2, results, ctops]
export results
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import ../utility
import ../utils/sequninit
type
Asn1Error* {.pure.} = enum
Overflow
Incomplete
Indefinite
Incorrect
NoSupport
Overflow,
Incomplete,
Indefinite,
Incorrect,
NoSupport,
Overrun
Asn1Result*[T] = Result[T, Asn1Error]
Asn1Class* {.pure.} = enum
Universal = 0x00
Universal = 0x00,
Application = 0x01
ContextSpecific = 0x02
Private = 0x03
Asn1Tag* {.pure.} = enum
## Protobuf's field types enum
NoSupport
Boolean
Integer
BitString
OctetString
Null
Oid
Sequence
NoSupport,
Boolean,
Integer,
BitString,
OctetString,
Null,
Oid,
Sequence,
Context
Asn1Buffer* = object of RootObj ## ASN.1's message representation object
Asn1Buffer* = object of RootObj
## ASN.1's message representation object
buffer*: seq[byte]
offset*: int
length*: int
@@ -74,23 +75,37 @@ type
idx*: int
const
Asn1OidSecp256r1* =
[0x2A'u8, 0x86'u8, 0x48'u8, 0xCE'u8, 0x3D'u8, 0x03'u8, 0x01'u8, 0x07'u8]
Asn1OidSecp256r1* = [
0x2A'u8, 0x86'u8, 0x48'u8, 0xCE'u8, 0x3D'u8, 0x03'u8, 0x01'u8, 0x07'u8
]
## Encoded OID for `secp256r1` curve (1.2.840.10045.3.1.7)
Asn1OidSecp384r1* = [0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x22'u8]
Asn1OidSecp384r1* = [
0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x22'u8
]
## Encoded OID for `secp384r1` curve (1.3.132.0.34)
Asn1OidSecp521r1* = [0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x23'u8]
Asn1OidSecp521r1* = [
0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x23'u8
]
## Encoded OID for `secp521r1` curve (1.3.132.0.35)
Asn1OidSecp256k1* = [0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x0A'u8]
Asn1OidSecp256k1* = [
0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x0A'u8
]
## Encoded OID for `secp256k1` curve (1.3.132.0.10)
Asn1OidEcPublicKey* = [0x2A'u8, 0x86'u8, 0x48'u8, 0xCE'u8, 0x3D'u8, 0x02'u8, 0x01'u8]
Asn1OidEcPublicKey* = [
0x2A'u8, 0x86'u8, 0x48'u8, 0xCE'u8, 0x3D'u8, 0x02'u8, 0x01'u8
]
## Encoded OID for Elliptic Curve Public Key (1.2.840.10045.2.1)
Asn1OidRsaEncryption* =
[0x2A'u8, 0x86'u8, 0x48'u8, 0x86'u8, 0xF7'u8, 0x0D'u8, 0x01'u8, 0x01'u8, 0x01'u8]
Asn1OidRsaEncryption* = [
0x2A'u8, 0x86'u8, 0x48'u8, 0x86'u8, 0xF7'u8, 0x0D'u8, 0x01'u8,
0x01'u8, 0x01'u8
]
## Encoded OID for RSA Encryption (1.2.840.113549.1.1.1)
Asn1True* = [0x01'u8, 0x01'u8, 0xFF'u8] ## Encoded boolean ``TRUE``.
Asn1False* = [0x01'u8, 0x01'u8, 0x00'u8] ## Encoded boolean ``FALSE``.
Asn1Null* = [0x05'u8, 0x00'u8] ## Encoded ``NULL`` value.
Asn1True* = [0x01'u8, 0x01'u8, 0xFF'u8]
## Encoded boolean ``TRUE``.
Asn1False* = [0x01'u8, 0x01'u8, 0x00'u8]
## Encoded boolean ``FALSE``.
Asn1Null* = [0x05'u8, 0x00'u8]
## Encoded ``NULL`` value.
template toOpenArray*(ab: Asn1Buffer): untyped =
toOpenArray(ab.buffer, ab.offset, ab.buffer.high)
@@ -104,10 +119,10 @@ template toOpenArray*(af: Asn1Field): untyped =
template isEmpty*(ab: Asn1Buffer): bool =
ab.offset >= len(ab.buffer)
template isEnough*(ab: Asn1Buffer, length: int64): bool =
template isEnough*(ab: Asn1Buffer, length: int): bool =
len(ab.buffer) >= ab.offset + length
proc len*[T: Asn1Buffer | Asn1Composite](abc: T): int {.inline.} =
proc len*[T: Asn1Buffer|Asn1Composite](abc: T): int {.inline.} =
len(abc.buffer) - abc.offset
proc len*(field: Asn1Field): int {.inline.} =
@@ -116,22 +131,31 @@ proc len*(field: Asn1Field): int {.inline.} =
template getPtr*(field: untyped): pointer =
cast[pointer](unsafeAddr field.buffer[field.offset])
proc extend*[T: Asn1Buffer | Asn1Composite](abc: var T, length: int) {.inline.} =
proc extend*[T: Asn1Buffer|Asn1Composite](abc: var T, length: int) {.inline.} =
## Extend buffer or composite's internal buffer by ``length`` octets.
abc.buffer.setLen(len(abc.buffer) + length)
proc code*(tag: Asn1Tag): byte {.inline.} =
## Converts Nim ``tag`` enum to ASN.1 tag code.
case tag
of Asn1Tag.NoSupport: 0x00'u8
of Asn1Tag.Boolean: 0x01'u8
of Asn1Tag.Integer: 0x02'u8
of Asn1Tag.BitString: 0x03'u8
of Asn1Tag.OctetString: 0x04'u8
of Asn1Tag.Null: 0x05'u8
of Asn1Tag.Oid: 0x06'u8
of Asn1Tag.Sequence: 0x30'u8
of Asn1Tag.Context: 0xA0'u8
case tag:
of Asn1Tag.NoSupport:
0x00'u8
of Asn1Tag.Boolean:
0x01'u8
of Asn1Tag.Integer:
0x02'u8
of Asn1Tag.BitString:
0x03'u8
of Asn1Tag.OctetString:
0x04'u8
of Asn1Tag.Null:
0x05'u8
of Asn1Tag.Oid:
0x06'u8
of Asn1Tag.Sequence:
0x30'u8
of Asn1Tag.Context:
0xA0'u8
proc asn1EncodeLength*(dest: var openArray[byte], length: uint64): int =
## Encode ASN.1 DER length part of TLV triple and return number of bytes
@@ -160,7 +184,8 @@ proc asn1EncodeLength*(dest: var openArray[byte], length: uint64): int =
# then 9, so it is safe to convert it to `int`.
int(res)
proc asn1EncodeInteger*(dest: var openArray[byte], value: openArray[byte]): int =
proc asn1EncodeInteger*(dest: var openArray[byte],
value: openArray[byte]): int =
## Encode big-endian binary representation of integer as ASN.1 DER `INTEGER`
## and return number of bytes (octets) used.
##
@@ -170,16 +195,17 @@ proc asn1EncodeInteger*(dest: var openArray[byte], value: openArray[byte]): int
var buffer: array[16, byte]
var lenlen = 0
let offset = block:
var o = 0
for i in 0 ..< len(value):
if value[o] != 0x00:
break
inc(o)
if o < len(value):
o
else:
o - 1
let offset =
block:
var o = 0
for i in 0 ..< len(value):
if value[o] != 0x00:
break
inc(o)
if o < len(value):
o
else:
o - 1
let destlen =
if len(value) > 0:
@@ -201,10 +227,12 @@ proc asn1EncodeInteger*(dest: var openArray[byte], value: openArray[byte]): int
if value[offset] >= 0x80'u8:
dest[1 + lenlen] = 0x00'u8
shift = 2
copyMem(addr dest[shift + lenlen], unsafeAddr value[offset], len(value) - offset)
copyMem(addr dest[shift + lenlen], unsafeAddr value[offset],
len(value) - offset)
destlen
proc asn1EncodeInteger*[T: SomeUnsignedInt](dest: var openArray[byte], value: T): int =
proc asn1EncodeInteger*[T: SomeUnsignedInt](dest: var openArray[byte],
value: T): int =
## Encode Nim's unsigned integer as ASN.1 DER `INTEGER` and return number of
## bytes (octets) used.
##
@@ -239,7 +267,8 @@ proc asn1EncodeNull*(dest: var openArray[byte]): int =
dest[1] = 0x00'u8
res
proc asn1EncodeOctetString*(dest: var openArray[byte], value: openArray[byte]): int =
proc asn1EncodeOctetString*(dest: var openArray[byte],
value: openArray[byte]): int =
## Encode array of bytes as ASN.1 DER `OCTET STRING` and return number of
## bytes (octets) used.
##
@@ -256,9 +285,8 @@ proc asn1EncodeOctetString*(dest: var openArray[byte], value: openArray[byte]):
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
res
proc asn1EncodeBitString*(
dest: var openArray[byte], value: openArray[byte], bits = 0
): int =
proc asn1EncodeBitString*(dest: var openArray[byte],
value: openArray[byte], bits = 0): int =
## Encode array of bytes as ASN.1 DER `BIT STRING` and return number of bytes
## (octets) used.
##
@@ -279,7 +307,7 @@ proc asn1EncodeBitString*(
let bytelen = (bitlen + 7) shr 3
# Number of unused bits
let unused = (8 - (bitlen and 7)) and 7
let mask = not ((1'u8 shl unused) - 1'u8)
let mask = not((1'u8 shl unused) - 1'u8)
var lenlen = asn1EncodeLength(buffer, uint64(bytelen + 1))
let res = 1 + lenlen + 1 + len(value)
if len(dest) >= res:
@@ -293,6 +321,55 @@ proc asn1EncodeBitString*(
dest[2 + lenlen + bytelen - 1] = lastbyte and mask
res
proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte],
value: T): int =
var v = value
if value <= cast[T](0x7F):
if len(dest) >= 1:
dest[0] = cast[byte](value)
1
else:
var s = 0
var res = 0
while v != 0:
v = v shr 7
s += 7
inc(res)
if len(dest) >= res:
var k = 0
while s != 0:
s -= 7
dest[k] = cast[byte](((value shr s) and cast[T](0x7F)) or cast[T](0x80))
inc(k)
dest[k - 1] = dest[k - 1] and 0x7F'u8
res
proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[int]): int =
## Encode array of integers ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and
## return number of bytes (octets) used.
##
## If length of ``dest`` is less than the number of required bytes to encode
## ``value``, then result of encoding will not be stored in ``dest``
## but number of bytes (octets) required will be returned.
var buffer: array[16, byte]
var res = 1
var oidlen = 1
for i in 2..<len(value):
oidlen += asn1EncodeTag(buffer, cast[uint64](value[i]))
res += asn1EncodeLength(buffer, uint64(oidlen))
res += oidlen
if len(dest) >= res:
let last = dest.high
var offset = 1
dest[0] = Asn1Tag.Oid.code()
offset += asn1EncodeLength(dest.toOpenArray(offset, last), uint64(oidlen))
dest[offset] = cast[byte](value[0] * 40 + value[1])
offset += 1
for i in 2..<len(value):
offset += asn1EncodeTag(dest.toOpenArray(offset, last),
cast[uint64](value[i]))
res
proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
## Encode array of bytes ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and return
## number of bytes (octets) used.
@@ -312,7 +389,8 @@ proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
res
proc asn1EncodeSequence*(dest: var openArray[byte], value: openArray[byte]): int =
proc asn1EncodeSequence*(dest: var openArray[byte],
value: openArray[byte]): int =
## Encode ``value`` as ASN.1 DER `SEQUENCE` and return number of bytes
## (octets) used.
##
@@ -328,7 +406,8 @@ proc asn1EncodeSequence*(dest: var openArray[byte], value: openArray[byte]): int
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
res
proc asn1EncodeComposite*(dest: var openArray[byte], value: Asn1Composite): int =
proc asn1EncodeComposite*(dest: var openArray[byte],
value: Asn1Composite): int =
## Encode composite value and return number of bytes (octets) used.
##
## If length of ``dest`` is less than the number of required bytes to encode
@@ -340,12 +419,12 @@ proc asn1EncodeComposite*(dest: var openArray[byte], value: Asn1Composite): int
if len(dest) >= res:
dest[0] = value.tag.code()
copyMem(addr dest[1], addr buffer[0], lenlen)
copyMem(addr dest[1 + lenlen], unsafeAddr value.buffer[0], len(value.buffer))
copyMem(addr dest[1 + lenlen], unsafeAddr value.buffer[0],
len(value.buffer))
res
proc asn1EncodeContextTag*(
dest: var openArray[byte], value: openArray[byte], tag: int
): int =
proc asn1EncodeContextTag*(dest: var openArray[byte], value: openArray[byte],
tag: int): int =
## Encode ASN.1 DER `CONTEXT SPECIFIC TAG` ``tag`` for value ``value`` and
## return number of bytes (octets) used.
##
@@ -364,29 +443,26 @@ proc asn1EncodeContextTag*(
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
res
proc getLength(ab: var Asn1Buffer): Asn1Result[int] =
proc getLength(ab: var Asn1Buffer): Asn1Result[uint64] =
## Decode length part of ASN.1 TLV triplet.
if not ab.isEmpty():
let b = ab.buffer[ab.offset]
if (b and 0x80'u8) == 0x00'u8:
let length = safeConvert[int](b)
let length = cast[uint64](b)
ab.offset += 1
return ok(length)
if b == 0x80'u8:
return err(Asn1Error.Indefinite)
if b == 0xFF'u8:
return err(Asn1Error.Incorrect)
let octets = safeConvert[int](b and 0x7F'u8)
if octets > 8:
let octets = cast[uint64](b and 0x7F'u8)
if octets > 8'u64:
return err(Asn1Error.Overflow)
if ab.isEnough(octets):
var lengthU: uint64 = 0
for i in 0 ..< octets:
lengthU = (lengthU shl 8) or safeConvert[uint64](ab.buffer[ab.offset + i + 1])
if lengthU > uint64(int64.high):
return err(Asn1Error.Overflow)
let length = int(lengthU)
ab.offset = ab.offset + octets + 1
if ab.isEnough(int(octets)):
var length: uint64 = 0
for i in 0..<int(octets):
length = (length shl 8) or cast[uint64](ab.buffer[ab.offset + i + 1])
ab.offset = ab.offset + int(octets) + 1
return ok(length)
else:
return err(Asn1Error.Incomplete)
@@ -398,8 +474,8 @@ proc getTag(ab: var Asn1Buffer, tag: var int): Asn1Result[Asn1Class] =
if not ab.isEmpty():
let
b = ab.buffer[ab.offset]
c = safeConvert[int]((b and 0xC0'u8) shr 6)
tag = safeConvert[int](b and 0x3F)
c = int((b and 0xC0'u8) shr 6)
tag = int(b and 0x3F)
ab.offset += 1
if c >= 0 and c < 4:
ok(cast[Asn1Class](c))
@@ -413,14 +489,14 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
var
field: Asn1Field
tag, ttag, offset: int
length, tlength: int
length, tlength: uint64
aclass: Asn1Class
inclass: bool
inclass = false
while true:
offset = ab.offset
aclass = ?ab.getTag(tag)
aclass = ? ab.getTag(tag)
case aclass
of Asn1Class.ContextSpecific:
@@ -429,9 +505,9 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
else:
inclass = true
ttag = tag
tlength = ?ab.getLength()
tlength = ? ab.getLength()
of Asn1Class.Universal:
length = ?ab.getLength()
length = ? ab.getLength()
if inclass:
if length >= tlength:
@@ -443,35 +519,31 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
if length != 1:
return err(Asn1Error.Incorrect)
if not ab.isEnough(length):
if not ab.isEnough(int(length)):
return err(Asn1Error.Incomplete)
let b = ab.buffer[ab.offset]
if b != 0xFF'u8 and b != 0x00'u8:
return err(Asn1Error.Incorrect)
return err(Asn1Error.Incorrect)
field = Asn1Field(
kind: Asn1Tag.Boolean,
klass: aclass,
index: ttag,
offset: ab.offset,
length: 1,
buffer: ab.buffer,
)
field = Asn1Field(kind: Asn1Tag.Boolean, klass: aclass,
index: ttag, offset: int(ab.offset),
length: 1, buffer: ab.buffer)
field.vbool = (b == 0xFF'u8)
ab.offset += 1
return ok(field)
of Asn1Tag.Integer.code():
# INTEGER
if length == 0:
return err(Asn1Error.Incorrect)
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)
if not ab.isEnough(int(length)):
return err(Asn1Error.Incomplete)
# Count number of leading zeroes
var zc = 0
while (zc < length) and (ab.buffer[ab.offset + zc] == 0x00'u8):
while (zc < int(length)) and (ab.buffer[ab.offset + zc] == 0x00'u8):
inc(zc)
if zc > 1:
@@ -479,87 +551,68 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
if zc == 0:
# Negative or Positive integer
field = Asn1Field(
kind: Asn1Tag.Integer,
klass: aclass,
index: ttag,
offset: ab.offset,
length: length,
buffer: ab.buffer,
)
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), buffer: ab.buffer)
if (ab.buffer[ab.offset] and 0x80'u8) == 0x80'u8:
# Negative integer
if length <= 8:
# We need this transformation because our field.vint is uint64.
for i in 0 ..< 8:
if i < 8 - length:
if i < 8 - int(length):
field.vint = (field.vint shl 8) or 0xFF'u64
else:
let offset = ab.offset + i - (8 - length)
field.vint =
(field.vint shl 8) or safeConvert[uint64](ab.buffer[offset])
let offset = ab.offset + i - (8 - int(length))
field.vint = (field.vint shl 8) or uint64(ab.buffer[offset])
else:
# Positive integer
if length <= 8:
for i in 0 ..< length:
field.vint =
(field.vint shl 8) or safeConvert[uint64](ab.buffer[ab.offset + i])
ab.offset += length
for i in 0 ..< int(length):
field.vint = (field.vint shl 8) or
uint64(ab.buffer[ab.offset + i])
ab.offset += int(length)
return ok(field)
else:
if length == 1:
# Zero value integer
field = Asn1Field(
kind: Asn1Tag.Integer,
klass: aclass,
index: ttag,
offset: ab.offset,
length: length,
vint: 0'u64,
buffer: ab.buffer,
)
ab.offset += length
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), vint: 0'u64,
buffer: ab.buffer)
ab.offset += int(length)
return ok(field)
else:
# Positive integer with leading zero
field = Asn1Field(
kind: Asn1Tag.Integer,
klass: aclass,
index: ttag,
offset: ab.offset + 1,
length: length - 1,
buffer: ab.buffer,
)
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: int(ab.offset) + 1,
length: int(length) - 1, buffer: ab.buffer)
if length <= 9:
for i in 1 ..< length:
field.vint =
(field.vint shl 8) or safeConvert[uint64](ab.buffer[ab.offset + i])
ab.offset += length
for i in 1 ..< int(length):
field.vint = (field.vint shl 8) or
uint64(ab.buffer[ab.offset + i])
ab.offset += int(length)
return ok(field)
of Asn1Tag.BitString.code():
# BIT STRING
if length == 0:
# BIT STRING should include `unused` bits field, so length should be
# bigger than 1.
return err(Asn1Error.Incorrect)
elif length == 1:
if ab.buffer[ab.offset] != 0x00'u8:
return err(Asn1Error.Incorrect)
else:
# Zero-length BIT STRING.
field = Asn1Field(
kind: Asn1Tag.BitString,
klass: aclass,
index: ttag,
offset: ab.offset + 1,
length: 0,
ubits: 0,
buffer: ab.buffer,
)
ab.offset += length
field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
index: ttag, offset: int(ab.offset + 1),
length: 0, ubits: 0, buffer: ab.buffer)
ab.offset += int(length)
return ok(field)
else:
if not ab.isEnough(length):
if not ab.isEnough(int(length)):
return err(Asn1Error.Incomplete)
let unused = ab.buffer[ab.offset]
@@ -567,84 +620,66 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
# Number of unused bits should not be bigger than `7`.
return err(Asn1Error.Incorrect)
let mask = (1'u8 shl safeConvert[int](unused)) - 1'u8
if (ab.buffer[ab.offset + length - 1] and mask) != 0x00'u8:
let mask = (1'u8 shl int(unused)) - 1'u8
if (ab.buffer[ab.offset + int(length) - 1] and mask) != 0x00'u8:
## All unused bits should be set to `0`.
return err(Asn1Error.Incorrect)
field = Asn1Field(
kind: Asn1Tag.BitString,
klass: aclass,
index: ttag,
offset: ab.offset + 1,
length: length - 1,
ubits: safeConvert[int](unused),
buffer: ab.buffer,
)
ab.offset += length
field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
index: ttag, offset: int(ab.offset + 1),
length: int(length - 1), ubits: int(unused),
buffer: ab.buffer)
ab.offset += int(length)
return ok(field)
of Asn1Tag.OctetString.code():
# OCTET STRING
if not ab.isEnough(length):
if not ab.isEnough(int(length)):
return err(Asn1Error.Incomplete)
field = Asn1Field(
kind: Asn1Tag.OctetString,
klass: aclass,
index: ttag,
offset: ab.offset,
length: length,
buffer: ab.buffer,
)
ab.offset += length
field = Asn1Field(kind: Asn1Tag.OctetString, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), buffer: ab.buffer)
ab.offset += int(length)
return ok(field)
of Asn1Tag.Null.code():
# NULL
if length != 0:
return err(Asn1Error.Incorrect)
field = Asn1Field(
kind: Asn1Tag.Null,
klass: aclass,
index: ttag,
offset: ab.offset,
length: 0,
buffer: ab.buffer,
)
ab.offset += length
field = Asn1Field(kind: Asn1Tag.Null, klass: aclass, index: ttag,
offset: int(ab.offset), length: 0, buffer: ab.buffer)
ab.offset += int(length)
return ok(field)
of Asn1Tag.Oid.code():
# OID
if not ab.isEnough(length):
if not ab.isEnough(int(length)):
return err(Asn1Error.Incomplete)
field = Asn1Field(
kind: Asn1Tag.Oid,
klass: aclass,
index: ttag,
offset: ab.offset,
length: length,
buffer: ab.buffer,
)
ab.offset += length
field = Asn1Field(kind: Asn1Tag.Oid, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), buffer: ab.buffer)
ab.offset += int(length)
return ok(field)
of Asn1Tag.Sequence.code():
# SEQUENCE
if not ab.isEnough(length):
if not ab.isEnough(int(length)):
return err(Asn1Error.Incomplete)
field = Asn1Field(
kind: Asn1Tag.Sequence,
klass: aclass,
index: ttag,
offset: ab.offset,
length: length,
buffer: ab.buffer,
)
ab.offset += length
field = Asn1Field(kind: Asn1Tag.Sequence, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), buffer: ab.buffer)
ab.offset += int(length)
return ok(field)
else:
return err(Asn1Error.NoSupport)
inclass = false
ttag = 0
else:
return err(Asn1Error.NoSupport)
@@ -662,9 +697,9 @@ proc `==`*(field: Asn1Field, data: openArray[byte]): bool =
if length > 0:
if field.length == len(data):
CT.isEqual(
field.buffer.toOpenArray(field.offset, field.offset + field.length - 1),
data.toOpenArray(0, field.length - 1),
)
field.buffer.toOpenArray(field.offset,
field.offset + field.length - 1),
data.toOpenArray(0, field.length - 1))
else:
false
else:
@@ -680,15 +715,15 @@ proc init*(t: typedesc[Asn1Buffer], data: string): Asn1Buffer =
proc init*(t: typedesc[Asn1Buffer]): Asn1Buffer =
## Initialize empty ``Asn1Buffer``.
Asn1Buffer(buffer: newSeqUninit[byte](0))
Asn1Buffer(buffer: newSeq[byte]())
proc init*(t: typedesc[Asn1Composite], tag: Asn1Tag): Asn1Composite =
## Initialize ``Asn1Composite`` with tag ``tag``.
Asn1Composite(tag: tag, buffer: newSeqUninit[byte](0))
Asn1Composite(tag: tag, buffer: newSeq[byte]())
proc init*(t: typedesc[Asn1Composite], idx: int): Asn1Composite =
## Initialize ``Asn1Composite`` with tag context-specific id ``id``.
Asn1Composite(tag: Asn1Tag.Context, idx: idx, buffer: newSeqUninit[byte](0))
Asn1Composite(tag: Asn1Tag.Context, idx: idx, buffer: newSeq[byte]())
proc `$`*(buffer: Asn1Buffer): string =
## Return string representation of ``buffer``.
@@ -742,14 +777,13 @@ proc `$`*(field: Asn1Field): string =
res.add(ncrutils.toHex(field.toOpenArray()))
res
proc write*[T: Asn1Buffer | Asn1Composite](abc: var T, tag: Asn1Tag) =
proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag) =
## Write empty value to buffer or composite with ``tag``.
##
## This procedure must be used to write `NULL`, `0` or empty `BIT STRING`,
## `OCTET STRING` types.
doAssert(
tag in {Asn1Tag.Null, Asn1Tag.Integer, Asn1Tag.BitString, Asn1Tag.OctetString}
)
doAssert(tag in {Asn1Tag.Null, Asn1Tag.Integer, Asn1Tag.BitString,
Asn1Tag.OctetString})
var length: int
if tag == Asn1Tag.Null:
length = asn1EncodeNull(abc.toOpenArray())
@@ -771,23 +805,22 @@ proc write*[T: Asn1Buffer | Asn1Composite](abc: var T, tag: Asn1Tag) =
discard asn1EncodeOctetString(abc.toOpenArray(), tmp.toOpenArray(0, -1))
abc.offset += length
proc write*[T: Asn1Buffer | Asn1Composite](abc: var T, value: uint64) =
proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, value: uint64) =
## Write uint64 ``value`` to buffer or composite as ASN.1 `INTEGER`.
let length = asn1EncodeInteger(abc.toOpenArray(), value)
abc.extend(length)
discard asn1EncodeInteger(abc.toOpenArray(), value)
abc.offset += length
proc write*[T: Asn1Buffer | Asn1Composite](abc: var T, value: bool) =
proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, value: bool) =
## Write bool ``value`` to buffer or composite as ASN.1 `BOOLEAN`.
let length = asn1EncodeBoolean(abc.toOpenArray(), value)
abc.extend(length)
discard asn1EncodeBoolean(abc.toOpenArray(), value)
abc.offset += length
proc write*[T: Asn1Buffer | Asn1Composite](
abc: var T, tag: Asn1Tag, value: openArray[byte], bits = 0
) =
proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag,
value: openArray[byte], bits = 0) =
## Write array ``value`` using ``tag``.
##
## This procedure is used to write ASN.1 `INTEGER`, `OCTET STRING`,
@@ -795,9 +828,8 @@ proc write*[T: Asn1Buffer | Asn1Composite](
##
## For `BIT STRING` you can use ``bits`` argument to specify number of used
## bits.
doAssert(
tag in {Asn1Tag.Integer, Asn1Tag.OctetString, Asn1Tag.BitString, Asn1Tag.Oid}
)
doAssert(tag in {Asn1Tag.Integer, Asn1Tag.OctetString, Asn1Tag.BitString,
Asn1Tag.Oid})
var length: int
if tag == Asn1Tag.Integer:
length = asn1EncodeInteger(abc.toOpenArray(), value)
@@ -817,7 +849,7 @@ proc write*[T: Asn1Buffer | Asn1Composite](
discard asn1EncodeOid(abc.toOpenArray(), value)
abc.offset += length
proc write*[T: Asn1Buffer | Asn1Composite](abc: var T, value: Asn1Composite) =
proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, value: Asn1Composite) =
doAssert(len(value) > 0, "Composite value not finished")
var length: int
if value.tag == Asn1Tag.Sequence:
@@ -834,6 +866,6 @@ proc write*[T: Asn1Buffer | Asn1Composite](abc: var T, value: Asn1Composite) =
discard asn1EncodeContextTag(abc.toOpenArray(), value.buffer, value.idx)
abc.offset += length
proc finish*[T: Asn1Buffer | Asn1Composite](abc: var T) {.inline.} =
proc finish*[T: Asn1Buffer|Asn1Composite](abc: var T) {.inline.} =
## Finishes buffer or composite and prepares it for writing.
abc.offset = 0

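To see how the writer and reader halves meet (mirroring the `toBytes`/`init` code in ecnist and rsa), here is a small sketch that encodes SEQUENCE { INTEGER 1, OCTET STRING } and parses it back:

import libp2p/crypto/minasn1

var b = Asn1Buffer.init()
var p = Asn1Composite.init(Asn1Tag.Sequence)
p.write(1'u64)                                        # INTEGER 1
p.write(Asn1Tag.OctetString, [byte 0xDE, 0xAD, 0xBE, 0xEF])
p.finish()
b.write(p)
b.finish()

# Decode the DER we just produced.
var ab = Asn1Buffer.init(b.buffer)
let outer = ab.read().expect("sequence")
doAssert outer.kind == Asn1Tag.Sequence
var ib = outer.getBuffer()
doAssert ib.read().expect("integer").vint == 1'u64
doAssert ib.read().expect("octet string").kind == Asn1Tag.OctetString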

@@ -13,15 +13,16 @@
## BearSSL library <https://bearssl.org/>
## Copyright(C) 2018 Thomas Pornin <pornin@bolet.org>.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import bearssl/[rsa, rand, hash]
import minasn1
import results
import stew/ctops
import stew/[results, ctops]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import ../utils/sequninit
export Asn1Error, results
@@ -32,17 +33,32 @@ const
MinKeySize* = 2048
## Minimal allowed RSA key size in bits.
## https://github.com/libp2p/go-libp2p-core/blob/master/crypto/rsa_common.go#L13
DefaultKeySize* = 3072 ## Default RSA key size in bits.
DefaultKeySize* = 3072
## Default RSA key size in bits.
RsaOidSha1* = [byte 0x05, 0x2B, 0x0E, 0x03, 0x02, 0x1A]
RsaOidSha1* = [
byte 0x05, 0x2B, 0x0E, 0x03, 0x02, 0x1A
]
## RSA PKCS#1.5 SHA-1 hash object identifier.
RsaOidSha224* = [byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04]
RsaOidSha224* = [
byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04,
0x02, 0x04
]
## RSA PKCS#1.5 SHA-224 hash object identifier.
RsaOidSha256* = [byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01]
RsaOidSha256* = [
byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04,
0x02, 0x01
]
## RSA PKCS#1.5 SHA-256 hash object identifier.
RsaOidSha384* = [byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02]
RsaOidSha384* = [
byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04,
0x02, 0x02
]
## RSA PKCS#1.5 SHA-384 hash object identifier.
RsaOidSha512* = [byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03]
RsaOidSha512* = [
byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04,
0x02, 0x03
]
## RSA PKCS#1.5 SHA-512 hash object identifier.
type
@@ -66,9 +82,9 @@ type
RsaKP* = RsaPrivateKey | RsaKeyPair
RsaError* = enum
RsaGenError
RsaKeyIncorrectError
RsaSignatureError
RsaGenError,
RsaKeyIncorrectError,
RsaSignatureError,
RsaLowSecurityError
RsaResult*[T] = Result[T, RsaError]
@@ -96,18 +112,15 @@ template getArray*(bs, os, ls: untyped): untyped =
template trimZeroes(b: seq[byte], pt, ptlen: untyped) =
var length = ptlen
for i in 0 ..< length:
for i in 0..<length:
if pt[] != byte(0x00):
break
pt = cast[ptr byte](cast[uint](pt) + 1)
ptlen -= 1
proc random*[T: RsaKP](
t: typedesc[T],
rng: var HmacDrbgContext,
bits = DefaultKeySize,
pubexp = DefaultPublicExponent,
): RsaResult[T] =
proc random*[T: RsaKP](t: typedesc[T], rng: var HmacDrbgContext,
bits = DefaultKeySize,
pubexp = DefaultPublicExponent): RsaResult[T] =
## Generate new random RSA private key using BearSSL's HMAC-SHA256-DRBG
## algorithm.
##
@@ -125,19 +138,14 @@ proc random*[T: RsaKP](
length = eko + ((bits + 7) shr 3)
let res = new T
res.buffer = newSeqUninit[byte](length)
res.buffer = newSeq[byte](length)
var keygen = rsaKeygenGetDefault()
if keygen(
addr rng.vtable,
addr res.seck,
addr res.buffer[sko],
addr res.pubk,
addr res.buffer[pko],
cuint(bits),
pubexp,
) == 0:
if keygen(addr rng.vtable,
addr res.seck, addr res.buffer[sko],
addr res.pubk, addr res.buffer[pko],
cuint(bits), pubexp) == 0:
return err(RsaGenError)
let
@@ -165,12 +173,11 @@ proc copy*[T: RsaPKI](key: T): T =
doAssert(not isNil(key))
when T is RsaPrivateKey:
if len(key.buffer) > 0:
let length =
key.seck.plen.uint + key.seck.qlen.uint + key.seck.dplen.uint +
key.seck.dqlen.uint + key.seck.iqlen.uint + key.pubk.nlen.uint +
key.pubk.elen.uint + key.pexplen.uint
let length = key.seck.plen.uint + key.seck.qlen.uint + key.seck.dplen.uint +
key.seck.dqlen.uint + key.seck.iqlen.uint + key.pubk.nlen.uint +
key.pubk.elen.uint + key.pexplen.uint
result = new RsaPrivateKey
result.buffer = newSeqUninit[byte](length)
result.buffer = newSeq[byte](length)
let po: uint = 0
let qo = po + key.seck.plen
let dpo = qo + key.seck.qlen
@@ -208,7 +215,7 @@ proc copy*[T: RsaPKI](key: T): T =
if len(key.buffer) > 0:
let length = key.key.nlen + key.key.elen
result = new RsaPublicKey
result.buffer = newSeqUninit[byte](length)
result.buffer = newSeq[byte](length)
let no = 0
let eo = no + key.key.nlen
copyMem(addr result.buffer[no], key.key.n, key.key.nlen)
@@ -227,11 +234,12 @@ proc getPublicKey*(key: RsaPrivateKey): RsaPublicKey =
doAssert(not isNil(key))
let length = key.pubk.nlen + key.pubk.elen
result = new RsaPublicKey
result.buffer = newSeqUninit[byte](length)
result.buffer = newSeq[byte](length)
result.key.n = addr result.buffer[0]
result.key.e = addr result.buffer[key.pubk.nlen]
copyMem(addr result.buffer[0], cast[pointer](key.pubk.n), key.pubk.nlen)
copyMem(addr result.buffer[key.pubk.nlen], cast[pointer](key.pubk.e), key.pubk.elen)
copyMem(addr result.buffer[key.pubk.nlen], cast[pointer](key.pubk.e),
key.pubk.elen)
result.key.nlen = key.pubk.nlen
result.key.elen = key.pubk.elen
@@ -243,7 +251,7 @@ proc pubkey*(pair: RsaKeyPair): RsaPublicKey {.inline.} =
## Get RSA public key from pair ``pair``.
result = cast[RsaPrivateKey](pair).getPublicKey()
proc clear*[T: RsaPKI | RsaKeyPair](pki: var T) =
proc clear*[T: RsaPKI|RsaKeyPair](pki: var T) =
## Wipe and clear RSA private key, public key or signature object.
doAssert(not isNil(pki))
when T is RsaPrivateKey:
@@ -287,14 +295,21 @@ proc toBytes*(key: RsaPrivateKey, data: var openArray[byte]): RsaResult[int] =
var b = Asn1Buffer.init()
var p = Asn1Composite.init(Asn1Tag.Sequence)
p.write(0'u64)
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pubk.n, key.pubk.nlen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pubk.e, key.pubk.elen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pubk.n,
key.pubk.nlen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pubk.e,
key.pubk.elen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pexp, key.pexplen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.p, key.seck.plen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.q, key.seck.qlen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.dp, key.seck.dplen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.dq, key.seck.dqlen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.iq, key.seck.iqlen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.p,
key.seck.plen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.q,
key.seck.qlen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.dp,
key.seck.dplen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.dq,
key.seck.dqlen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.iq,
key.seck.iqlen))
p.finish()
b.write(p)
b.finish()
@@ -358,8 +373,8 @@ proc getBytes*(key: RsaPrivateKey): RsaResult[seq[byte]] =
## return it.
if isNil(key):
return err(RsaKeyIncorrectError)
var res = newSeqUninit[byte](4096)
let length = ?key.toBytes(res)
var res = newSeq[byte](4096)
let length = ? key.toBytes(res)
if length > 0:
res.setLen(length)
ok(res)
@@ -371,8 +386,8 @@ proc getBytes*(key: RsaPublicKey): RsaResult[seq[byte]] =
## return it.
if isNil(key):
return err(RsaKeyIncorrectError)
var res = newSeqUninit[byte](4096)
let length = ?key.toBytes(res)
var res = newSeq[byte](4096)
let length = ? key.toBytes(res)
if length > 0:
res.setLen(length)
ok(res)
@@ -383,8 +398,8 @@ proc getBytes*(sig: RsaSignature): RsaResult[seq[byte]] =
## Serialize RSA signature ``sig`` to raw binary form and return it.
if isNil(sig):
return err(RsaSignatureError)
var res = newSeqUninit[byte](4096)
let length = ?sig.toBytes(res)
var res = newSeq[byte](4096)
let length = ? sig.toBytes(res)
if length > 0:
res.setLen(length)
ok(res)
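For completeness, a sketch of the RSA key lifecycle exercised by the procs in this file. `newRng` is assumed from crypto.nim and the final comparison assumes the module's `==` overload for private keys; key generation at 2048 bits or more is slow:

import libp2p/crypto/crypto
import libp2p/crypto/rsa

let rng = newRng()
# MinKeySize is 2048 bits; generation at this size already takes a while.
let seckey = RsaPrivateKey.random(rng[], bits = 2048).expect("keygen")
let pubkey = seckey.getPublicKey()

# DER round trip through getBytes and init.
let der = seckey.getBytes().expect("encode")
doAssert RsaPrivateKey.init(der).expect("decode") == seckey
doAssert pubkey.getBytes().expect("encode").len > 0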
@@ -396,19 +411,20 @@ proc init*(key: var RsaPrivateKey, data: openArray[byte]): Result[void, Asn1Erro
## ``data``.
##
## Procedure returns ``Asn1Status``.
var field, rawn, rawpube, rawprie, rawp, rawq, rawdp, rawdq, rawiq: Asn1Field
var
field, rawn, rawpube, rawprie, rawp, rawq, rawdp, rawdq, rawiq: Asn1Field
# Asn1Field is not trivial so avoid too much Result
var ab = Asn1Buffer.init(data)
field = ?ab.read()
field = ? ab.read()
if field.kind != Asn1Tag.Sequence:
return err(Asn1Error.Incorrect)
var ib = field.getBuffer()
field = ?ib.read()
field = ? ib.read()
if field.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
@@ -416,48 +432,48 @@ proc init*(key: var RsaPrivateKey, data: openArray[byte]): Result[void, Asn1Erro
if field.vint != 0'u64:
return err(Asn1Error.Incorrect)
rawn = ?ib.read()
rawn = ? ib.read()
if rawn.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawpube = ?ib.read()
rawpube = ? ib.read()
if rawpube.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawprie = ?ib.read()
rawprie = ? ib.read()
if rawprie.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawp = ?ib.read()
rawp = ? ib.read()
if rawp.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawq = ?ib.read()
rawq = ? ib.read()
if rawq.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawdp = ?ib.read()
rawdp = ? ib.read()
if rawdp.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawdq = ?ib.read()
rawdq = ? ib.read()
if rawdq.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawiq = ?ib.read()
rawiq = ? ib.read()
if rawiq.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
if len(rawn) >= (MinKeySize shr 3) and len(rawp) > 0 and len(rawq) > 0 and
len(rawdp) > 0 and len(rawdq) > 0 and len(rawiq) > 0:
len(rawdp) > 0 and len(rawdq) > 0 and len(rawiq) > 0:
key = new RsaPrivateKey
key.buffer = @data
key.pubk.n = addr key.buffer[rawn.offset]
@@ -489,52 +505,52 @@ proc init*(key: var RsaPublicKey, data: openArray[byte]): Result[void, Asn1Error
var field, rawn, rawe: Asn1Field
var ab = Asn1Buffer.init(data)
field = ?ab.read()
field = ? ab.read()
if field.kind != Asn1Tag.Sequence:
return err(Asn1Error.Incorrect)
var ib = field.getBuffer()
field = ?ib.read()
field = ? ib.read()
if field.kind != Asn1Tag.Sequence:
return err(Asn1Error.Incorrect)
var ob = field.getBuffer()
field = ?ob.read()
field = ? ob.read()
if field.kind != Asn1Tag.Oid:
return err(Asn1Error.Incorrect)
elif field != Asn1OidRsaEncryption:
return err(Asn1Error.Incorrect)
field = ?ob.read()
field = ? ob.read()
if field.kind != Asn1Tag.Null:
return err(Asn1Error.Incorrect)
field = ?ib.read()
field = ? ib.read()
if field.kind != Asn1Tag.BitString:
return err(Asn1Error.Incorrect)
var vb = field.getBuffer()
field = ?vb.read()
field = ? vb.read()
if field.kind != Asn1Tag.Sequence:
return err(Asn1Error.Incorrect)
var sb = field.getBuffer()
rawn = ?sb.read()
rawn = ? sb.read()
if rawn.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawe = ?sb.read()
rawe = ? sb.read()
if rawe.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
@@ -562,16 +578,16 @@ proc init*(sig: var RsaSignature, data: openArray[byte]): Result[void, Asn1Error
else:
err(Asn1Error.Incorrect)
proc init*[T: RsaPKI](sospk: var T, data: string): Result[void, Asn1Error] {.inline.} =
proc init*[T: RsaPKI](sospk: var T,
data: string): Result[void, Asn1Error] {.inline.} =
## Initialize EC `private key`, `public key` or `scalar` ``sospk`` from
## hexadecimal string representation ``data``.
##
## Procedure returns ``Result[void, Asn1Status]``.
sospk.init(ncrutils.fromHex(data))
proc init*(
t: typedesc[RsaPrivateKey], data: openArray[byte]
): RsaResult[RsaPrivateKey] =
proc init*(t: typedesc[RsaPrivateKey],
data: openArray[byte]): RsaResult[RsaPrivateKey] =
## Initialize RSA private key from ASN.1 DER binary representation ``data``
## and return constructed object.
var res: RsaPrivateKey
@@ -580,7 +596,8 @@ proc init*(
else:
ok(res)
proc init*(t: typedesc[RsaPublicKey], data: openArray[byte]): RsaResult[RsaPublicKey] =
proc init*(t: typedesc[RsaPublicKey],
data: openArray[byte]): RsaResult[RsaPublicKey] =
## Initialize RSA public key from ASN.1 DER binary representation ``data``
## and return constructed object.
var res: RsaPublicKey
@@ -589,7 +606,8 @@ proc init*(t: typedesc[RsaPublicKey], data: openArray[byte]): RsaResult[RsaPubli
else:
ok(res)
proc init*(t: typedesc[RsaSignature], data: openArray[byte]): RsaResult[RsaSignature] =
proc init*(t: typedesc[RsaSignature],
data: openArray[byte]): RsaResult[RsaSignature] =
## Initialize RSA signature from raw binary representation ``data`` and
## return constructed object.
var res: RsaSignature
@@ -616,11 +634,14 @@ proc `$`*(key: RsaPrivateKey): string =
result.add("\nq = ")
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.q, key.seck.qlen)))
result.add("\ndp = ")
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.dp, key.seck.dplen)))
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.dp,
key.seck.dplen)))
result.add("\ndq = ")
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.dq, key.seck.dqlen)))
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.dq,
key.seck.dqlen)))
result.add("\niq = ")
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.iq, key.seck.iqlen)))
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.iq,
key.seck.iqlen)))
result.add("\npre = ")
result.add(ncrutils.toHex(getArray(key.buffer, key.pexp, key.pexplen)))
result.add("\nm = ")
@@ -665,38 +686,23 @@ proc `==`*(a, b: RsaPrivateKey): bool =
false
else:
if a.seck.nBitlen == b.seck.nBitlen:
if a.seck.nBitlen > 0'u:
let r1 = CT.isEqual(
getArray(a.buffer, a.seck.p, a.seck.plen),
getArray(b.buffer, b.seck.p, b.seck.plen),
)
let r2 = CT.isEqual(
getArray(a.buffer, a.seck.q, a.seck.qlen),
getArray(b.buffer, b.seck.q, b.seck.qlen),
)
let r3 = CT.isEqual(
getArray(a.buffer, a.seck.dp, a.seck.dplen),
getArray(b.buffer, b.seck.dp, b.seck.dplen),
)
let r4 = CT.isEqual(
getArray(a.buffer, a.seck.dq, a.seck.dqlen),
getArray(b.buffer, b.seck.dq, b.seck.dqlen),
)
let r5 = CT.isEqual(
getArray(a.buffer, a.seck.iq, a.seck.iqlen),
getArray(b.buffer, b.seck.iq, b.seck.iqlen),
)
let r6 = CT.isEqual(
getArray(a.buffer, a.pexp, a.pexplen), getArray(b.buffer, b.pexp, b.pexplen)
)
let r7 = CT.isEqual(
getArray(a.buffer, a.pubk.n, a.pubk.nlen),
getArray(b.buffer, b.pubk.n, b.pubk.nlen),
)
let r8 = CT.isEqual(
getArray(a.buffer, a.pubk.e, a.pubk.elen),
getArray(b.buffer, b.pubk.e, b.pubk.elen),
)
if cast[int](a.seck.nBitlen) > 0:
let r1 = CT.isEqual(getArray(a.buffer, a.seck.p, a.seck.plen),
getArray(b.buffer, b.seck.p, b.seck.plen))
let r2 = CT.isEqual(getArray(a.buffer, a.seck.q, a.seck.qlen),
getArray(b.buffer, b.seck.q, b.seck.qlen))
let r3 = CT.isEqual(getArray(a.buffer, a.seck.dp, a.seck.dplen),
getArray(b.buffer, b.seck.dp, b.seck.dplen))
let r4 = CT.isEqual(getArray(a.buffer, a.seck.dq, a.seck.dqlen),
getArray(b.buffer, b.seck.dq, b.seck.dqlen))
let r5 = CT.isEqual(getArray(a.buffer, a.seck.iq, a.seck.iqlen),
getArray(b.buffer, b.seck.iq, b.seck.iqlen))
let r6 = CT.isEqual(getArray(a.buffer, a.pexp, a.pexplen),
getArray(b.buffer, b.pexp, b.pexplen))
let r7 = CT.isEqual(getArray(a.buffer, a.pubk.n, a.pubk.nlen),
getArray(b.buffer, b.pubk.n, b.pubk.nlen))
let r8 = CT.isEqual(getArray(a.buffer, a.pubk.e, a.pubk.elen),
getArray(b.buffer, b.pubk.e, b.pubk.elen))
r1 and r2 and r3 and r4 and r5 and r6 and r7 and r8
else:
true
@@ -734,17 +740,14 @@ proc `==`*(a, b: RsaPublicKey): bool =
elif isNil(b) and (not isNil(a)):
false
else:
let r1 = CT.isEqual(
getArray(a.buffer, a.key.n, a.key.nlen), getArray(b.buffer, b.key.n, b.key.nlen)
)
let r2 = CT.isEqual(
getArray(a.buffer, a.key.e, a.key.elen), getArray(b.buffer, b.key.e, b.key.elen)
)
let r1 = CT.isEqual(getArray(a.buffer, a.key.n, a.key.nlen),
getArray(b.buffer, b.key.n, b.key.nlen))
let r2 = CT.isEqual(getArray(a.buffer, a.key.e, a.key.elen),
getArray(b.buffer, b.key.e, b.key.elen))
(r1 and r2)
proc sign*[T: byte | char](
key: RsaPrivateKey, message: openArray[T]
): RsaResult[RsaSignature] {.gcsafe.} =
proc sign*[T: byte|char](key: RsaPrivateKey,
message: openArray[T]): RsaResult[RsaSignature] {.gcsafe.} =
## Get RSA PKCS1.5 signature of data ``message`` using SHA256 and private
## key ``key``.
if isNil(key):
@@ -754,7 +757,7 @@ proc sign*[T: byte | char](
var hash: array[32, byte]
let impl = rsaPkcs1SignGetDefault()
var res = new RsaSignature
res.buffer = newSeqUninit[byte]((key.seck.nBitlen + 7) shr 3)
res.buffer = newSeq[byte]((key.seck.nBitlen + 7) shr 3)
var kv = addr sha256Vtable
kv.init(addr hc.vtable)
if len(message) > 0:
@@ -763,16 +766,16 @@ proc sign*[T: byte | char](
kv.update(addr hc.vtable, nil, 0)
kv.out(addr hc.vtable, addr hash[0])
var oid = RsaOidSha256
let implRes =
impl(addr oid[0], addr hash[0], uint(len(hash)), addr key.seck, addr res.buffer[0])
let implRes = impl(addr oid[0],
addr hash[0], uint(len(hash)),
addr key.seck, addr res.buffer[0])
if implRes == 0:
err(RsaSignatureError)
else:
ok(res)
proc verify*[T: byte | char](
sig: RsaSignature, message: openArray[T], pubkey: RsaPublicKey
): bool {.inline.} =
proc verify*[T: byte|char](sig: RsaSignature, message: openArray[T],
pubkey: RsaPublicKey): bool {.inline.} =
## Verify RSA signature ``sig`` using public key ``pubkey`` and data
## ``message``.
##
@@ -792,13 +795,8 @@ proc verify*[T: byte | char](
kv.update(addr hc.vtable, nil, 0)
kv.out(addr hc.vtable, addr hash[0])
var oid = RsaOidSha256
let res = impl(
addr sig.buffer[0],
uint(len(sig.buffer)),
addr oid[0],
uint(len(check)),
addr pubkey.key,
addr check[0],
)
let res = impl(addr sig.buffer[0], uint(len(sig.buffer)),
addr oid[0],
uint(len(check)), addr pubkey.key, addr check[0])
if res == 1:
result = equalMem(addr check[0], addr hash[0], len(hash))
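
The RSA hunks above only reflow the code; behaviour is unchanged: `sign` produces a PKCS#1 v1.5 signature over the SHA-256 digest, `getBytes`/`init` round-trip keys through ASN.1 DER, and `==` compares keys in constant time. A minimal usage sketch, assuming the `newRng` helper and `RsaPrivateKey.random` generator from the surrounding crypto modules (neither is part of this diff):

import libp2p/crypto/[crypto, rsa]            # module paths assumed

let rng = newRng()                            # assumed helper, ref HmacDrbgContext
let seckey = RsaPrivateKey.random(rng[]).expect("keygen")   # assumed generator
let sig = seckey.sign("hello libp2p").expect("sign")        # PKCS#1 v1.5 over SHA-256
# DER round-trip through the getBytes/init pair shown above
let der = seckey.getBytes().expect("serialize")
doAssert RsaPrivateKey.init(der).expect("parse") == seckey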

View File

@@ -7,19 +7,26 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import bearssl/rand
import secp256k1, results, stew/byteutils, nimcrypto/[hash, sha2]
import ../utils/sequninit
import
secp256k1,
stew/[byteutils, results],
nimcrypto/[hash, sha2]
export sha2, results, rand
const
SkRawPrivateKeySize* = 256 div 8 ## Size of private key in octets (bytes)
SkRawPrivateKeySize* = 256 div 8
## Size of private key in octets (bytes)
SkRawSignatureSize* = SkRawPrivateKeySize * 2 + 1
## Size of signature in octets (bytes)
SkRawPublicKeySize* = SkRawPrivateKeySize + 1 ## Size of public key in octets (bytes)
SkRawPublicKeySize* = SkRawPrivateKeySize + 1
## Size of public key in octets (bytes)
# This is extremely confusing but it's to avoid.. confusion between Eth standard and Secp standard
type
@@ -28,6 +35,9 @@ type
SkSignature* = distinct secp256k1.SkSignature
SkKeyPair* = distinct secp256k1.SkKeyPair
template pubkey*(v: SkKeyPair): SkPublicKey = SkPublicKey(secp256k1.SkKeyPair(v).pubkey)
template seckey*(v: SkKeyPair): SkPrivateKey = SkPrivateKey(secp256k1.SkKeyPair(v).seckey)
proc random*(t: typedesc[SkPrivateKey], rng: var HmacDrbgContext): SkPrivateKey =
#TODO is there a better way?
var rngPtr = addr rng
@@ -52,31 +62,31 @@ template pubkey*(v: SkKeyPair): SkPublicKey =
proc init*(key: var SkPrivateKey, data: openArray[byte]): SkResult[void] =
## Initialize Secp256k1 `private key` ``key`` from raw binary
## representation ``data``.
key = SkPrivateKey(?secp256k1.SkSecretKey.fromRaw(data))
key = SkPrivateKey(? secp256k1.SkSecretKey.fromRaw(data))
ok()
proc init*(key: var SkPrivateKey, data: string): SkResult[void] =
## Initialize Secp256k1 `private key` ``key`` from hexadecimal string
## representation ``data``.
key = SkPrivateKey(?secp256k1.SkSecretKey.fromHex(data))
key = SkPrivateKey(? secp256k1.SkSecretKey.fromHex(data))
ok()
proc init*(key: var SkPublicKey, data: openArray[byte]): SkResult[void] =
## Initialize Secp256k1 `public key` ``key`` from raw binary
## representation ``data``.
key = SkPublicKey(?secp256k1.SkPublicKey.fromRaw(data))
key = SkPublicKey(? secp256k1.SkPublicKey.fromRaw(data))
ok()
proc init*(key: var SkPublicKey, data: string): SkResult[void] =
## Initialize Secp256k1 `public key` ``key`` from hexadecimal string
## representation ``data``.
key = SkPublicKey(?secp256k1.SkPublicKey.fromHex(data))
key = SkPublicKey(? secp256k1.SkPublicKey.fromHex(data))
ok()
proc init*(sig: var SkSignature, data: openArray[byte]): SkResult[void] =
## Initialize Secp256k1 `signature` ``sig`` from raw binary
## representation ``data``.
sig = SkSignature(?secp256k1.SkSignature.fromDer(data))
sig = SkSignature(? secp256k1.SkSignature.fromDer(data))
ok()
proc init*(sig: var SkSignature, data: string): SkResult[void] =
@@ -86,9 +96,8 @@ proc init*(sig: var SkSignature, data: string): SkResult[void] =
var buffer: seq[byte]
try:
buffer = hexToSeqByte(data)
except ValueError as e:
let errMsg = "secp: Hex to bytes failed: " & e.msg
return err(errMsg.cstring)
except ValueError:
return err("secp: Hex to bytes failed")
init(sig, buffer)
proc init*(t: typedesc[SkPrivateKey], data: openArray[byte]): SkResult[SkPrivateKey] =
@@ -148,7 +157,7 @@ proc toBytes*(key: SkPrivateKey, data: var openArray[byte]): SkResult[int] =
## Procedure returns number of bytes (octets) needed to store
## Secp256k1 private key.
if len(data) >= SkRawPrivateKeySize:
data[0 ..< SkRawPrivateKeySize] = SkSecretKey(key).toRaw()
data[0..<SkRawPrivateKeySize] = SkSecretKey(key).toRaw()
ok(SkRawPrivateKeySize)
else:
err("secp: Not enough bytes")
@@ -160,7 +169,7 @@ proc toBytes*(key: SkPublicKey, data: var openArray[byte]): SkResult[int] =
## Procedure returns number of bytes (octets) needed to store
## Secp256k1 public key.
if len(data) >= SkRawPublicKeySize:
data[0 ..< SkRawPublicKeySize] = secp256k1.SkPublicKey(key).toRawCompressed()
data[0..<SkRawPublicKeySize] = secp256k1.SkPublicKey(key).toRawCompressed()
ok(SkRawPublicKeySize)
else:
err("secp: Not enough bytes")
@@ -183,32 +192,26 @@ proc getBytes*(key: SkPublicKey): seq[byte] {.inline.} =
proc getBytes*(sig: SkSignature): seq[byte] {.inline.} =
## Serialize Secp256k1 `signature` and return it.
result = newSeqUninit[byte](72)
result = newSeq[byte](72)
let length = toBytes(sig, result)
result.setLen(length)
proc sign*[T: byte | char](key: SkPrivateKey, msg: openArray[T]): SkSignature =
proc sign*[T: byte|char](key: SkPrivateKey, msg: openArray[T]): SkSignature =
## Sign message `msg` using private key `key` and return signature object.
let h = sha256.digest(msg)
SkSignature(sign(SkSecretKey(key), SkMessage(h.data)))
proc verify*[T: byte | char](
sig: SkSignature, msg: openArray[T], key: SkPublicKey
): bool =
proc verify*[T: byte|char](sig: SkSignature, msg: openArray[T],
key: SkPublicKey): bool =
let h = sha256.digest(msg)
verify(secp256k1.SkSignature(sig), SkMessage(h.data), secp256k1.SkPublicKey(key))
func clear*(key: var SkPrivateKey) =
clear(secp256k1.SkSecretKey(key))
func clear*(key: var SkPrivateKey) = clear(secp256k1.SkSecretKey(key))
func `$`*(key: SkPrivateKey): string =
$secp256k1.SkSecretKey(key)
func `$`*(key: SkPublicKey): string =
$secp256k1.SkPublicKey(key)
func `$`*(key: SkSignature): string =
$secp256k1.SkSignature(key)
func `$`*(key: SkKeyPair): string =
$secp256k1.SkKeyPair(key)
func `$`*(key: SkPrivateKey): string = $secp256k1.SkSecretKey(key)
func `$`*(key: SkPublicKey): string = $secp256k1.SkPublicKey(key)
func `$`*(key: SkSignature): string = $secp256k1.SkSignature(key)
func `$`*(key: SkKeyPair): string = $secp256k1.SkKeyPair(key)
func `==`*(a, b: SkPrivateKey): bool =
secp256k1.SkSecretKey(a) == secp256k1.SkSecretKey(b)
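
As with the RSA file, the secp256k1 hunks are formatting-only. A small sketch of the API visible above (`SkPrivateKey.random`, `sign`, `verify`, `getBytes`); `newRng` and `getPublicKey` are assumed from the neighbouring crypto modules and are not shown in this diff:

import libp2p/crypto/[crypto, secp]           # module paths assumed

let rng = newRng()                            # assumed helper, ref HmacDrbgContext
let seckey = SkPrivateKey.random(rng[])       # shown in the diff above
let pubkey = seckey.getPublicKey()            # assumed accessor
let sig = seckey.sign("ping")                 # signs the SHA-256 digest of the message
doAssert sig.verify("ping", pubkey)
let der = sig.getBytes()                      # DER-encoded signature bytes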

File diff suppressed because it is too large

View File

@@ -7,29 +7,31 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
## This module implements Pool of StreamTransport.
import chronos
const DefaultPoolSize* = 8 ## Default pool size
const
DefaultPoolSize* = 8
## Default pool size
type
ConnectionFlags = enum
None
Busy
None, Busy
PoolItem = object
transp*: StreamTransport
flags*: set[ConnectionFlags]
PoolState = enum
Connecting
Connected
Closing
Closed
Connecting, Connected, Closing, Closed
TransportPool* = ref object ## Transports pool object
TransportPool* = ref object
## Transports pool object
transports: seq[PoolItem]
busyCount: int
state: PoolState
@@ -46,16 +48,13 @@ proc waitAll[T](futs: seq[Future[T]]): Future[void] =
dec(counter)
if counter == 0:
retFuture.complete()
for fut in futs:
fut.addCallback(cb)
return retFuture
proc newPool*(
address: TransportAddress,
poolsize: int = DefaultPoolSize,
bufferSize = DefaultStreamBufferSize,
): Future[TransportPool] {.async: (raises: [CancelledError]).} =
proc newPool*(address: TransportAddress, poolsize: int = DefaultPoolSize,
bufferSize = DefaultStreamBufferSize,
): Future[TransportPool] {.async.} =
## Establish pool of connections to address ``address`` with size
## ``poolsize``.
var pool = new TransportPool
@@ -63,12 +62,12 @@ proc newPool*(
pool.transports = newSeq[PoolItem](poolsize)
var conns = newSeq[Future[StreamTransport]](poolsize)
pool.state = Connecting
for i in 0 ..< poolsize:
for i in 0..<poolsize:
conns[i] = connect(address, bufferSize)
# Waiting for all connections to be established.
await waitAll(conns)
# Checking connections and preparing pool.
for i in 0 ..< poolsize:
for i in 0..<poolsize:
if conns[i].failed:
raise conns[i].error
else:
@@ -80,9 +79,7 @@ proc newPool*(
pool.state = Connected
result = pool
proc acquire*(
pool: TransportPool
): Future[StreamTransport] {.async: (raises: [CancelledError, TransportPoolError]).} =
proc acquire*(pool: TransportPool): Future[StreamTransport] {.async.} =
## Acquire non-busy connection from pool ``pool``.
var transp: StreamTransport
if pool.state in {Connected}:
@@ -104,9 +101,7 @@ proc acquire*(
raise newException(TransportPoolError, "Pool is not ready!")
result = transp
proc release*(
pool: TransportPool, transp: StreamTransport
) {.async: (raises: [TransportPoolError]).} =
proc release*(pool: TransportPool, transp: StreamTransport) =
## Release connection ``transp`` back to pool ``pool``.
if pool.state in {Connected, Closing}:
var found = false
@@ -122,9 +117,7 @@ proc release*(
else:
raise newException(TransportPoolError, "Pool is not ready!")
proc join*(
pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
proc join*(pool: TransportPool) {.async.} =
## Waiting for all connection to become available.
if pool.state in {Connected, Closing}:
while true:
@@ -136,9 +129,7 @@ proc join*(
elif pool.state == Connecting:
raise newException(TransportPoolError, "Pool is not ready!")
proc close*(
pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
proc close*(pool: TransportPool) {.async.} =
## Closes transports pool ``pool`` and release all resources.
if pool.state == Connected:
pool.state = Closing
@@ -146,7 +137,7 @@ proc close*(
await pool.join()
# Closing all transports
var pending = newSeq[Future[void]](len(pool.transports))
for i in 0 ..< len(pool.transports):
for i in 0..<len(pool.transports):
let transp = pool.transports[i].transp
transp.close()
pending[i] = transp.join()
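
The transport-pool hunks keep the same small surface: `newPool` pre-opens `poolsize` connections, `acquire`/`release` borrow and return a non-busy transport, and `close` drains the pool. A usage sketch, assuming a TCP server already listens on the address and using the awaitable `release` variant; the import path is illustrative only:

import chronos
import libp2p/connectpool                     # hypothetical path to the module above

proc demo() {.async.} =
  let pool = await newPool(initTAddress("127.0.0.1:4040"), poolsize = 4)
  let transp = await pool.acquire()           # borrow a non-busy transport
  try:
    discard await transp.write("ping")
  finally:
    await pool.release(transp)                # mark it non-busy again
  await pool.close()                          # waits for busy transports, then closes all

waitFor demo()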

View File

@@ -25,40 +25,34 @@
## 5. LocalAddress: optional bytes
## 6. RemoteAddress: optional bytes
## 7. Message: required bytes
import os
import os, options
import nimcrypto/utils, stew/endians2
import
protobuf/minprotobuf,
stream/connection,
protocols/secure/secure,
multiaddress,
peerid,
varint,
muxers/mplex/coder
import protobuf/minprotobuf, stream/connection, protocols/secure/secure,
multiaddress, peerid, varint, muxers/mplex/coder
from times import
getTime, toUnix, fromUnix, nanosecond, format, Time, NanosecondRange, initTime
from times import getTime, toUnix, fromUnix, nanosecond, format, Time,
NanosecondRange, initTime
from strutils import toHex, repeat
export peerid, multiaddress
export peerid, options, multiaddress
type
FlowDirection* = enum
Outgoing
Incoming
Outgoing, Incoming
ProtoMessage* = object
timestamp*: uint64
direction*: FlowDirection
message*: seq[byte]
seqID*: Opt[uint64]
mtype*: Opt[uint64]
local*: Opt[MultiAddress]
remote*: Opt[MultiAddress]
seqID*: Option[uint64]
mtype*: Option[uint64]
local*: Option[MultiAddress]
remote*: Option[MultiAddress]
const libp2p_dump_dir* {.strdefine.} = "nim-libp2p"
## default directory where all the dumps will be stored, if the path
## relative it will be created in home directory. You can overload this path
## using ``-d:libp2p_dump_dir=<otherpath>``.
const
libp2p_dump_dir* {.strdefine.} = "nim-libp2p"
## default directory where all the dumps will be stored, if the path
## relative it will be created in home directory. You can overload this path
## using ``-d:libp2p_dump_dir=<otherpath>``.
proc getTimestamp(): uint64 =
## This procedure is present because `stdlib.times` missing it.
@@ -71,14 +65,14 @@ proc getTimedate(value: uint64): string =
let time = initTime(int64(value div 1_000_000_000), value mod 1_000_000_000)
time.format("yyyy-MM-dd HH:mm:ss'.'fffzzz")
proc dumpMessage*(conn: SecureConn, direction: FlowDirection, data: openArray[byte]) =
proc dumpMessage*(conn: SecureConn, direction: FlowDirection,
data: openArray[byte]) =
## Store unencrypted message ``data`` to dump file, all the metadata will be
## extracted from ``conn`` instance.
var pb = initProtoBuffer(options = {WithVarintLength})
pb.write(2, getTimestamp())
pb.write(4, uint64(direction))
conn.observedAddr.withValue(oaddr):
pb.write(6, oaddr)
pb.write(6, conn.observedAddr)
pb.write(7, data)
pb.finish()
@@ -92,7 +86,7 @@ proc dumpMessage*(conn: SecureConn, direction: FlowDirection, data: openArray[by
# This is debugging procedure so it should not generate any exceptions,
# and we going to return at every possible OS error.
if not (dirExists(dirName)):
if not(dirExists(dirName)):
try:
createDir(dirName)
except CatchableError:
@@ -106,7 +100,7 @@ proc dumpMessage*(conn: SecureConn, direction: FlowDirection, data: openArray[by
finally:
close(handle)
proc decodeDumpMessage*(data: openArray[byte]): Opt[ProtoMessage] =
proc decodeDumpMessage*(data: openArray[byte]): Option[ProtoMessage] =
## Decode protobuf's message ProtoMessage from array of bytes ``data``.
var
pb = initProtoBuffer(data)
@@ -114,12 +108,13 @@ proc decodeDumpMessage*(data: openArray[byte]): Opt[ProtoMessage] =
ma1, ma2: MultiAddress
pmsg: ProtoMessage
let
r2 = pb.getField(2, pmsg.timestamp)
r4 = pb.getField(4, value)
r7 = pb.getField(7, pmsg.message)
if not r2.get(false) or not r4.get(false) or not r7.get(false):
return Opt.none(ProtoMessage)
let res2 = pb.getField(2, pmsg.timestamp)
if res2.isErr() or not(res2.get()):
return none[ProtoMessage]()
let res4 = pb.getField(4, value)
if res4.isErr() or not(res4.get()):
return none[ProtoMessage]()
# `case` statement could not work here with an error "selector must be of an
# ordinal type, float or string"
@@ -129,27 +124,30 @@ proc decodeDumpMessage*(data: openArray[byte]): Opt[ProtoMessage] =
elif value == uint64(Incoming):
Incoming
else:
return Opt.none(ProtoMessage)
return none[ProtoMessage]()
let r1 = pb.getField(1, value)
if r1.get(false):
pmsg.seqID = Opt.some(value)
let res7 = pb.getField(7, pmsg.message)
if res7.isErr() or not(res7.get()):
return none[ProtoMessage]()
let r3 = pb.getField(3, value)
if r3.get(false):
pmsg.mtype = Opt.some(value)
value = 0'u64
let res1 = pb.getField(1, value)
if res1.isOk() and res1.get():
pmsg.seqID = some(value)
value = 0'u64
let res3 = pb.getField(3, value)
if res3.isOk() and res3.get():
pmsg.mtype = some(value)
let res5 = pb.getField(5, ma1)
if res5.isOk() and res5.get():
pmsg.local = some(ma1)
let res6 = pb.getField(6, ma2)
if res6.isOk() and res6.get():
pmsg.remote = some(ma2)
let
r5 = pb.getField(5, ma1)
r6 = pb.getField(6, ma2)
if r5.get(false):
pmsg.local = Opt.some(ma1)
if r6.get(false):
pmsg.remote = Opt.some(ma2)
some(pmsg)
Opt.some(pmsg)
iterator messages*(data: seq[byte]): Opt[ProtoMessage] =
iterator messages*(data: seq[byte]): Option[ProtoMessage] =
## Iterate over sequence of bytes and decode all the ``ProtoMessage``
## messages we found.
var value: uint64
@@ -158,11 +156,13 @@ iterator messages*(data: seq[byte]): Opt[ProtoMessage] =
while offset < len(data):
value = 0
size = 0
let res = PB.getUVarint(data.toOpenArray(offset, len(data) - 1), size, value)
let res = PB.getUVarint(data.toOpenArray(offset, len(data) - 1),
size, value)
if res.isOk():
if (value > 0'u64) and (value < uint64(len(data) - offset)):
offset += size
yield decodeDumpMessage(data.toOpenArray(offset, offset + int(value) - 1))
yield decodeDumpMessage(data.toOpenArray(offset,
offset + int(value) - 1))
# value is previously checked to be less then len(data) which is `int`.
offset += int(value)
else:
@@ -182,15 +182,10 @@ proc dumpHex*(pbytes: openArray[byte], groupBy = 1, ascii = true): string =
for k in 0 ..< groupBy:
let ch = pbytes[offset + k]
ascii.add(
if ord(ch) > 31 and ord(ch) < 127:
char(ch)
else:
'.'
)
ascii.add(if ord(ch) > 31 and ord(ch) < 127: char(ch) else: '.')
let item =
case groupBy
case groupBy:
of 1:
toHex(pbytes[offset])
of 2:
@@ -212,7 +207,8 @@ proc dumpHex*(pbytes: openArray[byte], groupBy = 1, ascii = true): string =
res.add("\p")
if (offset mod 16) != 0:
let spacesCount = ((16 - (offset mod 16)) div groupBy) * (groupBy * 2 + 1) + 1
let spacesCount = ((16 - (offset mod 16)) div groupBy) *
(groupBy * 2 + 1) + 1
res = res & repeat(' ', spacesCount)
res = res & ascii
@@ -240,28 +236,31 @@ proc toString*(msg: ProtoMessage, dump = true): string =
var res = getTimedate(msg.timestamp)
let direction =
case msg.direction
of Incoming: " << "
of Outgoing: " >> "
let address = block:
let local = block:
msg.local.withValue(loc):
"[" & $loc & "]"
else:
"[LOCAL]"
let remote = block:
msg.remote.withValue(rem):
"[" & $rem & "]"
else:
"[REMOTE]"
local & direction & remote
let seqid = block:
msg.seqID.withValue(seqid):
"seqID = " & $seqid & " "
of Incoming:
" << "
of Outgoing:
" >> "
let address =
block:
let local =
if msg.local.isSome():
"[" & $(msg.local.get()) & "]"
else:
"[LOCAL]"
let remote =
if msg.remote.isSome():
"[" & $(msg.remote.get()) & "]"
else:
"[REMOTE]"
local & direction & remote
let seqid =
if msg.seqID.isSome():
"seqID = " & $(msg.seqID.get()) & " "
else:
""
let mtype = block:
msg.mtype.withValue(typ):
"type = " & $typ & " "
let mtype =
if msg.mtype.isSome():
"type = " & $(msg.mtype.get()) & " "
else:
""
res.add(" ")

View File

@@ -7,88 +7,72 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import chronos
import results
import peerid, stream/connection, transports/transport, muxers/muxer
import stew/results
import peerid,
stream/connection,
transports/transport
export results
type
Dial* = ref object of RootObj
DialFailedError* = object of LPError
method connect*(
self: Dial,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
dir = Direction.Out,
) {.base, async: (raises: [DialFailedError, CancelledError]).} =
self: Dial,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true) {.async, base.} =
## connect remote peer without negotiating
## a protocol
##
doAssert(false, "[Dial.connect] abstract method not implemented!")
doAssert(false, "Not implemented!")
method connect*(
self: Dial, address: MultiAddress, allowUnknownPeerId = false
): Future[PeerId] {.base, async: (raises: [DialFailedError, CancelledError]).} =
self: Dial,
address: MultiAddress,
allowUnknownPeerId = false): Future[PeerId] {.async, base.} =
## Connects to a peer and retrieve its PeerId
doAssert(false, "[Dial.connect] abstract method not implemented!")
doAssert(false, "Not implemented!")
method dial*(
self: Dial, peerId: PeerId, protos: seq[string]
): Future[Connection] {.base, async: (raises: [DialFailedError, CancelledError]).} =
self: Dial,
peerId: PeerId,
protos: seq[string],
): Future[Connection] {.async, base.} =
## create a protocol stream over an
## existing connection
##
doAssert(false, "[Dial.dial] abstract method not implemented!")
method dialAndUpgrade*(
self: Dial,
peerId: Opt[PeerId],
hostname: string,
addrs: MultiAddress,
dir = Direction.Out,
): Future[Muxer] {.base, async: (raises: [CancelledError]).} =
doAssert(false, "[Dial.dialAndUpgrade] abstract method not implemented!")
method dialAndUpgrade*(
self: Dial, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
): Future[Muxer] {.
base, async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
.} =
doAssert(false, "[Dial.dialAndUpgrade] abstract method not implemented!")
doAssert(false, "Not implemented!")
method dial*(
self: Dial,
peerId: PeerId,
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false,
): Future[Connection] {.base, async: (raises: [DialFailedError, CancelledError]).} =
self: Dial,
peerId: PeerId,
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false): Future[Connection] {.async, base.} =
## create a protocol stream and establish
## a connection if one doesn't exist already
##
doAssert(false, "[Dial.dial] abstract method not implemented!")
doAssert(false, "Not implemented!")
method addTransport*(self: Dial, transport: Transport) {.base.} =
doAssert(false, "[Dial.addTransport] abstract method not implemented!")
method negotiateStream*(
self: Dial, conn: Connection, protos: seq[string]
): Future[Connection] {.base, async: (raises: [CatchableError]).} =
doAssert(false, "[Dial.negotiateStream] abstract method not implemented!")
method addTransport*(
self: Dial,
transport: Transport) {.base.} =
doAssert(false, "Not implemented!")
method tryDial*(
self: Dial, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.
base, async: (raises: [DialFailedError, CancelledError])
.} =
doAssert(false, "[Dial.tryDial] abstract method not implemented!")
self: Dial,
peerId: PeerId,
addrs: seq[MultiAddress]): Future[Opt[MultiAddress]] {.async, base.} =
doAssert(false, "Not implemented!")

View File

@@ -7,24 +7,24 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
import std/tables
import std/[sugar, tables, sequtils]
import pkg/[chronos, chronicles, metrics, results]
import stew/results
import pkg/[chronos,
chronicles,
metrics]
import
dial,
peerid,
peerinfo,
peerstore,
multicodec,
muxers/muxer,
multistream,
connmanager,
stream/connection,
transports/transport,
nameresolving/nameresolver,
upgrademngrs/upgrade,
errors
import dial,
peerid,
peerinfo,
multicodec,
multistream,
connmanager,
stream/connection,
transports/transport,
nameresolving/nameresolver,
upgrademngrs/upgrade,
errors
export dial, errors, results
@@ -34,228 +34,187 @@ logScope:
declareCounter(libp2p_total_dial_attempts, "total attempted dials")
declareCounter(libp2p_successful_dials, "dialed successful peers")
declareCounter(libp2p_failed_dials, "failed dials")
declareCounter(libp2p_failed_upgrades_outgoing, "outgoing connections failed upgrades")
type Dialer* = ref object of Dial
localPeerId*: PeerId
connManager: ConnManager
dialLock: Table[PeerId, AsyncLock]
transports: seq[Transport]
peerStore: PeerStore
nameResolver: NameResolver
type
DialFailedError* = object of LPError
Dialer* = ref object of Dial
localPeerId*: PeerId
ms: MultistreamSelect
connManager: ConnManager
dialLock: Table[PeerId, AsyncLock]
transports: seq[Transport]
nameResolver: NameResolver
proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
hostname: string,
address: MultiAddress):
Future[Connection] {.async.} =
method dialAndUpgrade*(
self: Dialer,
peerId: Opt[PeerId],
hostname: string,
addrs: MultiAddress,
dir = Direction.Out,
): Future[Muxer] {.async: (raises: [CancelledError]).} =
for transport in self.transports: # for each transport
if transport.handles(addrs): # check if it can dial it
trace "Dialing address", addrs, peerId = peerId.get(default(PeerId)), hostname
if transport.handles(address): # check if it can dial it
trace "Dialing address", address, peerId, hostname
let dialed =
try:
libp2p_total_dial_attempts.inc()
await transport.dial(hostname, addrs, peerId)
await transport.dial(hostname, address, peerId)
except CancelledError as exc:
trace "Dialing canceled",
description = exc.msg, peerId = peerId.get(default(PeerId))
debug "Dialing canceled", msg = exc.msg, peerId
raise exc
except CatchableError as exc:
debug "Dialing failed",
description = exc.msg, peerId = peerId.get(default(PeerId))
debug "Dialing failed", msg = exc.msg, peerId
libp2p_failed_dials.inc()
return nil # Try the next address
# also keep track of the connection's bottom unsafe transport direction
# required by gossipsub scoring
dialed.transportDir = Direction.Out
libp2p_successful_dials.inc()
let mux =
let conn =
try:
# This is for the very specific case of a simultaneous dial during DCUtR. In this case, both sides will have
# an Outbound direction at the transport level. Therefore we update the DCUtR initiator transport direction to Inbound.
# The if below is more general and might handle other use cases in the future.
if dialed.dir != dir:
dialed.dir = dir
await transport.upgrade(dialed, peerId)
except CancelledError as exc:
await dialed.close()
raise exc
await transport.upgradeOutgoing(dialed, peerId)
except CatchableError as exc:
# If we failed to establish the connection through one transport,
# we won't succeed through another - no use in trying again

await dialed.close()
debug "Connection upgrade failed",
description = exc.msg, peerId = peerId.get(default(PeerId))
if dialed.dir == Direction.Out:
debug "Upgrade failed", msg = exc.msg, peerId
if exc isnot CancelledError:
libp2p_failed_upgrades_outgoing.inc()
else:
libp2p_failed_upgrades_incoming.inc()
# Try other address
return nil
doAssert not isNil(mux), "connection died after upgrade " & $dialed.dir
debug "Dial successful", peerId = mux.connection.peerId
return mux
doAssert not isNil(conn), "connection died after upgradeOutgoing"
debug "Dial successful", conn, peerId = conn.peerId
return conn
return nil
proc expandDnsAddr(
self: Dialer, peerId: Opt[PeerId], address: MultiAddress
): Future[seq[(MultiAddress, Opt[PeerId])]] {.
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
.} =
if not DNS.matchPartial(address):
return @[(address, peerId)]
self: Dialer,
peerId: Opt[PeerId],
address: MultiAddress): Future[seq[(MultiAddress, Opt[PeerId])]] {.async.} =
if not DNSADDR.matchPartial(address): return @[(address, peerId)]
if isNil(self.nameResolver):
info "Can't resolve DNSADDR without NameResolver", ma = address
info "Can't resolve DNSADDR without NameResolver", ma=address
return @[]
trace "Start trying to resolve addresses"
let
toResolve =
if peerId.isSome:
try:
address & MultiAddress.init(multiCodec("p2p"), peerId.tryGet()).tryGet()
except ResultError[void]:
raiseAssert "checked with if"
address & MultiAddress.init(multiCodec("p2p"), peerId.tryGet()).tryGet()
else:
address
resolved = await self.nameResolver.resolveDnsAddr(toResolve)
debug "resolved addresses",
originalAddresses = toResolve, resolvedAddresses = resolved
for resolvedAddress in resolved:
let lastPart = resolvedAddress[^1].tryGet()
if lastPart.protoCode == Result[MultiCodec, string].ok(multiCodec("p2p")):
var peerIdBytes: seq[byte]
try:
let
peerIdBytes = lastPart.protoArgument().tryGet()
except ResultError[string] as e:
raiseAssert "expandDnsAddr failed in expandDnsAddr protoArgument: " & e.msg
let addrPeerId = PeerId.init(peerIdBytes).tryGet()
result.add((resolvedAddress[0 ..^ 2].tryGet(), Opt.some(addrPeerId)))
addrPeerId = PeerId.init(peerIdBytes).tryGet()
result.add((resolvedAddress[0..^2].tryGet(), Opt.some(addrPeerId)))
else:
result.add((resolvedAddress, peerId))
method dialAndUpgrade*(
self: Dialer, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
): Future[Muxer] {.
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
.} =
debug "Dialing peer", peerId = peerId.get(default(PeerId)), addrs
proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress]):
Future[Connection] {.async.} =
debug "Dialing peer", peerId
for rawAddress in addrs:
# resolve potential dnsaddr
let addresses = await self.expandDnsAddr(peerId, rawAddress)
for (expandedAddress, addrPeerId) in addresses:
# DNS resolution
let
hostname = expandedAddress.getHostname()
resolvedAddresses =
if isNil(self.nameResolver):
@[expandedAddress]
else:
await self.nameResolver.resolveMAddress(expandedAddress)
debug "Expanded address and hostname",
expandedAddress = expandedAddress,
hostname = hostname,
resolvedAddresses = resolvedAddresses
if isNil(self.nameResolver): @[expandedAddress]
else: await self.nameResolver.resolveMAddress(expandedAddress)
for resolvedAddress in resolvedAddresses:
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress)
if not isNil(result):
return result
proc tryReusingConnection(self: Dialer, peerId: PeerId): Opt[Muxer] =
let muxer = self.connManager.selectMuxer(peerId)
if muxer == nil:
return Opt.none(Muxer)
proc tryReusingConnection(self: Dialer, peerId: PeerId): Future[Opt[Connection]] {.async.} =
var conn = self.connManager.selectConn(peerId)
if conn == nil:
return Opt.none(Connection)
trace "Reusing existing connection", muxer, direction = $muxer.connection.dir
return Opt.some(muxer)
if conn.atEof or conn.closed:
# This connection should already have been removed from the connection
# manager - it's essentially a bug that we end up here - we'll fail
# for now, hoping that this will clean themselves up later...
warn "dead connection in connection manager", conn
await conn.close()
raise newException(DialFailedError, "Zombie connection encountered")
trace "Reusing existing connection", conn, direction = $conn.dir
return Opt.some(conn)
proc internalConnect(
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress],
forceDial: bool,
reuseConnection = true,
dir = Direction.Out,
): Future[Muxer] {.async: (raises: [DialFailedError, CancelledError]).} =
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress],
forceDial: bool,
reuseConnection = true):
Future[Connection] {.async.} =
if Opt.some(self.localPeerId) == peerId:
raise newException(DialFailedError, "internalConnect can't dial self!")
raise newException(CatchableError, "can't dial self!")
# Ensure there's only one in-flight attempt per peer
let lock = self.dialLock.mgetOrPut(peerId.get(default(PeerId)), newAsyncLock())
await lock.acquire()
defer:
try:
lock.release()
except AsyncLockError as e:
raiseAssert "lock must have been acquired in line above: " & e.msg
if reuseConnection:
peerId.withValue(peerId):
self.tryReusingConnection(peerId).withValue(mux):
return mux
let slot =
try:
self.connManager.getOutgoingSlot(forceDial)
except TooManyConnectionsError as exc:
raise newException(
DialFailedError, "failed getOutgoingSlot in internalConnect: " & exc.msg, exc
)
let muxed =
try:
await self.dialAndUpgrade(peerId, addrs, dir)
except CancelledError as exc:
slot.release()
raise exc
except CatchableError as exc:
slot.release()
raise newException(
DialFailedError, "failed dialAndUpgrade in internalConnect: " & exc.msg, exc
)
slot.trackMuxer(muxed)
if isNil(muxed): # None of the addresses connected
raise newException(
DialFailedError, "Unable to establish outgoing link in internalConnect"
)
try:
self.connManager.storeMuxer(muxed)
await self.peerStore.identify(muxed)
await self.connManager.triggerPeerEvents(
muxed.connection.peerId,
PeerEvent(kind: PeerEventKind.Identified, initiator: true),
)
return muxed
except CancelledError as exc:
await muxed.close()
raise exc
except CatchableError as exc:
trace "Failed to finish outgoing upgrade", description = exc.msg
await muxed.close()
raise newException(
DialFailedError,
"Failed to finish outgoing upgrade in internalConnect: " & exc.msg,
exc,
)
await lock.acquire()
if peerId.isSome and reuseConnection:
let connOpt = await self.tryReusingConnection(peerId.get())
if connOpt.isSome:
return connOpt.get()
let slot = self.connManager.getOutgoingSlot(forceDial)
let conn =
try:
await self.dialAndUpgrade(peerId, addrs)
except CatchableError as exc:
slot.release()
raise exc
slot.trackConnection(conn)
if isNil(conn): # None of the addresses connected
raise newException(DialFailedError, "Unable to establish outgoing link")
# A disconnect could have happened right after
# we've added the connection so we check again
# to prevent races due to that.
if conn.closed() or conn.atEof():
# This can happen when the other ends drops us
# before we get a chance to return the connection
# back to the dialer.
trace "Connection dead on arrival", conn
raise newLPStreamClosedError()
return conn
finally:
if lock.locked():
lock.release()
method connect*(
self: Dialer,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
dir = Direction.Out,
) {.async: (raises: [DialFailedError, CancelledError]).} =
self: Dialer,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true) {.async.} =
## connect remote peer without negotiating
## a protocol
##
@@ -263,40 +222,44 @@ method connect*(
if self.connManager.connCount(peerId) > 0 and reuseConnection:
return
discard
await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, dir)
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection)
method connect*(
self: Dialer, address: MultiAddress, allowUnknownPeerId = false
): Future[PeerId] {.async: (raises: [DialFailedError, CancelledError]).} =
self: Dialer,
address: MultiAddress,
allowUnknownPeerId = false): Future[PeerId] {.async.} =
## Connects to a peer and retrieve its PeerId
parseFullAddress(address).toOpt().withValue(fullAddress):
return (
await self.internalConnect(Opt.some(fullAddress[0]), @[fullAddress[1]], false)
).connection.peerId
let fullAddress = parseFullAddress(address)
if fullAddress.isOk:
return (await self.internalConnect(
Opt.some(fullAddress.get()[0]),
@[fullAddress.get()[1]],
false)).peerId
else:
if allowUnknownPeerId == false:
raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
return (await self.internalConnect(
Opt.none(PeerId),
@[address],
false)).peerId
if allowUnknownPeerId == false:
raise newException(
DialFailedError, "Address without PeerID and unknown peer id disabled in connect"
)
return
(await self.internalConnect(Opt.none(PeerId), @[address], false)).connection.peerId
method negotiateStream*(
self: Dialer, conn: Connection, protos: seq[string]
): Future[Connection] {.async: (raises: [CatchableError]).} =
proc negotiateStream(
self: Dialer,
conn: Connection,
protos: seq[string]): Future[Connection] {.async.} =
trace "Negotiating stream", conn, protos
let selected = await MultistreamSelect.select(conn, protos)
let selected = await self.ms.select(conn, protos)
if not protos.contains(selected):
await conn.closeWithEOF()
raise newException(DialFailedError, "Unable to select sub-protocol: " & $protos)
raise newException(DialFailedError, "Unable to select sub-protocol " & $protos)
return conn
method tryDial*(
self: Dialer, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.async: (raises: [DialFailedError, CancelledError]).} =
self: Dialer,
peerId: PeerId,
addrs: seq[MultiAddress]): Future[Opt[MultiAddress]] {.async.} =
## Create a protocol stream in order to check
## if a connection is possible.
## Doesn't use the Connection Manager to save it.
@@ -304,60 +267,50 @@ method tryDial*(
trace "Check if it can dial", peerId, addrs
try:
let mux = await self.dialAndUpgrade(Opt.some(peerId), addrs)
if mux.isNil():
raise newException(DialFailedError, "No valid multiaddress in tryDial")
await mux.close()
return mux.connection.observedAddr
let conn = await self.dialAndUpgrade(Opt.some(peerId), addrs)
if conn.isNil():
raise newException(DialFailedError, "No valid multiaddress")
await conn.close()
return conn.observedAddr
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise newException(DialFailedError, "tryDial failed: " & exc.msg, exc)
raise newException(DialFailedError, exc.msg)
method dial*(
self: Dialer, peerId: PeerId, protos: seq[string]
): Future[Connection] {.async: (raises: [DialFailedError, CancelledError]).} =
self: Dialer,
peerId: PeerId,
protos: seq[string]): Future[Connection] {.async.} =
## create a protocol stream over an
## existing connection
##
trace "Dialing (existing)", peerId, protos
let stream = await self.connManager.getStream(peerId)
if stream.isNil:
raise newException(DialFailedError, "Couldn't get muxed stream")
try:
let stream = await self.connManager.getStream(peerId)
if stream.isNil:
raise newException(
DialFailedError,
"Couldn't get muxed stream in dial for peer_id: " & shortLog(peerId),
)
return await self.negotiateStream(stream, protos)
except CancelledError as exc:
trace "Dial canceled", description = exc.msg
raise exc
except CatchableError as exc:
trace "Error dialing", description = exc.msg
raise newException(DialFailedError, "failed dial existing: " & exc.msg)
return await self.negotiateStream(stream, protos)
method dial*(
self: Dialer,
peerId: PeerId,
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false,
): Future[Connection] {.async: (raises: [DialFailedError, CancelledError]).} =
self: Dialer,
peerId: PeerId,
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false): Future[Connection] {.async.} =
## create a protocol stream and establish
## a connection if one doesn't exist already
##
var
conn: Muxer
conn: Connection
stream: Connection
proc cleanup() {.async: (raises: []).} =
if not (isNil(stream)):
proc cleanup() {.async.} =
if not(isNil(stream)):
await stream.closeWithEOF()
if not (isNil(conn)):
if not(isNil(conn)):
await conn.close()
try:
@@ -367,36 +320,32 @@ method dial*(
stream = await self.connManager.getStream(conn)
if isNil(stream):
raise newException(
DialFailedError,
"Couldn't get muxed stream in new dial for remote_peer_id: " & shortLog(peerId),
)
raise newException(DialFailedError,
"Couldn't get muxed stream")
return await self.negotiateStream(stream, protos)
except CancelledError as exc:
trace "Dial canceled", conn, description = exc.msg
trace "Dial canceled", conn
await cleanup()
raise exc
except CatchableError as exc:
debug "Error dialing", conn, description = exc.msg
debug "Error dialing", conn, msg = exc.msg
await cleanup()
raise newException(DialFailedError, "failed new dial: " & exc.msg, exc)
raise exc
method addTransport*(self: Dialer, t: Transport) =
self.transports &= t
proc new*(
T: type Dialer,
localPeerId: PeerId,
connManager: ConnManager,
peerStore: PeerStore,
transports: seq[Transport],
nameResolver: NameResolver = nil,
): Dialer =
T(
localPeerId: localPeerId,
T: type Dialer,
localPeerId: PeerId,
connManager: ConnManager,
transports: seq[Transport],
ms: MultistreamSelect,
nameResolver: NameResolver = nil): Dialer =
T(localPeerId: localPeerId,
connManager: connManager,
transports: transports,
peerStore: peerStore,
nameResolver: nameResolver,
)
ms: ms,
nameResolver: nameResolver)
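
Both versions of this file expose the same user-facing entry points: `connect` establishes (or reuses) an upgraded connection to a peer, `dial` opens a stream and negotiates one of the requested protocols, and `tryDial` probes reachability without registering the connection. A call-pattern sketch against the `Dial` interface; the dialer instance, peer id and addresses are placeholders:

import chronos, libp2p                        # umbrella import assumed

proc example(dialer: Dial, peerId: PeerId,
             addrs: seq[MultiAddress]) {.async.} =
  # reuse an existing connection when possible, otherwise dial out
  await dialer.connect(peerId, addrs)
  # open a muxed stream and negotiate a protocol over it
  let conn = await dialer.dial(peerId, @["/ipfs/ping/1.0.0"])
  defer: await conn.close()
  # probe which address actually answers, without keeping the connection
  let observed = await dialer.tryDial(peerId, addrs)
  if observed.isSome():
    echo "reachable via ", observed.get()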

View File

@@ -7,15 +7,18 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/sequtils
import chronos, chronicles, results
import chronos, chronicles, stew/results
import ../errors
type
BaseAttr = ref object of RootObj
comparator: proc(f, c: BaseAttr): bool {.gcsafe, raises: [].}
comparator: proc(f, c: BaseAttr): bool {.gcsafe, raises: [Defect].}
Attribute[T] = ref object of BaseAttr
value: T
@@ -33,12 +36,12 @@ proc ofType*[T](f: BaseAttr, _: type[T]): bool =
proc to*[T](f: BaseAttr, _: type[T]): T =
Attribute[T](f).value
proc add*[T](pa: var PeerAttributes, value: T) =
pa.attributes.add(
Attribute[T](
proc add*[T](pa: var PeerAttributes,
value: T) =
pa.attributes.add(Attribute[T](
value: value,
comparator: proc(f: BaseAttr, c: BaseAttr): bool =
f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T),
f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T)
)
)
@@ -57,9 +60,8 @@ proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =
return Opt.some(f.to(T))
Opt.none(T)
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [KeyError].} =
pa{T}.valueOr:
raise newException(KeyError, "Attribute not found")
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [Defect, KeyError].} =
pa{T}.valueOr: raise newException(KeyError, "Attritute not found")
proc match*(pa, candidate: PeerAttributes): bool =
for f in pa.attributes:
@@ -71,7 +73,7 @@ proc match*(pa, candidate: PeerAttributes): bool =
return true
type
PeerFoundCallback* = proc(pa: PeerAttributes) {.raises: [], gcsafe.}
PeerFoundCallback* = proc(pa: PeerAttributes) {.raises: [Defect], gcsafe.}
DiscoveryInterface* = ref object of RootObj
onPeerFound*: PeerFoundCallback
@@ -79,21 +81,16 @@ type
advertisementUpdated*: AsyncEvent
advertiseLoop*: Future[void]
DiscoveryError* = object of LPError
DiscoveryFinished* = object of LPError
AdvertiseError* = object of DiscoveryError
method request*(self: DiscoveryInterface, pa: PeerAttributes) {.async, base.} =
doAssert(false, "Not implemented!")
method request*(
self: DiscoveryInterface, pa: PeerAttributes
) {.base, async: (raises: [DiscoveryError, CancelledError]).} =
doAssert(false, "[DiscoveryInterface.request] abstract method not implemented!")
method advertise*(
self: DiscoveryInterface
) {.base, async: (raises: [CancelledError, AdvertiseError]).} =
doAssert(false, "[DiscoveryInterface.advertise] abstract method not implemented!")
method advertise*(self: DiscoveryInterface) {.async, base.} =
doAssert(false, "Not implemented!")
type
DiscoveryError* = object of LPError
DiscoveryFinished* = object of LPError
DiscoveryQuery* = ref object
attr: PeerAttributes
peers: AsyncQueue[PeerAttributes]
@@ -107,13 +104,13 @@ type
proc add*(dm: DiscoveryManager, di: DiscoveryInterface) =
dm.interfaces &= di
di.onPeerFound = proc(pa: PeerAttributes) =
di.onPeerFound = proc (pa: PeerAttributes) =
for query in dm.queries:
if query.attr.match(pa):
try:
query.peers.putNoWait(pa)
except AsyncQueueFullError as exc:
debug "Cannot push discovered peer to queue", description = exc.msg
debug "Cannot push discovered peer to queue"
proc request*(dm: DiscoveryManager, pa: PeerAttributes): DiscoveryQuery =
var query = DiscoveryQuery(attr: pa, peers: newAsyncQueue[PeerAttributes]())
@@ -128,29 +125,30 @@ proc request*[T](dm: DiscoveryManager, value: T): DiscoveryQuery =
pa.add(value)
return dm.request(pa)
proc advertise*[T](dm: DiscoveryManager, value: T) =
proc advertise*(dm: DiscoveryManager, pa: PeerAttributes) =
for i in dm.interfaces:
i.toAdvertise.add(value)
i.toAdvertise = pa
if i.advertiseLoop.isNil:
i.advertisementUpdated = newAsyncEvent()
i.advertiseLoop = i.advertise()
else:
i.advertisementUpdated.fire()
proc advertise*[T](dm: DiscoveryManager, value: T) =
var pa: PeerAttributes
pa.add(value)
dm.advertise(pa)
template forEach*(query: DiscoveryQuery, code: untyped) =
## Will execute `code` for each discovered peer. The
## peer attributes are available through the variable
## `peer`
proc forEachInternal(
q: DiscoveryQuery
) {.async: (raises: [CancelledError, DiscoveryError]).} =
proc forEachInternal(q: DiscoveryQuery) {.async.} =
while true:
let peer {.inject.} =
try:
await q.getPeer()
except DiscoveryFinished:
return
try: await q.getPeer()
except DiscoveryFinished: return
code
asyncSpawn forEachInternal(query)
@@ -158,28 +156,22 @@ template forEach*(query: DiscoveryQuery, code: untyped) =
proc stop*(query: DiscoveryQuery) =
query.finished = true
for r in query.futs:
if not r.finished():
r.cancelSoon()
if not r.finished(): r.cancel()
proc stop*(dm: DiscoveryManager) =
for q in dm.queries:
q.stop()
for i in dm.interfaces:
if isNil(i.advertiseLoop):
continue
i.advertiseLoop.cancelSoon()
if isNil(i.advertiseLoop): continue
i.advertiseLoop.cancel()
proc getPeer*(
query: DiscoveryQuery
): Future[PeerAttributes] {.
async: (raises: [CancelledError, DiscoveryError, DiscoveryFinished])
.} =
proc getPeer*(query: DiscoveryQuery): Future[PeerAttributes] {.async.} =
let getter = query.peers.popFirst()
try:
await getter or allFinished(query.futs)
except CancelledError as exc:
getter.cancelSoon()
getter.cancel()
raise exc
if not finished(getter):
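
The discovery-manager hunks change error handling and the shape of `advertise`, but the intended flow is the same on both sides: register one or more `DiscoveryInterface`s, `advertise` the attributes you serve, `request` the ones you look for, and consume matches through `forEach`. A sketch (module paths assumed; `RdvNamespace` comes from the rendezvous interface shown two files below):

import libp2p/discovery/[discoverymngr, rendezvousinterface]   # module paths assumed

proc discoverDemo(iface: DiscoveryInterface) =
  let dm = DiscoveryManager()
  dm.add(iface)                              # e.g. a RendezVousInterface
  dm.advertise(RdvNamespace("my-app"))       # announce ourselves under this namespace
  let query = dm.request(RdvNamespace("my-app"))
  query.forEach:
    let pid = peer{PeerId}                   # `peer` is injected by the template
    if pid.isSome():
      echo "discovered ", pid.get()
  # ... later, when done with discovery
  query.stop()
  dm.stop()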

View File

@@ -7,33 +7,36 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import sequtils
import chronos
import ./discoverymngr, ../protocols/rendezvous, ../peerid
import ./discoverymngr,
../protocols/rendezvous,
../peerid
type
RendezVousInterface* = ref object of DiscoveryInterface
rdv*: RendezVous
timeToRequest: Duration
timeToAdvertise: Duration
ttl: Duration
RdvNamespace* = distinct string
proc `==`*(a, b: RdvNamespace): bool {.borrow.}
method request*(
self: RendezVousInterface, pa: PeerAttributes
) {.async: (raises: [DiscoveryError, CancelledError]).} =
var namespace = Opt.none(string)
method request*(self: RendezVousInterface, pa: PeerAttributes) {.async.} =
var namespace = ""
for attr in pa:
if attr.ofType(RdvNamespace):
namespace = Opt.some(string attr.to(RdvNamespace))
namespace = string attr.to(RdvNamespace)
elif attr.ofType(DiscoveryService):
namespace = Opt.some(string attr.to(DiscoveryService))
namespace = string attr.to(DiscoveryService)
elif attr.ofType(PeerId):
namespace = Opt.some($attr.to(PeerId))
namespace = $attr.to(PeerId)
else:
# unhandled type
return
@@ -44,15 +47,13 @@ method request*(
for address in pr.addresses:
peer.add(address.address)
peer.add(DiscoveryService(namespace.get()))
peer.add(RdvNamespace(namespace.get()))
peer.add(DiscoveryService(namespace))
peer.add(RdvNamespace(namespace))
self.onPeerFound(peer)
await sleepAsync(self.timeToRequest)
method advertise*(
self: RendezVousInterface
) {.async: (raises: [CancelledError, AdvertiseError]).} =
method advertise*(self: RendezVousInterface) {.async.} =
while true:
var toAdvertise: seq[string]
for attr in self.toAdvertise:
@@ -65,18 +66,12 @@ method advertise*(
self.advertisementUpdated.clear()
for toAdv in toAdvertise:
try:
await self.rdv.advertise(toAdv, self.ttl)
except CatchableError as error:
debug "RendezVous advertise error: ", description = error.msg
await self.rdv.advertise(toAdv, self.timeToAdvertise)
await sleepAsync(self.timeToAdvertise) or self.advertisementUpdated.wait()
proc new*(
T: typedesc[RendezVousInterface],
rdv: RendezVous,
ttr: Duration = 1.minutes,
tta: Duration = 1.minutes,
ttl: Duration = MinimumDuration,
): RendezVousInterface =
T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta, ttl: ttl)
proc new*(T: typedesc[RendezVousInterface],
rdv: RendezVous,
ttr: Duration = 1.minutes,
tta: Duration = MinimumDuration): RendezVousInterface =
T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta)
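
Continuing the previous sketch, wiring the interface above into a manager might look like the lines below; `rdv` is assumed to be a `RendezVous` instance already mounted on a switch, and `dm` the `DiscoveryManager` from the earlier example:

let rdvIface = RendezVousInterface.new(rdv, ttr = 30.seconds)  # `rdv` set up elsewhere
dm.add(rdvIface)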

View File

@@ -19,30 +19,58 @@ func toException*(e: string): ref LPError =
# sadly nim needs more love for hygienic templates
# so here goes the macro, its based on the proc/template version
# and uses quote do so it's quite readable
# TODO https://github.com/nim-lang/Nim/issues/22936
macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
macro checkFutures*[T](futs: seq[Future[T]], exclude: untyped = []): untyped =
let nexclude = exclude.len
case nexclude
of 0:
quote:
quote do:
for res in `futs`:
if res.failed:
let exc = res.error
let exc = res.readError()
# We still don't abort but warn
debug "A future has failed, enable trace logging for details",
error = exc.name
trace "Exception message", description = exc.msg, stack = getStackTrace()
debug "A future has failed, enable trace logging for details", error = exc.name
trace "Exception message", msg= exc.msg, stack = getStackTrace()
else:
quote:
quote do:
for res in `futs`:
block check:
if res.failed:
let exc = res.error
for i in 0 ..< `nexclude`:
let exc = res.readError()
for i in 0..<`nexclude`:
if exc of `exclude`[i]:
trace "A future has failed", error = exc.name, description = exc.msg
trace "A future has failed", error=exc.name, msg=exc.msg
break check
# We still don't abort but warn
debug "A future has failed, enable trace logging for details",
error = exc.name
trace "Exception details", description = exc.msg
debug "A future has failed, enable trace logging for details", error=exc.name
trace "Exception details", msg=exc.msg
proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] =
var futs: seq[Future[T]]
for fut in args:
futs &= fut
proc call() {.async.} =
var first: ref CatchableError = nil
futs = await allFinished(futs)
for fut in futs:
if fut.failed:
let err = fut.readError()
if err of Defect:
raise err
else:
if err of CancelledError:
raise err
if isNil(first):
first = err
if not isNil(first):
raise first
return call()
template tryAndWarn*(message: static[string]; body: untyped): untyped =
try:
body
except CancelledError as exc:
raise exc
except CatchableError as exc:
debug "An exception has ocurred, enable trace logging for details", name = exc.name, msg = message
trace "Exception details", exc = exc.msg

File diff suppressed because it is too large

View File

@@ -13,41 +13,36 @@
## 1. base32z
##
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import tables
import results
import stew/[base32, base58, base64]
import ./utils/sequninit
import stew/[base32, base58, base64, results]
type
MultiBaseStatus* {.pure.} = enum
Error
Success
Overrun
Incorrect
BadCodec
NotSupported
Error, Success, Overrun, Incorrect, BadCodec, NotSupported
MultiBase* = object
MBCodeSize = proc(length: int): int {.nimcall, gcsafe, noSideEffect, raises: [].}
MBCodeSize = proc(length: int): int {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
MBCodec = object
code: char
name: string
encr: proc(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
decr: proc(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
encr: proc(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
decr: proc(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
encl: MBCodeSize
decl: MBCodeSize
proc idd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc idd(inbytes: openArray[char], outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
let length = len(inbytes)
if length > len(outbytes):
outlen = length
@@ -57,9 +52,9 @@ proc idd(
outlen = length
result = MultiBaseStatus.Success
proc ide(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc ide(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
let length = len(inbytes)
if length > len(outbytes):
outlen = length
@@ -69,37 +64,31 @@ proc ide(
outlen = length
result = MultiBaseStatus.Success
proc idel(length: int): int =
length
proc idel(length: int): int = length
proc iddl(length: int): int = length
proc iddl(length: int): int =
length
proc b16d(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b16d(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
discard
proc b16e(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b16e(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
discard
proc b16ud(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b16ud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
discard
proc b16ue(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b16ue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
discard
proc b16el(length: int): int =
length shl 1
proc b16dl(length: int): int =
(length + 1) div 2
proc b16el(length: int): int = length shl 1
proc b16dl(length: int): int = (length + 1) div 2
proc b32ce(r: Base32Status): MultiBaseStatus {.inline.} =
result = MultiBaseStatus.Error
@@ -128,253 +117,218 @@ proc b64ce(r: Base64Status): MultiBaseStatus {.inline.} =
elif r == Base64Status.Success:
result = MultiBaseStatus.Success
proc b32hd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b32hd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32Lower.decode(inbytes, outbytes, outlen))
proc b32he(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b32he(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32Lower.encode(inbytes, outbytes, outlen))
proc b32hud(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b32hud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32Upper.decode(inbytes, outbytes, outlen))
proc b32hue(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b32hue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32Upper.encode(inbytes, outbytes, outlen))
proc b32hpd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b32hpd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32LowerPad.decode(inbytes, outbytes, outlen))
proc b32hpe(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b32hpe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32LowerPad.encode(inbytes, outbytes, outlen))
proc b32hpud(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b32hpud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32UpperPad.decode(inbytes, outbytes, outlen))
proc b32hpue(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b32hpue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32UpperPad.encode(inbytes, outbytes, outlen))
proc b32d(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b32d(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32Lower.decode(inbytes, outbytes, outlen))
proc b32e(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b32e(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32Lower.encode(inbytes, outbytes, outlen))
proc b32ud(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b32ud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32Upper.decode(inbytes, outbytes, outlen))
proc b32ue(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b32ue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32Upper.encode(inbytes, outbytes, outlen))
proc b32pd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b32pd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32LowerPad.decode(inbytes, outbytes, outlen))
proc b32pe(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b32pe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32LowerPad.encode(inbytes, outbytes, outlen))
proc b32pud(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b32pud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32UpperPad.decode(inbytes, outbytes, outlen))
proc b32pue(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b32pue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32UpperPad.encode(inbytes, outbytes, outlen))
proc b32el(length: int): int =
Base32Lower.encodedLength(length)
proc b32el(length: int): int = Base32Lower.encodedLength(length)
proc b32dl(length: int): int = Base32Lower.decodedLength(length)
proc b32pel(length: int): int = Base32LowerPad.encodedLength(length)
proc b32pdl(length: int): int = Base32LowerPad.decodedLength(length)
proc b32dl(length: int): int =
Base32Lower.decodedLength(length)
proc b32pel(length: int): int =
Base32LowerPad.encodedLength(length)
proc b32pdl(length: int): int =
Base32LowerPad.decodedLength(length)
proc b58fd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b58fd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b58ce(FLCBase58.decode(inbytes, outbytes, outlen))
proc b58fe(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b58fe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b58ce(FLCBase58.encode(inbytes, outbytes, outlen))
proc b58bd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b58bd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b58ce(BTCBase58.decode(inbytes, outbytes, outlen))
proc b58be(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b58be(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b58ce(BTCBase58.encode(inbytes, outbytes, outlen))
proc b58el(length: int): int =
Base58.encodedLength(length)
proc b58el(length: int): int = Base58.encodedLength(length)
proc b58dl(length: int): int = Base58.decodedLength(length)
proc b58dl(length: int): int =
Base58.decodedLength(length)
proc b64el(length: int): int = Base64.encodedLength(length)
proc b64dl(length: int): int = Base64.decodedLength(length)
proc b64pel(length: int): int = Base64Pad.encodedLength(length)
proc b64pdl(length: int): int = Base64Pad.decodedLength(length)
proc b64el(length: int): int =
Base64.encodedLength(length)
proc b64dl(length: int): int =
Base64.decodedLength(length)
proc b64pel(length: int): int =
Base64Pad.encodedLength(length)
proc b64pdl(length: int): int =
Base64Pad.decodedLength(length)
proc b64e(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b64e(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64.encode(inbytes, outbytes, outlen))
proc b64d(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b64d(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64.decode(inbytes, outbytes, outlen))
proc b64pe(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b64pe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64Pad.encode(inbytes, outbytes, outlen))
proc b64pd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b64pd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64Pad.decode(inbytes, outbytes, outlen))
proc b64ue(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b64ue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64Url.encode(inbytes, outbytes, outlen))
proc b64ud(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b64ud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64Url.decode(inbytes, outbytes, outlen))
proc b64upe(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
proc b64upe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64UrlPad.encode(inbytes, outbytes, outlen))
proc b64upd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
proc b64upd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64UrlPad.decode(inbytes, outbytes, outlen))
const MultiBaseCodecs = [
MBCodec(
name: "identity", code: chr(0x00), decr: idd, encr: ide, decl: iddl, encl: idel
),
MBCodec(name: "base1", code: '1'),
MBCodec(name: "base2", code: '0'),
MBCodec(name: "base8", code: '7'),
MBCodec(name: "base10", code: '9'),
MBCodec(name: "base16", code: 'f', decr: b16d, encr: b16e, decl: b16dl, encl: b16el),
MBCodec(
name: "base16upper", code: 'F', decr: b16ud, encr: b16ue, decl: b16dl, encl: b16el
),
MBCodec(
name: "base32hex", code: 'v', decr: b32hd, encr: b32he, decl: b32dl, encl: b32el
),
MBCodec(
name: "base32hexupper",
code: 'V',
decr: b32hud,
encr: b32hue,
decl: b32dl,
encl: b32el,
),
MBCodec(
name: "base32hexpad",
code: 't',
decr: b32hpd,
encr: b32hpe,
decl: b32pdl,
encl: b32pel,
),
MBCodec(
name: "base32hexpadupper",
code: 'T',
decr: b32hpud,
encr: b32hpue,
decl: b32pdl,
encl: b32pel,
),
MBCodec(name: "base32", code: 'b', decr: b32d, encr: b32e, decl: b32dl, encl: b32el),
MBCodec(
name: "base32upper", code: 'B', decr: b32ud, encr: b32ue, decl: b32dl, encl: b32el
),
MBCodec(
name: "base32pad", code: 'c', decr: b32pd, encr: b32pe, decl: b32pdl, encl: b32pel
),
MBCodec(
name: "base32padupper",
code: 'C',
decr: b32pud,
encr: b32pue,
decl: b32pdl,
encl: b32pel,
),
MBCodec(name: "base32z", code: 'h'),
MBCodec(
name: "base58flickr", code: 'Z', decr: b58fd, encr: b58fe, decl: b58dl, encl: b58el
),
MBCodec(
name: "base58btc", code: 'z', decr: b58bd, encr: b58be, decl: b58dl, encl: b58el
),
MBCodec(name: "base64", code: 'm', decr: b64d, encr: b64e, decl: b64dl, encl: b64el),
MBCodec(
name: "base64pad", code: 'M', decr: b64pd, encr: b64pe, decl: b64pdl, encl: b64pel
),
MBCodec(
name: "base64url", code: 'u', decr: b64ud, encr: b64ue, decl: b64dl, encl: b64el
),
MBCodec(
name: "base64urlpad",
code: 'U',
decr: b64upd,
encr: b64upe,
decl: b64pdl,
encl: b64pel,
),
]
const
MultiBaseCodecs = [
MBCodec(name: "identity", code: chr(0x00),
decr: idd, encr: ide, decl: iddl, encl: idel
),
MBCodec(name: "base1", code: '1'),
MBCodec(name: "base2", code: '0'),
MBCodec(name: "base8", code: '7'),
MBCodec(name: "base10", code: '9'),
MBCodec(name: "base16", code: 'f',
decr: b16d, encr: b16e, decl: b16dl, encl: b16el
),
MBCodec(name: "base16upper", code: 'F',
decr: b16ud, encr: b16ue, decl: b16dl, encl: b16el
),
MBCodec(name: "base32hex", code: 'v',
decr: b32hd, encr: b32he, decl: b32dl, encl: b32el
),
MBCodec(name: "base32hexupper", code: 'V',
decr: b32hud, encr: b32hue, decl: b32dl, encl: b32el
),
MBCodec(name: "base32hexpad", code: 't',
decr: b32hpd, encr: b32hpe, decl: b32pdl, encl: b32pel
),
MBCodec(name: "base32hexpadupper", code: 'T',
decr: b32hpud, encr: b32hpue, decl: b32pdl, encl: b32pel
),
MBCodec(name: "base32", code: 'b',
decr: b32d, encr: b32e, decl: b32dl, encl: b32el
),
MBCodec(name: "base32upper", code: 'B',
decr: b32ud, encr: b32ue, decl: b32dl, encl: b32el
),
MBCodec(name: "base32pad", code: 'c',
decr: b32pd, encr: b32pe, decl: b32pdl, encl: b32pel
),
MBCodec(name: "base32padupper", code: 'C',
decr: b32pud, encr: b32pue, decl: b32pdl, encl: b32pel
),
MBCodec(name: "base32z", code: 'h'),
MBCodec(name: "base58flickr", code: 'Z',
decr: b58fd, encr: b58fe, decl: b58dl, encl: b58el
),
MBCodec(name: "base58btc", code: 'z',
decr: b58bd, encr: b58be, decl: b58dl, encl: b58el
),
MBCodec(name: "base64", code: 'm',
decr: b64d, encr: b64e, decl: b64dl, encl: b64el
),
MBCodec(name: "base64pad", code: 'M',
decr: b64pd, encr: b64pe, decl: b64pdl, encl: b64pel
),
MBCodec(name: "base64url", code: 'u',
decr: b64ud, encr: b64ue, decl: b64dl, encl: b64el
),
MBCodec(name: "base64urlpad", code: 'U',
decr: b64upd, encr: b64upe, decl: b64pdl, encl: b64pel
)
]
proc initMultiBaseCodeTable(): Table[char, MBCodec] {.compileTime.} =
for item in MultiBaseCodecs:
@@ -388,7 +342,8 @@ const
CodeMultiBases = initMultiBaseCodeTable()
NameMultiBases = initMultiBaseNameTable()
proc encodedLength*(mbtype: typedesc[MultiBase], encoding: string, length: int): int =
proc encodedLength*(mbtype: typedesc[MultiBase], encoding: string,
length: int): int =
## Return estimated size of buffer to store MultiBase encoded value with
## encoding ``encoding`` of length ``length``.
##
@@ -403,7 +358,8 @@ proc encodedLength*(mbtype: typedesc[MultiBase], encoding: string, length: int):
else:
result = mb.encl(length) + 1
proc decodedLength*(mbtype: typedesc[MultiBase], encoding: char, length: int): int =
proc decodedLength*(mbtype: typedesc[MultiBase], encoding: char,
length: int): int =
## Return estimated size of buffer to store MultiBase decoded value with
## encoding character ``encoding`` of length ``length``.
let mb = CodeMultiBases.getOrDefault(encoding)
@@ -415,13 +371,9 @@ proc decodedLength*(mbtype: typedesc[MultiBase], encoding: char, length: int): i
else:
result = mb.decl(length - 1)
proc encode*(
mbtype: typedesc[MultiBase],
encoding: string,
inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int,
): MultiBaseStatus =
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
inbytes: openArray[byte], outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
## Encode array ``inbytes`` using MultiBase encoding scheme ``encoding`` and
## store encoded value to ``outbytes``.
##
@@ -443,7 +395,8 @@ proc encode*(
if isNil(mb.encr) or isNil(mb.encl):
return MultiBaseStatus.NotSupported
if len(outbytes) > 1:
result = mb.encr(inbytes, outbytes.toOpenArray(1, outbytes.high), outlen)
result = mb.encr(inbytes, outbytes.toOpenArray(1, outbytes.high),
outlen)
if result == MultiBaseStatus.Overrun:
outlen += 1
elif result == MultiBaseStatus.Success:
@@ -458,12 +411,8 @@ proc encode*(
result = MultiBaseStatus.Overrun
outlen = mb.encl(len(inbytes)) + 1
proc decode*(
mbtype: typedesc[MultiBase],
inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int,
): MultiBaseStatus =
proc decode*(mbtype: typedesc[MultiBase], inbytes: openArray[char],
outbytes: var openArray[byte], outlen: var int): MultiBaseStatus =
## Decode array ``inbytes`` using MultiBase encoding and store decoded value
## to ``outbytes``.
##
@@ -492,9 +441,8 @@ proc decode*(
else:
result = mb.decr(inbytes.toOpenArray(1, length - 1), outbytes, outlen)
proc encode*(
mbtype: typedesc[MultiBase], encoding: string, inbytes: openArray[byte]
): Result[string, string] =
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
inbytes: openArray[byte]): Result[string, string] =
## Encode array ``inbytes`` using MultiBase encoding scheme ``encoding`` and
## return encoded string.
let length = len(inbytes)
@@ -517,9 +465,7 @@ proc encode*(
buffer[0] = mb.code
ok(buffer)
proc decode*(
mbtype: typedesc[MultiBase], inbytes: openArray[char]
): Result[seq[byte], string] =
proc decode*(mbtype: typedesc[MultiBase], inbytes: openArray[char]): Result[seq[byte], string] =
## Decode MultiBase encoded array ``inbytes`` and return decoded sequence of
## bytes.
let length = len(inbytes)
@@ -534,9 +480,10 @@ proc decode*(
let empty: seq[byte] = @[]
ok(empty) # empty
else:
var buffer = newSeqUninit[byte](mb.decl(length - 1))
var buffer = newSeq[byte](mb.decl(length - 1))
var outlen = 0
let res = mb.decr(inbytes.toOpenArray(1, length - 1), buffer, outlen)
let res = mb.decr(inbytes.toOpenArray(1, length - 1),
buffer, outlen)
if res != MultiBaseStatus.Success:
err("multibase: Decoding error [" & $res & "]")
else:
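As a usage sketch (not part of the diff): a round trip through the Result-based overloads above, assuming the module is importable as libp2p/multibase; isOk/get come from results (stew/results on older toolchains).

import results
import libp2p/multibase

let payload = @[byte 0x01, 0x02, 0x03]

# Encode with the base58btc scheme; its one-character prefix is 'z'
let encoded = MultiBase.encode("base58btc", payload)
if encoded.isOk:
  echo encoded.get()                       # "zLdp" for this payload

  # decode() picks the codec from the prefix character itself
  let decoded = MultiBase.decode(encoded.get())
  assert decoded.isOk and decoded.get() == payload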

View File

@@ -9,14 +9,18 @@
## This module implements MultiCodec.
{.push raises: [].}
{.used.}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import tables, hashes
import vbuffer
import results
import varint, vbuffer
import stew/results
export results
{.deadCodeElim: on.}
## List of officially supported codecs can be found here
## https://github.com/multiformats/multicodec/blob/master/table.csv
const MultiCodecList = [
@@ -50,325 +54,132 @@ const MultiCodecList = [
("keccak-384", 0x1C),
("keccak-512", 0x1D),
("murmur3", 0x22),
("blake2b-8", 0xB201),
("blake2b-16", 0xB202),
("blake2b-24", 0xB203),
("blake2b-32", 0xB204),
("blake2b-40", 0xB205),
("blake2b-48", 0xB206),
("blake2b-56", 0xB207),
("blake2b-64", 0xB208),
("blake2b-72", 0xB209),
("blake2b-80", 0xB20A),
("blake2b-88", 0xB20B),
("blake2b-96", 0xB20C),
("blake2b-104", 0xB20D),
("blake2b-112", 0xB20E),
("blake2b-120", 0xB20F),
("blake2b-128", 0xB210),
("blake2b-136", 0xB211),
("blake2b-144", 0xB212),
("blake2b-152", 0xB213),
("blake2b-160", 0xB214),
("blake2b-168", 0xB215),
("blake2b-176", 0xB216),
("blake2b-184", 0xB217),
("blake2b-192", 0xB218),
("blake2b-200", 0xB219),
("blake2b-208", 0xB21A),
("blake2b-216", 0xB21B),
("blake2b-224", 0xB21C),
("blake2b-232", 0xB21D),
("blake2b-240", 0xB21E),
("blake2b-248", 0xB21F),
("blake2b-256", 0xB220),
("blake2b-264", 0xB221),
("blake2b-272", 0xB222),
("blake2b-280", 0xB223),
("blake2b-288", 0xB224),
("blake2b-296", 0xB225),
("blake2b-304", 0xB226),
("blake2b-312", 0xB227),
("blake2b-320", 0xB228),
("blake2b-328", 0xB229),
("blake2b-336", 0xB22A),
("blake2b-344", 0xB22B),
("blake2b-352", 0xB22C),
("blake2b-360", 0xB22D),
("blake2b-368", 0xB22E),
("blake2b-376", 0xB22F),
("blake2b-384", 0xB230),
("blake2b-392", 0xB231),
("blake2b-400", 0xB232),
("blake2b-408", 0xB233),
("blake2b-416", 0xB234),
("blake2b-424", 0xB235),
("blake2b-432", 0xB236),
("blake2b-440", 0xB237),
("blake2b-448", 0xB238),
("blake2b-456", 0xB239),
("blake2b-464", 0xB23A),
("blake2b-472", 0xB23B),
("blake2b-480", 0xB23C),
("blake2b-488", 0xB23D),
("blake2b-496", 0xB23E),
("blake2b-504", 0xB23F),
("blake2b-512", 0xB240),
("blake2s-8", 0xB241),
("blake2s-16", 0xB242),
("blake2s-24", 0xB243),
("blake2s-32", 0xB244),
("blake2s-40", 0xB245),
("blake2s-48", 0xB246),
("blake2s-56", 0xB247),
("blake2s-64", 0xB248),
("blake2s-72", 0xB249),
("blake2s-80", 0xB24A),
("blake2s-88", 0xB24B),
("blake2s-96", 0xB24C),
("blake2s-104", 0xB24D),
("blake2s-112", 0xB24E),
("blake2s-120", 0xB24F),
("blake2s-128", 0xB250),
("blake2s-136", 0xB251),
("blake2s-144", 0xB252),
("blake2s-152", 0xB253),
("blake2s-160", 0xB254),
("blake2s-168", 0xB255),
("blake2s-176", 0xB256),
("blake2s-184", 0xB257),
("blake2s-192", 0xB258),
("blake2s-200", 0xB259),
("blake2s-208", 0xB25A),
("blake2s-216", 0xB25B),
("blake2s-224", 0xB25C),
("blake2s-232", 0xB25D),
("blake2s-240", 0xB25E),
("blake2s-248", 0xB25F),
("blake2s-256", 0xB260),
("skein256-8", 0xB301),
("skein256-16", 0xB302),
("skein256-24", 0xB303),
("skein256-32", 0xB304),
("skein256-40", 0xB305),
("skein256-48", 0xB306),
("skein256-56", 0xB307),
("skein256-64", 0xB308),
("skein256-72", 0xB309),
("skein256-80", 0xB30A),
("skein256-88", 0xB30B),
("skein256-96", 0xB30C),
("skein256-104", 0xB30D),
("skein256-112", 0xB30E),
("skein256-120", 0xB30F),
("skein256-128", 0xB310),
("skein256-136", 0xB311),
("skein256-144", 0xB312),
("skein256-152", 0xB313),
("skein256-160", 0xB314),
("skein256-168", 0xB315),
("skein256-176", 0xB316),
("skein256-184", 0xB317),
("skein256-192", 0xB318),
("skein256-200", 0xB319),
("skein256-208", 0xB31A),
("skein256-216", 0xB31B),
("skein256-224", 0xB31C),
("skein256-232", 0xB31D),
("skein256-240", 0xB31E),
("skein256-248", 0xB31F),
("skein256-256", 0xB320),
("skein512-8", 0xB321),
("skein512-16", 0xB322),
("skein512-24", 0xB323),
("skein512-32", 0xB324),
("skein512-40", 0xB325),
("skein512-48", 0xB326),
("skein512-56", 0xB327),
("skein512-64", 0xB328),
("skein512-72", 0xB329),
("skein512-80", 0xB32A),
("skein512-88", 0xB32B),
("skein512-96", 0xB32C),
("skein512-104", 0xB32D),
("skein512-112", 0xB32E),
("skein512-120", 0xB32F),
("skein512-128", 0xB330),
("skein512-136", 0xB331),
("skein512-144", 0xB332),
("skein512-152", 0xB333),
("skein512-160", 0xB334),
("skein512-168", 0xB335),
("skein512-176", 0xB336),
("skein512-184", 0xB337),
("skein512-192", 0xB338),
("skein512-200", 0xB339),
("skein512-208", 0xB33A),
("skein512-216", 0xB33B),
("skein512-224", 0xB33C),
("skein512-232", 0xB33D),
("skein512-240", 0xB33E),
("skein512-248", 0xB33F),
("skein512-256", 0xB340),
("skein512-264", 0xB341),
("skein512-272", 0xB342),
("skein512-280", 0xB343),
("skein512-288", 0xB344),
("skein512-296", 0xB345),
("skein512-304", 0xB346),
("skein512-312", 0xB347),
("skein512-320", 0xB348),
("skein512-328", 0xB349),
("skein512-336", 0xB34A),
("skein512-344", 0xB34B),
("skein512-352", 0xB34C),
("skein512-360", 0xB34D),
("skein512-368", 0xB34E),
("skein512-376", 0xB34F),
("skein512-384", 0xB350),
("skein512-392", 0xB351),
("skein512-400", 0xB352),
("skein512-408", 0xB353),
("skein512-416", 0xB354),
("skein512-424", 0xB355),
("skein512-432", 0xB356),
("skein512-440", 0xB357),
("skein512-448", 0xB358),
("skein512-456", 0xB359),
("skein512-464", 0xB35A),
("skein512-472", 0xB35B),
("skein512-480", 0xB35C),
("skein512-488", 0xB35D),
("skein512-496", 0xB35E),
("skein512-504", 0xB35F),
("skein512-512", 0xB360),
("skein1024-8", 0xB361),
("skein1024-16", 0xB362),
("skein1024-24", 0xB363),
("skein1024-32", 0xB364),
("skein1024-40", 0xB365),
("skein1024-48", 0xB366),
("skein1024-56", 0xB367),
("skein1024-64", 0xB368),
("skein1024-72", 0xB369),
("skein1024-80", 0xB36A),
("skein1024-88", 0xB36B),
("skein1024-96", 0xB36C),
("skein1024-104", 0xB36D),
("skein1024-112", 0xB36E),
("skein1024-120", 0xB36F),
("skein1024-128", 0xB370),
("skein1024-136", 0xB371),
("skein1024-144", 0xB372),
("skein1024-152", 0xB373),
("skein1024-160", 0xB374),
("skein1024-168", 0xB375),
("skein1024-176", 0xB376),
("skein1024-184", 0xB377),
("skein1024-192", 0xB378),
("skein1024-200", 0xB379),
("skein1024-208", 0xB37A),
("skein1024-216", 0xB37B),
("skein1024-224", 0xB37C),
("skein1024-232", 0xB37D),
("skein1024-240", 0xB37E),
("skein1024-248", 0xB37F),
("skein1024-256", 0xB380),
("skein1024-264", 0xB381),
("skein1024-272", 0xB382),
("skein1024-280", 0xB383),
("skein1024-288", 0xB384),
("skein1024-296", 0xB385),
("skein1024-304", 0xB386),
("skein1024-312", 0xB387),
("skein1024-320", 0xB388),
("skein1024-328", 0xB389),
("skein1024-336", 0xB38A),
("skein1024-344", 0xB38B),
("skein1024-352", 0xB38C),
("skein1024-360", 0xB38D),
("skein1024-368", 0xB38E),
("skein1024-376", 0xB38F),
("skein1024-384", 0xB390),
("skein1024-392", 0xB391),
("skein1024-400", 0xB392),
("skein1024-408", 0xB393),
("skein1024-416", 0xB394),
("skein1024-424", 0xB395),
("skein1024-432", 0xB396),
("skein1024-440", 0xB397),
("skein1024-448", 0xB398),
("skein1024-456", 0xB399),
("skein1024-464", 0xB39A),
("skein1024-472", 0xB39B),
("skein1024-480", 0xB39C),
("skein1024-488", 0xB39D),
("skein1024-496", 0xB39E),
("skein1024-504", 0xB39F),
("skein1024-512", 0xB3A0),
("skein1024-520", 0xB3A1),
("skein1024-528", 0xB3A2),
("skein1024-536", 0xB3A3),
("skein1024-544", 0xB3A4),
("skein1024-552", 0xB3A5),
("skein1024-560", 0xB3A6),
("skein1024-568", 0xB3A7),
("skein1024-576", 0xB3A8),
("skein1024-584", 0xB3A9),
("skein1024-592", 0xB3AA),
("skein1024-600", 0xB3AB),
("skein1024-608", 0xB3AC),
("skein1024-616", 0xB3AD),
("skein1024-624", 0xB3AE),
("skein1024-632", 0xB3AF),
("skein1024-640", 0xB3B0),
("skein1024-648", 0xB3B1),
("skein1024-656", 0xB3B2),
("skein1024-664", 0xB3B3),
("skein1024-672", 0xB3B4),
("skein1024-680", 0xB3B5),
("skein1024-688", 0xB3B6),
("skein1024-696", 0xB3B7),
("skein1024-704", 0xB3B8),
("skein1024-712", 0xB3B9),
("skein1024-720", 0xB3BA),
("skein1024-728", 0xB3BB),
("skein1024-736", 0xB3BC),
("skein1024-744", 0xB3BD),
("skein1024-752", 0xB3BE),
("skein1024-760", 0xB3BF),
("skein1024-768", 0xB3C0),
("skein1024-776", 0xB3C1),
("skein1024-784", 0xB3C2),
("skein1024-792", 0xB3C3),
("skein1024-800", 0xB3C4),
("skein1024-808", 0xB3C5),
("skein1024-816", 0xB3C6),
("skein1024-824", 0xB3C7),
("skein1024-832", 0xB3C8),
("skein1024-840", 0xB3C9),
("skein1024-848", 0xB3CA),
("skein1024-856", 0xB3CB),
("skein1024-864", 0xB3CC),
("skein1024-872", 0xB3CD),
("skein1024-880", 0xB3CE),
("skein1024-888", 0xB3CF),
("skein1024-896", 0xB3D0),
("skein1024-904", 0xB3D1),
("skein1024-912", 0xB3D2),
("skein1024-920", 0xB3D3),
("skein1024-928", 0xB3D4),
("skein1024-936", 0xB3D5),
("skein1024-944", 0xB3D6),
("skein1024-952", 0xB3D7),
("skein1024-960", 0xB3D8),
("skein1024-968", 0xB3D9),
("skein1024-976", 0xB3DA),
("skein1024-984", 0xB3DB),
("skein1024-992", 0xB3DC),
("skein1024-1000", 0xB3DD),
("skein1024-1008", 0xB3DE),
("skein1024-1016", 0xB3DF),
("blake2b-8", 0xB201), ("blake2b-16", 0xB202), ("blake2b-24", 0xB203),
("blake2b-32", 0xB204), ("blake2b-40", 0xB205), ("blake2b-48", 0xB206),
("blake2b-56", 0xB207), ("blake2b-64", 0xB208), ("blake2b-72", 0xB209),
("blake2b-80", 0xB20A), ("blake2b-88", 0xB20B), ("blake2b-96", 0xB20C),
("blake2b-104", 0xB20D), ("blake2b-112", 0xB20E), ("blake2b-120", 0xB20F),
("blake2b-128", 0xB210), ("blake2b-136", 0xB211), ("blake2b-144", 0xB212),
("blake2b-152", 0xB213), ("blake2b-160", 0xB214), ("blake2b-168", 0xB215),
("blake2b-176", 0xB216), ("blake2b-184", 0xB217), ("blake2b-192", 0xB218),
("blake2b-200", 0xB219), ("blake2b-208", 0xB21A), ("blake2b-216", 0xB21B),
("blake2b-224", 0xB21C), ("blake2b-232", 0xB21D), ("blake2b-240", 0xB21E),
("blake2b-248", 0xB21F), ("blake2b-256", 0xB220), ("blake2b-264", 0xB221),
("blake2b-272", 0xB222), ("blake2b-280", 0xB223), ("blake2b-288", 0xB224),
("blake2b-296", 0xB225), ("blake2b-304", 0xB226), ("blake2b-312", 0xB227),
("blake2b-320", 0xB228), ("blake2b-328", 0xB229), ("blake2b-336", 0xB22A),
("blake2b-344", 0xB22B), ("blake2b-352", 0xB22C), ("blake2b-360", 0xB22D),
("blake2b-368", 0xB22E), ("blake2b-376", 0xB22F), ("blake2b-384", 0xB230),
("blake2b-392", 0xB231), ("blake2b-400", 0xB232), ("blake2b-408", 0xB233),
("blake2b-416", 0xB234), ("blake2b-424", 0xB235), ("blake2b-432", 0xB236),
("blake2b-440", 0xB237), ("blake2b-448", 0xB238), ("blake2b-456", 0xB239),
("blake2b-464", 0xB23A), ("blake2b-472", 0xB23B), ("blake2b-480", 0xB23C),
("blake2b-488", 0xB23D), ("blake2b-496", 0xB23E), ("blake2b-504", 0xB23F),
("blake2b-512", 0xB240), ("blake2s-8", 0xB241), ("blake2s-16", 0xB242),
("blake2s-24", 0xB243), ("blake2s-32", 0xB244), ("blake2s-40", 0xB245),
("blake2s-48", 0xB246), ("blake2s-56", 0xB247), ("blake2s-64", 0xB248),
("blake2s-72", 0xB249), ("blake2s-80", 0xB24A), ("blake2s-88", 0xB24B),
("blake2s-96", 0xB24C), ("blake2s-104", 0xB24D), ("blake2s-112", 0xB24E),
("blake2s-120", 0xB24F), ("blake2s-128", 0xB250), ("blake2s-136", 0xB251),
("blake2s-144", 0xB252), ("blake2s-152", 0xB253), ("blake2s-160", 0xB254),
("blake2s-168", 0xB255), ("blake2s-176", 0xB256), ("blake2s-184", 0xB257),
("blake2s-192", 0xB258), ("blake2s-200", 0xB259), ("blake2s-208", 0xB25A),
("blake2s-216", 0xB25B), ("blake2s-224", 0xB25C), ("blake2s-232", 0xB25D),
("blake2s-240", 0xB25E), ("blake2s-248", 0xB25F), ("blake2s-256", 0xB260),
("skein256-8", 0xB301), ("skein256-16", 0xB302), ("skein256-24", 0xB303),
("skein256-32", 0xB304), ("skein256-40", 0xB305), ("skein256-48", 0xB306),
("skein256-56", 0xB307), ("skein256-64", 0xB308), ("skein256-72", 0xB309),
("skein256-80", 0xB30A), ("skein256-88", 0xB30B), ("skein256-96", 0xB30C),
("skein256-104", 0xB30D), ("skein256-112", 0xB30E), ("skein256-120", 0xB30F),
("skein256-128", 0xB310), ("skein256-136", 0xB311), ("skein256-144", 0xB312),
("skein256-152", 0xB313), ("skein256-160", 0xB314), ("skein256-168", 0xB315),
("skein256-176", 0xB316), ("skein256-184", 0xB317), ("skein256-192", 0xB318),
("skein256-200", 0xB319), ("skein256-208", 0xB31A), ("skein256-216", 0xB31B),
("skein256-224", 0xB31C), ("skein256-232", 0xB31D), ("skein256-240", 0xB31E),
("skein256-248", 0xB31F), ("skein256-256", 0xB320),
("skein512-8", 0xB321), ("skein512-16", 0xB322), ("skein512-24", 0xB323),
("skein512-32", 0xB324), ("skein512-40", 0xB325), ("skein512-48", 0xB326),
("skein512-56", 0xB327), ("skein512-64", 0xB328), ("skein512-72", 0xB329),
("skein512-80", 0xB32A), ("skein512-88", 0xB32B), ("skein512-96", 0xB32C),
("skein512-104", 0xB32D), ("skein512-112", 0xB32E), ("skein512-120", 0xB32F),
("skein512-128", 0xB330), ("skein512-136", 0xB331), ("skein512-144", 0xB332),
("skein512-152", 0xB333), ("skein512-160", 0xB334), ("skein512-168", 0xB335),
("skein512-176", 0xB336), ("skein512-184", 0xB337), ("skein512-192", 0xB338),
("skein512-200", 0xB339), ("skein512-208", 0xB33A), ("skein512-216", 0xB33B),
("skein512-224", 0xB33C), ("skein512-232", 0xB33D), ("skein512-240", 0xB33E),
("skein512-248", 0xB33F), ("skein512-256", 0xB340), ("skein512-264", 0xB341),
("skein512-272", 0xB342), ("skein512-280", 0xB343), ("skein512-288", 0xB344),
("skein512-296", 0xB345), ("skein512-304", 0xB346), ("skein512-312", 0xB347),
("skein512-320", 0xB348), ("skein512-328", 0xB349), ("skein512-336", 0xB34A),
("skein512-344", 0xB34B), ("skein512-352", 0xB34C), ("skein512-360", 0xB34D),
("skein512-368", 0xB34E), ("skein512-376", 0xB34F), ("skein512-384", 0xB350),
("skein512-392", 0xB351), ("skein512-400", 0xB352), ("skein512-408", 0xB353),
("skein512-416", 0xB354), ("skein512-424", 0xB355), ("skein512-432", 0xB356),
("skein512-440", 0xB357), ("skein512-448", 0xB358), ("skein512-456", 0xB359),
("skein512-464", 0xB35A), ("skein512-472", 0xB35B), ("skein512-480", 0xB35C),
("skein512-488", 0xB35D), ("skein512-496", 0xB35E), ("skein512-504", 0xB35F),
("skein512-512", 0xB360), ("skein1024-8", 0xB361), ("skein1024-16", 0xB362),
("skein1024-24", 0xB363), ("skein1024-32", 0xB364), ("skein1024-40", 0xB365),
("skein1024-48", 0xB366), ("skein1024-56", 0xB367), ("skein1024-64", 0xB368),
("skein1024-72", 0xB369), ("skein1024-80", 0xB36A), ("skein1024-88", 0xB36B),
("skein1024-96", 0xB36C), ("skein1024-104", 0xB36D),
("skein1024-112", 0xB36E), ("skein1024-120", 0xB36F),
("skein1024-128", 0xB370), ("skein1024-136", 0xB371),
("skein1024-144", 0xB372), ("skein1024-152", 0xB373),
("skein1024-160", 0xB374), ("skein1024-168", 0xB375),
("skein1024-176", 0xB376), ("skein1024-184", 0xB377),
("skein1024-192", 0xB378), ("skein1024-200", 0xB379),
("skein1024-208", 0xB37A), ("skein1024-216", 0xB37B),
("skein1024-224", 0xB37C), ("skein1024-232", 0xB37D),
("skein1024-240", 0xB37E), ("skein1024-248", 0xB37F),
("skein1024-256", 0xB380), ("skein1024-264", 0xB381),
("skein1024-272", 0xB382), ("skein1024-280", 0xB383),
("skein1024-288", 0xB384), ("skein1024-296", 0xB385),
("skein1024-304", 0xB386), ("skein1024-312", 0xB387),
("skein1024-320", 0xB388), ("skein1024-328", 0xB389),
("skein1024-336", 0xB38A), ("skein1024-344", 0xB38B),
("skein1024-352", 0xB38C), ("skein1024-360", 0xB38D),
("skein1024-368", 0xB38E), ("skein1024-376", 0xB38F),
("skein1024-384", 0xB390), ("skein1024-392", 0xB391),
("skein1024-400", 0xB392), ("skein1024-408", 0xB393),
("skein1024-416", 0xB394), ("skein1024-424", 0xB395),
("skein1024-432", 0xB396), ("skein1024-440", 0xB397),
("skein1024-448", 0xB398), ("skein1024-456", 0xB399),
("skein1024-464", 0xB39A), ("skein1024-472", 0xB39B),
("skein1024-480", 0xB39C), ("skein1024-488", 0xB39D),
("skein1024-496", 0xB39E), ("skein1024-504", 0xB39F),
("skein1024-512", 0xB3A0), ("skein1024-520", 0xB3A1),
("skein1024-528", 0xB3A2), ("skein1024-536", 0xB3A3),
("skein1024-544", 0xB3A4), ("skein1024-552", 0xB3A5),
("skein1024-560", 0xB3A6), ("skein1024-568", 0xB3A7),
("skein1024-576", 0xB3A8), ("skein1024-584", 0xB3A9),
("skein1024-592", 0xB3AA), ("skein1024-600", 0xB3AB),
("skein1024-608", 0xB3AC), ("skein1024-616", 0xB3AD),
("skein1024-624", 0xB3AE), ("skein1024-632", 0xB3AF),
("skein1024-640", 0xB3B0), ("skein1024-648", 0xB3B1),
("skein1024-656", 0xB3B2), ("skein1024-664", 0xB3B3),
("skein1024-672", 0xB3B4), ("skein1024-680", 0xB3B5),
("skein1024-688", 0xB3B6), ("skein1024-696", 0xB3B7),
("skein1024-704", 0xB3B8), ("skein1024-712", 0xB3B9),
("skein1024-720", 0xB3BA), ("skein1024-728", 0xB3BB),
("skein1024-736", 0xB3BC), ("skein1024-744", 0xB3BD),
("skein1024-752", 0xB3BE), ("skein1024-760", 0xB3BF),
("skein1024-768", 0xB3C0), ("skein1024-776", 0xB3C1),
("skein1024-784", 0xB3C2), ("skein1024-792", 0xB3C3),
("skein1024-800", 0xB3C4), ("skein1024-808", 0xB3C5),
("skein1024-816", 0xB3C6), ("skein1024-824", 0xB3C7),
("skein1024-832", 0xB3C8), ("skein1024-840", 0xB3C9),
("skein1024-848", 0xB3CA), ("skein1024-856", 0xB3CB),
("skein1024-864", 0xB3CC), ("skein1024-872", 0xB3CD),
("skein1024-880", 0xB3CE), ("skein1024-888", 0xB3CF),
("skein1024-896", 0xB3D0), ("skein1024-904", 0xB3D1),
("skein1024-912", 0xB3D2), ("skein1024-920", 0xB3D3),
("skein1024-928", 0xB3D4), ("skein1024-936", 0xB3D5),
("skein1024-944", 0xB3D6), ("skein1024-952", 0xB3D7),
("skein1024-960", 0xB3D8), ("skein1024-968", 0xB3D9),
("skein1024-976", 0xB3DA), ("skein1024-984", 0xB3DB),
("skein1024-992", 0xB3DC), ("skein1024-1000", 0xB3DD),
("skein1024-1008", 0xB3DE), ("skein1024-1016", 0xB3DF),
("skein1024-1024", 0xB3E0),
# multiaddrs
("ip4", 0x04),
@@ -385,11 +196,9 @@ const MultiCodecList = [
("p2p", 0x01A5),
("http", 0x01E0),
("https", 0x01BB),
("tls", 0x01C0),
("quic", 0x01CC),
("quic-v1", 0x01CD),
("ws", 0x01DD),
("wss", 0x01DE),
("wss", 0x01DE), # not in multicodec list
("p2p-websocket-star", 0x01DF), # not in multicodec list
("p2p-webrtc-star", 0x0113), # not in multicodec list
("p2p-webrtc-direct", 0x0114), # not in multicodec list
@@ -397,7 +206,6 @@ const MultiCodecList = [
("onion3", 0x01BD),
("p2p-circuit", 0x0122),
("libp2p-peer-record", 0x0301),
("memory", 0x0309),
("dns", 0x35),
("dns4", 0x36),
("dns6", 0x37),
@@ -405,7 +213,6 @@ const MultiCodecList = [
# IPLD formats
("dag-pb", 0x70),
("dag-cbor", 0x71),
("libp2p-key", 0x72),
("dag-json", 0x129),
("git-raw", 0x78),
("eth-block", 0x90),
@@ -429,7 +236,7 @@ const MultiCodecList = [
("dash-tx", 0xF1),
("torrent-info", 0x7B),
("torrent-file", 0x7C),
("ed25519-pub", 0xED),
("ed25519-pub", 0xED)
]
type
@@ -437,7 +244,8 @@ type
MultiCodecError* = enum
MultiCodecNotSupported
const InvalidMultiCodec* = MultiCodec(-1)
const
InvalidMultiCodec* = MultiCodec(-1)
proc initMultiCodecNameTable(): Table[string, int] {.compileTime.} =
for item in MultiCodecList:
@@ -484,6 +292,10 @@ proc `==`*(a, b: MultiCodec): bool =
## Returns ``true`` if MultiCodecs ``a`` and ``b`` are equal.
int(a) == int(b)
proc `!=`*(a, b: MultiCodec): bool =
## Returns ``true`` if MultiCodecs ``a`` and ``b`` are not equal.
int(a) != int(b)
proc hash*(m: MultiCodec): Hash {.inline.} =
## Hash procedure for tables.
hash(int(m))
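A short usage sketch (not part of the diff), assuming the module is importable as libp2p/multicodec: multiCodec() resolves a name from the table above at compile time, and the == and hash procs let the resulting values be compared and used as Table keys.

import tables
import libp2p/multicodec

const sha2 = multiCodec("sha2-256")

var digestSizes = initTable[MultiCodec, int]()
digestSizes[sha2] = 32                     # hash() above makes MultiCodec a valid key

assert sha2 == multiCodec("sha2-256")
assert sha2 != multiCodec("sha2-512")
echo digestSizes[sha2]                     # 32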

View File

@@ -21,14 +21,16 @@
## 1. SKEIN
## 2. MURMUR
{.push raises: [].}
{.used.}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import tables
import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
import varint, vbuffer, multicodec, multibase
import stew/base58
import results
import stew/results
export results
# This is workaround for Nim `import` bug.
export sha, sha2, keccak, blake2, hash, utils
@@ -42,9 +44,8 @@ const
ErrParseError = "Parse error fromHex"
type
MHashCoderProc* = proc(data: openArray[byte], output: var openArray[byte]) {.
nimcall, gcsafe, noSideEffect, raises: []
.}
MHashCoderProc* = proc(data: openArray[byte],
output: var openArray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
MHash* = object
mcodec*: MultiCodec
size*: int
@@ -60,152 +61,107 @@ type
proc identhash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var length =
if len(data) > len(output):
len(output)
else:
len(data)
var length = if len(data) > len(output): len(output)
else: len(data)
copyMem(addr output[0], unsafeAddr data[0], length)
proc sha1hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha1.digest(data)
var length =
if sha1.sizeDigest > len(output):
len(output)
else:
sha1.sizeDigest
var length = if sha1.sizeDigest > len(output): len(output)
else: sha1.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc dblsha2_256hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest1 = sha256.digest(data)
var digest2 = sha256.digest(digest1.data)
var length =
if sha256.sizeDigest > len(output):
len(output)
else:
sha256.sizeDigest
var length = if sha256.sizeDigest > len(output): len(output)
else: sha256.sizeDigest
copyMem(addr output[0], addr digest2.data[0], length)
proc blake2Bhash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = blake2_512.digest(data)
var length =
if blake2_512.sizeDigest > len(output):
len(output)
else:
blake2_512.sizeDigest
var length = if blake2_512.sizeDigest > len(output): len(output)
else: blake2_512.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc blake2Shash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = blake2_256.digest(data)
var length =
if blake2_256.sizeDigest > len(output):
len(output)
else:
blake2_256.sizeDigest
var length = if blake2_256.sizeDigest > len(output): len(output)
else: blake2_256.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha2_256hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha256.digest(data)
var length =
if sha256.sizeDigest > len(output):
len(output)
else:
sha256.sizeDigest
var length = if sha256.sizeDigest > len(output): len(output)
else: sha256.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha2_512hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha512.digest(data)
var length =
if sha512.sizeDigest > len(output):
len(output)
else:
sha512.sizeDigest
var length = if sha512.sizeDigest > len(output): len(output)
else: sha512.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha3_224hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha3_224.digest(data)
var length =
if sha3_224.sizeDigest > len(output):
len(output)
else:
sha3_224.sizeDigest
var length = if sha3_224.sizeDigest > len(output): len(output)
else: sha3_224.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha3_256hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha3_256.digest(data)
var length =
if sha3_256.sizeDigest > len(output):
len(output)
else:
sha3_256.sizeDigest
var length = if sha3_256.sizeDigest > len(output): len(output)
else: sha3_256.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha3_384hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha3_384.digest(data)
var length =
if sha3_384.sizeDigest > len(output):
len(output)
else:
sha3_384.sizeDigest
var length = if sha3_384.sizeDigest > len(output): len(output)
else: sha3_384.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha3_512hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha3_512.digest(data)
var length =
if sha3_512.sizeDigest > len(output):
len(output)
else:
sha3_512.sizeDigest
var length = if sha3_512.sizeDigest > len(output): len(output)
else: sha3_512.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc keccak_224hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = keccak224.digest(data)
var length =
if keccak224.sizeDigest > len(output):
len(output)
else:
keccak224.sizeDigest
var length = if keccak224.sizeDigest > len(output): len(output)
else: keccak224.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc keccak_256hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = keccak256.digest(data)
var length =
if keccak256.sizeDigest > len(output):
len(output)
else:
keccak256.sizeDigest
var length = if keccak256.sizeDigest > len(output): len(output)
else: keccak256.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc keccak_384hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = keccak384.digest(data)
var length =
if keccak384.sizeDigest > len(output):
len(output)
else:
keccak384.sizeDigest
var length = if keccak384.sizeDigest > len(output): len(output)
else: keccak384.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc keccak_512hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = keccak512.digest(data)
var length =
if keccak512.sizeDigest > len(output):
len(output)
else:
keccak512.sizeDigest
var length = if keccak512.sizeDigest > len(output): len(output)
else: keccak512.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc shake_128hash(data: openArray[byte], output: var openArray[byte]) =
@@ -226,135 +182,151 @@ proc shake_256hash(data: openArray[byte], output: var openArray[byte]) =
discard sctx.output(addr output[0], uint(len(output)))
sctx.clear()
const HashesList = [
MHash(mcodec: multiCodec("identity"), size: 0, coder: identhash),
MHash(mcodec: multiCodec("sha1"), size: sha1.sizeDigest, coder: sha1hash),
MHash(
mcodec: multiCodec("dbl-sha2-256"), size: sha256.sizeDigest, coder: dblsha2_256hash
),
MHash(mcodec: multiCodec("sha2-256"), size: sha256.sizeDigest, coder: sha2_256hash),
MHash(mcodec: multiCodec("sha2-512"), size: sha512.sizeDigest, coder: sha2_512hash),
MHash(mcodec: multiCodec("sha3-224"), size: sha3_224.sizeDigest, coder: sha3_224hash),
MHash(mcodec: multiCodec("sha3-256"), size: sha3_256.sizeDigest, coder: sha3_256hash),
MHash(mcodec: multiCodec("sha3-384"), size: sha3_384.sizeDigest, coder: sha3_384hash),
MHash(mcodec: multiCodec("sha3-512"), size: sha3_512.sizeDigest, coder: sha3_512hash),
MHash(mcodec: multiCodec("shake-128"), size: 32, coder: shake_128hash),
MHash(mcodec: multiCodec("shake-256"), size: 64, coder: shake_256hash),
MHash(
mcodec: multiCodec("keccak-224"), size: keccak224.sizeDigest, coder: keccak_224hash
),
MHash(
mcodec: multiCodec("keccak-256"), size: keccak256.sizeDigest, coder: keccak_256hash
),
MHash(
mcodec: multiCodec("keccak-384"), size: keccak384.sizeDigest, coder: keccak_384hash
),
MHash(
mcodec: multiCodec("keccak-512"), size: keccak512.sizeDigest, coder: keccak_512hash
),
MHash(mcodec: multiCodec("blake2b-8"), size: 1, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-16"), size: 2, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-24"), size: 3, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-32"), size: 4, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-40"), size: 5, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-48"), size: 6, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-56"), size: 7, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-64"), size: 8, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-72"), size: 9, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-80"), size: 10, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-88"), size: 11, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-96"), size: 12, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-104"), size: 13, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-112"), size: 14, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-120"), size: 15, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-128"), size: 16, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-136"), size: 17, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-144"), size: 18, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-152"), size: 19, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-160"), size: 20, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-168"), size: 21, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-176"), size: 22, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-184"), size: 23, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-192"), size: 24, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-200"), size: 25, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-208"), size: 26, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-216"), size: 27, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-224"), size: 28, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-232"), size: 29, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-240"), size: 30, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-248"), size: 31, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-256"), size: 32, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-264"), size: 33, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-272"), size: 34, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-280"), size: 35, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-288"), size: 36, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-296"), size: 37, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-304"), size: 38, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-312"), size: 39, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-320"), size: 40, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-328"), size: 41, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-336"), size: 42, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-344"), size: 43, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-352"), size: 44, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-360"), size: 45, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-368"), size: 46, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-376"), size: 47, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-384"), size: 48, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-392"), size: 49, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-400"), size: 50, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-408"), size: 51, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-416"), size: 52, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-424"), size: 53, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-432"), size: 54, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-440"), size: 55, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-448"), size: 56, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-456"), size: 57, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-464"), size: 58, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-472"), size: 59, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-480"), size: 60, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-488"), size: 61, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-496"), size: 62, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-504"), size: 63, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-512"), size: 64, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2s-8"), size: 1, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-16"), size: 2, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-24"), size: 3, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-32"), size: 4, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-40"), size: 5, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-48"), size: 6, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-56"), size: 7, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-64"), size: 8, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-72"), size: 9, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-80"), size: 10, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-88"), size: 11, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-96"), size: 12, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-104"), size: 13, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-112"), size: 14, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-120"), size: 15, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-128"), size: 16, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-136"), size: 17, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-144"), size: 18, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-152"), size: 19, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-160"), size: 20, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-168"), size: 21, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-176"), size: 22, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-184"), size: 23, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-192"), size: 24, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-200"), size: 25, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-208"), size: 26, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-216"), size: 27, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-224"), size: 28, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-232"), size: 29, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-240"), size: 30, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-248"), size: 31, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-256"), size: 32, coder: blake2Shash),
]
const
HashesList = [
MHash(mcodec: multiCodec("identity"), size: 0,
coder: identhash),
MHash(mcodec: multiCodec("sha1"), size: sha1.sizeDigest,
coder: sha1hash),
MHash(mcodec: multiCodec("dbl-sha2-256"), size: sha256.sizeDigest,
coder: dblsha2_256hash
),
MHash(mcodec: multiCodec("sha2-256"), size: sha256.sizeDigest,
coder: sha2_256hash
),
MHash(mcodec: multiCodec("sha2-512"), size: sha512.sizeDigest,
coder: sha2_512hash
),
MHash(mcodec: multiCodec("sha3-224"), size: sha3_224.sizeDigest,
coder: sha3_224hash
),
MHash(mcodec: multiCodec("sha3-256"), size: sha3_256.sizeDigest,
coder: sha3_256hash
),
MHash(mcodec: multiCodec("sha3-384"), size: sha3_384.sizeDigest,
coder: sha3_384hash
),
MHash(mcodec: multiCodec("sha3-512"), size: sha3_512.sizeDigest,
coder: sha3_512hash
),
MHash(mcodec: multiCodec("shake-128"), size: 32, coder: shake_128hash),
MHash(mcodec: multiCodec("shake-256"), size: 64, coder: shake_256hash),
MHash(mcodec: multiCodec("keccak-224"), size: keccak224.sizeDigest,
coder: keccak_224hash
),
MHash(mcodec: multiCodec("keccak-256"), size: keccak256.sizeDigest,
coder: keccak_256hash
),
MHash(mcodec: multiCodec("keccak-384"), size: keccak384.sizeDigest,
coder: keccak_384hash
),
MHash(mcodec: multiCodec("keccak-512"), size: keccak512.sizeDigest,
coder: keccak_512hash
),
MHash(mcodec: multiCodec("blake2b-8"), size: 1, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-16"), size: 2, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-24"), size: 3, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-32"), size: 4, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-40"), size: 5, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-48"), size: 6, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-56"), size: 7, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-64"), size: 8, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-72"), size: 9, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-80"), size: 10, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-88"), size: 11, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-96"), size: 12, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-104"), size: 13, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-112"), size: 14, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-120"), size: 15, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-128"), size: 16, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-136"), size: 17, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-144"), size: 18, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-152"), size: 19, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-160"), size: 20, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-168"), size: 21, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-176"), size: 22, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-184"), size: 23, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-192"), size: 24, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-200"), size: 25, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-208"), size: 26, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-216"), size: 27, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-224"), size: 28, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-232"), size: 29, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-240"), size: 30, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-248"), size: 31, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-256"), size: 32, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-264"), size: 33, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-272"), size: 34, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-280"), size: 35, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-288"), size: 36, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-296"), size: 37, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-304"), size: 38, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-312"), size: 39, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-320"), size: 40, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-328"), size: 41, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-336"), size: 42, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-344"), size: 43, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-352"), size: 44, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-360"), size: 45, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-368"), size: 46, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-376"), size: 47, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-384"), size: 48, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-392"), size: 49, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-400"), size: 50, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-408"), size: 51, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-416"), size: 52, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-424"), size: 53, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-432"), size: 54, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-440"), size: 55, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-448"), size: 56, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-456"), size: 57, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-464"), size: 58, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-472"), size: 59, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-480"), size: 60, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-488"), size: 61, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-496"), size: 62, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-504"), size: 63, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-512"), size: 64, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2s-8"), size: 1, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-16"), size: 2, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-24"), size: 3, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-32"), size: 4, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-40"), size: 5, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-48"), size: 6, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-56"), size: 7, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-64"), size: 8, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-72"), size: 9, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-80"), size: 10, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-88"), size: 11, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-96"), size: 12, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-104"), size: 13, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-112"), size: 14, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-120"), size: 15, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-128"), size: 16, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-136"), size: 17, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-144"), size: 18, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-152"), size: 19, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-160"), size: 20, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-168"), size: 21, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-176"), size: 22, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-184"), size: 23, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-192"), size: 24, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-200"), size: 25, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-208"), size: 26, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-216"), size: 27, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-224"), size: 28, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-232"), size: 29, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-240"), size: 30, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-248"), size: 31, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-256"), size: 32, coder: blake2Shash)
]
proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
for item in HashesList:
result[item.mcodec] = item
const CodeHashes = initMultiHashCodeTable()
const
CodeHashes = initMultiHashCodeTable()
proc digestImplWithHash(hash: MHash, data: openArray[byte]): MultiHash =
var buffer: array[MaxHashSize, byte]
@@ -384,9 +356,8 @@ proc digestImplWithoutHash(hash: MHash, data: openArray[byte]): MultiHash =
result.data.writeArray(data)
result.data.finish()
proc digest*(
mhtype: typedesc[MultiHash], hashname: string, data: openArray[byte]
): MhResult[MultiHash] {.inline.} =
proc digest*(mhtype: typedesc[MultiHash], hashname: string,
data: openArray[byte]): MhResult[MultiHash] {.inline.} =
## Perform digest calculation using hash algorithm with name ``hashname`` on
## data array ``data``.
let mc = MultiCodec.codec(hashname)
@@ -399,9 +370,8 @@ proc digest*(
else:
ok(digestImplWithHash(hash, data))
proc digest*(
mhtype: typedesc[MultiHash], hashcode: int, data: openArray[byte]
): MhResult[MultiHash] {.inline.} =
proc digest*(mhtype: typedesc[MultiHash], hashcode: int,
data: openArray[byte]): MhResult[MultiHash] {.inline.} =
## Perform digest calculation using hash algorithm with code ``hashcode`` on
## data array ``data``.
let hash = CodeHashes.getOrDefault(hashcode)
@@ -410,9 +380,8 @@ proc digest*(
else:
ok(digestImplWithHash(hash, data))
proc init*[T](
mhtype: typedesc[MultiHash], hashname: string, mdigest: MDigest[T]
): MhResult[MultiHash] {.inline.} =
proc init*[T](mhtype: typedesc[MultiHash], hashname: string,
mdigest: MDigest[T]): MhResult[MultiHash] {.inline.} =
## Create MultiHash from nimcrypto's `MDigest` object and hash algorithm name
## ``hashname``.
let mc = MultiCodec.codec(hashname)
@@ -427,9 +396,8 @@ proc init*[T](
else:
ok(digestImplWithoutHash(hash, mdigest.data))
proc init*[T](
mhtype: typedesc[MultiHash], hashcode: MultiCodec, mdigest: MDigest[T]
): MhResult[MultiHash] {.inline.} =
proc init*[T](mhtype: typedesc[MultiHash], hashcode: MultiCodec,
mdigest: MDigest[T]): MhResult[MultiHash] {.inline.} =
## Create MultiHash from nimcrypto's `MDigest` and hash algorithm code
## ``hashcode``.
let hash = CodeHashes.getOrDefault(hashcode)
@@ -440,9 +408,8 @@ proc init*[T](
else:
ok(digestImplWithoutHash(hash, mdigest.data))
proc init*(
mhtype: typedesc[MultiHash], hashname: string, bdigest: openArray[byte]
): MhResult[MultiHash] {.inline.} =
proc init*(mhtype: typedesc[MultiHash], hashname: string,
bdigest: openArray[byte]): MhResult[MultiHash] {.inline.} =
  ## Create MultiHash from array of bytes ``bdigest`` and hash algorithm name
  ## ``hashname``.
let mc = MultiCodec.codec(hashname)
@@ -457,9 +424,8 @@ proc init*(
else:
ok(digestImplWithoutHash(hash, bdigest))
proc init*(
mhtype: typedesc[MultiHash], hashcode: MultiCodec, bdigest: openArray[byte]
): MhResult[MultiHash] {.inline.} =
proc init*(mhtype: typedesc[MultiHash], hashcode: MultiCodec,
bdigest: openArray[byte]): MhResult[MultiHash] {.inline.} =
## Create MultiHash from array of bytes ``bdigest`` and hash algorithm code
## ``hashcode``.
let hash = CodeHashes.getOrDefault(hashcode)
@@ -470,9 +436,8 @@ proc init*(
else:
ok(digestImplWithoutHash(hash, bdigest))
proc decode*(
mhtype: typedesc[MultiHash], data: openArray[byte], mhash: var MultiHash
): MhResult[int] =
proc decode*(mhtype: typedesc[MultiHash], data: openArray[byte],
mhash: var MultiHash): MhResult[int] =
## Decode MultiHash value from array of bytes ``data``.
##
## On success decoded MultiHash will be stored into ``mhash`` and number of
@@ -511,10 +476,9 @@ proc decode*(
if not vb.isEnough(int(size)):
return err(ErrDecodeError)
mhash =
?MultiHash.init(
MultiCodec(code), vb.buffer.toOpenArray(vb.offset, vb.offset + int(size) - 1)
)
mhash = ? MultiHash.init(MultiCodec(code),
vb.buffer.toOpenArray(vb.offset,
vb.offset + int(size) - 1))
ok(vb.offset + int(size))
proc validate*(mhtype: typedesc[MultiHash], data: openArray[byte]): bool =
@@ -547,27 +511,27 @@ proc validate*(mhtype: typedesc[MultiHash], data: openArray[byte]): bool =
return false
result = true
proc init*(
mhtype: typedesc[MultiHash], data: openArray[byte]
): MhResult[MultiHash] {.inline.} =
proc init*(mhtype: typedesc[MultiHash],
data: openArray[byte]): MhResult[MultiHash] {.inline.} =
## Create MultiHash from bytes array ``data``.
var hash: MultiHash
discard ?MultiHash.decode(data, hash)
discard ? MultiHash.decode(data, hash)
ok(hash)
proc init*(mhtype: typedesc[MultiHash], data: string): MhResult[MultiHash] {.inline.} =
## Create MultiHash from hexadecimal string representation ``data``.
var hash: MultiHash
try:
discard ?MultiHash.decode(fromHex(data), hash)
discard ? MultiHash.decode(fromHex(data), hash)
ok(hash)
except ValueError:
err(ErrParseError)
proc init58*(mhtype: typedesc[MultiHash], data: string): MultiHash {.inline.} =
proc init58*(mhtype: typedesc[MultiHash],
data: string): MultiHash {.inline.} =
## Create MultiHash from BASE58 encoded string representation ``data``.
if MultiHash.decode(Base58.decode(data), result) == -1:
raise newException(MultihashError, "Incorrect MultiHash binary format in init58")
raise newException(MultihashError, "Incorrect MultiHash binary format")
proc cmp(a: openArray[byte], b: openArray[byte]): bool {.inline.} =
if len(a) != len(b):
@@ -577,7 +541,7 @@ proc cmp(a: openArray[byte], b: openArray[byte]): bool {.inline.} =
while n > 0:
dec(n)
diff = int(a[n]) - int(b[n])
res = (res and - not (diff)) or diff
res = (res and -not(diff)) or diff
result = (res == 0)
proc `==`*[T](mh: MultiHash, mdigest: MDigest[T]): bool =
@@ -587,10 +551,8 @@ proc `==`*[T](mh: MultiHash, mdigest: MDigest[T]): bool =
return false
if len(mdigest.data) != mh.size:
return false
result = cmp(
mh.data.buffer.toOpenArray(mh.dpos, mh.dpos + mh.size - 1),
mdigest.data.toOpenArray(0, mdigest.data.high),
)
result = cmp(mh.data.buffer.toOpenArray(mh.dpos, mh.dpos + mh.size - 1),
mdigest.data.toOpenArray(0, mdigest.data.high))
proc `==`*[T](mdigest: MDigest[T], mh: MultiHash): bool {.inline.} =
## Compares MultiHash with nimcrypto's MDigest[T], returns ``true`` if
@@ -606,10 +568,8 @@ proc `==`*(a: MultiHash, b: MultiHash): bool =
return false
if a.size != b.size:
return false
result = cmp(
a.data.buffer.toOpenArray(a.dpos, a.dpos + a.size - 1),
b.data.buffer.toOpenArray(b.dpos, b.dpos + b.size - 1),
)
result = cmp(a.data.buffer.toOpenArray(a.dpos, a.dpos + a.size - 1),
b.data.buffer.toOpenArray(b.dpos, b.dpos + b.size - 1))
proc hex*(value: MultiHash): string =
## Return hexadecimal string representation of MultiHash ``value``.
@@ -621,16 +581,16 @@ proc base58*(value: MultiHash): string =
proc `$`*(mh: MultiHash): string =
## Return string representation of MultiHash ``value``.
let digest = toHex(mh.data.buffer.toOpenArray(mh.dpos, mh.dpos + mh.size - 1))
let digest = toHex(mh.data.buffer.toOpenArray(mh.dpos,
mh.dpos + mh.size - 1))
result = $(mh.mcodec) & "/" & digest
proc write*(vb: var VBuffer, mh: MultiHash) {.inline.} =
## Write MultiHash value ``mh`` to buffer ``vb``.
vb.writeArray(mh.data.buffer)
proc encode*(
mbtype: typedesc[MultiBase], encoding: string, mh: MultiHash
): string {.inline.} =
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
mh: MultiHash): string {.inline.} =
## Get MultiBase encoded representation of ``mh`` using encoding
## ``encoding``.
result = MultiBase.encode(encoding, mh.data.buffer)
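
As a quick orientation for the digest API above, a minimal usage sketch follows. The import paths and the use of stew's Result helpers are assumptions made for the example, not part of this diff; "sha2-256" is one of the codecs registered in HashesList.

import libp2p/multihash
import stew/[byteutils, results]

let data = "hello".toBytes()
# digest returns MhResult[MultiHash]; tryGet raises on error
let mh = MultiHash.digest("sha2-256", data).tryGet()
echo mh.hex()   # hex of the whole multihash buffer
echo $mh        # "<codec>/<digest hex>", as produced by `$` above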

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -7,24 +7,29 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/[strutils, sequtils, tables]
import chronos, chronicles, stew/byteutils
import stream/connection, protocols/protocol
import stream/connection,
protocols/protocol
logScope:
topics = "libp2p multistream"
const
MsgSize = 1024
Codec = "/multistream/1.0.0"
MsgSize* = 1024
Codec* = "/multistream/1.0.0"
Na = "na\n"
Ls = "ls\n"
MSCodec* = "\x13" & Codec & "\n"
Na* = "\x03na\n"
Ls* = "\x03ls\n"
type
Matcher* = proc(proto: string): bool {.gcsafe, raises: [].}
Matcher* = proc (proto: string): bool {.gcsafe, raises: [Defect].}
MultiStreamError* = object of LPError
@@ -39,20 +44,23 @@ type
codec*: string
proc new*(T: typedesc[MultistreamSelect]): T =
T(codec: Codec)
T(
codec: MSCodec,
)
template validateSuffix(str: string): untyped =
if str.endsWith("\n"):
str.removeSuffix("\n")
else:
raise (ref MultiStreamError)(msg: "MultistreamSelect failed, malformed message")
if str.endsWith("\n"):
str.removeSuffix("\n")
else:
raise newException(MultiStreamError, "MultistreamSelect failed, malformed message")
proc select*(
_: MultistreamSelect | type MultistreamSelect, conn: Connection, proto: seq[string]
): Future[string] {.async: (raises: [CancelledError, LPStreamError, MultiStreamError]).} =
trace "initiating handshake", conn, codec = Codec
proc select*(m: MultistreamSelect,
conn: Connection,
proto: seq[string]):
Future[string] {.async.} =
trace "initiating handshake", conn, codec = m.codec
## select a remote protocol
await conn.writeLp(Codec & "\n") # write handshake
await conn.write(m.codec) # write handshake
if proto.len() > 0:
trace "selecting proto", conn, proto = proto[0]
await conn.writeLp((proto[0] & "\n")) # select proto
@@ -62,7 +70,7 @@ proc select*(
if s != Codec:
notice "handshake failed", conn, codec = s
raise (ref MultiStreamError)(msg: "MultistreamSelect handshake failed")
raise newException(MultiStreamError, "MultistreamSelect handshake failed")
else:
trace "multistream handshake success", conn
@@ -78,7 +86,7 @@ proc select*(
return proto[0]
elif proto.len > 1:
# Try to negotiate alternatives
let protos = proto[1 ..< proto.len()]
let protos = proto[1..<proto.len()]
trace "selecting one of several protos", conn, protos = protos
for p in protos:
trace "selecting proto", conn, proto = p
@@ -94,31 +102,24 @@ proc select*(
# No alternatives, fail
return ""
proc select*(
_: MultistreamSelect | type MultistreamSelect, conn: Connection, proto: string
): Future[bool] {.async: (raises: [CancelledError, LPStreamError, MultiStreamError]).} =
proc select*(m: MultistreamSelect,
conn: Connection,
proto: string): Future[bool] {.async.} =
if proto.len > 0:
(await MultistreamSelect.select(conn, @[proto])) == proto
return (await m.select(conn, @[proto])) == proto
else:
(await MultistreamSelect.select(conn, @[])) == Codec
return (await m.select(conn, @[])) == Codec
proc select*(
m: MultistreamSelect, conn: Connection
): Future[bool] {.
async: (raises: [CancelledError, LPStreamError, MultiStreamError], raw: true)
.} =
proc select*(m: MultistreamSelect, conn: Connection): Future[bool] =
m.select(conn, "")
proc list*(
m: MultistreamSelect, conn: Connection
): Future[seq[string]] {.
async: (raises: [CancelledError, LPStreamError, MultiStreamError])
.} =
proc list*(m: MultistreamSelect,
conn: Connection): Future[seq[string]] {.async.} =
## list remote protos requests on connection
if not await m.select(conn):
return
await conn.writeLp(Ls) # send ls
await conn.write(Ls) # send ls
var list = newSeq[string]()
let ms = string.fromBytes(await conn.readLp(MsgSize))
@@ -128,148 +129,108 @@ proc list*(
result = list
proc handle*(
_: type MultistreamSelect,
conn: Connection,
protos: seq[string],
matchers = newSeq[Matcher](),
active: bool = false,
): Future[string] {.async: (raises: [CancelledError, LPStreamError, MultiStreamError]).} =
trace "Starting multistream negotiation", conn, handshaked = active
var handshaked = active
while not conn.atEof:
var ms = string.fromBytes(await conn.readLp(MsgSize))
validateSuffix(ms)
if not handshaked and ms != Codec:
debug "expected handshake message", conn, instead = ms
raise (ref MultiStreamError)(
msg: "MultistreamSelect handling failed, invalid first message"
)
trace "handle: got request", conn, ms
if ms.len() <= 0:
trace "handle: invalid proto", conn
await conn.writeLp(Na)
case ms
of "ls":
trace "handle: listing protos", conn
      #TODO this doesn't seem to follow spec, each protocol
# should be length prefixed. Not very important
# since LS is getting deprecated
await conn.writeLp(protos.join("\n") & "\n")
of Codec:
if not handshaked:
await conn.writeLp(Codec & "\n")
handshaked = true
else:
trace "handle: sending `na` for duplicate handshake while handshaked", conn
await conn.writeLp(Na)
elif ms in protos or matchers.anyIt(it(ms)):
trace "found handler", conn, protocol = ms
await conn.writeLp(ms & "\n")
conn.protocol = ms
return ms
else:
trace "no handlers", conn, protocol = ms
await conn.writeLp(Na)
proc handle*(
m: MultistreamSelect, conn: Connection, active: bool = false
) {.async: (raises: [CancelledError]).} =
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
trace "Starting multistream handler", conn, handshaked = active
var
protos: seq[string]
matchers: seq[Matcher]
for h in m.handlers:
if h.match != nil:
matchers.add(h.match)
for proto in h.protos:
protos.add(proto)
var handshaked = active
try:
let ms = await MultistreamSelect.handle(conn, protos, matchers, active)
for h in m.handlers:
if (h.match != nil and h.match(ms)) or h.protos.contains(ms):
trace "found handler", conn, protocol = ms
while not conn.atEof:
var ms = string.fromBytes(await conn.readLp(MsgSize))
validateSuffix(ms)
var protocolHolder = h
let maxIncomingStreams = protocolHolder.protocol.maxIncomingStreams
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >= maxIncomingStreams:
debug "Max streams for protocol reached, blocking new stream",
conn, protocol = ms, maxIncomingStreams
return
protocolHolder.openedStreams.inc(conn.peerId)
try:
await protocolHolder.protocol.handler(conn, ms)
finally:
protocolHolder.openedStreams.inc(conn.peerId, -1)
if protocolHolder.openedStreams[conn.peerId] == 0:
protocolHolder.openedStreams.del(conn.peerId)
return
debug "no handlers", conn, ms
if not handshaked and ms != Codec:
notice "expected handshake message", conn, instead=ms
raise newException(CatchableError,
"MultistreamSelect handling failed, invalid first message")
trace "handle: got request", conn, ms
if ms.len() <= 0:
trace "handle: invalid proto", conn
await conn.write(Na)
if m.handlers.len() == 0:
trace "handle: sending `na` for protocol", conn, protocol = ms
await conn.write(Na)
continue
case ms:
of "ls":
trace "handle: listing protos", conn
var protos = ""
for h in m.handlers:
for proto in h.protos:
protos &= (proto & "\n")
await conn.writeLp(protos)
of Codec:
if not handshaked:
await conn.write(m.codec)
handshaked = true
else:
trace "handle: sending `na` for duplicate handshake while handshaked",
conn
await conn.write(Na)
else:
for h in m.handlers:
if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
trace "found handler", conn, protocol = ms
var protocolHolder = h
let maxIncomingStreams = protocolHolder.protocol.maxIncomingStreams
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >= maxIncomingStreams:
debug "Max streams for protocol reached, blocking new stream",
conn, protocol = ms, maxIncomingStreams
return
protocolHolder.openedStreams.inc(conn.peerId)
try:
await conn.writeLp(ms & "\n")
conn.protocol = ms
await protocolHolder.protocol.handler(conn, ms)
finally:
protocolHolder.openedStreams.inc(conn.peerId, -1)
if protocolHolder.openedStreams[conn.peerId] == 0:
protocolHolder.openedStreams.del(conn.peerId)
return
debug "no handlers", conn, protocol = ms
await conn.write(Na)
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "Exception in multistream", conn, description = exc.msg
trace "Exception in multistream", conn, msg = exc.msg
finally:
await conn.close()
trace "Stopped multistream handler", conn
proc addHandler*(
m: MultistreamSelect,
codecs: seq[string],
protocol: LPProtocol,
matcher: Matcher = nil,
) =
proc addHandler*(m: MultistreamSelect,
codecs: seq[string],
protocol: LPProtocol,
matcher: Matcher = nil) =
trace "registering protocols", protos = codecs
m.handlers.add(HandlerHolder(protos: codecs, protocol: protocol, match: matcher))
m.handlers.add(HandlerHolder(protos: codecs,
protocol: protocol,
match: matcher))
proc addHandler*(
m: MultistreamSelect, codec: string, protocol: LPProtocol, matcher: Matcher = nil
) =
proc addHandler*(m: MultistreamSelect,
codec: string,
protocol: LPProtocol,
matcher: Matcher = nil) =
addHandler(m, @[codec], protocol, matcher)
proc addHandler*[E](
m: MultistreamSelect,
codec: string,
handler:
LPProtoHandler |
proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
matcher: Matcher = nil,
) =
proc addHandler*(m: MultistreamSelect,
codec: string,
handler: LPProtoHandler,
matcher: Matcher = nil) =
## helper to allow registering pure handlers
trace "registering proto handler", proto = codec
let protocol = new LPProtocol
protocol.codec = codec
protocol.handler = handler
m.handlers.add(HandlerHolder(protos: @[codec], protocol: protocol, match: matcher))
m.handlers.add(HandlerHolder(protos: @[codec],
protocol: protocol,
match: matcher))
proc start*(m: MultistreamSelect) {.async: (raises: [CancelledError]).} =
let futs = m.handlers.mapIt(it.protocol.start())
try:
await allFutures(futs)
for fut in futs:
await fut
except CancelledError as exc:
var pending: seq[Future[void].Raising([])]
doAssert m.handlers.len == futs.len, "Handlers modified while starting"
for i, fut in futs:
if not fut.finished:
pending.add fut.cancelAndWait()
elif fut.completed:
pending.add m.handlers[i].protocol.stop()
else:
static:
doAssert typeof(fut).E is (CancelledError,)
await noCancel allFutures(pending)
raise exc
proc start*(m: MultistreamSelect) {.async.} =
await allFutures(m.handlers.mapIt(it.protocol.start()))
proc stop*(m: MultistreamSelect) {.async: (raises: []).} =
let futs = m.handlers.mapIt(it.protocol.stop())
await noCancel allFutures(futs)
for fut in futs:
await fut
proc stop*(m: MultistreamSelect) {.async.} =
await allFutures(m.handlers.mapIt(it.protocol.stop()))
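
A rough sketch of how the negotiation procs above are typically wired together; conn and myProto are placeholders for an established Connection and an LPProtocol instance, not identifiers from this diff.

let ms = MultistreamSelect.new()
ms.addHandler("/myproto/1.0.0", myProto)     # listener side: register a handler

proc dialExample(conn: Connection) {.async.} =
  # dialer side: negotiate the protocol over an existing connection
  if await ms.select(conn, "/myproto/1.0.0"):
    await conn.writeLp("hello")              # the stream now speaks /myproto/1.0.0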

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -7,25 +7,35 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import pkg/[chronos, chronicles, stew/byteutils]
import ../../stream/connection, ../../utility, ../../varint, ../../vbuffer, ../muxer
import pkg/[chronos, nimcrypto/utils, chronicles, stew/byteutils]
import ../../stream/connection,
../../utility,
../../varint,
../../vbuffer,
../muxer
logScope:
topics = "libp2p mplexcoder"
type
MessageType* {.pure.} = enum
New
MsgIn
MsgOut
CloseIn
CloseOut
ResetIn
New,
MsgIn,
MsgOut,
CloseIn,
CloseOut,
ResetIn,
ResetOut
Msg* = tuple[id: uint64, msgType: MessageType, data: seq[byte]]
Msg* = tuple
id: uint64
msgType: MessageType
data: seq[byte]
InvalidMplexMsgType* = object of MuxerError
@@ -35,9 +45,7 @@ const MaxMsgSize* = 1 shl 20 # 1mb
proc newInvalidMplexMsgType*(): ref InvalidMplexMsgType =
newException(InvalidMplexMsgType, "invalid message type")
proc readMsg*(
conn: Connection
): Future[Msg] {.async: (raises: [CancelledError, LPStreamError, MuxerError]).} =
proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
let header = await conn.readVarint()
trace "read header varint", varint = header, conn
@@ -50,9 +58,10 @@ proc readMsg*(
return (header shr 3, MessageType(msgType), data)
proc writeMsg*(
conn: Connection, id: uint64, msgType: MessageType, data: seq[byte] = @[]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
proc writeMsg*(conn: Connection,
id: uint64,
msgType: MessageType,
data: seq[byte] = @[]): Future[void] =
var
left = data.len
offset = 0
@@ -60,11 +69,8 @@ proc writeMsg*(
# Split message into length-prefixed chunks
while left > 0 or data.len == 0:
let chunkSize =
if left > MaxMsgSize:
MaxMsgSize - 64
else:
left
let
chunkSize = if left > MaxMsgSize: MaxMsgSize - 64 else: left
buf.writePBVarint(id shl 3 or ord(msgType).uint64)
buf.writeSeq(data.toOpenArray(offset, offset + chunkSize - 1))
@@ -81,7 +87,8 @@ proc writeMsg*(
# message gets written before some of the chunks
conn.write(buf.buffer)
proc writeMsg*(
conn: Connection, id: uint64, msgType: MessageType, data: string
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
proc writeMsg*(conn: Connection,
id: uint64,
msgType: MessageType,
data: string): Future[void] =
conn.writeMsg(id, msgType, data.toBytes())
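
For reference, the varint header read and written above packs the stream id and message type into one value. The sketch below restates the packing used in writeMsg; the low-3-bit unpacking on the receive side is the usual mplex framing, though that exact line sits outside the hunks shown here.

let header = (id shl 3) or ord(msgType).uint64   # upper bits: stream id, low 3 bits: type
# receive side:
let streamId = header shr 3
let kind     = MessageType(header and 0x7)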

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -7,11 +7,17 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/[oids, strformat]
import pkg/[chronos, chronicles, metrics]
import ./coder, ../muxer, ../../stream/[bufferstream, connection], ../../peerinfo
import pkg/[chronos, chronicles, metrics, nimcrypto/utils]
import ./coder,
../muxer,
../../stream/[bufferstream, connection, streamseq],
../../peerinfo
export connection
@@ -19,15 +25,13 @@ logScope:
topics = "libp2p mplexchannel"
when defined(libp2p_mplex_metrics):
declareHistogram libp2p_mplex_qlen,
"message queue length",
declareHistogram libp2p_mplex_qlen, "message queue length",
buckets = [0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0]
declareCounter libp2p_mplex_qlenclose, "closed because of max queuelen"
declareHistogram libp2p_mplex_qtime, "message queuing time"
when defined(libp2p_network_protocols_metrics):
declareCounter libp2p_protocols_bytes,
"total sent or received bytes", ["protocol", "direction"]
declareCounter libp2p_protocols_bytes, "total sent or received bytes", ["protocol", "direction"]
## Channel half-closed states
##
@@ -41,41 +45,39 @@ when defined(libp2p_network_protocols_metrics):
## EOF marker
const
MaxWrites = 1024
##\
MaxWrites = 1024 ##\
## Maximum number of in-flight writes - after this, we disconnect the peer
LPChannelTrackerName* = "LPChannel"
type LPChannel* = ref object of BufferStream
id*: uint64 # channel id
name*: string # name of the channel (for debugging)
  conn*: Connection # wrapped connection used for writing
initiator*: bool # initiated remotely or locally flag
isOpen*: bool # has channel been opened
closedLocal*: bool # has channel been closed locally
remoteReset*: bool # has channel been remotely reset
localReset*: bool # has channel been reset locally
msgCode*: MessageType # cached in/out message code
closeCode*: MessageType # cached in/out close code
resetCode*: MessageType # cached in/out reset code
writes*: int # In-flight writes
type
LPChannel* = ref object of BufferStream
id*: uint64 # channel id
name*: string # name of the channel (for debugging)
    conn*: Connection # wrapped connection used for writing
initiator*: bool # initiated remotely or locally flag
isOpen*: bool # has channel been opened
closedLocal*: bool # has channel been closed locally
remoteReset*: bool # has channel been remotely reset
localReset*: bool # has channel been reset locally
msgCode*: MessageType # cached in/out message code
closeCode*: MessageType # cached in/out close code
resetCode*: MessageType # cached in/out reset code
writes*: int # In-flight writes
writesBytes*: int # In-flight writes bytes
func shortLog*(s: LPChannel): auto =
try:
if s == nil:
"LPChannel(nil)"
if s.isNil: "LPChannel(nil)"
elif s.name != $s.oid and s.name.len > 0:
&"{shortLog(s.conn.peerId)}:{s.oid}:{s.name}"
else:
&"{shortLog(s.conn.peerId)}:{s.oid}"
else: &"{shortLog(s.conn.peerId)}:{s.oid}"
except ValueError as exc:
raiseAssert(exc.msg)
raise newException(Defect, exc.msg)
chronicles.formatIt(LPChannel):
shortLog(it)
chronicles.formatIt(LPChannel): shortLog(it)
proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
proc open*(s: LPChannel) {.async, gcsafe.} =
trace "Opening channel", s, conn = s.conn
if s.conn.isClosed:
return
@@ -84,20 +86,20 @@ proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
s.isOpen = true
except CancelledError as exc:
raise exc
except LPStreamError as exc:
except CatchableError as exc:
await s.conn.close()
raise newException(LPStreamError, "Opening LPChannel failed: " & exc.msg, exc)
raise exc
method closed*(s: LPChannel): bool =
s.closedLocal
proc closeUnderlying(s: LPChannel): Future[void] {.async: (raises: []).} =
proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
## Channels may be closed for reading and writing in any order - we'll close
## the underlying bufferstream when both directions are closed
if s.closedLocal and s.atEof():
await procCall BufferStream(s).close()
proc reset*(s: LPChannel) {.async: (raises: []).} =
proc reset*(s: LPChannel) {.async, gcsafe.} =
if s.isClosed:
trace "Already closed", s
return
@@ -110,21 +112,22 @@ proc reset*(s: LPChannel) {.async: (raises: []).} =
if s.isOpen and not s.conn.isClosed:
# If the connection is still active, notify the other end
proc resetMessage() {.async: (raises: []).} =
proc resetMessage() {.async.} =
try:
trace "sending reset message", s, conn = s.conn
await noCancel s.conn.writeMsg(s.id, s.resetCode) # write reset
except LPStreamError as exc:
trace "Can't send reset message", s, conn = s.conn, description = exc.msg
await s.conn.writeMsg(s.id, s.resetCode) # write reset
except CatchableError as exc:
# No cancellations
await s.conn.close()
trace "Can't send reset message", s, conn = s.conn, msg = exc.msg
asyncSpawn resetMessage()
await s.closeImpl()
await s.closeImpl() # noraises, nocancels
trace "Channel reset", s
method close*(s: LPChannel) {.async: (raises: []).} =
method close*(s: LPChannel) {.async, gcsafe.} =
## Close channel for writing - a message will be sent to the other peer
## informing them that the channel is closed and that we're waiting for
## their acknowledgement.
@@ -138,41 +141,38 @@ method close*(s: LPChannel) {.async: (raises: []).} =
if s.isOpen and not s.conn.isClosed:
try:
await s.conn.writeMsg(s.id, s.closeCode) # write close
except CancelledError:
except CancelledError as exc:
await s.conn.close()
except LPStreamError as exc:
raise exc
except CatchableError as exc:
# It's harmless that close message cannot be sent - the connection is
# likely down already
await s.conn.close()
trace "Cannot send close message", s, id = s.id, description = exc.msg
trace "Cannot send close message", s, id = s.id, msg = exc.msg
await s.closeUnderlying() # maybe already eofed
trace "Closed channel", s, len = s.len
method closeWrite*(s: LPChannel) {.async: (raises: []).} =
## For mplex, closeWrite is the same as close - it implements half-close
await s.close()
method initStream*(s: LPChannel) =
if s.objName.len == 0:
s.objName = LPChannelTrackerName
s.timeoutHandler = proc(): Future[void] {.async: (raises: [], raw: true).} =
s.timeoutHandler = proc(): Future[void] {.gcsafe.} =
trace "Idle timeout expired, resetting LPChannel", s
s.reset()
procCall BufferStream(s).initStream()
method readOnce*(
s: LPChannel, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
method readOnce*(s: LPChannel,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
## Mplex relies on reading being done regularly from every channel, or all
## channels are blocked - in particular, this means that reading from one
## channel must not be done from within a callback / read handler of another
## or the reads will lock each other.
if s.remoteReset:
trace "reset stream in readOnce", s
raise newLPStreamResetError()
if s.localReset:
raise newLPStreamClosedError()
@@ -184,28 +184,24 @@ method readOnce*(
let bytes = await procCall BufferStream(s).readOnce(pbytes, nbytes)
when defined(libp2p_network_protocols_metrics):
if s.protocol.len > 0:
libp2p_protocols_bytes.inc(bytes.int64, labelValues = [s.protocol, "in"])
libp2p_protocols_bytes.inc(bytes.int64, labelValues=[s.protocol, "in"])
trace "readOnce", s, bytes
if bytes == 0:
await s.closeUnderlying()
return bytes
except CancelledError as exc:
await s.reset()
raise exc
except LPStreamError as exc:
# Resetting is necessary because data has been lost in s.readBuf and
# there's no way to gracefully recover / use the channel any more
except CatchableError as exc:
# readOnce in BufferStream generally raises on EOF or cancellation - for
# the former, resetting is harmless, for the latter it's necessary because
# data has been lost in s.readBuf and there's no way to gracefully recover /
# use the channel any more
await s.reset()
raise newLPStreamConnDownError(exc)
proc prepareWrite(
s: LPChannel, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
# prepareWrite is the slow path of writing a message - see conditions in
# write
if s.remoteReset:
trace "stream is reset when prepareWrite", s
raise newLPStreamResetError()
if s.closedLocal:
raise newLPStreamClosedError()
@@ -219,7 +215,7 @@ proc prepareWrite(
debug "Closing connection, too many in-flight writes on channel",
s, conn = s.conn, writes = s.writes
when defined(libp2p_mplex_metrics):
libp2p_mplex_qlenclose.inc()
libp2p_mplex_qlenclose.inc()
await s.reset()
await s.conn.close()
return
@@ -230,12 +226,10 @@ proc prepareWrite(
await s.conn.writeMsg(s.id, s.msgCode, msg)
proc completeWrite(
s: LPChannel,
fut: Future[void].Raising([CancelledError, LPStreamError]),
msgLen: int,
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
s: LPChannel, fut: Future[void], msgLen: int): Future[void] {.async.} =
try:
s.writes += 1
s.writesBytes += msgLen
when defined(libp2p_mplex_metrics):
libp2p_mplex_qlen.observe(s.writes.int64 - 1)
@@ -244,11 +238,9 @@ proc completeWrite(
else:
await fut
when defined(libp2p_network_protocols_metrics):
when defined(libp2p_network_protocol_metrics):
if s.protocol.len > 0:
# This crashes on Nim 2.0.2 with `--mm:orc` during `nimble test`
# https://github.com/status-im/nim-metrics/issues/79
libp2p_protocols_bytes.inc(msgLen.int64, labelValues = [s.protocol, "out"])
libp2p_protocols_bytes.inc(msgLen.int64, labelValues=[s.protocol, "out"])
s.activity = true
except CancelledError as exc:
@@ -260,21 +252,24 @@ proc completeWrite(
raise exc
except LPStreamEOFError as exc:
raise exc
except LPStreamError as exc:
trace "exception in lpchannel write handler", s, description = exc.msg
except CatchableError as exc:
trace "exception in lpchannel write handler", s, msg = exc.msg
await s.reset()
await s.conn.close()
raise newLPStreamConnDownError(exc)
finally:
s.writes -= 1
s.writesBytes -= msgLen
method write*(
s: LPChannel, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
method queuedSendBytes*(channel: LPChannel): int = channel.writesBytes
method write*(s: LPChannel, msg: seq[byte]): Future[void] =
## Write to mplex channel - there may be up to MaxWrite concurrent writes
## pending after which the peer is disconnected
let closed = s.closedLocal or s.conn.closed
let
closed = s.closedLocal or s.conn.closed
let fut =
if (not closed) and msg.len > 0 and s.writes < MaxWrites and s.isOpen:
@@ -287,17 +282,16 @@ method write*(
s.completeWrite(fut, msg.len)
method getWrapped*(s: LPChannel): Connection =
s.conn
method getWrapped*(s: LPChannel): Connection = s.conn
proc init*(
L: type LPChannel,
id: uint64,
conn: Connection,
initiator: bool,
name: string = "",
timeout: Duration = DefaultChanTimeout,
): LPChannel =
L: type LPChannel,
id: uint64,
conn: Connection,
initiator: bool,
name: string = "",
timeout: Duration = DefaultChanTimeout): LPChannel =
let chann = L(
id: id,
name: name,
@@ -308,17 +302,12 @@ proc init*(
msgCode: if initiator: MessageType.MsgOut else: MessageType.MsgIn,
closeCode: if initiator: MessageType.CloseOut else: MessageType.CloseIn,
resetCode: if initiator: MessageType.ResetOut else: MessageType.ResetIn,
dir: if initiator: Direction.Out else: Direction.In,
)
dir: if initiator: Direction.Out else: Direction.In)
chann.initStream()
when chronicles.enabledLogLevel == LogLevel.TRACE:
chann.name =
if chann.name.len > 0:
chann.name
else:
$chann.oid
chann.name = if chann.name.len > 0: chann.name else: $chann.oid
trace "Created new lpchannel", s = chann, id, initiator
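
The queuedSendBytes accessor introduced on LPChannel above exposes the channel's in-flight write bytes; a minimal sketch of using it for crude backpressure follows. The helper name and the 1 MiB budget are illustrative only, not part of this change.

proc writeWithBackpressure(chan: LPChannel, msg: seq[byte]) {.async.} =
  # wait until the queued bytes on this channel drop below an arbitrary budget
  while chan.queuedSendBytes() > 1024 * 1024:
    await sleepAsync(10.milliseconds)
  await chan.write(msg)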

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -7,18 +7,20 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import tables, sequtils, oids
import chronos, chronicles, stew/byteutils, metrics
import
../muxer,
../../stream/connection,
../../stream/bufferstream,
../../utility,
../../peerinfo,
./coder,
./lpchannel
import ../muxer,
../../stream/connection,
../../stream/bufferstream,
../../utility,
../../peerinfo,
./coder,
./lpchannel
export muxer
@@ -27,10 +29,12 @@ logScope:
const MplexCodec* = "/mplex/6.7.0"
const MaxChannelCount = 200
const
MaxChannelCount = 200
when defined(libp2p_expensive_metrics):
declareGauge(libp2p_mplex_channels, "mplex channels", labels = ["initiator", "peer"])
declareGauge(libp2p_mplex_channels,
"mplex channels", labels = ["initiator", "peer"])
type
InvalidChannelIdError* = object of MuxerError
@@ -47,8 +51,7 @@ type
func shortLog*(m: Mplex): auto =
shortLog(m.connection)
chronicles.formatIt(Mplex):
shortLog(it)
chronicles.formatIt(Mplex): shortLog(it)
proc newTooManyChannels(): ref TooManyChannels =
newException(TooManyChannels, "max allowed channel count exceeded")
@@ -56,7 +59,7 @@ proc newTooManyChannels(): ref TooManyChannels =
proc newInvalidChannelIdError(): ref InvalidChannelIdError =
newException(InvalidChannelIdError, "max allowed channel count exceeded")
proc cleanupChann(m: Mplex, chann: LPChannel) {.async: (raises: []), inline.} =
proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
## remove the local channel from the internal tables
##
try:
@@ -67,35 +70,34 @@ proc cleanupChann(m: Mplex, chann: LPChannel) {.async: (raises: []), inline.} =
when defined(libp2p_expensive_metrics):
libp2p_mplex_channels.set(
m.channels[chann.initiator].len.int64,
labelValues = [$chann.initiator, $m.connection.peerId],
)
except CancelledError as exc:
warn "Error cleaning up mplex channel", m, chann, description = exc.msg
labelValues = [$chann.initiator, $m.connection.peerId])
except CatchableError as exc:
warn "Error cleaning up mplex channel", m, chann, msg = exc.msg
proc newStreamInternal*(
m: Mplex,
initiator: bool = true,
chanId: uint64 = 0,
name: string = "",
timeout: Duration,
): LPChannel {.gcsafe, raises: [InvalidChannelIdError].} =
proc newStreamInternal*(m: Mplex,
initiator: bool = true,
chanId: uint64 = 0,
name: string = "",
timeout: Duration): LPChannel
{.gcsafe, raises: [Defect, InvalidChannelIdError].} =
## create new channel/stream
##
let id =
if initiator:
m.currentId.inc()
m.currentId
else:
chanId
let id = if initiator:
m.currentId.inc(); m.currentId
else: chanId
if id in m.channels[initiator]:
raise newInvalidChannelIdError()
result = LPChannel.init(id, m.connection, initiator, name, timeout = timeout)
result = LPChannel.init(
id,
m.connection,
initiator,
name,
timeout = timeout)
result.peerId = m.connection.peerId
result.observedAddr = m.connection.observedAddr
result.localAddr = m.connection.localAddr
result.transportDir = m.connection.transportDir
when defined(libp2p_agents_metrics):
result.shortAgent = m.connection.shortAgent
@@ -109,17 +111,21 @@ proc newStreamInternal*(
when defined(libp2p_expensive_metrics):
libp2p_mplex_channels.set(
m.channels[initiator].len.int64, labelValues = [$initiator, $m.connection.peerId]
)
m.channels[initiator].len.int64,
labelValues = [$initiator, $m.connection.peerId])
proc handleStream(m: Mplex, chann: LPChannel) {.async: (raises: []).} =
proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
## call the muxer stream handler for this channel
##
await m.streamHandler(chann)
trace "finished handling stream", m, chann
doAssert(chann.closed, "connection not closed by handler!")
try:
await m.streamHandler(chann)
trace "finished handling stream", m, chann
doAssert(chann.closed, "connection not closed by handler!")
except CatchableError as exc:
trace "Exception in mplex stream handler", m, chann, msg = exc.msg
await chann.reset()
method handle*(m: Mplex) {.async: (raises: []).} =
method handle*(m: Mplex) {.async, gcsafe.} =
trace "Starting mplex handler", m
try:
while not m.connection.atEof:
@@ -147,7 +153,7 @@ method handle*(m: Mplex) {.async: (raises: []).} =
else:
if m.channels[false].len > m.maxChannCount - 1:
warn "too many channels created by remote peer",
allowedMax = MaxChannelCount, m
allowedMax = MaxChannelCount, m
raise newTooManyChannels()
let name = string.fromBytes(data)
@@ -155,64 +161,59 @@ method handle*(m: Mplex) {.async: (raises: []).} =
trace "Processing channel message", m, channel, data = data.shortLog
case msgType
of MessageType.New:
trace "created channel", m, channel
case msgType:
of MessageType.New:
trace "created channel", m, channel
if m.streamHandler != nil:
# Launch handler task
# All the errors are handled inside `handleStream()` procedure.
asyncSpawn m.handleStream(channel)
of MessageType.MsgIn, MessageType.MsgOut:
if data.len > MaxMsgSize:
warn "attempting to send a packet larger than allowed",
allowed = MaxMsgSize, channel
raise newLPStreamLimitError()
if not isNil(m.streamHandler):
# Launch handler task
# All the errors are handled inside `handleStream()` procedure.
asyncSpawn m.handleStream(channel)
trace "pushing data to channel", m, channel, len = data.len
try:
await channel.pushData(data)
trace "pushed data to channel", m, channel, len = data.len
except LPStreamClosedError as exc:
# Channel is being closed, but `cleanupChann` was not yet triggered.
trace "pushing data to channel failed",
m, channel, len = data.len, description = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.
of MessageType.CloseIn, MessageType.CloseOut:
await channel.pushEof()
of MessageType.ResetIn, MessageType.ResetOut:
channel.remoteReset = true
await channel.reset()
of MessageType.MsgIn, MessageType.MsgOut:
if data.len > MaxMsgSize:
warn "attempting to send a packet larger than allowed",
allowed = MaxMsgSize, channel
raise newLPStreamLimitError()
trace "pushing data to channel", m, channel, len = data.len
try:
await channel.pushData(data)
trace "pushed data to channel", m, channel, len = data.len
except LPStreamClosedError as exc:
# Channel is being closed, but `cleanupChann` was not yet triggered.
trace "pushing data to channel failed", m, channel, len = data.len,
msg = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.
of MessageType.CloseIn, MessageType.CloseOut:
await channel.pushEof()
of MessageType.ResetIn, MessageType.ResetOut:
channel.remoteReset = true
await channel.reset()
except CancelledError:
debug "Unexpected cancellation in mplex handler", m
except LPStreamEOFError as exc:
trace "Stream EOF", m, description = exc.msg
except LPStreamError as exc:
debug "Unexpected stream exception in mplex read loop", m, description = exc.msg
except MuxerError as exc:
debug "Unexpected muxer exception in mplex read loop", m, description = exc.msg
trace "Stream EOF", m, msg = exc.msg
except CatchableError as exc:
debug "Unexpected exception in mplex read loop", m, msg = exc.msg
finally:
await m.close()
trace "Stopped mplex handler", m
proc new*(
M: type Mplex,
conn: Connection,
inTimeout: Duration = DefaultChanTimeout,
outTimeout: Duration = DefaultChanTimeout,
maxChannCount: int = MaxChannelCount,
): Mplex =
M(
connection: conn,
proc new*(M: type Mplex,
conn: Connection,
inTimeout, outTimeout: Duration = DefaultChanTimeout,
maxChannCount: int = MaxChannelCount): Mplex =
M(connection: conn,
inChannTimeout: inTimeout,
outChannTimeout: outTimeout,
oid: genOid(),
maxChannCount: maxChannCount,
)
maxChannCount: maxChannCount)
method newStream*(
m: Mplex, name: string = "", lazy: bool = false
): Future[Connection] {.async: (raises: [CancelledError, LPStreamError, MuxerError]).} =
method newStream*(m: Mplex,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
let channel = m.newStreamInternal(timeout = m.inChannTimeout)
if not lazy:
@@ -220,7 +221,7 @@ method newStream*(
return Connection(channel)
method close*(m: Mplex) {.async: (raises: []).} =
method close*(m: Mplex) {.async, gcsafe.} =
if m.isClosed:
trace "Already closed", m
return
@@ -247,9 +248,3 @@ method close*(m: Mplex) {.async: (raises: []).} =
m.channels[true].clear()
trace "Closed mplex", m
method getStreams*(m: Mplex): seq[Connection] {.gcsafe.} =
for c in m.channels[false].values:
result.add(c)
for c in m.channels[true].values:
result.add(c)
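
A minimal sketch of driving the Mplex muxer above by hand over an already-secured connection; secureConn is a placeholder, and in practice the switch/upgrader does this wiring. toBytes comes from stew/byteutils, which these modules already import.

proc mplexExample(secureConn: Connection) {.async.} =
  let mplex = Mplex.new(secureConn)
  asyncSpawn mplex.handle()                # start the per-connection read loop
  let stream = await mplex.newStream("demo")
  await stream.write("hi".toBytes())
  await stream.close()
  await mplex.close()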

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -7,69 +7,86 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import chronos, chronicles
import ../stream/connection, ../errors
import ../protocols/protocol,
../stream/connection,
../errors
logScope:
topics = "libp2p muxer"
const DefaultChanTimeout* = 5.minutes
const
DefaultChanTimeout* = 5.minutes
type
MuxerError* = object of LPError
TooManyChannels* = object of MuxerError
StreamHandler* = proc(conn: Connection): Future[void] {.async: (raises: []).}
MuxerHandler* = proc(muxer: Muxer): Future[void] {.async: (raises: []).}
StreamHandler* = proc(conn: Connection): Future[void] {.gcsafe, raises: [Defect].}
MuxerHandler* = proc(muxer: Muxer): Future[void] {.gcsafe, raises: [Defect].}
Muxer* = ref object of RootObj
streamHandler*: StreamHandler
handler*: Future[void].Raising([])
connection*: Connection
  # user-provided proc that returns a constructed Muxer
MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [].}
MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [Defect].}
# this wraps a creator proc that knows how to make muxers
MuxerProvider* = object
MuxerProvider* = ref object of LPProtocol
newMuxer*: MuxerConstructor
codec*: string
streamHandler*: StreamHandler # triggered every time there is a new stream, called for any muxer instance
muxerHandler*: MuxerHandler # triggered every time there is a new muxed connection created
func shortLog*(m: Muxer): auto =
if m == nil:
"nil"
else:
shortLog(m.connection)
chronicles.formatIt(Muxer):
shortLog(it)
func shortLog*(m: Muxer): auto = shortLog(m.connection)
chronicles.formatIt(Muxer): shortLog(it)
# muxer interface
method newStream*(
m: Muxer, name: string = "", lazy: bool = false
): Future[Connection] {.
base, async: (raises: [CancelledError, LPStreamError, MuxerError], raw: true)
.} =
raiseAssert("[Muxer.newStream] abstract method not implemented!")
when defined(libp2p_agents_metrics):
method setShortAgent*(m: Muxer, shortAgent: string) {.base, gcsafe.} =
m.connection.shortAgent = shortAgent
method close*(m: Muxer) {.base, async: (raises: []).} =
if m.connection != nil:
await m.connection.close()
method handle*(m: Muxer): Future[void] {.base, async: (raises: []).} =
discard
method newStream*(m: Muxer, name: string = "", lazy: bool = false):
Future[Connection] {.base, async, gcsafe.} = discard
method close*(m: Muxer) {.base, async, gcsafe.} = discard
method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard
proc new*(
T: typedesc[MuxerProvider], creator: MuxerConstructor, codec: string
): T {.gcsafe.} =
let muxerProvider = T(newMuxer: creator, codec: codec)
T: typedesc[MuxerProvider],
creator: MuxerConstructor,
codec: string): T {.gcsafe.} =
let muxerProvider = T(newMuxer: creator)
muxerProvider.codec = codec
muxerProvider.init()
muxerProvider
method getStreams*(m: Muxer): seq[Connection] {.base, gcsafe.} =
raiseAssert("[Muxer.getStreams] abstract method not implemented!")
method init(c: MuxerProvider) =
proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} =
trace "starting muxer handler", proto=proto, conn
try:
let
muxer = c.newMuxer(conn)
if not isNil(c.streamHandler):
muxer.streamHandler = c.streamHandler
var futs = newSeq[Future[void]]()
futs &= muxer.handle()
# finally await both the futures
if not isNil(c.muxerHandler):
await c.muxerHandler(muxer)
when defined(libp2p_agents_metrics):
conn.shortAgent = muxer.connection.shortAgent
checkFutures(await allFinished(futs))
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "exception in muxer handler", exc = exc.msg, conn, proto
finally:
await conn.close()
c.handler = handler
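
A short sketch of how a MuxerProvider is built from the constructor above; this mirrors what the switch builder does elsewhere in the codebase, and the creator proc name is illustrative.

proc mplexCreator(conn: Connection): Muxer {.gcsafe.} =
  Mplex.new(conn)

let provider = MuxerProvider.new(mplexCreator, MplexCodec)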

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -7,12 +7,15 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import sequtils, std/[tables]
import chronos, chronicles, metrics, stew/[endians2, byteutils, objects]
import ../muxer, ../../stream/connection
import ../../utils/[zeroqueue, sequninit]
import ../muxer,
../../stream/connection
export muxer
@@ -22,21 +25,18 @@ logScope:
const
YamuxCodec* = "/yamux/1.0.0"
YamuxVersion = 0.uint8
YamuxDefaultWindowSize* = 256000
MaxSendQueueSize = 256000
DefaultWindowSize = 256000
MaxChannelCount = 200
when defined(libp2p_yamux_metrics):
declareGauge libp2p_yamux_channels, "yamux channels", labels = ["initiator", "peer"]
declareHistogram libp2p_yamux_send_queue,
"message send queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
declareHistogram libp2p_yamux_recv_queue,
"message recv queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
declareGauge(libp2p_yamux_channels, "yamux channels", labels = ["initiator", "peer"])
declareHistogram libp2p_yamux_send_queue, "message send queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
declareHistogram libp2p_yamux_recv_queue, "message recv queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
type
YamuxError* = object of MuxerError
YamuxError* = object of CatchableError
MsgType = enum
Data = 0x0
@@ -51,9 +51,9 @@ type
Rst
GoAwayStatus = enum
NormalTermination = 0x0
ProtocolError = 0x1
InternalError = 0x2
NormalTermination = 0x0,
ProtocolError = 0x1,
InternalError = 0x2,
YamuxHeader = object
version: uint8
@@ -62,264 +62,214 @@ type
streamId: uint32
length: uint32
proc readHeader(
conn: LPStream
): Future[YamuxHeader] {.async: (raises: [CancelledError, LPStreamError, MuxerError]).} =
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
var buffer: array[12, byte]
await conn.readExactly(addr buffer[0], 12)
result.version = buffer[0]
let flags = fromBytesBE(uint16, buffer[2 .. 3])
if not result.msgType.checkedEnumAssign(buffer[1]) or flags notin 0'u16 .. 15'u16:
raise newException(YamuxError, "Wrong header")
let flags = fromBytesBE(uint16, buffer[2..3])
if not result.msgType.checkedEnumAssign(buffer[1]) or flags notin 0'u16..15'u16:
raise newException(YamuxError, "Wrong header")
result.flags = cast[set[MsgFlags]](flags)
result.streamId = fromBytesBE(uint32, buffer[4 .. 7])
result.length = fromBytesBE(uint32, buffer[8 .. 11])
result.streamId = fromBytesBE(uint32, buffer[4..7])
result.length = fromBytesBE(uint32, buffer[8..11])
return result
proc `$`(header: YamuxHeader): string =
"{" & $header.msgType & ", " & "{" &
header.flags.foldl(
if a != "":
a & ", " & $b
else:
$b,
"",
) & "}, " & "streamId: " & $header.streamId & ", " & "length: " & $header.length &
"}"
result = "{" & $header.msgType & ", "
result &= "{" & header.flags.foldl(if a != "": a & ", " & $b else: $b, "") & "}, "
result &= "streamId: " & $header.streamId & ", "
result &= "length: " & $header.length & "}"
proc encode(header: YamuxHeader): array[12, byte] =
result[0] = header.version
result[1] = uint8(header.msgType)
result[2 .. 3] = toBytesBE(uint16(cast[uint8](header.flags)))
# workaround https://github.com/nim-lang/Nim/issues/21789
result[4 .. 7] = toBytesBE(header.streamId)
result[8 .. 11] = toBytesBE(header.length)
result[2..3] = toBytesBE(cast[uint16](header.flags))
result[4..7] = toBytesBE(header.streamId)
result[8..11] = toBytesBE(header.length)
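
To make the wire layout above concrete: the 12-byte header is [0] version, [1] message type, [2..3] flags (big endian), [4..7] stream id, [8..11] length. Worked out from that layout, windowUpdate(streamId = 3, delta = 256000) with no flags encodes as the hex bytes:

00 01 00 00  00 00 00 03  00 03 e8 00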
proc write(
conn: LPStream, header: YamuxHeader
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
proc write(conn: LPStream, header: YamuxHeader): Future[void] {.gcsafe.} =
trace "write directly on stream", h = $header
var buffer = header.encode()
conn.write(@buffer)
return conn.write(@buffer)
proc ping(T: type[YamuxHeader], flag: MsgFlags, pingData: uint32): T =
T(version: YamuxVersion, msgType: MsgType.Ping, flags: {flag}, length: pingData)
T(
version: YamuxVersion,
msgType: MsgType.Ping,
flags: {flag},
length: pingData
)
proc goAway(T: type[YamuxHeader], status: GoAwayStatus): T =
T(version: YamuxVersion, msgType: MsgType.GoAway, length: uint32(status))
T(
version: YamuxVersion,
msgType: MsgType.GoAway,
length: uint32(status)
)
proc data(
T: type[YamuxHeader],
streamId: uint32,
length: uint32 = 0,
flags: set[MsgFlags] = {},
): T =
T: type[YamuxHeader],
streamId: uint32,
length: uint32 = 0,
flags: set[MsgFlags] = {},
): T =
T(
version: YamuxVersion,
msgType: MsgType.Data,
length: length,
flags: flags,
streamId: streamId,
streamId: streamId
)
proc windowUpdate(
T: type[YamuxHeader], streamId: uint32, delta: uint32, flags: set[MsgFlags] = {}
): T =
T: type[YamuxHeader],
streamId: uint32,
delta: uint32,
flags: set[MsgFlags] = {},
): T =
T(
version: YamuxVersion,
msgType: MsgType.WindowUpdate,
length: delta,
flags: flags,
streamId: streamId,
streamId: streamId
)
type
ToSend = ref object
ToSend = tuple
data: seq[byte]
sent: int
fut: Future[void].Raising([CancelledError, LPStreamError])
fut: Future[void]
YamuxChannel* = ref object of Connection
id: uint32
recvWindow: int
sendWindow: int
maxRecvWindow: int
maxSendQueueSize: int
conn: Connection
isSrc: bool
opened: bool
isSending: bool
sendQueue: seq[ToSend]
recvQueue: ZeroQueue
recvQueue: seq[byte]
isReset: bool
remoteReset: bool
closedRemotely: AsyncEvent
closedRemotely: Future[void]
closedLocally: bool
receivedData: AsyncEvent
returnedEof: bool
proc `$`(channel: YamuxChannel): string =
result = if channel.conn.dir == Out: "=> " else: "<= "
result &= $channel.id
var s: seq[string] = @[]
if channel.closedRemotely.isSet():
if channel.closedRemotely.done():
s.add("ClosedRemotely")
if channel.closedLocally:
s.add("ClosedLocally")
if channel.isReset:
s.add("Reset")
if s.len > 0:
result &=
" {" &
s.foldl(
if a != "":
a & ", " & b
else:
b,
"",
) & "}"
result &= " {" & s.foldl(if a != "": a & ", " & b else: b, "") & "}"
proc lengthSendQueue(channel: YamuxChannel): int =
## Returns the length of what remains to be sent
##
channel.sendQueue.foldl(a + b.data.len - b.sent, 0)
proc sendQueueBytes(channel: YamuxChannel, limit: bool = false): int =
for (elem, sent, _) in channel.sendQueue:
result.inc(min(elem.len - sent, if limit: channel.maxRecvWindow div 3 else: elem.len - sent))
proc lengthSendQueueWithLimit(channel: YamuxChannel): int =
  ## Returns the length of what remains to be sent, but limits the size of big messages.
  ##
  # For leniency, limit big message sizes to a third of maxSendQueueSize.
  # This value is arbitrary (it is not in the spec); it allows storing up to
  # 3 big messages if the peer is stalling.
channel.sendQueue.foldl(
a + min(b.data.len - b.sent, channel.maxSendQueueSize div 3), 0
)
method queuedSendBytes*(channel: YamuxChannel): int = channel.sendQueueBytes()
proc actuallyClose(channel: YamuxChannel) {.async: (raises: []).} =
proc actuallyClose(channel: YamuxChannel) {.async.} =
if channel.closedLocally and channel.sendQueue.len == 0 and
channel.closedRemotely.isSet():
channel.closedRemotely.done():
await procCall Connection(channel).closeImpl()
proc remoteClosed(channel: YamuxChannel) {.async: (raises: []).} =
if not channel.closedRemotely.isSet():
channel.closedRemotely.fire()
proc remoteClosed(channel: YamuxChannel) {.async.} =
if not channel.closedRemotely.done():
channel.closedRemotely.complete()
await channel.actuallyClose()
channel.isClosedRemotely = true
method closeImpl*(channel: YamuxChannel) {.async: (raises: []).} =
method closeImpl*(channel: YamuxChannel) {.async, gcsafe.} =
if not channel.closedLocally:
trace "Closing yamux channel locally", streamId = channel.id, conn = channel.conn
channel.closedLocally = true
if not channel.isReset and channel.sendQueue.len == 0:
try:
await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
except CancelledError, LPStreamError:
discard
if channel.isReset == false and channel.sendQueue.len == 0:
await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
await channel.actuallyClose()
method closeWrite*(channel: YamuxChannel) {.async: (raises: []).} =
## For yamux, closeWrite is the same as close - it implements half-close
await channel.close()
proc clearQueues(channel: YamuxChannel, error: ref LPStreamEOFError = nil) =
for toSend in channel.sendQueue:
if error.isNil():
toSend.fut.complete()
else:
toSend.fut.fail(error)
channel.sendQueue = @[]
channel.recvQueue.clear()
proc reset(channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).} =
# If we reset locally, we want to flush up to a maximum of recvWindow
# bytes. It's because the peer we're connected to can send us data before
# it receives the reset.
proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
if channel.isReset:
return
trace "Reset channel"
channel.isReset = true
channel.remoteReset = not isLocal
channel.clearQueues(newLPStreamEOFError())
for (d, s, fut) in channel.sendQueue:
fut.fail(newLPStreamEOFError())
channel.sendQueue = @[]
channel.recvQueue = @[]
channel.sendWindow = 0
if not channel.closedLocally:
if isLocal and not channel.isSending:
try:
await channel.conn.write(YamuxHeader.data(channel.id, 0, {Rst}))
except CancelledError, LPStreamError:
discard
if isLocal:
try: await channel.conn.write(YamuxHeader.data(channel.id, 0, {Rst}))
except LPStreamEOFError as exc: discard
except LPStreamClosedError as exc: discard
await channel.close()
if not channel.closedRemotely.isSet():
if not channel.closedRemotely.done():
await channel.remoteClosed()
channel.receivedData.fire()
if not isLocal:
# If the reset is remote, there's no reason to flush anything.
# If we reset locally, we want to flush up to a maximum of recvWindow
# bytes. We use the recvWindow in the proc cleanupChann.
channel.recvWindow = 0
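To make the flush accounting above concrete, a small sketch of what the muxer later does with the leftover recvWindow (the table mirrors m.flushed from this diff; the byte counts are hypothetical):

when isMainModule:
  import std/tables
  # after a *local* reset the channel's remaining recvWindow is remembered,
  # mirroring `m.flushed[channel.id] = channel.recvWindow` in this diff
  var flushed = initTable[uint32, int]()
  let streamId = 7'u32
  flushed[streamId] = 64 * 1024               # leftover credit the peer may still use
  for incomingLen in [32 * 1024, 16 * 1024]:  # late data arriving for the reset stream
    flushed[streamId] -= incomingLen
    doAssert flushed[streamId] >= 0           # going negative is a protocol error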
proc updateRecvWindow(
channel: YamuxChannel
) {.async: (raises: [CancelledError, LPStreamError]).} =
## Send to the peer a window update when the recvWindow is empty enough
##
# In order to avoid spamming a window update every time a byte is read,
# we send one every time half of the maxRecvWindow has been read.
proc updateRecvWindow(channel: YamuxChannel) {.async.} =
let inWindow = channel.recvWindow + channel.recvQueue.len
if inWindow > channel.maxRecvWindow div 2:
return
let delta = channel.maxRecvWindow - inWindow
channel.recvWindow.inc(delta.int)
await channel.conn.write(YamuxHeader.windowUpdate(channel.id, delta.uint32))
channel.recvWindow.inc(delta)
await channel.conn.write(YamuxHeader.windowUpdate(
channel.id,
delta.uint32
))
trace "increasing the recvWindow", delta
method readOnce*(
channel: YamuxChannel, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
## Read from a yamux channel
channel: YamuxChannel,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
if channel.isReset:
raise
if channel.remoteReset:
trace "stream is remote reset when readOnce", channel = $channel
raise if channel.remoteReset:
newLPStreamResetError()
elif channel.closedLocally:
trace "stream is closed locally when readOnce", channel = $channel
newLPStreamClosedError()
else:
trace "stream is down when readOnce", channel = $channel
newLPStreamConnDownError()
if channel.isEof:
channel.clearQueues()
if channel.returnedEof:
raise newLPStreamRemoteClosedError()
if channel.recvQueue.isEmpty():
if channel.recvQueue.len == 0:
channel.receivedData.clear()
let
closedRemotelyFut = channel.closedRemotely.wait()
receivedDataFut = channel.receivedData.wait()
defer:
if not closedRemotelyFut.finished():
await closedRemotelyFut.cancelAndWait()
if not receivedDataFut.finished():
await receivedDataFut.cancelAndWait()
await closedRemotelyFut or receivedDataFut
if channel.closedRemotely.isSet() and channel.recvQueue.isEmpty():
channel.isEof = true
channel.clearQueues()
return
0 # we return 0 to indicate that the channel is closed for reading from now on
await channel.closedRemotely or channel.receivedData.wait()
if channel.closedRemotely.done() and channel.recvQueue.len == 0:
channel.returnedEof = true
return 0
let consumed = channel.recvQueue.consumeTo(pbytes, nbytes)
let toRead = min(channel.recvQueue.len, nbytes)
var p = cast[ptr UncheckedArray[byte]](pbytes)
toOpenArray(p, 0, nbytes - 1)[0..<toRead] = channel.recvQueue.toOpenArray(0, toRead - 1)
channel.recvQueue = channel.recvQueue[toRead..^1]
# We made some room in the recv buffer, let the peer know
await channel.updateRecvWindow()
channel.activity = true
return consumed
return toRead
proc gotDataFromRemote(
channel: YamuxChannel, b: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
proc gotDataFromRemote(channel: YamuxChannel, b: seq[byte]) {.async.} =
channel.recvWindow -= b.len
channel.recvQueue.push(b)
channel.recvQueue = channel.recvQueue.concat(b)
channel.receivedData.fire()
when defined(libp2p_yamux_metrics):
libp2p_yamux_recv_queue.observe(channel.recvQueue.len.int64)
@@ -328,319 +278,219 @@ proc gotDataFromRemote(
proc setMaxRecvWindow*(channel: YamuxChannel, maxRecvWindow: int) =
channel.maxRecvWindow = maxRecvWindow
proc sendLoop(channel: YamuxChannel) {.async: (raises: []).} =
proc trySend(channel: YamuxChannel) {.async.} =
if channel.isSending:
return
channel.isSending = true
defer:
channel.isSending = false
const NumBytesHeader = 12
while channel.sendQueue.len > 0:
channel.sendQueue.keepItIf(not it.fut.finished())
defer: channel.isSending = false
while channel.sendQueue.len != 0:
channel.sendQueue.keepItIf(not (it.fut.cancelled() and it.sent == 0))
if channel.sendWindow == 0:
trace "trying to send while the sendWindow is empty"
if channel.lengthSendQueueWithLimit() > channel.maxSendQueueSize:
trace "channel send queue too big, resetting",
maxSendQueueSize = channel.maxSendQueueSize,
currentQueueSize = channel.lengthSendQueueWithLimit()
await channel.reset(isLocal = true)
trace "send window empty"
if channel.sendQueueBytes(true) > channel.maxRecvWindow:
debug "channel send queue too big, resetting", maxSendWindow=channel.maxRecvWindow,
currentQueueSize = channel.sendQueueBytes(true)
try:
await channel.reset(true)
except CatchableError as exc:
debug "failed to reset", msg=exc.msg
break
let
bytesAvailable = channel.lengthSendQueue()
numBytesToSend = min(channel.sendWindow, bytesAvailable)
bytesAvailable = channel.sendQueueBytes()
toSend = min(channel.sendWindow, bytesAvailable)
var
sendBuffer = newSeqUninit[byte](NumBytesHeader + numBytesToSend)
header = YamuxHeader.data(channel.id, numBytesToSend.uint32)
sendBuffer = newSeqUninitialized[byte](toSend + 12)
header = YamuxHeader.data(channel.id, toSend.uint32)
inBuffer = 0
if numBytesToSend >= bytesAvailable and channel.closedLocally:
trace "last buffer we will send on this channel", numBytesToSend, bytesAvailable
if toSend >= bytesAvailable and channel.closedLocally:
trace "last buffer we'll sent on this channel", toSend, bytesAvailable
header.flags.incl({Fin})
sendBuffer[0 ..< NumBytesHeader] = header.encode()
var futures: seq[Future[void].Raising([CancelledError, LPStreamError])]
while inBuffer < numBytesToSend:
var toSend = channel.sendQueue[0]
# concatenate the different messages we try to send into one buffer
let bufferToSend = min(toSend.data.len - toSend.sent, numBytesToSend - inBuffer)
sendBuffer.toOpenArray(NumBytesHeader, NumBytesHeader + numBytesToSend - 1)[
inBuffer ..< (inBuffer + bufferToSend)
] = toSend.data.toOpenArray(toSend.sent, toSend.sent + bufferToSend - 1)
sendBuffer[0..<12] = header.encode()
var futures: seq[Future[void]]
while inBuffer < toSend:
let (data, sent, fut) = channel.sendQueue[0]
let bufferToSend = min(data.len - sent, toSend - inBuffer)
sendBuffer.toOpenArray(12, 12 + toSend - 1)[inBuffer..<(inBuffer+bufferToSend)] =
channel.sendQueue[0].data.toOpenArray(sent, sent + bufferToSend - 1)
channel.sendQueue[0].sent.inc(bufferToSend)
if toSend.sent >= toSend.data.len:
# if every byte of the message is in the buffer, add the write future to the
# sequence of futures to be completed (or failed) when the buffer is sent
futures.add(toSend.fut)
if channel.sendQueue[0].sent >= data.len:
futures.add(fut)
channel.sendQueue.delete(0)
inBuffer.inc(bufferToSend)
trace "try to send the buffer", h = $header
try:
await channel.conn.write(sendBuffer)
channel.sendWindow.dec(inBuffer)
except CancelledError:
## Just for the compiler. This should never happen, as sendLoop is started by asyncSpawn.
## Therefore, no one owns that sendLoop's future and no one can cancel it.
discard
except LPStreamError as exc:
error "failed to send the buffer", description = exc.msg
trace "build send buffer", h = $header, msg=string.fromBytes(sendBuffer[12..^1])
channel.sendWindow.dec(toSend)
try: await channel.conn.write(sendBuffer)
except CatchableError as exc:
let connDown = newLPStreamConnDownError(exc)
for fut in futures:
for fut in futures.items():
fut.fail(connDown)
await channel.reset()
break
for fut in futures:
for fut in futures.items():
fut.complete()
channel.activity = true
method write*(
channel: YamuxChannel, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
## Write to yamux channel
##
var resFut = newFuture[void]("Yamux Send")
method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
result = newFuture[void]("Yamux Send")
if channel.remoteReset:
trace "stream is reset when write", channel = $channel
resFut.fail(newLPStreamResetError())
return resFut
result.fail(newLPStreamResetError())
return result
if channel.closedLocally or channel.isReset:
resFut.fail(newLPStreamClosedError())
return resFut
result.fail(newLPStreamClosedError())
return result
if msg.len == 0:
resFut.complete()
return resFut
channel.sendQueue.add(ToSend(data: msg, sent: 0, fut: resFut))
result.complete()
return result
channel.sendQueue.add((msg, 0, result))
when defined(libp2p_yamux_metrics):
libp2p_yamux_send_queue.observe(channel.lengthSendQueue().int64)
libp2p_yamux_recv_queue.observe(channel.sendQueueBytes().int64)
asyncSpawn channel.trySend()
asyncSpawn channel.sendLoop()
return resFut
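A brief usage sketch of the semantics above (hypothetical calling code, assuming chronos and this yamux module are imported): the returned future is queued work and completes only once the send loop has flushed the bytes, so awaiting it gives natural backpressure.

# hypothetical calling code, assuming chronos and this yamux module are imported
proc sendGreeting(channel: YamuxChannel) {.async.} =
  let payload = @[byte 0x68, 0x69]   # "hi"
  # queued here; the await resolves only after the send loop wrote the bytes,
  # or fails if the channel is reset or closed first
  await channel.write(payload)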
proc open(channel: YamuxChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
## Open a yamux channel by sending a window update with Syn or Ack flag
##
proc open*(channel: YamuxChannel) {.async, gcsafe.} =
if channel.opened:
trace "Try to open channel twice"
return
channel.opened = true
channel.isReset = false
await channel.conn.write(YamuxHeader.data(channel.id, 0, {if channel.isSrc: Syn else: Ack}))
await channel.conn.write(
YamuxHeader.windowUpdate(
channel.id,
uint32(max(channel.maxRecvWindow - YamuxDefaultWindowSize, 0)),
{if channel.isSrc: Syn else: Ack},
)
)
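A worked example of the delta carried by that opening window update (the 1 MiB maxRecvWindow is a hypothetical configuration): since the remote starts from the 256 KiB yamux default, only the surplus is advertised, and a window at or below the default sends a delta of 0.

when isMainModule:
  # hypothetical values illustrating the opening WindowUpdate delta
  const YamuxDefaultWindowSize = 256 * 1024
  let maxRecvWindow = 1024 * 1024                  # assumed configured window
  let delta = max(maxRecvWindow - YamuxDefaultWindowSize, 0)
  doAssert delta == 768 * 1024                     # the extra credit granted up front
  # with maxRecvWindow <= 256 KiB the delta is 0 and the frame only carries Syn/Ack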
method getWrapped*(channel: YamuxChannel): Connection =
channel.conn
type Yamux* = ref object of Muxer
channels: Table[uint32, YamuxChannel]
flushed: Table[uint32, int]
currentId: uint32
isClosed: bool
maxChannCount: int
windowSize: int
maxSendQueueSize: int
inTimeout: Duration
outTimeout: Duration
type
Yamux* = ref object of Muxer
channels: Table[uint32, YamuxChannel]
flushed: Table[uint32, int]
currentId: uint32
isClosed: bool
maxChannCount: int
proc lenBySrc(m: Yamux, isSrc: bool): int =
for v in m.channels.values():
if v.isSrc == isSrc:
result += 1
if v.isSrc == isSrc: result += 1
proc cleanupChannel(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
try:
await channel.join()
except CancelledError:
discard
proc cleanupChann(m: Yamux, channel: YamuxChannel) {.async.} =
await channel.join()
m.channels.del(channel.id)
when defined(libp2p_yamux_metrics):
libp2p_yamux_channels.set(
m.lenBySrc(channel.isSrc).int64, [$channel.isSrc, $channel.peerId]
)
libp2p_yamux_channels.set(m.lenBySrc(channel.isSrc).int64, [$channel.isSrc, $channel.peerId])
if channel.isReset and channel.recvWindow > 0:
m.flushed[channel.id] = channel.recvWindow
proc createStream(
m: Yamux, id: uint32, isSrc: bool, recvWindow: int, maxSendQueueSize: int
): YamuxChannel =
# During initialization, recvWindow can be larger than maxRecvWindow.
# This is because the peer we're connected to will always assume
# that the initial recvWindow is 256k.
# To resolve this contradiction, no window update will be sent until
# recvWindow drops below maxRecvWindow.
var stream = YamuxChannel(
proc createStream(m: Yamux, id: uint32, isSrc: bool): YamuxChannel =
result = YamuxChannel(
id: id,
maxRecvWindow: recvWindow,
recvWindow:
if recvWindow > YamuxDefaultWindowSize: recvWindow else: YamuxDefaultWindowSize,
sendWindow: YamuxDefaultWindowSize,
maxSendQueueSize: maxSendQueueSize,
maxRecvWindow: DefaultWindowSize,
recvWindow: DefaultWindowSize,
sendWindow: DefaultWindowSize,
isSrc: isSrc,
conn: m.connection,
receivedData: newAsyncEvent(),
closedRemotely: newAsyncEvent(),
closedRemotely: newFuture[void]()
)
stream.objName = "YamuxStream"
if isSrc:
stream.dir = Direction.Out
stream.timeout = m.outTimeout
else:
stream.dir = Direction.In
stream.timeout = m.inTimeout
stream.timeoutHandler = proc(): Future[void] {.async: (raises: [], raw: true).} =
result.objName = "YamuxStream"
result.dir = if isSrc: Direction.Out else: Direction.In
result.timeoutHandler = proc(): Future[void] {.gcsafe.} =
trace "Idle timeout expired, resetting YamuxChannel"
stream.reset(isLocal = true)
stream.initStream()
stream.peerId = m.connection.peerId
stream.observedAddr = m.connection.observedAddr
stream.localAddr = m.connection.localAddr
stream.transportDir = m.connection.transportDir
result.reset()
result.initStream()
result.peerId = m.connection.peerId
result.observedAddr = m.connection.observedAddr
result.transportDir = m.connection.transportDir
when defined(libp2p_agents_metrics):
stream.shortAgent = m.connection.shortAgent
m.channels[id] = stream
asyncSpawn m.cleanupChannel(stream)
trace "created channel", id, pid = m.connection.peerId
result.shortAgent = m.connection.shortAgent
m.channels[id] = result
asyncSpawn m.cleanupChann(result)
trace "created channel", id, pid=m.connection.peerId
when defined(libp2p_yamux_metrics):
libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $stream.peerId])
return stream
libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $result.peerId])
method close*(m: Yamux) {.async: (raises: []).} =
method close*(m: Yamux) {.async.} =
if m.isClosed == true:
trace "Already closed"
return
m.isClosed = true
trace "Closing yamux"
let channels = toSeq(m.channels.values())
for channel in channels:
channel.clearQueues(newLPStreamEOFError())
channel.recvWindow = 0
channel.sendWindow = 0
channel.closedLocally = true
channel.isReset = true
channel.opened = false
channel.isClosed = true
await channel.remoteClosed()
channel.receivedData.fire()
try:
await m.connection.write(YamuxHeader.goAway(NormalTermination))
except CancelledError as exc:
trace "cancelled sending goAway", description = exc.msg
except LPStreamError as exc:
trace "failed to send goAway", description = exc.msg
await channel.reset(true)
try: await m.connection.write(YamuxHeader.goAway(NormalTermination))
except CatchableError as exc: trace "failed to send goAway", msg=exc.msg
await m.connection.close()
m.isClosed = true
trace "Closed yamux"
proc handleStream(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
## Call the muxer stream handler for this channel
proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
## call the muxer stream handler for this channel
##
await m.streamHandler(channel)
trace "finished handling stream"
doAssert(channel.isClosed, "connection not closed by handler!")
try:
await m.streamHandler(channel)
trace "finished handling stream"
doAssert(channel.isClosed, "connection not closed by handler!")
except CatchableError as exc:
trace "Exception in yamux stream handler", msg = exc.msg
await channel.reset()
method handle*(m: Yamux) {.async: (raises: []).} =
trace "Starting yamux handler", pid = m.connection.peerId
method handle*(m: Yamux) {.async, gcsafe.} =
trace "Starting yamux handler", pid=m.connection.peerId
try:
while not m.connection.atEof:
trace "waiting for header"
let header = await m.connection.readHeader()
trace "got message", h = $header
case header.msgType
case header.msgType:
of Ping:
if MsgFlags.Syn in header.flags:
await m.connection.write(YamuxHeader.ping(MsgFlags.Ack, header.length))
of GoAway:
var status: GoAwayStatus
if status.checkedEnumAssign(header.length):
trace "Received go away", status
else:
trace "Received unexpected error go away"
if status.checkedEnumAssign(header.length): trace "Received go away", status
else: trace "Received unexpected error go away"
break
of Data, WindowUpdate:
if MsgFlags.Syn in header.flags:
if header.streamId in m.channels:
debug "Trying to create an existing channel, skipping", id = header.streamId
debug "Trying to create an existing channel, skipping", id=header.streamId
else:
if header.streamId in m.flushed:
m.flushed.del(header.streamId)
if header.streamId mod 2 == m.currentId mod 2:
debug "Peer used our reserved stream id, skipping",
id = header.streamId,
currentId = m.currentId,
peerId = m.connection.peerId
raise newException(YamuxError, "Peer used our reserved stream id")
let newStream =
m.createStream(header.streamId, false, m.windowSize, m.maxSendQueueSize)
let newStream = m.createStream(header.streamId, false)
if m.channels.len >= m.maxChannCount:
await newStream.reset()
continue
await newStream.open()
asyncSpawn m.handleStream(newStream)
elif header.streamId notin m.channels:
# Flush the data
m.flushed.withValue(header.streamId, flushed):
if header.msgType == Data:
flushed[].dec(int(header.length))
if flushed[] < 0:
raise
newException(YamuxError, "Peer exhausted the recvWindow after reset")
if header.length > 0:
var buffer = newSeqUninit[byte](header.length)
await m.connection.readExactly(addr buffer[0], int(header.length))
# If we do not have a stream, likely we sent a RST and/or closed the stream
trace "unknown stream id", id = header.streamId
if header.streamId notin m.flushed:
raise newException(YamuxError, "Unknown stream ID: " & $header.streamId)
elif header.msgType == Data:
# Flush the data
m.flushed[header.streamId].dec(int(header.length))
if m.flushed[header.streamId] < 0:
raise newException(YamuxError, "Peer exhausted the recvWindow after reset")
if header.length > 0:
var buffer = newSeqUninitialized[byte](header.length)
await m.connection.readExactly(addr buffer[0], int(header.length))
continue
let channel =
try:
m.channels[header.streamId]
except KeyError as e:
raise newException(
YamuxError,
"Stream was cleaned up before handling data: " & $header.streamId & " : " &
e.msg,
e,
)
let channel = m.channels[header.streamId]
if header.msgType == WindowUpdate:
channel.sendWindow += int(header.length)
asyncSpawn channel.sendLoop()
await channel.trySend()
else:
if header.length.int > channel.recvWindow.int:
# check before allocating the buffer
raise newException(YamuxError, "Peer exhausted the recvWindow")
if header.length > 0:
var buffer = newSeqUninit[byte](header.length)
var buffer = newSeqUninitialized[byte](header.length)
await m.connection.readExactly(addr buffer[0], int(header.length))
trace "Msg Rcv", description = shortLog(buffer)
trace "Msg Rcv", msg=string.fromBytes(buffer)
await channel.gotDataFromRemote(buffer)
if MsgFlags.Fin in header.flags:
@@ -649,58 +499,31 @@ method handle*(m: Yamux) {.async: (raises: []).} =
if MsgFlags.Rst in header.flags:
trace "remote reset channel"
await channel.reset()
except CancelledError as exc:
debug "Unexpected cancellation in yamux handler", description = exc.msg
except LPStreamEOFError as exc:
trace "Stream EOF", description = exc.msg
except LPStreamError as exc:
debug "Unexpected stream exception in yamux read loop", description = exc.msg
trace "Stream EOF", msg = exc.msg
except YamuxError as exc:
trace "Closing yamux connection", description = exc.msg
try:
await m.connection.write(YamuxHeader.goAway(ProtocolError))
except CancelledError, LPStreamError:
discard
except MuxerError as exc:
debug "Unexpected muxer exception in yamux read loop", description = exc.msg
try:
await m.connection.write(YamuxHeader.goAway(ProtocolError))
except CancelledError, LPStreamError:
discard
trace "Closing yamux connection", error=exc.msg
await m.connection.write(YamuxHeader.goAway(ProtocolError))
finally:
await m.close()
trace "Stopped yamux handler"
method getStreams*(m: Yamux): seq[Connection] {.gcsafe.} =
for c in m.channels.values:
result.add(c)
method newStream*(
m: Yamux, name: string = "", lazy: bool = false
): Future[Connection] {.async: (raises: [CancelledError, LPStreamError, MuxerError]).} =
m: Yamux,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
if m.channels.len > m.maxChannCount - 1:
raise newException(TooManyChannels, "max allowed channel count exceeded")
let stream = m.createStream(m.currentId, true, m.windowSize, m.maxSendQueueSize)
let stream = m.createStream(m.currentId, true)
m.currentId += 2
if not lazy:
await stream.open()
return stream
proc new*(
T: type[Yamux],
conn: Connection,
maxChannCount: int = MaxChannelCount,
windowSize: int = YamuxDefaultWindowSize,
maxSendQueueSize: int = MaxSendQueueSize,
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes,
): T =
proc new*(T: type[Yamux], conn: Connection, maxChannCount: int = MaxChannelCount): T =
T(
connection: conn,
currentId: if conn.dir == Out: 1 else: 2,
maxChannCount: maxChannCount,
windowSize: windowSize,
maxSendQueueSize: maxSendQueueSize,
inTimeout: inTimeout,
outTimeout: outTimeout,
maxChannCount: maxChannCount
)
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -7,24 +7,25 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
std/[streams, sets, sequtils],
chronos,
chronicles,
stew/byteutils,
dnsclientpkg/[protocol, types],
../utility,
../utils/sequninit
std/[streams, strutils, sets, sequtils],
chronos, chronicles, stew/byteutils,
dnsclientpkg/[protocol, types]
import nameresolver
import
nameresolver
logScope:
topics = "libp2p dnsresolver"
type DnsResolver* = ref object of NameResolver
nameServers*: seq[TransportAddress]
type
DnsResolver* = ref object of NameResolver
nameServers*: seq[TransportAddress]
proc questionToBuf(address: string, kind: QKind): seq[byte] =
try:
@@ -38,35 +39,28 @@ proc questionToBuf(address: string, kind: QKind): seq[byte] =
let dataLen = requestStream.getPosition()
requestStream.setPosition(0)
var buf = newSeqUninit[byte](dataLen)
var buf = newSeq[byte](dataLen)
discard requestStream.readData(addr buf[0], dataLen)
buf
except IOError as exc:
info "Failed to created DNS buffer", description = exc.msg
newSeqUninit[byte](0)
except OSError as exc:
info "Failed to created DNS buffer", description = exc.msg
newSeqUninit[byte](0)
except ValueError as exc:
info "Failed to created DNS buffer", description = exc.msg
newSeqUninit[byte](0)
return buf
except CatchableError as exc:
info "Failed to created DNS buffer", msg = exc.msg
return newSeq[byte](0)
proc getDnsResponse(
dnsServer: TransportAddress, address: string, kind: QKind
): Future[Response] {.
async: (raises: [CancelledError, IOError, OSError, TransportError, ValueError])
.} =
dnsServer: TransportAddress,
address: string,
kind: QKind): Future[Response] {.async.} =
var sendBuf = questionToBuf(address, kind)
if sendBuf.len == 0:
raise newException(ValueError, "Incorrect DNS query")
let receivedDataFuture = Future[void].Raising([CancelledError]).init()
let receivedDataFuture = newFuture[void]()
proc datagramDataReceived(
transp: DatagramTransport, raddr: TransportAddress
): Future[void] {.async: (raises: []).} =
receivedDataFuture.complete()
proc datagramDataReceived(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async, closure.} =
receivedDataFuture.complete()
let sock =
if dnsServer.family == AddressFamily.IPv6:
@@ -77,41 +71,31 @@ proc getDnsResponse(
try:
await sock.sendTo(dnsServer, addr sendBuf[0], sendBuf.len)
try:
await receivedDataFuture.wait(5.seconds) #unix default
except AsyncTimeoutError as e:
raise newException(IOError, "DNS server timeout: " & e.msg, e)
await receivedDataFuture or sleepAsync(5.seconds) #unix default
if not receivedDataFuture.finished:
raise newException(IOError, "DNS server timeout")
let rawResponse = sock.getMessage()
try:
parseResponse(string.fromBytes(rawResponse))
except IOError as exc:
raise newException(IOError, "Failed to parse DNS response: " & exc.msg, exc)
except OSError as exc:
raise newException(OSError, "Failed to parse DNS response: " & exc.msg, exc)
except ValueError as exc:
raise newException(ValueError, "Failed to parse DNS response: " & exc.msg, exc)
except Exception as exc:
# Nim 1.6: parseResponse can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert "Exception parsing DN response: " & exc.msg
# parseResponse can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
return parseResponse(string.fromBytes(rawResponse))
except CatchableError as exc: raise exc
except Exception as exc: raiseAssert exc.msg
finally:
await sock.closeWait()
method resolveIp*(
self: DnsResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.
async: (raises: [CancelledError, TransportAddressError])
.} =
self: DnsResolver,
address: string,
port: Port,
domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async.} =
trace "Resolving IP using DNS", address, servers = self.nameServers.mapIt($it), domain
for _ in 0 ..< self.nameServers.len:
let server = self.nameServers[0]
var responseFutures: seq[
Future[Response].Raising(
[CancelledError, IOError, OSError, TransportError, ValueError]
)
]
var responseFutures: seq[Future[Response]]
if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
responseFutures.add(getDnsResponse(server, address, A))
@@ -126,32 +110,27 @@ method resolveIp*(
var
resolvedAddresses: OrderedSet[string]
resolveFailed = false
template handleFail(e): untyped =
info "Failed to query DNS", address, error = e.msg
resolveFailed = true
break
for fut in responseFutures:
try:
let resp = await fut
for answer in resp.answers:
resolvedAddresses.incl(answer.toString())
# toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
resolvedAddresses.incl(
try: answer.toString()
except CatchableError as exc: raise exc
except Exception as exc: raiseAssert exc.msg
)
except CancelledError as e:
raise e
except ValueError as e:
info "Invalid DNS query", address, error = e.msg
info "Invalid DNS query", address, error=e.msg
return @[]
except IOError as e:
handleFail(e)
except OSError as e:
handleFail(e)
except TransportError as e:
handleFail(e)
except Exception as e:
# Nim 1.6: answer.toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert e.msg
except CatchableError as e:
info "Failed to query DNS", address, error=e.msg
resolveFailed = true
break
if resolveFailed:
self.nameServers.add(self.nameServers[0])
@@ -165,34 +144,25 @@ method resolveIp*(
return @[]
method resolveTxt*(
self: DnsResolver, address: string
): Future[seq[string]] {.async: (raises: [CancelledError]).} =
self: DnsResolver,
address: string): Future[seq[string]] {.async.} =
trace "Resolving TXT using DNS", address, servers = self.nameServers.mapIt($it)
for _ in 0 ..< self.nameServers.len:
let server = self.nameServers[0]
template handleFail(e): untyped =
info "Failed to query DNS", address, error = e.msg
self.nameServers.add(self.nameServers[0])
self.nameServers.delete(0)
continue
try:
let response = await getDnsResponse(server, address, TXT)
trace "Got TXT response",
server = $server, answer = response.answers.mapIt(it.toString())
trace "Got TXT response", server = $server, answer=response.answers.mapIt(it.toString())
return response.answers.mapIt(it.toString())
except CancelledError as e:
raise e
except IOError as e:
handleFail(e)
except OSError as e:
handleFail(e)
except TransportError as e:
handleFail(e)
except ValueError as e:
handleFail(e)
except CatchableError as e:
info "Failed to query DNS", address, error=e.msg
self.nameServers.add(self.nameServers[0])
self.nameServers.delete(0)
continue
except Exception as e:
# Nim 1.6: toString can have a raises: [Exception, ..] because of
# toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert e.msg
@@ -200,5 +170,7 @@ method resolveTxt*(
debug "Failed to resolve TXT, returning empty set"
return @[]
proc new*(T: typedesc[DnsResolver], nameServers: seq[TransportAddress]): T =
proc new*(
T: typedesc[DnsResolver],
nameServers: seq[TransportAddress]): T =
T(nameServers: nameServers)
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -7,9 +7,14 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/tables, chronos, chronicles
import
std/[streams, strutils, tables],
chronos, chronicles
import nameresolver
@@ -24,26 +29,21 @@ type MockResolver* = ref object of NameResolver
ipResponses*: Table[(string, bool), seq[string]]
method resolveIp*(
self: MockResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.
async: (raises: [CancelledError, TransportAddressError])
.} =
var res: seq[TransportAddress]
self: MockResolver,
address: string,
port: Port,
domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async.} =
if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
for resp in self.ipResponses.getOrDefault((address, false)):
res.add(initTAddress(resp, port))
result.add(initTAddress(resp, port))
if domain == Domain.AF_INET6 or domain == Domain.AF_UNSPEC:
for resp in self.ipResponses.getOrDefault((address, true)):
res.add(initTAddress(resp, port))
res
result.add(initTAddress(resp, port))
method resolveTxt*(
self: MockResolver, address: string
): Future[seq[string]] {.async: (raises: [CancelledError]).} =
self.txtResponses.getOrDefault(address)
self: MockResolver,
address: string): Future[seq[string]] {.async.} =
return self.txtResponses.getOrDefault(address)
proc new*(T: typedesc[MockResolver]): T =
T()
proc new*(T: typedesc[MockResolver]): T = T()
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -7,77 +7,79 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/[sets, sequtils, strutils]
import chronos, chronicles, stew/endians2
import std/[sugar, sets, sequtils, strutils]
import
chronos,
chronicles,
stew/[endians2, byteutils]
import ".."/[multiaddress, multicodec]
logScope:
topics = "libp2p nameresolver"
type NameResolver* = ref object of RootObj
type
NameResolver* = ref object of RootObj
method resolveTxt*(
self: NameResolver, address: string
): Future[seq[string]] {.async: (raises: [CancelledError]), base.} =
self: NameResolver,
address: string): Future[seq[string]] {.async, base.} =
## Get TXT record
raiseAssert "[NameResolver.resolveTxt] abstract method not implemented!"
##
doAssert(false, "Not implemented!")
method resolveIp*(
self: NameResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.
async: (raises: [CancelledError, TransportAddressError]), base
.} =
self: NameResolver,
address: string,
port: Port,
domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async, base.} =
## Resolve the specified address
raiseAssert "[NameResolver.resolveIp] abstract method not implemented!"
##
doAssert(false, "Not implemented!")
proc getHostname*(ma: MultiAddress): string =
let
firstPart = ma[0].valueOr:
return ""
firstPart = ma[0].valueOr: return ""
fpSplitted = ($firstPart).split('/', 2)
if fpSplitted.len > 2:
fpSplitted[2]
else:
""
if fpSplitted.len > 2: fpSplitted[2]
else: ""
proc resolveOneAddress(
self: NameResolver, ma: MultiAddress, domain: Domain = Domain.AF_UNSPEC, prefix = ""
): Future[seq[MultiAddress]] {.
async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
# Resolve a single address
let portPart = ma[1].valueOr:
raise maErr error
self: NameResolver,
ma: MultiAddress,
domain: Domain = Domain.AF_UNSPEC,
prefix = ""): Future[seq[MultiAddress]]
{.async, raises: [Defect, MaError, TransportAddressError].} =
#Resolve a single address
var pbuf: array[2, byte]
let plen = portPart.protoArgument(pbuf).valueOr:
raise maErr error
if plen == 0:
raise maErr "Incorrect port number"
var dnsval = getHostname(ma)
if ma[1].tryGet().protoArgument(pbuf).tryGet() == 0:
raise newException(MaError, "Incorrect port number")
let
port = Port(fromBytesBE(uint16, pbuf))
dnsval = getHostname(ma)
resolvedAddresses = await self.resolveIp(prefix & dnsval, port, domain)
resolvedAddresses.mapIt:
let address = MultiAddress.init(it).valueOr:
raise maErr error
var createdAddress = address[0].valueOr:
raise maErr error
for part in ma:
let part = part.valueOr:
raise maErr error
if DNS.match(part):
continue
createdAddress &= part
createdAddress
return collect(newSeqOfCap(4)):
for address in resolvedAddresses:
var createdAddress = MultiAddress.init(address).tryGet()[0].tryGet()
for part in ma:
if DNS.match(part.tryGet()): continue
createdAddress &= part.tryGet()
createdAddress
proc resolveDnsAddr*(
self: NameResolver, ma: MultiAddress, depth: int = 0
): Future[seq[MultiAddress]] {.
async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
self: NameResolver,
ma: MultiAddress,
depth: int = 0): Future[seq[MultiAddress]] {.async.} =
if not DNSADDR.matchPartial(ma):
return @[ma]
@@ -86,67 +88,52 @@ proc resolveDnsAddr*(
info "Stopping DNSADDR recursion, probably malicious", ma
return @[]
let
dnsval = getHostname(ma)
txt = await self.resolveTxt("_dnsaddr." & dnsval)
var dnsval = getHostname(ma)
let txt = await self.resolveTxt("_dnsaddr." & dnsval)
trace "txt entries", txt
const codec = multiCodec("p2p")
let maCodec = block:
let hasCodec = ma.contains(codec).valueOr:
raise maErr error
if hasCodec:
ma[codec]
else:
(static(default(MaResult[MultiAddress])))
var res: seq[MultiAddress]
var result: seq[MultiAddress]
for entry in txt:
if not entry.startsWith("dnsaddr="):
continue
let
entryValue = MultiAddress.init(entry[8 ..^ 1]).valueOr:
raise maErr error
entryHasCodec = entryValue.contains(multiCodec("p2p")).valueOr:
raise maErr error
if entryHasCodec and maCodec.isOk and entryValue[codec] != maCodec:
continue
if not entry.startsWith("dnsaddr="): continue
let entryValue = MultiAddress.init(entry[8..^1]).tryGet()
if entryValue.contains(multiCodec("p2p")).tryGet() and ma.contains(multiCodec("p2p")).tryGet():
if entryValue[multiCodec("p2p")] != ma[multiCodec("p2p")]:
continue
let resolved = await self.resolveDnsAddr(entryValue, depth + 1)
for r in resolved:
res.add(r)
result.add(r)
if res.len == 0:
if result.len == 0:
debug "Failed to resolve a DNSADDR", ma
res
return @[]
return result
proc resolveMAddress*(
self: NameResolver, address: MultiAddress
): Future[seq[MultiAddress]] {.
async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
self: NameResolver,
address: MultiAddress): Future[seq[MultiAddress]] {.async.} =
var res = initOrderedSet[MultiAddress]()
if not DNS.matchPartial(address):
res.incl(address)
else:
let
firstPart = address[0].valueOr:
raise maErr error
code = firstPart.protoCode().valueOr:
raise maErr error
ads =
case code
of multiCodec("dns"):
await self.resolveOneAddress(address)
of multiCodec("dns4"):
await self.resolveOneAddress(address, Domain.AF_INET)
of multiCodec("dns6"):
await self.resolveOneAddress(address, Domain.AF_INET6)
of multiCodec("dnsaddr"):
await self.resolveDnsAddr(address)
else:
raise maErr("Unsupported codec " & $code)
for ad in ads:
let code = address[0].get().protoCode().get()
let seq = case code:
of multiCodec("dns"):
await self.resolveOneAddress(address)
of multiCodec("dns4"):
await self.resolveOneAddress(address, Domain.AF_INET)
of multiCodec("dns6"):
await self.resolveOneAddress(address, Domain.AF_INET6)
of multiCodec("dnsaddr"):
await self.resolveDnsAddr(address)
else:
doAssert false
@[address]
for ad in seq:
res.incl(ad)
res.toSeq
return res.toSeq
Some files were not shown because too many files have changed in this diff.