mirror of https://github.com/vacp2p/nim-libp2p.git
synced 2026-01-10 17:18:27 -05:00

Compare commits — 71 Commits
lents ... version-1.
Commit SHA1 list:
41649f0999, 67102873ba, d40d324160, a677b06273, 6050cdef7e, fedfa8e817, 6887b43777, 225accd11b, 7d6bc545e0, a1eb53b181,
db629dca25, a5666789b0, b7726bf68f, 0221affe98, edbd35b16c, 80cca0ecac, 0041ed4cf8, 95e98e8c51, 4aa615c44c, 6b61ce8c91,
53b060f8f0, af5299f26c, bac754e2ad, 8d5ea43e2b, e573238705, c1a3bd8fee, ddeb7b3bd4, 382b992e00, 408dcf12bd, 0012b639c8,
f7f1e89669, f14ada3dcf, 444b837923, f89bd0c77c, e68186373b, 266c7b117a, 0e28d3b828, 4ace70d53b, ca19f8fdbf, 351bda2b56,
7d9c43a5ce, c11772c94e, 489c115132, 166c0d1c87, ba451196e8, 9f658c151e, e304ad0f7e, 5e3323d43f, 9532bff983, 676786b00e,
d521c57b82, 63e1872516, 67ef25fae0, fe7a69e389, a17cad710c, 3863a4cd21, 64cbbe1e0a, 31ad4ae205, b3d9360dfc, 1711c204ea,
c43aacdc81, 711609057c, 192cac6254, cc3c637c22, afbb1b4d3c, 8c2eca18dc, ce371f3bb4, 23338fceaa, 6ab6ab48ef, d9305bda84,
f95eda8bf6
.github/actions/install_nim/action.yml (new file, vendored, 131 lines)
@@ -0,0 +1,131 @@
name: Install Nim
inputs:
  os:
    description: "Operating system to build for"
    required: true
  cpu:
    description: "CPU to build for"
    default: "amd64"
  nim_branch:
    description: "Nim version"
    default: "version-1-6"
  shell:
    description: "Shell to run commands in"
    default: "bash --noprofile --norc -e -o pipefail"

runs:
  using: "composite"
  steps:
    - name: Install build dependencies (Linux i386)
      shell: ${{ inputs.shell }}
      if: inputs.os == 'Linux' && inputs.cpu == 'i386'
      run: |
        sudo dpkg --add-architecture i386
        sudo apt-get update -qq
        sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
          --no-install-recommends -yq gcc-multilib g++-multilib \
          libssl-dev:i386
        mkdir -p external/bin
        cat << EOF > external/bin/gcc
        #!/bin/bash
        exec $(which gcc) -m32 "\$@"
        EOF
        cat << EOF > external/bin/g++
        #!/bin/bash
        exec $(which g++) -m32 "\$@"
        EOF
        chmod 755 external/bin/gcc external/bin/g++
        echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH

    - name: MSYS2 (Windows i386)
      if: inputs.os == 'Windows' && inputs.cpu == 'i386'
      uses: msys2/setup-msys2@v2
      with:
        path-type: inherit
        msystem: MINGW32
        install: >-
          base-devel
          git
          mingw-w64-i686-toolchain

    - name: MSYS2 (Windows amd64)
      if: inputs.os == 'Windows' && inputs.cpu == 'amd64'
      uses: msys2/setup-msys2@v2
      with:
        path-type: inherit
        install: >-
          base-devel
          git
          mingw-w64-x86_64-toolchain

    - name: Restore Nim DLLs dependencies (Windows) from cache
      if: inputs.os == 'Windows'
      id: windows-dlls-cache
      uses: actions/cache@v3
      with:
        path: external/dlls
        key: 'dlls'

    - name: Install DLL dependencies (Windows)
      shell: ${{ inputs.shell }}
      if: >
        steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
        inputs.os == 'Windows'
      run: |
        mkdir external
        curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
        7z x external/windeps.zip -oexternal/dlls

    - name: Path to cached dependencies (Windows)
      shell: ${{ inputs.shell }}
      if: >
        inputs.os == 'Windows'
      run: |
        echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH

    - name: Derive environment variables
      shell: ${{ inputs.shell }}
      run: |
        if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
          PLATFORM=x64
        else
          PLATFORM=x86
        fi
        echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV

        ncpu=
        MAKE_CMD="make"
        case '${{ inputs.os }}' in
        'Linux')
          ncpu=$(nproc)
          ;;
        'macOS')
          ncpu=$(sysctl -n hw.ncpu)
          ;;
        'Windows')
          ncpu=$NUMBER_OF_PROCESSORS
          MAKE_CMD="mingw32-make"
          ;;
        esac
        [[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
        echo "ncpu=$ncpu" >> $GITHUB_ENV
        echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
        echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH

    - name: Restore Nim from cache
      id: nim-cache
      uses: actions/cache@v3
      with:
        path: '${{ github.workspace }}/nim'
        key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}

    - name: Build Nim and Nimble
      shell: ${{ inputs.shell }}
      if: ${{ steps.nim-cache.outputs.cache-hit != 'true' }}
      run: |
        # We don't want partial matches of the cache restored
        rm -rf nim
        curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
        env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_branch }} \
          QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
          bash build_nim.sh nim csources dist/nimble NimBinaries
.github/workflows/bumper.yml (vendored, 5 changed lines)
@@ -10,11 +10,12 @@ jobs:
  bumpProjects:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        target: [
          { repo: status-im/nimbus-eth2, branch: unstable },
          { repo: status-im/nwaku, branch: master },
          { repo: status-im/nim-codex, branch: main }
          { repo: waku-org/nwaku, branch: master },
          { repo: codex-storage/nim-codex, branch: master }
        ]
    steps:
      - name: Clone repo
.github/workflows/ci.yml (vendored, 122 changed lines)
@@ -7,6 +7,10 @@ on:
  pull_request:
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  build:
    timeout-minutes: 90
@@ -45,111 +49,20 @@ jobs:

    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
    runs-on: ${{ matrix.builder }}
    continue-on-error: ${{ matrix.branch == 'version-1-6' || matrix.branch == 'devel' }}
    continue-on-error: ${{ matrix.branch == 'devel' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          submodules: true

      - name: Install build dependencies (Linux i386)
        if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
        run: |
          sudo dpkg --add-architecture i386
          sudo apt-get update -qq
          sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
            --no-install-recommends -yq gcc-multilib g++-multilib \
            libssl-dev:i386
          mkdir -p external/bin
          cat << EOF > external/bin/gcc
          #!/bin/bash
          exec $(which gcc) -m32 "\$@"
          EOF
          cat << EOF > external/bin/g++
          #!/bin/bash
          exec $(which g++) -m32 "\$@"
          EOF
          chmod 755 external/bin/gcc external/bin/g++
          echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH

      - name: MSYS2 (Windows i386)
        if: runner.os == 'Windows' && matrix.target.cpu == 'i386'
        uses: msys2/setup-msys2@v2
      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          path-type: inherit
          msystem: MINGW32
          install: >-
            base-devel
            git
            mingw-w64-i686-toolchain

      - name: MSYS2 (Windows amd64)
        if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
        uses: msys2/setup-msys2@v2
        with:
          path-type: inherit
          install: >-
            base-devel
            git
            mingw-w64-x86_64-toolchain

      - name: Restore Nim DLLs dependencies (Windows) from cache
        if: runner.os == 'Windows'
        id: windows-dlls-cache
        uses: actions/cache@v2
        with:
          path: external/dlls
          key: 'dlls'

      - name: Install DLL dependencies (Windows)
        if: >
          steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
          runner.os == 'Windows'
        run: |
          mkdir external
          curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
          7z x external/windeps.zip -oexternal/dlls

      - name: Path to cached dependencies (Windows)
        if: >
          runner.os == 'Windows'
        run: |
          echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH

      - name: Derive environment variables
        run: |
          if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
            PLATFORM=x64
          else
            PLATFORM=x86
          fi
          echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV

          ncpu=
          MAKE_CMD="make"
          case '${{ runner.os }}' in
          'Linux')
            ncpu=$(nproc)
            ;;
          'macOS')
            ncpu=$(sysctl -n hw.ncpu)
            ;;
          'Windows')
            ncpu=$NUMBER_OF_PROCESSORS
            MAKE_CMD="mingw32-make"
            ;;
          esac
          [[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
          echo "ncpu=$ncpu" >> $GITHUB_ENV
          echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV

      - name: Build Nim and Nimble
        run: |
          curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
          env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ matrix.branch }} \
            QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
            bash build_nim.sh nim csources dist/nimble NimBinaries
          echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH
          os: ${{ matrix.target.os }}
          cpu: ${{ matrix.target.cpu }}
          shell: ${{ matrix.shell }}
          nim_branch: ${{ matrix.branch }}

      - name: Setup Go
        uses: actions/setup-go@v2
@@ -160,9 +73,20 @@ jobs:
        run: |
          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3

      - name: Restore deps from cache
        id: deps-cache
        uses: actions/cache@v3
        with:
          path: nimbledeps
          key: nimbledeps-${{ hashFiles('.pinned') }}

      - name: Install deps
        if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
        run: |
          nimble install_pinned

      - name: Run tests
        run: |
          nim --version
          nimble --version
          nimble install_pinned
          nimble test
.github/workflows/codecov.yml (vendored, 151 changed lines)
@@ -5,136 +5,61 @@ on:
  push:
    branches:
      - master
      - unstable
  pull_request:
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  GossipSub:
  Coverage:
    runs-on: ubuntu-20.04
    strategy:
      matrix:
        nim-options: [
          "",
          "-d:libp2p_pubsub_anonymize=true -d:libp2p_pubsub_sign=false -d:libp2p_pubsub_verify=false",
          "-d:libp2p_pubsub_sign=true -d:libp2p_pubsub_verify=true"
        ]
        test-program: [
          "tests/pubsub/testpubsub",
          "tests/pubsub/testfloodsub",
          "tests/pubsub/testgossipinternal"
        ]
    env:
      CICOV: YES
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          os: linux
          cpu: amd64
          shell: bash

      - name: Restore deps from cache
        id: deps-cache
        uses: actions/cache@v3
        with:
          path: nimbledeps
          key: nimbledeps-${{ hashFiles('.pinned') }}

      - name: Install deps
        if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
        run: |
          nimble install_pinned

      - name: Run
        run: |
          sudo apt-get update
          sudo apt-get install -y lcov build-essential git curl
          mkdir coverage
          curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
          env MAKE="make -j${NPROC}" bash build_nim.sh Nim csources dist/nimble NimBinaries
          export PATH="$PATH:$PWD/Nim/bin"
          nimble install_pinned
          export NIM_OPTIONS="--opt:speed -d:debug --verbosity:0 --hints:off --lineDir:on -d:chronicles_log_level=INFO --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off --nimcache:nimcache --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage ${{ matrix.nim-options }}"
          nim c $NIM_OPTIONS -r ${{ matrix.test-program }}
          cd nimcache; rm *.c; cd ..
          export NIMFLAGS="--lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage"
          nimble testnative
          nimble testpubsub
          nimble testfilter
          find nimcache -name *.c -delete
          lcov --capture --directory nimcache --output-file coverage/coverage.info
          shopt -s globstar
          ls `pwd`/libp2p/{*,**/*}.nim
          lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
          export COV_UUID=`cksum <<< "${{ matrix.test-program }} $NIM_OPTIONS" | cut -f 1 -d ' '`
          genhtml coverage/coverage.f.info --output-directory coverage/$COV_UUID-output
          echo ${{ matrix.test-program }} > coverage/$COV_UUID-nim_options.txt
          echo $NIM_OPTIONS >> coverage/$COV_UUID-nim_options.txt
          genhtml coverage/coverage.f.info --output-directory coverage/output
          bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
      - uses: actions/upload-artifact@master
        with:
          name: coverage
          path: coverage

  Tests:
    runs-on: ubuntu-20.04
    strategy:
      matrix:
        nim-options: [
          ""
        ]
        test-program: [
          "tests/testnative",
        ]
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Run
        run: |
          sudo apt-get update
          sudo apt-get install -y lcov build-essential git curl
          mkdir coverage
          curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
          env MAKE="make -j${NPROC}" bash build_nim.sh Nim csources dist/nimble NimBinaries
          export PATH="$PATH:$PWD/Nim/bin"
          nimble install_pinned
          export NIM_OPTIONS="--opt:speed -d:debug --verbosity:0 --hints:off --lineDir:on -d:chronicles_log_level=INFO --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off --nimcache:nimcache --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage ${{ matrix.nim-options }} --clearNimblePath --NimblePath:nimbledeps/pkgs"
          nim c $NIM_OPTIONS -r ${{ matrix.test-program }}
          cd nimcache; rm *.c; cd ..
          lcov --capture --directory nimcache --output-file coverage/coverage.info
          shopt -s globstar
          ls `pwd`/libp2p/{*,**/*}.nim
          lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
          export COV_UUID=`cksum <<< "${{ matrix.test-program }} $NIM_OPTIONS" | cut -f 1 -d ' '`
          genhtml coverage/coverage.f.info --output-directory coverage/$COV_UUID-output
          echo ${{ matrix.test-program }} > coverage/$COV_UUID-nim_options.txt
          echo $NIM_OPTIONS >> coverage/$COV_UUID-nim_options.txt
          bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
      - uses: actions/upload-artifact@master
        with:
          name: coverage
          path: coverage

  Filter:
    runs-on: ubuntu-20.04
    strategy:
      matrix:
        nim-options: [
          "",
          "-d:libp2p_pki_schemes=secp256k1",
          "-d:libp2p_pki_schemes=secp256k1;ed25519",
          "-d:libp2p_pki_schemes=secp256k1;ed25519;ecnist",
        ]
        test-program: [
          "tests/testpkifilter",
        ]
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Run
        run: |
          sudo apt-get update
          sudo apt-get install -y lcov build-essential git curl
          mkdir coverage
          curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
          env MAKE="make -j${NPROC}" bash build_nim.sh Nim csources dist/nimble NimBinaries
          export PATH="$PATH:$PWD/Nim/bin"
          nimble install_pinned
          export NIM_OPTIONS="--opt:speed -d:debug --verbosity:0 --hints:off --lineDir:on -d:chronicles_log_level=INFO --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off --nimcache:nimcache --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage ${{ matrix.nim-options }}"
          nim c $NIM_OPTIONS -r ${{ matrix.test-program }}
          cd nimcache; rm *.c; cd ..
          lcov --capture --directory nimcache --output-file coverage/coverage.info
          shopt -s globstar
          ls `pwd`/libp2p/{*,**/*}.nim
          lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
          export COV_UUID=`cksum <<< "${{ matrix.test-program }} $NIM_OPTIONS" | cut -f 1 -d ' '`
          genhtml coverage/coverage.f.info --output-directory coverage/$COV_UUID-output
          echo ${{ matrix.test-program }} > coverage/$COV_UUID-nim_options.txt
          echo $NIM_OPTIONS >> coverage/$COV_UUID-nim_options.txt
          bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
      - uses: actions/upload-artifact@master
        with:
          name: coverage
          path: coverage

      #- uses: actions/upload-artifact@master
      #  with:
      #    name: coverage
      #    path: coverage
.github/workflows/interop.yml (new file, vendored, 54 lines)
@@ -0,0 +1,54 @@
name: Interoperability Testing
on:
  pull_request:
  push:
    branches:
      - unstable
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  run-multidim-interop:
    name: Run multidimensional interoperability tests
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
        with:
          repository: libp2p/test-plans
          submodules: true
          fetch-depth: 0

      - name: Build image
        run: >
          cd multidim-interop/impl/nim/v1.0 &&
          make commitSha=$GITHUB_SHA image_name=nim-libp2p-head

      - name: Create ping-version.json
        run: >
          (cat << EOF
          {
            "id": "nim-libp2p-head",
            "containerImageID": "nim-libp2p-head",
            "transports": [
              "tcp",
              "ws"
            ],
            "secureChannels": [
              "noise"
            ],
            "muxers": [
              "mplex",
              "yamux"
            ]
          }
          EOF

          ) > ${{ github.workspace }}/test_head.json

      - uses: libp2p/test-plans/.github/actions/run-interop-ping-test@master
        with:
          test-filter: nim-libp2p-head
          extra-versions: ${{ github.workspace }}/test_head.json
.github/workflows/multi_nim.yml (vendored, 123 changed lines)
@@ -5,7 +5,13 @@ on:
  workflow_dispatch:

jobs:
  delete-cache:
    runs-on: ubuntu-latest
    steps:
      - uses: snnaplab/delete-branch-cache-action@v1

  build:
    needs: delete-cache
    timeout-minutes: 120
    strategy:
      fail-fast: false
@@ -42,112 +48,18 @@ jobs:

    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
    runs-on: ${{ matrix.builder }}
    continue-on-error: ${{ matrix.branch == 'version-1-6' || matrix.branch == 'devel' }}
    continue-on-error: ${{ matrix.branch == 'devel' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          ref: unstable
          submodules: true

      - name: Install build dependencies (Linux i386)
        if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
        run: |
          sudo dpkg --add-architecture i386
          sudo apt-get update -qq
          sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
            --no-install-recommends -yq gcc-multilib g++-multilib \
            libssl-dev:i386
          mkdir -p external/bin
          cat << EOF > external/bin/gcc
          #!/bin/bash
          exec $(which gcc) -m32 "\$@"
          EOF
          cat << EOF > external/bin/g++
          #!/bin/bash
          exec $(which g++) -m32 "\$@"
          EOF
          chmod 755 external/bin/gcc external/bin/g++
          echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH

      - name: MSYS2 (Windows i386)
        if: runner.os == 'Windows' && matrix.target.cpu == 'i386'
        uses: msys2/setup-msys2@v2
        with:
          path-type: inherit
          msystem: MINGW32
          install: >-
            base-devel
            git
            mingw-w64-i686-toolchain

      - name: MSYS2 (Windows amd64)
        if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
        uses: msys2/setup-msys2@v2
        with:
          path-type: inherit
          install: >-
            base-devel
            git
            mingw-w64-x86_64-toolchain

      - name: Restore Nim DLLs dependencies (Windows) from cache
        if: runner.os == 'Windows'
        id: windows-dlls-cache
        uses: actions/cache@v2
        with:
          path: external/dlls
          key: 'dlls'

      - name: Install DLL dependencies (Windows)
        if: >
          steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
          runner.os == 'Windows'
        run: |
          mkdir external
          curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
          7z x external/windeps.zip -oexternal/dlls

      - name: Path to cached dependencies (Windows)
        if: >
          runner.os == 'Windows'
        run: |
          echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH

      - name: Derive environment variables
        run: |
          if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
            PLATFORM=x64
          else
            PLATFORM=x86
          fi
          echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV

          ncpu=
          MAKE_CMD="make"
          case '${{ runner.os }}' in
          'Linux')
            ncpu=$(nproc)
            ;;
          'macOS')
            ncpu=$(sysctl -n hw.ncpu)
            ;;
          'Windows')
            ncpu=$NUMBER_OF_PROCESSORS
            MAKE_CMD="mingw32-make"
            ;;
          esac
          [[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
          echo "ncpu=$ncpu" >> $GITHUB_ENV
          echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV

      - name: Build Nim and Nimble
        run: |
          curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
          env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ matrix.branch }} \
            QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
            bash build_nim.sh nim csources dist/nimble NimBinaries
          echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH
          os: ${{ matrix.target.os }}
          shell: ${{ matrix.shell }}
          nim_branch: ${{ matrix.branch }}
          cpu: ${{ matrix.target.cpu }}

      - name: Setup Go
        uses: actions/setup-go@v2
@@ -163,9 +75,8 @@ jobs:
          nim --version
          nimble --version
          nimble install -y --depsOnly
          nimble test
          if [[ "${{ matrix.branch }}" == "version-1-6" || "${{ matrix.branch }}" == "devel" ]]; then
          NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
          if [[ "${{ matrix.branch }}" == "devel" ]]; then
            echo -e "\nTesting with '--gc:orc':\n"
            export NIMFLAGS="${NIMFLAGS} --gc:orc"
            nimble test
          NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
          fi
.pinned (28 changed lines)
@@ -1,16 +1,16 @@
bearssl;https://github.com/status-im/nim-bearssl@#f4c4233de453cb7eac0ce3f3ffad6496295f83ab
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#266e2c0ed26b455872bccb3ddbd316815a283659
bearssl;https://github.com/status-im/nim-bearssl@#acf9645e328bdcab481cfda1c158e07ecd46bd7b
chronicles;https://github.com/status-im/nim-chronicles@#1e6350870855541b381d77d4659688bc0d2c4227
chronos;https://github.com/status-im/nim-chronos@#ab5a8c2e0f6941fe3debd61dff0293790079d1b0
dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823
faststreams;https://github.com/status-im/nim-faststreams@#6112432b3a81d9db116cd5d64c39648881cfff29
httputils;https://github.com/status-im/nim-http-utils@#e88e231dfcef4585fe3b2fbd9b664dbd28a88040
json_serialization;https://github.com/status-im/nim-json-serialization@#e5b18fb710c3d0167ec79f3b892f5a7a1bc6d1a4
metrics;https://github.com/status-im/nim-metrics@#0a6477268e850d7bc98347b3875301524871765f
nimcrypto;https://github.com/cheatfate/nimcrypto@#24e006df85927f64916e60511620583b11403178
secp256k1;https://github.com/status-im/nim-secp256k1@#c7f1a37d9b0f17292649bfed8bf6cef83cf4221f
serialization;https://github.com/status-im/nim-serialization@#493d18b8292fc03aa4f835fd825dea1183f97466
stew;https://github.com/status-im/nim-stew@#23da07c9b59c0ba3d4efa7e4e6e2c4121ae5a156
faststreams;https://github.com/status-im/nim-faststreams@#814f8927e1f356f39219f37f069b83066bcc893a
httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f
json_serialization;https://github.com/status-im/nim-json-serialization@#a7d815ed92f200f490c95d3cfd722089cc923ce6
metrics;https://github.com/status-im/nim-metrics@#abf3acc7f06cee9ee2c287d2f31413dc3df4c04e
nimcrypto;https://github.com/cheatfate/nimcrypto@#4014ef939b51e02053c2e16dd3481d47bc9267dd
secp256k1;https://github.com/status-im/nim-secp256k1@#fd173fdff863ce2e211cf64c9a03bc7539fe40b0
serialization;https://github.com/status-im/nim-serialization@#5b7cea55efeb074daa8abd8146a03a34adb4521a
stew;https://github.com/status-im/nim-stew@#003fe9f0c83c2b0b2ccbd37087e6d1ccd30a3234
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
unittest2;https://github.com/status-im/nim-unittest2@#f180f596c88dfd266f746ed6f8dbebce39c824db
websock;https://github.com/status-im/nim-websock@#acbe30e9ca1e51dcbbfe4c552ee6f16c7eede538
zlib;https://github.com/status-im/nim-zlib@#6a6670afba6b97b29b920340e2641978c05ab4d8
unittest2;https://github.com/status-im/nim-unittest2@#883c7a50ad3b82158e64d074c5578fe33ab3c452
websock;https://github.com/status-im/nim-websock@#fea05cde8b123b38d1a0a8524b77efbc84daa848
zlib;https://github.com/status-im/nim-zlib@#826e2fc013f55b4478802d4f2e39f187c50d520a
README.md (57 changed lines)
@@ -2,7 +2,7 @@
<a href="https://libp2p.io"><img width="250" src="./.assets/full-logo.svg?raw=true" alt="nim-libp2p logo" /></a>
</h1>

<h3 align="center">The Nim implementation of the libp2p Networking Stack.</h3>
<h3 align="center">The <a href="https://nim-lang.org/">Nim</a> implementation of the <a href="https://libp2p.io/">libp2p</a> Networking Stack.</h3>

<p align="center">
<a href="https://github.com/status-im/nim-libp2p/actions"><img src="https://github.com/status-im/nim-libp2p/actions/workflows/ci.yml/badge.svg" /></a>
@@ -16,30 +16,26 @@
<img src="https://img.shields.io/badge/nim-%3E%3D1.2.0-orange.svg?style=flat-square" />
</p>

## Introduction

An implementation of [libp2p](https://libp2p.io/) in [Nim](https://nim-lang.org/).

# Table of Contents
- [Background](#background)
- [Install](#install)
- [Getting Started](#getting-started)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
- [Development](#development)
- [Contribute](#contribute)
- [Core Developers](#core-developers)
- [Contributors](#contributors)
- [Core Maintainers](#core-maintainers)
- [License](#license)

## Background
libp2p is a networking stack and library modularized out of [The IPFS Project](https://github.com/ipfs/ipfs), and bundled separately for other tools to use.
libp2p is a [Peer-to-Peer](https://en.wikipedia.org/wiki/Peer-to-peer) networking stack, with [implementations](https://github.com/libp2p/libp2p#implementations) in multiple languages derived from the same [specifications.](https://github.com/libp2p/specs)

libp2p is the product of a long and arduous quest of understanding; a deep dive into the internet's network stack and the peer-to-peer protocols from the past. Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It is a "network stack", a suite of networking protocols that cleanly separates concerns and enables sophisticated applications to only use the protocols they absolutely need, without giving up interoperability and upgradeability.
Building large-scale peer-to-peer systems has been complex and difficult over the last 15 years, and libp2p is a way to fix that. It strives to be a modular stack with sane and secure defaults and useful protocols, while remaining open and extensible.
This implementation is written in native Nim and relies on [chronos](https://github.com/status-im/nim-chronos) for async. It's used in production by a few [projects](#users).

libp2p grew out of IPFS, but it is built so that lots of people can use it, for lots of different projects.

- Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow our evolving documentation efforts at [**docs.libp2p.io**](https://docs.libp2p.io).
- [Here](https://github.com/libp2p/libp2p#description) is an overview of libp2p and its implementations in other programming languages.
Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p's documentation at [**docs.libp2p.io**](https://docs.libp2p.io).

## Install
**Prerequisite**
@@ -49,7 +45,7 @@ nimble install libp2p
```

## Getting Started
You'll find the documentation [here](https://status-im.github.io/nim-libp2p/docs/).
You'll find the nim-libp2p documentation [here](https://status-im.github.io/nim-libp2p/docs/).

**Go Daemon:**
Please find the installation and usage instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
@@ -63,25 +59,28 @@ List of packages modules implemented in nim-libp2p:
| **Libp2p** | |
| [libp2p](libp2p/switch.nim) | The core of the project |
| [connmanager](libp2p/connmanager.nim) | Connection manager |
| [identify / push identify](libp2p/protocols/identify.nim) | [Identify](https://docs.libp2p.io/concepts/protocols/#identify) protocol |
| [ping](libp2p/protocols/ping.nim) | [Ping](https://docs.libp2p.io/concepts/protocols/#ping) protocol |
| [identify / push identify](libp2p/protocols/identify.nim) | [Identify](https://docs.libp2p.io/concepts/fundamentals/protocols/#identify) protocol |
| [ping](libp2p/protocols/ping.nim) | [Ping](https://docs.libp2p.io/concepts/fundamentals/protocols/#ping) protocol |
| [libp2p-daemon-client](libp2p/daemon/daemonapi.nim) | [go-daemon](https://github.com/libp2p/go-libp2p-daemon) nim wrapper |
| [interop-libp2p](tests/testinterop.nim) | Interop tests |
| **Transports** | |
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
| [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
| **Secure Channels** | |
| [libp2p-secio](libp2p/protocols/secure/secio.nim) | [Secio](https://docs.libp2p.io/concepts/protocols/#secio) secure channel |
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://github.com/libp2p/specs/tree/master/noise) secure channel |
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | [Plain Text](https://github.com/libp2p/specs/tree/master/plaintext) for development purposes |
| [libp2p-secio](libp2p/protocols/secure/secio.nim) | Secio secure channel |
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
| **Stream Multiplexers** | |
| [libp2p-mplex](libp2p/muxers/mplex/mplex.nim) | [MPlex](https://github.com/libp2p/specs/tree/master/mplex) multiplexer |
| [libp2p-yamux](libp2p/muxers/yamux/yamux.nim) | [Yamux](https://docs.libp2p.io/concepts/multiplex/yamux/) multiplexer |
| **Data Types** | |
| [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/peer-id/) |
| [peer-store](libp2p/peerstore.nim) | ["Phone book" of known peers](https://docs.libp2p.io/concepts/peer-id/#peerinfo) |
| [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id) |
| [peer-store](libp2p/peerstore.nim) | ["Address book" of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
| [multiaddress](libp2p/multiaddress.nim) | [Composable network addresses](https://github.com/multiformats/multiaddr) |
| [signed envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
| [routing record](libp2p/routing_record.nim) | [Signed peer dialing information](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
| [discovery manager](libp2p/discovery/discoverymngr.nim) | Discovery Manager |
| **Utilities** | |
| [libp2p-crypto](libp2p/crypto) | Cryptographic backend |
| [libp2p-crypto-secp256k1](libp2p/crypto/secp.nim) | |
@@ -113,7 +112,10 @@ Clone and Install dependencies:
```sh
git clone https://github.com/status-im/nim-libp2p
cd nim-libp2p
# to use dependencies computed by nimble
nimble install -dy
# OR to install the dependencies versions used in CI
nimble install_pinned
```

Run unit tests:
@@ -133,8 +135,19 @@ The libp2p implementation in Nim is a work in progress. We welcome contributors

The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).

### Core Developers
[@cheatfate](https://github.com/cheatfate), [Dmitriy Ryajov](https://github.com/dryajov), [Tanguy](https://github.com/Menduist), [Zahary Karadjov](https://github.com/zah)
### Contributors
<a href="https://github.com/status-im/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=status-im/nim-libp2p" alt="nim-libp2p contributors"></a>

### Core Maintainers
<table>
<tbody>
<tr>
<td align="center"><a href="https://github.com/Menduist"><img src="https://avatars.githubusercontent.com/u/13471753?v=4?s=100" width="100px;" alt="Tanguy"/><br /><sub><b>Tanguy (Menduist)</b></sub></a></td>
<td align="center"><a href="https://github.com/lchenut"><img src="https://avatars.githubusercontent.com/u/11214565?v=4?s=100" width="100px;" alt="Ludovic"/><br /><sub><b>Ludovic</b></sub></a></td>
<td align="center"><a href="https://github.com/diegomrsantos"><img src="https://avatars.githubusercontent.com/u/7316595?v=4?s=100" width="100px;" alt="Diego"/><br /><sub><b>Diego</b></sub></a></td>
</tr>
</tbody>
</table>

### Compile time flags
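The README's Install and Getting Started sections above reduce to a very small program. The following is an illustrative sketch only, not text from the README: the names used (`newRng`, `newStandardSwitch`, `Ping.new`, `mount`) come from the modules table and the linked tutorials, but treat the exact signatures as assumptions to verify against the current docs.

```nim
import chronos
import libp2p
import libp2p/protocols/ping

proc main() {.async.} =
  let
    rng = newRng()                         # RNG shared by the switch and ping
    switch = newStandardSwitch(rng = rng)  # TCP + Noise + default muxers
    pingProto = Ping.new(rng = rng)        # handler for /ipfs/ping/1.0.0
  switch.mount(pingProto)                  # serve ping on this switch
  await switch.start()
  echo "Listening on ", switch.peerInfo.addrs
  await switch.stop()

waitFor main()
```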
@@ -1,14 +1,8 @@
codecov:
  notify:
    require_ci_to_pass: true
    # must be the number of coverage report builds
    # notice that this number is for PRs;
    # like this we disabled notify on pure branches report
    # which is fine I guess
    after_n_builds: 28
comment:
  layout: "reach, diff, flags, files"
  after_n_builds: 28 # must be the number of coverage report builds
coverage:
  status:
    project:
@@ -16,4 +10,4 @@ coverage:
      # basic settings
      target: auto
      threshold: 5%
      base: auto
      base: auto
@@ -1,6 +1,8 @@
# to allow locking
if dirExists("nimbledeps/pkgs"):
  switch("NimblePath", "nimbledeps/pkgs")
if dirExists("nimbledeps/pkgs2"):
  switch("NimblePath", "nimbledeps/pkgs2")

switch("warning", "CaseTransition:off")
switch("warning", "ObservableStores:off")
@@ -10,6 +12,7 @@ switch("warning", "LockLevel:off")
if (NimMajor, NimMinor) < (1, 6):
  --styleCheck:hint
else:
  switch("warningAsError", "UseBase:on")
  --styleCheck:error

# Avoid some rare stack corruption while using exceptions with a SEH-enabled
@@ -1,6 +1,5 @@
# nim-libp2p documentation
# nim-libp2p examples

Welcome to the nim-libp2p documentation!
In this folder, you'll find the sources of the [nim-libp2p website](https://status-im.github.io/nim-libp2p/docs/)

Here, you'll find [tutorials](tutorial_1_connect.md) to help you get started, as well as
the [full reference](https://status-im.github.io/nim-libp2p/master/libp2p.html).
We recommend following the tutorials on the website, but feel free to grok the sources here!
@@ -57,8 +57,7 @@ proc main() {.async.} =
  let
    # Create a relay address to swDst using swRel as the relay
    addrs = MultiAddress.init($swRel.peerInfo.addrs[0] & "/p2p/" &
                              $swRel.peerInfo.peerId & "/p2p-circuit/p2p/" &
                              $swDst.peerInfo.peerId).get()
                              $swRel.peerInfo.peerId & "/p2p-circuit").get()

  # Connect Dst to the relay
  await swDst.connect(swRel.peerInfo.peerId, swRel.peerInfo.addrs)
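For context on the shortened address above: the relay address now ends at "/p2p-circuit", and the destination peer id is supplied separately when dialing. The sketch below is hypothetical (a dialer switch `swSrc` does not appear in the hunk), but it follows the same `MultiAddress.init`/`connect` calls used in the diff.

```nim
# Hypothetical dialer side: reuse "<relay addr>/p2p/<relay id>/p2p-circuit"
# and pass the destination's peer id as a separate argument to connect.
let relayedAddr = MultiAddress.init(
  $swRel.peerInfo.addrs[0] & "/p2p/" & $swRel.peerInfo.peerId &
  "/p2p-circuit").get()
await swSrc.connect(swDst.peerInfo.peerId, @[relayedAddr])
```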
examples/index.md (new file, 6 lines)
@@ -0,0 +1,6 @@
# nim-libp2p documentation

Welcome to the nim-libp2p documentation!

Here, you'll find [tutorials](tutorial_1_connect.md) to help you get started, as well as
the [full reference](https://status-im.github.io/nim-libp2p/master/libp2p.html).
@@ -32,7 +32,7 @@ proc new(T: typedesc[TestProto]): T =
    # We must close the connections ourselves when we're done with it
    await conn.close()

  return T(codecs: @[TestCodec], handler: handle)
  return T.new(codecs = @[TestCodec], handler = handle)

## This is a constructor for our `TestProto`, that will specify our `codecs` and a `handler`, which will be called for each incoming peer asking for this protocol.
## In our handle, we simply read a message from the connection and `echo` it.
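The hunk above switches the tutorial from constructing the object directly to calling the base `LPProtocol` constructor with `codecs` and `handler`. A hedged, self-contained version of that pattern is sketched below; the codec string and proc names are placeholders, and only the `T.new(codecs = ..., handler = ...)` line is taken from the diff itself.

```nim
import chronos
import stew/byteutils
import libp2p

const TestCodec = "/test/proto/1.0.0"   # illustrative codec string

type TestProto = ref object of LPProtocol

proc new(T: typedesc[TestProto]): T =
  # Stream handler invoked for every negotiated stream using our codec
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
    echo "received: ", string.fromBytes(await conn.readLp(1024))
    await conn.close()
  # Base-class constructor, as in the updated tutorial line above
  T.new(codecs = @[TestCodec], handler = handle)
```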
@@ -107,7 +107,7 @@ type
    metricGetter: MetricCallback

proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
  let res = MetricProto(metricGetter: cb)
  var res: MetricProto
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
    let
      metrics = await res.metricGetter()
@@ -115,8 +115,8 @@ proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
    await conn.writeLp(asProtobuf.buffer)
    await conn.close()

  res.codecs = @["/metric-getter/1.0.0"]
  res.handler = handle
  res = MetricProto.new(@["/metric-getter/1.0.0"], handle)
  res.metricGetter = cb
  return res

proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
|
||||
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
|
||||
await conn.close()
|
||||
return T(codecs: @[DumbCodec], handler: handle)
|
||||
return T.new(codecs = @[DumbCodec], handler = handle)
|
||||
|
||||
## ## Bootnodes
|
||||
## The first time a p2p program is ran, he needs to know how to join
|
||||
|
||||
@@ -157,7 +157,7 @@ proc new(T: typedesc[GameProto], g: Game): T =
    # The handler of a protocol must wait for the stream to
    # be finished before returning
    await conn.join()
  return T(codecs: @["/tron/1.0.0"], handler: handle)
  return T.new(codecs = @["/tron/1.0.0"], handler = handle)

proc networking(g: Game) {.async.} =
  # Create our switch, similar to the GossipSub example and
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -48,12 +48,9 @@ else:
    stream/connection,
    transports/transport,
    transports/tcptransport,
    transports/wstransport,
    protocols/secure/noise,
    protocols/ping,
    cid,
    multihash,
    multibase,
    multicodec,
    errors,
    switch,
@@ -1,7 +1,7 @@
mode = ScriptMode.Verbose

packageName = "libp2p"
version = "1.0.0"
version = "1.1.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
@@ -16,18 +16,20 @@ requires "nim >= 1.2.0",
  "metrics",
  "secp256k1",
  "stew#head",
  "websock"
  "websock",
  "unittest2 >= 0.0.5 & < 0.1.0"

import hashes
proc runTest(filename: string, verify: bool = true, sign: bool = true,
             moreoptions: string = "") =
  var excstr = "nim c --skipParentCfg --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics -d:libp2p_mplex_metrics "
  excstr.add(" -d:chronicles_sinks=textlines[stdout],json[dynamic] -d:chronicles_log_level=TRACE ")
  excstr.add(" -d:chronicles_runtime_filtering=TRUE ")
  var excstr = "nim c --skipParentCfg --opt:speed -d:debug "
  excstr.add(" " & getEnv("NIMFLAGS") & " ")
  excstr.add(" --verbosity:0 --hints:off ")
  excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
  excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
  excstr.add(" " & moreoptions & " ")
  if getEnv("CICOV").len > 0:
    excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
  exec excstr & " -r " & " tests/" & filename
  rmFile "tests/" & filename.toExe

@@ -108,6 +110,7 @@ task examples_build, "Build the samples":
  buildSample("tutorial_4_gossipsub", true)
  buildSample("tutorial_5_discovery", true)
  # Nico doesn't work in 1.2
  exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
  exec "nimble install -y nico"
  buildSample("tutorial_6_game", false, "--styleCheck:off")
@@ -134,10 +137,24 @@ task install_pinned, "Reads the lockfile":

  # Remove the automatically installed deps
  # (inefficient you say?)
  let allowedDirectories = toInstall.mapIt(it[0] & "-" & it[1].split('@')[1])
  for dependency in listDirs("nimbledeps/pkgs"):
    if dependency.extractFilename notin allowedDirectories:
      rmDir(dependency)
  let nimblePkgs =
    if system.dirExists("nimbledeps/pkgs"): "nimbledeps/pkgs"
    else: "nimbledeps/pkgs2"
  for dependency in listDirs(nimblePkgs):
    let
      fileName = dependency.extractFilename
      fileContent = readFile(dependency & "/nimblemeta.json")
      packageName = fileName.split('-')[0]

    if toInstall.anyIt(
        it[0] == packageName and
        (
          it[1].split('#')[^1] in fileContent or # nimble for nim 2.X
          fileName.endsWith(it[1].split('#')[^1]) # nimble for nim 1.X
        )
      ) == false or
      fileName.split('-')[^1].len < 20: # safegard for nimble for nim 1.X
      rmDir(dependency)

task unpin, "Restore global package use":
  rmDir("nimbledeps")
@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -27,7 +27,7 @@ import
  crypto/crypto, transports/[transport, tcptransport],
  muxers/[muxer, mplex/mplex, yamux/yamux],
  protocols/[identify, secure/secure, secure/noise, rendezvous],
  protocols/connectivity/[autonat, relay/relay, relay/client, relay/rtransport],
  protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
  connmanager, upgrademngrs/muxedupgrade,
  nameresolving/nameresolver,
  errors, utility
@@ -61,6 +61,7 @@ type
    autonat: bool
    circuitRelay: Relay
    rdv: RendezVous
    services: seq[Service]

proc new*(T: type[SwitchBuilder]): T {.public.} =
  ## Creates a SwitchBuilder
@@ -199,6 +200,10 @@ proc withRendezVous*(b: SwitchBuilder, rdv: RendezVous = RendezVous.new()): Swit
  b.rdv = rdv
  b

proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
  b.services = services
  b

proc build*(b: SwitchBuilder): Switch
  {.raises: [Defect, LPError], public.} =

@@ -225,7 +230,7 @@ proc build*(b: SwitchBuilder): Switch
    identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
    connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
    ms = MultistreamSelect.new()
    muxedUpgrade = MuxedUpgrade.new(identify, b.muxers, secureManagerInstances, connManager, ms)
    muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, connManager, ms)

  let
    transports = block:
@@ -242,19 +247,21 @@ proc build*(b: SwitchBuilder): Switch

  let peerStore =
    if isSome(b.peerStoreCapacity):
      PeerStore.new(b.peerStoreCapacity.get())
      PeerStore.new(identify, b.peerStoreCapacity.get())
    else:
      PeerStore.new()
      PeerStore.new(identify)

  let switch = newSwitch(
    peerInfo = peerInfo,
    transports = transports,
    identity = identify,
    secureManagers = secureManagerInstances,
    connManager = connManager,
    ms = ms,
    nameResolver = b.nameResolver,
    peerStore = peerStore)
    peerStore = peerStore,
    services = b.services)

  switch.mount(identify)

  if b.autonat:
    let autonat = Autonat.new(switch)
@@ -291,9 +298,10 @@ proc newStandardSwitch*(
    peerStoreCapacity = 1000): Switch
    {.raises: [Defect, LPError], public.} =
  ## Helper for common switch configurations.

  {.push warning[Deprecated]:off.}
  if SecureProtocol.Secio in secureManagers:
    quit("Secio is deprecated!") # use of secio is unsafe
  {.pop.}

  let addrs = when addrs is MultiAddress: @[addrs] else: addrs
  var b = SwitchBuilder
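The new `withServices` builder call above takes a `seq[Service]` and forwards it to `newSwitch`. The sketch below shows how it would slot into a typical builder chain; only `withServices` and `withRendezVous` are taken from the diff, while the transport, security, and muxer calls are the usual SwitchBuilder ones and should be treated as assumptions.

```nim
import chronos
import libp2p
import libp2p/builders

proc makeSwitch(): Switch =
  # Services could be maintenance/autonat-style Service objects; left empty here.
  let services: seq[Service] = @[]
  SwitchBuilder.new()
    .withRng(newRng())
    .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
    .withTcpTransport()
    .withMplex()
    .withNoise()
    .withRendezVous()        # mounts rendezvous, per the diff above
    .withServices(services)  # new in this release
    .build()
```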
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -32,6 +32,7 @@ const
|
||||
|
||||
type
|
||||
TooManyConnectionsError* = object of LPError
|
||||
AlreadyExpectingConnectionError* = object of LPError
|
||||
|
||||
ConnEventKind* {.pure.} = enum
|
||||
Connected, # A connection was made and securely upgraded - there may be
|
||||
@@ -54,7 +55,6 @@ type
|
||||
|
||||
PeerEventKind* {.pure.} = enum
|
||||
Left,
|
||||
Identified,
|
||||
Joined
|
||||
|
||||
PeerEvent* = object
|
||||
@@ -67,18 +67,14 @@ type
|
||||
PeerEventHandler* =
|
||||
proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [Defect].}
|
||||
|
||||
MuxerHolder = object
|
||||
muxer: Muxer
|
||||
handle: Future[void]
|
||||
|
||||
ConnManager* = ref object of RootObj
|
||||
maxConnsPerPeer: int
|
||||
inSema*: AsyncSemaphore
|
||||
outSema*: AsyncSemaphore
|
||||
conns: Table[PeerId, HashSet[Connection]]
|
||||
muxed: Table[Connection, MuxerHolder]
|
||||
muxed: Table[PeerId, seq[Muxer]]
|
||||
connEvents: array[ConnEventKind, OrderedSet[ConnEventHandler]]
|
||||
peerEvents: array[PeerEventKind, OrderedSet[PeerEventHandler]]
|
||||
expectedConnectionsOverLimit*: Table[(PeerId, Direction), Future[Muxer]]
|
||||
peerStore*: PeerStore
|
||||
|
||||
ConnectionSlot* = object
|
||||
@@ -108,35 +104,30 @@ proc new*(C: type ConnManager,
|
||||
outSema: outSema)
|
||||
|
||||
proc connCount*(c: ConnManager, peerId: PeerId): int =
|
||||
c.conns.getOrDefault(peerId).len
|
||||
c.muxed.getOrDefault(peerId).len
|
||||
|
||||
proc connectedPeers*(c: ConnManager, dir: Direction): seq[PeerId] =
|
||||
var peers = newSeq[PeerId]()
|
||||
for peerId, mux in c.muxed:
|
||||
if mux.anyIt(it.connection.dir == dir):
|
||||
peers.add(peerId)
|
||||
return peers
|
||||
|
||||
proc getConnections*(c: ConnManager): Table[PeerId, seq[Muxer]] =
|
||||
return c.muxed
|
||||
|
||||
proc addConnEventHandler*(c: ConnManager,
|
||||
handler: ConnEventHandler,
|
||||
kind: ConnEventKind) =
|
||||
## Add peer event handler - handlers must not raise exceptions!
|
||||
##
|
||||
|
||||
try:
|
||||
if isNil(handler): return
|
||||
c.connEvents[kind].incl(handler)
|
||||
except Exception as exc:
|
||||
# TODO: there is an Exception being raised
|
||||
# somewhere in the depths of the std.
|
||||
# Might be related to https://github.com/nim-lang/Nim/issues/17382
|
||||
|
||||
raiseAssert exc.msg
|
||||
if isNil(handler): return
|
||||
c.connEvents[kind].incl(handler)
|
||||
|
||||
proc removeConnEventHandler*(c: ConnManager,
|
||||
handler: ConnEventHandler,
|
||||
kind: ConnEventKind) =
|
||||
try:
|
||||
c.connEvents[kind].excl(handler)
|
||||
except Exception as exc:
|
||||
# TODO: there is an Exception being raised
|
||||
# somewhere in the depths of the std.
|
||||
# Might be related to https://github.com/nim-lang/Nim/issues/17382
|
||||
|
||||
raiseAssert exc.msg
|
||||
|
||||
proc triggerConnEvent*(c: ConnManager,
|
||||
peerId: PeerId,
|
||||
@@ -163,26 +154,12 @@ proc addPeerEventHandler*(c: ConnManager,
|
||||
##
|
||||
|
||||
if isNil(handler): return
|
||||
try:
|
||||
c.peerEvents[kind].incl(handler)
|
||||
except Exception as exc:
|
||||
# TODO: there is an Exception being raised
|
||||
# somewhere in the depths of the std.
|
||||
# Might be related to https://github.com/nim-lang/Nim/issues/17382
|
||||
|
||||
raiseAssert exc.msg
|
||||
c.peerEvents[kind].incl(handler)
|
||||
|
||||
proc removePeerEventHandler*(c: ConnManager,
|
||||
handler: PeerEventHandler,
|
||||
kind: PeerEventKind) =
|
||||
try:
|
||||
c.peerEvents[kind].excl(handler)
|
||||
except Exception as exc:
|
||||
# TODO: there is an Exception being raised
|
||||
# somewhere in the depths of the std.
|
||||
# Might be related to https://github.com/nim-lang/Nim/issues/17382
|
||||
|
||||
raiseAssert exc.msg
|
||||
c.peerEvents[kind].excl(handler)
|
||||
|
||||
proc triggerPeerEvents*(c: ConnManager,
|
||||
peerId: PeerId,
|
||||
@@ -193,14 +170,6 @@ proc triggerPeerEvents*(c: ConnManager,
|
||||
return
|
||||
|
||||
try:
|
||||
let count = c.connCount(peerId)
|
||||
if event.kind == PeerEventKind.Joined and count != 1:
|
||||
trace "peer already joined", peer = peerId, event = $event
|
||||
return
|
||||
elif event.kind == PeerEventKind.Left and count != 0:
|
||||
trace "peer still connected or already left", peer = peerId, event = $event
|
||||
return
|
||||
|
||||
trace "triggering peer events", peer = peerId, event = $event
|
||||
|
||||
var peerEvents: seq[Future[void]]
|
||||
@@ -213,18 +182,22 @@ proc triggerPeerEvents*(c: ConnManager,
|
||||
except CatchableError as exc: # handlers should not raise!
|
||||
warn "Exception in triggerPeerEvents", exc = exc.msg, peer = peerId
|
||||
|
||||
proc contains*(c: ConnManager, conn: Connection): bool =
|
||||
## checks if a connection is being tracked by the
|
||||
## connection manager
|
||||
##
|
||||
proc expectConnection*(c: ConnManager, p: PeerId, dir: Direction): Future[Muxer] {.async.} =
|
||||
## Wait for a peer to connect to us. This will bypass the `MaxConnectionsPerPeer`
|
||||
let key = (p, dir)
|
||||
if key in c.expectedConnectionsOverLimit:
|
||||
raise newException(AlreadyExpectingConnectionError, "Already expecting an incoming connection from that peer")
|
||||
|
||||
if isNil(conn):
|
||||
return
|
||||
let future = newFuture[Muxer]()
|
||||
c.expectedConnectionsOverLimit[key] = future
|
||||
|
||||
return conn in c.conns.getOrDefault(conn.peerId)
|
||||
try:
|
||||
return await future
|
||||
finally:
|
||||
c.expectedConnectionsOverLimit.del(key)
|
||||
|
||||
proc contains*(c: ConnManager, peerId: PeerId): bool =
peerId in c.conns
peerId in c.muxed

proc contains*(c: ConnManager, muxer: Muxer): bool =
## checks if a muxer is being tracked by the connection
@@ -232,184 +205,140 @@ proc contains*(c: ConnManager, muxer: Muxer): bool =
##

if isNil(muxer):
return
return false

let conn = muxer.connection
if conn notin c:
return
return muxer in c.muxed.getOrDefault(conn.peerId)

if conn notin c.muxed:
return
proc closeMuxer(muxer: Muxer) {.async.} =
trace "Cleaning up muxer", m = muxer

return muxer == c.muxed.getOrDefault(conn).muxer

proc closeMuxerHolder(muxerHolder: MuxerHolder) {.async.} =
trace "Cleaning up muxer", m = muxerHolder.muxer

await muxerHolder.muxer.close()
if not(isNil(muxerHolder.handle)):
await muxer.close()
if not(isNil(muxer.handler)):
try:
await muxerHolder.handle # TODO noraises?
await muxer.handler # TODO noraises?
except CatchableError as exc:
trace "Exception in close muxer handler", exc = exc.msg
trace "Cleaned up muxer", m = muxerHolder.muxer

proc delConn(c: ConnManager, conn: Connection) =
let peerId = conn.peerId
c.conns.withValue(peerId, peerConns):
peerConns[].excl(conn)

if peerConns[].len == 0:
c.conns.del(peerId) # invalidates `peerConns`

libp2p_peers.set(c.conns.len.int64)
trace "Removed connection", conn

proc cleanupConn(c: ConnManager, conn: Connection) {.async.} =
## clean connection's resources such as muxers and streams

if isNil(conn):
trace "Wont cleanup a nil connection"
return

# Remove connection from all tables without async breaks
var muxer = some(MuxerHolder())
if not c.muxed.pop(conn, muxer.get()):
muxer = none(MuxerHolder)

delConn(c, conn)
trace "Cleaned up muxer", m = muxer

proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
try:
if muxer.isSome:
await closeMuxerHolder(muxer.get())
finally:
await conn.close()
trace "Triggering disconnect events", mux
let peerId = mux.connection.peerId

trace "Connection cleaned up", conn
let muxers = c.muxed.getOrDefault(peerId).filterIt(it != mux)
if muxers.len > 0:
c.muxed[peerId] = muxers
else:
c.muxed.del(peerId)
libp2p_peers.set(c.muxed.len.int64)
await c.triggerPeerEvents(peerId, PeerEvent(kind: PeerEventKind.Left))

proc onConnUpgraded(c: ConnManager, conn: Connection) {.async.} =
try:
trace "Triggering connect events", conn
conn.upgrade()
if not(c.peerStore.isNil):
c.peerStore.cleanup(peerId)

let peerId = conn.peerId
await c.triggerPeerEvents(
peerId, PeerEvent(kind: PeerEventKind.Joined, initiator: conn.dir == Direction.Out))

await c.triggerConnEvent(
peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: conn.dir == Direction.In))
except CatchableError as exc:
# This is top-level procedure which will work as separate task, so it
# do not need to propagate CancelledError and should handle other errors
warn "Unexpected exception in switch peer connection cleanup",
conn, msg = exc.msg

proc peerCleanup(c: ConnManager, conn: Connection) {.async.} =
try:
trace "Triggering disconnect events", conn
let peerId = conn.peerId
await c.triggerConnEvent(
peerId, ConnEvent(kind: ConnEventKind.Disconnected))
await c.triggerPeerEvents(peerId, PeerEvent(kind: PeerEventKind.Left))

if not(c.peerStore.isNil):
c.peerStore.cleanup(peerId)
except CatchableError as exc:
# This is top-level procedure which will work as separate task, so it
# do not need to propagate CancelledError and should handle other errors
warn "Unexpected exception peer cleanup handler",
conn, msg = exc.msg
mux, msg = exc.msg

proc onClose(c: ConnManager, conn: Connection) {.async.} =
proc onClose(c: ConnManager, mux: Muxer) {.async.} =
## connection close even handler
##
## triggers the connections resource cleanup
##
try:
await conn.join()
trace "Connection closed, cleaning up", conn
await c.cleanupConn(conn)
except CancelledError:
# This is top-level procedure which will work as separate task, so it
# do not need to propagate CancelledError.
debug "Unexpected cancellation in connection manager's cleanup", conn
await mux.connection.join()
trace "Connection closed, cleaning up", mux
except CatchableError as exc:
debug "Unexpected exception in connection manager's cleanup",
errMsg = exc.msg, conn
errMsg = exc.msg, mux
finally:
trace "Triggering peerCleanup", conn
asyncSpawn c.peerCleanup(conn)
await c.muxCleanup(mux)

proc selectConn*(c: ConnManager,
proc selectMuxer*(c: ConnManager,
peerId: PeerId,
dir: Direction): Connection =
dir: Direction): Muxer =
## Select a connection for the provided peer and direction
##
let conns = toSeq(
c.conns.getOrDefault(peerId))
.filterIt( it.dir == dir )
c.muxed.getOrDefault(peerId))
.filterIt( it.connection.dir == dir )

if conns.len > 0:
return conns[0]

proc selectConn*(c: ConnManager, peerId: PeerId): Connection =
proc selectMuxer*(c: ConnManager, peerId: PeerId): Muxer =
## Select a connection for the provided giving priority
## to outgoing connections
##

var conn = c.selectConn(peerId, Direction.Out)
if isNil(conn):
conn = c.selectConn(peerId, Direction.In)
if isNil(conn):
var mux = c.selectMuxer(peerId, Direction.Out)
if isNil(mux):
mux = c.selectMuxer(peerId, Direction.In)
if isNil(mux):
trace "connection not found", peerId
return mux

return conn

proc selectMuxer*(c: ConnManager, conn: Connection): Muxer =
## select the muxer for the provided connection
proc storeMuxer*(c: ConnManager,
muxer: Muxer)
{.raises: [Defect, CatchableError].} =
## store the connection and muxer
##

if isNil(conn):
return
if isNil(muxer):
raise newException(LPError, "muxer cannot be nil")

if conn in c.muxed:
return c.muxed.getOrDefault(conn).muxer
else:
debug "no muxer for connection", conn
if isNil(muxer.connection):
raise newException(LPError, "muxer's connection cannot be nil")

proc storeConn*(c: ConnManager, conn: Connection)
{.raises: [Defect, LPError].} =
## store a connection
##

if isNil(conn):
raise newException(LPError, "Connection cannot be nil")

if conn.closed or conn.atEof:
if muxer.connection.closed or muxer.connection.atEof:
raise newException(LPError, "Connection closed or EOF")

let peerId = conn.peerId
if c.conns.getOrDefault(peerId).len > c.maxConnsPerPeer:
debug "Too many connections for peer",
conn, conns = c.conns.getOrDefault(peerId).len
let
peerId = muxer.connection.peerId
dir = muxer.connection.dir

raise newTooManyConnectionsError()
# we use getOrDefault in the if below instead of [] to avoid the KeyError
if c.muxed.getOrDefault(peerId).len > c.maxConnsPerPeer:
let key = (peerId, dir)
let expectedConn = c.expectedConnectionsOverLimit.getOrDefault(key)
if expectedConn != nil and not expectedConn.finished:
expectedConn.complete(muxer)
else:
debug "Too many connections for peer",
conns = c.muxed.getOrDefault(peerId).len

c.conns.mgetOrPut(peerId, HashSet[Connection]()).incl(conn)
libp2p_peers.set(c.conns.len.int64)
raise newTooManyConnectionsError()

# Launch on close listener
# All the errors are handled inside `onClose()` procedure.
asyncSpawn c.onClose(conn)
assert muxer notin c.muxed.getOrDefault(peerId)

trace "Stored connection",
conn, direction = $conn.dir, connections = c.conns.len
let
newPeer = peerId notin c.muxed
assert newPeer or c.muxed[peerId].len > 0
c.muxed.mgetOrPut(peerId, newSeq[Muxer]()).add(muxer)
libp2p_peers.set(c.muxed.len.int64)

asyncSpawn c.triggerConnEvent(
peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: dir == Direction.In))

if newPeer:
asyncSpawn c.triggerPeerEvents(
peerId, PeerEvent(kind: PeerEventKind.Joined, initiator: dir == Direction.Out))

asyncSpawn c.onClose(muxer)

trace "Stored muxer",
muxer, direction = $muxer.connection.dir, peers = c.muxed.len

proc getIncomingSlot*(c: ConnManager): Future[ConnectionSlot] {.async.} =
await c.inSema.acquire()
return ConnectionSlot(connManager: c, direction: In)

proc getOutgoingSlot*(c: ConnManager, forceDial = false): Future[ConnectionSlot] {.async.} =
proc getOutgoingSlot*(c: ConnManager, forceDial = false): ConnectionSlot {.raises: [Defect, TooManyConnectionsError].} =
if forceDial:
c.outSema.forceAcquire()
elif not c.outSema.tryAcquire():
@@ -418,6 +347,13 @@ proc getOutgoingSlot*(c: ConnManager, forceDial = false): Future[ConnectionSlot]
raise newTooManyConnectionsError()
return ConnectionSlot(connManager: c, direction: Out)

proc slotsAvailable*(c: ConnManager, dir: Direction): int =
case dir:
of Direction.In:
return c.inSema.count
of Direction.Out:
return c.outSema.count

proc release*(cs: ConnectionSlot) =
if cs.direction == In:
cs.connManager.inSema.release()
@@ -439,39 +375,17 @@ proc trackConnection*(cs: ConnectionSlot, conn: Connection) =

asyncSpawn semaphoreMonitor()

proc storeMuxer*(c: ConnManager,
muxer: Muxer,
handle: Future[void] = nil)
{.raises: [Defect, CatchableError].} =
## store the connection and muxer
##

if isNil(muxer):
raise newException(CatchableError, "muxer cannot be nil")

if isNil(muxer.connection):
raise newException(CatchableError, "muxer's connection cannot be nil")

if muxer.connection notin c:
raise newException(CatchableError, "cant add muxer for untracked connection")

c.muxed[muxer.connection] = MuxerHolder(
muxer: muxer,
handle: handle)

trace "Stored muxer",
muxer, handle = not handle.isNil, connections = c.conns.len

asyncSpawn c.onConnUpgraded(muxer.connection)
proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
if isNil(mux):
cs.release()
return
cs.trackConnection(mux.connection)

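A rough usage sketch of the connection-slot API above (the dialing helper is hypothetical and not part of this diff): the dialer reserves an outgoing slot, dials, and hands the resulting muxer to the slot so the slot is released once the connection closes.

  # Sketch only: `dialPeer` is a placeholder for whatever performs the actual dial.
  proc dialWithSlot(c: ConnManager, peerId: PeerId): Future[Muxer] {.async.} =
    let slot = c.getOutgoingSlot()        # may raise TooManyConnectionsError
    try:
      let mux = await dialPeer(peerId)    # hypothetical dial step
      slot.trackMuxer(mux)                # slot is released when mux.connection closes
      return mux
    except CatchableError as exc:
      slot.release()
      raise exc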
proc getStream*(c: ConnManager,
peerId: PeerId,
dir: Direction): Future[Connection] {.async, gcsafe.} =
## get a muxed stream for the provided peer
## with the given direction
muxer: Muxer): Future[Connection] {.async, gcsafe.} =
## get a muxed stream for the passed muxer
##

let muxer = c.selectMuxer(c.selectConn(peerId, dir))
if not(isNil(muxer)):
return await muxer.newStream()

@@ -480,40 +394,25 @@ proc getStream*(c: ConnManager,
## get a muxed stream for the passed peer from any connection
##

let muxer = c.selectMuxer(c.selectConn(peerId))
if not(isNil(muxer)):
return await muxer.newStream()
return await c.getStream(c.selectMuxer(peerId))

proc getStream*(c: ConnManager,
conn: Connection): Future[Connection] {.async, gcsafe.} =
## get a muxed stream for the passed connection
peerId: PeerId,
dir: Direction): Future[Connection] {.async, gcsafe.} =
## get a muxed stream for the passed peer from a connection with `dir`
##

let muxer = c.selectMuxer(conn)
if not(isNil(muxer)):
return await muxer.newStream()
return await c.getStream(c.selectMuxer(peerId, dir))


proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
## drop connections and cleanup resources for peer
##
trace "Dropping peer", peerId
let conns = c.conns.getOrDefault(peerId)
for conn in conns:
trace "Removing connection", conn
delConn(c, conn)

var muxers: seq[MuxerHolder]
for conn in conns:
if conn in c.muxed:
muxers.add c.muxed[conn]
c.muxed.del(conn)
let muxers = c.muxed.getOrDefault(peerId)

for muxer in muxers:
await closeMuxerHolder(muxer)

for conn in conns:
await conn.close()
trace "Dropped peer", peerId
await closeMuxer(muxer)

trace "Peer dropped", peerId

@@ -523,17 +422,18 @@ proc close*(c: ConnManager) {.async.} =
##

trace "Closing ConnManager"
let conns = c.conns
c.conns.clear()

let muxed = c.muxed
c.muxed.clear()

for _, muxer in muxed:
await closeMuxerHolder(muxer)
let expected = c.expectedConnectionsOverLimit
c.expectedConnectionsOverLimit.clear()

for _, conns2 in conns:
for conn in conns2:
await conn.close()
for _, fut in expected:
await fut.cancelAndWait()

for _, muxers in muxed:
for mux in muxers:
await closeMuxer(mux)

trace "Closed ConnManager"

@@ -1,5 +1,5 @@
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -22,7 +22,7 @@ else:
|
||||
|
||||
import bearssl/blockx
|
||||
from stew/assign2 import assign
|
||||
from stew/ranges/ptr_arith import baseAddr
|
||||
from stew/ptrops import baseAddr
|
||||
|
||||
const
|
||||
ChaChaPolyKeySize = 32
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2022-2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -20,7 +20,7 @@ when (NimMajor, NimMinor) < (1, 4):
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import bearssl/[ec, rand, hash]
|
||||
import bearssl/[ec, rand]
|
||||
import stew/results
|
||||
from stew/assign2 import assign
|
||||
export results
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -25,6 +25,9 @@ import nimcrypto/utils as ncrutils
|
||||
import minasn1
|
||||
export minasn1.Asn1Error
|
||||
import stew/[results, ctops]
|
||||
|
||||
import ../utility
|
||||
|
||||
export results
|
||||
|
||||
const
|
||||
@@ -74,7 +77,7 @@ type
|
||||
EcResult*[T] = Result[T, EcError]
|
||||
|
||||
const
|
||||
EcSupportedCurvesCint* = {cint(Secp256r1), cint(Secp384r1), cint(Secp521r1)}
|
||||
EcSupportedCurvesCint* = @[cint(Secp256r1), cint(Secp384r1), cint(Secp521r1)]
|
||||
|
||||
proc `-`(x: uint32): uint32 {.inline.} =
|
||||
result = (0xFFFF_FFFF'u32 - x) + 1'u32
|
||||
@@ -243,7 +246,7 @@ proc random*(
|
||||
var res = new EcPrivateKey
|
||||
if ecKeygen(addr rng.vtable, ecimp,
|
||||
addr res.key, addr res.buffer[0],
|
||||
cast[cint](kind)) == 0:
|
||||
safeConvert[cint](kind)) == 0:
|
||||
err(EcKeyGenError)
|
||||
else:
|
||||
ok(res)
|
||||
@@ -630,11 +633,11 @@ proc init*(key: var EcPrivateKey, data: openArray[byte]): Result[void, Asn1Error
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
if oid == Asn1OidSecp256r1:
|
||||
curve = cast[cint](Secp256r1)
|
||||
curve = safeConvert[cint](Secp256r1)
|
||||
elif oid == Asn1OidSecp384r1:
|
||||
curve = cast[cint](Secp384r1)
|
||||
curve = safeConvert[cint](Secp384r1)
|
||||
elif oid == Asn1OidSecp521r1:
|
||||
curve = cast[cint](Secp521r1)
|
||||
curve = safeConvert[cint](Secp521r1)
|
||||
else:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
@@ -684,11 +687,11 @@ proc init*(pubkey: var EcPublicKey, data: openArray[byte]): Result[void, Asn1Err
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
if oid == Asn1OidSecp256r1:
|
||||
curve = cast[cint](Secp256r1)
|
||||
curve = safeConvert[cint](Secp256r1)
|
||||
elif oid == Asn1OidSecp384r1:
|
||||
curve = cast[cint](Secp384r1)
|
||||
curve = safeConvert[cint](Secp384r1)
|
||||
elif oid == Asn1OidSecp521r1:
|
||||
curve = cast[cint](Secp521r1)
|
||||
curve = safeConvert[cint](Secp521r1)
|
||||
else:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
@@ -774,13 +777,13 @@ proc initRaw*(key: var EcPrivateKey, data: openArray[byte]): bool =
|
||||
## Procedure returns ``true`` on success, ``false`` otherwise.
|
||||
var curve: cint
|
||||
if len(data) == SecKey256Length:
|
||||
curve = cast[cint](Secp256r1)
|
||||
curve = safeConvert[cint](Secp256r1)
|
||||
result = true
|
||||
elif len(data) == SecKey384Length:
|
||||
curve = cast[cint](Secp384r1)
|
||||
curve = safeConvert[cint](Secp384r1)
|
||||
result = true
|
||||
elif len(data) == SecKey521Length:
|
||||
curve = cast[cint](Secp521r1)
|
||||
curve = safeConvert[cint](Secp521r1)
|
||||
result = true
|
||||
if result:
|
||||
result = false
|
||||
@@ -805,13 +808,13 @@ proc initRaw*(pubkey: var EcPublicKey, data: openArray[byte]): bool =
|
||||
if len(data) > 0:
|
||||
if data[0] == 0x04'u8:
|
||||
if len(data) == PubKey256Length:
|
||||
curve = cast[cint](Secp256r1)
|
||||
curve = safeConvert[cint](Secp256r1)
|
||||
result = true
|
||||
elif len(data) == PubKey384Length:
|
||||
curve = cast[cint](Secp384r1)
|
||||
curve = safeConvert[cint](Secp384r1)
|
||||
result = true
|
||||
elif len(data) == PubKey521Length:
|
||||
curve = cast[cint](Secp521r1)
|
||||
curve = safeConvert[cint](Secp521r1)
|
||||
result = true
|
||||
if result:
|
||||
result = false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -22,6 +22,9 @@ import nimcrypto/[hash, sha2]
|
||||
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
|
||||
import nimcrypto/utils as ncrutils
|
||||
import stew/[results, ctops]
|
||||
|
||||
import ../../utility
|
||||
|
||||
export results
|
||||
|
||||
# This workaround needed because of some bugs in Nim Static[T].
|
||||
@@ -170,15 +173,15 @@ proc feCopy(h: var Fe, f: Fe) =
|
||||
h[9] = f9
|
||||
|
||||
proc load_3(inp: openArray[byte]): uint64 =
|
||||
result = cast[uint64](inp[0])
|
||||
result = result or (cast[uint64](inp[1]) shl 8)
|
||||
result = result or (cast[uint64](inp[2]) shl 16)
|
||||
result = safeConvert[uint64](inp[0])
|
||||
result = result or (safeConvert[uint64](inp[1]) shl 8)
|
||||
result = result or (safeConvert[uint64](inp[2]) shl 16)
|
||||
|
||||
proc load_4(inp: openArray[byte]): uint64 =
|
||||
result = cast[uint64](inp[0])
|
||||
result = result or (cast[uint64](inp[1]) shl 8)
|
||||
result = result or (cast[uint64](inp[2]) shl 16)
|
||||
result = result or (cast[uint64](inp[3]) shl 24)
|
||||
result = safeConvert[uint64](inp[0])
|
||||
result = result or (safeConvert[uint64](inp[1]) shl 8)
|
||||
result = result or (safeConvert[uint64](inp[2]) shl 16)
|
||||
result = result or (safeConvert[uint64](inp[3]) shl 24)
|
||||
|
||||
proc feFromBytes(h: var Fe, s: openArray[byte]) =
|
||||
var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9: int64
|
||||
@@ -299,106 +302,106 @@ proc feMul(h: var Fe, f, g: Fe) =
|
||||
var f5_2 = 2 * f5
|
||||
var f7_2 = 2 * f7
|
||||
var f9_2 = 2 * f9
|
||||
var f0g0 = cast[int64](f0) * cast[int64](g0)
|
||||
var f0g1 = cast[int64](f0) * cast[int64](g1)
|
||||
var f0g2 = cast[int64](f0) * cast[int64](g2)
|
||||
var f0g3 = cast[int64](f0) * cast[int64](g3)
|
||||
var f0g4 = cast[int64](f0) * cast[int64](g4)
|
||||
var f0g5 = cast[int64](f0) * cast[int64](g5)
|
||||
var f0g6 = cast[int64](f0) * cast[int64](g6)
|
||||
var f0g7 = cast[int64](f0) * cast[int64](g7)
|
||||
var f0g8 = cast[int64](f0) * cast[int64](g8)
|
||||
var f0g9 = cast[int64](f0) * cast[int64](g9)
|
||||
var f1g0 = cast[int64](f1) * cast[int64](g0)
|
||||
var f1g1_2 = cast[int64](f1_2) * cast[int64](g1)
|
||||
var f1g2 = cast[int64](f1) * cast[int64](g2)
|
||||
var f1g3_2 = cast[int64](f1_2) * cast[int64](g3)
|
||||
var f1g4 = cast[int64](f1) * cast[int64](g4)
|
||||
var f1g5_2 = cast[int64](f1_2) * cast[int64](g5)
|
||||
var f1g6 = cast[int64](f1) * cast[int64](g6)
|
||||
var f1g7_2 = cast[int64](f1_2) * cast[int64](g7)
|
||||
var f1g8 = cast[int64](f1) * cast[int64](g8)
|
||||
var f1g9_38 = cast[int64](f1_2) * cast[int64](g9_19)
|
||||
var f2g0 = cast[int64](f2) * cast[int64](g0)
|
||||
var f2g1 = cast[int64](f2) * cast[int64](g1)
|
||||
var f2g2 = cast[int64](f2) * cast[int64](g2)
|
||||
var f2g3 = cast[int64](f2) * cast[int64](g3)
|
||||
var f2g4 = cast[int64](f2) * cast[int64](g4)
|
||||
var f2g5 = cast[int64](f2) * cast[int64](g5)
|
||||
var f2g6 = cast[int64](f2) * cast[int64](g6)
|
||||
var f2g7 = cast[int64](f2) * cast[int64](g7)
|
||||
var f2g8_19 = cast[int64](f2) * cast[int64](g8_19)
|
||||
var f2g9_19 = cast[int64](f2) * cast[int64](g9_19)
|
||||
var f3g0 = cast[int64](f3) * cast[int64](g0)
|
||||
var f3g1_2 = cast[int64](f3_2) * cast[int64](g1)
|
||||
var f3g2 = cast[int64](f3) * cast[int64](g2)
|
||||
var f3g3_2 = cast[int64](f3_2) * cast[int64](g3)
|
||||
var f3g4 = cast[int64](f3) * cast[int64](g4)
|
||||
var f3g5_2 = cast[int64](f3_2) * cast[int64](g5)
|
||||
var f3g6 = cast[int64](f3) * cast[int64](g6)
|
||||
var f3g7_38 = cast[int64](f3_2) * cast[int64](g7_19)
|
||||
var f3g8_19 = cast[int64](f3) * cast[int64](g8_19)
|
||||
var f3g9_38 = cast[int64](f3_2) * cast[int64](g9_19)
|
||||
var f4g0 = cast[int64](f4) * cast[int64](g0)
|
||||
var f4g1 = cast[int64](f4) * cast[int64](g1)
|
||||
var f4g2 = cast[int64](f4) * cast[int64](g2)
|
||||
var f4g3 = cast[int64](f4) * cast[int64](g3)
|
||||
var f4g4 = cast[int64](f4) * cast[int64](g4)
|
||||
var f4g5 = cast[int64](f4) * cast[int64](g5)
|
||||
var f4g6_19 = cast[int64](f4) * cast[int64](g6_19)
|
||||
var f4g7_19 = cast[int64](f4) * cast[int64](g7_19)
|
||||
var f4g8_19 = cast[int64](f4) * cast[int64](g8_19)
|
||||
var f4g9_19 = cast[int64](f4) * cast[int64](g9_19)
|
||||
var f5g0 = cast[int64](f5) * cast[int64](g0)
|
||||
var f5g1_2 = cast[int64](f5_2) * cast[int64](g1)
|
||||
var f5g2 = cast[int64](f5) * cast[int64](g2)
|
||||
var f5g3_2 = cast[int64](f5_2) * cast[int64](g3)
|
||||
var f5g4 = cast[int64](f5) * cast[int64](g4)
|
||||
var f5g5_38 = cast[int64](f5_2) * cast[int64](g5_19)
|
||||
var f5g6_19 = cast[int64](f5) * cast[int64](g6_19)
|
||||
var f5g7_38 = cast[int64](f5_2) * cast[int64](g7_19)
|
||||
var f5g8_19 = cast[int64](f5) * cast[int64](g8_19)
|
||||
var f5g9_38 = cast[int64](f5_2) * cast[int64](g9_19)
|
||||
var f6g0 = cast[int64](f6) * cast[int64](g0)
|
||||
var f6g1 = cast[int64](f6) * cast[int64](g1)
|
||||
var f6g2 = cast[int64](f6) * cast[int64](g2)
|
||||
var f6g3 = cast[int64](f6) * cast[int64](g3)
|
||||
var f6g4_19 = cast[int64](f6) * cast[int64](g4_19)
|
||||
var f6g5_19 = cast[int64](f6) * cast[int64](g5_19)
|
||||
var f6g6_19 = cast[int64](f6) * cast[int64](g6_19)
|
||||
var f6g7_19 = cast[int64](f6) * cast[int64](g7_19)
|
||||
var f6g8_19 = cast[int64](f6) * cast[int64](g8_19)
|
||||
var f6g9_19 = cast[int64](f6) * cast[int64](g9_19)
|
||||
var f7g0 = cast[int64](f7) * cast[int64](g0)
|
||||
var f7g1_2 = cast[int64](f7_2) * cast[int64](g1)
|
||||
var f7g2 = cast[int64](f7) * cast[int64](g2)
|
||||
var f7g3_38 = cast[int64](f7_2) * cast[int64](g3_19)
|
||||
var f7g4_19 = cast[int64](f7) * cast[int64](g4_19)
|
||||
var f7g5_38 = cast[int64](f7_2) * cast[int64](g5_19)
|
||||
var f7g6_19 = cast[int64](f7) * cast[int64](g6_19)
|
||||
var f7g7_38 = cast[int64](f7_2) * cast[int64](g7_19)
|
||||
var f7g8_19 = cast[int64](f7) * cast[int64](g8_19)
|
||||
var f7g9_38 = cast[int64](f7_2) * cast[int64](g9_19)
|
||||
var f8g0 = cast[int64](f8) * cast[int64](g0)
|
||||
var f8g1 = cast[int64](f8) * cast[int64](g1)
|
||||
var f8g2_19 = cast[int64](f8) * cast[int64](g2_19)
|
||||
var f8g3_19 = cast[int64](f8) * cast[int64](g3_19)
|
||||
var f8g4_19 = cast[int64](f8) * cast[int64](g4_19)
|
||||
var f8g5_19 = cast[int64](f8) * cast[int64](g5_19)
|
||||
var f8g6_19 = cast[int64](f8) * cast[int64](g6_19)
|
||||
var f8g7_19 = cast[int64](f8) * cast[int64](g7_19)
|
||||
var f8g8_19 = cast[int64](f8) * cast[int64](g8_19)
|
||||
var f8g9_19 = cast[int64](f8) * cast[int64](g9_19)
|
||||
var f9g0 = cast[int64](f9) * cast[int64](g0)
|
||||
var f9g1_38 = cast[int64](f9_2) * cast[int64](g1_19)
|
||||
var f9g2_19 = cast[int64](f9) * cast[int64](g2_19)
|
||||
var f9g3_38 = cast[int64](f9_2) * cast[int64](g3_19)
|
||||
var f9g4_19 = cast[int64](f9) * cast[int64](g4_19)
|
||||
var f9g5_38 = cast[int64](f9_2) * cast[int64](g5_19)
|
||||
var f9g6_19 = cast[int64](f9) * cast[int64](g6_19)
|
||||
var f9g7_38 = cast[int64](f9_2) * cast[int64](g7_19)
|
||||
var f9g8_19 = cast[int64](f9) * cast[int64](g8_19)
|
||||
var f9g9_38 = cast[int64](f9_2) * cast[int64](g9_19)
|
||||
var f0g0 = safeConvert[int64](f0) * safeConvert[int64](g0)
|
||||
var f0g1 = safeConvert[int64](f0) * safeConvert[int64](g1)
|
||||
var f0g2 = safeConvert[int64](f0) * safeConvert[int64](g2)
|
||||
var f0g3 = safeConvert[int64](f0) * safeConvert[int64](g3)
|
||||
var f0g4 = safeConvert[int64](f0) * safeConvert[int64](g4)
|
||||
var f0g5 = safeConvert[int64](f0) * safeConvert[int64](g5)
|
||||
var f0g6 = safeConvert[int64](f0) * safeConvert[int64](g6)
|
||||
var f0g7 = safeConvert[int64](f0) * safeConvert[int64](g7)
|
||||
var f0g8 = safeConvert[int64](f0) * safeConvert[int64](g8)
|
||||
var f0g9 = safeConvert[int64](f0) * safeConvert[int64](g9)
|
||||
var f1g0 = safeConvert[int64](f1) * safeConvert[int64](g0)
|
||||
var f1g1_2 = safeConvert[int64](f1_2) * safeConvert[int64](g1)
|
||||
var f1g2 = safeConvert[int64](f1) * safeConvert[int64](g2)
|
||||
var f1g3_2 = safeConvert[int64](f1_2) * safeConvert[int64](g3)
|
||||
var f1g4 = safeConvert[int64](f1) * safeConvert[int64](g4)
|
||||
var f1g5_2 = safeConvert[int64](f1_2) * safeConvert[int64](g5)
|
||||
var f1g6 = safeConvert[int64](f1) * safeConvert[int64](g6)
|
||||
var f1g7_2 = safeConvert[int64](f1_2) * safeConvert[int64](g7)
|
||||
var f1g8 = safeConvert[int64](f1) * safeConvert[int64](g8)
|
||||
var f1g9_38 = safeConvert[int64](f1_2) * safeConvert[int64](g9_19)
|
||||
var f2g0 = safeConvert[int64](f2) * safeConvert[int64](g0)
|
||||
var f2g1 = safeConvert[int64](f2) * safeConvert[int64](g1)
|
||||
var f2g2 = safeConvert[int64](f2) * safeConvert[int64](g2)
|
||||
var f2g3 = safeConvert[int64](f2) * safeConvert[int64](g3)
|
||||
var f2g4 = safeConvert[int64](f2) * safeConvert[int64](g4)
|
||||
var f2g5 = safeConvert[int64](f2) * safeConvert[int64](g5)
|
||||
var f2g6 = safeConvert[int64](f2) * safeConvert[int64](g6)
|
||||
var f2g7 = safeConvert[int64](f2) * safeConvert[int64](g7)
|
||||
var f2g8_19 = safeConvert[int64](f2) * safeConvert[int64](g8_19)
|
||||
var f2g9_19 = safeConvert[int64](f2) * safeConvert[int64](g9_19)
|
||||
var f3g0 = safeConvert[int64](f3) * safeConvert[int64](g0)
|
||||
var f3g1_2 = safeConvert[int64](f3_2) * safeConvert[int64](g1)
|
||||
var f3g2 = safeConvert[int64](f3) * safeConvert[int64](g2)
|
||||
var f3g3_2 = safeConvert[int64](f3_2) * safeConvert[int64](g3)
|
||||
var f3g4 = safeConvert[int64](f3) * safeConvert[int64](g4)
|
||||
var f3g5_2 = safeConvert[int64](f3_2) * safeConvert[int64](g5)
|
||||
var f3g6 = safeConvert[int64](f3) * safeConvert[int64](g6)
|
||||
var f3g7_38 = safeConvert[int64](f3_2) * safeConvert[int64](g7_19)
|
||||
var f3g8_19 = safeConvert[int64](f3) * safeConvert[int64](g8_19)
|
||||
var f3g9_38 = safeConvert[int64](f3_2) * safeConvert[int64](g9_19)
|
||||
var f4g0 = safeConvert[int64](f4) * safeConvert[int64](g0)
|
||||
var f4g1 = safeConvert[int64](f4) * safeConvert[int64](g1)
|
||||
var f4g2 = safeConvert[int64](f4) * safeConvert[int64](g2)
|
||||
var f4g3 = safeConvert[int64](f4) * safeConvert[int64](g3)
|
||||
var f4g4 = safeConvert[int64](f4) * safeConvert[int64](g4)
|
||||
var f4g5 = safeConvert[int64](f4) * safeConvert[int64](g5)
|
||||
var f4g6_19 = safeConvert[int64](f4) * safeConvert[int64](g6_19)
|
||||
var f4g7_19 = safeConvert[int64](f4) * safeConvert[int64](g7_19)
|
||||
var f4g8_19 = safeConvert[int64](f4) * safeConvert[int64](g8_19)
|
||||
var f4g9_19 = safeConvert[int64](f4) * safeConvert[int64](g9_19)
|
||||
var f5g0 = safeConvert[int64](f5) * safeConvert[int64](g0)
|
||||
var f5g1_2 = safeConvert[int64](f5_2) * safeConvert[int64](g1)
|
||||
var f5g2 = safeConvert[int64](f5) * safeConvert[int64](g2)
|
||||
var f5g3_2 = safeConvert[int64](f5_2) * safeConvert[int64](g3)
|
||||
var f5g4 = safeConvert[int64](f5) * safeConvert[int64](g4)
|
||||
var f5g5_38 = safeConvert[int64](f5_2) * safeConvert[int64](g5_19)
|
||||
var f5g6_19 = safeConvert[int64](f5) * safeConvert[int64](g6_19)
|
||||
var f5g7_38 = safeConvert[int64](f5_2) * safeConvert[int64](g7_19)
|
||||
var f5g8_19 = safeConvert[int64](f5) * safeConvert[int64](g8_19)
|
||||
var f5g9_38 = safeConvert[int64](f5_2) * safeConvert[int64](g9_19)
|
||||
var f6g0 = safeConvert[int64](f6) * safeConvert[int64](g0)
|
||||
var f6g1 = safeConvert[int64](f6) * safeConvert[int64](g1)
|
||||
var f6g2 = safeConvert[int64](f6) * safeConvert[int64](g2)
|
||||
var f6g3 = safeConvert[int64](f6) * safeConvert[int64](g3)
|
||||
var f6g4_19 = safeConvert[int64](f6) * safeConvert[int64](g4_19)
|
||||
var f6g5_19 = safeConvert[int64](f6) * safeConvert[int64](g5_19)
|
||||
var f6g6_19 = safeConvert[int64](f6) * safeConvert[int64](g6_19)
|
||||
var f6g7_19 = safeConvert[int64](f6) * safeConvert[int64](g7_19)
|
||||
var f6g8_19 = safeConvert[int64](f6) * safeConvert[int64](g8_19)
|
||||
var f6g9_19 = safeConvert[int64](f6) * safeConvert[int64](g9_19)
|
||||
var f7g0 = safeConvert[int64](f7) * safeConvert[int64](g0)
|
||||
var f7g1_2 = safeConvert[int64](f7_2) * safeConvert[int64](g1)
|
||||
var f7g2 = safeConvert[int64](f7) * safeConvert[int64](g2)
|
||||
var f7g3_38 = safeConvert[int64](f7_2) * safeConvert[int64](g3_19)
|
||||
var f7g4_19 = safeConvert[int64](f7) * safeConvert[int64](g4_19)
|
||||
var f7g5_38 = safeConvert[int64](f7_2) * safeConvert[int64](g5_19)
|
||||
var f7g6_19 = safeConvert[int64](f7) * safeConvert[int64](g6_19)
|
||||
var f7g7_38 = safeConvert[int64](f7_2) * safeConvert[int64](g7_19)
|
||||
var f7g8_19 = safeConvert[int64](f7) * safeConvert[int64](g8_19)
|
||||
var f7g9_38 = safeConvert[int64](f7_2) * safeConvert[int64](g9_19)
|
||||
var f8g0 = safeConvert[int64](f8) * safeConvert[int64](g0)
|
||||
var f8g1 = safeConvert[int64](f8) * safeConvert[int64](g1)
|
||||
var f8g2_19 = safeConvert[int64](f8) * safeConvert[int64](g2_19)
|
||||
var f8g3_19 = safeConvert[int64](f8) * safeConvert[int64](g3_19)
|
||||
var f8g4_19 = safeConvert[int64](f8) * safeConvert[int64](g4_19)
|
||||
var f8g5_19 = safeConvert[int64](f8) * safeConvert[int64](g5_19)
|
||||
var f8g6_19 = safeConvert[int64](f8) * safeConvert[int64](g6_19)
|
||||
var f8g7_19 = safeConvert[int64](f8) * safeConvert[int64](g7_19)
|
||||
var f8g8_19 = safeConvert[int64](f8) * safeConvert[int64](g8_19)
|
||||
var f8g9_19 = safeConvert[int64](f8) * safeConvert[int64](g9_19)
|
||||
var f9g0 = safeConvert[int64](f9) * safeConvert[int64](g0)
|
||||
var f9g1_38 = safeConvert[int64](f9_2) * safeConvert[int64](g1_19)
|
||||
var f9g2_19 = safeConvert[int64](f9) * safeConvert[int64](g2_19)
|
||||
var f9g3_38 = safeConvert[int64](f9_2) * safeConvert[int64](g3_19)
|
||||
var f9g4_19 = safeConvert[int64](f9) * safeConvert[int64](g4_19)
|
||||
var f9g5_38 = safeConvert[int64](f9_2) * safeConvert[int64](g5_19)
|
||||
var f9g6_19 = safeConvert[int64](f9) * safeConvert[int64](g6_19)
|
||||
var f9g7_38 = safeConvert[int64](f9_2) * safeConvert[int64](g7_19)
|
||||
var f9g8_19 = safeConvert[int64](f9) * safeConvert[int64](g8_19)
|
||||
var f9g9_38 = safeConvert[int64](f9_2) * safeConvert[int64](g9_19)
|
||||
var
|
||||
c0, c1, c2, c3, c4, c5, c6, c7, c8, c9: int64
|
||||
h0: int64 = f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 +
|
||||
@@ -493,7 +496,7 @@ proc verify32(x: openArray[byte], y: openArray[byte]): int32 =
|
||||
proc feIsNegative(f: Fe): int32 =
|
||||
var s: array[32, byte]
|
||||
feToBytes(s, f)
|
||||
result = cast[int32](s[0] and 1'u8)
|
||||
result = safeConvert[int32](s[0] and 1'u8)
|
||||
|
||||
proc feIsNonZero(f: Fe): int32 =
|
||||
var s: array[32, byte]
|
||||
@@ -516,61 +519,61 @@ proc feSq(h: var Fe, f: Fe) =
|
||||
var f7_38: int32 = 38 * f7
|
||||
var f8_19: int32 = 19 * f8
|
||||
var f9_38: int32 = 38 * f9
|
||||
var f0f0: int64 = f0 * cast[int64](f0)
|
||||
var f0f1_2: int64 = f0_2 * cast[int64](f1)
|
||||
var f0f2_2: int64 = f0_2 * cast[int64](f2)
|
||||
var f0f3_2: int64 = f0_2 * cast[int64](f3)
|
||||
var f0f4_2: int64 = f0_2 * cast[int64](f4)
|
||||
var f0f5_2: int64 = f0_2 * cast[int64](f5)
|
||||
var f0f6_2: int64 = f0_2 * cast[int64](f6)
|
||||
var f0f7_2: int64 = f0_2 * cast[int64](f7)
|
||||
var f0f8_2: int64 = f0_2 * cast[int64](f8)
|
||||
var f0f9_2: int64 = f0_2 * cast[int64](f9)
|
||||
var f1f1_2: int64 = f1_2 * cast[int64](f1)
|
||||
var f1f2_2: int64 = f1_2 * cast[int64](f2)
|
||||
var f1f3_4: int64 = f1_2 * cast[int64](f3_2)
|
||||
var f1f4_2: int64 = f1_2 * cast[int64](f4)
|
||||
var f1f5_4: int64 = f1_2 * cast[int64](f5_2)
|
||||
var f1f6_2: int64 = f1_2 * cast[int64](f6)
|
||||
var f1f7_4: int64 = f1_2 * cast[int64](f7_2)
|
||||
var f1f8_2: int64 = f1_2 * cast[int64](f8)
|
||||
var f1f9_76: int64 = f1_2 * cast[int64](f9_38)
|
||||
var f2f2: int64 = f2 * cast[int64](f2)
|
||||
var f2f3_2: int64 = f2_2 * cast[int64](f3)
|
||||
var f2f4_2: int64 = f2_2 * cast[int64](f4)
|
||||
var f2f5_2: int64 = f2_2 * cast[int64](f5)
|
||||
var f2f6_2: int64 = f2_2 * cast[int64](f6)
|
||||
var f2f7_2: int64 = f2_2 * cast[int64](f7)
|
||||
var f2f8_38: int64 = f2_2 * cast[int64](f8_19)
|
||||
var f2f9_38: int64 = f2 * cast[int64](f9_38)
|
||||
var f3f3_2: int64 = f3_2 * cast[int64](f3)
|
||||
var f3f4_2: int64 = f3_2 * cast[int64](f4)
|
||||
var f3f5_4: int64 = f3_2 * cast[int64](f5_2)
|
||||
var f3f6_2: int64 = f3_2 * cast[int64](f6)
|
||||
var f3f7_76: int64 = f3_2 * cast[int64](f7_38)
|
||||
var f3f8_38: int64 = f3_2 * cast[int64](f8_19)
|
||||
var f3f9_76: int64 = f3_2 * cast[int64](f9_38)
|
||||
var f4f4: int64 = f4 * cast[int64](f4)
|
||||
var f4f5_2: int64 = f4_2 * cast[int64](f5)
|
||||
var f4f6_38: int64 = f4_2 * cast[int64](f6_19)
|
||||
var f4f7_38: int64 = f4 * cast[int64](f7_38)
|
||||
var f4f8_38: int64 = f4_2 * cast[int64](f8_19)
|
||||
var f4f9_38: int64 = f4 * cast[int64](f9_38)
|
||||
var f5f5_38: int64 = f5 * cast[int64](f5_38)
|
||||
var f5f6_38: int64 = f5_2 * cast[int64](f6_19)
|
||||
var f5f7_76: int64 = f5_2 * cast[int64](f7_38)
|
||||
var f5f8_38: int64 = f5_2 * cast[int64](f8_19)
|
||||
var f5f9_76: int64 = f5_2 * cast[int64](f9_38)
|
||||
var f6f6_19: int64 = f6 * cast[int64](f6_19)
|
||||
var f6f7_38: int64 = f6 * cast[int64](f7_38)
|
||||
var f6f8_38: int64 = f6_2 * cast[int64](f8_19)
|
||||
var f6f9_38: int64 = f6 * cast[int64](f9_38)
|
||||
var f7f7_38: int64 = f7 * cast[int64](f7_38)
|
||||
var f7f8_38: int64 = f7_2 * cast[int64](f8_19)
|
||||
var f7f9_76: int64 = f7_2 * cast[int64](f9_38)
|
||||
var f8f8_19: int64 = f8 * cast[int64](f8_19)
|
||||
var f8f9_38: int64 = f8 * cast[int64](f9_38)
|
||||
var f9f9_38: int64 = f9 * cast[int64](f9_38)
|
||||
var f0f0: int64 = f0 * safeConvert[int64](f0)
|
||||
var f0f1_2: int64 = f0_2 * safeConvert[int64](f1)
|
||||
var f0f2_2: int64 = f0_2 * safeConvert[int64](f2)
|
||||
var f0f3_2: int64 = f0_2 * safeConvert[int64](f3)
|
||||
var f0f4_2: int64 = f0_2 * safeConvert[int64](f4)
|
||||
var f0f5_2: int64 = f0_2 * safeConvert[int64](f5)
|
||||
var f0f6_2: int64 = f0_2 * safeConvert[int64](f6)
|
||||
var f0f7_2: int64 = f0_2 * safeConvert[int64](f7)
|
||||
var f0f8_2: int64 = f0_2 * safeConvert[int64](f8)
|
||||
var f0f9_2: int64 = f0_2 * safeConvert[int64](f9)
|
||||
var f1f1_2: int64 = f1_2 * safeConvert[int64](f1)
|
||||
var f1f2_2: int64 = f1_2 * safeConvert[int64](f2)
|
||||
var f1f3_4: int64 = f1_2 * safeConvert[int64](f3_2)
|
||||
var f1f4_2: int64 = f1_2 * safeConvert[int64](f4)
|
||||
var f1f5_4: int64 = f1_2 * safeConvert[int64](f5_2)
|
||||
var f1f6_2: int64 = f1_2 * safeConvert[int64](f6)
|
||||
var f1f7_4: int64 = f1_2 * safeConvert[int64](f7_2)
|
||||
var f1f8_2: int64 = f1_2 * safeConvert[int64](f8)
|
||||
var f1f9_76: int64 = f1_2 * safeConvert[int64](f9_38)
|
||||
var f2f2: int64 = f2 * safeConvert[int64](f2)
|
||||
var f2f3_2: int64 = f2_2 * safeConvert[int64](f3)
|
||||
var f2f4_2: int64 = f2_2 * safeConvert[int64](f4)
|
||||
var f2f5_2: int64 = f2_2 * safeConvert[int64](f5)
|
||||
var f2f6_2: int64 = f2_2 * safeConvert[int64](f6)
|
||||
var f2f7_2: int64 = f2_2 * safeConvert[int64](f7)
|
||||
var f2f8_38: int64 = f2_2 * safeConvert[int64](f8_19)
|
||||
var f2f9_38: int64 = f2 * safeConvert[int64](f9_38)
|
||||
var f3f3_2: int64 = f3_2 * safeConvert[int64](f3)
|
||||
var f3f4_2: int64 = f3_2 * safeConvert[int64](f4)
|
||||
var f3f5_4: int64 = f3_2 * safeConvert[int64](f5_2)
|
||||
var f3f6_2: int64 = f3_2 * safeConvert[int64](f6)
|
||||
var f3f7_76: int64 = f3_2 * safeConvert[int64](f7_38)
|
||||
var f3f8_38: int64 = f3_2 * safeConvert[int64](f8_19)
|
||||
var f3f9_76: int64 = f3_2 * safeConvert[int64](f9_38)
|
||||
var f4f4: int64 = f4 * safeConvert[int64](f4)
|
||||
var f4f5_2: int64 = f4_2 * safeConvert[int64](f5)
|
||||
var f4f6_38: int64 = f4_2 * safeConvert[int64](f6_19)
|
||||
var f4f7_38: int64 = f4 * safeConvert[int64](f7_38)
|
||||
var f4f8_38: int64 = f4_2 * safeConvert[int64](f8_19)
|
||||
var f4f9_38: int64 = f4 * safeConvert[int64](f9_38)
|
||||
var f5f5_38: int64 = f5 * safeConvert[int64](f5_38)
|
||||
var f5f6_38: int64 = f5_2 * safeConvert[int64](f6_19)
|
||||
var f5f7_76: int64 = f5_2 * safeConvert[int64](f7_38)
|
||||
var f5f8_38: int64 = f5_2 * safeConvert[int64](f8_19)
|
||||
var f5f9_76: int64 = f5_2 * safeConvert[int64](f9_38)
|
||||
var f6f6_19: int64 = f6 * safeConvert[int64](f6_19)
|
||||
var f6f7_38: int64 = f6 * safeConvert[int64](f7_38)
|
||||
var f6f8_38: int64 = f6_2 * safeConvert[int64](f8_19)
|
||||
var f6f9_38: int64 = f6 * safeConvert[int64](f9_38)
|
||||
var f7f7_38: int64 = f7 * safeConvert[int64](f7_38)
|
||||
var f7f8_38: int64 = f7_2 * safeConvert[int64](f8_19)
|
||||
var f7f9_76: int64 = f7_2 * safeConvert[int64](f9_38)
|
||||
var f8f8_19: int64 = f8 * safeConvert[int64](f8_19)
|
||||
var f8f9_38: int64 = f8 * safeConvert[int64](f9_38)
|
||||
var f9f9_38: int64 = f9 * safeConvert[int64](f9_38)
|
||||
var h0: int64 = f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
|
||||
var h1: int64 = f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
|
||||
var h2: int64 = f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
|
||||
@@ -623,61 +626,61 @@ proc feSq2(h: var Fe, f: Fe) =
|
||||
var f7_38 = 38 * f7
|
||||
var f8_19 = 19 * f8
|
||||
var f9_38 = 38 * f9
|
||||
var f0f0 = cast[int64](f0) * cast[int64](f0)
|
||||
var f0f1_2 = cast[int64](f0_2) * cast[int64](f1)
|
||||
var f0f2_2 = cast[int64](f0_2) * cast[int64](f2)
|
||||
var f0f3_2 = cast[int64](f0_2) * cast[int64](f3)
|
||||
var f0f4_2 = cast[int64](f0_2) * cast[int64](f4)
|
||||
var f0f5_2 = cast[int64](f0_2) * cast[int64](f5)
|
||||
var f0f6_2 = cast[int64](f0_2) * cast[int64](f6)
|
||||
var f0f7_2 = cast[int64](f0_2) * cast[int64](f7)
|
||||
var f0f8_2 = cast[int64](f0_2) * cast[int64](f8)
|
||||
var f0f9_2 = cast[int64](f0_2) * cast[int64](f9)
|
||||
var f1f1_2 = cast[int64](f1_2) * cast[int64](f1)
|
||||
var f1f2_2 = cast[int64](f1_2) * cast[int64](f2)
|
||||
var f1f3_4 = cast[int64](f1_2) * cast[int64](f3_2)
|
||||
var f1f4_2 = cast[int64](f1_2) * cast[int64](f4)
|
||||
var f1f5_4 = cast[int64](f1_2) * cast[int64](f5_2)
|
||||
var f1f6_2 = cast[int64](f1_2) * cast[int64](f6)
|
||||
var f1f7_4 = cast[int64](f1_2) * cast[int64](f7_2)
|
||||
var f1f8_2 = cast[int64](f1_2) * cast[int64](f8)
|
||||
var f1f9_76 = cast[int64](f1_2) * cast[int64](f9_38)
|
||||
var f2f2 = cast[int64](f2) * cast[int64](f2)
|
||||
var f2f3_2 = cast[int64](f2_2) * cast[int64](f3)
|
||||
var f2f4_2 = cast[int64](f2_2) * cast[int64](f4)
|
||||
var f2f5_2 = cast[int64](f2_2) * cast[int64](f5)
|
||||
var f2f6_2 = cast[int64](f2_2) * cast[int64](f6)
|
||||
var f2f7_2 = cast[int64](f2_2) * cast[int64](f7)
|
||||
var f2f8_38 = cast[int64](f2_2) * cast[int64](f8_19)
|
||||
var f2f9_38 = cast[int64](f2) * cast[int64](f9_38)
|
||||
var f3f3_2 = cast[int64](f3_2) * cast[int64](f3)
|
||||
var f3f4_2 = cast[int64](f3_2) * cast[int64](f4)
|
||||
var f3f5_4 = cast[int64](f3_2) * cast[int64](f5_2)
|
||||
var f3f6_2 = cast[int64](f3_2) * cast[int64](f6)
|
||||
var f3f7_76 = cast[int64](f3_2) * cast[int64](f7_38)
|
||||
var f3f8_38 = cast[int64](f3_2) * cast[int64](f8_19)
|
||||
var f3f9_76 = cast[int64](f3_2) * cast[int64](f9_38)
|
||||
var f4f4 = cast[int64](f4) * cast[int64](f4)
|
||||
var f4f5_2 = cast[int64](f4_2) * cast[int64](f5)
|
||||
var f4f6_38 = cast[int64](f4_2) * cast[int64](f6_19)
|
||||
var f4f7_38 = cast[int64](f4) * cast[int64](f7_38)
|
||||
var f4f8_38 = cast[int64](f4_2) * cast[int64](f8_19)
|
||||
var f4f9_38 = cast[int64](f4) * cast[int64](f9_38)
|
||||
var f5f5_38 = cast[int64](f5) * cast[int64](f5_38)
|
||||
var f5f6_38 = cast[int64](f5_2) * cast[int64](f6_19)
|
||||
var f5f7_76 = cast[int64](f5_2) * cast[int64](f7_38)
|
||||
var f5f8_38 = cast[int64](f5_2) * cast[int64](f8_19)
|
||||
var f5f9_76 = cast[int64](f5_2) * cast[int64](f9_38)
|
||||
var f6f6_19 = cast[int64](f6) * cast[int64](f6_19)
|
||||
var f6f7_38 = cast[int64](f6) * cast[int64](f7_38)
|
||||
var f6f8_38 = cast[int64](f6_2) * cast[int64](f8_19)
|
||||
var f6f9_38 = cast[int64](f6) * cast[int64](f9_38)
|
||||
var f7f7_38 = cast[int64](f7) * cast[int64](f7_38)
|
||||
var f7f8_38 = cast[int64](f7_2) * cast[int64](f8_19)
|
||||
var f7f9_76 = cast[int64](f7_2) * cast[int64](f9_38)
|
||||
var f8f8_19 = cast[int64](f8) * cast[int64](f8_19)
|
||||
var f8f9_38 = cast[int64](f8) * cast[int64](f9_38)
|
||||
var f9f9_38 = cast[int64](f9) * cast[int64](f9_38)
|
||||
var f0f0 = safeConvert[int64](f0) * safeConvert[int64](f0)
|
||||
var f0f1_2 = safeConvert[int64](f0_2) * safeConvert[int64](f1)
|
||||
var f0f2_2 = safeConvert[int64](f0_2) * safeConvert[int64](f2)
|
||||
var f0f3_2 = safeConvert[int64](f0_2) * safeConvert[int64](f3)
|
||||
var f0f4_2 = safeConvert[int64](f0_2) * safeConvert[int64](f4)
|
||||
var f0f5_2 = safeConvert[int64](f0_2) * safeConvert[int64](f5)
|
||||
var f0f6_2 = safeConvert[int64](f0_2) * safeConvert[int64](f6)
|
||||
var f0f7_2 = safeConvert[int64](f0_2) * safeConvert[int64](f7)
|
||||
var f0f8_2 = safeConvert[int64](f0_2) * safeConvert[int64](f8)
|
||||
var f0f9_2 = safeConvert[int64](f0_2) * safeConvert[int64](f9)
|
||||
var f1f1_2 = safeConvert[int64](f1_2) * safeConvert[int64](f1)
|
||||
var f1f2_2 = safeConvert[int64](f1_2) * safeConvert[int64](f2)
|
||||
var f1f3_4 = safeConvert[int64](f1_2) * safeConvert[int64](f3_2)
|
||||
var f1f4_2 = safeConvert[int64](f1_2) * safeConvert[int64](f4)
|
||||
var f1f5_4 = safeConvert[int64](f1_2) * safeConvert[int64](f5_2)
|
||||
var f1f6_2 = safeConvert[int64](f1_2) * safeConvert[int64](f6)
|
||||
var f1f7_4 = safeConvert[int64](f1_2) * safeConvert[int64](f7_2)
|
||||
var f1f8_2 = safeConvert[int64](f1_2) * safeConvert[int64](f8)
|
||||
var f1f9_76 = safeConvert[int64](f1_2) * safeConvert[int64](f9_38)
|
||||
var f2f2 = safeConvert[int64](f2) * safeConvert[int64](f2)
|
||||
var f2f3_2 = safeConvert[int64](f2_2) * safeConvert[int64](f3)
|
||||
var f2f4_2 = safeConvert[int64](f2_2) * safeConvert[int64](f4)
|
||||
var f2f5_2 = safeConvert[int64](f2_2) * safeConvert[int64](f5)
|
||||
var f2f6_2 = safeConvert[int64](f2_2) * safeConvert[int64](f6)
|
||||
var f2f7_2 = safeConvert[int64](f2_2) * safeConvert[int64](f7)
|
||||
var f2f8_38 = safeConvert[int64](f2_2) * safeConvert[int64](f8_19)
|
||||
var f2f9_38 = safeConvert[int64](f2) * safeConvert[int64](f9_38)
|
||||
var f3f3_2 = safeConvert[int64](f3_2) * safeConvert[int64](f3)
|
||||
var f3f4_2 = safeConvert[int64](f3_2) * safeConvert[int64](f4)
|
||||
var f3f5_4 = safeConvert[int64](f3_2) * safeConvert[int64](f5_2)
|
||||
var f3f6_2 = safeConvert[int64](f3_2) * safeConvert[int64](f6)
|
||||
var f3f7_76 = safeConvert[int64](f3_2) * safeConvert[int64](f7_38)
|
||||
var f3f8_38 = safeConvert[int64](f3_2) * safeConvert[int64](f8_19)
|
||||
var f3f9_76 = safeConvert[int64](f3_2) * safeConvert[int64](f9_38)
|
||||
var f4f4 = safeConvert[int64](f4) * safeConvert[int64](f4)
|
||||
var f4f5_2 = safeConvert[int64](f4_2) * safeConvert[int64](f5)
|
||||
var f4f6_38 = safeConvert[int64](f4_2) * safeConvert[int64](f6_19)
|
||||
var f4f7_38 = safeConvert[int64](f4) * safeConvert[int64](f7_38)
|
||||
var f4f8_38 = safeConvert[int64](f4_2) * safeConvert[int64](f8_19)
|
||||
var f4f9_38 = safeConvert[int64](f4) * safeConvert[int64](f9_38)
|
||||
var f5f5_38 = safeConvert[int64](f5) * safeConvert[int64](f5_38)
|
||||
var f5f6_38 = safeConvert[int64](f5_2) * safeConvert[int64](f6_19)
|
||||
var f5f7_76 = safeConvert[int64](f5_2) * safeConvert[int64](f7_38)
|
||||
var f5f8_38 = safeConvert[int64](f5_2) * safeConvert[int64](f8_19)
|
||||
var f5f9_76 = safeConvert[int64](f5_2) * safeConvert[int64](f9_38)
|
||||
var f6f6_19 = safeConvert[int64](f6) * safeConvert[int64](f6_19)
|
||||
var f6f7_38 = safeConvert[int64](f6) * safeConvert[int64](f7_38)
|
||||
var f6f8_38 = safeConvert[int64](f6_2) * safeConvert[int64](f8_19)
|
||||
var f6f9_38 = safeConvert[int64](f6) * safeConvert[int64](f9_38)
|
||||
var f7f7_38 = safeConvert[int64](f7) * safeConvert[int64](f7_38)
|
||||
var f7f8_38 = safeConvert[int64](f7_2) * safeConvert[int64](f8_19)
|
||||
var f7f9_76 = safeConvert[int64](f7_2) * safeConvert[int64](f9_38)
|
||||
var f8f8_19 = safeConvert[int64](f8) * safeConvert[int64](f8_19)
|
||||
var f8f9_38 = safeConvert[int64](f8) * safeConvert[int64](f9_38)
|
||||
var f9f9_38 = safeConvert[int64](f9) * safeConvert[int64](f9_38)
|
||||
var
|
||||
c0, c1, c2, c3, c4, c5, c6, c7, c8, c9: int64
|
||||
h0: int64 = f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
|
||||
@@ -834,7 +837,7 @@ proc geFromBytesNegateVartime(h: var GeP3, s: openArray[byte]): int32 =
|
||||
return -1;
|
||||
feMul(h.x, h.x, SqrTm1)
|
||||
|
||||
if feIsNegative(h.x) == cast[int32](s[31] shr 7):
|
||||
if feIsNegative(h.x) == safeConvert[int32](s[31] shr 7):
|
||||
feNeg(h.x, h.x)
|
||||
|
||||
feMul(h.t, h.x, h.y)
|
||||
@@ -956,14 +959,14 @@ proc equal(b, c: int8): byte =
|
||||
var ub = cast[byte](b)
|
||||
var uc = cast[byte](c)
|
||||
var x = ub xor uc
|
||||
var y = cast[uint32](x)
|
||||
var y = safeConvert[uint32](x)
|
||||
y = y - 1
|
||||
y = y shr 31
|
||||
result = cast[byte](y)
|
||||
|
||||
proc negative(b: int8): byte =
|
||||
var x = cast[uint64](b)
|
||||
x = x shr 63
|
||||
var x = cast[uint8](b)
|
||||
x = x shr 7
|
||||
result = cast[byte](x)
|
||||
|
||||
proc cmov(t: var GePrecomp, u: GePrecomp, b: byte) =
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -15,7 +15,7 @@ else:
|
||||
{.push raises: [].}
|
||||
|
||||
import nimcrypto
|
||||
import bearssl/[kdf, rand, hash]
|
||||
import bearssl/[kdf, hash]
|
||||
|
||||
type HkdfResult*[len: static int] = array[len, byte]
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -18,6 +18,7 @@ import stew/[endians2, results, ctops]
|
||||
export results
|
||||
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
|
||||
import nimcrypto/utils as ncrutils
|
||||
import ../utility
|
||||
|
||||
type
|
||||
Asn1Error* {.pure.} = enum
|
||||
@@ -119,7 +120,7 @@ template toOpenArray*(af: Asn1Field): untyped =
|
||||
template isEmpty*(ab: Asn1Buffer): bool =
|
||||
ab.offset >= len(ab.buffer)
|
||||
|
||||
template isEnough*(ab: Asn1Buffer, length: int): bool =
|
||||
template isEnough*(ab: Asn1Buffer, length: int64): bool =
|
||||
len(ab.buffer) >= ab.offset + length
|
||||
|
||||
proc len*[T: Asn1Buffer|Asn1Composite](abc: T): int {.inline.} =
|
||||
@@ -344,32 +345,6 @@ proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte],
|
||||
dest[k - 1] = dest[k - 1] and 0x7F'u8
|
||||
res
|
||||
|
||||
proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[int]): int =
|
||||
## Encode array of integers ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and
|
||||
## return number of bytes (octets) used.
|
||||
##
|
||||
## If length of ``dest`` is less then number of required bytes to encode
|
||||
## ``value``, then result of encoding will not be stored in ``dest``
|
||||
## but number of bytes (octets) required will be returned.
|
||||
var buffer: array[16, byte]
|
||||
var res = 1
|
||||
var oidlen = 1
|
||||
for i in 2..<len(value):
|
||||
oidlen += asn1EncodeTag(buffer, cast[uint64](value[i]))
|
||||
res += asn1EncodeLength(buffer, uint64(oidlen))
|
||||
res += oidlen
|
||||
if len(dest) >= res:
|
||||
let last = dest.high
|
||||
var offset = 1
|
||||
dest[0] = Asn1Tag.Oid.code()
|
||||
offset += asn1EncodeLength(dest.toOpenArray(offset, last), uint64(oidlen))
|
||||
dest[offset] = cast[byte](value[0] * 40 + value[1])
|
||||
offset += 1
|
||||
for i in 2..<len(value):
|
||||
offset += asn1EncodeTag(dest.toOpenArray(offset, last),
|
||||
cast[uint64](value[i]))
|
||||
res
|
||||
|
||||
proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
|
||||
## Encode array of bytes ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and return
|
||||
## number of bytes (octets) used.
|
||||
@@ -443,26 +418,29 @@ proc asn1EncodeContextTag*(dest: var openArray[byte], value: openArray[byte],
|
||||
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
|
||||
res
|
||||
|
||||
proc getLength(ab: var Asn1Buffer): Asn1Result[uint64] =
proc getLength(ab: var Asn1Buffer): Asn1Result[int] =
## Decode length part of ASN.1 TLV triplet.
if not ab.isEmpty():
let b = ab.buffer[ab.offset]
if (b and 0x80'u8) == 0x00'u8:
let length = cast[uint64](b)
let length = safeConvert[int](b)
ab.offset += 1
return ok(length)
if b == 0x80'u8:
return err(Asn1Error.Indefinite)
if b == 0xFF'u8:
return err(Asn1Error.Incorrect)
let octets = cast[uint64](b and 0x7F'u8)
if octets > 8'u64:
let octets = safeConvert[int](b and 0x7F'u8)
if octets > 8:
return err(Asn1Error.Overflow)
if ab.isEnough(int(octets)):
var length: uint64 = 0
for i in 0..<int(octets):
length = (length shl 8) or cast[uint64](ab.buffer[ab.offset + i + 1])
ab.offset = ab.offset + int(octets) + 1
if ab.isEnough(octets):
var lengthU: uint64 = 0
for i in 0..<octets:
lengthU = (lengthU shl 8) or safeConvert[uint64](ab.buffer[ab.offset + i + 1])
if lengthU > uint64(int64.high):
return err(Asn1Error.Overflow)
let length = int(lengthU)
ab.offset = ab.offset + octets + 1
return ok(length)
else:
return err(Asn1Error.Incomplete)
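A short worked example of the long-form DER length decoding above (illustrative byte values, not taken from the diff):

  # Length octets 0x82 0x01 0x23: 0x82 and 0x7F = 2 follow-up octets,
  # so length = (0x01 shl 8) or 0x23 = 0x0123 = 291 bytes.
  # The short form is a single octet below 0x80, e.g. 0x2A = length 42.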
@@ -474,8 +452,8 @@ proc getTag(ab: var Asn1Buffer, tag: var int): Asn1Result[Asn1Class] =
|
||||
if not ab.isEmpty():
|
||||
let
|
||||
b = ab.buffer[ab.offset]
|
||||
c = int((b and 0xC0'u8) shr 6)
|
||||
tag = int(b and 0x3F)
|
||||
c = safeConvert[int]((b and 0xC0'u8) shr 6)
|
||||
tag = safeConvert[int](b and 0x3F)
|
||||
ab.offset += 1
|
||||
if c >= 0 and c < 4:
|
||||
ok(cast[Asn1Class](c))
|
||||
@@ -489,7 +467,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
|
||||
var
|
||||
field: Asn1Field
|
||||
tag, ttag, offset: int
|
||||
length, tlength: uint64
|
||||
length, tlength: int
|
||||
aclass: Asn1Class
|
||||
inclass: bool
|
||||
|
||||
@@ -519,7 +497,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
|
||||
if length != 1:
|
||||
return err(Asn1Error.Incorrect)
|
||||
|
||||
if not ab.isEnough(int(length)):
|
||||
if not ab.isEnough(length):
|
||||
return err(Asn1Error.Incomplete)
|
||||
|
||||
let b = ab.buffer[ab.offset]
|
||||
@@ -527,7 +505,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
return err(Asn1Error.Incorrect)

field = Asn1Field(kind: Asn1Tag.Boolean, klass: aclass,
index: ttag, offset: int(ab.offset),
index: ttag, offset: ab.offset,
length: 1, buffer: ab.buffer)
field.vbool = (b == 0xFF'u8)
ab.offset += 1
@@ -538,12 +516,12 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
if length == 0:
return err(Asn1Error.Incorrect)

if not ab.isEnough(int(length)):
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)

# Count number of leading zeroes
var zc = 0
while (zc < int(length)) and (ab.buffer[ab.offset + zc] == 0x00'u8):
while (zc < length) and (ab.buffer[ab.offset + zc] == 0x00'u8):
inc(zc)

if zc > 1:
@@ -552,45 +530,45 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
if zc == 0:
# Negative or Positive integer
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), buffer: ab.buffer)
index: ttag, offset: ab.offset,
length: length, buffer: ab.buffer)
if (ab.buffer[ab.offset] and 0x80'u8) == 0x80'u8:
# Negative integer
if length <= 8:
# We need this transformation because our field.vint is uint64.
for i in 0 ..< 8:
if i < 8 - int(length):
if i < 8 - length:
field.vint = (field.vint shl 8) or 0xFF'u64
else:
let offset = ab.offset + i - (8 - int(length))
field.vint = (field.vint shl 8) or uint64(ab.buffer[offset])
let offset = ab.offset + i - (8 - length)
field.vint = (field.vint shl 8) or safeConvert[uint64](ab.buffer[offset])
else:
# Positive integer
if length <= 8:
for i in 0 ..< int(length):
for i in 0 ..< length:
field.vint = (field.vint shl 8) or
uint64(ab.buffer[ab.offset + i])
ab.offset += int(length)
safeConvert[uint64](ab.buffer[ab.offset + i])
ab.offset += length
return ok(field)
else:
if length == 1:
# Zero value integer
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), vint: 0'u64,
index: ttag, offset: ab.offset,
length: length, vint: 0'u64,
buffer: ab.buffer)
ab.offset += int(length)
ab.offset += length
return ok(field)
else:
# Positive integer with leading zero
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: int(ab.offset) + 1,
length: int(length) - 1, buffer: ab.buffer)
index: ttag, offset: ab.offset + 1,
length: length - 1, buffer: ab.buffer)
if length <= 9:
for i in 1 ..< int(length):
for i in 1 ..< length:
field.vint = (field.vint shl 8) or
uint64(ab.buffer[ab.offset + i])
ab.offset += int(length)
safeConvert[uint64](ab.buffer[ab.offset + i])
ab.offset += length
return ok(field)

of Asn1Tag.BitString.code():
@@ -606,13 +584,13 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
else:
# Zero-length BIT STRING.
field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
index: ttag, offset: int(ab.offset + 1),
index: ttag, offset: ab.offset + 1,
length: 0, ubits: 0, buffer: ab.buffer)
ab.offset += int(length)
ab.offset += length
return ok(field)

else:
if not ab.isEnough(int(length)):
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)

let unused = ab.buffer[ab.offset]
@@ -620,27 +598,27 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
# Number of unused bits should not be bigger then `7`.
return err(Asn1Error.Incorrect)

let mask = (1'u8 shl int(unused)) - 1'u8
if (ab.buffer[ab.offset + int(length) - 1] and mask) != 0x00'u8:
let mask = (1'u8 shl safeConvert[int](unused)) - 1'u8
if (ab.buffer[ab.offset + length - 1] and mask) != 0x00'u8:
## All unused bits should be set to `0`.
return err(Asn1Error.Incorrect)

field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
index: ttag, offset: int(ab.offset + 1),
length: int(length - 1), ubits: int(unused),
index: ttag, offset: ab.offset + 1,
length: length - 1, ubits: safeConvert[int](unused),
buffer: ab.buffer)
ab.offset += int(length)
ab.offset += length
return ok(field)

of Asn1Tag.OctetString.code():
# OCTET STRING
if not ab.isEnough(int(length)):
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)

field = Asn1Field(kind: Asn1Tag.OctetString, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), buffer: ab.buffer)
ab.offset += int(length)
index: ttag, offset: ab.offset,
length: length, buffer: ab.buffer)
ab.offset += length
return ok(field)

of Asn1Tag.Null.code():
@@ -649,30 +627,30 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
return err(Asn1Error.Incorrect)

field = Asn1Field(kind: Asn1Tag.Null, klass: aclass, index: ttag,
offset: int(ab.offset), length: 0, buffer: ab.buffer)
ab.offset += int(length)
offset: ab.offset, length: 0, buffer: ab.buffer)
ab.offset += length
return ok(field)

of Asn1Tag.Oid.code():
# OID
if not ab.isEnough(int(length)):
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)

field = Asn1Field(kind: Asn1Tag.Oid, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), buffer: ab.buffer)
ab.offset += int(length)
index: ttag, offset: ab.offset,
length: length, buffer: ab.buffer)
ab.offset += length
return ok(field)

of Asn1Tag.Sequence.code():
# SEQUENCE
if not ab.isEnough(int(length)):
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)

field = Asn1Field(kind: Asn1Tag.Sequence, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), buffer: ab.buffer)
ab.offset += int(length)
index: ttag, offset: ab.offset,
length: length, buffer: ab.buffer)
ab.offset += length
return ok(field)

else:

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -686,7 +686,7 @@ proc `==`*(a, b: RsaPrivateKey): bool =
false
else:
if a.seck.nBitlen == b.seck.nBitlen:
if cast[int](a.seck.nBitlen) > 0:
if a.seck.nBitlen > 0'u:
let r1 = CT.isEqual(getArray(a.buffer, a.seck.p, a.seck.plen),
getArray(b.buffer, b.seck.p, b.seck.plen))
let r2 = CT.isEqual(getArray(a.buffer, a.seck.q, a.seck.qlen),

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -35,9 +35,6 @@ type
SkSignature* = distinct secp256k1.SkSignature
SkKeyPair* = distinct secp256k1.SkKeyPair

template pubkey*(v: SkKeyPair): SkPublicKey = SkPublicKey(secp256k1.SkKeyPair(v).pubkey)
template seckey*(v: SkKeyPair): SkPrivateKey = SkPrivateKey(secp256k1.SkKeyPair(v).seckey)

proc random*(t: typedesc[SkPrivateKey], rng: var HmacDrbgContext): SkPrivateKey =
#TODO is there a better way?
var rngPtr = addr rng

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -17,7 +17,7 @@ import std/[os, osproc, strutils, tables, strtabs, sequtils]
import pkg/[chronos, chronicles]
import ../varint, ../multiaddress, ../multicodec, ../cid, ../peerid
import ../wire, ../multihash, ../protobuf/minprotobuf, ../errors
import ../crypto/crypto
import ../crypto/crypto, ../utility

export
peerid, multiaddress, multicodec, multihash, cid, crypto, wire, errors
@@ -170,7 +170,7 @@ proc requestIdentity(): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
## Processing function `doIdentify(req *pb.Request)`.
result = initProtoBuffer({WithVarintLength})
result.write(1, cast[uint](RequestType.IDENTIFY))
result.write(1, safeConvert[uint](RequestType.IDENTIFY))
result.finish()

proc requestConnect(peerid: PeerId,
@@ -185,7 +185,7 @@ proc requestConnect(peerid: PeerId,
msg.write(2, item.data.buffer)
if timeout > 0:
msg.write(3, hint64(timeout))
result.write(1, cast[uint](RequestType.CONNECT))
result.write(1, safeConvert[uint](RequestType.CONNECT))
result.write(2, msg)
result.finish()

@@ -195,7 +195,7 @@ proc requestDisconnect(peerid: PeerId): ProtoBuffer =
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, peerid)
result.write(1, cast[uint](RequestType.DISCONNECT))
result.write(1, safeConvert[uint](RequestType.DISCONNECT))
result.write(7, msg)
result.finish()

@@ -211,7 +211,7 @@ proc requestStreamOpen(peerid: PeerId,
msg.write(2, item)
if timeout > 0:
msg.write(3, hint64(timeout))
result.write(1, cast[uint](RequestType.STREAM_OPEN))
result.write(1, safeConvert[uint](RequestType.STREAM_OPEN))
result.write(3, msg)
result.finish()

@@ -224,7 +224,7 @@ proc requestStreamHandler(address: MultiAddress,
msg.write(1, address.data.buffer)
for item in protocols:
msg.write(2, item)
result.write(1, cast[uint](RequestType.STREAM_HANDLER))
result.write(1, safeConvert[uint](RequestType.STREAM_HANDLER))
result.write(4, msg)
result.finish()

@@ -232,13 +232,13 @@ proc requestListPeers(): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
## Processing function `doListPeers(req *pb.Request)`
result = initProtoBuffer({WithVarintLength})
result.write(1, cast[uint](RequestType.LIST_PEERS))
result.write(1, safeConvert[uint](RequestType.LIST_PEERS))
result.finish()

proc requestDHTFindPeer(peer: PeerId, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTFindPeer(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.FIND_PEER)
let msgid = safeConvert[uint](DHTRequestType.FIND_PEER)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -246,7 +246,7 @@ proc requestDHTFindPeer(peer: PeerId, timeout = 0): ProtoBuffer =
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()

@@ -254,7 +254,7 @@ proc requestDHTFindPeersConnectedToPeer(peer: PeerId,
timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTFindPeersConnectedToPeer(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.FIND_PEERS_CONNECTED_TO_PEER)
let msgid = safeConvert[uint](DHTRequestType.FIND_PEERS_CONNECTED_TO_PEER)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -262,7 +262,7 @@ proc requestDHTFindPeersConnectedToPeer(peer: PeerId,
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()

@@ -270,7 +270,7 @@ proc requestDHTFindProviders(cid: Cid,
count: uint32, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTFindProviders(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.FIND_PROVIDERS)
let msgid = safeConvert[uint](DHTRequestType.FIND_PROVIDERS)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -279,14 +279,14 @@ proc requestDHTFindProviders(cid: Cid,
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()

proc requestDHTGetClosestPeers(key: string, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTGetClosestPeers(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.GET_CLOSEST_PEERS)
let msgid = safeConvert[uint](DHTRequestType.GET_CLOSEST_PEERS)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -294,14 +294,14 @@ proc requestDHTGetClosestPeers(key: string, timeout = 0): ProtoBuffer =
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()

proc requestDHTGetPublicKey(peer: PeerId, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTGetPublicKey(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.GET_PUBLIC_KEY)
let msgid = safeConvert[uint](DHTRequestType.GET_PUBLIC_KEY)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -309,14 +309,14 @@ proc requestDHTGetPublicKey(peer: PeerId, timeout = 0): ProtoBuffer =
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()

proc requestDHTGetValue(key: string, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTGetValue(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.GET_VALUE)
let msgid = safeConvert[uint](DHTRequestType.GET_VALUE)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -324,14 +324,14 @@ proc requestDHTGetValue(key: string, timeout = 0): ProtoBuffer =
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()

proc requestDHTSearchValue(key: string, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTSearchValue(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.SEARCH_VALUE)
let msgid = safeConvert[uint](DHTRequestType.SEARCH_VALUE)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -339,7 +339,7 @@ proc requestDHTSearchValue(key: string, timeout = 0): ProtoBuffer =
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()

@@ -347,7 +347,7 @@ proc requestDHTPutValue(key: string, value: openArray[byte],
timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTPutValue(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.PUT_VALUE)
let msgid = safeConvert[uint](DHTRequestType.PUT_VALUE)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -356,14 +356,14 @@ proc requestDHTPutValue(key: string, value: openArray[byte],
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, uint(RequestType.DHT))
result.write(5, msg)
result.finish()

proc requestDHTProvide(cid: Cid, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTProvide(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.PROVIDE)
let msgid = safeConvert[uint](DHTRequestType.PROVIDE)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -371,13 +371,13 @@ proc requestDHTProvide(cid: Cid, timeout = 0): ProtoBuffer =
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()

proc requestCMTagPeer(peer: PeerId, tag: string, weight: int): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/connmgr.go#L18
let msgid = cast[uint](ConnManagerRequestType.TAG_PEER)
let msgid = safeConvert[uint](ConnManagerRequestType.TAG_PEER)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -385,83 +385,83 @@ proc requestCMTagPeer(peer: PeerId, tag: string, weight: int): ProtoBuffer =
msg.write(3, tag)
msg.write(4, hint64(weight))
msg.finish()
result.write(1, cast[uint](RequestType.CONNMANAGER))
result.write(1, safeConvert[uint](RequestType.CONNMANAGER))
result.write(6, msg)
result.finish()

proc requestCMUntagPeer(peer: PeerId, tag: string): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/connmgr.go#L33
let msgid = cast[uint](ConnManagerRequestType.UNTAG_PEER)
let msgid = safeConvert[uint](ConnManagerRequestType.UNTAG_PEER)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
msg.write(2, peer)
msg.write(3, tag)
msg.finish()
result.write(1, cast[uint](RequestType.CONNMANAGER))
result.write(1, safeConvert[uint](RequestType.CONNMANAGER))
result.write(6, msg)
result.finish()

proc requestCMTrim(): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/connmgr.go#L47
let msgid = cast[uint](ConnManagerRequestType.TRIM)
let msgid = safeConvert[uint](ConnManagerRequestType.TRIM)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
msg.finish()
result.write(1, cast[uint](RequestType.CONNMANAGER))
result.write(1, safeConvert[uint](RequestType.CONNMANAGER))
result.write(6, msg)
result.finish()

proc requestPSGetTopics(): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/pubsub.go
## Processing function `doPubsubGetTopics(req *pb.PSRequest)`.
let msgid = cast[uint](PSRequestType.GET_TOPICS)
let msgid = safeConvert[uint](PSRequestType.GET_TOPICS)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
msg.finish()
result.write(1, cast[uint](RequestType.PUBSUB))
result.write(1, safeConvert[uint](RequestType.PUBSUB))
result.write(8, msg)
result.finish()

proc requestPSListPeers(topic: string): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/pubsub.go
## Processing function `doPubsubListPeers(req *pb.PSRequest)`.
let msgid = cast[uint](PSRequestType.LIST_PEERS)
let msgid = safeConvert[uint](PSRequestType.LIST_PEERS)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
msg.write(2, topic)
msg.finish()
result.write(1, cast[uint](RequestType.PUBSUB))
result.write(1, safeConvert[uint](RequestType.PUBSUB))
result.write(8, msg)
result.finish()

proc requestPSPublish(topic: string, data: openArray[byte]): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/pubsub.go
## Processing function `doPubsubPublish(req *pb.PSRequest)`.
let msgid = cast[uint](PSRequestType.PUBLISH)
let msgid = safeConvert[uint](PSRequestType.PUBLISH)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
msg.write(2, topic)
msg.write(3, data)
msg.finish()
result.write(1, cast[uint](RequestType.PUBSUB))
result.write(1, safeConvert[uint](RequestType.PUBSUB))
result.write(8, msg)
result.finish()

proc requestPSSubscribe(topic: string): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/pubsub.go
## Processing function `doPubsubSubscribe(req *pb.PSRequest)`.
let msgid = cast[uint](PSRequestType.SUBSCRIBE)
let msgid = safeConvert[uint](PSRequestType.SUBSCRIBE)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
msg.write(2, topic)
msg.finish()
result.write(1, cast[uint](RequestType.PUBSUB))
result.write(1, safeConvert[uint](RequestType.PUBSUB))
result.write(8, msg)
result.finish()

@@ -492,7 +492,7 @@ proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
res = PB.getUVarint(buffer.toOpenArray(0, i), length, size)
if res.isOk():
break
if res.isErr() or size > MaxMessageSize:
if res.isErr() or size > 1'u shl 22:
buffer.setLen(0)
result = buffer
return
@@ -515,7 +515,7 @@ proc socketExists(address: MultiAddress): Future[bool] {.async.} =
var transp = await connect(address)
await transp.closeWait()
result = true
except:
except CatchableError, Defect:
result = false

when defined(windows):
@@ -525,7 +525,7 @@ when defined(windows):
result = cast[int](getCurrentProcessId())
else:
proc getProcessId(): int =
result = cast[int](posix.getpid())
result = int(posix.getpid())

proc getSocket(pattern: string,
count: ptr int): Future[MultiAddress] {.async.} =
@@ -759,12 +759,8 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},
# Starting daemon process
# echo "Starting ", cmd, " ", args.join(" ")
api.process =
try:
exceptionToAssert:
startProcess(cmd, "", args, env, {poParentStreams})
except CatchableError as exc:
raise exc
except Exception as exc:
raiseAssert exc.msg
# Waiting until daemon will not be bound to control socket.
while true:
if not api.process.running():
@@ -872,7 +868,7 @@ proc connect*(api: DaemonAPI, peer: PeerId,
timeout))
pb.withMessage() do:
discard
except:
except CatchableError, Defect:
await api.closeConnection(transp)

proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
@@ -1033,7 +1029,7 @@ proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
var dtype: uint
if pbDhtResponse.getRequiredField(1, dtype).isErr():
raise newException(DaemonLocalError, "Missing required DHT field `type`!")
if dtype != cast[uint](rt):
if dtype != safeConvert[uint](rt):
raise newException(DaemonLocalError, "Wrong DHT answer type! ")

var value: seq[byte]
@@ -1057,9 +1053,9 @@ proc getDhtMessageType(pb: ProtoBuffer): DHTResponseType
var dtype: uint
if pb.getRequiredField(1, dtype).isErr():
raise newException(DaemonLocalError, "Missing required DHT field `type`!")
if dtype == cast[uint](DHTResponseType.VALUE):
if dtype == safeConvert[uint](DHTResponseType.VALUE):
result = DHTResponseType.VALUE
elif dtype == cast[uint](DHTResponseType.END):
elif dtype == safeConvert[uint](DHTResponseType.END):
result = DHTResponseType.END
else:
raise newException(DaemonLocalError, "Wrong DHT answer type!")

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -27,7 +27,9 @@ method connect*(
self: Dial,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false) {.async, base.} =
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out) {.async, base.} =
## connect remote peer without negotiating
## a protocol
##
@@ -36,7 +38,8 @@ method connect*(

method connect*(
self: Dial,
addrs: seq[MultiAddress]): Future[PeerId] {.async, base.} =
address: MultiAddress,
allowUnknownPeerId = false): Future[PeerId] {.async, base.} =
## Connects to a peer and retrieve its PeerId

doAssert(false, "Not implemented!")

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -7,7 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

import std/[sugar, tables, sequtils]
import std/tables

import stew/results
import pkg/[chronos,
@@ -17,7 +17,9 @@ import pkg/[chronos,
import dial,
peerid,
peerinfo,
peerstore,
multicodec,
muxers/muxer,
multistream,
connmanager,
stream/connection,
@@ -34,25 +36,25 @@ logScope:
declareCounter(libp2p_total_dial_attempts, "total attempted dials")
declareCounter(libp2p_successful_dials, "dialed successful peers")
declareCounter(libp2p_failed_dials, "failed dials")
declareCounter(libp2p_failed_upgrades_outgoing, "outgoing connections failed upgrades")

type
DialFailedError* = object of LPError

Dialer* = ref object of Dial
localPeerId*: PeerId
ms: MultistreamSelect
connManager: ConnManager
dialLock: Table[PeerId, AsyncLock]
transports: seq[Transport]
peerStore: PeerStore
nameResolver: NameResolver

proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
hostname: string,
address: MultiAddress):
Future[Connection] {.async.} =
address: MultiAddress,
upgradeDir = Direction.Out):
Future[Muxer] {.async.} =

for transport in self.transports: # for each transport
if transport.handles(address): # check if it can dial it
@@ -60,7 +62,7 @@ proc dialAndUpgrade(
let dialed =
try:
libp2p_total_dial_attempts.inc()
await transport.dial(hostname, address)
await transport.dial(hostname, address, peerId)
except CancelledError as exc:
debug "Dialing canceled", msg = exc.msg, peerId
raise exc
@@ -69,29 +71,29 @@ proc dialAndUpgrade(
libp2p_failed_dials.inc()
return nil # Try the next address

# also keep track of the connection's bottom unsafe transport direction
# required by gossipsub scoring
dialed.transportDir = Direction.Out

libp2p_successful_dials.inc()

let conn =
let mux =
try:
await transport.upgradeOutgoing(dialed, peerId)
dialed.transportDir = upgradeDir
await transport.upgrade(dialed, upgradeDir, peerId)
except CatchableError as exc:
# If we failed to establish the connection through one transport,
# we won't succeeded through another - no use in trying again
await dialed.close()
debug "Upgrade failed", msg = exc.msg, peerId
if exc isnot CancelledError:
libp2p_failed_upgrades_outgoing.inc()
if upgradeDir == Direction.Out:
libp2p_failed_upgrades_outgoing.inc()
else:
libp2p_failed_upgrades_incoming.inc()

# Try other address
return nil

doAssert not isNil(conn), "connection died after upgradeOutgoing"
debug "Dial successful", conn, peerId = conn.peerId
return conn
doAssert not isNil(mux), "connection died after upgrade " & $upgradeDir
debug "Dial successful", peerId = mux.connection.peerId
return mux
return nil

proc expandDnsAddr(
@@ -125,8 +127,9 @@ proc expandDnsAddr(
proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress]):
Future[Connection] {.async.} =
addrs: seq[MultiAddress],
upgradeDir = Direction.Out):
Future[Muxer] {.async.} =

debug "Dialing peer", peerId

@@ -143,16 +146,26 @@ proc dialAndUpgrade(
else: await self.nameResolver.resolveMAddress(expandedAddress)

for resolvedAddress in resolvedAddresses:
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress)
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, upgradeDir)
if not isNil(result):
return result

proc tryReusingConnection(self: Dialer, peerId: PeerId): Future[Opt[Muxer]] {.async.} =
let muxer = self.connManager.selectMuxer(peerId)
if muxer == nil:
return Opt.none(Muxer)

trace "Reusing existing connection", muxer, direction = $muxer.connection.dir
return Opt.some(muxer)

proc internalConnect(
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress],
forceDial: bool):
Future[Connection] {.async.} =
forceDial: bool,
reuseConnection = true,
upgradeDir = Direction.Out):
Future[Muxer] {.async.} =
if Opt.some(self.localPeerId) == peerId:
raise newException(CatchableError, "can't dial self!")

@@ -161,44 +174,31 @@ proc internalConnect(
try:
await lock.acquire()

# Check if we have a connection already and try to reuse it
var conn =
if peerId.isSome: self.connManager.selectConn(peerId.get())
else: nil
if conn != nil:
if conn.atEof or conn.closed:
# This connection should already have been removed from the connection
# manager - it's essentially a bug that we end up here - we'll fail
# for now, hoping that this will clean themselves up later...
warn "dead connection in connection manager", conn
await conn.close()
raise newException(DialFailedError, "Zombie connection encountered")
if peerId.isSome and reuseConnection:
let muxOpt = await self.tryReusingConnection(peerId.get())
if muxOpt.isSome:
return muxOpt.get()

trace "Reusing existing connection", conn, direction = $conn.dir
return conn

let slot = await self.connManager.getOutgoingSlot(forceDial)
conn =
let slot = self.connManager.getOutgoingSlot(forceDial)
let muxed =
try:
await self.dialAndUpgrade(peerId, addrs)
await self.dialAndUpgrade(peerId, addrs, upgradeDir)
except CatchableError as exc:
slot.release()
raise exc
slot.trackConnection(conn)
if isNil(conn): # None of the addresses connected
slot.trackMuxer(muxed)
if isNil(muxed): # None of the addresses connected
raise newException(DialFailedError, "Unable to establish outgoing link")

# A disconnect could have happened right after
# we've added the connection so we check again
# to prevent races due to that.
if conn.closed() or conn.atEof():
# This can happen when the other ends drops us
# before we get a chance to return the connection
# back to the dialer.
trace "Connection dead on arrival", conn
raise newLPStreamClosedError()
try:
self.connManager.storeMuxer(muxed)
await self.peerStore.identify(muxed)
except CatchableError as exc:
trace "Failed to finish outgoung upgrade", err=exc.msg
await muxed.close()
raise exc

return conn
return muxed
finally:
if lock.locked():
lock.release()
@@ -207,30 +207,44 @@ method connect*(
self: Dialer,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false) {.async.} =
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out) {.async.} =
## connect remote peer without negotiating
## a protocol
##

if self.connManager.connCount(peerId) > 0:
if self.connManager.connCount(peerId) > 0 and reuseConnection:
return

discard await self.internalConnect(Opt.some(peerId), addrs, forceDial)
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, upgradeDir)

method connect*(
self: Dialer,
addrs: seq[MultiAddress],
): Future[PeerId] {.async.} =
address: MultiAddress,
allowUnknownPeerId = false): Future[PeerId] {.async.} =
## Connects to a peer and retrieve its PeerId

return (await self.internalConnect(Opt.none(PeerId), addrs, false)).peerId
let fullAddress = parseFullAddress(address)
if fullAddress.isOk:
return (await self.internalConnect(
Opt.some(fullAddress.get()[0]),
@[fullAddress.get()[1]],
false)).connection.peerId
else:
if allowUnknownPeerId == false:
raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
return (await self.internalConnect(
Opt.none(PeerId),
@[address],
false)).connection.peerId

proc negotiateStream(
self: Dialer,
conn: Connection,
protos: seq[string]): Future[Connection] {.async.} =
trace "Negotiating stream", conn, protos
let selected = await self.ms.select(conn, protos)
let selected = await MultistreamSelect.select(conn, protos)
if not protos.contains(selected):
await conn.closeWithEOF()
raise newException(DialFailedError, "Unable to select sub-protocol " & $protos)
@@ -248,11 +262,11 @@ method tryDial*(

trace "Check if it can dial", peerId, addrs
try:
let conn = await self.dialAndUpgrade(Opt.some(peerId), addrs)
if conn.isNil():
let mux = await self.dialAndUpgrade(Opt.some(peerId), addrs)
if mux.isNil():
raise newException(DialFailedError, "No valid multiaddress")
await conn.close()
return conn.observedAddr
await mux.close()
return mux.connection.observedAddr
except CancelledError as exc:
raise exc
except CatchableError as exc:
@@ -284,7 +298,7 @@ method dial*(
##

var
conn: Connection
conn: Muxer
stream: Connection

proc cleanup() {.async.} =
@@ -321,12 +335,12 @@ proc new*(
T: type Dialer,
localPeerId: PeerId,
connManager: ConnManager,
peerStore: PeerStore,
transports: seq[Transport],
ms: MultistreamSelect,
nameResolver: NameResolver = nil): Dialer =

T(localPeerId: localPeerId,
connManager: connManager,
transports: transports,
ms: ms,
peerStore: peerStore,
nameResolver: nameResolver)

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -12,7 +12,6 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}

import sequtils
import chronos
import ./discoverymngr,
../protocols/rendezvous,

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -15,7 +15,7 @@ else:
{.push raises: [].}
{.push public.}

import pkg/chronos
import pkg/chronos, chronicles
import std/[nativesockets, hashes]
import tables, strutils, sets, stew/shims/net
import multicodec, multihash, multibase, transcoder, vbuffer, peerid,
@@ -23,6 +23,9 @@ import multicodec, multihash, multibase, transcoder, vbuffer, peerid,
import stew/[base58, base32, endians2, results]
export results, minprotobuf, vbuffer, utility

logScope:
topics = "libp2p multiaddress"

type
MAKind* = enum
None, Fixed, Length, Path, Marker
@@ -34,7 +37,7 @@ type
coder*: Transcoder

MultiAddress* = object
data*: VBuffer
data: VBuffer

MaPatternOp* = enum
Eq, Or, And
@@ -63,6 +66,10 @@ const
IPPROTO_TCP = Protocol.IPPROTO_TCP
IPPROTO_UDP = Protocol.IPPROTO_UDP

proc data*(ma: MultiAddress): VBuffer =
## Returns the data buffer of the MultiAddress.
return ma.data

proc hash*(a: MultiAddress): Hash =
var h: Hash = 0
h = h !& hash(a.data.buffer)
@@ -76,7 +83,7 @@ proc ip4StB(s: string, vb: var VBuffer): bool =
if a.family == IpAddressFamily.IPv4:
vb.writeArray(a.address_v4)
result = true
except:
except CatchableError:
discard

proc ip4BtS(vb: var VBuffer, s: var string): bool =
@@ -99,7 +106,7 @@ proc ip6StB(s: string, vb: var VBuffer): bool =
if a.family == IpAddressFamily.IPv6:
vb.writeArray(a.address_v6)
result = true
except:
except CatchableError:
discard

proc ip6BtS(vb: var VBuffer, s: var string): bool =
@@ -143,14 +150,14 @@ proc portStB(s: string, vb: var VBuffer): bool =
port[1] = cast[byte](nport and 0xFF)
vb.writeArray(port)
result = true
except:
except CatchableError:
discard

proc portBtS(vb: var VBuffer, s: var string): bool =
## Port number bufferToString() implementation.
var port: array[2, byte]
if vb.readArray(port) == 2:
var nport = (cast[uint16](port[0]) shl 8) or cast[uint16](port[1])
var nport = (safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
s = $nport
result = true

@@ -168,7 +175,7 @@ proc p2pStB(s: string, vb: var VBuffer): bool =
if MultiHash.decode(data, mh).isOk:
vb.writeSeq(data)
result = true
except:
except CatchableError:
discard

proc p2pBtS(vb: var VBuffer, s: var string): bool =
@@ -203,14 +210,14 @@ proc onionStB(s: string, vb: var VBuffer): bool =
address[11] = cast[byte](nport and 0xFF)
vb.writeArray(address)
result = true
except:
except CatchableError:
discard

proc onionBtS(vb: var VBuffer, s: var string): bool =
## ONION address bufferToString() implementation.
var buf: array[12, byte]
if vb.readArray(buf) == 12:
var nport = (cast[uint16](buf[10]) shl 8) or cast[uint16](buf[11])
var nport = (safeConvert[uint16](buf[10]) shl 8) or safeConvert[uint16](buf[11])
s = Base32Lower.encode(buf.toOpenArray(0, 9))
s.add(":")
s.add($nport)
@@ -237,14 +244,14 @@ proc onion3StB(s: string, vb: var VBuffer): bool =
address[36] = cast[byte](nport and 0xFF)
vb.writeArray(address)
result = true
except:
except CatchableError:
discard

proc onion3BtS(vb: var VBuffer, s: var string): bool =
## ONION address bufferToString() implementation.
var buf: array[37, byte]
if vb.readArray(buf) == 37:
var nport = (cast[uint16](buf[35]) shl 8) or cast[uint16](buf[36])
var nport = (safeConvert[uint16](buf[35]) shl 8) or safeConvert[uint16](buf[36])
s = Base32Lower.encode(buf.toOpenArray(0, 34))
s.add(":")
s.add($nport)
@@ -462,14 +469,27 @@ const
IP6* = mapEq("ip6")
DNS* = mapOr(DNSANY, DNS4, DNS6, DNSADDR)
IP* = mapOr(IP4, IP6)
TCP* = mapOr(mapAnd(DNS, mapEq("tcp")), mapAnd(IP, mapEq("tcp")))
UDP* = mapOr(mapAnd(DNS, mapEq("udp")), mapAnd(IP, mapEq("udp")))
DNS_OR_IP* = mapOr(DNS, IP)
TCP_DNS* = mapAnd(DNS, mapEq("tcp"))
TCP_IP* =mapAnd(IP, mapEq("tcp"))
TCP* = mapOr(TCP_DNS, TCP_IP)
UDP_DNS* = mapAnd(DNS, mapEq("udp"))
UDP_IP* = mapAnd(IP, mapEq("udp"))
UDP* = mapOr(UDP_DNS, UDP_IP)
UTP* = mapAnd(UDP, mapEq("utp"))
QUIC* = mapAnd(UDP, mapEq("quic"))
UNIX* = mapEq("unix")
WS_DNS* = mapAnd(TCP_DNS, mapEq("ws"))
WS_IP* = mapAnd(TCP_IP, mapEq("ws"))
WS* = mapAnd(TCP, mapEq("ws"))
WSS_DNS* = mapAnd(TCP_DNS, mapEq("wss"))
WSS_IP* = mapAnd(TCP_IP, mapEq("wss"))
WSS* = mapAnd(TCP, mapEq("wss"))
WebSockets_DNS* = mapOr(WS_DNS, WSS_DNS)
WebSockets_IP* = mapOr(WS_IP, WSS_IP)
WebSockets* = mapOr(WS, WSS)
Onion3* = mapEq("onion3")
TcpOnion3* = mapAnd(TCP, Onion3)

Unreliable* = mapOr(UDP)

@@ -870,6 +890,8 @@ proc getProtocol(name: string): MAProtocol {.inline.} =
proc init*(mtype: typedesc[MultiAddress],
value: string): MaResult[MultiAddress] =
## Initialize MultiAddress object from string representation ``value``.
if len(value) == 0 or value == "/":
return err("multiaddress: Address must not be empty!")
var parts = value.trimRight('/').split('/')
if len(parts[0]) != 0:
err("multiaddress: Invalid MultiAddress, must start with `/`")
@@ -917,7 +939,7 @@ proc init*(mtype: typedesc[MultiAddress],
data: openArray[byte]): MaResult[MultiAddress] =
## Initialize MultiAddress with array of bytes ``data``.
if len(data) == 0:
err("multiaddress: Address could not be empty!")
err("multiaddress: Address must not be empty!")
else:
var res: MultiAddress
res.data = initVBuffer()
@@ -1108,6 +1130,10 @@ proc getField*(pb: ProtoBuffer, field: int,
proc getRepeatedField*(pb: ProtoBuffer, field: int,
value: var seq[MultiAddress]): ProtoResult[bool] {.
inline.} =
## Read repeated field from protobuf message. ``field`` is field number. If the message is malformed, an error is returned.
## If field is not present in message, then ``ok(false)`` is returned and value is empty. If field is present,
## but no items could be parsed, then ``err(ProtoError.IncorrectBlob)`` is returned and value is empty.
## If field is present and some item could be parsed, then ``true`` is returned and value contains the parsed values.
var items: seq[seq[byte]]
value.setLen(0)
let res = ? pb.getRepeatedField(field, items)
@@ -1119,6 +1145,8 @@ proc getRepeatedField*(pb: ProtoBuffer, field: int,
if ma.isOk():
value.add(ma.get())
else:
value.setLen(0)
return err(ProtoError.IncorrectBlob)
ok(true)
debug "Not supported MultiAddress in blob", ma = item
if value.len == 0:
err(ProtoError.IncorrectBlob)
else:
ok(true)

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -19,8 +19,6 @@ import varint, vbuffer
import stew/results
export results

{.deadCodeElim: on.}

## List of officially supported codecs can BE found here
## https://github.com/multiformats/multicodec/blob/master/table.csv
const MultiCodecList = [

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}

import std/[strutils, sequtils]
import std/[strutils, sequtils, tables]
import chronos, chronicles, stew/byteutils
import stream/connection,
protocols/protocol
@@ -21,29 +21,31 @@ logScope:
topics = "libp2p multistream"

const
MsgSize* = 64*1024
Codec* = "/multistream/1.0.0"
MsgSize = 1024
Codec = "/multistream/1.0.0"

MSCodec* = "\x13" & Codec & "\n"
Na* = "\x03na\n"
Ls* = "\x03ls\n"
Na = "na\n"
Ls = "ls\n"

type
Matcher* = proc (proto: string): bool {.gcsafe, raises: [Defect].}

MultiStreamError* = object of LPError

HandlerHolder* = object
HandlerHolder* = ref object
protos*: seq[string]
protocol*: LPProtocol
match*: Matcher
openedStreams: CountTable[PeerId]

MultistreamSelect* = ref object of RootObj
handlers*: seq[HandlerHolder]
codec*: string

proc new*(T: typedesc[MultistreamSelect]): T =
T(codec: MSCodec)
T(
codec: Codec,
)

template validateSuffix(str: string): untyped =
if str.endsWith("\n"):
@@ -51,13 +53,13 @@ template validateSuffix(str: string): untyped =
else:
raise newException(MultiStreamError, "MultistreamSelect failed, malformed message")

proc select*(m: MultistreamSelect,
proc select*(_: MultistreamSelect | type MultistreamSelect,
conn: Connection,
proto: seq[string]):
Future[string] {.async.} =
trace "initiating handshake", conn, codec = m.codec
trace "initiating handshake", conn, codec = Codec
## select a remote protocol
await conn.write(m.codec) # write handshake
await conn.writeLp(Codec & "\n") # write handshake
if proto.len() > 0:
trace "selecting proto", conn, proto = proto[0]
await conn.writeLp((proto[0] & "\n")) # select proto
@@ -99,13 +101,13 @@ proc select*(m: MultistreamSelect,
# No alternatives, fail
return ""

proc select*(m: MultistreamSelect,
proc select*(_: MultistreamSelect | type MultistreamSelect,
conn: Connection,
proto: string): Future[bool] {.async.} =
if proto.len > 0:
return (await m.select(conn, @[proto])) == proto
return (await MultistreamSelect.select(conn, @[proto])) == proto
else:
return (await m.select(conn, @[])) == Codec
return (await MultistreamSelect.select(conn, @[])) == Codec

proc select*(m: MultistreamSelect, conn: Connection): Future[bool] =
m.select(conn, "")
@@ -116,7 +118,7 @@ proc list*(m: MultistreamSelect,
if not await m.select(conn):
return

await conn.write(Ls) # send ls
await conn.writeLp(Ls) # send ls

var list = newSeq[string]()
let ms = string.fromBytes(await conn.readLp(MsgSize))
@@ -126,55 +128,86 @@ proc list*(m: MultistreamSelect,

result = list

proc handle*(
_: type MultistreamSelect,
conn: Connection,
protos: seq[string],
matchers = newSeq[Matcher](),
active: bool = false,
): Future[string] {.async, gcsafe.} =
trace "Starting multistream negotiation", conn, handshaked = active
var handshaked = active
while not conn.atEof:
var ms = string.fromBytes(await conn.readLp(MsgSize))
validateSuffix(ms)

if not handshaked and ms != Codec:
debug "expected handshake message", conn, instead=ms
raise newException(CatchableError,
"MultistreamSelect handling failed, invalid first message")

trace "handle: got request", conn, ms
if ms.len() <= 0:
trace "handle: invalid proto", conn
await conn.writeLp(Na)

case ms:
of "ls":
trace "handle: listing protos", conn
#TODO this doens't seem to follow spec, each protocol
# should be length prefixed. Not very important
# since LS is getting deprecated
await conn.writeLp(protos.join("\n") & "\n")
of Codec:
if not handshaked:
await conn.writeLp(Codec & "\n")
handshaked = true
else:
trace "handle: sending `na` for duplicate handshake while handshaked",
conn
await conn.writeLp(Na)
elif ms in protos or matchers.anyIt(it(ms)):
trace "found handler", conn, protocol = ms
await conn.writeLp(ms & "\n")
conn.protocol = ms
return ms
else:
trace "no handlers", conn, protocol = ms
await conn.writeLp(Na)

proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
trace "Starting multistream handler", conn, handshaked = active
var handshaked = active
var
handshaked = active
protos: seq[string]
matchers: seq[Matcher]
for h in m.handlers:
if not isNil(h.match):
matchers.add(h.match)
for proto in h.protos:
protos.add(proto)

try:
while not conn.atEof:
var ms = string.fromBytes(await conn.readLp(MsgSize))
validateSuffix(ms)
let ms = await MultistreamSelect.handle(conn, protos, matchers, active)
for h in m.handlers:
if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
trace "found handler", conn, protocol = ms

if not handshaked and ms != Codec:
notice "expected handshake message", conn, instead=ms
raise newException(CatchableError,
"MultistreamSelect handling failed, invalid first message")

trace "handle: got request", conn, ms
if ms.len() <= 0:
trace "handle: invalid proto", conn
await conn.write(Na)

if m.handlers.len() == 0:
trace "handle: sending `na` for protocol", conn, protocol = ms
await conn.write(Na)
continue

case ms:
of "ls":
trace "handle: listing protos", conn
var protos = ""
for h in m.handlers:
for proto in h.protos:
protos &= (proto & "\n")
await conn.writeLp(protos)
of Codec:
if not handshaked:
await conn.write(m.codec)
handshaked = true
else:
trace "handle: sending `na` for duplicate handshake while handshaked",
conn
await conn.write(Na)
else:
for h in m.handlers:
if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
trace "found handler", conn, protocol = ms
await conn.writeLp(ms & "\n")
conn.protocol = ms
await h.protocol.handler(conn, ms)
return
debug "no handlers", conn, protocol = ms
await conn.write(Na)
var protocolHolder = h
let maxIncomingStreams = protocolHolder.protocol.maxIncomingStreams
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >= maxIncomingStreams:
debug "Max streams for protocol reached, blocking new stream",
conn, protocol = ms, maxIncomingStreams
return
protocolHolder.openedStreams.inc(conn.peerId)
try:
await protocolHolder.protocol.handler(conn, ms)
finally:
protocolHolder.openedStreams.inc(conn.peerId, -1)
if protocolHolder.openedStreams[conn.peerId] == 0:
protocolHolder.openedStreams.del(conn.peerId)
return
debug "no handlers", conn, ms
except CancelledError as exc:
raise exc
except CatchableError as exc:

@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import pkg/[chronos, nimcrypto/utils, chronicles, stew/byteutils]
|
||||
import pkg/[chronos, chronicles, stew/byteutils]
|
||||
import ../../stream/connection,
|
||||
../../utility,
|
||||
../../varint,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -13,7 +13,7 @@ else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[oids, strformat]
|
||||
import pkg/[chronos, chronicles, metrics, nimcrypto/utils]
|
||||
import pkg/[chronos, chronicles, metrics]
|
||||
import ./coder,
|
||||
../muxer,
|
||||
../../stream/[bufferstream, connection, streamseq],
|
||||
@@ -236,7 +236,7 @@ proc completeWrite(
|
||||
else:
|
||||
await fut
|
||||
|
||||
when defined(libp2p_network_protocol_metrics):
|
||||
when defined(libp2p_network_protocols_metrics):
|
||||
if s.protocol.len > 0:
|
||||
libp2p_protocols_bytes.inc(msgLen.int64, labelValues=[s.protocol, "out"])
|
||||
|
||||
@@ -244,7 +244,11 @@ proc completeWrite(
|
||||
except CancelledError as exc:
|
||||
# Chronos may still send the data
|
||||
raise exc
|
||||
except LPStreamClosedError as exc:
|
||||
except LPStreamConnDownError as exc:
|
||||
await s.reset()
|
||||
await s.conn.close()
|
||||
raise exc
|
||||
except LPStreamEOFError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
trace "exception in lpchannel write handler", s, msg = exc.msg
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -177,8 +177,14 @@ method handle*(m: Mplex) {.async, gcsafe.} =
|
||||
raise newLPStreamLimitError()
|
||||
|
||||
trace "pushing data to channel", m, channel, len = data.len
|
||||
await channel.pushData(data)
|
||||
trace "pushed data to channel", m, channel, len = data.len
|
||||
try:
|
||||
await channel.pushData(data)
|
||||
trace "pushed data to channel", m, channel, len = data.len
|
||||
except LPStreamClosedError as exc:
|
||||
# Channel is being closed, but `cleanupChann` was not yet triggered.
|
||||
trace "pushing data to channel failed", m, channel, len = data.len,
|
||||
msg = exc.msg
|
||||
discard # Ignore message, same as if `cleanupChann` had completed.
|
||||
|
||||
of MessageType.CloseIn, MessageType.CloseOut:
|
||||
await channel.pushEof()
|
||||
@@ -197,7 +203,8 @@ method handle*(m: Mplex) {.async, gcsafe.} =
|
||||
|
||||
proc new*(M: type Mplex,
|
||||
conn: Connection,
|
||||
inTimeout, outTimeout: Duration = DefaultChanTimeout,
|
||||
inTimeout: Duration = DefaultChanTimeout,
|
||||
outTimeout: Duration = DefaultChanTimeout,
|
||||
maxChannCount: int = MaxChannelCount): Mplex =
|
||||
M(connection: conn,
|
||||
inChannTimeout: inTimeout,
|
||||
@@ -242,3 +249,7 @@ method close*(m: Mplex) {.async, gcsafe.} =
|
||||
m.channels[true].clear()
|
||||
|
||||
trace "Closed mplex", m
|
||||
|
||||
method getStreams*(m: Mplex): seq[Connection] =
|
||||
for c in m.channels[false].values: result.add(c)
|
||||
for c in m.channels[true].values: result.add(c)
|
||||
|
||||
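A minimal usage sketch of the split constructor above (not part of the diff), assuming an existing Connection `conn`:

# Sketch: the two channel timeouts are now separate keyword arguments.
let mplex = Mplex.new(
  conn,
  inTimeout = 5.minutes,
  outTimeout = 5.minutes,
  maxChannCount = 200)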
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -13,8 +13,7 @@ else:
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos, chronicles
|
||||
import ../protocols/protocol,
|
||||
../stream/connection,
|
||||
import ../stream/connection,
|
||||
../errors
|
||||
|
||||
logScope:
|
||||
@@ -32,24 +31,28 @@ type
|
||||
|
||||
Muxer* = ref object of RootObj
|
||||
streamHandler*: StreamHandler
|
||||
handler*: Future[void]
|
||||
connection*: Connection
|
||||
|
||||
# user-provided proc that returns a constructed Muxer
|
||||
MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [Defect].}
|
||||
|
||||
# this wraps a creator proc that knows how to make muxers
|
||||
MuxerProvider* = ref object of LPProtocol
|
||||
MuxerProvider* = object
|
||||
newMuxer*: MuxerConstructor
|
||||
streamHandler*: StreamHandler # triggered every time there is a new stream, called for any muxer instance
|
||||
muxerHandler*: MuxerHandler # triggered every time there is a new muxed connection created
|
||||
codec*: string
|
||||
|
||||
func shortLog*(m: Muxer): auto = shortLog(m.connection)
|
||||
func shortLog*(m: Muxer): auto =
|
||||
if isNil(m): "nil"
|
||||
else: shortLog(m.connection)
|
||||
chronicles.formatIt(Muxer): shortLog(it)
|
||||
|
||||
# muxer interface
|
||||
method newStream*(m: Muxer, name: string = "", lazy: bool = false):
|
||||
Future[Connection] {.base, async, gcsafe.} = discard
|
||||
method close*(m: Muxer) {.base, async, gcsafe.} = discard
|
||||
method close*(m: Muxer) {.base, async, gcsafe.} =
|
||||
if not isNil(m.connection):
|
||||
await m.connection.close()
|
||||
method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard
|
||||
|
||||
proc new*(
|
||||
@@ -57,36 +60,7 @@ proc new*(
|
||||
creator: MuxerConstructor,
|
||||
codec: string): T {.gcsafe.} =
|
||||
|
||||
let muxerProvider = T(newMuxer: creator)
|
||||
muxerProvider.codec = codec
|
||||
muxerProvider.init()
|
||||
let muxerProvider = T(newMuxer: creator, codec: codec)
|
||||
muxerProvider
|
||||
|
||||
method init(c: MuxerProvider) =
|
||||
proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} =
|
||||
trace "starting muxer handler", proto=proto, conn
|
||||
try:
|
||||
let
|
||||
muxer = c.newMuxer(conn)
|
||||
|
||||
if not isNil(c.streamHandler):
|
||||
muxer.streamHandler = c.streamHandler
|
||||
|
||||
var futs = newSeq[Future[void]]()
|
||||
futs &= muxer.handle()
|
||||
|
||||
# finally await both the futures
|
||||
if not isNil(c.muxerHandler):
|
||||
await c.muxerHandler(muxer)
|
||||
when defined(libp2p_agents_metrics):
|
||||
conn.shortAgent = muxer.connection.shortAgent
|
||||
|
||||
checkFutures(await allFinished(futs))
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
trace "exception in muxer handler", exc = exc.msg, conn, proto
|
||||
finally:
|
||||
await conn.close()
|
||||
|
||||
c.handler = handler
|
||||
method getStreams*(m: Muxer): seq[Connection] {.base.} = doAssert false, "not implemented"
|
||||
|
||||
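A minimal sketch of the simplified MuxerProvider shown above; the codec string is an assumption used only for illustration:

# MuxerProvider is now a plain object holding a constructor closure and a codec.
let provider = MuxerProvider.new(
  proc(conn: Connection): Muxer {.gcsafe, closure, raises: [Defect].} = Mplex.new(conn),
  "/mplex/6.7.0")  # assumed mplex codec string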
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -84,7 +84,7 @@ proc `$`(header: YamuxHeader): string =
|
||||
proc encode(header: YamuxHeader): array[12, byte] =
|
||||
result[0] = header.version
|
||||
result[1] = uint8(header.msgType)
|
||||
result[2..3] = toBytesBE(cast[uint16](header.flags))
|
||||
result[2..3] = toBytesBE(uint16(cast[uint8](header.flags))) # workaround https://github.com/nim-lang/Nim/issues/21789
|
||||
result[4..7] = toBytesBE(header.streamId)
|
||||
result[8..11] = toBytesBE(header.length)
|
||||
|
||||
@@ -356,6 +356,8 @@ proc open*(channel: YamuxChannel) {.async, gcsafe.} =
|
||||
channel.opened = true
|
||||
await channel.conn.write(YamuxHeader.data(channel.id, 0, {if channel.isSrc: Syn else: Ack}))
|
||||
|
||||
method getWrapped*(channel: YamuxChannel): Connection = channel.conn
|
||||
|
||||
type
|
||||
Yamux* = ref object of Muxer
|
||||
channels: Table[uint32, YamuxChannel]
|
||||
@@ -414,7 +416,8 @@ method close*(m: Yamux) {.async.} =
|
||||
let channels = toSeq(m.channels.values())
|
||||
for channel in channels:
|
||||
await channel.reset(true)
|
||||
await m.connection.write(YamuxHeader.goAway(NormalTermination))
|
||||
try: await m.connection.write(YamuxHeader.goAway(NormalTermination))
|
||||
except CatchableError as exc: trace "failed to send goAway", msg=exc.msg
|
||||
await m.connection.close()
|
||||
trace "Closed yamux"
|
||||
|
||||
@@ -505,6 +508,9 @@ method handle*(m: Yamux) {.async, gcsafe.} =
|
||||
await m.close()
|
||||
trace "Stopped yamux handler"
|
||||
|
||||
method getStreams*(m: Yamux): seq[Connection] =
|
||||
for c in m.channels.values: result.add(c)
|
||||
|
||||
method newStream*(
|
||||
m: Yamux,
|
||||
name: string = "",
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -15,7 +15,8 @@ else:
|
||||
import
|
||||
std/[streams, strutils, sets, sequtils],
|
||||
chronos, chronicles, stew/byteutils,
|
||||
dnsclientpkg/[protocol, types]
|
||||
dnsclientpkg/[protocol, types],
|
||||
../utility
|
||||
|
||||
import
|
||||
nameresolver
|
||||
@@ -80,9 +81,7 @@ proc getDnsResponse(
|
||||
# parseResponse can have a raises: [Exception, ..] because of
|
||||
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||
# it can't actually raise though
|
||||
return parseResponse(string.fromBytes(rawResponse))
|
||||
except CatchableError as exc: raise exc
|
||||
except Exception as exc: raiseAssert exc.msg
|
||||
return exceptionToAssert: parseResponse(string.fromBytes(rawResponse))
|
||||
finally:
|
||||
await sock.closeWait()
|
||||
|
||||
@@ -118,9 +117,7 @@ method resolveIp*(
|
||||
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||
# it can't actually raise though
|
||||
resolvedAddresses.incl(
|
||||
try: answer.toString()
|
||||
except CatchableError as exc: raise exc
|
||||
except Exception as exc: raiseAssert exc.msg
|
||||
exceptionToAssert(answer.toString())
|
||||
)
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
@@ -151,9 +148,13 @@ method resolveTxt*(
|
||||
for _ in 0 ..< self.nameServers.len:
|
||||
let server = self.nameServers[0]
|
||||
try:
|
||||
# toString can have a raises: [Exception, ..] because of
|
||||
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||
# it can't actually raise though
|
||||
let response = await getDnsResponse(server, address, TXT)
|
||||
trace "Got TXT response", server = $server, answer=response.answers.mapIt(it.toString())
|
||||
return response.answers.mapIt(it.toString())
|
||||
return exceptionToAssert:
|
||||
trace "Got TXT response", server = $server, answer=response.answers.mapIt(it.toString())
|
||||
response.answers.mapIt(it.toString())
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
@@ -161,11 +162,6 @@ method resolveTxt*(
|
||||
self.nameServers.add(self.nameServers[0])
|
||||
self.nameServers.delete(0)
|
||||
continue
|
||||
except Exception as e:
|
||||
# toString can have a raises: [Exception, ..] because of
|
||||
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||
# it can't actually raise though
|
||||
raiseAssert e.msg
|
||||
|
||||
debug "Failed to resolve TXT, returning empty set"
|
||||
return @[]
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -13,7 +13,7 @@ else:
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[streams, strutils, tables],
|
||||
std/tables,
|
||||
chronos, chronicles
|
||||
|
||||
import nameresolver
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -16,7 +16,7 @@ import std/[sugar, sets, sequtils, strutils]
|
||||
import
|
||||
chronos,
|
||||
chronicles,
|
||||
stew/[endians2, byteutils]
|
||||
stew/endians2
|
||||
import ".."/[multiaddress, multicodec]
|
||||
|
||||
logScope:
|
||||
|
||||
98
libp2p/observedaddrmanager.nim
Normal file
@@ -0,0 +1,98 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[sequtils, tables],
|
||||
chronos, chronicles,
|
||||
multiaddress, multicodec
|
||||
|
||||
type
|
||||
## Manages MultiAddresses observed by remote peers. It keeps track of the most observed IP and IP/Port.
|
||||
ObservedAddrManager* = ref object of RootObj
|
||||
observedIPsAndPorts: seq[MultiAddress]
|
||||
maxSize: int
|
||||
minCount: int
|
||||
|
||||
proc addObservation*(self:ObservedAddrManager, observedAddr: MultiAddress): bool =
|
||||
## Adds a new observed MultiAddress. If adding would exceed maxSize, the oldest observation is removed first.
|
||||
if self.observedIPsAndPorts.len >= self.maxSize:
|
||||
self.observedIPsAndPorts.del(0)
|
||||
self.observedIPsAndPorts.add(observedAddr)
|
||||
return true
|
||||
|
||||
proc getProtocol(self: ObservedAddrManager, observations: seq[MultiAddress], multiCodec: MultiCodec): Opt[MultiAddress] =
|
||||
var countTable = toCountTable(observations)
|
||||
countTable.sort()
|
||||
var orderedPairs = toSeq(countTable.pairs)
|
||||
for (ma, count) in orderedPairs:
|
||||
let maFirst = ma[0].get()
|
||||
if maFirst.protoCode.get() == multiCodec and count >= self.minCount:
|
||||
return Opt.some(ma)
|
||||
return Opt.none(MultiAddress)
|
||||
|
||||
proc getMostObservedProtocol(self: ObservedAddrManager, multiCodec: MultiCodec): Opt[MultiAddress] =
|
||||
## Returns the most observed IP address or none if the number of observations is less than minCount.
|
||||
let observedIPs = self.observedIPsAndPorts.mapIt(it[0].get())
|
||||
return self.getProtocol(observedIPs, multiCodec)
|
||||
|
||||
proc getMostObservedProtoAndPort(self: ObservedAddrManager, multiCodec: MultiCodec): Opt[MultiAddress] =
|
||||
## Returns the most observed IP/Port address or none if the number of observations is less than minCount.
|
||||
return self.getProtocol(self.observedIPsAndPorts, multiCodec)
|
||||
|
||||
proc getMostObservedProtosAndPorts*(self: ObservedAddrManager): seq[MultiAddress] =
|
||||
## Returns the most observed IP4/Port and IP6/Port address or an empty seq if the number of observations
|
||||
## is less than minCount.
|
||||
var res: seq[MultiAddress]
|
||||
let ip4 = self.getMostObservedProtoAndPort(multiCodec("ip4"))
|
||||
if ip4.isSome():
|
||||
res.add(ip4.get())
|
||||
let ip6 = self.getMostObservedProtoAndPort(multiCodec("ip6"))
|
||||
if ip6.isSome():
|
||||
res.add(ip6.get())
|
||||
return res
|
||||
|
||||
proc guessDialableAddr*(
|
||||
self: ObservedAddrManager,
|
||||
ma: MultiAddress): MultiAddress =
|
||||
## Replaces the first proto value of each listen address with the corresponding (matching the proto code) most observed value.
|
||||
## If the most observed value is not available, the original MultiAddress is returned.
|
||||
try:
|
||||
let maFirst = ma[0]
|
||||
let maRest = ma[1..^1]
|
||||
if maRest.isErr():
|
||||
return ma
|
||||
|
||||
let observedIP = self.getMostObservedProtocol(maFirst.get().protoCode().get())
|
||||
return
|
||||
if observedIP.isNone() or maFirst.get() == observedIP.get():
|
||||
ma
|
||||
else:
|
||||
observedIP.get() & maRest.get()
|
||||
except CatchableError as error:
|
||||
debug "Error while handling manual port forwarding", msg = error.msg
|
||||
return ma
|
||||
|
||||
proc `$`*(self: ObservedAddrManager): string =
|
||||
## Returns a string representation of the ObservedAddrManager.
|
||||
return "IPs and Ports: " & $self.observedIPsAndPorts
|
||||
|
||||
proc new*(
|
||||
T: typedesc[ObservedAddrManager],
|
||||
maxSize = 10,
|
||||
minCount = 3): T =
|
||||
## Creates a new ObservedAddrManager.
|
||||
return T(
|
||||
observedIPsAndPorts: newSeq[MultiAddress](),
|
||||
maxSize: maxSize,
|
||||
minCount: minCount)
|
||||
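A short usage sketch of the new ObservedAddrManager (illustrative only; assumes MultiAddress.init parses the textual form):

# Record a few identical observations, then query the most observed address.
let oam = ObservedAddrManager.new(maxSize = 10, minCount = 3)
for i in 0 ..< 3:
  discard oam.addObservation(MultiAddress.init("/ip4/8.8.8.8/tcp/4001").tryGet())
echo oam.getMostObservedProtosAndPorts()  # one element once minCount matching observations exist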
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -184,6 +184,11 @@ func init*(t: typedesc[PeerId], seckey: PrivateKey): Result[PeerId, cstring] =
|
||||
## Create new peer id from private key ``seckey``.
|
||||
PeerId.init(? seckey.getPublicKey().orError(cstring("invalid private key")))
|
||||
|
||||
proc random*(t: typedesc[PeerId], rng = newRng()): Result[PeerId, cstring] =
|
||||
## Create new peer id with random public key.
|
||||
let randomKey = PrivateKey.random(Secp256k1, rng[])[]
|
||||
PeerId.init(randomKey).orError(cstring("failed to generate random key"))
|
||||
|
||||
func match*(pid: PeerId, pubkey: PublicKey): bool =
|
||||
## Returns ``true`` if ``pid`` matches public key ``pubkey``.
|
||||
let p = PeerId.init(pubkey)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -16,8 +16,7 @@ runnableExamples:
|
||||
# Create a custom book type
|
||||
type MoodBook = ref object of PeerBook[string]
|
||||
|
||||
var somePeerId: PeerId
|
||||
discard somePeerId.init("")
|
||||
var somePeerId = PeerId.random().get()
|
||||
|
||||
peerStore[MoodBook][somePeerId] = "Happy"
|
||||
doAssert peerStore[MoodBook][somePeerId] == "Happy"
|
||||
@@ -29,11 +28,16 @@ else:
|
||||
|
||||
import
|
||||
std/[tables, sets, options, macros],
|
||||
chronos,
|
||||
./crypto/crypto,
|
||||
./protocols/identify,
|
||||
./protocols/protocol,
|
||||
./peerid, ./peerinfo,
|
||||
./routing_record,
|
||||
./multiaddress,
|
||||
./stream/connection,
|
||||
./multistream,
|
||||
./muxers/muxer,
|
||||
utility
|
||||
|
||||
type
|
||||
@@ -71,11 +75,15 @@ type
|
||||
|
||||
PeerStore* {.public.} = ref object
|
||||
books: Table[string, BasePeerBook]
|
||||
identify: Identify
|
||||
capacity*: int
|
||||
toClean*: seq[PeerId]
|
||||
|
||||
proc new*(T: type PeerStore, capacity = 1000): PeerStore {.public.} =
|
||||
T(capacity: capacity)
|
||||
proc new*(T: type PeerStore, identify: Identify, capacity = 1000): PeerStore {.public.} =
|
||||
T(
|
||||
identify: identify,
|
||||
capacity: capacity
|
||||
)
|
||||
|
||||
#########################
|
||||
# Generic Peer Book API #
|
||||
@@ -153,6 +161,9 @@ proc updatePeerInfo*(
|
||||
if info.addrs.len > 0:
|
||||
peerStore[AddressBook][info.peerId] = info.addrs
|
||||
|
||||
if info.pubkey.isSome:
|
||||
peerStore[KeyBook][info.peerId] = info.pubkey.get()
|
||||
|
||||
if info.agentVersion.isSome:
|
||||
peerStore[AgentBook][info.peerId] = info.agentVersion.get().string
|
||||
|
||||
@@ -184,3 +195,34 @@ proc cleanup*(
|
||||
while peerStore.toClean.len > peerStore.capacity:
|
||||
peerStore.del(peerStore.toClean[0])
|
||||
peerStore.toClean.delete(0)
|
||||
|
||||
proc identify*(
|
||||
peerStore: PeerStore,
|
||||
muxer: Muxer) {.async.} =
|
||||
|
||||
# new stream for identify
|
||||
var stream = await muxer.newStream()
|
||||
if stream == nil:
|
||||
return
|
||||
|
||||
try:
|
||||
if (await MultistreamSelect.select(stream, peerStore.identify.codec())):
|
||||
let info = await peerStore.identify.identify(stream, stream.peerId)
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
var knownAgent = "unknown"
|
||||
if info.agentVersion.isSome and info.agentVersion.get().len > 0:
|
||||
let shortAgent = info.agentVersion.get().split("/")[0].safeToLowerAscii()
|
||||
if shortAgent.isOk() and KnownLibP2PAgentsSeq.contains(shortAgent.get()):
|
||||
knownAgent = shortAgent.get()
|
||||
muxer.connection.setShortAgent(knownAgent)
|
||||
|
||||
peerStore.updatePeerInfo(info)
|
||||
finally:
|
||||
await stream.closeWithEOF()
|
||||
|
||||
proc getMostObservedProtosAndPorts*(self: PeerStore): seq[MultiAddress] =
|
||||
return self.identify.observedAddrManager.getMostObservedProtosAndPorts()
|
||||
|
||||
proc guessDialableAddr*(self: PeerStore, ma: MultiAddress): MultiAddress =
|
||||
return self.identify.observedAddrManager.guessDialableAddr(ma)
|
||||
|
||||
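A minimal sketch of the new constructor, assuming an Identify instance `identify` created elsewhere:

# PeerStore now holds the Identify protocol so it can identify peers over new muxers.
let peerStore = PeerStore.new(identify, capacity = 500)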
@@ -1,5 +1,5 @@
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -19,8 +19,7 @@ export results, utility
|
||||
|
||||
{.push public.}
|
||||
|
||||
const
|
||||
MaxMessageSize* = 1'u shl 22
|
||||
const MaxMessageSize = 1'u shl 22
|
||||
|
||||
type
|
||||
ProtoFieldKind* = enum
|
||||
@@ -37,6 +36,7 @@ type
|
||||
buffer*: seq[byte]
|
||||
offset*: int
|
||||
length*: int
|
||||
maxSize*: uint
|
||||
|
||||
ProtoHeader* = object
|
||||
wire*: ProtoFieldKind
|
||||
@@ -72,12 +72,12 @@ type
|
||||
hint | hint32 | hint64 | float32 | float64
|
||||
|
||||
const
|
||||
SupportedWireTypes* = {
|
||||
int(ProtoFieldKind.Varint),
|
||||
int(ProtoFieldKind.Fixed64),
|
||||
int(ProtoFieldKind.Length),
|
||||
int(ProtoFieldKind.Fixed32)
|
||||
}
|
||||
SupportedWireTypes* = @[
|
||||
uint64(ProtoFieldKind.Varint),
|
||||
uint64(ProtoFieldKind.Fixed64),
|
||||
uint64(ProtoFieldKind.Length),
|
||||
uint64(ProtoFieldKind.Fixed32)
|
||||
]
|
||||
|
||||
template checkFieldNumber*(i: int) =
|
||||
doAssert((i > 0 and i < (1 shl 29)) and not(i >= 19000 and i <= 19999),
|
||||
@@ -122,23 +122,28 @@ proc vsizeof*(field: ProtoField): int {.inline.} =
|
||||
0
|
||||
|
||||
proc initProtoBuffer*(data: seq[byte], offset = 0,
|
||||
options: set[ProtoFlags] = {}): ProtoBuffer =
|
||||
options: set[ProtoFlags] = {},
|
||||
maxSize = MaxMessageSize): ProtoBuffer =
|
||||
## Initialize ProtoBuffer with shallow copy of ``data``.
|
||||
result.buffer = data
|
||||
result.offset = offset
|
||||
result.options = options
|
||||
result.maxSize = maxSize
|
||||
|
||||
proc initProtoBuffer*(data: openArray[byte], offset = 0,
|
||||
options: set[ProtoFlags] = {}): ProtoBuffer =
|
||||
options: set[ProtoFlags] = {},
|
||||
maxSize = MaxMessageSize): ProtoBuffer =
|
||||
## Initialize ProtoBuffer with copy of ``data``.
|
||||
result.buffer = @data
|
||||
result.offset = offset
|
||||
result.options = options
|
||||
result.maxSize = maxSize
|
||||
|
||||
proc initProtoBuffer*(options: set[ProtoFlags] = {}): ProtoBuffer =
|
||||
proc initProtoBuffer*(options: set[ProtoFlags] = {}, maxSize = MaxMessageSize): ProtoBuffer =
|
||||
## Initialize ProtoBuffer with new sequence of capacity ``cap``.
|
||||
result.buffer = newSeq[byte]()
|
||||
result.options = options
|
||||
result.maxSize = maxSize
|
||||
if WithVarintLength in options:
|
||||
# Our buffer will start from position 10, so we can store length of buffer
|
||||
# in [0, 9].
|
||||
@@ -335,7 +340,7 @@ proc skipValue(data: var ProtoBuffer, header: ProtoHeader): ProtoResult[void] =
|
||||
var bsize = 0'u64
|
||||
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
|
||||
data.offset += length
|
||||
if bsize <= uint64(MaxMessageSize):
|
||||
if bsize <= uint64(data.maxSize):
|
||||
if data.isEnough(int(bsize)):
|
||||
data.offset += int(bsize)
|
||||
ok()
|
||||
@@ -399,7 +404,7 @@ proc getValue[T:byte|char](data: var ProtoBuffer, header: ProtoHeader,
|
||||
outLength = 0
|
||||
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
|
||||
data.offset += length
|
||||
if bsize <= uint64(MaxMessageSize):
|
||||
if bsize <= uint64(data.maxSize):
|
||||
if data.isEnough(int(bsize)):
|
||||
outLength = int(bsize)
|
||||
if len(outBytes) >= int(bsize):
|
||||
@@ -427,7 +432,7 @@ proc getValue[T:seq[byte]|string](data: var ProtoBuffer, header: ProtoHeader,
|
||||
|
||||
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
|
||||
data.offset += length
|
||||
if bsize <= uint64(MaxMessageSize):
|
||||
if bsize <= uint64(data.maxSize):
|
||||
if data.isEnough(int(bsize)):
|
||||
outBytes.setLen(bsize)
|
||||
if bsize > 0'u64:
|
||||
|
||||
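A minimal sketch of the new per-buffer limit, assuming `encodedBytes` holds a serialized message as seq[byte]:

# Decode with a tighter limit than the 4 MiB MaxMessageSize default;
# length-delimited fields larger than maxSize are rejected.
var pb = initProtoBuffer(encodedBytes, maxSize = 1024)
var payload: seq[byte]
if pb.getField(2, payload).isErr():
  echo "field rejected or malformed"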
@@ -1,309 +0,0 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[options, sets, sequtils]
|
||||
import stew/results
|
||||
import chronos, chronicles, stew/objects
|
||||
import ../protocol,
|
||||
../../switch,
|
||||
../../multiaddress,
|
||||
../../multicodec,
|
||||
../../peerid,
|
||||
../../utils/semaphore,
|
||||
../../errors
|
||||
|
||||
logScope:
|
||||
topics = "libp2p autonat"
|
||||
|
||||
const
|
||||
AutonatCodec* = "/libp2p/autonat/1.0.0"
|
||||
AddressLimit = 8
|
||||
|
||||
type
|
||||
AutonatError* = object of LPError
|
||||
|
||||
MsgType* = enum
|
||||
Dial = 0
|
||||
DialResponse = 1
|
||||
|
||||
ResponseStatus* = enum
|
||||
Ok = 0
|
||||
DialError = 100
|
||||
DialRefused = 101
|
||||
BadRequest = 200
|
||||
InternalError = 300
|
||||
|
||||
AutonatPeerInfo* = object
|
||||
id: Option[PeerId]
|
||||
addrs: seq[MultiAddress]
|
||||
|
||||
AutonatDial* = object
|
||||
peerInfo: Option[AutonatPeerInfo]
|
||||
|
||||
AutonatDialResponse* = object
|
||||
status*: ResponseStatus
|
||||
text*: Option[string]
|
||||
ma*: Option[MultiAddress]
|
||||
|
||||
AutonatMsg = object
|
||||
msgType: MsgType
|
||||
dial: Option[AutonatDial]
|
||||
response: Option[AutonatDialResponse]
|
||||
|
||||
proc encode*(msg: AutonatMsg): ProtoBuffer =
|
||||
result = initProtoBuffer()
|
||||
result.write(1, msg.msgType.uint)
|
||||
if msg.dial.isSome():
|
||||
var dial = initProtoBuffer()
|
||||
if msg.dial.get().peerInfo.isSome():
|
||||
var bufferPeerInfo = initProtoBuffer()
|
||||
let peerInfo = msg.dial.get().peerInfo.get()
|
||||
if peerInfo.id.isSome():
|
||||
bufferPeerInfo.write(1, peerInfo.id.get())
|
||||
for ma in peerInfo.addrs:
|
||||
bufferPeerInfo.write(2, ma.data.buffer)
|
||||
bufferPeerInfo.finish()
|
||||
dial.write(1, bufferPeerInfo.buffer)
|
||||
dial.finish()
|
||||
result.write(2, dial.buffer)
|
||||
if msg.response.isSome():
|
||||
var bufferResponse = initProtoBuffer()
|
||||
let response = msg.response.get()
|
||||
bufferResponse.write(1, response.status.uint)
|
||||
if response.text.isSome():
|
||||
bufferResponse.write(2, response.text.get())
|
||||
if response.ma.isSome():
|
||||
bufferResponse.write(3, response.ma.get())
|
||||
bufferResponse.finish()
|
||||
result.write(3, bufferResponse.buffer)
|
||||
result.finish()
|
||||
|
||||
proc encode*(d: AutonatDial): ProtoBuffer =
|
||||
result = initProtoBuffer()
|
||||
result.write(1, MsgType.Dial.uint)
|
||||
var dial = initProtoBuffer()
|
||||
if d.peerInfo.isSome():
|
||||
var bufferPeerInfo = initProtoBuffer()
|
||||
let peerInfo = d.peerInfo.get()
|
||||
if peerInfo.id.isSome():
|
||||
bufferPeerInfo.write(1, peerInfo.id.get())
|
||||
for ma in peerInfo.addrs:
|
||||
bufferPeerInfo.write(2, ma.data.buffer)
|
||||
bufferPeerInfo.finish()
|
||||
dial.write(1, bufferPeerInfo.buffer)
|
||||
dial.finish()
|
||||
result.write(2, dial.buffer)
|
||||
result.finish()
|
||||
|
||||
proc encode*(r: AutonatDialResponse): ProtoBuffer =
|
||||
result = initProtoBuffer()
|
||||
result.write(1, MsgType.DialResponse.uint)
|
||||
var bufferResponse = initProtoBuffer()
|
||||
bufferResponse.write(1, r.status.uint)
|
||||
if r.text.isSome():
|
||||
bufferResponse.write(2, r.text.get())
|
||||
if r.ma.isSome():
|
||||
bufferResponse.write(3, r.ma.get())
|
||||
bufferResponse.finish()
|
||||
result.write(3, bufferResponse.buffer)
|
||||
result.finish()
|
||||
|
||||
proc decode(_: typedesc[AutonatMsg], buf: seq[byte]): Option[AutonatMsg] =
|
||||
var
|
||||
msgTypeOrd: uint32
|
||||
pbDial: ProtoBuffer
|
||||
pbResponse: ProtoBuffer
|
||||
msg: AutonatMsg
|
||||
|
||||
let
|
||||
pb = initProtoBuffer(buf)
|
||||
r1 = pb.getField(1, msgTypeOrd)
|
||||
r2 = pb.getField(2, pbDial)
|
||||
r3 = pb.getField(3, pbResponse)
|
||||
if r1.isErr() or r2.isErr() or r3.isErr(): return none(AutonatMsg)
|
||||
|
||||
if r1.get() and not checkedEnumAssign(msg.msgType, msgTypeOrd):
|
||||
return none(AutonatMsg)
|
||||
if r2.get():
|
||||
var
|
||||
pbPeerInfo: ProtoBuffer
|
||||
dial: AutonatDial
|
||||
let
|
||||
r4 = pbDial.getField(1, pbPeerInfo)
|
||||
if r4.isErr(): return none(AutonatMsg)
|
||||
|
||||
var peerInfo: AutonatPeerInfo
|
||||
if r4.get():
|
||||
var pid: PeerId
|
||||
let
|
||||
r5 = pbPeerInfo.getField(1, pid)
|
||||
r6 = pbPeerInfo.getRepeatedField(2, peerInfo.addrs)
|
||||
if r5.isErr() or r6.isErr(): return none(AutonatMsg)
|
||||
if r5.get(): peerInfo.id = some(pid)
|
||||
dial.peerInfo = some(peerInfo)
|
||||
msg.dial = some(dial)
|
||||
|
||||
if r3.get():
|
||||
var
|
||||
statusOrd: uint
|
||||
text: string
|
||||
ma: MultiAddress
|
||||
response: AutonatDialResponse
|
||||
|
||||
let
|
||||
r4 = pbResponse.getField(1, statusOrd)
|
||||
r5 = pbResponse.getField(2, text)
|
||||
r6 = pbResponse.getField(3, ma)
|
||||
|
||||
if r4.isErr() or r5.isErr() or r6.isErr() or
|
||||
(r4.get() and not checkedEnumAssign(response.status, statusOrd)):
|
||||
return none(AutonatMsg)
|
||||
if r5.get(): response.text = some(text)
|
||||
if r6.get(): response.ma = some(ma)
|
||||
msg.response = some(response)
|
||||
|
||||
return some(msg)
|
||||
|
||||
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
|
||||
let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
|
||||
id: some(pid),
|
||||
addrs: addrs
|
||||
))).encode()
|
||||
await conn.writeLp(pb.buffer)
|
||||
|
||||
proc sendResponseError(conn: Connection, status: ResponseStatus, text: string = "") {.async.} =
|
||||
let pb = AutonatDialResponse(
|
||||
status: status,
|
||||
text: if text == "": none(string) else: some(text),
|
||||
ma: none(MultiAddress)
|
||||
).encode()
|
||||
await conn.writeLp(pb.buffer)
|
||||
|
||||
proc sendResponseOk(conn: Connection, ma: MultiAddress) {.async.} =
|
||||
let pb = AutonatDialResponse(
|
||||
status: ResponseStatus.Ok,
|
||||
text: some("Ok"),
|
||||
ma: some(ma)
|
||||
).encode()
|
||||
await conn.writeLp(pb.buffer)
|
||||
|
||||
type
|
||||
Autonat* = ref object of LPProtocol
|
||||
sem: AsyncSemaphore
|
||||
switch*: Switch
|
||||
|
||||
proc dialMe*(a: Autonat, pid: PeerId, ma: MultiAddress|seq[MultiAddress]):
|
||||
Future[MultiAddress] {.async.} =
|
||||
let addrs = when ma is MultiAddress: @[ma] else: ma
|
||||
let conn = await a.switch.dial(pid, addrs, AutonatCodec)
|
||||
defer: await conn.close()
|
||||
await conn.sendDial(a.switch.peerInfo.peerId, a.switch.peerInfo.addrs)
|
||||
let msgOpt = AutonatMsg.decode(await conn.readLp(1024))
|
||||
if msgOpt.isNone() or
|
||||
msgOpt.get().msgType != DialResponse or
|
||||
msgOpt.get().response.isNone():
|
||||
raise newException(AutonatError, "Unexpected response")
|
||||
let response = msgOpt.get().response.get()
|
||||
if response.status != ResponseStatus.Ok:
|
||||
raise newException(AutonatError, "Bad status " &
|
||||
$response.status & " " &
|
||||
response.text.get(""))
|
||||
if response.ma.isNone():
|
||||
raise newException(AutonatError, "Missing address")
|
||||
return response.ma.get()
|
||||
|
||||
proc tryDial(a: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.async.} =
|
||||
try:
|
||||
await a.sem.acquire()
|
||||
let ma = await a.switch.dialer.tryDial(conn.peerId, addrs)
|
||||
if ma.isSome:
|
||||
await conn.sendResponseOk(ma.get())
|
||||
else:
|
||||
await conn.sendResponseError(DialError, "Missing observed address")
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
await conn.sendResponseError(DialError, exc.msg)
|
||||
finally:
|
||||
a.sem.release()
|
||||
|
||||
proc handleDial(a: Autonat, conn: Connection, msg: AutonatMsg): Future[void] =
|
||||
if msg.dial.isNone() or msg.dial.get().peerInfo.isNone():
|
||||
return conn.sendResponseError(BadRequest, "Missing Peer Info")
|
||||
let peerInfo = msg.dial.get().peerInfo.get()
|
||||
if peerInfo.id.isSome() and peerInfo.id.get() != conn.peerId:
|
||||
return conn.sendResponseError(BadRequest, "PeerId mismatch")
|
||||
|
||||
if conn.observedAddr.isNone:
|
||||
return conn.sendResponseError(BadRequest, "Missing observed address")
|
||||
let observedAddr = conn.observedAddr.get()
|
||||
|
||||
var isRelayed = observedAddr.contains(multiCodec("p2p-circuit"))
|
||||
if isRelayed.isErr() or isRelayed.get():
|
||||
return conn.sendResponseError(DialRefused, "Refused to dial a relayed observed address")
|
||||
let hostIp = observedAddr[0]
|
||||
if hostIp.isErr() or not IP.match(hostIp.get()):
|
||||
trace "wrong observed address", address=observedAddr
|
||||
return conn.sendResponseError(InternalError, "Expected an IP address")
|
||||
var addrs = initHashSet[MultiAddress]()
|
||||
addrs.incl(observedAddr)
|
||||
for ma in peerInfo.addrs:
|
||||
isRelayed = ma.contains(multiCodec("p2p-circuit"))
|
||||
if isRelayed.isErr() or isRelayed.get():
|
||||
continue
|
||||
let maFirst = ma[0]
|
||||
if maFirst.isErr() or not IP.match(maFirst.get()):
|
||||
continue
|
||||
|
||||
try:
|
||||
addrs.incl(
|
||||
if maFirst.get() == hostIp.get():
|
||||
ma
|
||||
else:
|
||||
let maEnd = ma[1..^1]
|
||||
if maEnd.isErr(): continue
|
||||
hostIp.get() & maEnd.get()
|
||||
)
|
||||
except LPError as exc:
|
||||
continue
|
||||
if len(addrs) >= AddressLimit:
|
||||
break
|
||||
|
||||
if len(addrs) == 0:
|
||||
return conn.sendResponseError(DialRefused, "No dialable address")
|
||||
return a.tryDial(conn, toSeq(addrs))
|
||||
|
||||
proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1): T =
|
||||
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize))
|
||||
autonat.init()
|
||||
autonat
|
||||
|
||||
method init*(a: Autonat) =
|
||||
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||
try:
|
||||
let msgOpt = AutonatMsg.decode(await conn.readLp(1024))
|
||||
if msgOpt.isNone() or msgOpt.get().msgType != MsgType.Dial:
|
||||
raise newException(AutonatError, "Received malformed message")
|
||||
let msg = msgOpt.get()
|
||||
await a.handleDial(conn, msg)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
trace "exception in autonat handler", exc = exc.msg, conn
|
||||
finally:
|
||||
trace "exiting autonat handler", conn
|
||||
await conn.close()
|
||||
|
||||
a.handler = handleStream
|
||||
a.codec = AutonatCodec
|
||||
76
libp2p/protocols/connectivity/autonat/client.nim
Normal file
@@ -0,0 +1,76 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/options
|
||||
import stew/results
|
||||
import chronos, chronicles
|
||||
import ../../../switch,
|
||||
../../../multiaddress,
|
||||
../../../peerid
|
||||
import core
|
||||
|
||||
logScope:
|
||||
topics = "libp2p autonat"
|
||||
|
||||
type
|
||||
AutonatClient* = ref object of RootObj
|
||||
|
||||
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
|
||||
let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
|
||||
id: some(pid),
|
||||
addrs: addrs
|
||||
))).encode()
|
||||
await conn.writeLp(pb.buffer)
|
||||
|
||||
method dialMe*(self: AutonatClient, switch: Switch, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()):
|
||||
Future[MultiAddress] {.base, async.} =
|
||||
|
||||
proc getResponseOrRaise(autonatMsg: Option[AutonatMsg]): AutonatDialResponse {.raises: [Defect, AutonatError].} =
|
||||
if autonatMsg.isNone() or
|
||||
autonatMsg.get().msgType != DialResponse or
|
||||
autonatMsg.get().response.isNone() or
|
||||
(autonatMsg.get().response.get().status == Ok and
|
||||
autonatMsg.get().response.get().ma.isNone()):
|
||||
raise newException(AutonatError, "Unexpected response")
|
||||
else:
|
||||
autonatMsg.get().response.get()
|
||||
|
||||
let conn =
|
||||
try:
|
||||
if addrs.len == 0:
|
||||
await switch.dial(pid, @[AutonatCodec])
|
||||
else:
|
||||
await switch.dial(pid, addrs, AutonatCodec)
|
||||
except CatchableError as err:
|
||||
raise newException(AutonatError, "Unexpected error when dialling: " & err.msg, err)
|
||||
|
||||
# To bypass maxConnectionsPerPeer
|
||||
let incomingConnection = switch.connManager.expectConnection(pid, In)
|
||||
if incomingConnection.failed() and incomingConnection.error of AlreadyExpectingConnectionError:
|
||||
raise newException(AutonatError, incomingConnection.error.msg)
|
||||
defer:
|
||||
await conn.close()
|
||||
incomingConnection.cancel() # Safer to always try to cancel because we aren't sure whether the peer dialled us or not
|
||||
if incomingConnection.completed():
|
||||
await (await incomingConnection).connection.close()
|
||||
trace "sending Dial", addrs = switch.peerInfo.addrs
|
||||
await conn.sendDial(switch.peerInfo.peerId, switch.peerInfo.addrs)
|
||||
let response = getResponseOrRaise(AutonatMsg.decode(await conn.readLp(1024)))
|
||||
return case response.status:
|
||||
of ResponseStatus.Ok:
|
||||
response.ma.get()
|
||||
of ResponseStatus.DialError:
|
||||
raise newException(AutonatUnreachableError, "Peer could not dial us back: " & response.text.get(""))
|
||||
else:
|
||||
raise newException(AutonatError, "Bad status " & $response.status & " " & response.text.get(""))
|
||||
155
libp2p/protocols/connectivity/autonat/core.nim
Normal file
@@ -0,0 +1,155 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[options]
|
||||
import stew/[results, objects]
|
||||
import chronos, chronicles
|
||||
import ../../../multiaddress,
|
||||
../../../peerid,
|
||||
../../../errors
|
||||
|
||||
logScope:
|
||||
topics = "libp2p autonat"
|
||||
|
||||
const
|
||||
AutonatCodec* = "/libp2p/autonat/1.0.0"
|
||||
AddressLimit* = 8
|
||||
|
||||
type
|
||||
AutonatError* = object of LPError
|
||||
AutonatUnreachableError* = object of LPError
|
||||
|
||||
MsgType* = enum
|
||||
Dial = 0
|
||||
DialResponse = 1
|
||||
|
||||
ResponseStatus* = enum
|
||||
Ok = 0
|
||||
DialError = 100
|
||||
DialRefused = 101
|
||||
BadRequest = 200
|
||||
InternalError = 300
|
||||
|
||||
AutonatPeerInfo* = object
|
||||
id*: Option[PeerId]
|
||||
addrs*: seq[MultiAddress]
|
||||
|
||||
AutonatDial* = object
|
||||
peerInfo*: Option[AutonatPeerInfo]
|
||||
|
||||
AutonatDialResponse* = object
|
||||
status*: ResponseStatus
|
||||
text*: Option[string]
|
||||
ma*: Option[MultiAddress]
|
||||
|
||||
AutonatMsg* = object
|
||||
msgType*: MsgType
|
||||
dial*: Option[AutonatDial]
|
||||
response*: Option[AutonatDialResponse]
|
||||
|
||||
NetworkReachability* {.pure.} = enum
|
||||
Unknown, NotReachable, Reachable
|
||||
|
||||
proc encode(p: AutonatPeerInfo): ProtoBuffer =
|
||||
result = initProtoBuffer()
|
||||
if p.id.isSome():
|
||||
result.write(1, p.id.get())
|
||||
for ma in p.addrs:
|
||||
result.write(2, ma.data.buffer)
|
||||
result.finish()
|
||||
|
||||
proc encode*(d: AutonatDial): ProtoBuffer =
|
||||
result = initProtoBuffer()
|
||||
result.write(1, MsgType.Dial.uint)
|
||||
var dial = initProtoBuffer()
|
||||
if d.peerInfo.isSome():
|
||||
dial.write(1, encode(d.peerInfo.get()))
|
||||
dial.finish()
|
||||
result.write(2, dial.buffer)
|
||||
result.finish()
|
||||
|
||||
proc encode*(r: AutonatDialResponse): ProtoBuffer =
|
||||
result = initProtoBuffer()
|
||||
result.write(1, MsgType.DialResponse.uint)
|
||||
var bufferResponse = initProtoBuffer()
|
||||
bufferResponse.write(1, r.status.uint)
|
||||
if r.text.isSome():
|
||||
bufferResponse.write(2, r.text.get())
|
||||
if r.ma.isSome():
|
||||
bufferResponse.write(3, r.ma.get())
|
||||
bufferResponse.finish()
|
||||
result.write(3, bufferResponse.buffer)
|
||||
result.finish()
|
||||
|
||||
proc encode*(msg: AutonatMsg): ProtoBuffer =
|
||||
if msg.dial.isSome():
|
||||
return encode(msg.dial.get())
|
||||
if msg.response.isSome():
|
||||
return encode(msg.response.get())
|
||||
|
||||
proc decode*(_: typedesc[AutonatMsg], buf: seq[byte]): Option[AutonatMsg] =
|
||||
var
|
||||
msgTypeOrd: uint32
|
||||
pbDial: ProtoBuffer
|
||||
pbResponse: ProtoBuffer
|
||||
msg: AutonatMsg
|
||||
|
||||
let
|
||||
pb = initProtoBuffer(buf)
|
||||
r1 = pb.getField(1, msgTypeOrd)
|
||||
r2 = pb.getField(2, pbDial)
|
||||
r3 = pb.getField(3, pbResponse)
|
||||
if r1.isErr() or r2.isErr() or r3.isErr(): return none(AutonatMsg)
|
||||
|
||||
if r1.get() and not checkedEnumAssign(msg.msgType, msgTypeOrd):
|
||||
return none(AutonatMsg)
|
||||
if r2.get():
|
||||
var
|
||||
pbPeerInfo: ProtoBuffer
|
||||
dial: AutonatDial
|
||||
let
|
||||
r4 = pbDial.getField(1, pbPeerInfo)
|
||||
if r4.isErr(): return none(AutonatMsg)
|
||||
|
||||
var peerInfo: AutonatPeerInfo
|
||||
if r4.get():
|
||||
var pid: PeerId
|
||||
let
|
||||
r5 = pbPeerInfo.getField(1, pid)
|
||||
r6 = pbPeerInfo.getRepeatedField(2, peerInfo.addrs)
|
||||
if r5.isErr() or r6.isErr(): return none(AutonatMsg)
|
||||
if r5.get(): peerInfo.id = some(pid)
|
||||
dial.peerInfo = some(peerInfo)
|
||||
msg.dial = some(dial)
|
||||
|
||||
if r3.get():
|
||||
var
|
||||
statusOrd: uint
|
||||
text: string
|
||||
ma: MultiAddress
|
||||
response: AutonatDialResponse
|
||||
|
||||
let
|
||||
r4 = pbResponse.getField(1, statusOrd)
|
||||
r5 = pbResponse.getField(2, text)
|
||||
r6 = pbResponse.getField(3, ma)
|
||||
|
||||
if r4.isErr() or r5.isErr() or r6.isErr() or
|
||||
(r4.get() and not checkedEnumAssign(response.status, statusOrd)):
|
||||
return none(AutonatMsg)
|
||||
if r5.get(): response.text = some(text)
|
||||
if r6.get(): response.ma = some(ma)
|
||||
msg.response = some(response)
|
||||
|
||||
return some(msg)
|
||||
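A round-trip sketch for the relocated message types, assuming a PeerId `somePeerId` and a MultiAddress `someAddr` obtained elsewhere:

# Encode a Dial and decode it back through the new core module.
let dial = AutonatDial(peerInfo: some(AutonatPeerInfo(
  id: some(somePeerId),
  addrs: @[someAddr])))
let decoded = AutonatMsg.decode(dial.encode().buffer)
assert decoded.isSome() and decoded.get().msgType == MsgType.Dial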
165
libp2p/protocols/connectivity/autonat/server.nim
Normal file
@@ -0,0 +1,165 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[options, sets, sequtils]
|
||||
import stew/results
|
||||
import chronos, chronicles
|
||||
import ../../protocol,
|
||||
../../../switch,
|
||||
../../../multiaddress,
|
||||
../../../multicodec,
|
||||
../../../peerid,
|
||||
../../../utils/[semaphore, future],
|
||||
../../../errors
|
||||
import core
|
||||
|
||||
export core
|
||||
|
||||
logScope:
|
||||
topics = "libp2p autonat"
|
||||
|
||||
type
|
||||
Autonat* = ref object of LPProtocol
|
||||
sem: AsyncSemaphore
|
||||
switch*: Switch
|
||||
dialTimeout: Duration
|
||||
|
||||
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
|
||||
let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
|
||||
id: some(pid),
|
||||
addrs: addrs
|
||||
))).encode()
|
||||
await conn.writeLp(pb.buffer)
|
||||
|
||||
proc sendResponseError(conn: Connection, status: ResponseStatus, text: string = "") {.async.} =
|
||||
let pb = AutonatDialResponse(
|
||||
status: status,
|
||||
text: if text == "": none(string) else: some(text),
|
||||
ma: none(MultiAddress)
|
||||
).encode()
|
||||
await conn.writeLp(pb.buffer)
|
||||
|
||||
proc sendResponseOk(conn: Connection, ma: MultiAddress) {.async.} =
|
||||
let pb = AutonatDialResponse(
|
||||
status: ResponseStatus.Ok,
|
||||
text: some("Ok"),
|
||||
ma: some(ma)
|
||||
).encode()
|
||||
await conn.writeLp(pb.buffer)
|
||||
|
||||
proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.async.} =
|
||||
await autonat.sem.acquire()
|
||||
var futs: seq[Future[Opt[MultiAddress]]]
|
||||
try:
|
||||
# This is to bypass the per peer max connections limit
|
||||
let outgoingConnection = autonat.switch.connManager.expectConnection(conn.peerId, Out)
|
||||
if outgoingConnection.failed() and outgoingConnection.error of AlreadyExpectingConnectionError:
|
||||
await conn.sendResponseError(DialRefused, outgoingConnection.error.msg)
|
||||
return
|
||||
# Safer to always try to cancel because we aren't sure whether the connection was established
|
||||
defer: outgoingConnection.cancel()
|
||||
# tryDial is to bypass the global max connections limit
|
||||
futs = addrs.mapIt(autonat.switch.dialer.tryDial(conn.peerId, @[it]))
|
||||
let fut = await anyCompleted(futs).wait(autonat.dialTimeout)
|
||||
let ma = await fut
|
||||
if ma.isSome:
|
||||
await conn.sendResponseOk(ma.get())
|
||||
else:
|
||||
await conn.sendResponseError(DialError, "Missing observed address")
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except AllFuturesFailedError as exc:
|
||||
debug "All dial attempts failed", addrs, exc = exc.msg
|
||||
await conn.sendResponseError(DialError, "All dial attempts failed")
|
||||
except AsyncTimeoutError as exc:
|
||||
debug "Dial timeout", addrs, exc = exc.msg
|
||||
await conn.sendResponseError(DialError, "Dial timeout")
|
||||
except CatchableError as exc:
|
||||
debug "Unexpected error", addrs, exc = exc.msg
|
||||
await conn.sendResponseError(DialError, "Unexpected error")
|
||||
finally:
|
||||
autonat.sem.release()
|
||||
for f in futs:
|
||||
if not f.finished():
|
||||
f.cancel()
|
||||
|
||||
proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[void] =
|
||||
if msg.dial.isNone() or msg.dial.get().peerInfo.isNone():
|
||||
return conn.sendResponseError(BadRequest, "Missing Peer Info")
|
||||
let peerInfo = msg.dial.get().peerInfo.get()
|
||||
if peerInfo.id.isSome() and peerInfo.id.get() != conn.peerId:
|
||||
return conn.sendResponseError(BadRequest, "PeerId mismatch")
|
||||
|
||||
if conn.observedAddr.isNone:
|
||||
return conn.sendResponseError(BadRequest, "Missing observed address")
|
||||
let observedAddr = conn.observedAddr.get()
|
||||
|
||||
var isRelayed = observedAddr.contains(multiCodec("p2p-circuit"))
|
||||
if isRelayed.isErr() or isRelayed.get():
|
||||
return conn.sendResponseError(DialRefused, "Refused to dial a relayed observed address")
|
||||
let hostIp = observedAddr[0]
|
||||
if hostIp.isErr() or not IP.match(hostIp.get()):
|
||||
trace "wrong observed address", address=observedAddr
|
||||
return conn.sendResponseError(InternalError, "Expected an IP address")
|
||||
var addrs = initHashSet[MultiAddress]()
|
||||
addrs.incl(observedAddr)
|
||||
trace "addrs received", addrs = peerInfo.addrs
|
||||
for ma in peerInfo.addrs:
|
||||
isRelayed = ma.contains(multiCodec("p2p-circuit"))
|
||||
if isRelayed.isErr() or isRelayed.get():
|
||||
continue
|
||||
let maFirst = ma[0]
|
||||
if maFirst.isErr() or not DNS_OR_IP.match(maFirst.get()):
|
||||
continue
|
||||
|
||||
try:
|
||||
addrs.incl(
|
||||
if maFirst.get() == hostIp.get():
|
||||
ma
|
||||
else:
|
||||
let maEnd = ma[1..^1]
|
||||
if maEnd.isErr(): continue
|
||||
hostIp.get() & maEnd.get()
|
||||
)
|
||||
except LPError as exc:
|
||||
continue
|
||||
if len(addrs) >= AddressLimit:
|
||||
break
|
||||
|
||||
if len(addrs) == 0:
|
||||
return conn.sendResponseError(DialRefused, "No dialable address")
|
||||
let addrsSeq = toSeq(addrs)
|
||||
trace "trying to dial", addrs = addrsSeq
|
||||
return autonat.tryDial(conn, addrsSeq)
|
||||
|
||||
proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
|
||||
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
|
||||
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||
try:
|
||||
let msgOpt = AutonatMsg.decode(await conn.readLp(1024))
|
||||
if msgOpt.isNone() or msgOpt.get().msgType != MsgType.Dial:
|
||||
raise newException(AutonatError, "Received malformed message")
|
||||
let msg = msgOpt.get()
|
||||
await autonat.handleDial(conn, msg)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
debug "exception in autonat handler", exc = exc.msg, conn
|
||||
finally:
|
||||
trace "exiting autonat handler", conn
|
||||
await conn.close()
|
||||
|
||||
autonat.handler = handleStream
|
||||
autonat.codec = AutonatCodec
|
||||
autonat
|
||||
216
libp2p/protocols/connectivity/autonat/service.nim
Normal file
@@ -0,0 +1,216 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[options, deques, sequtils]
|
||||
import chronos, metrics
|
||||
import ../../../switch
|
||||
import ../../../wire
|
||||
import client
|
||||
from core import NetworkReachability, AutonatUnreachableError
|
||||
import ../../../utils/heartbeat
|
||||
import ../../../crypto/crypto
|
||||
|
||||
export options, core.NetworkReachability
|
||||
|
||||
logScope:
|
||||
topics = "libp2p autonatservice"
|
||||
|
||||
declarePublicGauge(libp2p_autonat_reachability_confidence, "autonat reachability confidence", labels = ["reachability"])
|
||||
|
||||
type
|
||||
AutonatService* = ref object of Service
|
||||
newConnectedPeerHandler: PeerEventHandler
|
||||
addressMapper: AddressMapper
|
||||
scheduleHandle: Future[void]
|
||||
networkReachability*: NetworkReachability
|
||||
confidence: Option[float]
|
||||
answers: Deque[NetworkReachability]
|
||||
autonatClient: AutonatClient
|
||||
statusAndConfidenceHandler: StatusAndConfidenceHandler
|
||||
rng: ref HmacDrbgContext
|
||||
scheduleInterval: Option[Duration]
|
||||
askNewConnectedPeers: bool
|
||||
numPeersToAsk: int
|
||||
maxQueueSize: int
|
||||
minConfidence: float
|
||||
dialTimeout: Duration
|
||||
enableAddressMapper: bool
|
||||
|
||||
StatusAndConfidenceHandler* = proc (networkReachability: NetworkReachability, confidence: Option[float]): Future[void] {.gcsafe, raises: [Defect].}
|
||||
|
||||
proc new*(
|
||||
T: typedesc[AutonatService],
|
||||
autonatClient: AutonatClient,
|
||||
rng: ref HmacDrbgContext,
|
||||
scheduleInterval: Option[Duration] = none(Duration),
|
||||
askNewConnectedPeers = true,
|
||||
numPeersToAsk: int = 5,
|
||||
maxQueueSize: int = 10,
|
||||
minConfidence: float = 0.3,
|
||||
dialTimeout = 30.seconds,
|
||||
enableAddressMapper = true): T =
|
||||
return T(
|
||||
scheduleInterval: scheduleInterval,
|
||||
networkReachability: Unknown,
|
||||
confidence: none(float),
|
||||
answers: initDeque[NetworkReachability](),
|
||||
autonatClient: autonatClient,
|
||||
rng: rng,
|
||||
askNewConnectedPeers: askNewConnectedPeers,
|
||||
numPeersToAsk: numPeersToAsk,
|
||||
maxQueueSize: maxQueueSize,
|
||||
minConfidence: minConfidence,
|
||||
dialTimeout: dialTimeout,
|
||||
enableAddressMapper: enableAddressMapper)
|
||||
|
||||
proc callHandler(self: AutonatService) {.async.} =
|
||||
if not isNil(self.statusAndConfidenceHandler):
|
||||
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
|
||||
|
||||
proc hasEnoughIncomingSlots(switch: Switch): bool =
|
||||
# we leave some margin instead of comparing to 0 as a peer could connect to us while we are asking for the dial back
|
||||
return switch.connManager.slotsAvailable(In) >= 2
|
||||
|
||||
proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =
|
||||
return switch.connManager.selectMuxer(peerId, In) != nil
|
||||
|
||||
proc handleAnswer(self: AutonatService, ans: NetworkReachability) {.async.} =
|
||||
|
||||
if ans == Unknown:
|
||||
return
|
||||
|
||||
if self.answers.len == self.maxQueueSize:
|
||||
self.answers.popFirst()
|
||||
self.answers.addLast(ans)
|
||||
|
||||
self.networkReachability = Unknown
|
||||
self.confidence = none(float)
|
||||
const reachabilityPriority = [Reachable, NotReachable]
|
||||
for reachability in reachabilityPriority:
|
||||
let confidence = self.answers.countIt(it == reachability) / self.maxQueueSize
|
||||
libp2p_autonat_reachability_confidence.set(value = confidence, labelValues = [$reachability])
|
||||
if self.confidence.isNone and confidence >= self.minConfidence:
|
||||
self.networkReachability = reachability
|
||||
self.confidence = some(confidence)
|
||||
|
||||
debug "Current status", currentStats = $self.networkReachability, confidence = $self.confidence, answers = self.answers
|
||||
|
||||
proc askPeer(self: AutonatService, switch: Switch, peerId: PeerId): Future[NetworkReachability] {.async.} =
|
||||
logScope:
|
||||
peerId = $peerId
|
||||
|
||||
if doesPeerHaveIncomingConn(switch, peerId):
|
||||
return Unknown
|
||||
|
||||
if not hasEnoughIncomingSlots(switch):
|
||||
debug "No incoming slots available, not asking peer", incomingSlotsAvailable=switch.connManager.slotsAvailable(In)
|
||||
return Unknown
|
||||
|
||||
trace "Asking peer for reachability"
|
||||
let ans =
|
||||
try:
|
||||
discard await self.autonatClient.dialMe(switch, peerId).wait(self.dialTimeout)
|
||||
debug "dialMe answer is reachable"
|
||||
Reachable
|
||||
except AutonatUnreachableError as error:
|
||||
debug "dialMe answer is not reachable", msg = error.msg
|
||||
NotReachable
|
||||
except AsyncTimeoutError as error:
|
||||
debug "dialMe timed out", msg = error.msg
|
||||
Unknown
|
||||
except CatchableError as error:
|
      debug "dialMe unexpected error", msg = error.msg
      Unknown
  await self.handleAnswer(ans)
  if not isNil(self.statusAndConfidenceHandler):
    await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
  await switch.peerInfo.update()
  return ans

proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
  trace "Asking peers for reachability"
  var peers = switch.connectedPeers(Direction.Out)
  self.rng.shuffle(peers)
  var answersFromPeers = 0
  for peer in peers:
    if answersFromPeers >= self.numPeersToAsk:
      break
    if not hasEnoughIncomingSlots(switch):
      debug "No incoming slots available, not asking peers", incomingSlotsAvailable=switch.connManager.slotsAvailable(In)
      break
    if (await askPeer(self, switch, peer)) != Unknown:
      answersFromPeers.inc()

proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.async.} =
  heartbeat "Scheduling AutonatService run", interval:
    await service.run(switch)

proc addressMapper(
  self: AutonatService,
  peerStore: PeerStore,
  listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =

  if self.networkReachability != NetworkReachability.Reachable:
    return listenAddrs

  var addrs = newSeq[MultiAddress]()
  for listenAddr in listenAddrs:
    var processedMA = listenAddr
    try:
      let hostIP = initTAddress(listenAddr).get()
      if not hostIP.isGlobal() and self.networkReachability == NetworkReachability.Reachable:
        processedMA = peerStore.guessDialableAddr(listenAddr) # handle manual port forwarding
    except CatchableError as exc:
      debug "Error while handling address mapper", msg = exc.msg
    addrs.add(processedMA)
  return addrs

method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
  self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
    return await addressMapper(self, switch.peerStore, listenAddrs)

  info "Setting up AutonatService"
  let hasBeenSetup = await procCall Service(self).setup(switch)
  if hasBeenSetup:
    if self.askNewConnectedPeers:
      self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} =
        discard askPeer(self, switch, peerId)
      switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
    if self.scheduleInterval.isSome():
      self.scheduleHandle = schedule(self, switch, self.scheduleInterval.get())
    if self.enableAddressMapper:
      switch.peerInfo.addressMappers.add(self.addressMapper)
  return hasBeenSetup

method run*(self: AutonatService, switch: Switch) {.async, public.} =
  trace "Running AutonatService"
  await askConnectedPeers(self, switch)
  await self.callHandler()

method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public.} =
  info "Stopping AutonatService"
  let hasBeenStopped = await procCall Service(self).stop(switch)
  if hasBeenStopped:
    if not isNil(self.scheduleHandle):
      self.scheduleHandle.cancel()
      self.scheduleHandle = nil
    if not isNil(self.newConnectedPeerHandler):
      switch.connManager.removePeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
    if self.enableAddressMapper:
      switch.peerInfo.addressMappers.keepItIf(it != self.addressMapper)
    await switch.peerInfo.update()
  return hasBeenStopped

proc statusAndConfidenceHandler*(self: AutonatService, statusAndConfidenceHandler: StatusAndConfidenceHandler) =
  self.statusAndConfidenceHandler = statusAndConfidenceHandler
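For orientation, a minimal usage sketch of the AutonatService API shown above (not part of the diff): it only leans on the procs visible in this hunk; the handler signature and the newAutonatService() constructor are assumptions, since neither is defined here.

proc example(switch: Switch) {.async.} =
  # Placeholder constructor, not an API from this diff.
  let service = newAutonatService()

  proc onReachability(reachability: NetworkReachability,
                      confidence: Option[float]): Future[void] {.async.} =
    # Signature assumed to match StatusAndConfidenceHandler; called on every
    # re-evaluation triggered by askConnectedPeers.
    echo "reachability=", reachability, " confidence=", confidence

  service.statusAndConfidenceHandler(onReachability)
  discard await service.setup(switch)  # wires peer handlers, schedule and address mapper
  await service.run(switch)            # one immediate round of asking connected peers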
92
libp2p/protocols/connectivity/dcutr/client.nim
Normal file
@@ -0,0 +1,92 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import std/sequtils

import stew/results
import chronos, chronicles

import core
import ../../protocol,
       ../../../stream/connection,
       ../../../switch,
       ../../../utils/future

export DcutrError

type
  DcutrClient* = ref object
    connectTimeout: Duration
    maxDialableAddrs: int

logScope:
  topics = "libp2p dcutrclient"

proc new*(T: typedesc[DcutrClient], connectTimeout = 15.seconds, maxDialableAddrs = 8): T =
  return T(connectTimeout: connectTimeout, maxDialableAddrs: maxDialableAddrs)

proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: seq[MultiAddress]) {.async.} =
  logScope:
    peerId = switch.peerInfo.peerId

  var
    peerDialableAddrs: seq[MultiAddress]
    stream: Connection
  try:
    var ourDialableAddrs = getHolePunchableAddrs(addrs)
    if ourDialableAddrs.len == 0:
      debug "Dcutr initiator has no supported dialable addresses. Aborting Dcutr.", addrs
      return

    stream = await switch.dial(remotePeerId, DcutrCodec)
    await stream.send(MsgType.Connect, addrs)
    debug "Dcutr initiator has sent a Connect message."
    let rttStart = Moment.now()
    let connectAnswer = DcutrMsg.decode(await stream.readLp(1024))

    peerDialableAddrs = getHolePunchableAddrs(connectAnswer.addrs)
    if peerDialableAddrs.len == 0:
      debug "Dcutr receiver has no supported dialable addresses to connect to. Aborting Dcutr.", addrs=connectAnswer.addrs
      return

    let rttEnd = Moment.now()
    debug "Dcutr initiator has received a Connect message back.", connectAnswer
    let halfRtt = (rttEnd - rttStart) div 2'i64
    await stream.send(MsgType.Sync, @[])
    debug "Dcutr initiator has sent a Sync message."
    await sleepAsync(halfRtt)

    if peerDialableAddrs.len > self.maxDialableAddrs:
      peerDialableAddrs = peerDialableAddrs[0..<self.maxDialableAddrs]
    var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false))
    try:
      discard await anyCompleted(futs).wait(self.connectTimeout)
      debug "Dcutr initiator has directly connected to the remote peer."
    finally:
      for fut in futs: fut.cancel()
  except CancelledError as err:
    raise err
  except AllFuturesFailedError as err:
    debug "Dcutr initiator could not connect to the remote peer, all connect attempts failed", peerDialableAddrs, msg = err.msg
    raise newException(DcutrError, "Dcutr initiator could not connect to the remote peer, all connect attempts failed", err)
  except AsyncTimeoutError as err:
    debug "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg
    raise newException(DcutrError, "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", err)
  except CatchableError as err:
    debug "Unexpected error when trying direct conn", err = err.msg
    raise newException(DcutrError, "Unexpected error when trying a direct conn", err)
  finally:
    if stream != nil:
      await stream.close()
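A short, hedged sketch of the initiator side (not part of the diff): it assumes `switch` is a started Switch with an existing relayed connection to `remotePeer`, and that `ourAddrs` holds the addresses we want to advertise; only DcutrClient.new, startSync and DcutrError from the file above are relied on.

proc tryHolePunch(switch: Switch, remotePeer: PeerId, ourAddrs: seq[MultiAddress]) {.async.} =
  # Smaller timeout/address budget than the defaults, purely as an example.
  let client = DcutrClient.new(connectTimeout = 10.seconds, maxDialableAddrs = 4)
  try:
    await client.startSync(switch, remotePeer, ourAddrs)
  except DcutrError as err:
    # Hole punching is best effort: on failure we simply keep the relayed path.
    debug "Hole punch failed, staying on the relayed connection", msg = err.msg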
63
libp2p/protocols/connectivity/dcutr/core.nim
Normal file
@@ -0,0 +1,63 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import std/sequtils

import chronos
import stew/objects

import ../../../multiaddress,
       ../../../errors,
       ../../../stream/connection

export multiaddress

const
  DcutrCodec* = "/libp2p/dcutr"

type
  MsgType* = enum
    Connect = 100
    Sync = 300

  DcutrMsg* = object
    msgType*: MsgType
    addrs*: seq[MultiAddress]

  DcutrError* = object of LPError

proc encode*(msg: DcutrMsg): ProtoBuffer =
  result = initProtoBuffer()
  result.write(1, msg.msgType.uint)
  for addr in msg.addrs:
    result.write(2, addr)
  result.finish()

proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [Defect, DcutrError].} =
  var
    msgTypeOrd: uint32
    dcutrMsg: DcutrMsg
  var pb = initProtoBuffer(buf)
  var r1 = pb.getField(1, msgTypeOrd)
  let r2 = pb.getRepeatedField(2, dcutrMsg.addrs)
  if r1.isErr or r2.isErr or not checkedEnumAssign(dcutrMsg.msgType, msgTypeOrd):
    raise newException(DcutrError, "Received malformed message")
  return dcutrMsg

proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async.} =
  let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
  await conn.writeLp(pb.buffer)

proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] =
  addrs.filterIt(TCP.match(it))
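A minimal round-trip sketch of the wire format defined above (not part of the diff); the multiaddress is an arbitrary documentation address, everything else comes straight from core.nim.

proc roundTripExample() =
  let msg = DcutrMsg(msgType: MsgType.Connect,
                     addrs: @[MultiAddress.init("/ip4/192.0.2.1/tcp/4001").tryGet()])
  # encode writes field 1 (message type) and repeated field 2 (addresses).
  let encoded = msg.encode()
  let decoded = DcutrMsg.decode(encoded.buffer)
  assert decoded.msgType == MsgType.Connect
  assert decoded.addrs == msg.addrs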
84
libp2p/protocols/connectivity/dcutr/server.nim
Normal file
@@ -0,0 +1,84 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import std/[options, sets, sequtils]

import stew/[results, objects]
import chronos, chronicles

import core
import ../../protocol,
       ../../../stream/connection,
       ../../../switch,
       ../../../utils/future

export DcutrError
export chronicles

type Dcutr* = ref object of LPProtocol

logScope:
  topics = "libp2p dcutr"

proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDialableAddrs = 8): T =

  proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} =
    var peerDialableAddrs: seq[MultiAddress]
    try:
      let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
      debug "Dcutr receiver received a Connect message.", connectMsg

      var ourAddrs = switch.peerStore.getMostObservedProtosAndPorts() # likely empty when the peer is reachable
      if ourAddrs.len == 0:
        # this list should be the same as the peer's public addrs when it is reachable
        ourAddrs = switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it))
      var ourDialableAddrs = getHolePunchableAddrs(ourAddrs)
      if ourDialableAddrs.len == 0:
        debug "Dcutr receiver has no supported dialable addresses. Aborting Dcutr.", ourAddrs
        return

      await stream.send(MsgType.Connect, ourAddrs)
      debug "Dcutr receiver has sent a Connect message back."
      let syncMsg = DcutrMsg.decode(await stream.readLp(1024))
      debug "Dcutr receiver has received a Sync message.", syncMsg

      peerDialableAddrs = getHolePunchableAddrs(connectMsg.addrs)
      if peerDialableAddrs.len == 0:
        debug "Dcutr initiator has no supported dialable addresses to connect to. Aborting Dcutr.", addrs=connectMsg.addrs
        return

      if peerDialableAddrs.len > maxDialableAddrs:
        peerDialableAddrs = peerDialableAddrs[0..<maxDialableAddrs]
      var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, upgradeDir = Direction.In))
      try:
        discard await anyCompleted(futs).wait(connectTimeout)
        debug "Dcutr receiver has directly connected to the remote peer."
      finally:
        for fut in futs: fut.cancel()
    except CancelledError as err:
      raise err
    except AllFuturesFailedError as err:
      debug "Dcutr receiver could not connect to the remote peer, all connect attempts failed", peerDialableAddrs, msg = err.msg
      raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts failed", err)
    except AsyncTimeoutError as err:
      debug "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg
      raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", err)
    except CatchableError as err:
      warn "Unexpected error in dcutr handler", msg = err.msg
      raise newException(DcutrError, "Unexpected error in dcutr handler", err)

  let self = T()
  self.handler = handleStream
  self.codec = DcutrCodec
  self
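Receiver-side sketch (not part of the diff): it assumes the usual way protocols are attached to a Switch in this codebase, i.e. constructing the protocol and mounting it; only Dcutr.new from the file above is taken as given.

proc mountDcutr(switch: Switch) =
  # The handler built in Dcutr.new serves incoming /libp2p/dcutr streams.
  let dcutr = Dcutr.new(switch)
  switch.mount(dcutr)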
@@ -1,5 +1,5 @@
 # Nim-LibP2P
-# Copyright (c) 2022 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
 # * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -25,6 +25,7 @@ import ./relay,
        ../../../multiaddress,
        ../../../stream/connection

+export options

 logScope:
   topics = "libp2p relay relay-client"
@@ -1,5 +1,5 @@
 # Nim-LibP2P
-# Copyright (c) 2022 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
 # * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
 else:
   {.push raises: [].}

-import options, macros, sequtils
+import options, macros
 import stew/objects
 import ../../../peerinfo,
        ../../../signed_envelope
@@ -115,9 +115,10 @@ proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Option[RelayMessage] =
   if r2.get(): rMsg.srcPeer = some(src)
   if r3.get(): rMsg.dstPeer = some(dst)
   if r4.get():
-    if statusOrd.int notin StatusV1:
+    var status: StatusV1
+    if not checkedEnumAssign(status, statusOrd):
       return none(RelayMessage)
-    rMsg.status = some(StatusV1(statusOrd))
+    rMsg.status = some(status)
   some(rMsg)

 # Voucher
@@ -285,9 +286,10 @@ proc decode*(_: typedesc[HopMessage], buf: seq[byte]): Option[HopMessage] =
   if r3.get(): msg.reservation = some(reservation)
   if r4.get(): msg.limit = limit
   if r5.get():
-    if statusOrd.int notin StatusV2:
+    var status: StatusV2
+    if not checkedEnumAssign(status, statusOrd):
       return none(HopMessage)
-    msg.status = some(StatusV2(statusOrd))
+    msg.status = some(status)
   some(msg)

 # Circuit Relay V2 Stop Message
@@ -364,7 +366,8 @@ proc decode*(_: typedesc[StopMessage], buf: seq[byte]): Option[StopMessage] =
   if r2.get(): msg.peer = some(peer)
   if r3.get(): msg.limit = limit
   if r4.get():
-    if statusOrd.int notin StatusV2:
+    var status: StatusV2
+    if not checkedEnumAssign(status, statusOrd):
       return none(StopMessage)
-    msg.status = some(StatusV2(statusOrd))
+    msg.status = some(status)
   some(msg)
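The hunks above swap the "range check then cast" pattern for checkedEnumAssign from stew/objects, so a status code that does not map to a declared enum value makes decode return none(...) instead of risking an invalid conversion. A minimal illustration of that helper with a hypothetical enum (not from the relay code):

import stew/objects

type Color = enum
  Red = 1
  Green = 2

proc checkedAssignExample() =
  var c: Color
  assert checkedEnumAssign(c, 2'u32)      # known ordinal: c is assigned, returns true
  assert not checkedEnumAssign(c, 9'u32)  # unknown ordinal: c untouched, returns false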
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import options, sequtils, tables, sugar
|
||||
import options, sequtils, tables
|
||||
|
||||
import chronos, chronicles
|
||||
|
||||
@@ -25,7 +25,6 @@ import ./messages,
|
||||
../../../multicodec,
|
||||
../../../stream/connection,
|
||||
../../../protocols/protocol,
|
||||
../../../transports/transport,
|
||||
../../../errors,
|
||||
../../../utils/heartbeat,
|
||||
../../../signed_envelope
|
||||
@@ -101,7 +100,7 @@ proc createReserveResponse(
|
||||
status: some(Ok))
|
||||
return ok(msg)
|
||||
|
||||
proc isRelayed(conn: Connection): bool =
|
||||
proc isRelayed*(conn: Connection): bool =
|
||||
var wrappedConn = conn
|
||||
while not isNil(wrappedConn):
|
||||
if wrappedConn of RelayConnection:
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -92,15 +92,15 @@ proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async,
|
||||
method dial*(
|
||||
self: RelayTransport,
|
||||
hostname: string,
|
||||
address: MultiAddress): Future[Connection] {.async, gcsafe.} =
|
||||
ma: MultiAddress,
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
|
||||
let address = MultiAddress.init($ma & "/p2p/" & $peerId.get()).tryGet()
|
||||
result = await self.dial(address)
|
||||
|
||||
method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe} =
|
||||
if ma.protocols.isOk():
|
||||
let sma = toSeq(ma.items())
|
||||
if sma.len >= 3:
|
||||
result = CircuitRelay.match(sma[^2].get()) and
|
||||
P2PPattern.match(sma[^1].get())
|
||||
result = sma.len >= 2 and CircuitRelay.match(sma[^1].get())
|
||||
trace "Handles return", ma, result
|
||||
|
||||
proc new*(T: typedesc[RelayTransport], cl: RelayClient, upgrader: Upgrade): T =
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -26,7 +26,10 @@ import ../protobuf/minprotobuf,
|
||||
../multiaddress,
|
||||
../protocols/protocol,
|
||||
../utility,
|
||||
../errors
|
||||
../errors,
|
||||
../observedaddrmanager
|
||||
|
||||
export observedaddrmanager
|
||||
|
||||
logScope:
|
||||
topics = "libp2p identify"
|
||||
@@ -56,6 +59,7 @@ type
|
||||
Identify* = ref object of LPProtocol
|
||||
peerInfo*: PeerInfo
|
||||
sendSignedPeerRecord*: bool
|
||||
observedAddrManager*: ObservedAddrManager
|
||||
|
||||
IdentifyPushHandler* = proc (
|
||||
peer: PeerId,
|
||||
@@ -160,7 +164,8 @@ proc new*(
|
||||
): T =
|
||||
let identify = T(
|
||||
peerInfo: peerInfo,
|
||||
sendSignedPeerRecord: sendSignedPeerRecord
|
||||
sendSignedPeerRecord: sendSignedPeerRecord,
|
||||
observedAddrManager: ObservedAddrManager.new(),
|
||||
)
|
||||
identify.init()
|
||||
identify
|
||||
@@ -182,7 +187,7 @@ method init*(p: Identify) =
|
||||
p.handler = handle
|
||||
p.codec = IdentifyCodec
|
||||
|
||||
proc identify*(p: Identify,
|
||||
proc identify*(self: Identify,
|
||||
conn: Connection,
|
||||
remotePeerId: PeerId): Future[IdentifyInfo] {.async, gcsafe.} =
|
||||
trace "initiating identify", conn
|
||||
@@ -194,23 +199,25 @@ proc identify*(p: Identify,
|
||||
let infoOpt = decodeMsg(message)
|
||||
if infoOpt.isNone():
|
||||
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
|
||||
result = infoOpt.get()
|
||||
|
||||
if result.pubkey.isSome:
|
||||
let peer = PeerId.init(result.pubkey.get())
|
||||
if peer.isErr:
|
||||
raise newException(IdentityInvalidMsgError, $peer.error)
|
||||
else:
|
||||
result.peerId = peer.get()
|
||||
if peer.get() != remotePeerId:
|
||||
trace "Peer ids don't match",
|
||||
remote = peer,
|
||||
local = remotePeerId
|
||||
|
||||
raise newException(IdentityNoMatchError, "Peer ids don't match")
|
||||
else:
|
||||
var info = infoOpt.get()
|
||||
if info.pubkey.isNone():
|
||||
raise newException(IdentityInvalidMsgError, "No pubkey in identify")
|
||||
|
||||
let peer = PeerId.init(info.pubkey.get())
|
||||
if peer.isErr:
|
||||
raise newException(IdentityInvalidMsgError, $peer.error)
|
||||
|
||||
if peer.get() != remotePeerId:
|
||||
trace "Peer ids don't match", remote = peer, local = remotePeerId
|
||||
raise newException(IdentityNoMatchError, "Peer ids don't match")
|
||||
info.peerId = peer.get()
|
||||
|
||||
if info.observedAddr.isSome:
|
||||
if not self.observedAddrManager.addObservation(info.observedAddr.get()):
|
||||
debug "Observed address is not valid", observedAddr = info.observedAddr.get()
|
||||
return info
|
||||
|
||||
proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} =
|
||||
## Create a IdentifyPush protocol. `handler` will be called every time
|
||||
## a peer sends us new `PeerInfo`
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -15,7 +15,7 @@ else:
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos, chronicles
|
||||
import bearssl/[rand, hash]
|
||||
import bearssl/rand
|
||||
import ../protobuf/minprotobuf,
|
||||
../peerinfo,
|
||||
../stream/connection,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -12,9 +12,14 @@ when (NimMajor, NimMinor) < (1, 4):
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos
|
||||
import chronos, stew/results
|
||||
import ../stream/connection
|
||||
|
||||
export results
|
||||
|
||||
const
|
||||
DefaultMaxIncomingStreams* = 10
|
||||
|
||||
type
|
||||
LPProtoHandler* = proc (
|
||||
conn: Connection,
|
||||
@@ -26,11 +31,17 @@ type
|
||||
codecs*: seq[string]
|
||||
handler*: LPProtoHandler ## this handler gets invoked by the protocol negotiator
|
||||
started*: bool
|
||||
maxIncomingStreams: Opt[int]
|
||||
|
||||
method init*(p: LPProtocol) {.base, gcsafe.} = discard
|
||||
method start*(p: LPProtocol) {.async, base.} = p.started = true
|
||||
method stop*(p: LPProtocol) {.async, base.} = p.started = false
|
||||
|
||||
proc maxIncomingStreams*(p: LPProtocol): int =
|
||||
p.maxIncomingStreams.get(DefaultMaxIncomingStreams)
|
||||
|
||||
proc `maxIncomingStreams=`*(p: LPProtocol, val: int) =
|
||||
p.maxIncomingStreams = Opt.some(val)
|
||||
|
||||
func codec*(p: LPProtocol): string =
|
||||
assert(p.codecs.len > 0, "Codecs sequence was empty!")
|
||||
@@ -40,3 +51,16 @@ func `codec=`*(p: LPProtocol, codec: string) =
|
||||
# always insert as first codec
|
||||
# if we use this abstraction
|
||||
p.codecs.insert(codec, 0)
|
||||
|
||||
proc new*(
|
||||
T: type LPProtocol,
|
||||
codecs: seq[string],
|
||||
handler: LPProtoHandler, # default(Opt[int]) or Opt.none(int) don't work on 1.2
|
||||
maxIncomingStreams: Opt[int] | int = Opt[int]()): T =
|
||||
T(
|
||||
codecs: codecs,
|
||||
handler: handler,
|
||||
maxIncomingStreams:
|
||||
when maxIncomingStreams is int: Opt.some(maxIncomingStreams)
|
||||
else: maxIncomingStreams
|
||||
)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[sequtils, sets, hashes, tables]
|
||||
import std/[sets, hashes, tables]
|
||||
import chronos, chronicles, metrics
|
||||
import ./pubsub,
|
||||
./pubsubpeer,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -14,7 +14,7 @@ when (NimMajor, NimMinor) < (1, 4):
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[tables, sets, options, sequtils]
|
||||
import std/[sets, sequtils]
|
||||
import chronos, chronicles, metrics
|
||||
import ./pubsub,
|
||||
./floodsub,
|
||||
@@ -158,8 +158,7 @@ method onNewPeer(g: GossipSub, peer: PubSubPeer) =
|
||||
peer.appScore = stats.appScore
|
||||
peer.behaviourPenalty = stats.behaviourPenalty
|
||||
|
||||
peer.iWantBudget = IWantPeerBudget
|
||||
peer.iHaveBudget = IHavePeerBudget
|
||||
peer.iHaveBudget = IHavePeerBudget
|
||||
|
||||
method onPubSubPeerEvent*(p: GossipSub, peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
|
||||
case event.kind
|
||||
@@ -340,7 +339,7 @@ proc validateAndRelay(g: GossipSub,
|
||||
# In theory, if topics are the same in all messages, we could batch - we'd
|
||||
# also have to be careful to only include validated messages
|
||||
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
|
||||
trace "forwared message to peers", peers = toSendPeers.len, msgId, peer
|
||||
trace "forwarded message to peers", peers = toSendPeers.len, msgId, peer
|
||||
for topic in msg.topicIds:
|
||||
if topic notin g.topics: continue
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -12,13 +12,16 @@ when (NimMajor, NimMinor) < (1, 4):
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[tables, sequtils, sets, algorithm]
|
||||
import std/[tables, sequtils, sets, algorithm, deques]
|
||||
import chronos, chronicles, metrics
|
||||
import "."/[types, scoring]
|
||||
import ".."/[pubsubpeer, peertable, timedcache, mcache, floodsub, pubsub]
|
||||
import ".."/[pubsubpeer, peertable, mcache, floodsub, pubsub]
|
||||
import "../rpc"/[messages]
|
||||
import "../../.."/[peerid, multiaddress, utility, switch, routing_record, signed_envelope, utils/heartbeat]
|
||||
|
||||
logScope:
|
||||
topics = "libp2p gossipsub"
|
||||
|
||||
declareGauge(libp2p_gossipsub_cache_window_size, "the number of messages in the cache")
|
||||
declareGauge(libp2p_gossipsub_peers_per_topic_mesh, "gossipsub peers per topic in mesh", labels = ["topic"])
|
||||
declareGauge(libp2p_gossipsub_peers_per_topic_fanout, "gossipsub peers per topic in fanout", labels = ["topic"])
|
||||
@@ -28,7 +31,7 @@ declareGauge(libp2p_gossipsub_no_peers_topics, "number of topics in mesh with no
|
||||
declareGauge(libp2p_gossipsub_low_peers_topics, "number of topics in mesh with at least one but below dlow peers")
|
||||
declareGauge(libp2p_gossipsub_healthy_peers_topics, "number of topics in mesh with at least dlow peers (but below dhigh)")
|
||||
declareCounter(libp2p_gossipsub_above_dhigh_condition, "number of above dhigh pruning branches ran", labels = ["topic"])
|
||||
declareSummary(libp2p_gossipsub_mcache_hit, "ratio of successful IWANT message cache lookups")
|
||||
declareGauge(libp2p_gossipsub_received_iwants, "received iwants", labels = ["kind"])
|
||||
|
||||
proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) {.raises: [Defect].} =
|
||||
g.withPeerStats(p.peerId) do (stats: var PeerStats):
|
||||
@@ -127,6 +130,10 @@ proc handleGraft*(g: GossipSub,
|
||||
|
||||
continue
|
||||
|
||||
if g.mesh.hasPeer(topic, peer):
|
||||
trace "peer already in mesh", peer, topic
|
||||
continue
|
||||
|
||||
# Check backingOff
|
||||
# Ignore BackoffSlackTime here, since this only for outbound activity
|
||||
# and subtract a second time to avoid race conditions
|
||||
@@ -157,7 +164,8 @@ proc handleGraft*(g: GossipSub,
|
||||
# If they send us a graft before they send us a subscribe, what should
|
||||
# we do? For now, we add them to mesh but don't add them to gossipsub.
|
||||
if topic in g.topics:
|
||||
if g.mesh.peers(topic) < g.parameters.dHigh or peer.outbound:
|
||||
if g.mesh.peers(topic) < g.parameters.dHigh or
|
||||
(peer.outbound and g.mesh.outboundPeers(topic) < g.parameters.dOut):
|
||||
# In the spec, there's no mention of DHi here, but implicitly, a
|
||||
# peer will be removed from the mesh on next rebalance, so we don't want
|
||||
# this peer to push someone else out
|
||||
@@ -173,6 +181,10 @@ proc handleGraft*(g: GossipSub,
|
||||
topicID: topic,
|
||||
peers: g.peerExchangeList(topic),
|
||||
backoff: g.parameters.pruneBackoff.seconds.uint64))
|
||||
|
||||
let backoff = Moment.fromNow(g.parameters.pruneBackoff)
|
||||
g.backingOff
|
||||
.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff
|
||||
else:
|
||||
trace "peer grafting topic we're not interested in", peer, topic
|
||||
# gossip 1.1, we do not send a control message prune anymore
|
||||
@@ -269,28 +281,30 @@ proc handleIHave*(g: GossipSub,
|
||||
proc handleIWant*(g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
iwants: seq[ControlIWant]): seq[Message] {.raises: [Defect].} =
|
||||
var messages: seq[Message]
|
||||
var
|
||||
messages: seq[Message]
|
||||
invalidRequests = 0
|
||||
if peer.score < g.parameters.gossipThreshold:
|
||||
trace "iwant: ignoring low score peer", peer, score = peer.score
|
||||
elif peer.iWantBudget <= 0:
|
||||
trace "iwant: ignoring out of budget peer", peer, score = peer.score
|
||||
else:
|
||||
let deIwants = iwants.deduplicate()
|
||||
for iwant in deIwants:
|
||||
let deIwantsMsgs = iwant.messageIds.deduplicate()
|
||||
for mid in deIwantsMsgs:
|
||||
for iwant in iwants:
|
||||
for mid in iwant.messageIds:
|
||||
trace "peer sent iwant", peer, messageID = mid
|
||||
# canAskIWant will only return true once for a specific message
|
||||
if not peer.canAskIWant(mid):
|
||||
libp2p_gossipsub_received_iwants.inc(1, labelValues=["notsent"])
|
||||
|
||||
invalidRequests.inc()
|
||||
if invalidRequests > 20:
|
||||
libp2p_gossipsub_received_iwants.inc(1, labelValues=["skipped"])
|
||||
return messages
|
||||
continue
|
||||
let msg = g.mcache.get(mid)
|
||||
if msg.isSome:
|
||||
libp2p_gossipsub_mcache_hit.observe(1)
|
||||
# avoid spam
|
||||
if peer.iWantBudget > 0:
|
||||
messages.add(msg.get())
|
||||
dec peer.iWantBudget
|
||||
else:
|
||||
break
|
||||
libp2p_gossipsub_received_iwants.inc(1, labelValues=["correct"])
|
||||
messages.add(msg.get())
|
||||
else:
|
||||
libp2p_gossipsub_mcache_hit.observe(0)
|
||||
libp2p_gossipsub_received_iwants.inc(1, labelValues=["unknown"])
|
||||
return messages
|
||||
|
||||
proc commitMetrics(metrics: var MeshMetrics) {.raises: [Defect].} =
|
||||
@@ -315,10 +329,11 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
|
||||
var
|
||||
prunes, grafts: seq[PubSubPeer]
|
||||
npeers = g.mesh.peers(topic)
|
||||
nOutPeers = g.mesh.outboundPeers(topic)
|
||||
defaultMesh: HashSet[PubSubPeer]
|
||||
backingOff = g.backingOff.getOrDefault(topic)
|
||||
|
||||
if npeers < g.parameters.dLow:
|
||||
if npeers < g.parameters.dLow:
|
||||
trace "replenishing mesh", peers = npeers
|
||||
# replenish the mesh if we're below Dlo
|
||||
|
||||
@@ -357,7 +372,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
|
||||
g.fanout.removePeer(topic, peer)
|
||||
grafts &= peer
|
||||
|
||||
else:
|
||||
elif nOutPeers < g.parameters.dOut:
|
||||
trace "replenishing mesh outbound quota", peers = g.mesh.peers(topic)
|
||||
|
||||
var
|
||||
@@ -385,8 +400,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
|
||||
# sort peers by score, high score first, we are grafting
|
||||
candidates.sort(byScore, SortOrder.Descending)
|
||||
|
||||
# Graft peers so we reach a count of D
|
||||
candidates.setLen(min(candidates.len, g.parameters.dOut))
|
||||
# Graft outgoing peers so we reach a count of dOut
|
||||
candidates.setLen(min(candidates.len, g.parameters.dOut - nOutPeers))
|
||||
|
||||
trace "grafting outbound peers", topic, peers = candidates.len
|
||||
|
||||
@@ -613,8 +628,11 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises:
|
||||
g.rng.shuffle(allPeers)
|
||||
allPeers.setLen(target)
|
||||
|
||||
let msgIdsAsSet = ihave.messageIds.toHashSet()
|
||||
|
||||
for peer in allPeers:
|
||||
control.mgetOrPut(peer, ControlMessage()).ihave.add(ihave)
|
||||
peer.sentIHaves[^1].incl(msgIdsAsSet)
|
||||
|
||||
libp2p_gossipsub_cache_window_size.set(cacheWindowSize.int64)
|
||||
|
||||
@@ -625,7 +643,9 @@ proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
|
||||
# reset IHAVE cap
|
||||
block:
|
||||
for peer in g.peers.values:
|
||||
peer.iWantBudget = IWantPeerBudget
|
||||
peer.sentIHaves.addFirst(default(HashSet[MessageId]))
|
||||
if peer.sentIHaves.len > g.parameters.historyLength:
|
||||
discard peer.sentIHaves.popLast()
|
||||
peer.iHaveBudget = IHavePeerBudget
|
||||
|
||||
var meshMetrics = MeshMetrics()
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -16,7 +16,10 @@ import std/[tables, sets, options]
|
||||
import chronos, chronicles, metrics
|
||||
import "."/[types]
|
||||
import ".."/[pubsubpeer]
|
||||
import "../../.."/[peerid, multiaddress, utility, switch, utils/heartbeat]
|
||||
import "../../.."/[peerid, multiaddress, switch, utils/heartbeat]
|
||||
|
||||
logScope:
|
||||
topics = "libp2p gossipsub"
|
||||
|
||||
declareGauge(libp2p_gossipsub_peers_scores, "the scores of the peers in gossipsub", labels = ["agent"])
|
||||
declareCounter(libp2p_gossipsub_bad_score_disconnection, "the number of peers disconnected by gossipsub", labels = ["agent"])
|
||||
@@ -247,7 +250,7 @@ proc updateScores*(g: GossipSub) = # avoid async
|
||||
if g.parameters.disconnectBadPeers and stats.score < g.parameters.graylistThreshold and
|
||||
peer.peerId notin g.parameters.directPeers:
|
||||
debug "disconnecting bad score peer", peer, score = peer.score
|
||||
asyncSpawn(try: g.disconnectPeer(peer) except Exception as exc: raiseAssert exc.msg)
|
||||
asyncSpawn(g.disconnectPeer(peer))
|
||||
|
||||
libp2p_gossipsub_peers_scores.inc(peer.score, labelValues = [agent])
|
||||
|
||||
@@ -292,11 +295,11 @@ proc rewardDelivered*(
|
||||
|
||||
g.withPeerStats(peer.peerId) do (stats: var PeerStats):
|
||||
stats.topicInfos.withValue(tt, tstats):
|
||||
if tstats[].inMesh:
|
||||
if first:
|
||||
tstats[].firstMessageDeliveries.addCapped(
|
||||
1, topicParams.firstMessageDeliveriesCap)
|
||||
if first:
|
||||
tstats[].firstMessageDeliveries.addCapped(
|
||||
1, topicParams.firstMessageDeliveriesCap)
|
||||
|
||||
if tstats[].inMesh:
|
||||
tstats[].meshMessageDeliveries.addCapped(
|
||||
1, topicParams.meshMessageDeliveriesCap)
|
||||
do: # make sure we don't loose this information
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -13,11 +13,13 @@ else:
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos
|
||||
import std/[tables, sets]
|
||||
import std/[options, tables, sets]
|
||||
import ".."/[floodsub, peertable, mcache, pubsubpeer]
|
||||
import "../rpc"/[messages]
|
||||
import "../../.."/[peerid, multiaddress, utility]
|
||||
|
||||
export options, tables, sets
|
||||
|
||||
const
|
||||
GossipSubCodec* = "/meshsub/1.1.0"
|
||||
GossipSubCodec_10* = "/meshsub/1.0.0"
|
||||
@@ -46,7 +48,6 @@ const
|
||||
|
||||
const
|
||||
BackoffSlackTime* = 2 # seconds
|
||||
IWantPeerBudget* = 25 # 25 messages per second ( reset every heartbeat )
|
||||
IHavePeerBudget* = 10
|
||||
# the max amount of IHave to expose, not by spec, but go as example
|
||||
# rust sigp: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/config.rs#L572
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -12,9 +12,11 @@ when (NimMajor, NimMinor) < (1, 4):
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[tables, sets]
|
||||
import std/[tables, sets, sequtils]
|
||||
import ./pubsubpeer, ../../peerid
|
||||
|
||||
export tables, sets
|
||||
|
||||
type
|
||||
PeerTable* = Table[string, HashSet[PubSubPeer]] # topic string to peer map
|
||||
|
||||
@@ -51,3 +53,10 @@ func peers*(table: PeerTable, topic: string): int =
|
||||
except KeyError: raiseAssert "checked with in"
|
||||
else:
|
||||
0
|
||||
|
||||
func outboundPeers*(table: PeerTable, topic: string): int =
|
||||
if topic in table:
|
||||
try: table[topic].countIt(it.outbound)
|
||||
except KeyError: raiseAssert "checked with in"
|
||||
else:
|
||||
0
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -36,6 +36,7 @@ import metrics
|
||||
import stew/results
|
||||
export results
|
||||
|
||||
export tables, sets
|
||||
export PubSubPeer
|
||||
export PubSubObserver
|
||||
export protocol
|
||||
@@ -118,7 +119,7 @@ type
|
||||
anonymize*: bool ## if we omit fromPeer and seqno from RPC messages we send
|
||||
subscriptionValidator*: SubscriptionValidator # callback used to validate subscriptions
|
||||
topicsHigh*: int ## the maximum number of topics a peer is allowed to subscribe to
|
||||
maxMessageSize*: int ##\
|
||||
maxMessageSize*: int ##\
|
||||
## the maximum raw message size we'll globally allow
|
||||
## for finer tuning, check message size on topic validator
|
||||
##
|
||||
@@ -130,7 +131,7 @@ type
|
||||
|
||||
knownTopics*: HashSet[string]
|
||||
|
||||
method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base.} =
|
||||
method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
|
||||
## handle peer disconnects
|
||||
##
|
||||
|
||||
@@ -202,7 +203,7 @@ proc broadcast*(
|
||||
# Fast path that only encodes message once
|
||||
let encoded = encodeRpcMsg(msg, p.anonymize)
|
||||
for peer in sendPeers:
|
||||
peer.sendEncoded(encoded)
|
||||
asyncSpawn peer.sendEncoded(encoded)
|
||||
|
||||
proc sendSubs*(p: PubSub,
|
||||
peer: PubSubPeer,
|
||||
@@ -307,8 +308,6 @@ proc getOrCreatePeer*(
|
||||
# metrics
|
||||
libp2p_pubsub_peers.set(p.peers.len.int64)
|
||||
|
||||
pubSubPeer.connect()
|
||||
|
||||
return pubSubPeer
|
||||
|
||||
proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
|
||||
@@ -377,12 +376,13 @@ method handleConn*(p: PubSub,
|
||||
finally:
|
||||
await conn.closeWithEOF()
|
||||
|
||||
method subscribePeer*(p: PubSub, peer: PeerId) {.base.} =
|
||||
method subscribePeer*(p: PubSub, peer: PeerId) {.base, gcsafe.} =
|
||||
## subscribe to remote peer to receive/send pubsub
|
||||
## messages
|
||||
##
|
||||
|
||||
discard p.getOrCreatePeer(peer, p.codecs)
|
||||
let pubSubPeer = p.getOrCreatePeer(peer, p.codecs)
|
||||
pubSubPeer.connect()
|
||||
|
||||
proc updateTopicMetrics(p: PubSub, topic: string) =
|
||||
# metrics
|
||||
@@ -400,13 +400,17 @@ proc updateTopicMetrics(p: PubSub, topic: string) =
|
||||
|
||||
libp2p_pubsub_topic_handlers.set(others, labelValues = ["other"])
|
||||
|
||||
method onTopicSubscription*(p: PubSub, topic: string, subscribed: bool) {.base.} =
|
||||
method onTopicSubscription*(p: PubSub, topic: string, subscribed: bool) {.base, gcsafe.} =
|
||||
# Called when subscribe is called the first time for a topic or unsubscribe
|
||||
# removes the last handler
|
||||
|
||||
# Notify others that we are no longer interested in the topic
|
||||
for _, peer in p.peers:
|
||||
p.sendSubs(peer, [topic], subscribed)
|
||||
# If we don't have a sendConn yet, we will
|
||||
# send the full sub list when we get the sendConn,
|
||||
# so no need to send it here
|
||||
if peer.hasSendConn:
|
||||
p.sendSubs(peer, [topic], subscribed)
|
||||
|
||||
if subscribed:
|
||||
libp2p_pubsub_subscriptions.inc()
|
||||
@@ -433,7 +437,7 @@ proc unsubscribe*(p: PubSub, topics: openArray[TopicPair]) {.public.} =
|
||||
for t in topics:
|
||||
p.unsubscribe(t.topic, t.handler)
|
||||
|
||||
proc unsubscribeAll*(p: PubSub, topic: string) {.public.} =
|
||||
proc unsubscribeAll*(p: PubSub, topic: string) {.public, gcsafe.} =
|
||||
## unsubscribe every `handler` from `topic`
|
||||
if topic notin p.topics:
|
||||
debug "unsubscribeAll called for an unknown topic", topic
|
||||
@@ -495,7 +499,7 @@ method initPubSub*(p: PubSub)
|
||||
|
||||
method addValidator*(p: PubSub,
|
||||
topic: varargs[string],
|
||||
hook: ValidatorHandler) {.base, public.} =
|
||||
hook: ValidatorHandler) {.base, public, gcsafe.} =
|
||||
## Add a validator to a `topic`. Each new message received in this
|
||||
## will be sent to `hook`. `hook` can return either `Accept`,
|
||||
## `Ignore` or `Reject` (which can descore the peer)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[sequtils, strutils, tables, hashes, options]
|
||||
import std/[sequtils, strutils, tables, hashes, options, sets, deques]
|
||||
import stew/results
|
||||
import chronos, chronicles, nimcrypto/sha2, metrics
|
||||
import rpc/[messages, message, protobuf],
|
||||
@@ -55,24 +55,31 @@ type
|
||||
onEvent*: OnEvent # Connectivity updates for peer
|
||||
codec*: string # the protocol that this peer joined from
|
||||
sendConn*: Connection # cached send connection
|
||||
connectedFut: Future[void]
|
||||
address*: Option[MultiAddress]
|
||||
peerId*: PeerId
|
||||
handler*: RPCHandler
|
||||
observers*: ref seq[PubSubObserver] # ref as in smart_ptr
|
||||
|
||||
score*: float64
|
||||
iWantBudget*: int
|
||||
sentIHaves*: Deque[HashSet[MessageId]]
|
||||
iHaveBudget*: int
|
||||
maxMessageSize: int
|
||||
appScore*: float64 # application specific score
|
||||
behaviourPenalty*: float64 # the eventual penalty score
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
shortAgent*: string
|
||||
|
||||
RPCHandler* = proc(peer: PubSubPeer, msg: RPCMsg): Future[void]
|
||||
{.gcsafe, raises: [Defect].}
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
func shortAgent*(p: PubSubPeer): string =
|
||||
if p.sendConn.isNil or p.sendConn.getWrapped().isNil:
|
||||
"unknown"
|
||||
else:
|
||||
#TODO the sendConn is setup before identify,
|
||||
#so we have to read the parents short agent..
|
||||
p.sendConn.getWrapped().shortAgent
|
||||
|
||||
func hash*(p: PubSubPeer): Hash =
|
||||
p.peerId.hash
|
||||
|
||||
@@ -165,6 +172,8 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
|
||||
|
||||
proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
|
||||
try:
|
||||
if p.connectedFut.finished:
|
||||
p.connectedFut = newFuture[void]()
|
||||
let newConn = await p.getConn()
|
||||
if newConn.isNil:
|
||||
raise (ref LPError)(msg: "Cannot establish send connection")
|
||||
@@ -174,6 +183,11 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
|
||||
# stop working so we make an effort to only keep a single channel alive
|
||||
|
||||
trace "Get new send connection", p, newConn
|
||||
|
||||
# Careful to race conditions here.
|
||||
# Topic subscription relies on either connectedFut
|
||||
# to be completed, or onEvent to be called later
|
||||
p.connectedFut.complete()
|
||||
p.sendConn = newConn
|
||||
p.address = if p.sendConn.observedAddr.isSome: some(p.sendConn.observedAddr.get) else: none(MultiAddress)
|
||||
|
||||
@@ -208,27 +222,13 @@ proc connectImpl(p: PubSubPeer) {.async.} =
|
||||
debug "Could not establish send connection", msg = exc.msg
|
||||
|
||||
proc connect*(p: PubSubPeer) =
|
||||
if p.connected:
|
||||
return
|
||||
|
||||
asyncSpawn connectImpl(p)
|
||||
|
||||
proc sendImpl(conn: Connection, encoded: seq[byte]): Future[void] {.raises: [Defect].} =
|
||||
trace "sending encoded msgs to peer", conn, encoded = shortLog(encoded)
|
||||
|
||||
let fut = conn.writeLp(encoded) # Avoid copying `encoded` into future
|
||||
proc sendWaiter(): Future[void] {.async.} =
|
||||
try:
|
||||
await fut
|
||||
trace "sent pubsub message to remote", conn
|
||||
|
||||
except CatchableError as exc: # never cancelled
|
||||
# Because we detach the send call from the currently executing task using
|
||||
# asyncSpawn, no exceptions may leak out of it
|
||||
trace "Unable to send to remote", conn, msg = exc.msg
|
||||
# Next time sendConn is used, it will be have its close flag set and thus
|
||||
# will be recycled
|
||||
|
||||
await conn.close() # This will clean up the send connection
|
||||
|
||||
return sendWaiter()
|
||||
proc hasSendConn*(p: PubSubPeer): bool =
|
||||
p.sendConn != nil
|
||||
|
||||
template sendMetrics(msg: RPCMsg): untyped =
|
||||
when defined(libp2p_expensive_metrics):
|
||||
@@ -237,7 +237,7 @@ template sendMetrics(msg: RPCMsg): untyped =
|
||||
# metrics
|
||||
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])
|
||||
|
||||
proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect].} =
|
||||
proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect], async.} =
|
||||
doAssert(not isNil(p), "pubsubpeer nil!")
|
||||
|
||||
if msg.len <= 0:
|
||||
@@ -248,14 +248,27 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect].} =
|
||||
info "trying to send a too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
|
||||
return
|
||||
|
||||
let conn = p.sendConn
|
||||
if p.sendConn == nil:
|
||||
discard await p.connectedFut.withTimeout(1.seconds)
|
||||
|
||||
var conn = p.sendConn
|
||||
if conn == nil or conn.closed():
|
||||
trace "No send connection, skipping message", p, msg = shortLog(msg)
|
||||
debug "No send connection, skipping message", p, msg = shortLog(msg)
|
||||
return
|
||||
|
||||
# To limit the size of the closure, we only pass the encoded message and
|
||||
# connection to the spawned send task
|
||||
asyncSpawn sendImpl(conn, msg)
|
||||
trace "sending encoded msgs to peer", conn, encoded = shortLog(msg)
|
||||
|
||||
try:
|
||||
await conn.writeLp(msg)
|
||||
trace "sent pubsub message to remote", conn
|
||||
except CatchableError as exc: # never cancelled
|
||||
# Because we detach the send call from the currently executing task using
|
||||
# asyncSpawn, no exceptions may leak out of it
|
||||
trace "Unable to send to remote", conn, msg = exc.msg
|
||||
# Next time sendConn is used, it will be have its close flag set and thus
|
||||
# will be recycled
|
||||
|
||||
await conn.close() # This will clean up the send connection
|
||||
|
||||
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =
|
||||
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
|
||||
@@ -277,7 +290,14 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =
|
||||
sendMetrics(msg)
|
||||
encodeRpcMsg(msg, anonymize)
|
||||
|
||||
p.sendEncoded(encoded)
|
||||
asyncSpawn p.sendEncoded(encoded)
|
||||
|
||||
proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
|
||||
for sentIHave in p.sentIHaves.mitems():
|
||||
if msgId in sentIHave:
|
||||
sentIHave.excl(msgId)
|
||||
return true
|
||||
return false
|
||||
|
||||
proc new*(
|
||||
T: typedesc[PubSubPeer],
|
||||
@@ -287,10 +307,12 @@ proc new*(
|
||||
codec: string,
|
||||
maxMessageSize: int): T =
|
||||
|
||||
T(
|
||||
result = T(
|
||||
getConn: getConn,
|
||||
onEvent: onEvent,
|
||||
codec: codec,
|
||||
peerId: peerId,
|
||||
connectedFut: newFuture[void](),
|
||||
maxMessageSize: maxMessageSize
|
||||
)
|
||||
result.sentIHaves.addFirst(default(HashSet[MessageId]))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -12,7 +12,6 @@ when (NimMajor, NimMinor) < (1, 4):
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import hashes
|
||||
import chronicles, metrics, stew/[byteutils, endians2]
|
||||
import ./messages,
|
||||
./protobuf,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -22,7 +22,7 @@ import messages,
|
||||
|
||||
|
||||
logScope:
|
||||
topics = "pubsubprotobuf"
|
||||
topics = "libp2p pubsubprotobuf"
|
||||
|
||||
when defined(libp2p_protobuf_metrics):
|
||||
import metrics
|
||||
@@ -304,14 +304,15 @@ proc decodeMessages*(pb: ProtoBuffer): ProtoResult[seq[Message]] {.inline.} =
|
||||
if ? pb.getRepeatedField(2, msgpbs):
|
||||
trace "decodeMessages: read messages", count = len(msgpbs)
|
||||
for item in msgpbs:
|
||||
msgs.add(? decodeMessage(initProtoBuffer(item)))
|
||||
# size is constrained at the network level
|
||||
msgs.add(? decodeMessage(initProtoBuffer(item, maxSize = uint.high)))
|
||||
else:
|
||||
trace "decodeMessages: no messages found"
|
||||
ok(msgs)
|
||||
|
||||
proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
|
||||
trace "encodeRpcMsg: encoding message", msg = msg.shortLog()
|
||||
var pb = initProtoBuffer()
|
||||
var pb = initProtoBuffer(maxSize = uint.high)
|
||||
for item in msg.subscriptions:
|
||||
pb.write(1, item)
|
||||
for item in msg.messages:
|
||||
@@ -324,7 +325,7 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
|
||||
|
||||
proc decodeRpcMsg*(msg: seq[byte]): ProtoResult[RPCMsg] {.inline.} =
|
||||
trace "decodeRpcMsg: decoding message", msg = msg.shortLog()
|
||||
var pb = initProtoBuffer(msg)
|
||||
var pb = initProtoBuffer(msg, maxSize = uint.high)
|
||||
var rpcMsg = ok(RPCMsg())
|
||||
assign(rpcMsg.get().messages, ? pb.decodeMessages())
|
||||
assign(rpcMsg.get().subscriptions, ? pb.decodeSubscriptions())
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -14,7 +14,7 @@ else:
|
||||
|
||||
import std/[tables]
|
||||
|
||||
import chronos/timer
|
||||
import chronos/timer, stew/results
|
||||
|
||||
const Timeout* = 10.seconds # default timeout in ms
|
||||
|
||||
@@ -22,6 +22,7 @@ type
|
||||
TimedEntry*[K] = ref object of RootObj
|
||||
key: K
|
||||
addedAt: Moment
|
||||
expiresAt: Moment
|
||||
next, prev: TimedEntry[K]
|
||||
|
||||
TimedCache*[K] = object of RootObj
|
||||
@@ -30,15 +31,14 @@ type
|
||||
timeout: Duration
|
||||
|
||||
func expire*(t: var TimedCache, now: Moment = Moment.now()) =
|
||||
let expirationLimit = now - t.timeout
|
||||
while t.head != nil and t.head.addedAt < expirationLimit:
|
||||
while t.head != nil and t.head.expiresAt < now:
|
||||
t.entries.del(t.head.key)
|
||||
t.head.prev = nil
|
||||
t.head = t.head.next
|
||||
if t.head == nil: t.tail = nil
|
||||
|
||||
func del*[K](t: var TimedCache[K], key: K): bool =
|
||||
# Removes existing key from cache, returning false if it was not present
|
||||
func del*[K](t: var TimedCache[K], key: K): Opt[TimedEntry[K]] =
|
||||
# Removes existing key from cache, returning the previous value if present
|
||||
var item: TimedEntry[K]
|
||||
if t.entries.pop(key, item):
|
||||
if t.head == item: t.head = item.next
|
||||
@@ -46,9 +46,9 @@ func del*[K](t: var TimedCache[K], key: K): bool =
|
||||
|
||||
if item.next != nil: item.next.prev = item.prev
|
||||
if item.prev != nil: item.prev.next = item.next
|
||||
true
|
||||
Opt.some(item)
|
||||
else:
|
||||
false
|
||||
Opt.none(TimedEntry[K])
|
||||
|
||||
func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
|
||||
# Puts k in cache, returning true if the item was already present and false
|
||||
@@ -56,9 +56,13 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
|
||||
# refreshed.
|
||||
t.expire(now)
|
||||
|
||||
var res = t.del(k) # Refresh existing item
|
||||
var previous = t.del(k) # Refresh existing item
|
||||
|
||||
let node = TimedEntry[K](key: k, addedAt: now)
|
||||
let addedAt =
|
||||
if previous.isSome: previous.get().addedAt
|
||||
else: now
|
||||
|
||||
let node = TimedEntry[K](key: k, addedAt: addedAt, expiresAt: now + t.timeout)
|
||||
|
||||
if t.head == nil:
|
||||
t.tail = node
|
||||
@@ -66,7 +70,7 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
|
||||
else:
|
||||
# search from tail because typically that's where we add when now grows
|
||||
var cur = t.tail
|
||||
while cur != nil and node.addedAt < cur.addedAt:
|
||||
while cur != nil and node.expiresAt < cur.expiresAt:
|
||||
cur = cur.prev
|
||||
|
||||
if cur == nil:
|
||||
@@ -82,7 +86,7 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
|
||||
|
||||
t.entries[k] = node
|
||||
|
||||
res
|
||||
previous.isSome()
|
||||
|
||||
func contains*[K](t: TimedCache[K], k: K): bool =
|
||||
k in t.entries
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[oids, strformat]
|
||||
import std/strformat
|
||||
import chronos
|
||||
import chronicles
|
||||
import bearssl/[rand, hash]
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -56,7 +56,6 @@ proc new*(T: type SecureConn,
|
||||
peerId: peerId,
|
||||
observedAddr: observedAddr,
|
||||
closeEvent: conn.closeEvent,
|
||||
upgraded: conn.upgraded,
|
||||
timeout: timeout,
|
||||
dir: conn.dir)
|
||||
result.initStream()
|
||||
|
||||
@@ -1,5 +1,5 @@
# Nim-Libp2p
-# Copyright (c) 2022 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -51,10 +51,12 @@ proc decode*(
  for address in addressInfos:
    var addressInfo = AddressInfo()
    let subProto = initProtoBuffer(address)
-    if ? subProto.getField(1, addressInfo.address) == false:
-      return err(ProtoError.RequiredFieldMissing)
-    record.addresses &= addressInfo
+    let f = subProto.getField(1, addressInfo.address)
+    if f.isOk() and f.get():
+      record.addresses &= addressInfo
+
+  if record.addresses.len == 0:
+    return err(ProtoError.RequiredFieldMissing)

  ok(record)
147
libp2p/services/autorelayservice.nim
Normal file
@@ -0,0 +1,147 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import chronos, chronicles, times, tables, sequtils
import ../switch,
       ../protocols/connectivity/relay/[client, utils]

logScope:
  topics = "libp2p autorelay"

type
  OnReservationHandler = proc (addresses: seq[MultiAddress]) {.gcsafe, raises: [Defect].}

  AutoRelayService* = ref object of Service
    running: bool
    runner: Future[void]
    client: RelayClient
    numRelays: int
    relayPeers: Table[PeerId, Future[void]]
    relayAddresses: Table[PeerId, seq[MultiAddress]]
    backingOff: seq[PeerId]
    peerAvailable: AsyncEvent
    onReservation: OnReservationHandler
    addressMapper: AddressMapper
    rng: ref HmacDrbgContext

proc addressMapper(
    self: AutoRelayService,
    listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
  return concat(toSeq(self.relayAddresses.values))

proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch) {.async.} =
  while self.running:
    let
      rsvp = await self.client.reserve(relayPid).wait(chronos.seconds(5))
      relayedAddr = rsvp.addrs.mapIt(
        MultiAddress.init($it & "/p2p-circuit").tryGet())
      ttl = rsvp.expire.int64 - times.now().utc.toTime.toUnix
    if ttl <= 60:
      # A reservation under a minute is basically useless
      break
    if relayPid notin self.relayAddresses or self.relayAddresses[relayPid] != relayedAddr:
      self.relayAddresses[relayPid] = relayedAddr
      await switch.peerInfo.update()
      debug "Updated relay addresses", relayPid, relayedAddr
      if not self.onReservation.isNil():
        self.onReservation(concat(toSeq(self.relayAddresses.values)))
    await sleepAsync chronos.seconds(ttl - 30)

method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
  self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
    return await addressMapper(self, listenAddrs)

  let hasBeenSetUp = await procCall Service(self).setup(switch)
  if hasBeenSetUp:
    proc handlePeerJoined(peerId: PeerId, event: PeerEvent) {.async.} =
      trace "Peer Joined", peerId
      if self.relayPeers.len < self.numRelays:
        self.peerAvailable.fire()
    proc handlePeerLeft(peerId: PeerId, event: PeerEvent) {.async.} =
      trace "Peer Left", peerId
      self.relayPeers.withValue(peerId, future):
        future[].cancel()
    switch.addPeerEventHandler(handlePeerJoined, Joined)
    switch.addPeerEventHandler(handlePeerLeft, Left)
    switch.peerInfo.addressMappers.add(self.addressMapper)
    await self.run(switch)
  return hasBeenSetUp

proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
  await sleepAsync(chronos.seconds(5))
  self.backingOff.keepItIf(it != pid)
  self.peerAvailable.fire()

proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
  while true:
    # Remove relayPeers that failed
    let peers = toSeq(self.relayPeers.keys())
    for k in peers:
      if self.relayPeers[k].finished():
        self.relayPeers.del(k)
        self.relayAddresses.del(k)
        if not self.onReservation.isNil():
          self.onReservation(concat(toSeq(self.relayAddresses.values)))
        # To avoid ddosing our peers in certain conditions
        self.backingOff.add(k)
        asyncSpawn self.manageBackedOff(k)

    # Get all connected relayPeers
    self.peerAvailable.clear()
    var connectedPeers = switch.connectedPeers(Direction.Out)
    connectedPeers.keepItIf(RelayV2HopCodec in switch.peerStore[ProtoBook][it] and
                            it notin self.relayPeers and
                            it notin self.backingOff)
    self.rng.shuffle(connectedPeers)

    for relayPid in connectedPeers:
      if self.relayPeers.len() >= self.numRelays:
        break
      self.relayPeers[relayPid] = self.reserveAndUpdate(relayPid, switch)

    if self.relayPeers.len() > 0:
      await one(toSeq(self.relayPeers.values())) or self.peerAvailable.wait()
    else:
      await self.peerAvailable.wait()
    await sleepAsync(200.millis)

method run*(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
  if self.running:
    trace "Autorelay is already running"
    return
  self.running = true
  self.runner = self.innerRun(switch)

method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
  let hasBeenStopped = await procCall Service(self).stop(switch)
  if hasBeenStopped:
    self.running = false
    self.runner.cancel()
    switch.peerInfo.addressMappers.keepItIf(it != self.addressMapper)
    await switch.peerInfo.update()
  return hasBeenStopped

proc getAddresses*(self: AutoRelayService): seq[MultiAddress] =
  result = concat(toSeq(self.relayAddresses.values))

proc new*(T: typedesc[AutoRelayService],
          numRelays: int,
          client: RelayClient,
          onReservation: OnReservationHandler,
          rng: ref HmacDrbgContext): T =
  T(numRelays: numRelays,
    client: client,
    onReservation: onReservation,
    peerAvailable: newAsyncEvent(),
    rng: rng)
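Usage note for autorelayservice.nim: the service keeps up to numRelays circuit-relay reservations alive and reports the resulting /p2p-circuit addresses through onReservation. A minimal wiring sketch follows; newRng(), RelayClient.new() and the import paths are assumptions, only AutoRelayService.new and getAddresses come from this file:

# Sketch only: everything except AutoRelayService.new/getAddresses is assumed.
import chronos
import libp2p                                      # assumed umbrella import (MultiAddress, newRng)
import libp2p/services/autorelayservice
import libp2p/protocols/connectivity/relay/client  # RelayClient

proc onReservation(addresses: seq[MultiAddress]) {.gcsafe, raises: [Defect].} =
  discard # e.g. advertise the fresh /p2p-circuit addresses to peers

let
  rng = newRng()                    # assumed helper
  relayClient = RelayClient.new()   # assumed constructor
  autoRelay = AutoRelayService.new(
    numRelays = 2,                  # keep reservations with up to two relays
    client = relayClient,
    onReservation = onReservation,
    rng = rng)

# Once the switch has driven setup/run, the current relayed addresses are
# available via autoRelay.getAddresses().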
129
libp2p/services/hpservice.nim
Normal file
@@ -0,0 +1,129 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import std/[tables, sequtils]

import chronos, chronicles

import ../switch, ../wire
import ../protocols/rendezvous
import ../services/autorelayservice
import ../protocols/connectivity/relay/relay
import ../protocols/connectivity/autonat/service
import ../protocols/connectivity/dcutr/[client, server]
import ../multicodec

logScope:
  topics = "libp2p hpservice"

type
  HPService* = ref object of Service
    newConnectedPeerHandler: PeerEventHandler
    onNewStatusHandler: StatusAndConfidenceHandler
    autoRelayService: AutoRelayService
    autonatService: AutonatService
    isPublicIPAddrProc: IsPublicIPAddrProc

  IsPublicIPAddrProc* = proc(ta: TransportAddress): bool {.gcsafe, raises: [Defect].}

proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayService: AutoRelayService,
          isPublicIPAddrProc: IsPublicIPAddrProc = isGlobal): T =
  return T(autonatService: autonatService, autoRelayService: autoRelayService, isPublicIPAddrProc: isPublicIPAddrProc)

proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Future[bool] {.async.} =
  proc tryConnect(address: MultiAddress): Future[bool] {.async.} =
    debug "Trying to create direct connection", peerId, address
    await switch.connect(peerId, @[address], true, false)
    debug "Direct connection created."
    return true

  await sleepAsync(500.milliseconds) # wait for AddressBook to be populated
  for address in switch.peerStore[AddressBook][peerId]:
    try:
      let isRelayed = address.contains(multiCodec("p2p-circuit"))
      if isRelayed.isErr() or isRelayed.get():
        continue
      if DNS.matchPartial(address):
        return await tryConnect(address)
      else:
        let ta = initTAddress(address)
        if ta.isOk() and self.isPublicIPAddrProc(ta.get()):
          return await tryConnect(address)
    except CatchableError as err:
      debug "Failed to create direct connection.", err = err.msg
      continue
  return false

proc closeRelayConn(relayedConn: Connection) {.async.} =
  await sleepAsync(2000.milliseconds) # grace period before closing relayed connection
  await relayedConn.close()

proc newConnectedPeerHandler(self: HPService, switch: Switch, peerId: PeerId, event: PeerEvent) {.async.} =
  try:
    # Get all connections to the peer. If there is at least one non-relayed connection, return.
    let connections = switch.connManager.getConnections()[peerId].mapIt(it.connection)
    if connections.anyIt(not isRelayed(it)):
      return
    let incomingRelays = connections.filterIt(it.transportDir == Direction.In)
    if incomingRelays.len == 0:
      return

    let relayedConn = incomingRelays[0]

    if await self.tryStartingDirectConn(switch, peerId):
      await closeRelayConn(relayedConn)
      return

    let dcutrClient = DcutrClient.new()
    var natAddrs = switch.peerStore.getMostObservedProtosAndPorts()
    if natAddrs.len == 0:
      natAddrs = switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it))
    await dcutrClient.startSync(switch, peerId, natAddrs)
    await closeRelayConn(relayedConn)
  except CatchableError as err:
    debug "Hole punching failed during dcutr", err = err.msg

method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
  var hasBeenSetup = await procCall Service(self).setup(switch)
  hasBeenSetup = hasBeenSetup and await self.autonatService.setup(switch)

  if hasBeenSetup:
    let dcutrProto = Dcutr.new(switch)
    switch.mount(dcutrProto)

    self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent) {.async.} =
      await newConnectedPeerHandler(self, switch, peerId, event)

    switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)

    self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
      if networkReachability == NetworkReachability.NotReachable:
        discard await self.autoRelayService.setup(switch)
      elif networkReachability == NetworkReachability.Reachable:
        discard await self.autoRelayService.stop(switch)

      # We do it here instead of in the AutonatService because this is useful only when hole punching.
      for t in switch.transports:
        t.networkReachability = networkReachability

    self.autonatService.statusAndConfidenceHandler(self.onNewStatusHandler)
  return hasBeenSetup

method run*(self: HPService, switch: Switch) {.async, public.} =
  await self.autonatService.run(switch)

method stop*(self: HPService, switch: Switch): Future[bool] {.async, public.} =
  discard await self.autonatService.stop(switch)
  if not isNil(self.newConnectedPeerHandler):
    switch.connManager.removePeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
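Composition note for hpservice.nim: it glues reachability detection (AutonatService), relay reservations (AutoRelayService) and DCUtR hole punching together. A rough sketch of how the pieces could be assembled; every constructor call except HPService.new and AutoRelayService.new is an assumption, not part of this diff:

# Sketch only: AutonatService.new/AutonatClient.new/RelayClient.new arguments are assumed.
let
  rng = newRng()
  relayClient = RelayClient.new()
  autonatService = AutonatService.new(AutonatClient.new(), rng)      # assumed signature
  autoRelayService = AutoRelayService.new(2, relayClient, nil, rng)  # nil: no reservation callback
  hpService = HPService.new(autonatService, autoRelayService)

# hpService.setup(switch) then mounts Dcutr, registers the Joined-peer handler and
# reacts to autonat status changes: NotReachable starts autorelay, Reachable stops it.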
@@ -1,5 +1,5 @@
# Nim-Libp2p
-# Copyright (c) 2022 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -1,5 +1,5 @@
# Nim-LibP2P
-# Copyright (c) 2022 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -18,9 +18,6 @@ import chronos, chronicles, metrics
import ../stream/connection
import ./streamseq

-when chronicles.enabledLogLevel == LogLevel.TRACE:
-  import oids
-
export connection

logScope:
@@ -1,5 +1,5 @@
# Nim-LibP2P
-# Copyright (c) 2022 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
else:
  {.push raises: [].}

-import std/[oids, strformat]
+import std/[strformat]
import stew/results
import chronos, chronicles, metrics
import connection
@@ -103,11 +103,11 @@ method readOnce*(s: ChronosStream, pbytes: pointer, nbytes: int): Future[int] {.
  withExceptions:
    result = await s.client.readOnce(pbytes, nbytes)
    s.activity = true # reset activity flag
-    libp2p_network_bytes.inc(nbytes.int64, labelValues = ["in"])
+    libp2p_network_bytes.inc(result.int64, labelValues = ["in"])
    when defined(libp2p_agents_metrics):
      s.trackPeerIdentity()
      if s.tracked:
-        libp2p_peers_traffic_read.inc(nbytes.int64, labelValues = [s.shortAgent])
+        libp2p_peers_traffic_read.inc(result.int64, labelValues = [s.shortAgent])

proc completeWrite(
    s: ChronosStream, fut: Future[int], msgLen: int): Future[void] {.async.} =
@@ -1,5 +1,5 @@
# Nim-LibP2P
-# Copyright (c) 2022 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -39,7 +39,6 @@ type
    timeoutHandler*: TimeoutHandler # timeout handler
    peerId*: PeerId
    observedAddr*: Opt[MultiAddress]
-    upgraded*: Future[void]
    protocol*: string # protocol used by the connection, used as tag for metrics
    transportDir*: Direction # The bottom level transport (generally the socket) direction
    when defined(libp2p_agents_metrics):
@@ -47,22 +46,6 @@ type

proc timeoutMonitor(s: Connection) {.async, gcsafe.}

-proc isUpgraded*(s: Connection): bool =
-  if not isNil(s.upgraded):
-    return s.upgraded.finished
-
-proc upgrade*(s: Connection, failed: ref CatchableError = nil) =
-  if not isNil(s.upgraded):
-    if not isNil(failed):
-      s.upgraded.fail(failed)
-      return
-
-    s.upgraded.complete()
-
-proc onUpgrade*(s: Connection) {.async.} =
-  if not isNil(s.upgraded):
-    await s.upgraded
-
func shortLog*(conn: Connection): string =
  try:
    if conn.isNil: "Connection(nil)"
@@ -80,9 +63,6 @@ method initStream*(s: Connection) =

  doAssert(isNil(s.timerTaskFut))

-  if isNil(s.upgraded):
-    s.upgraded = newFuture[void]()
-
  if s.timeout > 0.millis:
    trace "Monitoring for timeout", s, timeout = s.timeout

@@ -100,10 +80,6 @@ method closeImpl*(s: Connection): Future[void] =
    s.timerTaskFut.cancel()
    s.timerTaskFut = nil

-  if not isNil(s.upgraded) and not s.upgraded.finished:
-    s.upgraded.cancel()
-    s.upgraded = nil
-
  trace "Closed connection", s

  procCall LPStream(s).closeImpl()
@@ -158,6 +134,13 @@ proc timeoutMonitor(s: Connection) {.async, gcsafe.} =
method getWrapped*(s: Connection): Connection {.base.} =
  doAssert(false, "not implemented!")

+when defined(libp2p_agents_metrics):
+  proc setShortAgent*(s: Connection, shortAgent: string) =
+    var conn = s
+    while not isNil(conn):
+      conn.shortAgent = shortAgent
+      conn = conn.getWrapped()
+
proc new*(C: type Connection,
          peerId: PeerId,
          dir: Direction,
@@ -1,5 +1,5 @@
# Nim-LibP2P
-# Copyright (c) 2022 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -80,7 +80,7 @@ type
    opened*: uint64
    closed*: uint64

-proc setupStreamTracker(name: string): StreamTracker =
+proc setupStreamTracker*(name: string): StreamTracker =
  let tracker = new StreamTracker

  proc dumpTracking(): string {.gcsafe.} =
Some files were not shown because too many files have changed in this diff.