mirror of https://github.com/vacp2p/nim-libp2p.git
synced 2026-01-10 13:58:17 -05:00

Compare commits: race-cance ... v1.7.1 (21 commits)
Commits (SHA1):
11e386522b
8855bce085
ed5670408b
97192a3c80
294d06323c
a3b8729cbe
6c970911f2
5d48776b02
d389d96789
09fe199b6b
68306cf1f1
b37133ca43
3e3df07269
1771534030
21a444197c
966996542e
8070b21825
d98152f266
47a51983b5
70754cd575
a1811e7395
.github/actions/install_nim/action.yml (vendored, 6 changes)
@@ -6,7 +6,7 @@ inputs:
   cpu:
     description: "CPU to build for"
     default: "amd64"
-  nim_branch:
+  nim_ref:
     description: "Nim version"
     default: "version-1-6"
   shell:
@@ -117,7 +117,7 @@ runs:
       uses: actions/cache@v4
       with:
         path: '${{ github.workspace }}/nim'
-        key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}
+        key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_ref }}-cache-${{ env.cache_nonce }}

     - name: Build Nim and Nimble
       shell: ${{ inputs.shell }}
@@ -126,6 +126,6 @@ runs:
         # We don't want partial matches of the cache restored
         rm -rf nim
         curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
-        env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_branch }} \
+        env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_ref }} \
           QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
           bash build_nim.sh nim csources dist/nimble NimBinaries
.github/workflows/ci.yml (vendored, 19 changes)
@@ -5,6 +5,7 @@ on:
     branches:
       - master
   pull_request:
+  merge_group:
   workflow_dispatch:

 concurrency:
@@ -28,10 +29,13 @@ jobs:
           cpu: amd64
         - os: windows
           cpu: amd64
         nim:
-          - branch: version-1-6
+          - ref: version-1-6
             memory_management: refc
-          - branch: version-2-0
+          # The ref below corresponds to the branch "version-2-0".
+          # Right before an update from Nimble 0.16.1 to 0.16.2.
+          # That update breaks our dependency resolution.
+          - ref: 8754469f4947844c5938f56e1fba846c349354b5
             memory_management: refc
       include:
         - platform:
@@ -55,7 +59,7 @@ jobs:
       run:
         shell: ${{ matrix.shell }}

-    name: '${{ matrix.platform.os }}-${{ matrix.platform.cpu }} (Nim ${{ matrix.nim.branch }})'
+    name: '${{ matrix.platform.os }}-${{ matrix.platform.cpu }} (Nim ${{ matrix.nim.ref }})'
     runs-on: ${{ matrix.builder }}
     steps:
       - name: Checkout
@@ -69,7 +73,7 @@ jobs:
           os: ${{ matrix.platform.os }}
           cpu: ${{ matrix.platform.cpu }}
           shell: ${{ matrix.shell }}
-          nim_branch: ${{ matrix.nim.branch }}
+          nim_ref: ${{ matrix.nim.ref }}

       - name: Setup Go
         uses: actions/setup-go@v5
@@ -85,9 +89,9 @@ jobs:
         uses: actions/cache@v3
         with:
           path: nimbledeps
-          # Using nim.branch as a simple way to differentiate between nimble using the "pkgs" or "pkgs2" directories.
+          # Using nim.ref as a simple way to differentiate between nimble using the "pkgs" or "pkgs2" directories.
           # The change happened on Nimble v0.14.0.
-          key: nimbledeps-${{ matrix.nim.branch }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows
+          key: nimbledeps-${{ matrix.nim.ref }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows

       - name: Install deps
         if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
@@ -108,5 +112,6 @@ jobs:
           nim --version
           nimble --version
           gcc --version

+          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
           nimble test
.github/workflows/coverage.yml (vendored, 1 change)
@@ -6,6 +6,7 @@ on:
     branches:
       - master
   pull_request:
+  merge_group:
   workflow_dispatch:

 concurrency:
.github/workflows/daily_amd64.yml (vendored, 2 changes)
@@ -10,5 +10,5 @@ jobs:
     name: Daily amd64
     uses: ./.github/workflows/daily_common.yml
     with:
-      nim: "[{'branch': 'version-1-6', 'memory_management': 'refc'}, {'branch': 'version-2-0', 'memory_management': 'refc'}]"
+      nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': '8754469f4947844c5938f56e1fba846c349354b5', 'memory_management': 'refc'}]"
       cpu: "['amd64']"
.github/workflows/daily_common.yml (vendored, 22 changes)
@@ -7,7 +7,7 @@ on:
       nim:
         description: 'Nim Configuration'
         required: true
-        type: string # Following this format: [{"branch": ..., "memory_management": ...}, ...]
+        type: string # Following this format: [{"ref": ..., "memory_management": ...}, ...]
       cpu:
         description: 'CPU'
         required: true
@@ -17,6 +17,11 @@ on:
         required: false
         type: string
         default: "[]"
+      use_sat_solver:
+        description: 'Install dependencies with SAT Solver'
+        required: false
+        type: boolean
+        default: false

 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
@@ -53,9 +58,8 @@ jobs:
     run:
       shell: ${{ matrix.platform.shell }}

-    name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.nim.branch }})'
+    name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.nim.ref }})'
     runs-on: ${{ matrix.platform.builder }}
-    continue-on-error: ${{ matrix.nim.branch == 'devel' || matrix.nim.branch == 'version-2-0' }}
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -65,7 +69,7 @@ jobs:
         with:
           os: ${{ matrix.platform.os }}
           shell: ${{ matrix.platform.shell }}
-          nim_branch: ${{ matrix.nim.branch }}
+          nim_ref: ${{ matrix.nim.ref }}
           cpu: ${{ matrix.cpu }}

       - name: Setup Go
@@ -86,4 +90,12 @@ jobs:
         run: |
           nim --version
           nimble --version
-          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}" nimble test
+
+          if [[ "${{ inputs.use_sat_solver }}" == "true" ]]; then
+            dependency_solver="sat"
+          else
+            dependency_solver="legacy"
+          fi
+
+          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }} --solver:${dependency_solver}"
+          nimble test
.github/workflows/daily_devel.yml (vendored, 2 changes)
@@ -10,5 +10,5 @@ jobs:
     name: Daily Nim Devel
     uses: ./.github/workflows/daily_common.yml
     with:
-      nim: "[{'branch': 'devel', 'memory_management': 'orc'}]"
+      nim: "[{'ref': 'devel', 'memory_management': 'orc'}]"
       cpu: "['amd64']"
.github/workflows/daily_i386.yml (vendored, 2 changes)
@@ -10,6 +10,6 @@ jobs:
     name: Daily i386 (Linux)
     uses: ./.github/workflows/daily_common.yml
     with:
-      nim: "[{'branch': 'version-1-6', 'memory_management': 'refc'}, {'branch': 'version-2-0', 'memory_management': 'refc'}, {'branch': 'devel', 'memory_management': 'orc'}]"
+      nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': '8754469f4947844c5938f56e1fba846c349354b5', 'memory_management': 'refc'}, {'ref': 'devel', 'memory_management': 'orc'}]"
       cpu: "['i386']"
       exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"
.github/workflows/daily_sat.yml (vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
+name: Daily SAT
+
+on:
+  schedule:
+    - cron: "30 6 * * *"
+  workflow_dispatch:
+
+jobs:
+  test_amd64:
+    name: Daily SAT
+    uses: ./.github/workflows/daily_common.yml
+    with:
+      nim: "[{'ref': '8754469f4947844c5938f56e1fba846c349354b5', 'memory_management': 'refc'}]"
+      cpu: "['amd64']"
+      use_sat_solver: true
.github/workflows/interop.yml (vendored, 14 changes)
@@ -2,6 +2,7 @@ name: Interoperability Tests

 on:
   pull_request:
+  merge_group:
   push:
     branches:
       - master
@@ -16,8 +17,9 @@ jobs:
     name: Run transport interoperability tests
     runs-on: ubuntu-22.04
     steps:
-      - name: Free Disk Space (Ubuntu)
-        # For some reason the original job (libp2p/test-plans) has enough disk space, but this one doesn't.
+      - name: Free Disk Space
+        # For some reason we have space issues while running this action. Likely while building the image.
+        # This action will free up some space to avoid the issue.
         uses: jlumbroso/free-disk-space@v1.3.1
         with:
           tool-cache: true
@@ -31,6 +33,10 @@ jobs:
         with:
           test-filter: nim-libp2p-head
           extra-versions: ${{ github.workspace }}/tests/transport-interop/version.json
+          s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
+          s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
+          s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}

   run-hole-punching-interop:
     name: Run hole-punching interoperability tests
@@ -45,3 +51,7 @@ jobs:
         with:
           test-filter: nim-libp2p-head
           extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json
+          s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
+          s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
+          s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
.github/workflows/linters.yml (vendored, 4 changes)
@@ -2,6 +2,7 @@ name: Linters

 on:
   pull_request:
+  merge_group:

 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
@@ -29,4 +30,5 @@ jobs:
       - name: Check style
         run: |
           shopt -s extglob # Enable extended globbing
-          ./nph --check examples libp2p tests tools *.@(nim|nims|nimble)
+          ./nph examples libp2p tests tools *.@(nim|nims|nimble)
+          git diff --exit-code
.pinned (4 changes)
@@ -1,12 +1,14 @@
 bearssl;https://github.com/status-im/nim-bearssl@#667b40440a53a58e9f922e29e20818720c62d9ac
 chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
-chronos;https://github.com/status-im/nim-chronos@#dc3847e4d6733dfc3811454c2a9c384b87343e26
+chronos;https://github.com/status-im/nim-chronos@#c04576d829b8a0a1b12baaa8bc92037501b3a4a0
 dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
 faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
 httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
 json_serialization;https://github.com/status-im/nim-json-serialization@#85b7ea093cb85ee4f433a617b97571bd709d30df
 metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
+ngtcp2;https://github.com/status-im/nim-ngtcp2@#6834f4756b6af58356ac9c4fef3d71db3c3ae5fe
 nimcrypto;https://github.com/cheatfate/nimcrypto@#1c8d6e3caf3abc572136ae9a1da81730c4eb4288
+quic;https://github.com/status-im/nim-quic.git@#ddcb31ffb74b5460ab37fd13547eca90594248bc
 results;https://github.com/arnetheduck/nim-results@#f3c666a272c69d70cb41e7245e7f6844797303ad
 secp256k1;https://github.com/status-im/nim-secp256k1@#7246d91c667f4cc3759fdd50339caa45a2ecd8be
 serialization;https://github.com/status-im/nim-serialization@#4bdbc29e54fe54049950e352bb969aab97173b35
config.nims

@@ -12,6 +12,9 @@ switch("warning", "LockLevel:off")
 switch("warningAsError", "UseBase:on")
 --styleCheck:
   error
+--mm:
+  refc
+  # reconsider when there's a version-2-2 branch worth testing with as we might switch to orc

 # Avoid some rare stack corruption while using exceptions with a SEH-enabled
 # toolchain: https://github.com/status-im/nimbus-eth2/issues/3121
funding.json (new file, 5 lines)
@@ -0,0 +1,5 @@
+{
+  "opRetro": {
+    "projectId": "0xc9561ba3e4eca5483b40f8b1a254a73c91fefe4f8aee32dc20c0d96dcf33fe80"
+  }
+}
libp2p.nim

@@ -52,6 +52,7 @@ else:
     stream/connection,
     transports/transport,
     transports/tcptransport,
+    transports/quictransport,
     protocols/secure/noise,
     cid,
     multihash,
libp2p.nimble

@@ -1,16 +1,17 @@
 mode = ScriptMode.Verbose

 packageName = "libp2p"
-version = "1.5.0"
+version = "1.7.1"
 author = "Status Research & Development GmbH"
 description = "LibP2P implementation"
 license = "MIT"
 skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]

 requires "nim >= 1.6.0",
-  "nimcrypto >= 0.4.1", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
-  "chronicles >= 0.10.2", "chronos >= 4.0.2", "metrics", "secp256k1", "stew#head",
-  "websock", "unittest2"
+  "nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
+  "chronicles >= 0.10.2", "chronos >= 4.0.3", "metrics", "secp256k1", "stew#head",
+  "websock", "unittest2",
+  "https://github.com/status-im/nim-quic.git#ddcb31ffb74b5460ab37fd13547eca90594248bc"

 let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
 let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -19,8 +20,8 @@ let verbose = getEnv("V", "") notin ["", "0"]

 let cfg =
   " --styleCheck:usages --styleCheck:error" &
-  (if verbose: "" else: " --verbosity:0 --hints:off") &
-  " --skipParentCfg --skipUserCfg -f" & " --threads:on --opt:speed"
+  (if verbose: "" else: " --verbosity:0 --hints:off") & " --skipUserCfg -f" &
+  " --threads:on --opt:speed"

 import hashes, strutils
libp2p/connmanager.nim

@@ -296,7 +296,8 @@ proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [CatchableError].} =
     if expectedConn != nil and not expectedConn.finished:
       expectedConn.complete(muxer)
     else:
-      debug "Too many connections for peer", conns = c.muxed.getOrDefault(peerId).len
+      debug "Too many connections for peer",
+        conns = c.muxed.getOrDefault(peerId).len, peerId, dir

       raise newTooManyConnectionsError()
libp2p/crypto/crypto.nim

@@ -198,7 +198,7 @@ proc random*(
   case scheme
   of PKScheme.RSA:
     when supported(PKScheme.RSA):
-      let rsakey = ?RsaPrivateKey.random(rng, bits).orError(KeyError)
+      let rsakey = ?RsaPrivateKey.random(rng, bits).orError(CryptoError.KeyError)
       ok(PrivateKey(scheme: scheme, rsakey: rsakey))
     else:
       err(SchemeError)
@@ -210,7 +210,8 @@ proc random*(
       err(SchemeError)
   of PKScheme.ECDSA:
     when supported(PKScheme.ECDSA):
-      let eckey = ?ecnist.EcPrivateKey.random(Secp256r1, rng).orError(KeyError)
+      let eckey =
+        ?ecnist.EcPrivateKey.random(Secp256r1, rng).orError(CryptoError.KeyError)
       ok(PrivateKey(scheme: scheme, eckey: eckey))
     else:
       err(SchemeError)
@@ -237,10 +238,11 @@ proc random*(
       let skkey = SkPrivateKey.random(rng)
       ok(PrivateKey(scheme: PKScheme.Secp256k1, skkey: skkey))
     elif supported(PKScheme.RSA):
-      let rsakey = ?RsaPrivateKey.random(rng, bits).orError(KeyError)
+      let rsakey = ?RsaPrivateKey.random(rng, bits).orError(CryptoError.KeyError)
       ok(PrivateKey(scheme: PKScheme.RSA, rsakey: rsakey))
     elif supported(PKScheme.ECDSA):
-      let eckey = ?ecnist.EcPrivateKey.random(Secp256r1, rng).orError(KeyError)
+      let eckey =
+        ?ecnist.EcPrivateKey.random(Secp256r1, rng).orError(CryptoError.KeyError)
       ok(PrivateKey(scheme: PKScheme.ECDSA, eckey: eckey))
     else:
       err(SchemeError)
@@ -258,7 +260,7 @@ proc random*(
   case scheme
   of PKScheme.RSA:
     when supported(PKScheme.RSA):
-      let pair = ?RsaKeyPair.random(rng, bits).orError(KeyError)
+      let pair = ?RsaKeyPair.random(rng, bits).orError(CryptoError.KeyError)
       ok(
         KeyPair(
           seckey: PrivateKey(scheme: scheme, rsakey: pair.seckey),
@@ -280,7 +282,7 @@ proc random*(
       err(SchemeError)
   of PKScheme.ECDSA:
     when supported(PKScheme.ECDSA):
-      let pair = ?EcKeyPair.random(Secp256r1, rng).orError(KeyError)
+      let pair = ?EcKeyPair.random(Secp256r1, rng).orError(CryptoError.KeyError)
       ok(
         KeyPair(
           seckey: PrivateKey(scheme: scheme, eckey: pair.seckey),
@@ -583,7 +585,7 @@ proc init*(t: typedesc[PrivateKey], data: openArray[byte]): CryptoResult[PrivateKey] =
   ## Create new private key from libp2p's protobuf serialized binary form.
   var res: t
   if not res.init(data):
-    err(KeyError)
+    err(CryptoError.KeyError)
   else:
     ok(res)
@@ -591,7 +593,7 @@ proc init*(t: typedesc[PublicKey], data: openArray[byte]): CryptoResult[PublicKey] =
   ## Create new public key from libp2p's protobuf serialized binary form.
   var res: t
   if not res.init(data):
-    err(KeyError)
+    err(CryptoError.KeyError)
   else:
     ok(res)
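The qualification matters because Nim's standard library defines an exception type that is also spelled `KeyError`, so the bare identifier can bind to the wrong symbol. A minimal illustrative sketch of the pattern (the enum and proc below are simplified stand-ins, not the library's actual definitions):

  import results

  type CryptoError = enum
    KeyError # same spelling as system.KeyError, the exception type
    SchemeError

  proc decodeKey(data: openArray[byte]): Result[int, CryptoError] =
    if data.len == 0:
      err(CryptoError.KeyError) # qualified, so it cannot resolve to the exception
    else:
      ok(data.len)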
libp2p/multiaddress.nim

@@ -12,7 +12,7 @@
 {.push raises: [].}
 {.push public.}

-import pkg/chronos, chronicles
+import pkg/[chronos, chronicles, results]
 import std/[nativesockets, net, hashes]
 import tables, strutils, sets
 import
@@ -25,8 +25,8 @@ import
   protobuf/minprotobuf,
   errors,
   utility
-import stew/[base58, base32, endians2, results]
-export results, minprotobuf, vbuffer, utility
+import stew/[base58, base32, endians2]
+export results, minprotobuf, vbuffer, errors, utility

 logScope:
   topics = "libp2p multiaddress"
@@ -71,6 +71,9 @@ type
    tcpProtocol
    udpProtocol

+func maErr*(msg: string): ref MaError =
+  (ref MaError)(msg: msg)
+
 const
   # These are needed in order to avoid an ambiguity error stemming from
   # some cint constants with the same name defined in the posix modules
@@ -408,7 +411,12 @@ const
   UDP_IP* = mapAnd(IP, mapEq("udp"))
   UDP* = mapOr(UDP_DNS, UDP_IP)
   UTP* = mapAnd(UDP, mapEq("utp"))
-  QUIC* = mapAnd(UDP, mapEq("quic"))
+  QUIC_IP* = mapAnd(UDP_IP, mapEq("quic"))
+  QUIC_DNS* = mapAnd(UDP_DNS, mapEq("quic"))
+  QUIC* = mapOr(QUIC_DNS, QUIC_IP)
+  QUIC_V1_IP* = mapAnd(UDP_IP, mapEq("quic-v1"))
+  QUIC_V1_DNS* = mapAnd(UDP_DNS, mapEq("quic-v1"))
+  QUIC_V1* = mapOr(QUIC_V1_DNS, QUIC_V1_IP)
   UNIX* = mapEq("unix")
   WS_DNS* = mapAnd(TCP_DNS, mapEq("ws"))
   WS_IP* = mapAnd(TCP_IP, mapEq("ws"))
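The split matchers keep the older "quic" address form and the "quic-v1" form distinct. A quick sketch of how the new patterns behave (addresses are illustrative):

  let quicV1 = MultiAddress.init("/ip4/127.0.0.1/udp/4242/quic-v1").tryGet()
  let quicDraft = MultiAddress.init("/ip4/127.0.0.1/udp/4242/quic").tryGet()
  assert QUIC_V1.match(quicV1)
  assert not QUIC_V1.match(quicDraft) # still matched by the QUIC pattern
  assert QUIC.match(quicDraft)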
@@ -970,23 +978,21 @@ proc append*(m1: var MultiAddress, m2: MultiAddress): MaResult[void] =
   else:
     ok()

-proc `&`*(m1, m2: MultiAddress): MultiAddress {.raises: [LPError].} =
+proc `&`*(m1, m2: MultiAddress): MultiAddress {.raises: [MaError].} =
   ## Concatenates two addresses ``m1`` and ``m2``, and returns result.
   ##
   ## This procedure performs validation of concatenated result and can raise
   ## exception on error.
   ##
-  concat(m1, m2).tryGet()
+  concat(m1, m2).valueOr:
+    raise maErr error

-proc `&=`*(m1: var MultiAddress, m2: MultiAddress) {.raises: [LPError].} =
+proc `&=`*(m1: var MultiAddress, m2: MultiAddress) {.raises: [MaError].} =
   ## Concatenates two addresses ``m1`` and ``m2``.
   ##
   ## This procedure performs validation of concatenated result and can raise
   ## exception on error.
   ##
-  m1.append(m2).tryGet()
+  m1.append(m2).isOkOr:
+    raise maErr error

 proc `==`*(m1: var MultiAddress, m2: MultiAddress): bool =
   ## Check of two MultiAddress are equal
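A short usage sketch of the narrowed exception contract (addresses are illustrative):

  let ip4 = MultiAddress.init("/ip4/127.0.0.1").tryGet()
  let tcp = MultiAddress.init("/tcp/4040").tryGet()
  try:
    echo ip4 & tcp # /ip4/127.0.0.1/tcp/4040
  except MaError as exc:
    echo "concatenation rejected: ", exc.msg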
||||
@@ -35,8 +35,6 @@ const
|
||||
RendezVousCodec* = "/rendezvous/1.0.0"
|
||||
MinimumDuration* = 2.hours
|
||||
MaximumDuration = 72.hours
|
||||
MinimumTTL = MinimumDuration.seconds.uint64
|
||||
MaximumTTL = MaximumDuration.seconds.uint64
|
||||
RegistrationLimitPerPeer = 1000
|
||||
DiscoverLimit = 1000'u64
|
||||
SemaphoreDefaultSize = 5
|
||||
@@ -320,6 +318,10 @@ type
|
||||
peers: seq[PeerId]
|
||||
cookiesSaved: Table[PeerId, Table[string, seq[byte]]]
|
||||
switch: Switch
|
||||
minDuration: Duration
|
||||
maxDuration: Duration
|
||||
minTTL: uint64
|
||||
maxTTL: uint64
|
||||
|
||||
proc checkPeerRecord(spr: seq[byte], peerId: PeerId): Result[void, string] =
|
||||
if spr.len == 0:
|
||||
@@ -395,7 +397,7 @@ proc save(
|
||||
rdv.registered.add(
|
||||
RegisteredData(
|
||||
peerId: peerId,
|
||||
expiration: Moment.now() + r.ttl.get(MinimumTTL).int64.seconds,
|
||||
expiration: Moment.now() + r.ttl.get(rdv.minTTL).int64.seconds,
|
||||
data: r,
|
||||
)
|
||||
)
|
||||
@@ -409,8 +411,8 @@ proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
|
||||
libp2p_rendezvous_register.inc()
|
||||
if r.ns.len notin 1 .. 255:
|
||||
return conn.sendRegisterResponseError(InvalidNamespace)
|
||||
let ttl = r.ttl.get(MinimumTTL)
|
||||
if ttl notin MinimumTTL .. MaximumTTL:
|
||||
let ttl = r.ttl.get(rdv.minTTL)
|
||||
if ttl notin rdv.minTTL .. rdv.maxTTL:
|
||||
return conn.sendRegisterResponseError(InvalidTTL)
|
||||
let pr = checkPeerRecord(r.signedPeerRecord, conn.peerId)
|
||||
if pr.isErr():
|
||||
@@ -506,24 +508,35 @@ proc advertisePeer(rdv: RendezVous, peer: PeerId, msg: seq[byte]) {.async.} =
|
||||
await rdv.sema.acquire()
|
||||
discard await advertiseWrap().withTimeout(5.seconds)
|
||||
|
||||
method advertise*(
|
||||
rdv: RendezVous, ns: string, ttl: Duration = MinimumDuration
|
||||
) {.async, base.} =
|
||||
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
|
||||
raise newException(RendezVousError, "Wrong Signed Peer Record")
|
||||
proc advertise*(
|
||||
rdv: RendezVous, ns: string, ttl: Duration, peers: seq[PeerId]
|
||||
) {.async.} =
|
||||
if ns.len notin 1 .. 255:
|
||||
raise newException(RendezVousError, "Invalid namespace")
|
||||
if ttl notin MinimumDuration .. MaximumDuration:
|
||||
raise newException(RendezVousError, "Invalid time to live")
|
||||
|
||||
if ttl notin rdv.minDuration .. rdv.maxDuration:
|
||||
raise newException(RendezVousError, "Invalid time to live: " & $ttl)
|
||||
|
||||
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
|
||||
raise newException(RendezVousError, "Wrong Signed Peer Record")
|
||||
|
||||
let
|
||||
r = Register(ns: ns, signedPeerRecord: sprBuff, ttl: Opt.some(ttl.seconds.uint64))
|
||||
msg = encode(Message(msgType: MessageType.Register, register: Opt.some(r)))
|
||||
|
||||
rdv.save(ns, rdv.switch.peerInfo.peerId, r)
|
||||
let fut = collect(newSeq()):
|
||||
for peer in rdv.peers:
|
||||
|
||||
let futs = collect(newSeq()):
|
||||
for peer in peers:
|
||||
trace "Send Advertise", peerId = peer, ns
|
||||
rdv.advertisePeer(peer, msg.buffer)
|
||||
await allFutures(fut)
|
||||
|
||||
await allFutures(futs)
|
||||
|
||||
method advertise*(
|
||||
rdv: RendezVous, ns: string, ttl: Duration = rdv.minDuration
|
||||
) {.async, base.} =
|
||||
await rdv.advertise(ns, ttl, rdv.peers)
|
||||
|
||||
proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
|
||||
let
|
||||
@@ -540,9 +553,8 @@ proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
|
||||
@[]
|
||||
|
||||
proc request*(
|
||||
rdv: RendezVous, ns: string, l: int = DiscoverLimit.int
|
||||
rdv: RendezVous, ns: string, l: int = DiscoverLimit.int, peers: seq[PeerId]
|
||||
): Future[seq[PeerRecord]] {.async.} =
|
||||
let nsSalted = ns & rdv.salt
|
||||
var
|
||||
s: Table[PeerId, (PeerRecord, Register)]
|
||||
limit: uint64
|
||||
@@ -587,8 +599,8 @@ proc request*(
|
||||
for r in resp.registrations:
|
||||
if limit == 0:
|
||||
return
|
||||
let ttl = r.ttl.get(MaximumTTL + 1)
|
||||
if ttl > MaximumTTL:
|
||||
let ttl = r.ttl.get(rdv.maxTTL + 1)
|
||||
if ttl > rdv.maxTTL:
|
||||
continue
|
||||
let
|
||||
spr = SignedPeerRecord.decode(r.signedPeerRecord).valueOr:
|
||||
@@ -596,7 +608,7 @@ proc request*(
|
||||
pr = spr.data
|
||||
if s.hasKey(pr.peerId):
|
||||
let (prSaved, rSaved) = s[pr.peerId]
|
||||
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get(MaximumTTL) < ttl) or
|
||||
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get(rdv.maxTTL) < ttl) or
|
||||
prSaved.seqNo < pr.seqNo:
|
||||
s[pr.peerId] = (pr, r)
|
||||
else:
|
||||
@@ -605,8 +617,6 @@ proc request*(
|
||||
for (_, r) in s.values():
|
||||
rdv.save(ns, peer, r, false)
|
||||
|
||||
# copy to avoid resizes during the loop
|
||||
let peers = rdv.peers
|
||||
for peer in peers:
|
||||
if limit == 0:
|
||||
break
|
||||
@@ -621,6 +631,11 @@ proc request*(
|
||||
trace "exception catch in request", description = exc.msg
|
||||
return toSeq(s.values()).mapIt(it[0])
|
||||
|
||||
proc request*(
|
||||
rdv: RendezVous, ns: string, l: int = DiscoverLimit.int
|
||||
): Future[seq[PeerRecord]] {.async.} =
|
||||
await rdv.request(ns, l, rdv.peers)
|
||||
|
||||
proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
|
||||
let nsSalted = ns & rdv.salt
|
||||
try:
|
||||
@@ -630,16 +645,15 @@ proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
   except KeyError:
     return

-proc unsubscribe*(rdv: RendezVous, ns: string) {.async.} =
-  # TODO: find a way to improve this, maybe something similar to the advertise
+proc unsubscribe*(rdv: RendezVous, ns: string, peerIds: seq[PeerId]) {.async.} =
   if ns.len notin 1 .. 255:
     raise newException(RendezVousError, "Invalid namespace")
-  rdv.unsubscribeLocally(ns)
+
   let msg = encode(
     Message(msgType: MessageType.Unregister, unregister: Opt.some(Unregister(ns: ns)))
   )

-  proc unsubscribePeer(rdv: RendezVous, peerId: PeerId) {.async.} =
+  proc unsubscribePeer(peerId: PeerId) {.async.} =
     try:
       let conn = await rdv.switch.dial(peerId, RendezVousCodec)
       defer:
@@ -648,8 +662,16 @@ proc unsubscribe*(rdv: RendezVous, ns: string) {.async.} =
     except CatchableError as exc:
       trace "exception while unsubscribing", description = exc.msg

-  for peer in rdv.peers:
-    discard await rdv.unsubscribePeer(peer).withTimeout(5.seconds)
+  let futs = collect(newSeq()):
+    for peer in peerIds:
+      unsubscribePeer(peer)
+
+  discard await allFutures(futs).withTimeout(5.seconds)
+
+proc unsubscribe*(rdv: RendezVous, ns: string) {.async.} =
+  rdv.unsubscribeLocally(ns)
+
+  await rdv.unsubscribe(ns, rdv.peers)

 proc setup*(rdv: RendezVous, switch: Switch) =
   rdv.switch = switch
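Together, the new overloads let a caller target an explicit subset of peers, while the original signatures keep the old behaviour by delegating with `rdv.peers`. A hedged sketch (`rdv` and `somePeers: seq[PeerId]` are placeholders):

  await rdv.advertise("my-namespace", 2.hours, somePeers)
  let records = await rdv.request("my-namespace", 100, somePeers)
  await rdv.unsubscribe("my-namespace", somePeers)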
@@ -662,7 +684,25 @@ proc setup*(rdv: RendezVous, switch: Switch) =
   rdv.switch.addPeerEventHandler(handlePeer, Joined)
   rdv.switch.addPeerEventHandler(handlePeer, Left)

-proc new*(T: typedesc[RendezVous], rng: ref HmacDrbgContext = newRng()): T =
+proc new*(
+    T: typedesc[RendezVous],
+    rng: ref HmacDrbgContext = newRng(),
+    minDuration = MinimumDuration,
+    maxDuration = MaximumDuration,
+): T {.raises: [RendezVousError].} =
+  if minDuration < 1.minutes:
+    raise newException(RendezVousError, "TTL too short: 1 minute minimum")
+
+  if maxDuration > 72.hours:
+    raise newException(RendezVousError, "TTL too long: 72 hours maximum")
+
+  if minDuration >= maxDuration:
+    raise newException(RendezVousError, "Minimum TTL longer than maximum")
+
+  let
+    minTTL = minDuration.seconds.uint64
+    maxTTL = maxDuration.seconds.uint64
+
   let rdv = T(
     rng: rng,
     salt: string.fromBytes(generateBytes(rng[], 8)),
@@ -670,6 +710,10 @@ proc new*(T: typedesc[RendezVous], rng: ref HmacDrbgContext = newRng()): T =
     defaultDT: Moment.now() - 1.days,
     #registerEvent: newAsyncEvent(),
     sema: newAsyncSemaphore(SemaphoreDefaultSize),
+    minDuration: minDuration,
+    maxDuration: maxDuration,
+    minTTL: minTTL,
+    maxTTL: maxTTL,
   )
   logScope:
     topics = "libp2p discovery rendezvous"
@@ -701,9 +745,13 @@ proc new*(T: typedesc[RendezVous], rng: ref HmacDrbgContext = newRng()): T =
   return rdv

 proc new*(
-    T: typedesc[RendezVous], switch: Switch, rng: ref HmacDrbgContext = newRng()
-): T =
-  let rdv = T.new(rng)
+    T: typedesc[RendezVous],
+    switch: Switch,
+    rng: ref HmacDrbgContext = newRng(),
+    minDuration = MinimumDuration,
+    maxDuration = MaximumDuration,
+): T {.raises: [RendezVousError].} =
+  let rdv = T.new(rng, minDuration, maxDuration)
   rdv.setup(switch)
   return rdv
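A construction sketch using the new validation window (`mySwitch` is a placeholder `Switch`; the arguments must satisfy the checks above):

  let rdv = RendezVous.new(
    switch = mySwitch,
    minDuration = 5.minutes, # at least 1 minute
    maxDuration = 24.hours,  # at most 72 hours, and greater than minDuration
  )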
libp2p/transports/quictransport.nim (new file, 224 lines)
@@ -0,0 +1,224 @@
+import std/sequtils
+import pkg/chronos
+import pkg/chronicles
+import pkg/quic
+import ../multiaddress
+import ../multicodec
+import ../stream/connection
+import ../wire
+import ../muxers/muxer
+import ../upgrademngrs/upgrade
+import ./transport
+
+export multiaddress
+export multicodec
+export connection
+export transport
+
+logScope:
+  topics = "libp2p quictransport"
+
+type
+  P2PConnection = connection.Connection
+  QuicConnection = quic.Connection
+
+# Stream
+type QuicStream* = ref object of P2PConnection
+  stream: Stream
+  cached: seq[byte]
+
+proc new(
+    _: type QuicStream, stream: Stream, oaddr: Opt[MultiAddress], peerId: PeerId
+): QuicStream =
+  let quicstream = QuicStream(stream: stream, observedAddr: oaddr, peerId: peerId)
+  procCall P2PConnection(quicstream).initStream()
+  quicstream
+
+template mapExceptions(body: untyped) =
+  try:
+    body
+  except QuicError:
+    raise newLPStreamEOFError()
+  except CatchableError:
+    raise newLPStreamEOFError()
+
+method readOnce*(
+    stream: QuicStream, pbytes: pointer, nbytes: int
+): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
+  try:
+    if stream.cached.len == 0:
+      stream.cached = await stream.stream.read()
+    result = min(nbytes, stream.cached.len)
+    copyMem(pbytes, addr stream.cached[0], result)
+    stream.cached = stream.cached[result ..^ 1]
+  except CatchableError as exc:
+    raise newLPStreamEOFError()
+
+{.push warning[LockLevel]: off.}
+method write*(
+    stream: QuicStream, bytes: seq[byte]
+) {.async: (raises: [CancelledError, LPStreamError]).} =
+  mapExceptions(await stream.stream.write(bytes))
+
+{.pop.}
+
+method closeImpl*(stream: QuicStream) {.async: (raises: []).} =
+  try:
+    await stream.stream.close()
+  except CatchableError as exc:
+    discard
+  await procCall P2PConnection(stream).closeImpl()
+
+# Session
+type QuicSession* = ref object of P2PConnection
+  connection: QuicConnection
+
+method close*(session: QuicSession) {.async, base.} =
+  await session.connection.close()
+  await procCall P2PConnection(session).close()
+
+proc getStream*(
+    session: QuicSession, direction = Direction.In
+): Future[QuicStream] {.async.} =
+  var stream: Stream
+  case direction
+  of Direction.In:
+    stream = await session.connection.incomingStream()
+  of Direction.Out:
+    stream = await session.connection.openStream()
+    await stream.write(@[]) # QUIC streams do not exist until data is sent
+  return QuicStream.new(stream, session.observedAddr, session.peerId)
+
+method getWrapped*(self: QuicSession): P2PConnection =
+  nil
+
+# Muxer
+type QuicMuxer = ref object of Muxer
+  quicSession: QuicSession
+  handleFut: Future[void]
+
+method newStream*(
+    m: QuicMuxer, name: string = "", lazy: bool = false
+): Future[P2PConnection] {.
+    async: (raises: [CancelledError, LPStreamError, MuxerError])
+.} =
+  try:
+    return await m.quicSession.getStream(Direction.Out)
+  except CatchableError as exc:
+    raise newException(MuxerError, exc.msg, exc)
+
+proc handleStream(m: QuicMuxer, chann: QuicStream) {.async.} =
+  ## call the muxer stream handler for this channel
+  ##
+  try:
+    await m.streamHandler(chann)
+    trace "finished handling stream"
+    doAssert(chann.closed, "connection not closed by handler!")
+  except CatchableError as exc:
+    trace "Exception in mplex stream handler", msg = exc.msg
+    await chann.close()
+
+method handle*(m: QuicMuxer): Future[void] {.async: (raises: []).} =
+  try:
+    while not m.quicSession.atEof:
+      let incomingStream = await m.quicSession.getStream(Direction.In)
+      asyncSpawn m.handleStream(incomingStream)
+  except CatchableError as exc:
+    trace "Exception in mplex handler", msg = exc.msg
+
+method close*(m: QuicMuxer) {.async: (raises: []).} =
+  try:
+    await m.quicSession.close()
+    m.handleFut.cancel()
+  except CatchableError as exc:
+    discard
+
+# Transport
+type QuicUpgrade = ref object of Upgrade
+
+type QuicTransport* = ref object of Transport
+  listener: Listener
+  connections: seq[P2PConnection]
+
+func new*(_: type QuicTransport, u: Upgrade): QuicTransport =
+  QuicTransport(upgrader: QuicUpgrade(ms: u.ms))
+
+method handles*(transport: QuicTransport, address: MultiAddress): bool =
+  if not procCall Transport(transport).handles(address):
+    return false
+  QUIC_V1.match(address)
+
+method start*(transport: QuicTransport, addrs: seq[MultiAddress]) {.async.} =
+  doAssert transport.listener.isNil, "start() already called"
+  #TODO handle multiple addr
+  transport.listener = listen(initTAddress(addrs[0]).tryGet)
+  await procCall Transport(transport).start(addrs)
+  transport.addrs[0] =
+    MultiAddress.init(transport.listener.localAddress(), IPPROTO_UDP).tryGet() &
+    MultiAddress.init("/quic-v1").get()
+  transport.running = true
+
+method stop*(transport: QuicTransport) {.async.} =
+  if transport.running:
+    for c in transport.connections:
+      await c.close()
+    await procCall Transport(transport).stop()
+    await transport.listener.stop()
+    transport.running = false
+    transport.listener = nil
+
+proc wrapConnection(
+    transport: QuicTransport, connection: QuicConnection
+): P2PConnection {.raises: [Defect, TransportOsError, LPError].} =
+  let
+    remoteAddr = connection.remoteAddress()
+    observedAddr =
+      MultiAddress.init(remoteAddr, IPPROTO_UDP).get() &
+      MultiAddress.init("/quic-v1").get()
+    conres = QuicSession(connection: connection, observedAddr: Opt.some(observedAddr))
+  conres.initStream()
+
+  transport.connections.add(conres)
+  proc onClose() {.async.} =
+    await conres.join()
+    transport.connections.keepItIf(it != conres)
+    trace "Cleaned up client"
+
+  asyncSpawn onClose()
+  return conres
+
+method accept*(transport: QuicTransport): Future[P2PConnection] {.async.} =
+  doAssert not transport.listener.isNil, "call start() before calling accept()"
+  let connection = await transport.listener.accept()
+  return transport.wrapConnection(connection)
+
+method dial*(
+    transport: QuicTransport,
+    hostname: string,
+    address: MultiAddress,
+    peerId: Opt[PeerId] = Opt.none(PeerId),
+): Future[P2PConnection] {.async, gcsafe.} =
+  let connection = await dial(initTAddress(address).tryGet)
+  return transport.wrapConnection(connection)
+
+method upgrade*(
+    self: QuicTransport, conn: P2PConnection, peerId: Opt[PeerId]
+): Future[Muxer] {.async: (raises: [CancelledError, LPError]).} =
+  let qs = QuicSession(conn)
+  if peerId.isSome:
+    qs.peerId = peerId.get()
+
+  let muxer = QuicMuxer(quicSession: qs, connection: conn)
+  muxer.streamHandler = proc(conn: P2PConnection) {.async: (raises: []).} =
+    trace "Starting stream handler"
+    try:
+      await self.upgrader.ms.handle(conn) # handle incoming connection
+    except CancelledError as exc:
+      return
+    except CatchableError as exc:
+      trace "exception in stream handler", conn, msg = exc.msg
+    finally:
+      await conn.closeWithEOF()
+    trace "Stream handler done", conn
+  muxer.handleFut = muxer.handle()
+  return muxer
libp2p/transports/tcptransport.nim

@@ -226,10 +226,17 @@ method accept*(self: TcpTransport): Future[Connection] =
   proc impl(
       self: TcpTransport
   ): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
+    proc cancelAcceptFuts() =
+      for fut in self.acceptFuts:
+        if not fut.completed():
+          fut.cancel()
+
     if not self.running:
       raise newTransportClosedError()

-    if self.acceptFuts.len <= 0:
+    if self.servers.len == 0:
+      raise (ref TcpTransportError)(msg: "No listeners configured")
+    elif self.acceptFuts.len == 0:
       # Holds futures representing ongoing accept calls on multiple servers.
       self.acceptFuts = self.servers.mapIt(it.accept())
@@ -239,7 +246,10 @@ method accept*(self: TcpTransport): Future[Connection] =
         # Waits for any one of these futures to complete, indicating that a new connection has been accepted on one of the servers.
         await one(self.acceptFuts)
       except ValueError:
-        raise (ref TcpTransportError)(msg: "No listeners configured")
+        raiseAssert "Accept futures should not be empty"
+      except CancelledError as exc:
+        cancelAcceptFuts()
+        raise exc
     index = self.acceptFuts.find(finished)

     # A new connection has been accepted. The corresponding server should immediately start accepting another connection.
@@ -261,6 +271,7 @@ method accept*(self: TcpTransport): Future[Connection] =
       except common.TransportError as exc: # Needed for chronos 4.0.0 support
         raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
       except CancelledError as exc:
+        cancelAcceptFuts()
         raise exc

     if not self.running: # Stopped while waiting
libp2p/stream/chronosstream.nim

@@ -56,7 +56,7 @@ proc new*(
   stream.initStream()
   return stream

-template mapExceptions(body: untyped) =
+template mapExceptions(body: untyped): untyped =
   try:
     body
   except AsyncStreamIncompleteError:
libp2p/utility.nim

@@ -59,7 +59,8 @@ when defined(libp2p_agents_metrics):
     KnownLibP2PAgents* {.strdefine.} = "nim-libp2p"
     KnownLibP2PAgentsSeq* = KnownLibP2PAgents.safeToLowerAscii().tryGet().split(",")

-template safeConvert*[T: SomeInteger, S: Ordinal](value: S): T =
+proc safeConvert*[T: SomeInteger](value: SomeOrdinal): T =
+  type S = typeof(value)
   ## Converts `value` from S to `T` iff `value` is guaranteed to be preserved.
   when int64(T.low) <= int64(S.low()) and uint64(T.high) >= uint64(S.high):
     T(value)
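With the proc form, the source type no longer needs to be spelled out at the call site; it is inferred from the argument, as the updated tests further below show. A small sketch:

  let widened: uint16 = safeConvert[uint16](255'u8) # every uint8 value fits in uint16
  # safeConvert[uint8](256'u16) no longer compiles: uint16 does not fit in uint8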
libp2p/wire.nim

@@ -20,7 +20,7 @@ when defined(windows): import winlean else: import posix
 const
   RTRANSPMA* = mapOr(TCP, WebSockets, UNIX)

-  TRANSPMA* = mapOr(RTRANSPMA, UDP)
+  TRANSPMA* = mapOr(RTRANSPMA, QUIC, UDP)

@@ -28,7 +28,7 @@ proc initTAddress*(ma: MultiAddress): MaResult[TransportAddress] =
   ## Initialize ``TransportAddress`` with MultiAddress ``ma``.
   ##
   ## MultiAddress must be wire address, e.g. ``{IP4, IP6, UNIX}/{TCP, UDP}``.
   ##

-  if mapOr(TCP_IP, WebSockets_IP, UNIX, UDP_IP).match(ma):
+  if mapOr(TCP_IP, WebSockets_IP, UNIX, UDP_IP, QUIC_V1_IP).match(ma):
     var pbuf: array[2, byte]
     let code = (?(?ma[0]).protoCode())
     if code == multiCodec("unix"):
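For example (the address is illustrative), a QUIC-v1 multiaddress now resolves to a UDP transport address:

  let ma = MultiAddress.init("/ip4/127.0.0.1/udp/4242/quic-v1").tryGet()
  let ta = initTAddress(ma).tryGet() # 127.0.0.1:4242, UDP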
tests/config.nims

@@ -17,10 +17,6 @@ import strutils, os
   libp2p_mplex_metrics
 --d:
   unittestPrintTime
---skipParentCfg
---mm:
-  refc
-  # reconsider when there's a version-2-2 branch worth testing with as we might switch to orc

 # Only add chronicles param if the
 # user didn't specify any
tests/pubsub/testgossipinternal.nim

@@ -677,55 +677,76 @@ suite "GossipSub internal":
     gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
     gossipSub.subscribe(topic, handler2)

+    # Instantiates 30 peers and connects all of them to the previously defined `gossipSub`
     for i in 0 ..< 30:
+      # Define a new connection
       let conn = TestBufferStream.new(noop)
       conns &= conn
       let peerId = randomPeerId()
       conn.peerId = peerId
       let peer = gossipSub.getPubSubPeer(peerId)
       peer.handler = handler
+      # Add the connection to `gossipSub`, to their `gossipSub.gossipsub` and `gossipSub.mesh` tables
       gossipSub.grafted(peer, topic)
       gossipSub.mesh[topic].incl(peer)

-    block:
-      # should ignore no budget peer
-      let conn = TestBufferStream.new(noop)
-      conns &= conn
-      let peerId = randomPeerId()
-      conn.peerId = peerId
-      let peer = gossipSub.getPubSubPeer(peerId)
-      let id = @[0'u8, 1, 2, 3]
-      let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
-      peer.iHaveBudget = 0
-      let iwants = gossipSub.handleIHave(peer, @[msg])
-      check:
-        iwants.messageIDs.len == 0
-
-    block:
-      # given duplicate ihave should generate only one iwant
-      let conn = TestBufferStream.new(noop)
-      conns &= conn
-      let peerId = randomPeerId()
-      conn.peerId = peerId
-      let peer = gossipSub.getPubSubPeer(peerId)
-      let id = @[0'u8, 1, 2, 3]
-      let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
-      let iwants = gossipSub.handleIHave(peer, @[msg])
-      check:
-        iwants.messageIDs.len == 1
-
-    block:
-      # given duplicate iwant should generate only one message
+    # Peers with no budget should not request messages
+    block:
+      # Define a new connection
+      let conn = TestBufferStream.new(noop)
+      conns &= conn
+      let peerId = randomPeerId()
+      conn.peerId = peerId
+      let peer = gossipSub.getPubSubPeer(peerId)
+      # Add message to `gossipSub`'s message cache
+      let id = @[0'u8, 1, 2, 3]
+      gossipSub.mcache.put(id, Message())
+      peer.sentIHaves[^1].incl(id)
+      # Build an IHAVE message that contains the same message ID three times
+      let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
+      # Given the peer has no budget to request messages
+      peer.iHaveBudget = 0
+      # When a peer makes an IHAVE request for the a message that `gossipSub` has
+      let iwants = gossipSub.handleIHave(peer, @[msg])
+      # Then `gossipSub` should not generate an IWant message for the message,
+      check:
+        iwants.messageIDs.len == 0
+
+    # Peers with budget should request messages. If ids are repeated, only one request should be generated
+    block:
+      # Define a new connection
+      let conn = TestBufferStream.new(noop)
+      conns &= conn
+      let peerId = randomPeerId()
+      conn.peerId = peerId
+      let peer = gossipSub.getPubSubPeer(peerId)
+      let id = @[0'u8, 1, 2, 3]
+      # Build an IHAVE message that contains the same message ID three times
+      let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
+      # Given the budget is not 0 (because it's not been overridden)
+      # When a peer makes an IHAVE request for the a message that `gossipSub` does not have
+      let iwants = gossipSub.handleIHave(peer, @[msg])
+      # Then `gossipSub` should generate an IWant message for the message
+      check:
+        iwants.messageIDs.len == 1
+
+    # Peers with budget should request messages. If ids are repeated, only one request should be generated
+    block:
+      # Define a new connection
       let conn = TestBufferStream.new(noop)
       conns &= conn
       let peerId = randomPeerId()
       conn.peerId = peerId
       let peer = gossipSub.getPubSubPeer(peerId)
+      # Add message to `gossipSub`'s message cache
       let id = @[0'u8, 1, 2, 3]
       gossipSub.mcache.put(id, Message())
       peer.sentIHaves[^1].incl(id)
+      # Build an IWANT message that contains the same message ID three times
       let msg = ControlIWant(messageIDs: @[id, id, id])
+      # When a peer makes an IWANT request for the a message that `gossipSub` has
       let genmsg = gossipSub.handleIWant(peer, @[msg])
+      # Then `gossipSub` should return the message
       check:
         genmsg.len == 1
@@ -773,7 +794,7 @@ suite "GossipSub internal":
     var sentMessages = initHashSet[seq[byte]]()

     for i, size in enumerate([size1, size2]):
-      let data = newSeqWith[byte](size, i.byte)
+      let data = newSeqWith(size, i.byte)
       sentMessages.incl(data)

       let msg =
tests/pubsub/testgossipsub.nim

@@ -1059,7 +1059,7 @@ suite "GossipSub":

       # Simulate sending an undecodable message
       await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(
-        newSeqWith[byte](33, 1.byte), isHighPriority = true
+        newSeqWith(33, 1.byte), isHighPriority = true
       )
       await sleepAsync(300.millis)
@@ -1069,7 +1069,7 @@ suite "GossipSub":
       # Disconnect peer when rate limiting is enabled
       gossip1.parameters.disconnectPeerAboveRateLimit = true
       await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(
-        newSeqWith[byte](35, 1.byte), isHighPriority = true
+        newSeqWith(35, 1.byte), isHighPriority = true
       )

       checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
tests/testmultiaddress.nim

@@ -26,10 +26,11 @@ const
   SuccessVectors = [
     "/ip4/1.2.3.4", "/ip4/0.0.0.0", "/ip6/::1",
     "/ip6/2601:9:4f81:9700:803e:ca65:66e8:c21",
-    "/ip6/2601:9:4f81:9700:803e:ca65:66e8:c21/udp/1234/quic", "/ip6zone/x/ip6/fe80::1",
-    "/ip6zone/x%y/ip6/fe80::1", "/ip6zone/x%y/ip6/::",
-    "/ip6zone/x/ip6/fe80::1/udp/1234/quic", "/onion/timaq4ygg2iegci7:1234",
-    "/onion/timaq4ygg2iegci7:80/http",
+    "/ip6/2601:9:4f81:9700:803e:ca65:66e8:c21/udp/1234/quic",
+    "/ip6/2601:9:4f81:9700:803e:ca65:66e8:c21/udp/1234/quic-v1",
+    "/ip6zone/x/ip6/fe80::1", "/ip6zone/x%y/ip6/fe80::1", "/ip6zone/x%y/ip6/::",
+    "/ip6zone/x/ip6/fe80::1/udp/1234/quic", "/ip6zone/x/ip6/fe80::1/udp/1234/quic-v1",
+    "/onion/timaq4ygg2iegci7:1234", "/onion/timaq4ygg2iegci7:80/http",
     "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:1234",
     "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:80/http",
     "/udp/0", "/tcp/0", "/sctp/0", "/udp/1234", "/tcp/1234", "/sctp/1234", "/udp/65535",
@@ -57,7 +58,7 @@ const
   FailureVectors = [
     "", "/", "/ip4", "/ip4/::1", "/ip4/fdpsofodsajfdoisa", "/ip6", "/ip6zone",
     "/ip6zone/", "/ip6zone//ip6/fe80::1", "/udp", "/tcp", "/sctp", "/udp/65536",
-    "/tcp/65536", "/quic/65536", "/onion/9imaq4ygg2iegci7:80",
+    "/tcp/65536", "/quic/65536", "/quic-v1/65536", "/onion/9imaq4ygg2iegci7:80",
     "/onion/aaimaq4ygg2iegci7:80", "/onion/timaq4ygg2iegci7:0",
     "/onion/timaq4ygg2iegci7:-1", "/onion/timaq4ygg2iegci7",
     "/onion/timaq4ygg2iegci@:666",
@@ -70,8 +71,8 @@ const
     "/udp/1234/sctp", "/udp/1234/udt/1234", "/udp/1234/utp/1234",
     "/ip4/127.0.0.1/udp/jfodsajfidosajfoidsa", "/ip4/127.0.0.1/udp",
     "/ip4/127.0.0.1/tcp/jfodsajfidosajfoidsa", "/ip4/127.0.0.1/tcp",
-    "/ip4/127.0.0.1/quic/1234", "/ip4/127.0.0.1/ipfs", "/ip4/127.0.0.1/ipfs/tcp",
-    "/ip4/127.0.0.1/p2p", "/ip4/127.0.0.1/p2p/tcp", "/unix",
+    "/ip4/127.0.0.1/quic/1234", "/ip4/127.0.0.1/quic-v1/1234", "/ip4/127.0.0.1/ipfs",
+    "/ip4/127.0.0.1/ipfs/tcp", "/ip4/127.0.0.1/p2p", "/ip4/127.0.0.1/p2p/tcp", "/unix",
   ]
@@ -160,6 +161,15 @@ const
         "/quic",
       ],
     ),
+    PatternVector(
+      pattern: QUIC_V1,
+      good: @["/ip4/1.2.3.4/udp/1234/quic-v1", "/ip6/::/udp/1234/quic-v1"],
+      bad:
+        @[
+          "/ip4/0.0.0.0/tcp/12345/quic-v1", "/ip6/fc00::/ip4/0.0.0.0/udp/1234/quic-v1",
+          "/quic-v1",
+        ],
+    ),
     PatternVector(
       pattern: IPFS,
       good:
tests/testquic.nim (new file, 24 lines)
@@ -0,0 +1,24 @@
+{.used.}
+
+import sequtils
+import chronos, stew/byteutils
+import
+  ../libp2p/[
+    stream/connection,
+    transports/transport,
+    transports/quictransport,
+    upgrademngrs/upgrade,
+    multiaddress,
+    errors,
+    wire,
+  ]
+
+import ./helpers, ./commontransport
+
+suite "Quic transport":
+  asyncTest "can handle local address":
+    let ma = @[MultiAddress.init("/ip4/127.0.0.1/udp/0/quic-v1").tryGet()]
+    let transport1 = QuicTransport.new()
+    await transport1.start(ma)
+    check transport1.handles(transport1.addrs[0])
+    await transport1.stop()
tests/testrendezvous.nim

@@ -126,7 +126,7 @@ suite "RendezVous":

   asyncTest "Various local error":
     let
-      rdv = RendezVous.new()
+      rdv = RendezVous.new(minDuration = 1.minutes, maxDuration = 72.hours)
      switch = createSwitch(rdv)
    expect RendezVousError:
      discard await rdv.request("A".repeat(300))
@@ -137,6 +137,14 @@ suite "RendezVous":
     expect RendezVousError:
       await rdv.advertise("A".repeat(300))
     expect RendezVousError:
-      await rdv.advertise("A", 2.weeks)
+      await rdv.advertise("A", 73.hours)
     expect RendezVousError:
-      await rdv.advertise("A", 5.minutes)
+      await rdv.advertise("A", 30.seconds)
+
+  test "Various config error":
+    expect RendezVousError:
+      discard RendezVous.new(minDuration = 30.seconds)
+    expect RendezVousError:
+      discard RendezVous.new(maxDuration = 73.hours)
+    expect RendezVousError:
+      discard RendezVous.new(minDuration = 15.minutes, maxDuration = 10.minutes)
tests/testswitch.nim

@@ -34,6 +34,7 @@ import
     utils/semaphore,
     transports/tcptransport,
     transports/wstransport,
+    transports/quictransport,
   ]
 import ./helpers

@@ -988,6 +989,42 @@ suite "Switch":
     await srcWsSwitch.stop()
     await srcTcpSwitch.stop()

+  asyncTest "e2e quic transport":
+    let
+      quicAddress1 = MultiAddress.init("/ip4/127.0.0.1/udp/0/quic-v1").tryGet()
+      quicAddress2 = MultiAddress.init("/ip4/127.0.0.1/udp/0/quic-v1").tryGet()
+
+      srcSwitch = SwitchBuilder
+        .new()
+        .withAddress(quicAddress1)
+        .withRng(crypto.newRng())
+        .withTransport(
+          proc(upgr: Upgrade): Transport =
+            QuicTransport.new(upgr)
+        )
+        .withNoise()
+        .build()
+
+      destSwitch = SwitchBuilder
+        .new()
+        .withAddress(quicAddress2)
+        .withRng(crypto.newRng())
+        .withTransport(
+          proc(upgr: Upgrade): Transport =
+            QuicTransport.new(upgr)
+        )
+        .withNoise()
+        .build()
+
+    await destSwitch.start()
+    await srcSwitch.start()
+
+    await srcSwitch.connect(destSwitch.peerInfo.peerId, destSwitch.peerInfo.addrs)
+    check srcSwitch.isConnected(destSwitch.peerInfo.peerId)
+
+    await destSwitch.stop()
+    await srcSwitch.stop()
+
   asyncTest "mount unstarted protocol":
     proc handle(conn: Connection, proto: string) {.async.} =
       check "test123" == string.fromBytes(await conn.readLp(1024))
tests/testutility.nim

@@ -17,46 +17,42 @@ import ../libp2p/utility
 suite "Utility":

   test "successful safeConvert from int8 to int16":
-    let res = safeConvert[int16, int8]((-128).int8)
+    let res = safeConvert[int16]((-128).int8)
     check res == -128'i16

   test "unsuccessful safeConvert from int16 to int8":
     check not (compiles do:
-      result: int8 = safeConvert[int8, int16](32767'i16))
+      result: int8 = safeConvert[int8](32767'i16))

   test "successful safeConvert from uint8 to uint16":
-    let res: uint16 = safeConvert[uint16, uint8](255'u8)
+    let res: uint16 = safeConvert[uint16](255'u8)
     check res == 255'u16

   test "unsuccessful safeConvert from uint16 to uint8":
     check not (compiles do:
-      result: uint8 = safeConvert[uint8, uint16](256'u16))
-
-  test "successful safeConvert from char to int":
-    let res: int = safeConvert[int, char]('A')
-    check res == 65
+      result: uint8 = safeConvert[uint8](256'u16))

   test "unsuccessful safeConvert from int to char":
     check not (compiles do:
-      result: char = safeConvert[char, int](128))
+      result: char = safeConvert[char](128))

   test "successful safeConvert from bool to int":
-    let res: int = safeConvert[int, bool](true)
+    let res: int = safeConvert[int](true)
     check res == 1

   test "unsuccessful safeConvert from int to bool":
     check not (compiles do:
-      result: bool = safeConvert[bool, int](2))
+      result: bool = safeConvert[bool](2))

   test "successful safeConvert from enum to int":
     type Color = enum red, green, blue
-    let res: int = safeConvert[int, Color](green)
+    let res: int = safeConvert[int](green)
     check res == 1

   test "unsuccessful safeConvert from int to enum":
     type Color = enum red, green, blue
     check not (compiles do:
-      result: Color = safeConvert[Color, int](3))
+      result: Color = safeConvert[Color](3))

   test "successful safeConvert from range to int":
     let res: int = safeConvert[int, range[1..10]](5)
@@ -68,11 +64,11 @@ suite "Utility":

   test "unsuccessful safeConvert from int to uint":
     check not (compiles do:
-      result: uint = safeConvert[uint, int](11))
+      result: uint = safeConvert[uint](11))

   test "unsuccessful safeConvert from uint to int":
     check not (compiles do:
-      result: uint = safeConvert[int, uint](11.uint))
+      result: uint = safeConvert[int](11.uint))

 suite "withValue and valueOr templates":
   type