Mirror of https://github.com/vacp2p/nim-libp2p.git (synced 2026-01-10 10:37:55 -05:00)

Compare commits: chronos-3-... fix-sendMs

65 Commits
Commits (SHA1):

1342b45b29, a65ce6a47a, fdf53d18cd, 48a3ac06ff, 49a92e5641, 08a48faf41, 61b299e411, ca01ee06a8,
6c43ab3fce, ae13a0d583, 28609597d1, 8294d5b9df, 78e83889ee, 7603b8de5e, 8cccd54125, 18e00a741b,
ee264fdf11, 9059a8aced, 0b753e7cf2, d43c5feab0, 1609fd7197, 42cd78e95b, 44cada9c55, 6c873481ac,
d08ce17144, bd6ead95ef, 53e3825e07, e9b456162a, 250024f6cc, fec632d28d, 349496e40f, 7faa0fac23,
c5e4f8e12d, fe4ff79885, aa4ebb0b3c, e0f70b7177, c1dfd58772, 04af0c4323, eb0890cd6f, 9bc5ec1566,
5594bcb33e, d46bcdb6ac, 9468bb6b4d, 2725be64ba, e3c967ad19, d2c98bd87d, 3011ba4326, c6566707fa,
3be681ec4d, 2ede0fa40c, 7c195ab927, 3230407ffe, deb72c8580, ce0685c272, 1f4b090227, fb05f5ae22,
e12f65f193, 4b3bc4f819, 6791f5e7bb, 08d9c84aca, 4e7eaba67a, 5f7a3ab829, ebef85c9d7, 3fc1236659,
fc4e9a8bb8
.github/workflows/daily.yml (vendored, new file, 12 lines)

@@ -0,0 +1,12 @@
name: Daily
on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  call-multi-nim-common:
    uses: ./.github/workflows/daily_common.yml
    with:
      nim-branch: "['version-1-6','version-2-0']"
      cpu: "['amd64']"
.github/workflows/daily_common.yml (vendored, new file, 84 lines)

@@ -0,0 +1,84 @@
name: daily-common

on:
  workflow_call:
    inputs:
      nim-branch:
        description: 'Nim branch'
        required: true
        type: string
      cpu:
        description: 'CPU'
        required: true
        type: string
      exclude:
        description: 'Exclude matrix configurations'
        required: false
        type: string
        default: "[]"

jobs:
  delete-cache:
    runs-on: ubuntu-latest
    steps:
      - uses: snnaplab/delete-branch-cache-action@v1

  build:
    needs: delete-cache
    timeout-minutes: 120
    strategy:
      fail-fast: false
      matrix:
        platform:
          - os: linux
            builder: ubuntu-20
            shell: bash
          - os: macos
            builder: macos-12
            shell: bash
          - os: windows
            builder: windows-2019
            shell: msys2 {0}
        branch: ${{ fromJSON(inputs.nim-branch) }}
        cpu: ${{ fromJSON(inputs.cpu) }}
        exclude: ${{ fromJSON(inputs.exclude) }}

    defaults:
      run:
        shell: ${{ matrix.platform.shell }}

    name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.branch }})'
    runs-on: ${{ matrix.platform.builder }}
    continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          os: ${{ matrix.platform.os }}
          shell: ${{ matrix.platform.shell }}
          nim_branch: ${{ matrix.branch }}
          cpu: ${{ matrix.cpu }}

      - name: Setup Go
        uses: actions/setup-go@v4
        with:
          go-version: '~1.15.5'
          cache: false

      - name: Install p2pd
        run: |
          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3

      - name: Run tests
        run: |
          nim --version
          nimble --version
          nimble install -y --depsOnly
          NIMFLAGS="${NIMFLAGS} --mm:refc" nimble test
          if [[ "${{ matrix.branch }}" == "devel" ]]; then
            echo -e "\nTesting with '--mm:orc':\n"
            NIMFLAGS="${NIMFLAGS} --mm:orc" nimble test
          fi
.github/workflows/daily_i386.yml (vendored, new file, 13 lines)

@@ -0,0 +1,13 @@
name: Daily i386
on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  call-multi-nim-common:
    uses: ./.github/workflows/daily_common.yml
    with:
      nim-branch: "['version-1-6','version-2-0', 'devel']"
      cpu: "['i386']"
      exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"
.github/workflows/daily_nim_devel.yml (vendored, new file, 12 lines)

@@ -0,0 +1,12 @@
name: Daily Nim Devel
on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  call-multi-nim-common:
    uses: ./.github/workflows/daily_common.yml
    with:
      nim-branch: "['devel']"
      cpu: "['amd64']"
.github/workflows/doc.yml (vendored, 4 changes)

@@ -19,13 +19,13 @@ jobs:

    - uses: jiro4989/setup-nim-action@v1
      with:
        nim-version: 'stable'
        nim-version: '1.6.x'

    - name: Generate doc
      run: |
        nim --version
        nimble --version
        nimble install_pinned -y
        nimble install_pinned
        # nim doc can "fail", but the doc is still generated
        nim doc --git.url:https://github.com/status-im/nim-libp2p --git.commit:${GITHUB_REF##*/} --outdir:${GITHUB_REF##*/} --project libp2p || true
.github/workflows/interop.yml (vendored, 58 changes)

@@ -11,44 +11,30 @@ concurrency:
  cancel-in-progress: true

jobs:
  run-multidim-interop:
    name: Run multidimensional interoperability tests
  run-transport-interop:
    name: Run transport interoperability tests
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
        with:
          repository: libp2p/test-plans
          submodules: true
          fetch-depth: 0

      - uses: actions/checkout@v4
      - uses: docker/setup-buildx-action@v3
      - name: Build image
        run: >
          cd transport-interop/impl/nim/v1.0 &&
          make commitSha=$GITHUB_SHA image_name=nim-libp2p-head

      - name: Create ping-version.json
        run: >
          (cat << EOF
          {
            "id": "nim-libp2p-head",
            "containerImageID": "nim-libp2p-head",
            "transports": [
              "tcp",
              "ws"
            ],
            "secureChannels": [
              "noise"
            ],
            "muxers": [
              "mplex",
              "yamux"
            ]
          }
          EOF

          ) > ${{ github.workspace }}/test_head.json

      - uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
        run: docker buildx build --load -t nim-libp2p-head -f tests/transport-interop/Dockerfile .
      - name: Run tests
        uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
        with:
          test-filter: nim-libp2p-head
          extra-versions: ${{ github.workspace }}/test_head.json
          extra-versions: ${{ github.workspace }}/tests/transport-interop/version.json

  run-hole-punching-interop:
    name: Run hole-punching interoperability tests
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v4
      - uses: docker/setup-buildx-action@v3
      - name: Build image
        run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
      - name: Run tests
        uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
        with:
          test-filter: nim-libp2p-head
          extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json
.github/workflows/multi_nim.yml (vendored, deleted file, 82 lines)

@@ -1,82 +0,0 @@
name: Daily
on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  delete-cache:
    runs-on: ubuntu-latest
    steps:
      - uses: snnaplab/delete-branch-cache-action@v1

  build:
    needs: delete-cache
    timeout-minutes: 120
    strategy:
      fail-fast: false
      matrix:
        target:
          - os: linux
            cpu: amd64
          - os: linux
            cpu: i386
          - os: macos
            cpu: amd64
          - os: windows
            cpu: amd64
          #- os: windows
          #cpu: i386
        branch: [version-1-6, version-2-0, devel]
        include:
          - target:
              os: linux
            builder: ubuntu-20.04
            shell: bash
          - target:
              os: macos
            builder: macos-12
            shell: bash
          - target:
              os: windows
            builder: windows-2019
            shell: msys2 {0}

    defaults:
      run:
        shell: ${{ matrix.shell }}

    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
    runs-on: ${{ matrix.builder }}
    continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          os: ${{ matrix.target.os }}
          shell: ${{ matrix.shell }}
          nim_branch: ${{ matrix.branch }}
          cpu: ${{ matrix.target.cpu }}

      - name: Setup Go
        uses: actions/setup-go@v2
        with:
          go-version: '~1.15.5'

      - name: Install p2pd
        run: |
          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3

      - name: Run tests
        run: |
          nim --version
          nimble --version
          nimble install -y --depsOnly
          NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
          if [[ "${{ matrix.branch }}" == "devel" ]]; then
            echo -e "\nTesting with '--gc:orc':\n"
            NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
          fi
.gitignore (vendored, 1 change)

@@ -16,3 +16,4 @@ tests/pubsub/testgossipsub
examples/*.md
nimble.develop
nimble.paths
go-libp2p-daemon/
.pinned (2 changes)

@@ -1,6 +1,6 @@
bearssl;https://github.com/status-im/nim-bearssl@#e4157639db180e52727712a47deaefcbbac6ec86
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#ba143e029f35fd9b4cd3d89d007cc834d0d5ba3c
chronos;https://github.com/status-im/nim-chronos@#672db137b7cad9b384b8f4fb551fb6bbeaabfe1b
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
@@ -20,6 +20,7 @@
- [Background](#background)
- [Install](#install)
- [Getting Started](#getting-started)
- [Go-libp2p-daemon](#go-libp2p-daemon)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)

@@ -40,6 +41,8 @@ Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p'
## Install
**Prerequisite**
- [Nim](https://nim-lang.org/install.html)
> The currently supported Nim version is 1.6.18.

```
nimble install libp2p
```

@@ -47,11 +50,11 @@ nimble install libp2p
## Getting Started
You'll find the nim-libp2p documentation [here](https://status-im.github.io/nim-libp2p/docs/).

**Go Daemon:**
Please find the installation and usage intructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
### Testing
Remember you'll need to build the `go-libp2p-daemon` binary to run the `nim-libp2p` tests.
To do so, please follow the installation instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).

## Modules

List of packages modules implemented in nim-libp2p:

| Name | Description |
@@ -1,6 +1,8 @@
# Table of Contents
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Script](#script)
- [Usage](#usage)
- [Example](#example)
- [Getting Started](#getting-started)

@@ -8,26 +10,29 @@
# Introduction
This is a libp2p-backed daemon wrapping the functionalities of go-libp2p for use in Nim. <br>
For more information about the go daemon, check out [this repository](https://github.com/libp2p/go-libp2p-daemon).
> **Required only** for running the tests.

# Prerequisites
Go with version `1.15.15`.
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.

# Installation
Follow one of the methods below:

## Script
Run the build script while having the `go` command pointing to the correct Go version.
We recommend using `1.15.15`, as previously stated.
```sh
# clone and install dependencies
git clone https://github.com/status-im/nim-libp2p
cd nim-libp2p
nimble install

# perform unit tests
nimble test

# update the git submodule to install the go daemon
git submodule update --init --recursive
go version
git clone https://github.com/libp2p/go-libp2p-daemon
cd go-libp2p-daemon
git checkout v0.0.1
go install ./...
cd ..
./scripts/build_p2pd.sh
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the correct directory.
If you find any issues, please head into our discord and ask for our asistance.

After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
```sh
export PATH="$PATH:$HOME/go/bin"
```
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.

# Usage
@@ -13,7 +13,7 @@ type
proc new(T: typedesc[TestProto]): T =

  # every incoming connections will be in handled in this closure
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
  proc handle(conn: Connection, proto: string) {.async.} =
    echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
    await conn.writeLp("Roger p2p!")

@@ -40,7 +40,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
# The actual application
##
proc main() {.async, gcsafe.} =
proc main() {.async.} =
  let
    rng = newRng() # Single random number source for the whole application
    # port 0 will take a random available port

@@ -53,7 +53,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
##
## Let's now start to create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
  let
    rng = newRng()
    localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
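The hunks above drop the explicit `gcsafe` pragma from the async closures; with the chronos version this branch moves to, `{.async.}` alone appears to be sufficient. A minimal sketch of the resulting handler style, assembled from the example lines above (the import list and the `TestCodec` string are assumptions for illustration, not part of the diff):

```nim
import chronos
import libp2p
import stew/byteutils

const TestCodec = "/test/proto/1.0.0"  # assumed codec string, illustration only

type TestProto = ref object of LPProtocol

proc new(T: typedesc[TestProto]): T =
  # every incoming connection is handled in this closure;
  # plain {.async.} is enough, no explicit gcsafe needed
  proc handle(conn: Connection, proto: string) {.async.} =
    echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
    await conn.writeLp("Roger p2p!")
    await conn.close()
  return T.new(codecs = @[TestCodec], handler = handle)
```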
@@ -25,7 +25,7 @@ type TestProto = ref object of LPProtocol

proc new(T: typedesc[TestProto]): T =
  # every incoming connections will in be handled in this closure
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
  proc handle(conn: Connection, proto: string) {.async.} =
    # Read up to 1024 bytes from this connection, and transform them into
    # a string
    echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))

@@ -44,7 +44,7 @@ proc hello(p: TestProto, conn: Connection) {.async.} =
## Again, pretty straight-forward, we just send a message on the connection.
##
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
  let
    rng = newRng()
    testProto = TestProto.new()
@@ -108,7 +108,7 @@ type

proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
  var res: MetricProto
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
  proc handle(conn: Connection, proto: string) {.async.} =
    let
      metrics = await res.metricGetter()
      asProtobuf = metrics.encode()

@@ -126,7 +126,7 @@ proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
  return MetricList.decode(protobuf).tryGet()

## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
  let rng = newRng()
  proc randomMetricGenerator: Future[MetricList] {.async.} =
    let metricCount = rng[].generate(uint32) mod 16
@@ -33,7 +33,7 @@ proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
const DumbCodec = "/dumb/proto/1.0.0"
type DumbProto = ref object of LPProtocol
proc new(T: typedesc[DumbProto], nodeNumber: int): T =
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
  proc handle(conn: Connection, proto: string) {.async.} =
    echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
    await conn.close()
  return T.new(codecs = @[DumbCodec], handler = handle)

@@ -49,7 +49,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
## (rendezvous in this case) as a bootnode. For this example, we'll
## create a bootnode, and then every peer will advertise itself on the
## bootnode, and use it to find other peers
proc main() {.async, gcsafe.} =
proc main() {.async.} =
  let bootNode = createSwitch()
  await bootNode.start()
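The rendezvous example above defines `DumbCodec`, `DumbProto`, and a `createSwitch` helper. A hedged sketch (not part of the diff) of how two such switches could exchange one message over that protocol; `mount`, `dial`, `start`, and `stop` are the regular nim-libp2p switch API, everything else is illustrative:

```nim
proc demo() {.async.} =
  let
    nodeA = createSwitch()            # helper from the example above
    nodeB = createSwitch()
  nodeB.mount(DumbProto.new(1))       # node 1 prints whatever it receives
  await nodeA.start()
  await nodeB.start()
  let conn = await nodeA.dial(
    nodeB.peerInfo.peerId, nodeB.peerInfo.addrs, DumbCodec)
  await conn.writeLp("hello")
  await conn.close()
  await allFutures(nodeA.stop(), nodeB.stop())

waitFor demo()
```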
@@ -143,7 +143,7 @@ proc draw(g: Game) =
## peer know that we are available, check that he is also available,
## and launch the game.
proc new(T: typedesc[GameProto], g: Game): T =
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
  proc handle(conn: Connection, proto: string) {.async.} =
    defer: await conn.closeWithEof()
    if g.peerFound.finished or g.hasCandidate:
      await conn.close()
@@ -12,12 +12,12 @@ requires "nim >= 1.6.0",
  "dnsclient >= 0.3.0 & < 0.4.0",
  "bearssl >= 0.1.4",
  "chronicles >= 0.10.2",
  "chronos >= 3.0.6",
  "chronos >= 4.0.0",
  "metrics",
  "secp256k1",
  "stew#head",
  "websock",
  "unittest2 >= 0.0.5 & <= 0.1.0"
  "unittest2"

let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -25,7 +25,7 @@ import
  muxers/[muxer, mplex/mplex, yamux/yamux],
  protocols/[identify, secure/secure, secure/noise, rendezvous],
  protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
  connmanager, upgrademngrs/muxedupgrade,
  connmanager, upgrademngrs/muxedupgrade, observedaddrmanager,
  nameresolving/nameresolver,
  errors, utility

@@ -59,6 +59,7 @@ type
    circuitRelay: Relay
    rdv: RendezVous
    services: seq[Service]
    observedAddrManager: ObservedAddrManager

proc new*(T: type[SwitchBuilder]): T {.public.} =
  ## Creates a SwitchBuilder

@@ -121,8 +122,12 @@ proc withMplex*(
  b.muxers.add(MuxerProvider.new(newMuxer, MplexCodec))
  b

proc withYamux*(b: SwitchBuilder): SwitchBuilder =
  proc newMuxer(conn: Connection): Muxer = Yamux.new(conn)
proc withYamux*(b: SwitchBuilder,
    windowSize: int = YamuxDefaultWindowSize,
    inTimeout: Duration = 5.minutes,
    outTimeout: Duration = 5.minutes): SwitchBuilder =
  proc newMuxer(conn: Connection): Muxer =
    Yamux.new(conn, windowSize, inTimeout = inTimeout, outTimeout = outTimeout)

  assert b.muxers.countIt(it.codec == YamuxCodec) == 0, "Yamux build multiple times"
  b.muxers.add(MuxerProvider.new(newMuxer, YamuxCodec))

@@ -201,6 +206,10 @@ proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
  b.services = services
  b

proc withObservedAddrManager*(b: SwitchBuilder, observedAddrManager: ObservedAddrManager): SwitchBuilder =
  b.observedAddrManager = observedAddrManager
  b

proc build*(b: SwitchBuilder): Switch
  {.raises: [LPError], public.} =

@@ -223,8 +232,13 @@ proc build*(b: SwitchBuilder): Switch
    protoVersion = b.protoVersion,
    agentVersion = b.agentVersion)

  let identify =
    if b.observedAddrManager != nil:
      Identify.new(peerInfo, b.sendSignedPeerRecord, b.observedAddrManager)
    else:
      Identify.new(peerInfo, b.sendSignedPeerRecord)

  let
    identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
    connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
    ms = MultistreamSelect.new()
    muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)

@@ -277,23 +291,24 @@ proc build*(b: SwitchBuilder): Switch
  return switch

proc newStandardSwitch*(
  privKey = none(PrivateKey),
  addrs: MultiAddress | seq[MultiAddress] = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
  secureManagers: openArray[SecureProtocol] = [
  privKey = none(PrivateKey),
  addrs: MultiAddress | seq[MultiAddress] =
    MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("valid address"),
  secureManagers: openArray[SecureProtocol] = [
    SecureProtocol.Noise,
  ],
  transportFlags: set[ServerFlags] = {},
  rng = newRng(),
  inTimeout: Duration = 5.minutes,
  outTimeout: Duration = 5.minutes,
  maxConnections = MaxConnections,
  maxIn = -1,
  maxOut = -1,
  maxConnsPerPeer = MaxConnectionsPerPeer,
  nameResolver: NameResolver = nil,
  sendSignedPeerRecord = false,
  peerStoreCapacity = 1000): Switch
  {.raises: [LPError], public.} =
  transportFlags: set[ServerFlags] = {},
  rng = newRng(),
  inTimeout: Duration = 5.minutes,
  outTimeout: Duration = 5.minutes,
  maxConnections = MaxConnections,
  maxIn = -1,
  maxOut = -1,
  maxConnsPerPeer = MaxConnectionsPerPeer,
  nameResolver: NameResolver = nil,
  sendSignedPeerRecord = false,
  peerStoreCapacity = 1000
): Switch {.raises: [LPError], public.} =
  ## Helper for common switch configurations.
  {.push warning[Deprecated]:off.}
  if SecureProtocol.Secio in secureManagers:
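The builder hunks above add tuning parameters to `withYamux` and introduce `withObservedAddrManager`. A hedged usage sketch (not part of the diff); the chained procs are the existing `SwitchBuilder` API and the parameter values are purely illustrative:

```nim
# Sketch: combining the extended builder procs shown above.
let switch = SwitchBuilder.new()
  .withRng(newRng())
  .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
  .withTcpTransport()
  .withNoise()
  .withMplex()
  .withYamux(windowSize = YamuxDefaultWindowSize,  # new optional knobs
             inTimeout = 5.minutes,
             outTimeout = 5.minutes)
  .withObservedAddrManager(ObservedAddrManager.new())
  .build()
```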
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -261,12 +261,6 @@ proc write*(vb: var VBuffer, cid: Cid) {.inline.} =
|
||||
## Write CID value ``cid`` to buffer ``vb``.
|
||||
vb.writeArray(cid.data.buffer)
|
||||
|
||||
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
|
||||
cid: Cid): string {.inline.} =
|
||||
## Get MultiBase encoded representation of ``cid`` using encoding
|
||||
## ``encoding``.
|
||||
result = MultiBase.encode(encoding, cid.data.buffer).tryGet()
|
||||
|
||||
proc hash*(cid: Cid): Hash {.inline.} =
|
||||
hash(cid.data.buffer)
|
||||
|
||||
|
||||
@@ -128,7 +128,7 @@ proc removeConnEventHandler*(c: ConnManager,
|
||||
|
||||
proc triggerConnEvent*(c: ConnManager,
|
||||
peerId: PeerId,
|
||||
event: ConnEvent) {.async, gcsafe.} =
|
||||
event: ConnEvent) {.async.} =
|
||||
try:
|
||||
trace "About to trigger connection events", peer = peerId
|
||||
if c.connEvents[event.kind].len() > 0:
|
||||
@@ -160,7 +160,7 @@ proc removePeerEventHandler*(c: ConnManager,
|
||||
|
||||
proc triggerPeerEvents*(c: ConnManager,
|
||||
peerId: PeerId,
|
||||
event: PeerEvent) {.async, gcsafe.} =
|
||||
event: PeerEvent) {.async.} =
|
||||
|
||||
trace "About to trigger peer events", peer = peerId
|
||||
if c.peerEvents[event.kind].len == 0:
|
||||
@@ -311,12 +311,14 @@ proc storeMuxer*(c: ConnManager,
|
||||
|
||||
raise newTooManyConnectionsError()
|
||||
|
||||
assert muxer notin c.muxed.getOrDefault(peerId)
|
||||
|
||||
let
|
||||
newPeer = peerId notin c.muxed
|
||||
assert newPeer or c.muxed[peerId].len > 0
|
||||
c.muxed.mgetOrPut(peerId, newSeq[Muxer]()).add(muxer)
|
||||
var newPeer = false
|
||||
c.muxed.withValue(peerId, muxers):
|
||||
doAssert muxers[].len > 0
|
||||
doAssert muxer notin muxers[]
|
||||
muxers[].add(muxer)
|
||||
do:
|
||||
c.muxed[peerId] = @[muxer]
|
||||
newPeer = true
|
||||
libp2p_peers.set(c.muxed.len.int64)
|
||||
|
||||
asyncSpawn c.triggerConnEvent(
|
||||
@@ -379,7 +381,7 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
|
||||
cs.trackConnection(mux.connection)
|
||||
|
||||
proc getStream*(c: ConnManager,
|
||||
muxer: Muxer): Future[Connection] {.async, gcsafe.} =
|
||||
muxer: Muxer): Future[Connection] {.async.} =
|
||||
## get a muxed stream for the passed muxer
|
||||
##
|
||||
|
||||
@@ -387,7 +389,7 @@ proc getStream*(c: ConnManager,
|
||||
return await muxer.newStream()
|
||||
|
||||
proc getStream*(c: ConnManager,
|
||||
peerId: PeerId): Future[Connection] {.async, gcsafe.} =
|
||||
peerId: PeerId): Future[Connection] {.async.} =
|
||||
## get a muxed stream for the passed peer from any connection
|
||||
##
|
||||
|
||||
@@ -395,7 +397,7 @@ proc getStream*(c: ConnManager,
|
||||
|
||||
proc getStream*(c: ConnManager,
|
||||
peerId: PeerId,
|
||||
dir: Direction): Future[Connection] {.async, gcsafe.} =
|
||||
dir: Direction): Future[Connection] {.async.} =
|
||||
## get a muxed stream for the passed peer from a connection with `dir`
|
||||
##
|
||||
|
||||
|
||||
@@ -553,7 +553,7 @@ proc getSocket(pattern: string,
|
||||
closeSocket(sock)
|
||||
|
||||
# This is forward declaration needed for newDaemonApi()
|
||||
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async, gcsafe.}
|
||||
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.}
|
||||
|
||||
proc copyEnv(): StringTableRef =
|
||||
## This procedure copy all environment variables into StringTable.
|
||||
@@ -755,7 +755,7 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},
|
||||
|
||||
# Starting daemon process
|
||||
# echo "Starting ", cmd, " ", args.join(" ")
|
||||
api.process =
|
||||
api.process =
|
||||
exceptionToAssert:
|
||||
startProcess(cmd, "", args, env, {poParentStreams})
|
||||
# Waiting until daemon will not be bound to control socket.
|
||||
@@ -1032,7 +1032,7 @@ proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
|
||||
var value: seq[byte]
|
||||
if pbDhtResponse.getRequiredField(3, value).isErr():
|
||||
raise newException(DaemonLocalError, "Missing required DHT field `value`!")
|
||||
|
||||
|
||||
return initProtoBuffer(value)
|
||||
else:
|
||||
raise newException(DaemonLocalError, "Wrong message type!")
|
||||
|
||||
@@ -247,7 +247,7 @@ proc toString*(msg: ProtoMessage, dump = true): string =
|
||||
else: "[REMOTE]"
|
||||
local & direction & remote
|
||||
let seqid = block:
|
||||
msg.seqID.wihValue(seqid): "seqID = " & $seqid & " "
|
||||
msg.seqID.withValue(seqid): "seqID = " & $seqid & " "
|
||||
else: ""
|
||||
let mtype = block:
|
||||
msg.mtype.withValue(typ): "type = " & $typ & " "
|
||||
|
||||
@@ -26,7 +26,7 @@ method connect*(
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out) {.async, base.} =
|
||||
dir = Direction.Out) {.async, base.} =
|
||||
## connect remote peer without negotiating
|
||||
## a protocol
|
||||
##
|
||||
|
||||
@@ -53,7 +53,7 @@ proc dialAndUpgrade(
|
||||
peerId: Opt[PeerId],
|
||||
hostname: string,
|
||||
address: MultiAddress,
|
||||
upgradeDir = Direction.Out):
|
||||
dir = Direction.Out):
|
||||
Future[Muxer] {.async.} =
|
||||
|
||||
for transport in self.transports: # for each transport
|
||||
@@ -75,15 +75,19 @@ proc dialAndUpgrade(
|
||||
|
||||
let mux =
|
||||
try:
|
||||
dialed.transportDir = upgradeDir
|
||||
await transport.upgrade(dialed, upgradeDir, peerId)
|
||||
# This is for the very specific case of a simultaneous dial during DCUtR. In this case, both sides will have
|
||||
# an Outbound direction at the transport level. Therefore we update the DCUtR initiator transport direction to Inbound.
|
||||
# The if below is more general and might handle other use cases in the future.
|
||||
if dialed.dir != dir:
|
||||
dialed.dir = dir
|
||||
await transport.upgrade(dialed, peerId)
|
||||
except CatchableError as exc:
|
||||
# If we failed to establish the connection through one transport,
|
||||
# we won't succeeded through another - no use in trying again
|
||||
await dialed.close()
|
||||
debug "Upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
|
||||
debug "Connection upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
|
||||
if exc isnot CancelledError:
|
||||
if upgradeDir == Direction.Out:
|
||||
if dialed.dir == Direction.Out:
|
||||
libp2p_failed_upgrades_outgoing.inc()
|
||||
else:
|
||||
libp2p_failed_upgrades_incoming.inc()
|
||||
@@ -91,7 +95,7 @@ proc dialAndUpgrade(
|
||||
# Try other address
|
||||
return nil
|
||||
|
||||
doAssert not isNil(mux), "connection died after upgrade " & $upgradeDir
|
||||
doAssert not isNil(mux), "connection died after upgrade " & $dialed.dir
|
||||
debug "Dial successful", peerId = mux.connection.peerId
|
||||
return mux
|
||||
return nil
|
||||
@@ -128,7 +132,7 @@ proc dialAndUpgrade(
|
||||
self: Dialer,
|
||||
peerId: Opt[PeerId],
|
||||
addrs: seq[MultiAddress],
|
||||
upgradeDir = Direction.Out):
|
||||
dir = Direction.Out):
|
||||
Future[Muxer] {.async.} =
|
||||
|
||||
debug "Dialing peer", peerId = peerId.get(default(PeerId))
|
||||
@@ -146,7 +150,7 @@ proc dialAndUpgrade(
|
||||
else: await self.nameResolver.resolveMAddress(expandedAddress)
|
||||
|
||||
for resolvedAddress in resolvedAddresses:
|
||||
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, upgradeDir)
|
||||
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
|
||||
if not isNil(result):
|
||||
return result
|
||||
|
||||
@@ -164,7 +168,7 @@ proc internalConnect(
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial: bool,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out):
|
||||
dir = Direction.Out):
|
||||
Future[Muxer] {.async.} =
|
||||
if Opt.some(self.localPeerId) == peerId:
|
||||
raise newException(CatchableError, "can't dial self!")
|
||||
@@ -182,7 +186,7 @@ proc internalConnect(
|
||||
let slot = self.connManager.getOutgoingSlot(forceDial)
|
||||
let muxed =
|
||||
try:
|
||||
await self.dialAndUpgrade(peerId, addrs, upgradeDir)
|
||||
await self.dialAndUpgrade(peerId, addrs, dir)
|
||||
except CatchableError as exc:
|
||||
slot.release()
|
||||
raise exc
|
||||
@@ -209,7 +213,7 @@ method connect*(
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out) {.async.} =
|
||||
dir = Direction.Out) {.async.} =
|
||||
## connect remote peer without negotiating
|
||||
## a protocol
|
||||
##
|
||||
@@ -217,7 +221,7 @@ method connect*(
|
||||
if self.connManager.connCount(peerId) > 0 and reuseConnection:
|
||||
return
|
||||
|
||||
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, upgradeDir)
|
||||
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, dir)
|
||||
|
||||
method connect*(
|
||||
self: Dialer,
|
||||
|
||||
@@ -19,7 +19,8 @@ func toException*(e: string): ref LPError =
|
||||
# sadly nim needs more love for hygienic templates
|
||||
# so here goes the macro, its based on the proc/template version
|
||||
# and uses quote do so it's quite readable
|
||||
macro checkFutures*[T](futs: seq[Future[T]], exclude: untyped = []): untyped =
|
||||
# TODO https://github.com/nim-lang/Nim/issues/22936
|
||||
macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
|
||||
let nexclude = exclude.len
|
||||
case nexclude
|
||||
of 0:
|
||||
@@ -44,28 +45,6 @@ macro checkFutures*[T](futs: seq[Future[T]], exclude: untyped = []): untyped =
|
||||
debug "A future has failed, enable trace logging for details", error=exc.name
|
||||
trace "Exception details", msg=exc.msg
|
||||
|
||||
proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] =
|
||||
var futs: seq[Future[T]]
|
||||
for fut in args:
|
||||
futs &= fut
|
||||
proc call() {.async.} =
|
||||
var first: ref CatchableError = nil
|
||||
futs = await allFinished(futs)
|
||||
for fut in futs:
|
||||
if fut.failed:
|
||||
let err = fut.readError()
|
||||
if err of Defect:
|
||||
raise err
|
||||
else:
|
||||
if err of CancelledError:
|
||||
raise err
|
||||
if isNil(first):
|
||||
first = err
|
||||
if not isNil(first):
|
||||
raise first
|
||||
|
||||
return call()
|
||||
|
||||
template tryAndWarn*(message: static[string]; body: untyped): untyped =
|
||||
try:
|
||||
body
|
||||
|
||||
@@ -119,23 +119,46 @@ proc ip6VB(vb: var VBuffer): bool =
|
||||
if vb.readArray(a.address_v6) == 16:
|
||||
result = true
|
||||
|
||||
proc ip6zoneStB(s: string, vb: var VBuffer): bool =
|
||||
## IPv6 stringToBuffer() implementation.
|
||||
template pathStringToBuffer(s: string, vb: var VBuffer): bool =
|
||||
if len(s) > 0:
|
||||
vb.writeSeq(s)
|
||||
result = true
|
||||
true
|
||||
else:
|
||||
false
|
||||
|
||||
template pathBufferToString(vb: var VBuffer, s: var string): bool =
|
||||
s = ""
|
||||
if (vb.readSeq(s) > 0) and (len(s) > 0):
|
||||
true
|
||||
else:
|
||||
false
|
||||
|
||||
template pathBufferToStringNoSlash(vb: var VBuffer, s: var string): bool =
|
||||
s = ""
|
||||
if (vb.readSeq(s) > 0) and (len(s) > 0) and (s.find('/') == -1):
|
||||
true
|
||||
else:
|
||||
false
|
||||
|
||||
template pathValidateBuffer(vb: var VBuffer): bool =
|
||||
var s = ""
|
||||
pathBufferToString(vb, s)
|
||||
|
||||
template pathValidateBufferNoSlash(vb: var VBuffer): bool =
|
||||
var s = ""
|
||||
pathBufferToStringNoSlash(vb, s)
|
||||
|
||||
proc ip6zoneStB(s: string, vb: var VBuffer): bool =
|
||||
## IPv6 stringToBuffer() implementation.
|
||||
pathStringToBuffer(s, vb)
|
||||
|
||||
proc ip6zoneBtS(vb: var VBuffer, s: var string): bool =
|
||||
## IPv6 bufferToString() implementation.
|
||||
if vb.readSeq(s) > 0:
|
||||
result = true
|
||||
pathBufferToStringNoSlash(vb, s)
|
||||
|
||||
proc ip6zoneVB(vb: var VBuffer): bool =
|
||||
## IPv6 validateBuffer() implementation.
|
||||
var s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
if s.find('/') == -1:
|
||||
result = true
|
||||
pathValidateBufferNoSlash(vb)
|
||||
|
||||
proc portStB(s: string, vb: var VBuffer): bool =
|
||||
## Port number stringToBuffer() implementation.
|
||||
@@ -154,7 +177,8 @@ proc portBtS(vb: var VBuffer, s: var string): bool =
|
||||
## Port number bufferToString() implementation.
|
||||
var port: array[2, byte]
|
||||
if vb.readArray(port) == 2:
|
||||
var nport = (safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
|
||||
let nport =
|
||||
(safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
|
||||
s = $nport
|
||||
result = true
|
||||
|
||||
@@ -214,7 +238,8 @@ proc onionBtS(vb: var VBuffer, s: var string): bool =
|
||||
## ONION address bufferToString() implementation.
|
||||
var buf: array[12, byte]
|
||||
if vb.readArray(buf) == 12:
|
||||
var nport = (safeConvert[uint16](buf[10]) shl 8) or safeConvert[uint16](buf[11])
|
||||
let nport =
|
||||
(safeConvert[uint16](buf[10]) shl 8) or safeConvert[uint16](buf[11])
|
||||
s = Base32Lower.encode(buf.toOpenArray(0, 9))
|
||||
s.add(":")
|
||||
s.add($nport)
|
||||
@@ -248,7 +273,8 @@ proc onion3BtS(vb: var VBuffer, s: var string): bool =
|
||||
## ONION address bufferToString() implementation.
|
||||
var buf: array[37, byte]
|
||||
if vb.readArray(buf) == 37:
|
||||
var nport = (safeConvert[uint16](buf[35]) shl 8) or safeConvert[uint16](buf[36])
|
||||
var nport =
|
||||
(safeConvert[uint16](buf[35]) shl 8) or safeConvert[uint16](buf[36])
|
||||
s = Base32Lower.encode(buf.toOpenArray(0, 34))
|
||||
s.add(":")
|
||||
s.add($nport)
|
||||
@@ -262,40 +288,27 @@ proc onion3VB(vb: var VBuffer): bool =
|
||||
|
||||
proc unixStB(s: string, vb: var VBuffer): bool =
|
||||
## Unix socket name stringToBuffer() implementation.
|
||||
if len(s) > 0:
|
||||
vb.writeSeq(s)
|
||||
result = true
|
||||
pathStringToBuffer(s, vb)
|
||||
|
||||
proc unixBtS(vb: var VBuffer, s: var string): bool =
|
||||
## Unix socket name bufferToString() implementation.
|
||||
s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
result = true
|
||||
pathBufferToString(vb, s)
|
||||
|
||||
proc unixVB(vb: var VBuffer): bool =
|
||||
## Unix socket name validateBuffer() implementation.
|
||||
var s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
result = true
|
||||
pathValidateBuffer(vb)
|
||||
|
||||
proc dnsStB(s: string, vb: var VBuffer): bool =
|
||||
## DNS name stringToBuffer() implementation.
|
||||
if len(s) > 0:
|
||||
vb.writeSeq(s)
|
||||
result = true
|
||||
pathStringToBuffer(s, vb)
|
||||
|
||||
proc dnsBtS(vb: var VBuffer, s: var string): bool =
|
||||
## DNS name bufferToString() implementation.
|
||||
s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
result = true
|
||||
pathBufferToStringNoSlash(vb, s)
|
||||
|
||||
proc dnsVB(vb: var VBuffer): bool =
|
||||
## DNS name validateBuffer() implementation.
|
||||
var s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
if s.find('/') == -1:
|
||||
result = true
|
||||
pathValidateBufferNoSlash(vb)
|
||||
|
||||
proc mapEq*(codec: string): MaPattern =
|
||||
## ``Equal`` operator for pattern
|
||||
@@ -398,6 +411,9 @@ const
|
||||
MAProtocol(
|
||||
mcodec: multiCodec("quic"), kind: Marker, size: 0
|
||||
),
|
||||
MAProtocol(
|
||||
mcodec: multiCodec("quic-v1"), kind: Marker, size: 0
|
||||
),
|
||||
MAProtocol(
|
||||
mcodec: multiCodec("ip6zone"), kind: Length, size: 0,
|
||||
coder: TranscoderIP6Zone
|
||||
@@ -657,7 +673,8 @@ proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
|
||||
inc(offset)
|
||||
ok(res)
|
||||
|
||||
proc getParts[U, V](ma: MultiAddress, slice: HSlice[U, V]): MaResult[MultiAddress] =
|
||||
proc getParts[U, V](ma: MultiAddress,
|
||||
slice: HSlice[U, V]): MaResult[MultiAddress] =
|
||||
when slice.a is BackwardsIndex or slice.b is BackwardsIndex:
|
||||
let maLength = ? len(ma)
|
||||
template normalizeIndex(index): int =
|
||||
@@ -671,7 +688,8 @@ proc getParts[U, V](ma: MultiAddress, slice: HSlice[U, V]): MaResult[MultiAddres
|
||||
? res.append(? ma[i])
|
||||
ok(res)
|
||||
|
||||
proc `[]`*(ma: MultiAddress, i: int | BackwardsIndex): MaResult[MultiAddress] {.inline.} =
|
||||
proc `[]`*(ma: MultiAddress,
|
||||
i: int | BackwardsIndex): MaResult[MultiAddress] {.inline.} =
|
||||
## Returns part with index ``i`` of MultiAddress ``ma``.
|
||||
when i is BackwardsIndex:
|
||||
let maLength = ? len(ma)
|
||||
@@ -766,7 +784,7 @@ proc toString*(value: MultiAddress): MaResult[string] =
|
||||
if not proto.coder.bufferToString(vb.data, part):
|
||||
return err("multiaddress: Decoding protocol error")
|
||||
parts.add($(proto.mcodec))
|
||||
if proto.kind == Path and part[0] == '/':
|
||||
if len(part) > 0 and (proto.kind == Path) and (part[0] == '/'):
|
||||
parts.add(part[1..^1])
|
||||
else:
|
||||
parts.add(part)
|
||||
@@ -955,7 +973,7 @@ proc init*(mtype: typedesc[MultiAddress]): MultiAddress =
|
||||
## Initialize empty MultiAddress.
|
||||
result.data = initVBuffer()
|
||||
|
||||
proc init*(mtype: typedesc[MultiAddress], address: ValidIpAddress,
|
||||
proc init*(mtype: typedesc[MultiAddress], address: IpAddress,
|
||||
protocol: IpTransportProtocol, port: Port): MultiAddress =
|
||||
var res: MultiAddress
|
||||
res.data = initVBuffer()
|
||||
@@ -1117,16 +1135,20 @@ proc getField*(pb: ProtoBuffer, field: int,
|
||||
if not(res):
|
||||
ok(false)
|
||||
else:
|
||||
value = MultiAddress.init(buffer).valueOr: return err(ProtoError.IncorrectBlob)
|
||||
value = MultiAddress.init(buffer).valueOr:
|
||||
return err(ProtoError.IncorrectBlob)
|
||||
ok(true)
|
||||
|
||||
proc getRepeatedField*(pb: ProtoBuffer, field: int,
|
||||
value: var seq[MultiAddress]): ProtoResult[bool] {.
|
||||
inline.} =
|
||||
## Read repeated field from protobuf message. ``field`` is field number. If the message is malformed, an error is returned.
|
||||
## If field is not present in message, then ``ok(false)`` is returned and value is empty. If field is present,
|
||||
## but no items could be parsed, then ``err(ProtoError.IncorrectBlob)`` is returned and value is empty.
|
||||
## If field is present and some item could be parsed, then ``true`` is returned and value contains the parsed values.
|
||||
## Read repeated field from protobuf message. ``field`` is field number.
|
||||
## If the message is malformed, an error is returned. If field is not present
|
||||
## in message, then ``ok(false)`` is returned and value is empty. If field is
|
||||
## present, but no items could be parsed, then
|
||||
## ``err(ProtoError.IncorrectBlob)`` is returned and value is empty.
|
||||
## If field is present and some item could be parsed, then ``true`` is
|
||||
## returned and value contains the parsed values.
|
||||
var items: seq[seq[byte]]
|
||||
value.setLen(0)
|
||||
let res = ? pb.getRepeatedField(field, items)
|
||||
|
||||
@@ -193,6 +193,7 @@ const MultiCodecList = [
|
||||
("https", 0x01BB),
|
||||
("tls", 0x01C0),
|
||||
("quic", 0x01CC),
|
||||
("quic-v1", 0x01CD),
|
||||
("ws", 0x01DD),
|
||||
("wss", 0x01DE),
|
||||
("p2p-websocket-star", 0x01DF), # not in multicodec list
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -45,15 +45,18 @@ proc new*(T: typedesc[MultistreamSelect]): T =
|
||||
)
|
||||
|
||||
template validateSuffix(str: string): untyped =
|
||||
if str.endsWith("\n"):
|
||||
str.removeSuffix("\n")
|
||||
else:
|
||||
raise newException(MultiStreamError, "MultistreamSelect failed, malformed message")
|
||||
if str.endsWith("\n"):
|
||||
str.removeSuffix("\n")
|
||||
else:
|
||||
raise (ref MultiStreamError)(msg:
|
||||
"MultistreamSelect failed, malformed message")
|
||||
|
||||
proc select*(_: MultistreamSelect | type MultistreamSelect,
|
||||
conn: Connection,
|
||||
proto: seq[string]):
|
||||
Future[string] {.async.} =
|
||||
proc select*(
|
||||
_: MultistreamSelect | type MultistreamSelect,
|
||||
conn: Connection,
|
||||
proto: seq[string]
|
||||
): Future[string] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MultiStreamError]).} =
|
||||
trace "initiating handshake", conn, codec = Codec
|
||||
## select a remote protocol
|
||||
await conn.writeLp(Codec & "\n") # write handshake
|
||||
@@ -66,7 +69,7 @@ proc select*(_: MultistreamSelect | type MultistreamSelect,
|
||||
|
||||
if s != Codec:
|
||||
notice "handshake failed", conn, codec = s
|
||||
raise newException(MultiStreamError, "MultistreamSelect handshake failed")
|
||||
raise (ref MultiStreamError)(msg: "MultistreamSelect handshake failed")
|
||||
else:
|
||||
trace "multistream handshake success", conn
|
||||
|
||||
@@ -98,19 +101,29 @@ proc select*(_: MultistreamSelect | type MultistreamSelect,
|
||||
# No alternatives, fail
|
||||
return ""
|
||||
|
||||
proc select*(_: MultistreamSelect | type MultistreamSelect,
|
||||
conn: Connection,
|
||||
proto: string): Future[bool] {.async.} =
|
||||
proc select*(
|
||||
_: MultistreamSelect | type MultistreamSelect,
|
||||
conn: Connection,
|
||||
proto: string
|
||||
): Future[bool] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MultiStreamError]).} =
|
||||
if proto.len > 0:
|
||||
return (await MultistreamSelect.select(conn, @[proto])) == proto
|
||||
(await MultistreamSelect.select(conn, @[proto])) == proto
|
||||
else:
|
||||
return (await MultistreamSelect.select(conn, @[])) == Codec
|
||||
(await MultistreamSelect.select(conn, @[])) == Codec
|
||||
|
||||
proc select*(m: MultistreamSelect, conn: Connection): Future[bool] =
|
||||
proc select*(
|
||||
m: MultistreamSelect,
|
||||
conn: Connection
|
||||
): Future[bool] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MultiStreamError], raw: true).} =
|
||||
m.select(conn, "")
|
||||
|
||||
proc list*(m: MultistreamSelect,
|
||||
conn: Connection): Future[seq[string]] {.async.} =
|
||||
proc list*(
|
||||
m: MultistreamSelect,
|
||||
conn: Connection
|
||||
): Future[seq[string]] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MultiStreamError]).} =
|
||||
## list remote protos requests on connection
|
||||
if not await m.select(conn):
|
||||
return
|
||||
@@ -126,12 +139,13 @@ proc list*(m: MultistreamSelect,
|
||||
result = list
|
||||
|
||||
proc handle*(
|
||||
_: type MultistreamSelect,
|
||||
conn: Connection,
|
||||
protos: seq[string],
|
||||
matchers = newSeq[Matcher](),
|
||||
active: bool = false,
|
||||
): Future[string] {.async, gcsafe.} =
|
||||
_: type MultistreamSelect,
|
||||
conn: Connection,
|
||||
protos: seq[string],
|
||||
matchers = newSeq[Matcher](),
|
||||
active: bool = false
|
||||
): Future[string] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MultiStreamError]).} =
|
||||
trace "Starting multistream negotiation", conn, handshaked = active
|
||||
var handshaked = active
|
||||
while not conn.atEof:
|
||||
@@ -140,8 +154,8 @@ proc handle*(
|
||||
|
||||
if not handshaked and ms != Codec:
|
||||
debug "expected handshake message", conn, instead=ms
|
||||
raise newException(CatchableError,
|
||||
"MultistreamSelect handling failed, invalid first message")
|
||||
raise (ref MultiStreamError)(msg:
|
||||
"MultistreamSelect handling failed, invalid first message")
|
||||
|
||||
trace "handle: got request", conn, ms
|
||||
if ms.len() <= 0:
|
||||
@@ -172,14 +186,16 @@ proc handle*(
|
||||
trace "no handlers", conn, protocol = ms
|
||||
await conn.writeLp(Na)
|
||||
|
||||
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
|
||||
proc handle*(
|
||||
m: MultistreamSelect,
|
||||
conn: Connection,
|
||||
active: bool = false) {.async: (raises: [CancelledError]).} =
|
||||
trace "Starting multistream handler", conn, handshaked = active
|
||||
var
|
||||
handshaked = active
|
||||
protos: seq[string]
|
||||
matchers: seq[Matcher]
|
||||
for h in m.handlers:
|
||||
if not isNil(h.match):
|
||||
if h.match != nil:
|
||||
matchers.add(h.match)
|
||||
for proto in h.protos:
|
||||
protos.add(proto)
|
||||
@@ -187,12 +203,13 @@ proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.asy
|
||||
try:
|
||||
let ms = await MultistreamSelect.handle(conn, protos, matchers, active)
|
||||
for h in m.handlers:
|
||||
if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
|
||||
if (h.match != nil and h.match(ms)) or h.protos.contains(ms):
|
||||
trace "found handler", conn, protocol = ms
|
||||
|
||||
var protocolHolder = h
|
||||
let maxIncomingStreams = protocolHolder.protocol.maxIncomingStreams
|
||||
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >= maxIncomingStreams:
|
||||
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >=
|
||||
maxIncomingStreams:
|
||||
debug "Max streams for protocol reached, blocking new stream",
|
||||
conn, protocol = ms, maxIncomingStreams
|
||||
return
|
||||
@@ -243,8 +260,32 @@ proc addHandler*(m: MultistreamSelect,
|
||||
protocol: protocol,
|
||||
match: matcher))
|
||||
|
||||
proc start*(m: MultistreamSelect) {.async.} =
|
||||
await allFutures(m.handlers.mapIt(it.protocol.start()))
|
||||
proc start*(m: MultistreamSelect) {.async: (raises: [CancelledError]).} =
|
||||
let
|
||||
handlers = m.handlers
|
||||
futs = handlers.mapIt(it.protocol.start())
|
||||
try:
|
||||
await allFutures(futs)
|
||||
for fut in futs:
|
||||
await fut
|
||||
except CancelledError as exc:
|
||||
var pending: seq[Future[void].Raising([])]
|
||||
for i, fut in futs:
|
||||
if not fut.finished:
|
||||
pending.add noCancel fut.cancelAndWait()
|
||||
elif fut.completed:
|
||||
pending.add handlers[i].protocol.stop()
|
||||
else:
|
||||
static: doAssert typeof(fut).E is (CancelledError,)
|
||||
await noCancel allFutures(pending)
|
||||
raise exc
|
||||
|
||||
proc stop*(m: MultistreamSelect) {.async.} =
|
||||
await allFutures(m.handlers.mapIt(it.protocol.stop()))
|
||||
|
||||
proc stop*(m: MultistreamSelect) {.async: (raises: []).} =
|
||||
# Nim 1.6.18: Using `mapIt` results in a seq of `.Raising([CancelledError])`
|
||||
var futs = newSeqOfCap[Future[void].Raising([])](m.handlers.len)
|
||||
for it in m.handlers:
|
||||
futs.add it.protocol.stop()
|
||||
await noCancel allFutures(futs)
|
||||
for fut in futs:
|
||||
await fut
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -42,7 +42,10 @@ const MaxMsgSize* = 1 shl 20 # 1mb
|
||||
proc newInvalidMplexMsgType*(): ref InvalidMplexMsgType =
|
||||
newException(InvalidMplexMsgType, "invalid message type")
|
||||
|
||||
proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
|
||||
proc readMsg*(
|
||||
conn: Connection
|
||||
): Future[Msg] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MuxerError]).} =
|
||||
let header = await conn.readVarint()
|
||||
trace "read header varint", varint = header, conn
|
||||
|
||||
@@ -55,10 +58,13 @@ proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
|
||||
|
||||
return (header shr 3, MessageType(msgType), data)
|
||||
|
||||
proc writeMsg*(conn: Connection,
|
||||
id: uint64,
|
||||
msgType: MessageType,
|
||||
data: seq[byte] = @[]): Future[void] =
|
||||
proc writeMsg*(
|
||||
conn: Connection,
|
||||
id: uint64,
|
||||
msgType: MessageType,
|
||||
data: seq[byte] = @[]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
var
|
||||
left = data.len
|
||||
offset = 0
|
||||
@@ -84,8 +90,11 @@ proc writeMsg*(conn: Connection,
|
||||
# message gets written before some of the chunks
|
||||
conn.write(buf.buffer)
|
||||
|
||||
proc writeMsg*(conn: Connection,
|
||||
id: uint64,
|
||||
msgType: MessageType,
|
||||
data: string): Future[void] =
|
||||
proc writeMsg*(
|
||||
conn: Connection,
|
||||
id: uint64,
|
||||
msgType: MessageType,
|
||||
data: string
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
conn.writeMsg(id, msgType, data.toBytes())
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -28,7 +28,8 @@ when defined(libp2p_mplex_metrics):
|
||||
declareHistogram libp2p_mplex_qtime, "message queuing time"
|
||||
|
||||
when defined(libp2p_network_protocols_metrics):
|
||||
declareCounter libp2p_protocols_bytes, "total sent or received bytes", ["protocol", "direction"]
|
||||
declareCounter libp2p_protocols_bytes,
|
||||
"total sent or received bytes", ["protocol", "direction"]
|
||||
|
||||
## Channel half-closed states
|
||||
##
|
||||
@@ -64,16 +65,16 @@ type
|
||||
|
||||
func shortLog*(s: LPChannel): auto =
|
||||
try:
|
||||
if s.isNil: "LPChannel(nil)"
|
||||
if s == nil: "LPChannel(nil)"
|
||||
elif s.name != $s.oid and s.name.len > 0:
|
||||
&"{shortLog(s.conn.peerId)}:{s.oid}:{s.name}"
|
||||
else: &"{shortLog(s.conn.peerId)}:{s.oid}"
|
||||
except ValueError as exc:
|
||||
raise newException(Defect, exc.msg)
|
||||
raiseAssert(exc.msg)
|
||||
|
||||
chronicles.formatIt(LPChannel): shortLog(it)
|
||||
|
||||
proc open*(s: LPChannel) {.async, gcsafe.} =
|
||||
proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
trace "Opening channel", s, conn = s.conn
|
||||
if s.conn.isClosed:
|
||||
return
|
||||
@@ -82,20 +83,20 @@ proc open*(s: LPChannel) {.async, gcsafe.} =
|
||||
s.isOpen = true
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
except LPStreamError as exc:
|
||||
await s.conn.close()
|
||||
raise exc
|
||||
|
||||
method closed*(s: LPChannel): bool =
|
||||
s.closedLocal
|
||||
|
||||
proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
|
||||
proc closeUnderlying(s: LPChannel): Future[void] {.async: (raises: []).} =
|
||||
## Channels may be closed for reading and writing in any order - we'll close
|
||||
## the underlying bufferstream when both directions are closed
|
||||
if s.closedLocal and s.atEof():
|
||||
await procCall BufferStream(s).close()

proc reset*(s: LPChannel) {.async, gcsafe.} =
proc reset*(s: LPChannel) {.async: (raises: []).} =
if s.isClosed:
trace "Already closed", s
return
@@ -108,22 +109,21 @@ proc reset*(s: LPChannel) {.async, gcsafe.} =

if s.isOpen and not s.conn.isClosed:
# If the connection is still active, notify the other end
proc resetMessage() {.async.} =
proc resetMessage() {.async: (raises: []).} =
try:
trace "sending reset message", s, conn = s.conn
await s.conn.writeMsg(s.id, s.resetCode) # write reset
except CatchableError as exc:
# No cancellations
await s.conn.close()
await noCancel s.conn.writeMsg(s.id, s.resetCode) # write reset
except LPStreamError as exc:
trace "Can't send reset message", s, conn = s.conn, msg = exc.msg
await s.conn.close()

asyncSpawn resetMessage()

await s.closeImpl() # noraises, nocancels
await s.closeImpl()

trace "Channel reset", s

method close*(s: LPChannel) {.async, gcsafe.} =
method close*(s: LPChannel) {.async: (raises: []).} =
## Close channel for writing - a message will be sent to the other peer
## informing them that the channel is closed and that we're waiting for
## their acknowledgement.
@@ -137,10 +137,9 @@ method close*(s: LPChannel) {.async, gcsafe.} =
if s.isOpen and not s.conn.isClosed:
try:
await s.conn.writeMsg(s.id, s.closeCode) # write close
except CancelledError as exc:
except CancelledError:
await s.conn.close()
raise exc
except CatchableError as exc:
except LPStreamError as exc:
# It's harmless that close message cannot be sent - the connection is
# likely down already
await s.conn.close()
@@ -154,16 +153,17 @@ method initStream*(s: LPChannel) =
if s.objName.len == 0:
s.objName = LPChannelTrackerName

s.timeoutHandler = proc(): Future[void] {.gcsafe.} =
s.timeoutHandler = proc(): Future[void] {.async: (raises: [], raw: true).} =
trace "Idle timeout expired, resetting LPChannel", s
s.reset()

procCall BufferStream(s).initStream()

method readOnce*(s: LPChannel,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
method readOnce*(
s: LPChannel,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
## Mplex relies on reading being done regularly from every channel, or all
## channels are blocked - in particular, this means that reading from one
## channel must not be done from within a callback / read handler of another
@@ -186,15 +186,19 @@ method readOnce*(s: LPChannel,
if bytes == 0:
await s.closeUnderlying()
return bytes
except CatchableError as exc:
# readOnce in BufferStream generally raises on EOF or cancellation - for
# the former, resetting is harmless, for the latter it's necessary because
# data has been lost in s.readBuf and there's no way to gracefully recover /
# use the channel any more
except CancelledError as exc:
await s.reset()
raise exc
except LPStreamError as exc:
# Resetting is necessary because data has been lost in s.readBuf and
# there's no way to gracefully recover / use the channel any more
await s.reset()
raise newLPStreamConnDownError(exc)

proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
proc prepareWrite(
s: LPChannel,
msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
# prepareWrite is the slow path of writing a message - see conditions in
# write
if s.remoteReset:
@@ -222,7 +226,10 @@ proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
await s.conn.writeMsg(s.id, s.msgCode, msg)

proc completeWrite(
s: LPChannel, fut: Future[void], msgLen: int): Future[void] {.async.} =
s: LPChannel,
fut: Future[void].Raising([CancelledError, LPStreamError]),
msgLen: int
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
try:
s.writes += 1

@@ -235,7 +242,10 @@ proc completeWrite(

when defined(libp2p_network_protocols_metrics):
if s.protocol.len > 0:
libp2p_protocols_bytes.inc(msgLen.int64, labelValues=[s.protocol, "out"])
# This crashes on Nim 2.0.2 with `--mm:orc` during `nimble test`
# https://github.com/status-im/nim-metrics/issues/79
libp2p_protocols_bytes.inc(
msgLen.int64, labelValues = [s.protocol, "out"])

s.activity = true
except CancelledError as exc:
@@ -247,7 +257,7 @@ proc completeWrite(
raise exc
except LPStreamEOFError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
trace "exception in lpchannel write handler", s, msg = exc.msg
await s.reset()
await s.conn.close()
@@ -255,7 +265,11 @@ proc completeWrite(
finally:
s.writes -= 1

method write*(s: LPChannel, msg: seq[byte]): Future[void] =
method write*(
s: LPChannel,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
## Write to mplex channel - there may be up to MaxWrite concurrent writes
## pending after which the peer is disconnected

@@ -276,13 +290,12 @@ method write*(s: LPChannel, msg: seq[byte]): Future[void] =
method getWrapped*(s: LPChannel): Connection = s.conn

proc init*(
L: type LPChannel,
id: uint64,
conn: Connection,
initiator: bool,
name: string = "",
timeout: Duration = DefaultChanTimeout): LPChannel =

L: type LPChannel,
id: uint64,
conn: Connection,
initiator: bool,
name: string = "",
timeout: Duration = DefaultChanTimeout): LPChannel =
let chann = L(
id: id,
name: name,

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -56,7 +56,7 @@ proc newTooManyChannels(): ref TooManyChannels =
proc newInvalidChannelIdError(): ref InvalidChannelIdError =
newException(InvalidChannelIdError, "max allowed channel count exceeded")

proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
proc cleanupChann(m: Mplex, chann: LPChannel) {.async: (raises: []), inline.} =
## remove the local channel from the internal tables
##
try:
@@ -68,19 +68,19 @@ proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
libp2p_mplex_channels.set(
m.channels[chann.initiator].len.int64,
labelValues = [$chann.initiator, $m.connection.peerId])
except CatchableError as exc:
except CancelledError as exc:
warn "Error cleaning up mplex channel", m, chann, msg = exc.msg

proc newStreamInternal*(m: Mplex,
initiator: bool = true,
chanId: uint64 = 0,
name: string = "",
timeout: Duration): LPChannel
{.gcsafe, raises: [InvalidChannelIdError].} =
proc newStreamInternal*(
m: Mplex,
initiator: bool = true,
chanId: uint64 = 0,
name: string = "",
timeout: Duration): LPChannel {.gcsafe, raises: [InvalidChannelIdError].} =
## create new channel/stream
##
let id = if initiator:
m.currentId.inc(); m.currentId
let id =
if initiator: m.currentId.inc(); m.currentId
else: chanId

if id in m.channels[initiator]:
@@ -111,18 +111,14 @@ proc newStreamInternal*(m: Mplex,
m.channels[initiator].len.int64,
labelValues = [$initiator, $m.connection.peerId])
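The id selection in newStreamInternal follows the usual mplex convention: a locally initiated channel takes the next value of a local counter, a remotely initiated one reuses the id announced by the peer, and a duplicate id in the same direction is rejected. A small self-contained sketch of that bookkeeping, with hypothetical ToyMux/newChannelId names rather than the real Mplex type:

import std/tables

# Illustrative bookkeeping only - not the real Mplex type or proc.
type ToyMux = object
  currentId: uint64
  channels: array[bool, Table[uint64, string]]  # keyed by "initiator" side

proc newChannelId(m: var ToyMux, initiator: bool, chanId: uint64 = 0): uint64 =
  # Locally initiated channels take the next counter value; remotely
  # initiated ones reuse the id announced by the peer.
  if initiator:
    inc m.currentId
    result = m.currentId
  else:
    result = chanId
  if result in m.channels[initiator]:
    raise newException(ValueError, "channel id already in use: " & $result)
  m.channels[initiator][result] = "chan-" & $result

when isMainModule:
  var m = ToyMux()
  assert m.newChannelId(true) == 1
  assert m.newChannelId(true) == 2
  assert m.newChannelId(false, 7) == 7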

proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
proc handleStream(m: Mplex, chann: LPChannel) {.async: (raises: []).} =
## call the muxer stream handler for this channel
##
try:
await m.streamHandler(chann)
trace "finished handling stream", m, chann
doAssert(chann.closed, "connection not closed by handler!")
except CatchableError as exc:
trace "Exception in mplex stream handler", m, chann, msg = exc.msg
await chann.reset()
await m.streamHandler(chann)
trace "finished handling stream", m, chann
doAssert(chann.closed, "connection not closed by handler!")

method handle*(m: Mplex) {.async, gcsafe.} =
method handle*(m: Mplex) {.async: (raises: []).} =
trace "Starting mplex handler", m
try:
while not m.connection.atEof:
@@ -150,7 +146,7 @@ method handle*(m: Mplex) {.async, gcsafe.} =
else:
if m.channels[false].len > m.maxChannCount - 1:
warn "too many channels created by remote peer",
allowedMax = MaxChannelCount, m
allowedMax = MaxChannelCount, m
raise newTooManyChannels()

let name = string.fromBytes(data)
@@ -159,59 +155,65 @@ method handle*(m: Mplex) {.async, gcsafe.} =
trace "Processing channel message", m, channel, data = data.shortLog

case msgType:
of MessageType.New:
trace "created channel", m, channel
of MessageType.New:
trace "created channel", m, channel

if not isNil(m.streamHandler):
# Launch handler task
# All the errors are handled inside `handleStream()` procedure.
asyncSpawn m.handleStream(channel)
if m.streamHandler != nil:
# Launch handler task
# All the errors are handled inside `handleStream()` procedure.
asyncSpawn m.handleStream(channel)

of MessageType.MsgIn, MessageType.MsgOut:
if data.len > MaxMsgSize:
warn "attempting to send a packet larger than allowed",
allowed = MaxMsgSize, channel
raise newLPStreamLimitError()
of MessageType.MsgIn, MessageType.MsgOut:
if data.len > MaxMsgSize:
warn "attempting to send a packet larger than allowed",
allowed = MaxMsgSize, channel
raise newLPStreamLimitError()

trace "pushing data to channel", m, channel, len = data.len
try:
await channel.pushData(data)
trace "pushed data to channel", m, channel, len = data.len
except LPStreamClosedError as exc:
# Channel is being closed, but `cleanupChann` was not yet triggered.
trace "pushing data to channel failed", m, channel, len = data.len,
msg = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.
trace "pushing data to channel", m, channel, len = data.len
try:
await channel.pushData(data)
trace "pushed data to channel", m, channel, len = data.len
except LPStreamClosedError as exc:
# Channel is being closed, but `cleanupChann` was not yet triggered.
trace "pushing data to channel failed", m, channel, len = data.len,
msg = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.

of MessageType.CloseIn, MessageType.CloseOut:
await channel.pushEof()
of MessageType.ResetIn, MessageType.ResetOut:
channel.remoteReset = true
await channel.reset()
of MessageType.CloseIn, MessageType.CloseOut:
await channel.pushEof()
of MessageType.ResetIn, MessageType.ResetOut:
channel.remoteReset = true
await channel.reset()
except CancelledError:
debug "Unexpected cancellation in mplex handler", m
except LPStreamEOFError as exc:
trace "Stream EOF", m, msg = exc.msg
except CatchableError as exc:
debug "Unexpected exception in mplex read loop", m, msg = exc.msg
except LPStreamError as exc:
debug "Unexpected stream exception in mplex read loop", m, msg = exc.msg
except MuxerError as exc:
debug "Unexpected muxer exception in mplex read loop", m, msg = exc.msg
finally:
await m.close()
trace "Stopped mplex handler", m

proc new*(M: type Mplex,
conn: Connection,
inTimeout: Duration = DefaultChanTimeout,
outTimeout: Duration = DefaultChanTimeout,
maxChannCount: int = MaxChannelCount): Mplex =
proc new*(
M: type Mplex,
conn: Connection,
inTimeout: Duration = DefaultChanTimeout,
outTimeout: Duration = DefaultChanTimeout,
maxChannCount: int = MaxChannelCount): Mplex =
M(connection: conn,
inChannTimeout: inTimeout,
outChannTimeout: outTimeout,
oid: genOid(),
maxChannCount: maxChannCount)

method newStream*(m: Mplex,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
method newStream*(
m: Mplex,
name: string = "",
lazy: bool = false
): Future[Connection] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
let channel = m.newStreamInternal(timeout = m.inChannTimeout)

if not lazy:
@@ -219,7 +221,7 @@ method newStream*(m: Mplex,

return Connection(channel)

method close*(m: Mplex) {.async, gcsafe.} =
method close*(m: Mplex) {.async: (raises: []).} =
if m.isClosed:
trace "Already closed", m
return

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -23,16 +23,17 @@ type
MuxerError* = object of LPError
TooManyChannels* = object of MuxerError

StreamHandler* = proc(conn: Connection): Future[void] {.gcsafe, raises: [].}
MuxerHandler* = proc(muxer: Muxer): Future[void] {.gcsafe, raises: [].}
StreamHandler* = proc(conn: Connection): Future[void] {.async: (raises: []).}
MuxerHandler* = proc(muxer: Muxer): Future[void] {.async: (raises: []).}

Muxer* = ref object of RootObj
streamHandler*: StreamHandler
handler*: Future[void]
handler*: Future[void].Raising([])
connection*: Connection

# user provider proc that returns a constructed Muxer
MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [].}
MuxerConstructor* =
proc(conn: Connection): Muxer {.gcsafe, closure, raises: [].}

# this wraps a creator proc that knows how to make muxers
MuxerProvider* = object
@@ -40,24 +41,32 @@ type
codec*: string

func shortLog*(m: Muxer): auto =
if isNil(m): "nil"
if m == nil: "nil"
else: shortLog(m.connection)

chronicles.formatIt(Muxer): shortLog(it)

# muxer interface
method newStream*(m: Muxer, name: string = "", lazy: bool = false):
Future[Connection] {.base, async, gcsafe.} = discard
method close*(m: Muxer) {.base, async, gcsafe.} =
if not isNil(m.connection):
method newStream*(
m: Muxer,
name: string = "",
lazy: bool = false
): Future[Connection] {.base, async: (raises: [
CancelledError, LPStreamError, MuxerError], raw: true).} =
raiseAssert("Not implemented!")

method close*(m: Muxer) {.base, async: (raises: []).} =
if m.connection != nil:
await m.connection.close()
method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard

method handle*(m: Muxer): Future[void] {.base, async: (raises: []).} = discard

proc new*(
T: typedesc[MuxerProvider],
creator: MuxerConstructor,
codec: string): T {.gcsafe.} =

T: typedesc[MuxerProvider],
creator: MuxerConstructor,
codec: string): T {.gcsafe.} =
let muxerProvider = T(newMuxer: creator, codec: codec)
muxerProvider

method getStreams*(m: Muxer): seq[Connection] {.base.} = doAssert false, "not implemented"
method getStreams*(m: Muxer): seq[Connection] {.base.} =
raiseAssert("Not implemented!")

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -22,18 +22,22 @@ logScope:
const
YamuxCodec* = "/yamux/1.0.0"
YamuxVersion = 0.uint8
DefaultWindowSize = 256000
YamuxDefaultWindowSize* = 256000
MaxSendQueueSize = 256000
MaxChannelCount = 200

when defined(libp2p_yamux_metrics):
declareGauge(libp2p_yamux_channels, "yamux channels", labels = ["initiator", "peer"])
declareHistogram libp2p_yamux_send_queue, "message send queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
declareHistogram libp2p_yamux_recv_queue, "message recv queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
declareGauge libp2p_yamux_channels,
"yamux channels", labels = ["initiator", "peer"]
declareHistogram libp2p_yamux_send_queue,
"message send queue length (in byte)", buckets = [
0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
declareHistogram libp2p_yamux_recv_queue,
"message recv queue length (in byte)", buckets = [
0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]

type
YamuxError* = object of CatchableError
YamuxError* = object of MuxerError

MsgType = enum
Data = 0x0
@@ -59,7 +63,10 @@ type
streamId: uint32
length: uint32

proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
proc readHeader(
conn: LPStream
): Future[YamuxHeader] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
var buffer: array[12, byte]
await conn.readExactly(addr buffer[0], 12)

@@ -73,10 +80,10 @@ proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
return result

proc `$`(header: YamuxHeader): string =
result = "{" & $header.msgType & ", "
result &= "{" & header.flags.foldl(if a != "": a & ", " & $b else: $b, "") & "}, "
result &= "streamId: " & $header.streamId & ", "
result &= "length: " & $header.length & "}"
"{" & $header.msgType & ", " &
"{" & header.flags.foldl(if a != "": a & ", " & $b else: $b, "") & "}, " &
"streamId: " & $header.streamId & ", " &
"length: " & $header.length & "}"

proc encode(header: YamuxHeader): array[12, byte] =
result[0] = header.version
@@ -85,10 +92,14 @@ proc encode(header: YamuxHeader): array[12, byte] =
result[4..7] = toBytesBE(header.streamId)
result[8..11] = toBytesBE(header.length)
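The yamux frame header encoded here is a fixed 12-byte, big-endian layout: version, message type, flags, stream id and length. A self-contained sketch of that packing, assuming hand-rolled helpers rather than the module's toBytesBE-based encode:

# Standalone sketch of a 12-byte yamux-style header (network byte order);
# helper names are illustrative, not the module's API.
proc putU16be(buf: var openArray[byte], at: int, v: uint16) =
  buf[at] = byte(v shr 8); buf[at + 1] = byte(v)

proc putU32be(buf: var openArray[byte], at: int, v: uint32) =
  for i in 0 .. 3: buf[at + i] = byte(v shr (8 * (3 - i)))

proc encodeToyHeader(version, msgType: uint8, flags: uint16,
                     streamId, length: uint32): array[12, byte] =
  result[0] = version
  result[1] = msgType
  result.putU16be(2, flags)
  result.putU32be(4, streamId)
  result.putU32be(8, length)

when isMainModule:
  let h = encodeToyHeader(0, 1, 0x0001, 3, 256)
  assert h[3] == 1                    # flags low byte (e.g. SYN)
  assert h[7] == 3                    # stream id low byte
  assert h[10] == 1 and h[11] == 0    # length 256, big-endian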

proc write(conn: LPStream, header: YamuxHeader): Future[void] {.gcsafe.} =
proc write(
conn: LPStream,
header: YamuxHeader
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
trace "write directly on stream", h = $header
var buffer = header.encode()
return conn.write(@buffer)
conn.write(@buffer)

proc ping(T: type[YamuxHeader], flag: MsgFlags, pingData: uint32): T =
T(
@@ -106,11 +117,10 @@ proc goAway(T: type[YamuxHeader], status: GoAwayStatus): T =
)

proc data(
T: type[YamuxHeader],
streamId: uint32,
length: uint32 = 0,
flags: set[MsgFlags] = {},
): T =
T: type[YamuxHeader],
streamId: uint32,
length: uint32 = 0,
flags: set[MsgFlags] = {}): T =
T(
version: YamuxVersion,
msgType: MsgType.Data,
@@ -120,11 +130,10 @@ proc data(
)

proc windowUpdate(
T: type[YamuxHeader],
streamId: uint32,
delta: uint32,
flags: set[MsgFlags] = {},
): T =
T: type[YamuxHeader],
streamId: uint32,
delta: uint32,
flags: set[MsgFlags] = {}): T =
T(
version: YamuxVersion,
msgType: MsgType.WindowUpdate,
@@ -137,12 +146,13 @@ type
ToSend = tuple
data: seq[byte]
sent: int
fut: Future[void]
fut: Future[void].Raising([CancelledError, LPStreamError])
YamuxChannel* = ref object of Connection
id: uint32
recvWindow: int
sendWindow: int
maxRecvWindow: int
maxSendQueueSize: int
conn: Connection
isSrc: bool
opened: bool
@@ -151,7 +161,7 @@ type
recvQueue: seq[byte]
isReset: bool
remoteReset: bool
closedRemotely: Future[void]
closedRemotely: Future[void].Raising([])
closedLocally: bool
receivedData: AsyncEvent
returnedEof: bool
@@ -160,7 +170,7 @@ proc `$`(channel: YamuxChannel): string =
result = if channel.conn.dir == Out: "=> " else: "<= "
result &= $channel.id
var s: seq[string] = @[]
if channel.closedRemotely.done():
if channel.closedRemotely.completed():
s.add("ClosedRemotely")
if channel.closedLocally:
s.add("ClosedLocally")
@@ -169,29 +179,44 @@ proc `$`(channel: YamuxChannel): string =
if s.len > 0:
result &= " {" & s.foldl(if a != "": a & ", " & b else: b, "") & "}"

proc sendQueueBytes(channel: YamuxChannel, limit: bool = false): int =
for (elem, sent, _) in channel.sendQueue:
result.inc(min(elem.len - sent, if limit: channel.maxRecvWindow div 3 else: elem.len - sent))
proc lengthSendQueue(channel: YamuxChannel): int =
## Returns the length of what remains to be sent
##
channel.sendQueue.foldl(a + b.data.len - b.sent, 0)

proc actuallyClose(channel: YamuxChannel) {.async.} =
proc lengthSendQueueWithLimit(channel: YamuxChannel): int =
## Returns the length of what remains to be sent, but limit the size of big messages.
##
# For leniency, limit big messages size to the third of maxSendQueueSize
# This value is arbitrary, it's not in the specs, it permits to store up to
# 3 big messages if the peer is stalling.
channel.sendQueue.foldl(a + min(b.data.len - b.sent, channel.maxSendQueueSize div 3), 0)
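The two queue-length helpers differ only in how they count oversized messages: the limited variant caps each message's contribution at a third of maxSendQueueSize, so a few large writes from a stalling peer don't immediately trip the queue limit. A standalone sketch of both calculations over a toy queue (names are illustrative):

import std/sequtils

# The queue is modelled as (data, sent) pairs - not the real ToSend tuple.
type ToySend = tuple[data: seq[byte], sent: int]

proc lengthToyQueue(q: seq[ToySend]): int =
  ## total bytes still waiting to be sent
  q.foldl(a + b.data.len - b.sent, 0)

proc lengthToyQueueWithLimit(q: seq[ToySend], maxSendQueueSize: int): int =
  ## same, but each message counts for at most a third of the budget,
  ## so a handful of oversized messages don't trip the limit on their own
  q.foldl(a + min(b.data.len - b.sent, maxSendQueueSize div 3), 0)

when isMainModule:
  let q = @[(data: newSeq[byte](500_000), sent: 0),
            (data: newSeq[byte](100), sent: 40)]
  assert lengthToyQueue(q) == 500_060
  assert lengthToyQueueWithLimit(q, 256_000) == 256_000 div 3 + 60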

proc actuallyClose(channel: YamuxChannel) {.async: (raises: []).} =
if channel.closedLocally and channel.sendQueue.len == 0 and
channel.closedRemotely.done():
channel.closedRemotely.completed():
await procCall Connection(channel).closeImpl()

proc remoteClosed(channel: YamuxChannel) {.async.} =
if not channel.closedRemotely.done():
proc remoteClosed(channel: YamuxChannel) {.async: (raises: []).} =
if not channel.closedRemotely.completed():
channel.closedRemotely.complete()
await channel.actuallyClose()

method closeImpl*(channel: YamuxChannel) {.async, gcsafe.} =
method closeImpl*(channel: YamuxChannel) {.async: (raises: []).} =
if not channel.closedLocally:
channel.closedLocally = true
channel.isEof = true

if channel.isReset == false and channel.sendQueue.len == 0:
await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
if not channel.isReset and channel.sendQueue.len == 0:
try: await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
except CancelledError, LPStreamError: discard
await channel.actuallyClose()

proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
proc reset(
channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).} =
# If we reset locally, we want to flush up to a maximum of recvWindow
# bytes. It's because the peer we're connected to can send us data before
# it receives the reset.
if channel.isReset:
return
trace "Reset channel"
@@ -203,20 +228,24 @@ proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
channel.recvQueue = @[]
channel.sendWindow = 0
if not channel.closedLocally:
if isLocal:
if isLocal and not channel.isSending:
try: await channel.conn.write(YamuxHeader.data(channel.id, 0, {Rst}))
except LPStreamEOFError as exc: discard
except LPStreamClosedError as exc: discard
except CancelledError, LPStreamError: discard
await channel.close()
if not channel.closedRemotely.done():
if not channel.closedRemotely.completed():
await channel.remoteClosed()
channel.receivedData.fire()
if not isLocal:
# If we reset locally, we want to flush up to a maximum of recvWindow
# bytes. We use the recvWindow in the proc cleanupChann.
# If the reset is remote, there's no reason to flush anything.
channel.recvWindow = 0

proc updateRecvWindow(channel: YamuxChannel) {.async.} =
proc updateRecvWindow(
channel: YamuxChannel
) {.async: (raises: [CancelledError, LPStreamError]).} =
## Send to the peer a window update when the recvWindow is empty enough
##
# In order to avoid spamming a window update everytime a byte is read,
# we send it everytime half of the maxRecvWindow is read.
let inWindow = channel.recvWindow + channel.recvQueue.len
if inWindow > channel.maxRecvWindow div 2:
return
@@ -230,13 +259,15 @@ proc updateRecvWindow(channel: YamuxChannel) {.async.} =
trace "increasing the recvWindow", delta

method readOnce*(
channel: YamuxChannel,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
channel: YamuxChannel,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
## Read from a yamux channel

if channel.isReset:
raise if channel.remoteReset:
raise
if channel.remoteReset:
newLPStreamResetError()
elif channel.closedLocally:
newLPStreamClosedError()
@@ -246,15 +277,19 @@ method readOnce*(
raise newLPStreamRemoteClosedError()
if channel.recvQueue.len == 0:
channel.receivedData.clear()
await channel.closedRemotely or channel.receivedData.wait()
if channel.closedRemotely.done() and channel.recvQueue.len == 0:
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(channel.closedRemotely, channel.receivedData.wait())
except ValueError: raiseAssert("Futures list is not empty")
if channel.closedRemotely.completed() and channel.recvQueue.len == 0:
channel.returnedEof = true
channel.isEof = true
return 0

let toRead = min(channel.recvQueue.len, nbytes)

var p = cast[ptr UncheckedArray[byte]](pbytes)
toOpenArray(p, 0, nbytes - 1)[0..<toRead] = channel.recvQueue.toOpenArray(0, toRead - 1)
toOpenArray(p, 0, nbytes - 1)[0..<toRead] =
channel.recvQueue.toOpenArray(0, toRead - 1)
channel.recvQueue = channel.recvQueue[toRead..^1]

# We made some room in the recv buffer let the peer know
@@ -262,7 +297,9 @@ method readOnce*(
channel.activity = true
return toRead
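The happy path of readOnce is a bounded copy out of the receive queue into the caller's buffer, followed by trimming the queue. A self-contained sketch of just that copy-and-trim step, using the same toOpenArray slice-assignment idiom (toy proc, not the real stream API):

proc readFromQueue(recvQueue: var seq[byte], pbytes: pointer, nbytes: int): int =
  # Copy at most nbytes out of the queue, then drop what was consumed.
  let toRead = min(recvQueue.len, nbytes)
  if toRead > 0:
    var p = cast[ptr UncheckedArray[byte]](pbytes)
    toOpenArray(p, 0, nbytes - 1)[0 ..< toRead] =
      recvQueue.toOpenArray(0, toRead - 1)
    recvQueue = recvQueue[toRead .. ^1]
  toRead

when isMainModule:
  var q = @[1.byte, 2, 3, 4, 5]
  var buf: array[3, byte]
  assert readFromQueue(q, addr buf[0], buf.len) == 3
  assert buf == [1.byte, 2, 3]
  assert q == @[4.byte, 5]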

proc gotDataFromRemote(channel: YamuxChannel, b: seq[byte]) {.async.} =
proc gotDataFromRemote(
channel: YamuxChannel,
b: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
channel.recvWindow -= b.len
channel.recvQueue = channel.recvQueue.concat(b)
channel.receivedData.fire()
@@ -273,26 +310,27 @@ proc gotDataFromRemote(channel: YamuxChannel, b: seq[byte]) {.async.} =
proc setMaxRecvWindow*(channel: YamuxChannel, maxRecvWindow: int) =
channel.maxRecvWindow = maxRecvWindow

proc trySend(channel: YamuxChannel) {.async.} =
proc trySend(
channel: YamuxChannel
) {.async: (raises: [CancelledError, LPStreamError]).} =
if channel.isSending:
return
channel.isSending = true
defer: channel.isSending = false

while channel.sendQueue.len != 0:
channel.sendQueue.keepItIf(not (it.fut.cancelled() and it.sent == 0))
if channel.sendWindow == 0:
trace "send window empty"
if channel.sendQueueBytes(true) > channel.maxRecvWindow:
debug "channel send queue too big, resetting", maxSendWindow=channel.maxRecvWindow,
currentQueueSize = channel.sendQueueBytes(true)
try:
await channel.reset(true)
except CatchableError as exc:
debug "failed to reset", msg=exc.msg
trace "trying to send while the sendWindow is empty"
if channel.lengthSendQueueWithLimit() > channel.maxSendQueueSize:
trace "channel send queue too big, resetting",
maxSendQueueSize = channel.maxSendQueueSize,
currentQueueSize = channel.lengthSendQueueWithLimit()
await channel.reset(isLocal = true)
break

let
bytesAvailable = channel.sendQueueBytes()
bytesAvailable = channel.lengthSendQueue()
toSend = min(channel.sendWindow, bytesAvailable)
var
sendBuffer = newSeqUninitialized[byte](toSend + 12)
@@ -305,22 +343,33 @@ proc trySend(channel: YamuxChannel) {.async.} =

sendBuffer[0..<12] = header.encode()

var futures: seq[Future[void]]
var futures: seq[Future[void].Raising([CancelledError, LPStreamError])]
while inBuffer < toSend:
# concatenate the different message we try to send into one buffer
let (data, sent, fut) = channel.sendQueue[0]
let bufferToSend = min(data.len - sent, toSend - inBuffer)
sendBuffer.toOpenArray(12, 12 + toSend - 1)[inBuffer..<(inBuffer+bufferToSend)] =
channel.sendQueue[0].data.toOpenArray(sent, sent + bufferToSend - 1)
channel.sendQueue[0].sent.inc(bufferToSend)
if channel.sendQueue[0].sent >= data.len:
# if every byte of the message is in the buffer, add the write future to the
# sequence of futures to be completed (or failed) when the buffer is sent
futures.add(fut)
channel.sendQueue.delete(0)
inBuffer.inc(bufferToSend)

trace "build send buffer", h = $header, msg=string.fromBytes(sendBuffer[12..^1])
trace "try to send the buffer", h = $header
channel.sendWindow.dec(toSend)
try: await channel.conn.write(sendBuffer)
except CatchableError as exc:
try:
await channel.conn.write(sendBuffer)
except CancelledError:
trace "cancelled sending the buffer"
for fut in futures.items():
fut.cancelSoon()
await channel.reset()
break
except LPStreamError as exc:
trace "failed to send the buffer"
let connDown = newLPStreamConnDownError(exc)
for fut in futures.items():
fut.fail(connDown)
@@ -330,7 +379,13 @@ proc trySend(channel: YamuxChannel) {.async.} =
fut.complete()
channel.activity = true
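trySend coalesces as many queued messages as the current send window allows into a single buffer, and only treats a message's write future as finished once every one of its bytes has been packed. A simplified, synchronous sketch of that packing loop (no header, no futures, illustrative names):

import std/sequtils

type ToyQueued = tuple[data: seq[byte], sent: int]

proc packSendBuffer(queue: var seq[ToyQueued], sendWindow: int): seq[byte] =
  # Take up to sendWindow bytes, draining queued messages front to back.
  let toSend = min(sendWindow, queue.foldl(a + b.data.len - b.sent, 0))
  var inBuffer = 0
  result = newSeq[byte](toSend)
  while inBuffer < toSend:
    let (data, sent) = queue[0]
    let chunk = min(data.len - sent, toSend - inBuffer)
    result[inBuffer ..< inBuffer + chunk] = data.toOpenArray(sent, sent + chunk - 1)
    queue[0].sent += chunk
    if queue[0].sent >= data.len:
      queue.delete(0)   # message fully drained; its write could now complete
    inBuffer += chunk

when isMainModule:
  var q = @[(data: @[1.byte, 2, 3], sent: 0), (data: @[4.byte, 5], sent: 0)]
  assert packSendBuffer(q, 4) == @[1.byte, 2, 3, 4]
  assert q == @[(data: @[4.byte, 5], sent: 1)]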

method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
method write*(
channel: YamuxChannel,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
## Write to yamux channel
##
result = newFuture[void]("Yamux Send")
if channel.remoteReset:
result.fail(newLPStreamResetError())
@@ -343,15 +398,22 @@ method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
return result
channel.sendQueue.add((msg, 0, result))
when defined(libp2p_yamux_metrics):
libp2p_yamux_recv_queue.observe(channel.sendQueueBytes().int64)
libp2p_yamux_send_queue.observe(channel.lengthSendQueue().int64)
asyncSpawn channel.trySend()

proc open*(channel: YamuxChannel) {.async, gcsafe.} =
proc open(
channel: YamuxChannel
) {.async: (raises: [CancelledError, LPStreamError]).} =
## Open a yamux channel by sending a window update with Syn or Ack flag
##
if channel.opened:
trace "Try to open channel twice"
return
channel.opened = true
await channel.conn.write(YamuxHeader.data(channel.id, 0, {if channel.isSrc: Syn else: Ack}))
await channel.conn.write(YamuxHeader.windowUpdate(
channel.id,
uint32(max(channel.maxRecvWindow - YamuxDefaultWindowSize, 0)),
{if channel.isSrc: Syn else: Ack}))
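Because a yamux peer always starts from the spec's 256 kB initial window, opening with a larger receive window only needs to advertise the excess. A one-line sketch of that delta (the constant name is illustrative):

const ToyDefaultWindow = 256_000

proc openingWindowDelta(maxRecvWindow: int): uint32 =
  # The peer already assumes the default window; only advertise the surplus.
  uint32(max(maxRecvWindow - ToyDefaultWindow, 0))

when isMainModule:
  assert openingWindowDelta(256_000) == 0        # default window: nothing to add
  assert openingWindowDelta(1_000_000) == 744_000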

method getWrapped*(channel: YamuxChannel): Connection = channel.conn

@@ -362,48 +424,73 @@ type
currentId: uint32
isClosed: bool
maxChannCount: int
windowSize: int
maxSendQueueSize: int
inTimeout: Duration
outTimeout: Duration

proc lenBySrc(m: Yamux, isSrc: bool): int =
for v in m.channels.values():
if v.isSrc == isSrc: result += 1

proc cleanupChann(m: Yamux, channel: YamuxChannel) {.async.} =
await channel.join()
proc cleanupChannel(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
try:
await channel.join()
except CancelledError:
discard
m.channels.del(channel.id)
when defined(libp2p_yamux_metrics):
libp2p_yamux_channels.set(m.lenBySrc(channel.isSrc).int64, [$channel.isSrc, $channel.peerId])
libp2p_yamux_channels.set(
m.lenBySrc(channel.isSrc).int64, [$channel.isSrc, $channel.peerId])
if channel.isReset and channel.recvWindow > 0:
m.flushed[channel.id] = channel.recvWindow

proc createStream(m: Yamux, id: uint32, isSrc: bool): YamuxChannel =
result = YamuxChannel(
proc createStream(
m: Yamux, id: uint32, isSrc: bool,
recvWindow: int, maxSendQueueSize: int): YamuxChannel =
# During initialization, recvWindow can be larger than maxRecvWindow.
# This is because the peer we're connected to will always assume
# that the initial recvWindow is 256k.
# To solve this contradiction, no updateWindow will be sent until
# recvWindow is less than maxRecvWindow
proc newClosedRemotelyFut(): Future[void] {.async: (raises: [], raw: true).} =
newFuture[void]()
var stream = YamuxChannel(
id: id,
maxRecvWindow: DefaultWindowSize,
recvWindow: DefaultWindowSize,
sendWindow: DefaultWindowSize,
maxRecvWindow: recvWindow,
recvWindow: if recvWindow > YamuxDefaultWindowSize: recvWindow else: YamuxDefaultWindowSize,
sendWindow: YamuxDefaultWindowSize,
maxSendQueueSize: maxSendQueueSize,
isSrc: isSrc,
conn: m.connection,
receivedData: newAsyncEvent(),
closedRemotely: newFuture[void]()
closedRemotely: newClosedRemotelyFut()
)
result.objName = "YamuxStream"
result.dir = if isSrc: Direction.Out else: Direction.In
result.timeoutHandler = proc(): Future[void] {.gcsafe.} =
trace "Idle timeout expired, resetting YamuxChannel"
result.reset()
result.initStream()
result.peerId = m.connection.peerId
result.observedAddr = m.connection.observedAddr
result.transportDir = m.connection.transportDir
stream.objName = "YamuxStream"
if isSrc:
stream.dir = Direction.Out
stream.timeout = m.outTimeout
else:
stream.dir = Direction.In
stream.timeout = m.inTimeout
stream.timeoutHandler =
proc(): Future[void] {.async: (raises: [], raw: true).} =
trace "Idle timeout expired, resetting YamuxChannel"
stream.reset(isLocal = true)
stream.initStream()
stream.peerId = m.connection.peerId
stream.observedAddr = m.connection.observedAddr
stream.transportDir = m.connection.transportDir
when defined(libp2p_agents_metrics):
result.shortAgent = m.connection.shortAgent
m.channels[id] = result
asyncSpawn m.cleanupChann(result)
stream.shortAgent = m.connection.shortAgent
m.channels[id] = stream
asyncSpawn m.cleanupChannel(stream)
trace "created channel", id, pid=m.connection.peerId
when defined(libp2p_yamux_metrics):
libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $result.peerId])
libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $stream.peerId])
return stream

method close*(m: Yamux) {.async.} =
method close*(m: Yamux) {.async: (raises: []).} =
if m.isClosed == true:
trace "Already closed"
return
@@ -412,24 +499,21 @@ method close*(m: Yamux) {.async.} =
trace "Closing yamux"
let channels = toSeq(m.channels.values())
for channel in channels:
await channel.reset(true)
await channel.reset(isLocal = true)
try: await m.connection.write(YamuxHeader.goAway(NormalTermination))
except CatchableError as exc: trace "failed to send goAway", msg=exc.msg
except CancelledError as exc: trace "cancelled sending goAway", msg = exc.msg
except LPStreamError as exc: trace "failed to send goAway", msg = exc.msg
await m.connection.close()
trace "Closed yamux"

proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
## call the muxer stream handler for this channel
proc handleStream(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
## Call the muxer stream handler for this channel
##
try:
await m.streamHandler(channel)
trace "finished handling stream"
doAssert(channel.isClosed, "connection not closed by handler!")
except CatchableError as exc:
trace "Exception in yamux stream handler", msg = exc.msg
await channel.reset()
await m.streamHandler(channel)
trace "finished handling stream"
doAssert(channel.isClosed, "connection not closed by handler!")

method handle*(m: Yamux) {.async, gcsafe.} =
method handle*(m: Yamux) {.async: (raises: []).} =
trace "Starting yamux handler", pid=m.connection.peerId
try:
while not m.connection.atEof:
@@ -453,28 +537,39 @@ method handle*(m: Yamux) {.async, gcsafe.} =
else:
if header.streamId in m.flushed:
m.flushed.del(header.streamId)

if header.streamId mod 2 == m.currentId mod 2:
debug "Peer used our reserved stream id, skipping", id=header.streamId, currentId=m.currentId, peerId=m.connection.peerId
raise newException(YamuxError, "Peer used our reserved stream id")
let newStream = m.createStream(header.streamId, false)
let newStream = m.createStream(header.streamId, false, m.windowSize, m.maxSendQueueSize)
if m.channels.len >= m.maxChannCount:
await newStream.reset()
continue
await newStream.open()
asyncSpawn m.handleStream(newStream)
elif header.streamId notin m.channels:
if header.streamId notin m.flushed:
raise newException(YamuxError, "Unknown stream ID: " & $header.streamId)
elif header.msgType == Data:
# Flush the data
m.flushed[header.streamId].dec(int(header.length))
if m.flushed[header.streamId] < 0:
raise newException(YamuxError, "Peer exhausted the recvWindow after reset")
if header.length > 0:
var buffer = newSeqUninitialized[byte](header.length)
await m.connection.readExactly(addr buffer[0], int(header.length))
# Flush the data
m.flushed.withValue(header.streamId, flushed):
if header.msgType == Data:
flushed[].dec(int(header.length))
if flushed[] < 0:
raise newException(YamuxError,
"Peer exhausted the recvWindow after reset")
do:
raise newException(YamuxError,
"Unknown stream ID: " & $header.streamId)
if header.length > 0:
var buffer = newSeqUninitialized[byte](header.length)
await m.connection.readExactly(
addr buffer[0], int(header.length))
continue
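The flushed table gives a stream that was reset locally a grace budget equal to its remaining receive window: incoming data for that id is discarded while the budget lasts and becomes a protocol error once it runs out. A standalone sketch of that bookkeeping using Table.withValue, as in the code above (names are illustrative):

import std/tables

proc flushAfterReset(flushed: var Table[uint32, int], id: uint32, len: int): bool =
  ## Returns false if the peer exceeded the budget left for this reset stream
  ## (or the stream id is unknown).
  flushed.withValue(id, remaining):
    remaining[] -= len
    return remaining[] >= 0
  do:
    return false

when isMainModule:
  var flushed = {1'u32: 1000}.toTable
  assert flushAfterReset(flushed, 1, 600)
  assert flushAfterReset(flushed, 1, 400)
  assert not flushAfterReset(flushed, 1, 1)    # budget exhausted
  assert not flushAfterReset(flushed, 2, 10)   # never reset / unknown id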

let channel = m.channels[header.streamId]
let channel =
try:
m.channels[header.streamId]
except KeyError:
raise newException(YamuxError,
"Stream was cleaned up before handling data: " & $header.streamId)

if header.msgType == WindowUpdate:
channel.sendWindow += int(header.length)
@@ -487,7 +582,7 @@ method handle*(m: Yamux) {.async, gcsafe.} =
if header.length > 0:
var buffer = newSeqUninitialized[byte](header.length)
await m.connection.readExactly(addr buffer[0], int(header.length))
trace "Msg Rcv", msg=string.fromBytes(buffer)
trace "Msg Rcv", msg=shortLog(buffer)
await channel.gotDataFromRemote(buffer)

if MsgFlags.Fin in header.flags:
@@ -496,11 +591,24 @@ method handle*(m: Yamux) {.async, gcsafe.} =
if MsgFlags.Rst in header.flags:
trace "remote reset channel"
await channel.reset()
except CancelledError as exc:
debug "Unexpected cancellation in yamux handler", msg = exc.msg
except LPStreamEOFError as exc:
trace "Stream EOF", msg = exc.msg
except LPStreamError as exc:
debug "Unexpected stream exception in yamux read loop", msg = exc.msg
except YamuxError as exc:
trace "Closing yamux connection", error=exc.msg
await m.connection.write(YamuxHeader.goAway(ProtocolError))
try:
await m.connection.write(YamuxHeader.goAway(ProtocolError))
except CancelledError, LPStreamError:
discard
except MuxerError as exc:
debug "Unexpected muxer exception in yamux read loop", msg = exc.msg
try:
await m.connection.write(YamuxHeader.goAway(ProtocolError))
except CancelledError, LPStreamError:
discard
finally:
await m.close()
trace "Stopped yamux handler"
@@ -509,21 +617,32 @@ method getStreams*(m: Yamux): seq[Connection] =
for c in m.channels.values: result.add(c)

method newStream*(
m: Yamux,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =

m: Yamux,
name: string = "",
lazy: bool = false
): Future[Connection] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
if m.channels.len > m.maxChannCount - 1:
raise newException(TooManyChannels, "max allowed channel count exceeded")
let stream = m.createStream(m.currentId, true)
let stream = m.createStream(m.currentId, true, m.windowSize, m.maxSendQueueSize)
m.currentId += 2
if not lazy:
await stream.open()
return stream

proc new*(T: type[Yamux], conn: Connection, maxChannCount: int = MaxChannelCount): T =
proc new*(
T: type[Yamux], conn: Connection,
maxChannCount: int = MaxChannelCount,
windowSize: int = YamuxDefaultWindowSize,
maxSendQueueSize: int = MaxSendQueueSize,
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes): T =
T(
connection: conn,
currentId: if conn.dir == Out: 1 else: 2,
maxChannCount: maxChannCount
maxChannCount: maxChannCount,
windowSize: windowSize,
maxSendQueueSize: maxSendQueueSize,
inTimeout: inTimeout,
outTimeout: outTimeout
)

@@ -52,7 +52,7 @@ proc resolveOneAddress(
ma: MultiAddress,
domain: Domain = Domain.AF_UNSPEC,
prefix = ""): Future[seq[MultiAddress]]
{.async, raises: [MaError, TransportAddressError].} =
{.async.} =
#Resolve a single address
var pbuf: array[2, byte]


@@ -140,7 +140,7 @@ proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[voi

proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
raise newException(AutonatError, "Received malformed message")

@@ -162,7 +162,7 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy
proc addressMapper(
self: AutonatService,
peerStore: PeerStore,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =

if self.networkReachability != NetworkReachability.Reachable:
return listenAddrs
@@ -179,7 +179,7 @@ proc addressMapper(
return addrs

method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, switch.peerStore, listenAddrs)

info "Setting up AutonatService"

@@ -66,7 +66,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:

if peerDialableAddrs.len > self.maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<self.maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.In))
try:
discard await anyCompleted(futs).wait(self.connectTimeout)
debug "Dcutr initiator has directly connected to the remote peer."

@@ -56,5 +56,10 @@ proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
await conn.writeLp(pb.buffer)

proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] =
addrs.filterIt(TCP.match(it))
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] {.raises: [LPError]} =
var result = newSeq[MultiAddress]()
for a in addrs:
# This is necessary to also accept addrs like /ip4/198.51.100/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N
if [TCP, mapAnd(TCP_DNS, P2PPattern), mapAnd(TCP_IP, P2PPattern)].anyIt(it.match(a)):
result.add(a[0..1].tryGet())
return result

@@ -19,7 +19,6 @@ import ../../protocol,
../../../switch,
../../../utils/future

export DcutrError
export chronicles

type Dcutr* = ref object of LPProtocol
@@ -29,7 +28,7 @@ logScope:

proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDialableAddrs = 8): T =

proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(stream: Connection, proto: string) {.async.} =
var peerDialableAddrs: seq[MultiAddress]
try:
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
@@ -56,7 +55,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi

if peerDialableAddrs.len > maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, upgradeDir = Direction.In))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.Out))
try:
discard await anyCompleted(futs).wait(connectTimeout)
debug "Dcutr receiver has directly connected to the remote peer."
@@ -65,14 +64,14 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
except CancelledError as err:
raise err
except AllFuturesFailedError as err:
debug "Dcutr receiver could not connect to the remote peer, all connect attempts failed", peerDialableAddrs, msg = err.msg
raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts failed", err)
debug "Dcutr receiver could not connect to the remote peer, " &
"all connect attempts failed", peerDialableAddrs, msg = err.msg
except AsyncTimeoutError as err:
debug "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg
raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", err)
debug "Dcutr receiver could not connect to the remote peer, " &
"all connect attempts timed out", peerDialableAddrs, msg = err.msg
except CatchableError as err:
warn "Unexpected error when Dcutr receiver tried to connect to the remote peer", msg = err.msg
raise newException(DcutrError, "Unexpected error when Dcutr receiver tried to connect to the remote peer", err)
warn "Unexpected error when Dcutr receiver tried to connect " &
"to the remote peer", msg = err.msg

let self = T()
self.handler = handleStream

@@ -189,7 +189,7 @@ proc dialPeerV2*(
conn.limitData = msgRcvFromRelay.limit.data
return conn

proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@@ -201,7 +201,7 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
trace "Unexpected client / relayv2 handshake", msgType=msg.msgType
await sendStopError(conn, MalformedMessage)

proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, gcsafe.} =
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
let src = msg.srcPeer.valueOr:
await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
return
@@ -226,7 +226,7 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, g
if cl.onNewConnection != nil: await cl.onNewConnection(conn, 0, 0)
else: await conn.close()

proc handleStreamV1(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@@ -266,7 +266,7 @@ proc new*(T: typedesc[RelayClient], canHop: bool = false,
maxCircuitPerPeer: maxCircuitPerPeer,
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV1Codec: await cl.handleStreamV1(conn)

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -23,11 +23,15 @@ type
method readOnce*(
self: RelayConnection,
pbytes: pointer,
nbytes: int): Future[int] {.async.} =
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
self.activity = true
return await self.conn.readOnce(pbytes, nbytes)
self.conn.readOnce(pbytes, nbytes)

method write*(self: RelayConnection, msg: seq[byte]): Future[void] {.async.} =
method write*(
self: RelayConnection,
msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
self.dataSent.inc(msg.len)
if self.limitData != 0 and self.dataSent > self.limitData:
await self.close()
@@ -35,24 +39,25 @@ method write*(self: RelayConnection, msg: seq[byte]): Future[void] {.async.} =
self.activity = true
await self.conn.write(msg)

method closeImpl*(self: RelayConnection): Future[void] {.async.} =
method closeImpl*(self: RelayConnection): Future[void] {.async: (raises: []).} =
await self.conn.close()
await procCall Connection(self).closeImpl()

method getWrapped*(self: RelayConnection): Connection = self.conn

proc new*(
T: typedesc[RelayConnection],
conn: Connection,
limitDuration: uint32,
limitData: uint64): T =
T: typedesc[RelayConnection],
conn: Connection,
limitDuration: uint32,
limitData: uint64): T =
let rc = T(conn: conn, limitDuration: limitDuration, limitData: limitData)
rc.dir = conn.dir
rc.initStream()
if limitDuration > 0:
proc checkDurationConnection() {.async.} =
let sleep = sleepAsync(limitDuration.seconds())
await sleep or conn.join()
if sleep.finished: await conn.close()
else: sleep.cancel()
proc checkDurationConnection() {.async: (raises: []).} =
try:
await noCancel conn.join().wait(limitDuration.seconds())
except AsyncTimeoutError:
await conn.close()
asyncSpawn checkDurationConnection()
return rc
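The rewritten checkDurationConnection is a watchdog: wait for the relayed session to finish on its own, and close it when the time budget runs out first. A rough standalone equivalent using the standard library's asyncdispatch (the real code uses chronos' wait/noCancel); names are illustrative:

import std/asyncdispatch

proc watchDuration(sessionDone: Future[void], limitMs: int): Future[string] {.async.} =
  # withTimeout completes with true if the session ended within the budget.
  if await sessionDone.withTimeout(limitMs):
    return "session ended on its own"
  else:
    return "limit reached, closing connection"

when isMainModule:
  let session = newFuture[void]("session")   # never completed in this demo
  let verdict = waitFor watchDuration(session, 50)
  assert verdict == "limit reached, closing connection"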

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -105,7 +105,7 @@ proc isRelayed*(conn: Connection): bool =
wrappedConn = wrappedConn.getWrapped()
return false

proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleReserve(r: Relay, conn: Connection) {.async.} =
if conn.isRelayed():
trace "reservation attempt over relay connection", pid = conn.peerId
await sendHopStatus(conn, PermissionDenied)
@@ -128,7 +128,7 @@ proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =

proc handleConnect(r: Relay,
connSrc: Connection,
msg: HopMessage) {.async, gcsafe.} =
msg: HopMessage) {.async.} =
if connSrc.isRelayed():
trace "connection attempt over relay connection"
await sendHopStatus(connSrc, PermissionDenied)
@@ -200,7 +200,7 @@ proc handleConnect(r: Relay,
await rconnDst.close()
await bridge(rconnSrc, rconnDst)

proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@@ -214,7 +214,7 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =

# Relay V1

proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsafe.} =
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
r.streamCount.inc()
defer: r.streamCount.dec()
if r.streamCount + r.rsvp.len() >= r.maxCircuit:
@@ -293,7 +293,7 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
trace "relaying connection", src, dst
await bridge(connSrc, connDst)

proc handleStreamV1(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@@ -336,7 +336,7 @@ proc new*(T: typedesc[Relay],
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)

proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV2HopCodec: await r.handleHopStreamV2(conn)
@@ -361,17 +361,25 @@ proc deletesReservation(r: Relay) {.async.} =
if n > r.rsvp[k]:
r.rsvp.del(k)

method start*(r: Relay) {.async.} =
method start*(
r: Relay
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if not r.reservationLoop.isNil:
warn "Starting relay twice"
return
return fut
r.reservationLoop = r.deletesReservation()
r.started = true
fut

method stop*(r: Relay) {.async.} =
method stop*(r: Relay): Future[void] {.async: (raises: [], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if r.reservationLoop.isNil:
warn "Stopping relay without starting it"
return
return fut
r.started = false
r.reservationLoop.cancel()
r.reservationLoop = nil
fut

@@ -37,24 +37,24 @@ method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
self.client.onNewConnection = proc(
conn: Connection,
duration: uint32 = 0,
data: uint64 = 0) {.async, gcsafe, raises: [].} =
data: uint64 = 0) {.async.} =
await self.queue.addLast(RelayConnection.new(conn, duration, data))
await conn.join()
self.selfRunning = true
await procCall Transport(self).start(ma)
trace "Starting Relay transport"

method stop*(self: RelayTransport) {.async, gcsafe.} =
method stop*(self: RelayTransport) {.async.} =
self.running = false
self.selfRunning = false
self.client.onNewConnection = nil
while not self.queue.empty():
await self.queue.popFirstNoWait().close()

method accept*(self: RelayTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: RelayTransport): Future[Connection] {.async.} =
result = await self.queue.popFirst()

proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async, gcsafe.} =
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
let
sma = toSeq(ma.items())
relayAddrs = sma[0..sma.len-4].mapIt(it.tryGet()).foldl(a & b)
@@ -90,7 +90,7 @@ method dial*(
self: RelayTransport,
hostname: string,
ma: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
peerId.withValue(pid):
let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
result = await self.dial(address)

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -21,63 +21,77 @@ const
RelayV2HopCodec* = "/libp2p/circuit/relay/0.2.0/hop"
RelayV2StopCodec* = "/libp2p/circuit/relay/0.2.0/stop"

proc sendStatus*(conn: Connection, code: StatusV1) {.async, gcsafe.} =
proc sendStatus*(
conn: Connection,
code: StatusV1
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
let
msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
msg = RelayMessage(
msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
conn.writeLp(pb.buffer)

proc sendHopStatus*(conn: Connection, code: StatusV2) {.async, gcsafe.} =
proc sendHopStatus*(
conn: Connection,
code: StatusV2
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
trace "send hop relay/v2 status", status = $code & "(" & $ord(code) & ")"
let
msg = HopMessage(msgType: HopMessageType.Status, status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
conn.writeLp(pb.buffer)

proc sendStopStatus*(conn: Connection, code: StatusV2) {.async.} =
proc sendStopStatus*(
conn: Connection,
code: StatusV2
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
trace "send stop relay/v2 status", status = $code & " (" & $ord(code) & ")"
let
msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
conn.writeLp(pb.buffer)

proc bridge*(connSrc: Connection, connDst: Connection) {.async.} =
proc bridge*(
connSrc: Connection,
|
||||
connDst: Connection) {.async: (raises: [CancelledError]).} =
|
||||
const bufferSize = 4096
|
||||
var
|
||||
bufSrcToDst: array[bufferSize, byte]
|
||||
bufDstToSrc: array[bufferSize, byte]
|
||||
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.high + 1)
|
||||
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.high + 1)
|
||||
bytesSendFromSrcToDst = 0
|
||||
bytesSendFromDstToSrc = 0
|
||||
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.len)
|
||||
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.len)
|
||||
bytesSentFromSrcToDst = 0
|
||||
bytesSentFromDstToSrc = 0
|
||||
bufRead: int
|
||||
|
||||
try:
|
||||
while not connSrc.closed() and not connDst.closed():
|
||||
await futSrc or futDst
|
||||
try: # https://github.com/status-im/nim-chronos/issues/516
|
||||
discard await race(futSrc, futDst)
|
||||
except ValueError: raiseAssert("Futures list is not empty")
|
||||
if futSrc.finished():
|
||||
bufRead = await futSrc
|
||||
if bufRead > 0:
|
||||
bytesSendFromSrcToDst.inc(bufRead)
|
||||
await connDst.write(@bufSrcToDst[0..<bufRead])
|
||||
zeroMem(addr(bufSrcToDst), bufSrcToDst.high + 1)
|
||||
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.high + 1)
|
||||
bytesSentFromSrcToDst.inc(bufRead)
|
||||
await connDst.write(@bufSrcToDst[0 ..< bufRead])
|
||||
zeroMem(addr bufSrcToDst[0], bufSrcToDst.len)
|
||||
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.len)
|
||||
if futDst.finished():
|
||||
bufRead = await futDst
|
||||
if bufRead > 0:
|
||||
bytesSendFromDstToSrc += bufRead
|
||||
await connSrc.write(bufDstToSrc[0..<bufRead])
|
||||
zeroMem(addr(bufDstToSrc), bufDstToSrc.high + 1)
|
||||
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.high + 1)
|
||||
bytesSentFromDstToSrc += bufRead
|
||||
await connSrc.write(bufDstToSrc[0 ..< bufRead])
|
||||
zeroMem(addr bufDstToSrc[0], bufDstToSrc.len)
|
||||
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.len)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
except LPStreamError as exc:
|
||||
if connSrc.closed() or connSrc.atEof():
|
||||
trace "relay src closed connection", src = connSrc.peerId
|
||||
if connDst.closed() or connDst.atEof():
|
||||
trace "relay dst closed connection", dst = connDst.peerId
|
||||
trace "relay error", exc=exc.msg
|
||||
trace "end relaying", bytesSendFromSrcToDst, bytesSendFromDstToSrc
|
||||
trace "end relaying", bytesSentFromSrcToDst, bytesSentFromDstToSrc
|
||||
await futSrc.cancelAndWait()
|
||||
await futDst.cancelAndWait()
|
||||
|
||||
@@ -21,6 +21,7 @@ import ../protobuf/minprotobuf,
|
||||
../peerid,
|
||||
../crypto/crypto,
|
||||
../multiaddress,
|
||||
../multicodec,
|
||||
../protocols/protocol,
|
||||
../utility,
|
||||
../errors,
|
||||
@@ -77,7 +78,7 @@ chronicles.expandIt(IdentifyInfo):
|
||||
signedPeerRecord =
|
||||
# The SPR contains the same data as the identify message
|
||||
# would be cumbersome to log
|
||||
if iinfo.signedPeerRecord.isSome(): "Some"
|
||||
if it.signedPeerRecord.isSome(): "Some"
|
||||
else: "None"
|
||||
|
||||
proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool): ProtoBuffer
|
||||
@@ -133,24 +134,24 @@ proc decodeMsg*(buf: seq[byte]): Opt[IdentifyInfo] =
|
||||
if ? pb.getField(6, agentVersion).toOpt():
|
||||
iinfo.agentVersion = some(agentVersion)
|
||||
|
||||
debug "decodeMsg: decoded identify", iinfo
|
||||
Opt.some(iinfo)
|
||||
|
||||
proc new*(
|
||||
T: typedesc[Identify],
|
||||
peerInfo: PeerInfo,
|
||||
sendSignedPeerRecord = false
|
||||
sendSignedPeerRecord = false,
|
||||
observedAddrManager = ObservedAddrManager.new(),
|
||||
): T =
|
||||
let identify = T(
|
||||
peerInfo: peerInfo,
|
||||
sendSignedPeerRecord: sendSignedPeerRecord,
|
||||
observedAddrManager: ObservedAddrManager.new(),
|
||||
observedAddrManager: observedAddrManager,
|
||||
)
|
||||
identify.init()
|
||||
identify
|
||||
|
||||
method init*(p: Identify) =
|
||||
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
|
||||
proc handle(conn: Connection, proto: string) {.async.} =
|
||||
try:
|
||||
trace "handling identify request", conn
|
||||
var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
|
||||
@@ -168,7 +169,7 @@ method init*(p: Identify) =
|
||||
|
||||
proc identify*(self: Identify,
|
||||
conn: Connection,
|
||||
remotePeerId: PeerId): Future[IdentifyInfo] {.async, gcsafe.} =
|
||||
remotePeerId: PeerId): Future[IdentifyInfo] {.async.} =
|
||||
trace "initiating identify", conn
|
||||
var message = await conn.readLp(64*1024)
|
||||
if len(message) == 0:
|
||||
@@ -176,6 +177,7 @@ proc identify*(self: Identify,
|
||||
raise newException(IdentityInvalidMsgError, "Empty message received!")
|
||||
|
||||
var info = decodeMsg(message).valueOr: raise newException(IdentityInvalidMsgError, "Incorrect message received!")
|
||||
debug "identify: decoded message", conn, info
|
||||
let
|
||||
pubkey = info.pubkey.valueOr: raise newException(IdentityInvalidMsgError, "No pubkey in identify")
|
||||
peer = PeerId.init(pubkey).valueOr: raise newException(IdentityInvalidMsgError, $error)
|
||||
@@ -186,8 +188,12 @@ proc identify*(self: Identify,
|
||||
info.peerId = peer
|
||||
|
||||
info.observedAddr.withValue(observed):
|
||||
if not self.observedAddrManager.addObservation(observed):
|
||||
debug "Observed address is not valid", observedAddr = observed
|
||||
# Currently, we use the ObservedAddrManager only to find our dialable external NAT address. Therefore, addresses
|
||||
# like "...\p2p-circuit\p2p\..." and "\p2p\..." are not useful to us.
|
||||
if observed.contains(multiCodec("p2p-circuit")).get(false) or P2PPattern.matchPartial(observed):
|
||||
trace "Not adding address to ObservedAddrManager.", observed
|
||||
elif not self.observedAddrManager.addObservation(observed):
|
||||
trace "Observed address is not valid.", observedAddr = observed
|
||||
return info
|
||||
|
||||
proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} =
|
||||
@@ -198,13 +204,14 @@ proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.pu
|
||||
identifypush
|
||||
|
||||
proc init*(p: IdentifyPush) =
|
||||
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
|
||||
proc handle(conn: Connection, proto: string) {.async.} =
|
||||
trace "handling identify push", conn
|
||||
try:
|
||||
var message = await conn.readLp(64*1024)
|
||||
|
||||
var identInfo = decodeMsg(message).valueOr:
|
||||
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
|
||||
debug "identify push: decoded message", conn, identInfo
|
||||
|
||||
identInfo.pubkey.withValue(pubkey):
|
||||
let receivedPeerId = PeerId.init(pubkey).tryGet()
|
||||
|
||||
@@ -27,7 +27,7 @@ type Perf* = ref object of LPProtocol
|
||||
|
||||
proc new*(T: typedesc[Perf]): T {.public.} =
|
||||
var p = T()
|
||||
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
|
||||
proc handle(conn: Connection, proto: string) {.async.} =
|
||||
var bytesRead = 0
|
||||
try:
|
||||
trace "Received benchmark performance check", conn
|
||||
|
||||
@@ -51,7 +51,7 @@ proc new*(T: typedesc[Ping], handler: PingHandler = nil, rng: ref HmacDrbgContex
|
||||
ping
|
||||
|
||||
method init*(p: Ping) =
|
||||
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
|
||||
proc handle(conn: Connection, proto: string) {.async.} =
|
||||
try:
|
||||
trace "handling ping", conn
|
||||
var buf: array[PingSize, byte]
|
||||
@@ -71,7 +71,7 @@ method init*(p: Ping) =
|
||||
proc ping*(
|
||||
p: Ping,
|
||||
conn: Connection,
|
||||
): Future[Duration] {.async, gcsafe, public.} =
|
||||
): Future[Duration] {.async, public.} =
|
||||
## Sends ping to `conn`, returns the delay
|
||||
|
||||
trace "initiating ping", conn
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -31,8 +31,19 @@ type
|
||||
maxIncomingStreams: Opt[int]
|
||||
|
||||
method init*(p: LPProtocol) {.base, gcsafe.} = discard
|
||||
method start*(p: LPProtocol) {.async, base.} = p.started = true
|
||||
method stop*(p: LPProtocol) {.async, base.} = p.started = false
|
||||
|
||||
method start*(
|
||||
p: LPProtocol) {.async: (raises: [CancelledError], raw: true), base.} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
p.started = true
|
||||
fut
|
||||
|
||||
method stop*(p: LPProtocol) {.async: (raises: [], raw: true), base.} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
p.started = false
|
||||
fut
|
||||
|
||||
proc maxIncomingStreams*(p: LPProtocol): int =
|
||||
p.maxIncomingStreams.get(DefaultMaxIncomingStreams)
|
||||
|
||||
@@ -157,7 +157,7 @@ method rpcHandler*(f: FloodSub,
|
||||
|
||||
# In theory, if topics are the same in all messages, we could batch - we'd
|
||||
# also have to be careful to only include validated messages
|
||||
f.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
|
||||
f.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)
|
||||
trace "Forwared message to peers", peers = toSendPeers.len
|
||||
|
||||
f.updateMetrics(rpcMsg)
|
||||
@@ -219,7 +219,7 @@ method publish*(f: FloodSub,
|
||||
return 0
|
||||
|
||||
# Try to send to all peers that are known to be interested
|
||||
f.broadcast(peers, RPCMsg(messages: @[msg]))
|
||||
f.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
|
||||
|
||||
when defined(libp2p_expensive_metrics):
|
||||
libp2p_pubsub_messages_published.inc(labelValues = [topic])
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -46,6 +46,9 @@ declareCounter(libp2p_gossipsub_saved_bytes, "bytes saved by gossipsub optimizat
|
||||
declareCounter(libp2p_gossipsub_duplicate, "number of duplicates received")
|
||||
declareCounter(libp2p_gossipsub_received, "number of messages received (deduplicated)")
|
||||
|
||||
when defined(libp2p_expensive_metrics):
|
||||
declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])
|
||||
|
||||
proc init*(_: type[GossipSubParams]): GossipSubParams =
|
||||
GossipSubParams(
|
||||
explicit: true,
|
||||
@@ -79,7 +82,6 @@ proc init*(_: type[GossipSubParams]): GossipSubParams =
|
||||
disconnectBadPeers: false,
|
||||
enablePX: false,
|
||||
bandwidthEstimatebps: 100_000_000, # 100 Mbps or 12.5 MBps
|
||||
iwantTimeout: 3 * GossipSubHeartbeatInterval,
|
||||
overheadRateLimit: Opt.none(tuple[bytes: int, interval: Duration]),
|
||||
disconnectPeerAboveRateLimit: false
|
||||
)
|
||||
@@ -218,6 +220,8 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =
|
||||
for topic, info in stats[].topicInfos.mpairs:
|
||||
info.firstMessageDeliveries = 0
|
||||
|
||||
pubSubPeer.stopSendNonPriorityTask()
|
||||
|
||||
procCall FloodSub(g).unsubscribePeer(peer)
|
||||
|
||||
proc handleSubscribe*(g: GossipSub,
|
||||
@@ -277,12 +281,28 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
|
||||
respControl.prune.add(g.handleGraft(peer, control.graft))
|
||||
let messages = g.handleIWant(peer, control.iwant)
|
||||
|
||||
if
|
||||
respControl.prune.len > 0 or
|
||||
respControl.iwant.len > 0 or
|
||||
messages.len > 0:
|
||||
# iwant and prunes from here, also messages
|
||||
let
|
||||
isPruneNotEmpty = respControl.prune.len > 0
|
||||
isIWantNotEmpty = respControl.iwant.len > 0
|
||||
|
||||
if isPruneNotEmpty or isIWantNotEmpty:
|
||||
|
||||
if isIWantNotEmpty:
|
||||
libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)
|
||||
|
||||
if isPruneNotEmpty:
|
||||
for prune in respControl.prune:
|
||||
if g.knownTopics.contains(prune.topicId):
|
||||
libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicId])
|
||||
else:
|
||||
libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])
|
||||
|
||||
trace "sending control message", msg = shortLog(respControl), peer
|
||||
g.send(
|
||||
peer,
|
||||
RPCMsg(control: some(respControl)), isHighPriority = true)
|
||||
|
||||
if messages.len > 0:
|
||||
for smsg in messages:
|
||||
for topic in smsg.topicIds:
|
||||
if g.knownTopics.contains(topic):
|
||||
@@ -290,18 +310,11 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
|
||||
else:
|
||||
libp2p_pubsub_broadcast_messages.inc(labelValues = ["generic"])
|
||||
|
||||
libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)
|
||||
|
||||
for prune in respControl.prune:
|
||||
if g.knownTopics.contains(prune.topicId):
|
||||
libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicId])
|
||||
else:
|
||||
libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])
|
||||
|
||||
trace "sending control message", msg = shortLog(respControl), peer
|
||||
# iwant replies have lower priority
|
||||
trace "sending iwant reply messages", peer
|
||||
g.send(
|
||||
peer,
|
||||
RPCMsg(control: some(respControl), messages: messages))
|
||||
RPCMsg(messages: messages), isHighPriority = false)
|
||||
|
||||
proc validateAndRelay(g: GossipSub,
|
||||
msg: Message,
|
||||
@@ -319,7 +332,7 @@ proc validateAndRelay(g: GossipSub,
|
||||
of ValidationResult.Reject:
|
||||
debug "Dropping message after validation, reason: reject",
|
||||
msgId = shortLog(msgId), peer
|
||||
g.punishInvalidMessage(peer, msg)
|
||||
await g.punishInvalidMessage(peer, msg)
|
||||
return
|
||||
of ValidationResult.Ignore:
|
||||
debug "Dropping message after validation, reason: ignore",
|
||||
@@ -354,7 +367,7 @@ proc validateAndRelay(g: GossipSub,
|
||||
if msg.data.len > msgId.len * 10:
|
||||
g.broadcast(toSendPeers, RPCMsg(control: some(ControlMessage(
|
||||
idontwant: @[ControlIWant(messageIds: @[msgId])]
|
||||
))))
|
||||
))), isHighPriority = true)
|
||||
|
||||
for peer in toSendPeers:
|
||||
for heDontWant in peer.heDontWants:
|
||||
@@ -368,7 +381,7 @@ proc validateAndRelay(g: GossipSub,
|
||||
|
||||
# In theory, if topics are the same in all messages, we could batch - we'd
|
||||
# also have to be careful to only include validated messages
|
||||
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
|
||||
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)
|
||||
trace "forwarded message to peers", peers = toSendPeers.len, msgId, peer
|
||||
for topic in msg.topicIds:
|
||||
if topic notin g.topics: continue
|
||||
@@ -385,7 +398,7 @@ proc validateAndRelay(g: GossipSub,
|
||||
proc dataAndTopicsIdSize(msgs: seq[Message]): int =
|
||||
msgs.mapIt(it.data.len + it.topicIds.mapIt(it.len).foldl(a + b, 0)).foldl(a + b, 0)
|
||||
|
||||
proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.raises:[PeerRateLimitError, CatchableError], async.} =
|
||||
proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.async.} =
|
||||
# In this way we count even ignored fields by protobuf
|
||||
|
||||
var rmsg = rpcMsgOpt.valueOr:
|
||||
@@ -427,6 +440,11 @@ method rpcHandler*(g: GossipSub,
|
||||
await rateLimit(g, peer, Opt.none(RPCMsg), msgSize)
|
||||
return
|
||||
|
||||
when defined(libp2p_expensive_metrics):
|
||||
for m in rpcMsg.messages:
|
||||
for t in m.topicIds:
|
||||
libp2p_pubsub_received_messages.inc(labelValues = [$peer.peerId, t])
|
||||
|
||||
trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
|
||||
await rateLimit(g, peer, Opt.some(rpcMsg), msgSize)
|
||||
|
||||
@@ -434,7 +452,7 @@ method rpcHandler*(g: GossipSub,
|
||||
peer.recvObservers(rpcMsg)
|
||||
|
||||
if rpcMsg.ping.len in 1..<64 and peer.pingBudget > 0:
|
||||
g.send(peer, RPCMsg(pong: rpcMsg.ping))
|
||||
g.send(peer, RPCMsg(pong: rpcMsg.ping), isHighPriority = true)
|
||||
peer.pingBudget.dec
|
||||
for i in 0..<min(g.topicsHigh, rpcMsg.subscriptions.len):
|
||||
template sub: untyped = rpcMsg.subscriptions[i]
|
||||
@@ -461,9 +479,6 @@ method rpcHandler*(g: GossipSub,
|
||||
let
|
||||
msgId = msgIdResult.get
|
||||
msgIdSalted = msgId & g.seenSalt
|
||||
g.outstandingIWANTs.withValue(msgId, iwantRequest):
|
||||
if iwantRequest.peer.peerId == peer.peerId:
|
||||
g.outstandingIWANTs.del(msgId)
|
||||
|
||||
# addSeen adds salt to msgId to avoid
|
||||
# remote attacking the hash function
|
||||
@@ -496,14 +511,14 @@ method rpcHandler*(g: GossipSub,
|
||||
# always validate if signature is present or required
|
||||
debug "Dropping message due to failed signature verification",
|
||||
msgId = shortLog(msgId), peer
|
||||
g.punishInvalidMessage(peer, msg)
|
||||
await g.punishInvalidMessage(peer, msg)
|
||||
continue
|
||||
|
||||
if msg.seqno.len > 0 and msg.seqno.len != 8:
|
||||
# if we have seqno should be 8 bytes long
|
||||
debug "Dropping message due to invalid seqno length",
|
||||
msgId = shortLog(msgId), peer
|
||||
g.punishInvalidMessage(peer, msg)
|
||||
await g.punishInvalidMessage(peer, msg)
|
||||
continue
|
||||
|
||||
# g.anonymize needs no evaluation when receiving messages
|
||||
@@ -547,7 +562,7 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
|
||||
topicID: topic,
|
||||
peers: g.peerExchangeList(topic),
|
||||
backoff: g.parameters.unsubscribeBackoff.seconds.uint64)])))
|
||||
g.broadcast(mpeers, msg)
|
||||
g.broadcast(mpeers, msg, isHighPriority = true)
|
||||
|
||||
for peer in mpeers:
|
||||
g.pruned(peer, topic, backoff = some(g.parameters.unsubscribeBackoff))
|
||||
@@ -651,7 +666,7 @@ method publish*(g: GossipSub,
|
||||
|
||||
g.mcache.put(msgId, msg)
|
||||
|
||||
g.broadcast(peers, RPCMsg(messages: @[msg]))
|
||||
g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
|
||||
|
||||
if g.knownTopics.contains(topic):
|
||||
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = [topic])
|
||||
@@ -686,30 +701,40 @@ proc maintainDirectPeers(g: GossipSub) {.async.} =
|
||||
for id, addrs in g.parameters.directPeers:
|
||||
await g.addDirectPeer(id, addrs)
|
||||
|
||||
method start*(g: GossipSub) {.async.} =
|
||||
method start*(
|
||||
g: GossipSub
|
||||
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
|
||||
trace "gossipsub start"
|
||||
|
||||
if not g.heartbeatFut.isNil:
|
||||
warn "Starting gossipsub twice"
|
||||
return
|
||||
return fut
|
||||
|
||||
g.heartbeatFut = g.heartbeat()
|
||||
g.scoringHeartbeatFut = g.scoringHeartbeat()
|
||||
g.directPeersLoop = g.maintainDirectPeers()
|
||||
g.started = true
|
||||
fut
|
||||
|
||||
method stop*(g: GossipSub): Future[void] {.async: (raises: [], raw: true).} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
|
||||
method stop*(g: GossipSub) {.async.} =
|
||||
trace "gossipsub stop"
|
||||
g.started = false
|
||||
if g.heartbeatFut.isNil:
|
||||
warn "Stopping gossipsub without starting it"
|
||||
return
|
||||
return fut
|
||||
|
||||
# stop heartbeat interval
|
||||
g.directPeersLoop.cancel()
|
||||
g.scoringHeartbeatFut.cancel()
|
||||
g.heartbeatFut.cancel()
|
||||
g.heartbeatFut = nil
|
||||
fut
|
||||
|
||||
method initPubSub*(g: GossipSub)
|
||||
{.raises: [InitializationError].} =
|
||||
|
||||
@@ -254,8 +254,7 @@ proc handleIHave*(g: GossipSub,
|
||||
if not g.hasSeen(msgId):
|
||||
if peer.iHaveBudget <= 0:
|
||||
break
|
||||
elif msgId notin res.messageIds and msgId notin g.outstandingIWANTs:
|
||||
g.outstandingIWANTs[msgId] = IWANTRequest(messageId: msgId, peer: peer, timestamp: Moment.now())
|
||||
elif msgId notin res.messageIds:
|
||||
res.messageIds.add(msgId)
|
||||
dec peer.iHaveBudget
|
||||
trace "requested message via ihave", messageID=msgId
|
||||
@@ -301,17 +300,6 @@ proc handleIWant*(g: GossipSub,
|
||||
messages.add(msg)
|
||||
return messages
|
||||
|
||||
proc checkIWANTTimeouts(g: GossipSub, timeoutDuration: Duration) {.raises: [].} =
|
||||
let currentTime = Moment.now()
|
||||
var idsToRemove = newSeq[MessageId]()
|
||||
for msgId, request in g.outstandingIWANTs.pairs():
|
||||
if currentTime - request.timestamp > timeoutDuration:
|
||||
trace "IWANT request timed out", messageID=msgId, peer=request.peer
|
||||
request.peer.behaviourPenalty += 0.1
|
||||
idsToRemove.add(msgId)
|
||||
for msgId in idsToRemove:
|
||||
g.outstandingIWANTs.del(msgId)
|
||||
|
||||
proc commitMetrics(metrics: var MeshMetrics) {.raises: [].} =
|
||||
libp2p_gossipsub_low_peers_topics.set(metrics.lowPeersTopics)
|
||||
libp2p_gossipsub_no_peers_topics.set(metrics.noPeersTopics)
|
||||
@@ -542,14 +530,14 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
|
||||
# Send changes to peers after table updates to avoid stale state
|
||||
if grafts.len > 0:
|
||||
let graft = RPCMsg(control: some(ControlMessage(graft: @[ControlGraft(topicID: topic)])))
|
||||
g.broadcast(grafts, graft)
|
||||
g.broadcast(grafts, graft, isHighPriority = true)
|
||||
if prunes.len > 0:
|
||||
let prune = RPCMsg(control: some(ControlMessage(
|
||||
prune: @[ControlPrune(
|
||||
topicID: topic,
|
||||
peers: g.peerExchangeList(topic),
|
||||
backoff: g.parameters.pruneBackoff.seconds.uint64)])))
|
||||
g.broadcast(prunes, prune)
|
||||
g.broadcast(prunes, prune, isHighPriority = true)
|
||||
|
||||
proc dropFanoutPeers*(g: GossipSub) {.raises: [].} =
|
||||
# drop peers that we haven't published to in
|
||||
@@ -681,7 +669,7 @@ proc onHeartbeat(g: GossipSub) {.raises: [].} =
|
||||
topicID: t,
|
||||
peers: g.peerExchangeList(t),
|
||||
backoff: g.parameters.pruneBackoff.seconds.uint64)])))
|
||||
g.broadcast(prunes, prune)
|
||||
g.broadcast(prunes, prune, isHighPriority = true)
|
||||
|
||||
# pass by ptr in order to both signal we want to update metrics
|
||||
# and as well update the struct for each topic during this iteration
|
||||
@@ -703,7 +691,7 @@ proc onHeartbeat(g: GossipSub) {.raises: [].} =
|
||||
libp2p_pubsub_broadcast_ihave.inc(labelValues = [ihave.topicId])
|
||||
else:
|
||||
libp2p_pubsub_broadcast_ihave.inc(labelValues = ["generic"])
|
||||
g.send(peer, RPCMsg(control: some(control)))
|
||||
g.send(peer, RPCMsg(control: some(control)), isHighPriority = true)
|
||||
|
||||
g.mcache.shift() # shift the cache
|
||||
|
||||
@@ -717,5 +705,3 @@ proc heartbeat*(g: GossipSub) {.async.} =
|
||||
for trigger in g.heartbeatEvents:
|
||||
trace "firing heartbeat event", instance = cast[int](g)
|
||||
trigger.fire()
|
||||
|
||||
checkIWANTTimeouts(g, g.parameters.iwantTimeout)
|
||||
|
||||
@@ -240,15 +240,15 @@ proc scoringHeartbeat*(g: GossipSub) {.async.} =
|
||||
trace "running scoring heartbeat", instance = cast[int](g)
|
||||
g.updateScores()
|
||||
|
||||
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) =
|
||||
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) {.async.} =
|
||||
let uselessAppBytesNum = msg.data.len
|
||||
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
|
||||
if not overheadRateLimit.tryConsume(uselessAppBytesNum):
|
||||
debug "Peer sent invalid message and it's above rate limit", peer, uselessAppBytesNum
|
||||
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
|
||||
# discard g.disconnectPeer(peer)
|
||||
# debug "Peer disconnected", peer, uselessAppBytesNum
|
||||
# raise newException(PeerRateLimitError, "Peer sent invalid message and it's above rate limit")
|
||||
if g.parameters.disconnectPeerAboveRateLimit:
|
||||
await g.disconnectPeer(peer)
|
||||
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
|
||||
|
||||
|
||||
for tt in msg.topicIds:
|
||||
|
||||
@@ -143,7 +143,6 @@ type
|
||||
enablePX*: bool
|
||||
|
||||
bandwidthEstimatebps*: int # This is currently used only for limting flood publishing. 0 disables flood-limiting completely
|
||||
iwantTimeout*: Duration
|
||||
|
||||
overheadRateLimit*: Opt[tuple[bytes: int, interval: Duration]]
|
||||
disconnectPeerAboveRateLimit*: bool
|
||||
@@ -181,7 +180,6 @@ type
|
||||
routingRecordsHandler*: seq[RoutingRecordsHandler] # Callback for peer exchange
|
||||
|
||||
heartbeatEvents*: seq[AsyncEvent]
|
||||
outstandingIWANTs*: Table[MessageId, IWANTRequest]
|
||||
|
||||
MeshMetrics* = object
|
||||
# scratch buffers for metrics
|
||||
@@ -192,8 +190,3 @@ type
|
||||
lowPeersTopics*: int64 # npeers < dlow
|
||||
healthyPeersTopics*: int64 # npeers >= dlow
|
||||
underDoutTopics*: int64
|
||||
|
||||
IWANTRequest* = object
|
||||
messageId*: MessageId
|
||||
peer*: PubSubPeer
|
||||
timestamp*: Moment
|
||||
|
||||
@@ -138,18 +138,34 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
|
||||
|
||||
libp2p_pubsub_peers.set(p.peers.len.int64)
|
||||
|
||||
proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg) {.raises: [].} =
|
||||
## Attempt to send `msg` to remote peer
|
||||
proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool) {.raises: [].} =
|
||||
## This procedure attempts to send a `msg` (of type `RPCMsg`) to the specified remote peer in the PubSub network.
|
||||
##
|
||||
## Parameters:
|
||||
## - `p`: The `PubSub` instance.
|
||||
## - `peer`: An instance of `PubSubPeer` representing the peer to whom the message should be sent.
|
||||
## - `msg`: The `RPCMsg` instance that contains the message to be sent.
|
||||
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
|
||||
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
|
||||
## priority messages have been sent.
|
||||
|
||||
trace "sending pubsub message to peer", peer, msg = shortLog(msg)
|
||||
peer.send(msg, p.anonymize)
|
||||
peer.send(msg, p.anonymize, isHighPriority)
|
||||
|
||||
proc broadcast*(
|
||||
p: PubSub,
|
||||
sendPeers: auto, # Iteratble[PubSubPeer]
|
||||
msg: RPCMsg) {.raises: [].} =
|
||||
## Attempt to send `msg` to the given peers
|
||||
msg: RPCMsg,
|
||||
isHighPriority: bool) {.raises: [].} =
|
||||
## This procedure attempts to send a `msg` (of type `RPCMsg`) to a specified group of peers in the PubSub network.
|
||||
##
|
||||
## Parameters:
|
||||
## - `p`: The `PubSub` instance.
|
||||
## - `sendPeers`: An iterable of `PubSubPeer` instances representing the peers to whom the message should be sent.
|
||||
## - `msg`: The `RPCMsg` instance that contains the message to be broadcast.
|
||||
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
|
||||
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
|
||||
## priority messages have been sent.
|
||||
|
||||
let npeers = sendPeers.len.int64
|
||||
for sub in msg.subscriptions:
|
||||
@@ -195,19 +211,19 @@ proc broadcast*(
|
||||
|
||||
if anyIt(sendPeers, it.hasObservers):
|
||||
for peer in sendPeers:
|
||||
p.send(peer, msg)
|
||||
p.send(peer, msg, isHighPriority)
|
||||
else:
|
||||
# Fast path that only encodes message once
|
||||
let encoded = encodeRpcMsg(msg, p.anonymize)
|
||||
for peer in sendPeers:
|
||||
asyncSpawn peer.sendEncoded(encoded)
|
||||
asyncSpawn peer.sendEncoded(encoded, isHighPriority)
|
||||
|
||||
proc sendSubs*(p: PubSub,
|
||||
peer: PubSubPeer,
|
||||
topics: openArray[string],
|
||||
subscribe: bool) =
|
||||
## send subscriptions to remote peer
|
||||
p.send(peer, RPCMsg.withSubs(topics, subscribe))
|
||||
p.send(peer, RPCMsg.withSubs(topics, subscribe), isHighPriority = true)
|
||||
|
||||
for topic in topics:
|
||||
if subscribe:
|
||||
|
||||
@@ -28,10 +28,13 @@ logScope:
|
||||
|
||||
when defined(libp2p_expensive_metrics):
|
||||
declareCounter(libp2p_pubsub_sent_messages, "number of messages sent", labels = ["id", "topic"])
|
||||
declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])
|
||||
declareCounter(libp2p_pubsub_skipped_received_messages, "number of received skipped messages", labels = ["id"])
|
||||
declareCounter(libp2p_pubsub_skipped_sent_messages, "number of sent skipped messages", labels = ["id"])
|
||||
|
||||
when defined(pubsubpeer_queue_metrics):
|
||||
declareGauge(libp2p_gossipsub_priority_queue_size, "the number of messages in the priority queue", labels = ["id"])
|
||||
declareGauge(libp2p_gossipsub_non_priority_queue_size, "the number of messages in the non-priority queue", labels = ["id"])
|
||||
|
||||
type
|
||||
PeerRateLimitError* = object of CatchableError
|
||||
|
||||
@@ -50,6 +53,14 @@ type
|
||||
DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [].} # have to pass peer as it's unknown during init
|
||||
OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [].}
|
||||
|
||||
RpcMessageQueue* = ref object
|
||||
# Tracks async tasks for sending high-priority peer-published messages.
|
||||
sendPriorityQueue: Deque[Future[void]]
|
||||
# Queue for lower-priority messages, like "IWANT" replies and relay messages.
|
||||
nonPriorityQueue: AsyncQueue[seq[byte]]
|
||||
# Task for processing non-priority message queue.
|
||||
sendNonPriorityTask: Future[void]
|
||||
|
||||
PubSubPeer* = ref object of RootObj
|
||||
getConn*: GetConn # callback to establish a new send connection
|
||||
onEvent*: OnEvent # Connectivity updates for peer
|
||||
@@ -71,6 +82,8 @@ type
|
||||
behaviourPenalty*: float64 # the eventual penalty score
|
||||
overheadRateLimitOpt*: Opt[TokenBucket]
|
||||
|
||||
rpcmessagequeue: RpcMessageQueue
|
||||
|
||||
RPCHandler* = proc(peer: PubSubPeer, data: seq[byte]): Future[void]
|
||||
{.gcsafe, raises: [].}
|
||||
|
||||
@@ -83,6 +96,16 @@ when defined(libp2p_agents_metrics):
|
||||
#so we have to read the parents short agent..
|
||||
p.sendConn.getWrapped().shortAgent
|
||||
|
||||
proc getAgent*(peer: PubSubPeer): string =
|
||||
return
|
||||
when defined(libp2p_agents_metrics):
|
||||
if peer.shortAgent.len > 0:
|
||||
peer.shortAgent
|
||||
else:
|
||||
"unknown"
|
||||
else:
|
||||
"unknown"
|
||||
|
||||
func hash*(p: PubSubPeer): Hash =
|
||||
p.peerId.hash
|
||||
|
||||
@@ -138,12 +161,6 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
|
||||
conn, peer = p, closed = conn.closed,
|
||||
data = data.shortLog
|
||||
|
||||
when defined(libp2p_expensive_metrics):
|
||||
for m in rmsg.messages:
|
||||
for t in m.topicIDs:
|
||||
# metrics
|
||||
libp2p_pubsub_received_messages.inc(labelValues = [$p.peerId, t])
|
||||
|
||||
await p.handler(p, data)
|
||||
data = newSeq[byte]() # Release memory
|
||||
except PeerRateLimitError as exc:
|
||||
@@ -230,35 +247,31 @@ proc hasSendConn*(p: PubSubPeer): bool =
|
||||
template sendMetrics(msg: RPCMsg): untyped =
|
||||
when defined(libp2p_expensive_metrics):
|
||||
for x in msg.messages:
|
||||
for t in x.topicIDs:
|
||||
for t in x.topicIds:
|
||||
# metrics
|
||||
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])
|
||||
|
||||
proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [], async.} =
|
||||
doAssert(not isNil(p), "pubsubpeer nil!")
|
||||
proc clearSendPriorityQueue(p: PubSubPeer) =
|
||||
if p.rpcmessagequeue.sendPriorityQueue.len == 0:
|
||||
return # fast path
|
||||
|
||||
if msg.len <= 0:
|
||||
debug "empty message, skipping", p, msg = shortLog(msg)
|
||||
return
|
||||
while p.rpcmessagequeue.sendPriorityQueue.len > 0 and
|
||||
p.rpcmessagequeue.sendPriorityQueue[0].finished:
|
||||
discard p.rpcmessagequeue.sendPriorityQueue.popFirst()
|
||||
|
||||
if msg.len > p.maxMessageSize:
|
||||
info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
|
||||
return
|
||||
while p.rpcmessagequeue.sendPriorityQueue.len > 0 and
|
||||
p.rpcmessagequeue.sendPriorityQueue[^1].finished:
|
||||
discard p.rpcmessagequeue.sendPriorityQueue.popLast()
|
||||
|
||||
if p.sendConn == nil:
|
||||
# Wait for a send conn to be setup. `connectOnce` will
|
||||
# complete this even if the sendConn setup failed
|
||||
await p.connectedFut
|
||||
|
||||
var conn = p.sendConn
|
||||
if conn == nil or conn.closed():
|
||||
debug "No send connection", p, msg = shortLog(msg)
|
||||
return
|
||||
|
||||
trace "sending encoded msgs to peer", conn, encoded = shortLog(msg)
|
||||
when defined(pubsubpeer_queue_metrics):
|
||||
libp2p_gossipsub_priority_queue_size.set(
|
||||
value = p.rpcmessagequeue.sendPriorityQueue.len.int64,
|
||||
labelValues = [$p.peerId])
|
||||
|
||||
proc sendMsgContinue(conn: Connection, msgFut: Future[void]) {.async.} =
|
||||
# Continuation for a pending `sendMsg` future from below
|
||||
try:
|
||||
await conn.writeLp(msg)
|
||||
await msgFut
|
||||
trace "sent pubsub message to remote", conn
|
||||
except CatchableError as exc: # never cancelled
|
||||
# Because we detach the send call from the currently executing task using
|
||||
@@ -269,6 +282,73 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [], async.} =
|
||||
|
||||
await conn.close() # This will clean up the send connection
|
||||
|
||||
proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async.} =
|
||||
# Slow path of `sendMsg` where msg is held in memory while send connection is
|
||||
# being set up
|
||||
if p.sendConn == nil:
|
||||
# Wait for a send conn to be setup. `connectOnce` will
|
||||
# complete this even if the sendConn setup failed
|
||||
try:
|
||||
await p.connectedFut
|
||||
except CatchableError as exc:
|
||||
debug "Error when waiting for a send conn to be setup", msg = exc.msg, p
|
||||
|
||||
var conn = p.sendConn
|
||||
if conn == nil or conn.closed():
|
||||
debug "No send connection", msg = shortLog(msg), p
|
||||
return
|
||||
|
||||
trace "sending encoded msg to peer", conn, encoded = shortLog(msg), p
|
||||
try:
|
||||
await sendMsgContinue(conn, conn.writeLp(msg))
|
||||
except CancelledError as exc:
|
||||
trace "Continuation for pending `sendMsg` future has been unexpectedly cancelled"
|
||||
|
||||
proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] =
|
||||
if p.sendConn != nil and not p.sendConn.closed():
|
||||
# Fast path that avoids copying msg (which happens for {.async.})
|
||||
let conn = p.sendConn
|
||||
|
||||
trace "sending encoded msg to peer", conn, encoded = shortLog(msg), p
|
||||
let f = conn.writeLp(msg)
|
||||
if not f.completed():
|
||||
sendMsgContinue(conn, f)
|
||||
else:
|
||||
f
|
||||
else:
|
||||
sendMsgSlow(p, msg)
|
||||
|
||||
proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[void] =
|
||||
## Asynchronously sends an encoded message to a specified `PubSubPeer`.
|
||||
##
|
||||
## Parameters:
|
||||
## - `p`: The `PubSubPeer` instance to which the message is to be sent.
|
||||
## - `msg`: The message to be sent, encoded as a sequence of bytes (`seq[byte]`).
|
||||
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
|
||||
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
|
||||
## priority messages have been sent.
|
||||
doAssert(not isNil(p), "pubsubpeer nil!")
|
||||
|
||||
if msg.len <= 0:
|
||||
debug "empty message, skipping", p, msg = shortLog(msg)
|
||||
Future[void].completed()
|
||||
elif msg.len > p.maxMessageSize:
|
||||
info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
|
||||
Future[void].completed()
|
||||
elif isHighPriority:
|
||||
p.clearSendPriorityQueue()
|
||||
let f = p.sendMsg(msg)
|
||||
if not f.finished:
|
||||
p.rpcmessagequeue.sendPriorityQueue.addLast(f)
|
||||
when defined(pubsubpeer_queue_metrics):
|
||||
libp2p_gossipsub_priority_queue_size.inc(labelValues = [$p.peerId])
|
||||
f
|
||||
else:
|
||||
let f = p.rpcmessagequeue.nonPriorityQueue.addLast(msg)
|
||||
when defined(pubsubpeer_queue_metrics):
|
||||
libp2p_gossipsub_non_priority_queue_size.inc(labelValues = [$p.peerId])
|
||||
f
|
||||
|
||||
iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize: bool): seq[byte] =
|
||||
## This iterator takes an `RPCMsg` and sequentially repackages its Messages into new `RPCMsg` instances.
|
||||
## Each new `RPCMsg` accumulates Messages until reaching the specified `maxSize`. If a single Message
|
||||
@@ -304,7 +384,16 @@ iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize:
|
||||
else:
|
||||
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
|
||||
|
||||
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [].} =
|
||||
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool) {.raises: [].} =
|
||||
## Asynchronously sends an `RPCMsg` to a specified `PubSubPeer` with an option for anonymization.
|
||||
##
|
||||
## Parameters:
|
||||
## - `p`: The `PubSubPeer` instance to which the message is to be sent.
|
||||
## - `msg`: The `RPCMsg` instance representing the message to be sent.
|
||||
## - `anonymize`: A boolean flag indicating whether the message should be sent with anonymization.
|
||||
## - `isHighPriority`: A boolean flag indicating whether the message should be treated as high priority.
|
||||
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
|
||||
## priority messages have been sent.
|
||||
# When sending messages, we take care to re-encode them with the right
|
||||
# anonymization flag to ensure that we're not penalized for sending invalid
|
||||
# or malicious data on the wire - in particular, re-encoding protects against
|
||||
@@ -324,11 +413,11 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [].} =
|
||||
|
||||
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
|
||||
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
|
||||
asyncSpawn p.sendEncoded(encodedSplitMsg)
|
||||
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority)
|
||||
else:
|
||||
# If the message size is within limits, send it as is
|
||||
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
|
||||
asyncSpawn p.sendEncoded(encoded)
|
||||
asyncSpawn p.sendEncoded(encoded, isHighPriority)
|
||||
|
||||
proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
|
||||
for sentIHave in p.sentIHaves.mitems():
|
||||
@@ -337,6 +426,45 @@ proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
|
||||
return true
|
||||
return false
|
||||
|
||||
proc sendNonPriorityTask(p: PubSubPeer) {.async.} =
|
||||
while true:
|
||||
# we send non-priority messages only if there are no pending priority messages
|
||||
let msg = await p.rpcmessagequeue.nonPriorityQueue.popFirst()
|
||||
while p.rpcmessagequeue.sendPriorityQueue.len > 0:
|
||||
p.clearSendPriorityQueue()
|
||||
# waiting for the last future minimizes the number of times we have to
|
||||
# wait for something (each wait = performance cost) -
|
||||
# clearSendPriorityQueue ensures we're not waiting for an already-finished
|
||||
# future
|
||||
if p.rpcmessagequeue.sendPriorityQueue.len > 0:
|
||||
await p.rpcmessagequeue.sendPriorityQueue[^1]
|
||||
when defined(pubsubpeer_queue_metrics):
|
||||
libp2p_gossipsub_non_priority_queue_size.dec(labelValues = [$p.peerId])
|
||||
await p.sendMsg(msg)
|
||||
|
||||
proc startSendNonPriorityTask(p: PubSubPeer) =
|
||||
debug "starting sendNonPriorityTask", p
|
||||
if p.rpcmessagequeue.sendNonPriorityTask.isNil:
|
||||
p.rpcmessagequeue.sendNonPriorityTask = p.sendNonPriorityTask()
|
||||
|
||||
proc stopSendNonPriorityTask*(p: PubSubPeer) =
|
||||
if not p.rpcmessagequeue.sendNonPriorityTask.isNil:
|
||||
debug "stopping sendNonPriorityTask", p
|
||||
p.rpcmessagequeue.sendNonPriorityTask.cancelSoon()
|
||||
p.rpcmessagequeue.sendNonPriorityTask = nil
|
||||
p.rpcmessagequeue.sendPriorityQueue.clear()
|
||||
p.rpcmessagequeue.nonPriorityQueue.clear()
|
||||
|
||||
when defined(pubsubpeer_queue_metrics):
|
||||
libp2p_gossipsub_priority_queue_size.set(labelValues = [$p.peerId], value = 0)
|
||||
libp2p_gossipsub_non_priority_queue_size.set(labelValues = [$p.peerId], value = 0)
|
||||
|
||||
proc new(T: typedesc[RpcMessageQueue]): T =
|
||||
return T(
|
||||
sendPriorityQueue: initDeque[Future[void]](),
|
||||
nonPriorityQueue: newAsyncQueue[seq[byte]](),
|
||||
)
|
||||
|
||||
proc new*(
|
||||
T: typedesc[PubSubPeer],
|
||||
peerId: PeerId,
|
||||
@@ -353,17 +481,9 @@ proc new*(
|
||||
peerId: peerId,
|
||||
connectedFut: newFuture[void](),
|
||||
maxMessageSize: maxMessageSize,
|
||||
overheadRateLimitOpt: overheadRateLimitOpt
|
||||
overheadRateLimitOpt: overheadRateLimitOpt,
|
||||
rpcmessagequeue: RpcMessageQueue.new(),
|
||||
)
|
||||
result.sentIHaves.addFirst(default(HashSet[MessageId]))
|
||||
result.heDontWants.addFirst(default(HashSet[MessageId]))
|
||||
|
||||
proc getAgent*(peer: PubSubPeer): string =
|
||||
return
|
||||
when defined(libp2p_agents_metrics):
|
||||
if peer.shortAgent.len > 0:
|
||||
peer.shortAgent
|
||||
else:
|
||||
"unknown"
|
||||
else:
|
||||
"unknown"
|
||||
result.startSendNonPriorityTask()
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -636,7 +636,7 @@ proc new*(T: typedesc[RendezVous],
|
||||
sema: newAsyncSemaphore(SemaphoreDefaultSize)
|
||||
)
|
||||
logScope: topics = "libp2p discovery rendezvous"
|
||||
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||
proc handleStream(conn: Connection, proto: string) {.async.} =
|
||||
try:
|
||||
let
|
||||
buf = await conn.readLp(4096)
|
||||
@@ -678,17 +678,25 @@ proc deletesRegister(rdv: RendezVous) {.async.} =
|
||||
libp2p_rendezvous_registered.set(int64(total))
|
||||
libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
|
||||
|
||||
method start*(rdv: RendezVous) {.async.} =
|
||||
method start*(
|
||||
rdv: RendezVous
|
||||
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
if not rdv.registerDeletionLoop.isNil:
|
||||
warn "Starting rendezvous twice"
|
||||
return
|
||||
return fut
|
||||
rdv.registerDeletionLoop = rdv.deletesRegister()
|
||||
rdv.started = true
|
||||
fut
|
||||
|
||||
method stop*(rdv: RendezVous) {.async.} =
|
||||
method stop*(rdv: RendezVous): Future[void] {.async: (raises: [], raw: true).} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
if rdv.registerDeletionLoop.isNil:
|
||||
warn "Stopping rendezvous without starting it"
|
||||
return
|
||||
return fut
|
||||
rdv.started = false
|
||||
rdv.registerDeletionLoop.cancel()
|
||||
rdv.registerDeletionLoop = nil
|
||||
fut
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -89,7 +89,7 @@ type
|
||||
readCs: CipherState
|
||||
writeCs: CipherState
|
||||
|
||||
NoiseError* = object of LPError
|
||||
NoiseError* = object of LPStreamError
|
||||
NoiseHandshakeError* = object of NoiseError
|
||||
NoiseDecryptTagError* = object of NoiseError
|
||||
NoiseOversizedPayloadError* = object of NoiseError
|
||||
@@ -99,10 +99,10 @@ type
|
||||
|
||||
func shortLog*(conn: NoiseConnection): auto =
|
||||
try:
|
||||
if conn.isNil: "NoiseConnection(nil)"
|
||||
if conn == nil: "NoiseConnection(nil)"
|
||||
else: &"{shortLog(conn.peerId)}:{conn.oid}"
|
||||
except ValueError as exc:
|
||||
raise newException(Defect, exc.msg)
|
||||
raiseAssert(exc.msg)
|
||||
|
||||
chronicles.formatIt(NoiseConnection): shortLog(it)
|
||||
|
||||
@@ -112,7 +112,7 @@ proc genKeyPair(rng: var HmacDrbgContext): KeyPair =
|
||||
|
||||
proc hashProtocol(name: string): MDigest[256] =
|
||||
# If protocol_name is less than or equal to HASHLEN bytes in length,
|
||||
# sets h equal to protocol_name with zero bytes appended to make HASHLEN bytes.
|
||||
# sets h to protocol_name with zero bytes appended to make HASHLEN bytes.
|
||||
# Otherwise sets h = HASH(protocol_name).
|
||||
|
||||
if name.len <= 32:
|
||||
@@ -142,7 +142,7 @@ proc encrypt(
|
||||
|
||||
inc state.n
|
||||
if state.n > NonceMax:
|
||||
raise newException(NoiseNonceMaxError, "Noise max nonce value reached")
|
||||
raise (ref NoiseNonceMaxError)(msg: "Noise max nonce value reached")
|
||||
|
||||
proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
|
||||
{.raises: [NoiseNonceMaxError].} =
|
||||
@@ -168,10 +168,11 @@ proc decryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
|
||||
trace "decryptWithAd", tagIn = tagIn.shortLog, tagOut = tagOut.shortLog, nonce = state.n
|
||||
if tagIn != tagOut:
|
||||
debug "decryptWithAd failed", data = shortLog(data)
|
||||
raise newException(NoiseDecryptTagError, "decryptWithAd failed tag authentication.")
|
||||
raise (ref NoiseDecryptTagError)(msg:
|
||||
"decryptWithAd failed tag authentication.")
|
||||
inc state.n
|
||||
if state.n > NonceMax:
|
||||
raise newException(NoiseNonceMaxError, "Noise max nonce value reached")
|
||||
raise (ref NoiseNonceMaxError)(msg: "Noise max nonce value reached")
|
||||
|
||||
# Symmetricstate
|
||||
|
||||
@@ -181,8 +182,7 @@ proc init(_: type[SymmetricState]): SymmetricState =
|
||||
result.cs = CipherState(k: EmptyKey)
|
||||
|
||||
proc mixKey(ss: var SymmetricState, ikm: ChaChaPolyKey) =
|
||||
var
|
||||
temp_keys: array[2, ChaChaPolyKey]
|
||||
var temp_keys: array[2, ChaChaPolyKey]
|
||||
sha256.hkdf(ss.ck, ikm, [], temp_keys)
|
||||
ss.ck = temp_keys[0]
|
||||
ss.cs = CipherState(k: temp_keys[1])
|
||||
@@ -198,8 +198,7 @@ proc mixHash(ss: var SymmetricState, data: openArray[byte]) =
|
||||
|
||||
# We might use this for other handshake patterns/tokens
|
||||
proc mixKeyAndHash(ss: var SymmetricState, ikm: openArray[byte]) {.used.} =
|
||||
var
|
||||
temp_keys: array[3, ChaChaPolyKey]
|
||||
var temp_keys: array[3, ChaChaPolyKey]
|
||||
sha256.hkdf(ss.ck, ikm, [], temp_keys)
|
||||
ss.ck = temp_keys[0]
|
||||
ss.mixHash(temp_keys[1])
|
||||
@@ -234,7 +233,8 @@ proc init(_: type[HandshakeState]): HandshakeState =
|
||||
|
||||
template write_e: untyped =
|
||||
trace "noise write e"
|
||||
# Sets e (which must be empty) to GENERATE_KEYPAIR(). Appends e.public_key to the buffer. Calls MixHash(e.public_key).
|
||||
# Sets e (which must be empty) to GENERATE_KEYPAIR().
|
||||
# Appends e.public_key to the buffer. Calls MixHash(e.public_key).
|
||||
hs.e = genKeyPair(p.rng[])
|
||||
msg.add hs.e.publicKey
|
||||
hs.ss.mixHash(hs.e.publicKey)
|
||||
@@ -275,33 +275,37 @@ template read_e: untyped =
|
||||
trace "noise read e", size = msg.len
|
||||
|
||||
if msg.len < Curve25519Key.len:
|
||||
raise newException(NoiseHandshakeError, "Noise E, expected more data")
|
||||
raise (ref NoiseHandshakeError)(msg: "Noise E, expected more data")
|
||||
|
||||
# Sets re (which must be empty) to the next DHLEN bytes from the message. Calls MixHash(re.public_key).
|
||||
# Sets re (which must be empty) to the next DHLEN bytes from the message.
|
||||
# Calls MixHash(re.public_key).
|
||||
hs.re[0..Curve25519Key.high] = msg.toOpenArray(0, Curve25519Key.high)
|
||||
msg.consume(Curve25519Key.len)
|
||||
hs.ss.mixHash(hs.re)
|
||||
|
||||
template read_s: untyped =
|
||||
trace "noise read s", size = msg.len
|
||||
# Sets temp to the next DHLEN + 16 bytes of the message if HasKey() == True, or to the next DHLEN bytes otherwise.
|
||||
# Sets temp to the next DHLEN + 16 bytes of the message if HasKey() == True,
|
||||
# or to the next DHLEN bytes otherwise.
|
||||
# Sets rs (which must be empty) to DecryptAndHash(temp).
|
||||
let
|
||||
rsLen =
|
||||
if hs.ss.cs.hasKey:
|
||||
if msg.len < Curve25519Key.len + ChaChaPolyTag.len:
|
||||
raise newException(NoiseHandshakeError, "Noise S, expected more data")
|
||||
raise (ref NoiseHandshakeError)(msg: "Noise S, expected more data")
|
||||
Curve25519Key.len + ChaChaPolyTag.len
|
||||
else:
|
||||
if msg.len < Curve25519Key.len:
|
||||
raise newException(NoiseHandshakeError, "Noise S, expected more data")
|
||||
raise (ref NoiseHandshakeError)(msg: "Noise S, expected more data")
|
||||
Curve25519Key.len
|
||||
hs.rs[0..Curve25519Key.high] =
|
||||
hs.ss.decryptAndHash(msg.toOpenArray(0, rsLen - 1))
|
||||
|
||||
msg.consume(rsLen)
|
||||
|
||||
proc readFrame(sconn: Connection): Future[seq[byte]] {.async.} =
|
||||
proc readFrame(
|
||||
sconn: Connection
|
||||
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
var besize {.noinit.}: array[2, byte]
|
||||
await sconn.readExactly(addr besize[0], besize.len)
|
||||
let size = uint16.fromBytesBE(besize).int
|
||||
@@ -313,7 +317,11 @@ proc readFrame(sconn: Connection): Future[seq[byte]] {.async.} =
|
||||
await sconn.readExactly(addr buffer[0], buffer.len)
|
||||
return buffer
|
||||
|
||||
proc writeFrame(sconn: Connection, buf: openArray[byte]): Future[void] =
|
||||
proc writeFrame(
|
||||
sconn: Connection,
|
||||
buf: openArray[byte]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
doAssert buf.len <= uint16.high.int
|
||||
var
|
||||
lesize = buf.len.uint16
|
||||
@@ -324,13 +332,24 @@ proc writeFrame(sconn: Connection, buf: openArray[byte]): Future[void] =
|
||||
outbuf &= buf
|
||||
sconn.write(outbuf)
|
||||
|
||||
proc receiveHSMessage(sconn: Connection): Future[seq[byte]] = readFrame(sconn)
|
||||
proc sendHSMessage(sconn: Connection, buf: openArray[byte]): Future[void] =
|
||||
proc receiveHSMessage(
|
||||
sconn: Connection
|
||||
): Future[seq[byte]] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
readFrame(sconn)
|
||||
|
||||
proc sendHSMessage(
|
||||
sconn: Connection,
|
||||
buf: openArray[byte]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
writeFrame(sconn, buf)
|
||||
|
||||
proc handshakeXXOutbound(
|
||||
p: Noise, conn: Connection,
|
||||
p2pSecret: seq[byte]): Future[HandshakeResult] {.async.} =
|
||||
p2pSecret: seq[byte]
|
||||
): Future[HandshakeResult] {.async: (raises: [
|
||||
CancelledError, LPStreamError]).} =
|
||||
const initiator = true
|
||||
var
|
||||
hs = HandshakeState.init()
|
||||
@@ -372,13 +391,16 @@ proc handshakeXXOutbound(
|
||||
await conn.sendHSMessage(msg.data)
|
||||
|
||||
let (cs1, cs2) = hs.ss.split()
|
||||
return HandshakeResult(cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
|
||||
return HandshakeResult(
|
||||
cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
|
||||
finally:
|
||||
burnMem(hs)
|
||||
|
||||
proc handshakeXXInbound(
|
||||
p: Noise, conn: Connection,
|
||||
p2pSecret: seq[byte]): Future[HandshakeResult] {.async.} =
|
||||
p2pSecret: seq[byte]
|
||||
): Future[HandshakeResult] {.async: (raises: [
|
||||
CancelledError, LPStreamError]).} =
|
||||
const initiator = false
|
||||
|
||||
var
|
||||
@@ -422,11 +444,14 @@ proc handshakeXXInbound(
|
||||
let
|
||||
remoteP2psecret = hs.ss.decryptAndHash(msg.data)
|
||||
(cs1, cs2) = hs.ss.split()
|
||||
return HandshakeResult(cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
|
||||
return HandshakeResult(
|
||||
cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
|
||||
finally:
|
||||
burnMem(hs)
|
||||
|
||||
method readMessage*(sconn: NoiseConnection): Future[seq[byte]] {.async.} =
method readMessage*(
sconn: NoiseConnection
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
while true: # Discard 0-length payloads
let frame = await sconn.stream.readFrame()
sconn.activity = true
@@ -458,7 +483,11 @@ proc encryptFrame(

cipherFrame[2 + src.len()..<cipherFrame.len] = tag

method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] =
method write*(
sconn: NoiseConnection,
message: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
# Fast path: `{.async.}` would introduce a copy of `message`
const FramingSize = 2 + sizeof(ChaChaPolyTag)

@@ -478,7 +507,8 @@ method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] =
try:
encryptFrame(
sconn,
cipherFrames.toOpenArray(woffset, woffset + chunkSize + FramingSize - 1),
cipherFrames.toOpenArray(
woffset, woffset + chunkSize + FramingSize - 1),
message.toOpenArray(offset, offset + chunkSize - 1))
except NoiseNonceMaxError as exc:
debug "Noise nonce exceeded"
@@ -501,21 +531,28 @@ method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] =
# sequencing issues
sconn.stream.write(cipherFrames)

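For context on the arithmetic in this write path: each encrypted Noise frame carries a 2-byte length prefix plus a ChaCha20-Poly1305 tag, so the message is split into chunks that keep every frame within the 16-bit length limit. A rough sketch of the size bookkeeping only (the exact chunk size used by the code is an assumption here; no crypto involved):

# Sketch of the framing arithmetic only: 2-byte big-endian length, chunk
# ciphertext, 16-byte AEAD tag per frame.  MaxPlainChunk is an assumption.
const
  TagSize = 16                      # sizeof(ChaChaPolyTag)
  FramingSize = 2 + TagSize
  MaxPlainChunk = 65535 - TagSize   # keep the length field within uint16

proc frameCount(msgLen: int): int =
  (msgLen + MaxPlainChunk - 1) div MaxPlainChunk

proc wireSize(msgLen: int): int =
  # total bytes written for a message, one frame per chunk
  msgLen + frameCount(msgLen) * FramingSize

assert wireSize(10) == 10 + FramingSize
assert frameCount(70_000) == 2
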
method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerId]): Future[SecureConn] {.async.} =
method handshake*(
p: Noise,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]
): Future[SecureConn] {.async: (raises: [CancelledError, LPStreamError]).} =
trace "Starting Noise handshake", conn, initiator

let timeout = conn.timeout
conn.timeout = HandshakeTimeout

# https://github.com/libp2p/specs/tree/master/noise#libp2p-data-in-handshake-messages
let
signedPayload = p.localPrivateKey.sign(
PayloadString & p.noiseKeys.publicKey.getBytes).tryGet()
let signedPayload = p.localPrivateKey.sign(
PayloadString & p.noiseKeys.publicKey.getBytes)
if signedPayload.isErr():
raise (ref NoiseHandshakeError)(msg:
"Failed to sign public key: " & $signedPayload.error())

var
libp2pProof = initProtoBuffer()
libp2pProof.write(1, p.localPublicKey)
libp2pProof.write(2, signedPayload.getBytes())
libp2pProof.write(2, signedPayload.get().getBytes())
# data field also there but not used!
libp2pProof.finish()

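The hunk above assembles the libp2p-noise handshake payload described in the linked spec: field 1 holds the identity public key, field 2 the signature over the static Noise key. A hedged sketch of the same encoding with placeholder bytes (assuming the minprotobuf helper and its buffer field; not the handshake code itself):

# Sketch: build a payload shaped like the one above.  Key and signature bytes
# are placeholders, not a real identity key.
import libp2p/protobuf/minprotobuf

proc encodePayload(identityKey, signature: seq[byte]): seq[byte] =
  var pb = initProtoBuffer()
  pb.write(1, identityKey)   # identity public key
  pb.write(2, signature)     # signature over the static Noise key payload
  pb.finish()
  pb.buffer

discard encodePayload(@[1'u8, 2, 3], @[4'u8, 5, 6])
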
@@ -534,29 +571,38 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI
|
||||
remoteSigBytes: seq[byte]
|
||||
|
||||
if not remoteProof.getField(1, remotePubKeyBytes).valueOr(false):
|
||||
raise newException(NoiseHandshakeError, "Failed to deserialize remote public key bytes. (initiator: " & $initiator & ")")
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Failed to deserialize remote public key bytes. (initiator: " &
|
||||
$initiator & ")")
|
||||
if not remoteProof.getField(2, remoteSigBytes).valueOr(false):
|
||||
raise newException(NoiseHandshakeError, "Failed to deserialize remote signature bytes. (initiator: " & $initiator & ")")
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Failed to deserialize remote signature bytes. (initiator: " &
|
||||
$initiator & ")")
|
||||
|
||||
if not remotePubKey.init(remotePubKeyBytes):
|
||||
raise newException(NoiseHandshakeError, "Failed to decode remote public key. (initiator: " & $initiator & ")")
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Failed to decode remote public key. (initiator: " & $initiator & ")")
|
||||
if not remoteSig.init(remoteSigBytes):
|
||||
raise newException(NoiseHandshakeError, "Failed to decode remote signature. (initiator: " & $initiator & ")")
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Failed to decode remote signature. (initiator: " & $initiator & ")")
|
||||
|
||||
let verifyPayload = PayloadString & handshakeRes.rs.getBytes
|
||||
if not remoteSig.verify(verifyPayload, remotePubKey):
|
||||
raise newException(NoiseHandshakeError, "Noise handshake signature verify failed.")
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Noise handshake signature verify failed.")
|
||||
else:
|
||||
trace "Remote signature verified", conn
|
||||
|
||||
let pid = PeerId.init(remotePubKey).valueOr:
|
||||
raise newException(NoiseHandshakeError, "Invalid remote peer id: " & $error)
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Invalid remote peer id: " & $error)
|
||||
|
||||
trace "Remote peer id", pid = $pid
|
||||
|
||||
peerId.withValue(targetPid):
|
||||
if not targetPid.validate():
|
||||
raise newException(NoiseHandshakeError, "Failed to validate expected peerId.")
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Failed to validate expected peerId.")
|
||||
|
||||
if pid != targetPid:
|
||||
var
|
||||
@@ -566,7 +612,8 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI
|
||||
initiator, dealt_peer = conn,
|
||||
dealt_key = $failedKey, received_peer = $pid,
|
||||
received_key = $remotePubKey
|
||||
raise newException(NoiseHandshakeError, "Noise handshake, peer id don't match! " & $pid & " != " & $targetPid)
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Noise handshake, peer id don't match! " & $pid & " != " & $targetPid)
|
||||
conn.peerId = pid
|
||||
|
||||
var tmp = NoiseConnection.new(conn, conn.peerId, conn.observedAddr)
|
||||
@@ -586,7 +633,7 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI
|
||||
|
||||
return secure
|
||||
|
||||
method closeImpl*(s: NoiseConnection) {.async.} =
|
||||
method closeImpl*(s: NoiseConnection) {.async: (raises: []).} =
|
||||
await procCall SecureConn(s).closeImpl()
|
||||
|
||||
burnMem(s.readCs)
|
||||
@@ -597,15 +644,14 @@ method init*(p: Noise) {.gcsafe.} =
|
||||
p.codec = NoiseCodec
|
||||
|
||||
proc new*(
|
||||
T: typedesc[Noise],
|
||||
rng: ref HmacDrbgContext,
|
||||
privateKey: PrivateKey,
|
||||
outgoing: bool = true,
|
||||
commonPrologue: seq[byte] = @[]): T =
|
||||
|
||||
T: typedesc[Noise],
|
||||
rng: ref HmacDrbgContext,
|
||||
privateKey: PrivateKey,
|
||||
outgoing: bool = true,
|
||||
commonPrologue: seq[byte] = @[]): T =
|
||||
let pkBytes = privateKey.getPublicKey()
|
||||
.expect("Expected valid Private Key")
|
||||
.getBytes().expect("Couldn't get public Key bytes")
|
||||
.expect("Expected valid Private Key")
|
||||
.getBytes().expect("Couldn't get public Key bytes")
|
||||
|
||||
var noise = Noise(
|
||||
rng: rng,
|
||||
|
||||
@@ -19,7 +19,7 @@ type
|
||||
|
||||
method init(p: PlainText) {.gcsafe.} =
|
||||
proc handle(conn: Connection, proto: string)
|
||||
{.async, gcsafe.} = discard
|
||||
{.async.} = discard
|
||||
## plain text doesn't do anything
|
||||
|
||||
p.codec = PlainTextCodec
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -73,14 +73,14 @@ type
|
||||
writerCoder: SecureCipher
|
||||
readerCoder: SecureCipher
|
||||
|
||||
SecioError* = object of LPError
|
||||
SecioError* = object of LPStreamError
|
||||
|
||||
func shortLog*(conn: SecioConn): auto =
|
||||
try:
|
||||
if conn.isNil: "SecioConn(nil)"
|
||||
if conn == nil: "SecioConn(nil)"
|
||||
else: &"{shortLog(conn.peerId)}:{conn.oid}"
|
||||
except ValueError as exc:
|
||||
raise newException(Defect, exc.msg)
|
||||
raiseAssert(exc.msg)
|
||||
|
||||
chronicles.formatIt(SecioConn): shortLog(it)
|
||||
|
||||
@@ -107,11 +107,11 @@ proc update(mac: var SecureMac, data: openArray[byte]) =
|
||||
proc sizeDigest(mac: SecureMac): int {.inline.} =
|
||||
case mac.kind
|
||||
of SecureMacType.Sha256:
|
||||
result = int(mac.ctxsha256.sizeDigest())
|
||||
int(mac.ctxsha256.sizeDigest())
|
||||
of SecureMacType.Sha512:
|
||||
result = int(mac.ctxsha512.sizeDigest())
|
||||
int(mac.ctxsha512.sizeDigest())
|
||||
of SecureMacType.Sha1:
|
||||
result = int(mac.ctxsha1.sizeDigest())
|
||||
int(mac.ctxsha1.sizeDigest())
|
||||
|
||||
proc finish(mac: var SecureMac, data: var openArray[byte]) =
|
||||
case mac.kind
|
||||
@@ -188,9 +188,11 @@ proc macCheckAndDecode(sconn: SecioConn, data: var seq[byte]): bool =
|
||||
sconn.readerCoder.decrypt(data.toOpenArray(0, mark - 1),
|
||||
data.toOpenArray(0, mark - 1))
|
||||
data.setLen(mark)
|
||||
result = true
|
||||
true
|
||||
|
||||
proc readRawMessage(conn: Connection): Future[seq[byte]] {.async.} =
|
||||
proc readRawMessage(
|
||||
conn: Connection
|
||||
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
while true: # Discard 0-length payloads
|
||||
var lengthBuf: array[4, byte]
|
||||
await conn.readExactly(addr lengthBuf[0], lengthBuf.len)
|
||||
@@ -211,19 +213,23 @@ proc readRawMessage(conn: Connection): Future[seq[byte]] {.async.} =
|
||||
|
||||
trace "Discarding 0-length payload", conn
|
||||
|
||||
method readMessage*(sconn: SecioConn): Future[seq[byte]] {.async.} =
|
||||
method readMessage*(
|
||||
sconn: SecioConn
|
||||
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
## Read message from channel secure connection ``sconn``.
|
||||
when chronicles.enabledLogLevel == LogLevel.TRACE:
|
||||
logScope:
|
||||
stream_oid = $sconn.stream.oid
|
||||
var buf = await sconn.stream.readRawMessage()
|
||||
if sconn.macCheckAndDecode(buf):
|
||||
result = buf
|
||||
buf
|
||||
else:
|
||||
trace "Message MAC verification failed", buf = buf.shortLog
|
||||
raise (ref SecioError)(msg: "message failed MAC verification")
|
||||
|
||||
method write*(sconn: SecioConn, message: seq[byte]) {.async.} =
|
||||
method write*(
|
||||
sconn: SecioConn,
|
||||
message: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
## Write message ``message`` to secure connection ``sconn``.
|
||||
if message.len == 0:
|
||||
return
|
||||
@@ -233,15 +239,16 @@ method write*(sconn: SecioConn, message: seq[byte]) {.async.} =
|
||||
offset = 0
|
||||
while left > 0:
|
||||
let
|
||||
chunkSize = if left > SecioMaxMessageSize - 64: SecioMaxMessageSize - 64 else: left
|
||||
chunkSize = min(left, SecioMaxMessageSize - 64)
|
||||
macsize = sconn.writerMac.sizeDigest()
|
||||
length = chunkSize + macsize
|
||||
|
||||
var msg = newSeq[byte](chunkSize + 4 + macsize)
|
||||
msg[0..<4] = uint32(length).toBytesBE()
|
||||
|
||||
sconn.writerCoder.encrypt(message.toOpenArray(offset, offset + chunkSize - 1),
|
||||
msg.toOpenArray(4, 4 + chunkSize - 1))
|
||||
sconn.writerCoder.encrypt(
|
||||
message.toOpenArray(offset, offset + chunkSize - 1),
|
||||
msg.toOpenArray(4, 4 + chunkSize - 1))
|
||||
left = left - chunkSize
|
||||
offset = offset + chunkSize
|
||||
let mo = 4 + chunkSize
|
||||
@@ -253,17 +260,16 @@ method write*(sconn: SecioConn, message: seq[byte]) {.async.} =
|
||||
await sconn.stream.write(msg)
|
||||
sconn.activity = true
|
||||
|
||||
proc newSecioConn(conn: Connection,
|
||||
hash: string,
|
||||
cipher: string,
|
||||
secrets: Secret,
|
||||
order: int,
|
||||
remotePubKey: PublicKey): SecioConn
|
||||
{.raises: [LPError].} =
|
||||
proc newSecioConn(
|
||||
conn: Connection,
|
||||
hash: string,
|
||||
cipher: string,
|
||||
secrets: Secret,
|
||||
order: int,
|
||||
remotePubKey: PublicKey): SecioConn =
|
||||
## Create new secure stream/lpstream, using specified hash algorithm ``hash``,
|
||||
## cipher algorithm ``cipher``, stretched keys ``secrets`` and order
|
||||
## ``order``.
|
||||
|
||||
result = SecioConn.new(conn, conn.peerId, conn.observedAddr)
|
||||
|
||||
let i0 = if order < 0: 1 else: 0
|
||||
@@ -282,108 +288,136 @@ proc newSecioConn(conn: Connection,
|
||||
result.readerCoder.init(cipher, secrets.keyOpenArray(i1),
|
||||
secrets.ivOpenArray(i1))
|
||||
|
||||
proc transactMessage(conn: Connection,
|
||||
msg: seq[byte]): Future[seq[byte]] {.async.} =
|
||||
proc transactMessage(
|
||||
conn: Connection,
|
||||
msg: seq[byte]
|
||||
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
trace "Sending message", message = msg.shortLog, length = len(msg)
|
||||
await conn.write(msg)
|
||||
return await conn.readRawMessage()
|
||||
await conn.readRawMessage()
|
||||
|
||||
method handshake*(s: Secio, conn: Connection, initiator: bool, peerId: Opt[PeerId]): Future[SecureConn] {.async.} =
|
||||
var
|
||||
localNonce: array[SecioNonceSize, byte]
|
||||
remoteNonce: seq[byte]
|
||||
remoteBytesPubkey: seq[byte]
|
||||
remoteEBytesPubkey: seq[byte]
|
||||
remoteEBytesSig: seq[byte]
|
||||
remotePubkey: PublicKey
|
||||
remoteEPubkey: ecnist.EcPublicKey
|
||||
remoteESignature: Signature
|
||||
remoteExchanges: string
|
||||
remoteCiphers: string
|
||||
remoteHashes: string
|
||||
remotePeerId: PeerId
|
||||
localPeerId: PeerId
|
||||
localBytesPubkey = s.localPublicKey.getBytes().tryGet()
|
||||
method handshake*(
|
||||
s: Secio,
|
||||
conn: Connection,
|
||||
initiator: bool,
|
||||
peerId: Opt[PeerId]
|
||||
): Future[SecureConn] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
let localBytesPubkey = s.localPublicKey.getBytes()
|
||||
if localBytesPubkey.isErr():
|
||||
raise (ref SecioError)(msg:
|
||||
"Failed to get local public key bytes: " & $localBytesPubkey.error())
|
||||
|
||||
let localPeerId = PeerId.init(s.localPublicKey)
|
||||
if localPeerId.isErr():
|
||||
raise (ref SecioError)(msg:
|
||||
"Failed to initialize local peer ID: " & $localPeerId.error())
|
||||
|
||||
var localNonce: array[SecioNonceSize, byte]
|
||||
hmacDrbgGenerate(s.rng[], localNonce)
|
||||
|
||||
var request = createProposal(localNonce,
|
||||
localBytesPubkey,
|
||||
SecioExchanges,
|
||||
SecioCiphers,
|
||||
SecioHashes)
|
||||
|
||||
localPeerId = PeerId.init(s.localPublicKey).tryGet()
|
||||
let request = createProposal(
|
||||
localNonce, localBytesPubkey.get(),
|
||||
SecioExchanges, SecioCiphers, SecioHashes)
|
||||
|
||||
trace "Local proposal", schemes = SecioExchanges,
|
||||
ciphers = SecioCiphers,
|
||||
hashes = SecioHashes,
|
||||
pubkey = localBytesPubkey.shortLog,
|
||||
peer = localPeerId
|
||||
|
||||
var answer = await transactMessage(conn, request)
|
||||
pubkey = localBytesPubkey.get().shortLog,
|
||||
peer = localPeerId.get()
|
||||
|
||||
let answer = await transactMessage(conn, request)
|
||||
if len(answer) == 0:
|
||||
trace "Proposal exchange failed", conn
|
||||
raise (ref SecioError)(msg: "Proposal exchange failed")
|
||||
|
||||
if not decodeProposal(answer, remoteNonce, remoteBytesPubkey, remoteExchanges,
|
||||
remoteCiphers, remoteHashes):
|
||||
var
|
||||
remoteNonce: seq[byte]
|
||||
remoteBytesPubkey: seq[byte]
|
||||
remoteExchanges: string
|
||||
remoteCiphers: string
|
||||
remoteHashes: string
|
||||
if not decodeProposal(
|
||||
answer, remoteNonce, remoteBytesPubkey, remoteExchanges,
|
||||
remoteCiphers, remoteHashes):
|
||||
trace "Remote proposal decoding failed", conn
|
||||
raise (ref SecioError)(msg: "Remote proposal decoding failed")
|
||||
|
||||
var remotePubkey: PublicKey
|
||||
if not remotePubkey.init(remoteBytesPubkey):
|
||||
trace "Remote public key incorrect or corrupted",
|
||||
pubkey = remoteBytesPubkey.shortLog
|
||||
raise (ref SecioError)(msg: "Remote public key incorrect or corrupted")
|
||||
|
||||
remotePeerId = PeerId.init(remotePubkey).tryGet()
|
||||
let remotePeerId = PeerId.init(remotePubkey)
|
||||
if remotePeerId.isErr():
|
||||
raise (ref SecioError)(msg:
|
||||
"Failed to initialize remote peer ID: " & $remotePeerId.error())
|
||||
|
||||
peerId.withValue(targetPid):
|
||||
if not targetPid.validate():
|
||||
raise newException(SecioError, "Failed to validate expected peerId.")
|
||||
raise (ref SecioError)(msg: "Failed to validate expected peerId.")
|
||||
|
||||
if remotePeerId != targetPid:
|
||||
raise newException(SecioError, "Peer ids don't match!")
|
||||
conn.peerId = remotePeerId
|
||||
let order = getOrder(remoteBytesPubkey, localNonce, localBytesPubkey,
|
||||
remoteNonce).tryGet()
|
||||
if remotePeerId.get() != targetPid:
|
||||
raise (ref SecioError)(msg: "Peer ids don't match!")
|
||||
conn.peerId = remotePeerId.get()
|
||||
let order = getOrder(
|
||||
remoteBytesPubkey, localNonce, localBytesPubkey.get(), remoteNonce)
|
||||
if order.isErr():
|
||||
raise (ref SecioError)(msg: "Failed to get order: " & $order.error())
|
||||
trace "Remote proposal", schemes = remoteExchanges, ciphers = remoteCiphers,
|
||||
hashes = remoteHashes,
|
||||
pubkey = remoteBytesPubkey.shortLog, order = order,
|
||||
peer = remotePeerId
|
||||
pubkey = remoteBytesPubkey.shortLog,
|
||||
order = order.get(),
|
||||
peer = remotePeerId.get()
|
||||
|
||||
let scheme = selectBest(order, SecioExchanges, remoteExchanges)
|
||||
let cipher = selectBest(order, SecioCiphers, remoteCiphers)
|
||||
let hash = selectBest(order, SecioHashes, remoteHashes)
|
||||
let
|
||||
scheme = selectBest(order.get(), SecioExchanges, remoteExchanges)
|
||||
cipher = selectBest(order.get(), SecioCiphers, remoteCiphers)
|
||||
hash = selectBest(order.get(), SecioHashes, remoteHashes)
|
||||
if len(scheme) == 0 or len(cipher) == 0 or len(hash) == 0:
|
||||
trace "No algorithms in common", peer = remotePeerId
|
||||
trace "No algorithms in common", peer = remotePeerId.get()
|
||||
raise (ref SecioError)(msg: "No algorithms in common")
|
||||
|
||||
trace "Encryption scheme selected", scheme = scheme, cipher = cipher,
|
||||
hash = hash
|
||||
|
||||
var ekeypair = ephemeral(scheme, s.rng[]).tryGet()
let ekeypair = ephemeral(scheme, s.rng[])
if ekeypair.isErr():
raise (ref SecioError)(msg:
"Failed to create ephemeral keypair: " & $ekeypair.error())
# We need EC public key in raw binary form
var epubkey = ekeypair.pubkey.getRawBytes().tryGet()
var localCorpus = request[4..^1] & answer & epubkey
var signature = s.localPrivateKey.sign(localCorpus).tryGet()
let epubkey = ekeypair.get().pubkey.getRawBytes()
if epubkey.isErr():
raise (ref SecioError)(msg:
"Failed to get ephemeral key bytes: " & $epubkey.error())
let
localCorpus = request[4..^1] & answer & epubkey.get()
signature = s.localPrivateKey.sign(localCorpus)
if signature.isErr():
raise (ref SecioError)(msg:
"Failed to sign local corpus: " & $signature.error())

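The recurring pattern in this hunk: instead of .tryGet(), which raises a generic Result error and would widen the tracked exception set, the Result is checked with isErr and converted into the protocol's own error type. A small self-contained sketch of that conversion with stand-in types (assuming the nim-results API: ok/err/isErr/error/expect):

# Sketch of the error-conversion pattern (stand-in types, not the secio code).
import results

type DemoError = object of CatchableError

proc parseEven(n: int): Result[int, string] =
  if n mod 2 == 0: ok(n) else: err("odd input")

proc use(n: int): int {.raises: [DemoError].} =
  let r = parseEven(n)
  if r.isErr():
    # a typed raise keeps the tracked exception set narrow
    raise (ref DemoError)(msg: "parse failed: " & r.error())
  r.expect("checked isErr above")

assert use(4) == 4
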
var localExchange = createExchange(epubkey, signature.getBytes())
|
||||
var remoteExchange = await transactMessage(conn, localExchange)
|
||||
let
|
||||
localExchange = createExchange(epubkey.get(), signature.get().getBytes())
|
||||
remoteExchange = await transactMessage(conn, localExchange)
|
||||
if len(remoteExchange) == 0:
|
||||
trace "Corpus exchange failed", conn
|
||||
raise (ref SecioError)(msg: "Corpus exchange failed")
|
||||
|
||||
var
|
||||
remoteEBytesPubkey: seq[byte]
|
||||
remoteEBytesSig: seq[byte]
|
||||
if not decodeExchange(remoteExchange, remoteEBytesPubkey, remoteEBytesSig):
|
||||
trace "Remote exchange decoding failed", conn
|
||||
raise (ref SecioError)(msg: "Remote exchange decoding failed")
|
||||
|
||||
var remoteESignature: Signature
|
||||
if not remoteESignature.init(remoteEBytesSig):
|
||||
trace "Remote signature incorrect or corrupted", signature = remoteEBytesSig.shortLog
|
||||
trace "Remote signature incorrect or corrupted",
|
||||
signature = remoteEBytesSig.shortLog
|
||||
raise (ref SecioError)(msg: "Remote signature incorrect or corrupted")
|
||||
|
||||
var remoteCorpus = answer & request[4..^1] & remoteEBytesPubkey
|
||||
let remoteCorpus = answer & request[4..^1] & remoteEBytesPubkey
|
||||
if not remoteESignature.verify(remoteCorpus, remotePubkey):
|
||||
trace "Signature verification failed", scheme = $remotePubkey.scheme,
|
||||
signature = $remoteESignature,
|
||||
@@ -393,30 +427,34 @@ method handshake*(s: Secio, conn: Connection, initiator: bool, peerId: Opt[PeerI
|
||||
|
||||
trace "Signature verified", scheme = remotePubkey.scheme
|
||||
|
||||
var remoteEPubkey: ecnist.EcPublicKey
|
||||
if not remoteEPubkey.initRaw(remoteEBytesPubkey):
|
||||
trace "Remote ephemeral public key incorrect or corrupted",
|
||||
pubkey = toHex(remoteEBytesPubkey)
|
||||
raise (ref SecioError)(msg: "Remote ephemeral public key incorrect or corrupted")
|
||||
raise (ref SecioError)(msg:
|
||||
"Remote ephemeral public key incorrect or corrupted")
|
||||
|
||||
var secret = getSecret(remoteEPubkey, ekeypair.seckey)
|
||||
let secret = getSecret(remoteEPubkey, ekeypair.get().seckey)
|
||||
if len(secret) == 0:
|
||||
trace "Shared secret could not be created"
|
||||
raise (ref SecioError)(msg: "Shared secret could not be created")
|
||||
|
||||
trace "Shared secret calculated", secret = secret.shortLog
|
||||
|
||||
var keys = stretchKeys(cipher, hash, secret)
|
||||
let keys = stretchKeys(cipher, hash, secret)
|
||||
|
||||
trace "Authenticated encryption parameters",
|
||||
iv0 = toHex(keys.ivOpenArray(0)), key0 = keys.keyOpenArray(0).shortLog,
|
||||
iv0 = toHex(keys.ivOpenArray(0)),
|
||||
key0 = keys.keyOpenArray(0).shortLog,
|
||||
mac0 = keys.macOpenArray(0).shortLog,
|
||||
iv1 = keys.ivOpenArray(1).shortLog, key1 = keys.keyOpenArray(1).shortLog,
|
||||
iv1 = keys.ivOpenArray(1).shortLog,
|
||||
key1 = keys.keyOpenArray(1).shortLog,
|
||||
mac1 = keys.macOpenArray(1).shortLog
|
||||
|
||||
# Perform Nonce exchange over encrypted channel.
|
||||
|
||||
var secioConn = newSecioConn(conn, hash, cipher, keys, order, remotePubkey)
|
||||
result = secioConn
|
||||
let secioConn = newSecioConn(
|
||||
conn, hash, cipher, keys, order.get(), remotePubkey)
|
||||
await secioConn.write(remoteNonce)
|
||||
var res = await secioConn.readMessage()
|
||||
|
||||
@@ -426,19 +464,20 @@ method handshake*(s: Secio, conn: Connection, initiator: bool, peerId: Opt[PeerI
|
||||
raise (ref SecioError)(msg: "Nonce verification failed")
|
||||
else:
|
||||
trace "Secure handshake succeeded"
|
||||
secioConn
|
||||
|
||||
method init(s: Secio) {.gcsafe.} =
|
||||
procCall Secure(s).init()
|
||||
s.codec = SecioCodec
|
||||
|
||||
proc new*(
|
||||
T: typedesc[Secio],
|
||||
rng: ref HmacDrbgContext,
|
||||
localPrivateKey: PrivateKey): T =
|
||||
T: typedesc[Secio],
|
||||
rng: ref HmacDrbgContext,
|
||||
localPrivateKey: PrivateKey): T =
|
||||
let secio = Secio(
|
||||
rng: rng,
|
||||
localPrivateKey: localPrivateKey,
|
||||
localPublicKey: localPrivateKey.getPublicKey().expect("Invalid private key"),
|
||||
localPublicKey: localPrivateKey.getPublicKey().expect("Invalid private key")
|
||||
)
|
||||
secio.init()
|
||||
secio
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -37,18 +37,19 @@ type
|
||||
|
||||
func shortLog*(conn: SecureConn): auto =
|
||||
try:
|
||||
if conn.isNil: "SecureConn(nil)"
|
||||
if conn == nil: "SecureConn(nil)"
|
||||
else: &"{shortLog(conn.peerId)}:{conn.oid}"
|
||||
except ValueError as exc:
|
||||
raise newException(Defect, exc.msg)
|
||||
raiseAssert(exc.msg)
|
||||
|
||||
chronicles.formatIt(SecureConn): shortLog(it)
|
||||
|
||||
proc new*(T: type SecureConn,
|
||||
conn: Connection,
|
||||
peerId: PeerId,
|
||||
observedAddr: Opt[MultiAddress],
|
||||
timeout: Duration = DefaultConnectionTimeout): T =
|
||||
proc new*(
|
||||
T: type SecureConn,
|
||||
conn: Connection,
|
||||
peerId: PeerId,
|
||||
observedAddr: Opt[MultiAddress],
|
||||
timeout: Duration = DefaultConnectionTimeout): T =
|
||||
result = T(stream: conn,
|
||||
peerId: peerId,
|
||||
observedAddr: observedAddr,
|
||||
@@ -63,55 +64,72 @@ method initStream*(s: SecureConn) =
|
||||
|
||||
procCall Connection(s).initStream()
|
||||
|
||||
method closeImpl*(s: SecureConn) {.async.} =
|
||||
method closeImpl*(s: SecureConn) {.async: (raises: []).} =
|
||||
trace "Closing secure conn", s, dir = s.dir
|
||||
if not(isNil(s.stream)):
|
||||
if s.stream != nil:
|
||||
await s.stream.close()
|
||||
|
||||
await procCall Connection(s).closeImpl()
|
||||
|
||||
method readMessage*(c: SecureConn): Future[seq[byte]] {.async, base.} =
|
||||
doAssert(false, "Not implemented!")
|
||||
method readMessage*(
|
||||
c: SecureConn
|
||||
): Future[seq[byte]] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true), base.} =
|
||||
raiseAssert("Not implemented!")
|
||||
|
||||
method getWrapped*(s: SecureConn): Connection = s.stream
|
||||
|
||||
method handshake*(s: Secure,
|
||||
conn: Connection,
|
||||
initiator: bool,
|
||||
peerId: Opt[PeerId]): Future[SecureConn] {.async, base.} =
|
||||
doAssert(false, "Not implemented!")
|
||||
method handshake*(
|
||||
s: Secure,
|
||||
conn: Connection,
|
||||
initiator: bool,
|
||||
peerId: Opt[PeerId]
|
||||
): Future[SecureConn] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true), base.} =
|
||||
raiseAssert("Not implemented!")
|
||||
|
||||
proc handleConn(s: Secure,
|
||||
conn: Connection,
|
||||
initiator: bool,
|
||||
peerId: Opt[PeerId]): Future[Connection] {.async.} =
|
||||
proc handleConn(
|
||||
s: Secure,
|
||||
conn: Connection,
|
||||
initiator: bool,
|
||||
peerId: Opt[PeerId]
|
||||
): Future[Connection] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
var sconn = await s.handshake(conn, initiator, peerId)
|
||||
# mark connection bottom level transport direction
|
||||
# this is the safest place to do this
|
||||
# we require this information in for example gossipsub
|
||||
sconn.transportDir = if initiator: Direction.Out else: Direction.In
|
||||
|
||||
proc cleanup() {.async.} =
proc cleanup() {.async: (raises: []).} =
try:
let futs = [conn.join(), sconn.join()]
await futs[0] or futs[1]
for f in futs:
if not f.finished: await f.cancelAndWait() # cancel outstanding join()
block:
let
fut1 = conn.join()
fut2 = sconn.join()
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(fut1, fut2)
except ValueError: raiseAssert("Futures list is not empty")
# at least one join() completed, cancel pending one, if any
if not fut1.finished: await fut1.cancelAndWait()
if not fut2.finished: await fut2.cancelAndWait()
block:
let
fut1 = sconn.close()
fut2 = conn.close()
await allFutures(fut1, fut2)
static: doAssert typeof(fut1).E is void # Cannot fail
static: doAssert typeof(fut2).E is void # Cannot fail

await allFuturesThrowing(
sconn.close(), conn.close())
except CancelledError:
# This is top-level procedure which will work as separate task, so it
# do not need to propagate CancelledError.
discard
except CatchableError as exc:
debug "error cleaning up secure connection", err = exc.msg, sconn

if not isNil(sconn):
if sconn != nil:
# All the errors are handled inside `cleanup()` procedure.
asyncSpawn cleanup()

return sconn
sconn

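The rewritten cleanup() waits for whichever side closes first via race() (the ValueError branch guards the empty-list case from nim-chronos issue 516), then cancels the remaining join and closes both ends. A compact sketch of the same wait-one-then-cancel shape with plain sleep futures:

# Sketch of the "wait for the first, cancel the rest" shape used by cleanup().
import chronos

proc waitEither(a, b: Future[void]) {.async.} =
  try:                               # guard against an empty futures list
    discard await race(a, b)         # completes when either future finishes
  except ValueError:
    raiseAssert "futures list is not empty"
  # cancel whichever future is still pending
  if not a.finished: await a.cancelAndWait()
  if not b.finished: await b.cancelAndWait()

when isMainModule:
  let
    f1 = sleepAsync(10.milliseconds)
    f2 = sleepAsync(10.seconds)
  waitFor waitEither(f1, f2)
  doAssert f1.finished and f2.finished
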
method init*(s: Secure) =
|
||||
procCall LPProtocol(s).init()
|
||||
@@ -127,23 +145,25 @@ method init*(s: Secure) =
|
||||
warn "securing connection canceled", conn
|
||||
await conn.close()
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
except LPStreamError as exc:
|
||||
warn "securing connection failed", err = exc.msg, conn
|
||||
await conn.close()
|
||||
|
||||
s.handler = handle
|
||||
|
||||
method secure*(s: Secure,
|
||||
conn: Connection,
|
||||
initiator: bool,
|
||||
peerId: Opt[PeerId]):
|
||||
Future[Connection] {.base.} =
|
||||
s.handleConn(conn, initiator, peerId)
|
||||
method secure*(
|
||||
s: Secure,
|
||||
conn: Connection,
|
||||
peerId: Opt[PeerId]
|
||||
): Future[Connection] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true), base.} =
|
||||
s.handleConn(conn, conn.dir == Direction.Out, peerId)
|
||||
|
||||
method readOnce*(s: SecureConn,
|
||||
pbytes: pointer,
|
||||
nbytes: int):
|
||||
Future[int] {.async.} =
|
||||
method readOnce*(
|
||||
s: SecureConn,
|
||||
pbytes: pointer,
|
||||
nbytes: int
|
||||
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
doAssert(nbytes > 0, "nbytes must be positive integer")
|
||||
|
||||
if s.isEof:
|
||||
@@ -160,7 +180,7 @@ method readOnce*(s: SecureConn,
|
||||
raise err
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as err:
|
||||
except LPStreamError as err:
|
||||
debug "Error while reading message from secure connection, closing.",
|
||||
error = err.name,
|
||||
message = err.msg,
|
||||
|
||||
@@ -37,7 +37,7 @@ proc isRunning*(self: AutoRelayService): bool =
|
||||
|
||||
proc addressMapper(
|
||||
self: AutoRelayService,
|
||||
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
|
||||
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
|
||||
return concat(toSeq(self.relayAddresses.values))
|
||||
|
||||
proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch) {.async.} =
|
||||
@@ -58,8 +58,8 @@ proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch)
|
||||
self.onReservation(concat(toSeq(self.relayAddresses.values)))
|
||||
await sleepAsync chronos.seconds(ttl - 30)
|
||||
|
||||
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
|
||||
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
|
||||
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
|
||||
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
|
||||
return await addressMapper(self, listenAddrs)
|
||||
|
||||
let hasBeenSetUp = await procCall Service(self).setup(switch)
|
||||
@@ -83,7 +83,7 @@ proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
|
||||
self.backingOff.keepItIf(it != pid)
|
||||
self.peerAvailable.fire()
|
||||
|
||||
proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
|
||||
proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
|
||||
while true:
|
||||
# Remove relayPeers that failed
|
||||
let peers = toSeq(self.relayPeers.keys())
|
||||
@@ -116,14 +116,14 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
|
||||
await self.peerAvailable.wait()
|
||||
await sleepAsync(200.millis)
|
||||
|
||||
method run*(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
|
||||
method run*(self: AutoRelayService, switch: Switch) {.async.} =
|
||||
if self.running:
|
||||
trace "Autorelay is already running"
|
||||
return
|
||||
self.running = true
|
||||
self.runner = self.innerRun(switch)
|
||||
|
||||
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
|
||||
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
|
||||
let hasBeenStopped = await procCall Service(self).stop(switch)
|
||||
if hasBeenStopped:
|
||||
self.running = false
|
||||
|
||||
@@ -94,7 +94,7 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
|
||||
|
||||
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
|
||||
|
||||
self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
|
||||
self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
|
||||
if networkReachability == NetworkReachability.NotReachable and not self.autoRelayService.isRunning():
|
||||
discard await self.autoRelayService.setup(switch)
|
||||
elif networkReachability == NetworkReachability.Reachable and self.autoRelayService.isRunning():
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -34,10 +34,10 @@ type
|
||||
|
||||
func shortLog*(s: BufferStream): auto =
|
||||
try:
|
||||
if s.isNil: "BufferStream(nil)"
|
||||
if s == nil: "BufferStream(nil)"
|
||||
else: &"{shortLog(s.peerId)}:{s.oid}"
|
||||
except ValueError as exc:
|
||||
raise newException(Defect, exc.msg)
|
||||
raiseAssert(exc.msg)
|
||||
|
||||
chronicles.formatIt(BufferStream): shortLog(it)
|
||||
|
||||
@@ -55,14 +55,16 @@ method initStream*(s: BufferStream) =
|
||||
trace "BufferStream created", s
|
||||
|
||||
proc new*(
|
||||
T: typedesc[BufferStream],
|
||||
timeout: Duration = DefaultConnectionTimeout): T =
|
||||
|
||||
T: typedesc[BufferStream],
|
||||
timeout: Duration = DefaultConnectionTimeout): T =
|
||||
let bufferStream = T(timeout: timeout)
|
||||
bufferStream.initStream()
|
||||
bufferStream
|
||||
|
||||
method pushData*(s: BufferStream, data: seq[byte]) {.base, async.} =
|
||||
method pushData*(
|
||||
s: BufferStream,
|
||||
data: seq[byte]
|
||||
) {.base, async: (raises: [CancelledError, LPStreamError]).} =
|
||||
## Write bytes to internal read buffer, use this to fill up the
|
||||
## buffer with data.
|
||||
##
|
||||
@@ -70,7 +72,7 @@ method pushData*(s: BufferStream, data: seq[byte]) {.base, async.} =
|
||||
##
|
||||
|
||||
doAssert(not s.pushing,
|
||||
&"Only one concurrent push allowed for stream {s.shortLog()}")
|
||||
"Only one concurrent push allowed for stream " & s.shortLog())
|
||||
|
||||
if s.isClosed or s.pushedEof:
|
||||
raise newLPStreamClosedError()
|
||||
@@ -87,12 +89,14 @@ method pushData*(s: BufferStream, data: seq[byte]) {.base, async.} =
|
||||
finally:
|
||||
s.pushing = false
|
||||
|
||||
method pushEof*(s: BufferStream) {.base, async.} =
|
||||
method pushEof*(
|
||||
s: BufferStream
|
||||
) {.base, async: (raises: [CancelledError, LPStreamError]).} =
|
||||
if s.pushedEof:
|
||||
return
|
||||
|
||||
doAssert(not s.pushing,
|
||||
&"Only one concurrent push allowed for stream {s.shortLog()}")
|
||||
"Only one concurrent push allowed for stream " & s.shortLog())
|
||||
|
||||
s.pushedEof = true
|
||||
|
||||
@@ -108,13 +112,14 @@ method pushEof*(s: BufferStream) {.base, async.} =
|
||||
method atEof*(s: BufferStream): bool =
|
||||
s.isEof and s.readBuf.len == 0
|
||||
|
||||
method readOnce*(s: BufferStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int):
|
||||
Future[int] {.async.} =
|
||||
method readOnce*(
|
||||
s: BufferStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int
|
||||
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
doAssert(nbytes > 0, "nbytes must be positive integer")
|
||||
doAssert(not s.reading,
|
||||
&"Only one concurrent read allowed for stream {s.shortLog()}")
|
||||
"Only one concurrent read allowed for stream " & s.shortLog())
|
||||
|
||||
if s.returnedEof:
|
||||
raise newLPStreamEOFError()
|
||||
@@ -135,13 +140,6 @@ method readOnce*(s: BufferStream,
|
||||
# Not very efficient, but shouldn't happen often
|
||||
s.readBuf.assign(@(p.toOpenArray(0, rbytes - 1)) & @(s.readBuf.data))
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
# When an exception happens here, the Bufferstream is effectively
|
||||
# broken and no more reads will be valid - for now, return EOF if it's
|
||||
# called again, though this is not completely true - EOF represents an
|
||||
# "orderly" shutdown and that's not what happened here..
|
||||
s.returnedEof = true
|
||||
raise exc
|
||||
finally:
|
||||
s.reading = false
|
||||
|
||||
@@ -173,7 +171,8 @@ method readOnce*(s: BufferStream,
|
||||
|
||||
return rbytes
|
||||
|
||||
method closeImpl*(s: BufferStream): Future[void] =
|
||||
method closeImpl*(
|
||||
s: BufferStream): Future[void] {.async: (raises: [], raw: true).} =
|
||||
## close the stream and clear the buffer
|
||||
trace "Closing BufferStream", s, len = s.len
|
||||
|
||||
@@ -209,8 +208,8 @@ method closeImpl*(s: BufferStream): Future[void] =
|
||||
if not s.readQueue.empty():
|
||||
discard s.readQueue.popFirstNoWait()
|
||||
except AsyncQueueFullError, AsyncQueueEmptyError:
|
||||
raise newException(Defect, getCurrentExceptionMsg())
|
||||
raiseAssert(getCurrentExceptionMsg())
|
||||
|
||||
trace "Closed BufferStream", s
|
||||
|
||||
procCall Connection(s).closeImpl() # noraises, nocancels
|
||||
procCall Connection(s).closeImpl()
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -31,18 +31,22 @@ type
|
||||
tracked: bool
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
declareGauge(libp2p_peers_identity, "peers identities", labels = ["agent"])
|
||||
declareCounter(libp2p_peers_traffic_read, "incoming traffic", labels = ["agent"])
|
||||
declareCounter(libp2p_peers_traffic_write, "outgoing traffic", labels = ["agent"])
|
||||
declareGauge libp2p_peers_identity,
|
||||
"peers identities", labels = ["agent"]
|
||||
declareCounter libp2p_peers_traffic_read,
|
||||
"incoming traffic", labels = ["agent"]
|
||||
declareCounter libp2p_peers_traffic_write,
|
||||
"outgoing traffic", labels = ["agent"]
|
||||
|
||||
declareCounter(libp2p_network_bytes, "total traffic", labels = ["direction"])
|
||||
declareCounter libp2p_network_bytes,
|
||||
"total traffic", labels = ["direction"]
|
||||
|
||||
func shortLog*(conn: ChronosStream): auto =
|
||||
try:
|
||||
if conn.isNil: "ChronosStream(nil)"
|
||||
if conn == nil: "ChronosStream(nil)"
|
||||
else: &"{shortLog(conn.peerId)}:{conn.oid}"
|
||||
except ValueError as exc:
|
||||
raise newException(Defect, exc.msg)
|
||||
raiseAssert(exc.msg)
|
||||
|
||||
chronicles.formatIt(ChronosStream): shortLog(it)
|
||||
|
||||
@@ -50,17 +54,18 @@ method initStream*(s: ChronosStream) =
|
||||
if s.objName.len == 0:
|
||||
s.objName = ChronosStreamTrackerName
|
||||
|
||||
s.timeoutHandler = proc() {.async, gcsafe.} =
|
||||
s.timeoutHandler = proc(): Future[void] {.async: (raises: [], raw: true).} =
|
||||
trace "Idle timeout expired, closing ChronosStream", s
|
||||
await s.close()
|
||||
s.close()
|
||||
|
||||
procCall Connection(s).initStream()
|
||||
|
||||
proc init*(C: type ChronosStream,
|
||||
client: StreamTransport,
|
||||
dir: Direction,
|
||||
timeout = DefaultChronosStreamTimeout,
|
||||
observedAddr: Opt[MultiAddress]): ChronosStream =
|
||||
proc init*(
|
||||
C: type ChronosStream,
|
||||
client: StreamTransport,
|
||||
dir: Direction,
|
||||
timeout = DefaultChronosStreamTimeout,
|
||||
observedAddr: Opt[MultiAddress]): ChronosStream =
|
||||
result = C(client: client,
|
||||
timeout: timeout,
|
||||
dir: dir,
|
||||
@@ -94,7 +99,11 @@ when defined(libp2p_agents_metrics):
|
||||
libp2p_peers_identity.dec(labelValues = [s.shortAgent])
|
||||
s.tracked = false
|
||||
|
||||
method readOnce*(s: ChronosStream, pbytes: pointer, nbytes: int): Future[int] {.async.} =
|
||||
method readOnce*(
|
||||
s: ChronosStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int
|
||||
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
if s.atEof:
|
||||
raise newLPStreamEOFError()
|
||||
withExceptions:
|
||||
@@ -107,7 +116,10 @@ method readOnce*(s: ChronosStream, pbytes: pointer, nbytes: int): Future[int] {.
|
||||
libp2p_peers_traffic_read.inc(result.int64, labelValues = [s.shortAgent])
|
||||
|
||||
proc completeWrite(
|
||||
s: ChronosStream, fut: Future[int], msgLen: int): Future[void] {.async.} =
|
||||
s: ChronosStream,
|
||||
fut: Future[int].Raising([TransportError, CancelledError]),
|
||||
msgLen: int
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
withExceptions:
|
||||
# StreamTransport will only return written < msg.len on fatal failures where
|
||||
# further writing is not possible - in such cases, we'll raise here,
|
||||
@@ -124,7 +136,11 @@ proc completeWrite(
|
||||
if s.tracked:
|
||||
libp2p_peers_traffic_write.inc(msgLen.int64, labelValues = [s.shortAgent])
|
||||
|
||||
method write*(s: ChronosStream, msg: seq[byte]): Future[void] =
|
||||
method write*(
|
||||
s: ChronosStream,
|
||||
msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
# Avoid a copy of msg being kept in the closure created by `{.async.}` as this
|
||||
# drives up memory usage
|
||||
if msg.len == 0:
|
||||
@@ -145,19 +161,14 @@ method closed*(s: ChronosStream): bool =
|
||||
method atEof*(s: ChronosStream): bool =
|
||||
s.client.atEof()
|
||||
|
||||
method closeImpl*(s: ChronosStream) {.async.} =
|
||||
try:
|
||||
trace "Shutting down chronos stream", address = $s.client.remoteAddress(), s
|
||||
method closeImpl*(
|
||||
s: ChronosStream) {.async: (raises: []).} =
|
||||
trace "Shutting down chronos stream", address = $s.client.remoteAddress(), s
|
||||
|
||||
if not s.client.closed():
|
||||
await s.client.closeWait()
|
||||
if not s.client.closed():
|
||||
await s.client.closeWait()
|
||||
|
||||
trace "Shutdown chronos stream", address = $s.client.remoteAddress(), s
|
||||
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
trace "Error closing chronosstream", s, msg = exc.msg
|
||||
trace "Shutdown chronos stream", address = $s.client.remoteAddress(), s
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
# do this after closing!
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -27,25 +27,25 @@ const
|
||||
DefaultConnectionTimeout* = 5.minutes
|
||||
|
||||
type
TimeoutHandler* = proc(): Future[void] {.gcsafe, raises: [].}
TimeoutHandler* = proc(): Future[void] {.async: (raises: []).}

Connection* = ref object of LPStream
activity*: bool # reset every time data is sent or received
timeout*: Duration # channel timeout if no activity
timerTaskFut: Future[void] # the current timer instance
activity*: bool # reset every time data is sent or received
timeout*: Duration # channel timeout if no activity
timerTaskFut: Future[void].Raising([]) # the current timer instance
timeoutHandler*: TimeoutHandler # timeout handler
peerId*: PeerId
observedAddr*: Opt[MultiAddress]
protocol*: string # protocol used by the connection, used as tag for metrics
transportDir*: Direction # The bottom level transport (generally the socket) direction
protocol*: string # protocol used by the connection, used as metrics tag
transportDir*: Direction # underlying transport (usually socket) direction
when defined(libp2p_agents_metrics):
shortAgent*: string

proc timeoutMonitor(s: Connection) {.async, gcsafe.}
proc timeoutMonitor(s: Connection) {.async: (raises: []).}

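timerTaskFut is now stored as Future[void].Raising([]), a chronos typed future whose (empty) failure set is part of the type, so it can be created from and awaited in raises: [] procs. A minimal sketch of holding such a future in a field (illustrative type, assuming chronos v4 typed futures):

# Sketch: a field holding a future that is statically known not to fail.
import chronos

type Ticker = ref object
  task: Future[void].Raising([])   # may complete or stay pending, never fail

proc run(t: Ticker) {.async: (raises: []).} =
  # raises: [] means even CancelledError is handled internally
  try:
    await sleepAsync(5.milliseconds)
  except CancelledError:
    discard

proc start(t: Ticker) =
  t.task = t.run()                 # run()'s typed future matches the field

when isMainModule:
  let t = Ticker()
  t.start()
  waitFor t.task
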
func shortLog*(conn: Connection): string =
|
||||
try:
|
||||
if conn.isNil: "Connection(nil)"
|
||||
if conn == nil: "Connection(nil)"
|
||||
else: &"{shortLog(conn.peerId)}:{conn.oid}"
|
||||
except ValueError as exc:
|
||||
raiseAssert(exc.msg)
|
||||
@@ -58,23 +58,28 @@ method initStream*(s: Connection) =
|
||||
|
||||
procCall LPStream(s).initStream()
|
||||
|
||||
doAssert(isNil(s.timerTaskFut))
|
||||
doAssert(s.timerTaskFut == nil)
|
||||
|
||||
if s.timeout > 0.millis:
|
||||
trace "Monitoring for timeout", s, timeout = s.timeout
|
||||
|
||||
s.timerTaskFut = s.timeoutMonitor()
|
||||
if isNil(s.timeoutHandler):
|
||||
s.timeoutHandler = proc(): Future[void] =
|
||||
trace "Idle timeout expired, closing connection", s
|
||||
s.close()
|
||||
if s.timeoutHandler == nil:
|
||||
s.timeoutHandler =
|
||||
proc(): Future[void] {.async: (raises: [], raw: true).} =
|
||||
trace "Idle timeout expired, closing connection", s
|
||||
s.close()
|
||||
|
||||
method closeImpl*(s: Connection): Future[void] =
method closeImpl*(s: Connection): Future[void] {.async: (raises: []).} =
# Cleanup timeout timer
trace "Closing connection", s

if not isNil(s.timerTaskFut) and not s.timerTaskFut.finished:
s.timerTaskFut.cancel()
if s.timerTaskFut != nil and not s.timerTaskFut.finished:
# Don't `cancelAndWait` here to avoid risking deadlock in this scenario:
# - `pollActivity` is waiting for `s.timeoutHandler` to complete.
# - `s.timeoutHandler` may have triggered `closeImpl` and we are now here.
# In this situation, we have to return for `s.timerTaskFut` to complete.
s.timerTaskFut.cancelSoon()
s.timerTaskFut = nil

trace "Closed connection", s
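The close path switches from cancel() to cancelSoon(): the cancellation request is queued and closeImpl returns immediately, which avoids the self-deadlock described in the comment when the timeout handler itself triggered the close. A tiny usage sketch (illustrative task; exactly when the future reports cancelled is an assumption):

# Sketch: cancelSoon requests cancellation and returns without awaiting it,
# so a proc reached from the very task being cancelled cannot deadlock on it;
# cancelAndWait would block until the task has actually finished.
import chronos

proc longTask() {.async.} =
  await sleepAsync(10.seconds)

proc shutdown(fut: Future[void]) {.async.} =
  fut.cancelSoon()                      # fire-and-forget cancellation request
  # a caller outside the cancelled call chain could instead use:
  #   await fut.cancelAndWait()

when isMainModule:
  let fut = longTask()
  waitFor shutdown(fut)
  waitFor sleepAsync(10.milliseconds)   # let the loop deliver the cancellation
  doAssert fut.cancelled()
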
@@ -84,7 +89,7 @@ method closeImpl*(s: Connection): Future[void] =
|
||||
func hash*(p: Connection): Hash =
|
||||
cast[pointer](p).hash
|
||||
|
||||
proc pollActivity(s: Connection): Future[bool] {.async.} =
|
||||
proc pollActivity(s: Connection): Future[bool] {.async: (raises: []).} =
|
||||
if s.closed and s.atEof:
|
||||
return false # Done, no more monitoring
|
||||
|
||||
@@ -95,22 +100,13 @@ proc pollActivity(s: Connection): Future[bool] {.async.} =
|
||||
# Inactivity timeout happened, call timeout monitor
|
||||
|
||||
trace "Connection timed out", s
|
||||
if not(isNil(s.timeoutHandler)):
|
||||
if s.timeoutHandler != nil:
|
||||
trace "Calling timeout handler", s
|
||||
|
||||
try:
|
||||
await s.timeoutHandler()
|
||||
except CancelledError:
|
||||
# timeoutHandler is expected to be fast, but it's still possible that
|
||||
# cancellation will happen here - no need to warn about it - we do want to
|
||||
# stop the polling however
|
||||
debug "Timeout handler cancelled", s
|
||||
except CatchableError as exc: # Shouldn't happen
|
||||
warn "exception in timeout handler", s, exc = exc.msg
|
||||
await s.timeoutHandler()
|
||||
|
||||
return false
|
||||
|
||||
proc timeoutMonitor(s: Connection) {.async, gcsafe.} =
|
||||
proc timeoutMonitor(s: Connection) {.async: (raises: []).} =
|
||||
## monitor the channel for inactivity
|
||||
##
|
||||
## if the timeout was hit, it means that
|
||||
@@ -129,21 +125,22 @@ proc timeoutMonitor(s: Connection) {.async, gcsafe.} =
|
||||
return
|
||||
|
||||
method getWrapped*(s: Connection): Connection {.base.} =
|
||||
doAssert(false, "not implemented!")
|
||||
raiseAssert("Not implemented!")
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
proc setShortAgent*(s: Connection, shortAgent: string) =
|
||||
var conn = s
|
||||
while not isNil(conn):
|
||||
while conn != nil:
|
||||
conn.shortAgent = shortAgent
|
||||
conn = conn.getWrapped()
|
||||
|
||||
proc new*(C: type Connection,
|
||||
peerId: PeerId,
|
||||
dir: Direction,
|
||||
observedAddr: Opt[MultiAddress],
|
||||
timeout: Duration = DefaultConnectionTimeout,
|
||||
timeoutHandler: TimeoutHandler = nil): Connection =
|
||||
proc new*(
|
||||
C: type Connection,
|
||||
peerId: PeerId,
|
||||
dir: Direction,
|
||||
observedAddr: Opt[MultiAddress],
|
||||
timeout: Duration = DefaultConnectionTimeout,
|
||||
timeoutHandler: TimeoutHandler = nil): Connection =
|
||||
result = C(peerId: peerId,
|
||||
dir: dir,
|
||||
timeout: timeout,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -23,8 +23,8 @@ import ../varint,
|
||||
|
||||
export errors
|
||||
|
||||
declareGauge(libp2p_open_streams,
|
||||
"open stream instances", labels = ["type", "dir"])
|
||||
declareGauge libp2p_open_streams,
|
||||
"open stream instances", labels = ["type", "dir"]
|
||||
|
||||
export oids
|
||||
|
||||
@@ -50,12 +50,7 @@ type
|
||||
|
||||
LPStreamError* = object of LPError
|
||||
LPStreamIncompleteError* = object of LPStreamError
|
||||
LPStreamIncorrectDefect* = object of Defect
|
||||
LPStreamLimitError* = object of LPStreamError
|
||||
LPStreamReadError* = object of LPStreamError
|
||||
par*: ref CatchableError
|
||||
LPStreamWriteError* = object of LPStreamError
|
||||
par*: ref CatchableError
|
||||
LPStreamEOFError* = object of LPStreamError
|
||||
|
||||
# X | Read | Write
|
||||
@@ -77,54 +72,12 @@ type
|
||||
opened*: uint64
|
||||
closed*: uint64
|
||||
|
||||
proc setupStreamTracker*(name: string): StreamTracker =
|
||||
let tracker = new StreamTracker
|
||||
|
||||
proc dumpTracking(): string {.gcsafe.} =
|
||||
return "Opened " & tracker.id & ": " & $tracker.opened & "\n" &
|
||||
"Closed " & tracker.id & ": " & $tracker.closed
|
||||
|
||||
proc leakTransport(): bool {.gcsafe.} =
|
||||
return (tracker.opened != tracker.closed)
|
||||
|
||||
tracker.id = name
|
||||
tracker.opened = 0
|
||||
tracker.closed = 0
|
||||
tracker.dump = dumpTracking
|
||||
tracker.isLeaked = leakTransport
|
||||
addTracker(name, tracker)
|
||||
|
||||
return tracker
|
||||
|
||||
proc getStreamTracker(name: string): StreamTracker {.gcsafe.} =
|
||||
result = cast[StreamTracker](getTracker(name))
|
||||
if isNil(result):
|
||||
result = setupStreamTracker(name)
|
||||
|
||||
proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError =
|
||||
var w = newException(LPStreamReadError, "Read stream failed")
|
||||
w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
|
||||
w.par = p
|
||||
result = w
|
||||
|
||||
proc newLPStreamReadError*(msg: string): ref LPStreamReadError =
|
||||
newException(LPStreamReadError, msg)
|
||||
|
||||
proc newLPStreamWriteError*(p: ref CatchableError): ref LPStreamWriteError =
|
||||
var w = newException(LPStreamWriteError, "Write stream failed")
|
||||
w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
|
||||
w.par = p
|
||||
result = w
|
||||
|
||||
proc newLPStreamIncompleteError*(): ref LPStreamIncompleteError =
|
||||
result = newException(LPStreamIncompleteError, "Incomplete data received")
|
||||
|
||||
proc newLPStreamLimitError*(): ref LPStreamLimitError =
|
||||
result = newException(LPStreamLimitError, "Buffer limit reached")
|
||||
|
||||
proc newLPStreamIncorrectDefect*(m: string): ref LPStreamIncorrectDefect =
|
||||
result = newException(LPStreamIncorrectDefect, m)
|
||||
|
||||
proc newLPStreamEOFError*(): ref LPStreamEOFError =
|
||||
result = newException(LPStreamEOFError, "Stream EOF!")
|
||||
|
||||
@@ -145,8 +98,9 @@ proc newLPStreamConnDownError*(
|
||||
parentException)
|
||||
|
||||
func shortLog*(s: LPStream): auto =
|
||||
if s.isNil: "LPStream(nil)"
|
||||
if s == nil: "LPStream(nil)"
|
||||
else: $s.oid
|
||||
|
||||
chronicles.formatIt(LPStream): shortLog(it)
|
||||
|
||||
method initStream*(s: LPStream) {.base.} =
|
||||
@@ -157,10 +111,12 @@ method initStream*(s: LPStream) {.base.} =
|
||||
s.oid = genOid()
|
||||
|
||||
libp2p_open_streams.inc(labelValues = [s.objName, $s.dir])
|
||||
inc getStreamTracker(s.objName).opened
|
||||
trackCounter(s.objName)
|
||||
trace "Stream created", s, objName = s.objName, dir = $s.dir
|
||||
|
||||
proc join*(s: LPStream): Future[void] {.public.} =
|
||||
proc join*(
|
||||
s: LPStream
|
||||
): Future[void] {.async: (raises: [CancelledError], raw: true), public.} =
|
||||
## Wait for the stream to be closed
|
||||
s.closeEvent.wait()
|
||||
|
||||
@@ -171,19 +127,21 @@ method atEof*(s: LPStream): bool {.base, public.} =
|
||||
s.isEof
|
||||
|
||||
method readOnce*(
|
||||
s: LPStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int):
|
||||
Future[int] {.base, async, public.} =
|
||||
s: LPStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int
|
||||
): Future[int] {.base, async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true), public.} =
|
||||
## Reads whatever is available in the stream,
|
||||
## up to `nbytes`. Will block if nothing is
|
||||
## available
|
||||
doAssert(false, "not implemented!")
|
||||
raiseAssert("Not implemented!")
|
||||
|
||||
proc readExactly*(s: LPStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int):
|
||||
Future[void] {.async, public.} =
|
||||
proc readExactly*(
|
||||
s: LPStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
## Waits for `nbytes` to be available, then read
|
||||
## them and return them
|
||||
if s.atEof:
|
||||
@@ -217,10 +175,11 @@ proc readExactly*(s: LPStream,
|
||||
trace "couldn't read all bytes, incomplete data", s, nbytes, read
|
||||
raise newLPStreamIncompleteError()
|
||||
|
||||
proc readLine*(s: LPStream,
|
||||
limit = 0,
|
||||
sep = "\r\n"): Future[string]
|
||||
{.async, public.} =
|
||||
proc readLine*(
|
||||
s: LPStream,
|
||||
limit = 0,
|
||||
sep = "\r\n"
|
||||
): Future[string] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
## Reads up to `limit` bytes are read, or a `sep` is found
|
||||
# TODO replace with something that exploits buffering better
|
||||
var lim = if limit <= 0: -1 else: limit
|
||||
@@ -246,7 +205,9 @@ proc readLine*(s: LPStream,
|
||||
if len(result) == lim:
|
||||
break
|
||||
|
||||
proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
|
||||
proc readVarint*(
|
||||
conn: LPStream
|
||||
): Future[uint64] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
var
|
||||
buffer: array[10, byte]
|
||||
|
||||
@@ -264,7 +225,11 @@ proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
|
||||
if true: # can't end with a raise apparently
|
||||
raise (ref InvalidVarintError)(msg: "Cannot parse varint")
|
||||
|
||||
proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, gcsafe, public.} =
|
||||
proc readLp*(
|
||||
s: LPStream,
|
||||
maxSize: int
|
||||
): Future[seq[byte]] {.async: (raises: [
|
||||
CancelledError, LPStreamError]), public.} =
|
||||
## read length prefixed msg, with the length encoded as a varint
|
||||
let
|
||||
length = await s.readVarint()
|
||||
@@ -278,13 +243,21 @@ proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, gcsafe, publ
|
||||
|
||||
var res = newSeqUninitialized[byte](length)
|
||||
await s.readExactly(addr res[0], res.len)
|
||||
return res
|
||||
res
|
||||
|
||||
method write*(s: LPStream, msg: seq[byte]): Future[void] {.base, public.} =
|
||||
method write*(
|
||||
s: LPStream,
|
||||
msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true), base, public.} =
|
||||
# Write `msg` to stream, waiting for the write to be finished
|
||||
doAssert(false, "not implemented!")
|
||||
raiseAssert("Not implemented!")
|
||||
|
||||
proc writeLp*(s: LPStream, msg: openArray[byte]): Future[void] {.public.} =
|
||||
proc writeLp*(
|
||||
s: LPStream,
|
||||
msg: openArray[byte]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true), public.} =
|
||||
## Write `msg` with a varint-encoded length prefix
|
||||
let vbytes = PB.toBytes(msg.len().uint64)
|
||||
var buf = newSeqUninitialized[byte](msg.len() + vbytes.len)
|
||||
@@ -292,35 +265,53 @@ proc writeLp*(s: LPStream, msg: openArray[byte]): Future[void] {.public.} =
|
||||
buf[vbytes.len..<buf.len] = msg
|
||||
s.write(buf)
|
||||
|
||||
proc writeLp*(s: LPStream, msg: string): Future[void] {.public.} =
|
||||
proc writeLp*(
|
||||
s: LPStream,
|
||||
msg: string
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true), public.} =
|
||||
writeLp(s, msg.toOpenArrayByte(0, msg.high))
|
||||
|
||||
proc write*(s: LPStream, msg: string): Future[void] {.public.} =
|
||||
proc write*(
|
||||
s: LPStream,
|
||||
msg: string
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true), public.} =
|
||||
s.write(msg.toBytes())
|
||||
|
||||
method closeImpl*(s: LPStream): Future[void] {.async, base.} =
method closeImpl*(
s: LPStream
): Future[void] {.async: (raises: [], raw: true), base.} =
## Implementation of close - called only once
trace "Closing stream", s, objName = s.objName, dir = $s.dir
libp2p_open_streams.dec(labelValues = [s.objName, $s.dir])
inc getStreamTracker(s.objName).closed
untrackCounter(s.objName)
s.closeEvent.fire()
trace "Closed stream", s, objName = s.objName, dir = $s.dir
let fut = newFuture[void]()
fut.complete()
fut

method close*(s: LPStream): Future[void] {.base, async, public.} = # {.raises [Defect].}
method close*(
s: LPStream
): Future[void] {.async: (raises: [], raw: true), base, public.} =
## close the stream - this may block, but will not raise exceptions
##
if s.isClosed:
trace "Already closed", s
return
let fut = newFuture[void]()
fut.complete()
return fut

s.isClosed = true # Set flag before performing virtual close

# An separate implementation method is used so that even when derived types
# A separate implementation method is used so that even when derived types
# override `closeImpl`, it is called only once - anyone overriding `close`
# itself must implement this - once-only check as well, with their own field
await closeImpl(s)
closeImpl(s)

proc closeWithEOF*(s: LPStream): Future[void] {.async, public.} =
|
||||
proc closeWithEOF*(
|
||||
s: LPStream): Future[void] {.async: (raises: []), public.} =
|
||||
## Close the stream and wait for EOF - use this with half-closed streams where
|
||||
## an EOF is expected to arrive from the other end.
|
||||
##
|
||||
@@ -349,9 +340,9 @@ proc closeWithEOF*(s: LPStream): Future[void] {.async, public.} =
|
||||
var buf: array[8, byte]
|
||||
if (await readOnce(s, addr buf[0], buf.len)) != 0:
|
||||
debug "Unexpected bytes while waiting for EOF", s
|
||||
except CancelledError:
|
||||
discard
|
||||
except LPStreamEOFError:
|
||||
trace "Expected EOF came", s
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
except LPStreamError as exc:
|
||||
debug "Unexpected error while waiting for EOF", s, msg = exc.msg
|
||||
|
||||
@@ -71,17 +71,17 @@ type
|
||||
inUse: bool
|
||||
|
||||
|
||||
method setup*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
|
||||
method setup*(self: Service, switch: Switch): Future[bool] {.base, async.} =
|
||||
if self.inUse:
|
||||
warn "service setup has already been called"
|
||||
return false
|
||||
self.inUse = true
|
||||
return true
|
||||
|
||||
method run*(self: Service, switch: Switch) {.base, async, gcsafe.} =
|
||||
method run*(self: Service, switch: Switch) {.base, async.} =
|
||||
doAssert(false, "Not implemented!")
|
||||
|
||||
method stop*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
|
||||
method stop*(self: Service, switch: Switch): Future[bool] {.base, async.} =
|
||||
if not self.inUse:
|
||||
warn "service is already stopped"
|
||||
return false
|
||||
@@ -141,10 +141,10 @@ method connect*(
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out): Future[void] {.public.} =
|
||||
dir = Direction.Out): Future[void] {.public.} =
|
||||
## Connects to a peer without opening a stream to it
|
||||
|
||||
s.dialer.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
|
||||
s.dialer.connect(peerId, addrs, forceDial, reuseConnection, dir)
|
||||
|
||||
method connect*(
|
||||
s: Switch,
|
||||
@@ -213,7 +213,7 @@ proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil)
|
||||
s.peerInfo.protocols.add(proto.codec)
|
||||
|
||||
proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} =
|
||||
let muxed = await trans.upgrade(conn, Direction.In, Opt.none(PeerId))
|
||||
let muxed = await trans.upgrade(conn, Opt.none(PeerId))
|
||||
switch.connManager.storeMuxer(muxed)
|
||||
await switch.peerStore.identify(muxed)
|
||||
trace "Connection upgrade succeeded"
|
||||
@@ -321,7 +321,7 @@ proc stop*(s: Switch) {.async, public.} =
|
||||
|
||||
trace "Switch stopped"
|
||||
|
||||
proc start*(s: Switch) {.async, gcsafe, public.} =
|
||||
proc start*(s: Switch) {.async, public.} =
|
||||
## Start listening on every transport
|
||||
|
||||
if s.started:
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -42,36 +42,8 @@ type
|
||||
acceptFuts: seq[Future[StreamTransport]]
|
||||
connectionsTimeout: Duration
|
||||
|
||||
TcpTransportTracker* = ref object of TrackerBase
|
||||
opened*: uint64
|
||||
closed*: uint64
|
||||
|
||||
TcpTransportError* = object of transport.TransportError
|
||||
|
||||
proc setupTcpTransportTracker(): TcpTransportTracker {.gcsafe, raises: [].}
|
||||
|
||||
proc getTcpTransportTracker(): TcpTransportTracker {.gcsafe.} =
|
||||
result = cast[TcpTransportTracker](getTracker(TcpTransportTrackerName))
|
||||
if isNil(result):
|
||||
result = setupTcpTransportTracker()
|
||||
|
||||
proc dumpTracking(): string {.gcsafe.} =
|
||||
var tracker = getTcpTransportTracker()
|
||||
result = "Opened tcp transports: " & $tracker.opened & "\n" &
|
||||
"Closed tcp transports: " & $tracker.closed
|
||||
|
||||
proc leakTransport(): bool {.gcsafe.} =
|
||||
var tracker = getTcpTransportTracker()
|
||||
result = (tracker.opened != tracker.closed)
|
||||
|
||||
proc setupTcpTransportTracker(): TcpTransportTracker =
|
||||
result = new TcpTransportTracker
|
||||
result.opened = 0
|
||||
result.closed = 0
|
||||
result.dump = dumpTracking
|
||||
result.isLeaked = leakTransport
|
||||
addTracker(TcpTransportTrackerName, result)
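
The hand-rolled tracker object above is dropped in favour of the named-counter API used later in this file. A hypothetical wrapper over that API, only to show the pairing; it is not part of the change and assumes the same imports as the transport module:

```nim
template withCounterTracking(name: string, body: untyped) =
  trackCounter(name)        # counts an "open" for `name`
  try:
    body
  finally:
    untrackCounter(name)    # counts the matching "close"
```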
|
||||
|
||||
proc connHandler*(self: TcpTransport,
|
||||
client: StreamTransport,
|
||||
observedAddr: Opt[MultiAddress],
|
||||
@@ -90,24 +62,38 @@ proc connHandler*(self: TcpTransport,
|
||||
timeout = self.connectionsTimeout
|
||||
))
|
||||
|
||||
proc onClose() {.async.} =
|
||||
proc onClose() {.async: (raises: []).} =
|
||||
try:
|
||||
let futs = @[client.join(), conn.join()]
|
||||
await futs[0] or futs[1]
|
||||
for f in futs:
|
||||
if not f.finished: await f.cancelAndWait() # cancel outstanding join()
|
||||
block:
|
||||
let
|
||||
fut1 = client.join()
|
||||
fut2 = conn.join()
|
||||
try: # https://github.com/status-im/nim-chronos/issues/516
|
||||
discard await race(fut1, fut2)
|
||||
except ValueError: raiseAssert("Futures list is not empty")
|
||||
# at least one join() completed, cancel pending one, if any
|
||||
if not fut1.finished: await fut1.cancelAndWait()
|
||||
if not fut2.finished: await fut2.cancelAndWait()
|
||||
|
||||
trace "Cleaning up client", addrs = $client.remoteAddress,
|
||||
conn
|
||||
|
||||
self.clients[dir].keepItIf( it != client )
|
||||
await allFuturesThrowing(
|
||||
conn.close(), client.closeWait())
|
||||
|
||||
block:
|
||||
let
|
||||
fut1 = conn.close()
|
||||
fut2 = client.closeWait()
|
||||
await allFutures(fut1, fut2)
|
||||
if fut1.failed:
|
||||
let err = fut1.error()
|
||||
debug "Error cleaning up client", errMsg = err.msg, conn
|
||||
static: doAssert typeof(fut2).E is void # Cannot fail
|
||||
|
||||
trace "Cleaned up client", addrs = $client.remoteAddress,
|
||||
conn
|
||||
|
||||
except CatchableError as exc:
|
||||
except CancelledError as exc:
|
||||
let useExc {.used.} = exc
|
||||
debug "Error cleaning up client", errMsg = exc.msg, conn
|
||||
|
||||
@@ -152,7 +138,7 @@ method start*(
|
||||
|
||||
await procCall Transport(self).start(addrs)
|
||||
trace "Starting TCP transport"
|
||||
inc getTcpTransportTracker().opened
|
||||
trackCounter(TcpTransportTrackerName)
|
||||
|
||||
for i, ma in addrs:
|
||||
if not self.handles(ma):
|
||||
@@ -174,7 +160,7 @@ method start*(
|
||||
|
||||
trace "Listening on", address = ma
|
||||
|
||||
method stop*(self: TcpTransport) {.async, gcsafe.} =
|
||||
method stop*(self: TcpTransport) {.async.} =
|
||||
## stop the transport
|
||||
##
|
||||
try:
|
||||
@@ -206,11 +192,11 @@ method stop*(self: TcpTransport) {.async, gcsafe.} =
|
||||
self.servers = @[]
|
||||
|
||||
trace "Transport stopped"
|
||||
inc getTcpTransportTracker().closed
|
||||
untrackCounter(TcpTransportTrackerName)
|
||||
except CatchableError as exc:
|
||||
trace "Error shutting down tcp transport", exc = exc.msg
|
||||
|
||||
method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
|
||||
method accept*(self: TcpTransport): Future[Connection] {.async.} =
|
||||
## accept a new TCP connection
|
||||
##
|
||||
|
||||
@@ -219,7 +205,7 @@ method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
|
||||
|
||||
try:
|
||||
if self.acceptFuts.len <= 0:
|
||||
self.acceptFuts = self.servers.mapIt(it.accept())
|
||||
self.acceptFuts = self.servers.mapIt(Future[StreamTransport](it.accept()))
|
||||
|
||||
if self.acceptFuts.len <= 0:
|
||||
return
|
||||
@@ -260,7 +246,7 @@ method dial*(
|
||||
self: TcpTransport,
|
||||
hostname: string,
|
||||
address: MultiAddress,
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
|
||||
## dial a peer
|
||||
##
|
||||
|
||||
|
||||
@@ -82,7 +82,7 @@ proc handlesStart(address: MultiAddress): bool {.gcsafe.} =
|
||||
return TcpOnion3.match(address)
|
||||
|
||||
proc connectToTorServer(
|
||||
transportAddress: TransportAddress): Future[StreamTransport] {.async, gcsafe.} =
|
||||
transportAddress: TransportAddress): Future[StreamTransport] {.async.} =
|
||||
let transp = await connect(transportAddress)
|
||||
try:
|
||||
discard await transp.write(@[Socks5ProtocolVersion, NMethods, Socks5AuthMethod.NoAuth.byte])
|
||||
@@ -99,7 +99,7 @@ proc connectToTorServer(
|
||||
await transp.closeWait()
|
||||
raise err
|
||||
|
||||
proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
|
||||
proc readServerReply(transp: StreamTransport) {.async.} =
|
||||
## The specification for this code is defined on
|
||||
## [link text](https://www.rfc-editor.org/rfc/rfc1928#section-5)
|
||||
## and [link text](https://www.rfc-editor.org/rfc/rfc1928#section-6).
|
||||
@@ -121,7 +121,7 @@ proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
|
||||
let atyp = firstFourOctets[3]
|
||||
case atyp:
|
||||
of Socks5AddressType.IPv4.byte:
|
||||
discard await transp.read(ipV4NumOctets + portNumOctets)
|
||||
discard await transp.read(ipV4NumOctets + portNumOctets)
|
||||
of Socks5AddressType.FQDN.byte:
|
||||
let fqdnNumOctets = await transp.read(1)
|
||||
discard await transp.read(int(uint8.fromBytes(fqdnNumOctets)) + portNumOctets)
|
||||
@@ -166,7 +166,7 @@ proc parseDnsTcp(address: MultiAddress):
|
||||
(Socks5AddressType.FQDN.byte, dstAddr, dstPort)
|
||||
|
||||
proc dialPeer(
|
||||
transp: StreamTransport, address: MultiAddress) {.async, gcsafe.} =
|
||||
transp: StreamTransport, address: MultiAddress) {.async.} =
|
||||
let (atyp, dstAddr, dstPort) =
|
||||
if Onion3.match(address):
|
||||
parseOnion3(address)
|
||||
@@ -190,7 +190,7 @@ method dial*(
|
||||
self: TorTransport,
|
||||
hostname: string,
|
||||
address: MultiAddress,
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
|
||||
## dial a peer
|
||||
##
|
||||
if not handlesDial(address):
|
||||
@@ -229,14 +229,14 @@ method start*(
|
||||
else:
|
||||
raise newException(TransportStartError, "Tor Transport couldn't start, no supported addr was provided.")
|
||||
|
||||
method accept*(self: TorTransport): Future[Connection] {.async, gcsafe.} =
|
||||
method accept*(self: TorTransport): Future[Connection] {.async.} =
|
||||
## accept a new Tor connection
|
||||
##
|
||||
let conn = await self.tcpTransport.accept()
|
||||
conn.observedAddr = Opt.none(MultiAddress)
|
||||
return conn
|
||||
|
||||
method stop*(self: TorTransport) {.async, gcsafe.} =
|
||||
method stop*(self: TorTransport) {.async.} =
|
||||
## stop the transport
|
||||
##
|
||||
await procCall Transport(self).stop() # call base
|
||||
|
||||
@@ -83,13 +83,12 @@ proc dial*(
|
||||
method upgrade*(
|
||||
self: Transport,
|
||||
conn: Connection,
|
||||
direction: Direction,
|
||||
peerId: Opt[PeerId]): Future[Muxer] {.base, gcsafe.} =
|
||||
## base upgrade method that the transport uses to perform
|
||||
## transport specific upgrades
|
||||
##
|
||||
|
||||
self.upgrader.upgrade(conn, direction, peerId)
|
||||
self.upgrader.upgrade(conn, peerId)
|
||||
|
||||
method handles*(
|
||||
self: Transport,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -44,11 +44,12 @@ method initStream*(s: WsStream) =
|
||||
|
||||
procCall Connection(s).initStream()
|
||||
|
||||
proc new*(T: type WsStream,
|
||||
session: WSSession,
|
||||
dir: Direction,
|
||||
observedAddr: Opt[MultiAddress],
|
||||
timeout = 10.minutes): T =
|
||||
proc new*(
|
||||
T: type WsStream,
|
||||
session: WSSession,
|
||||
dir: Direction,
|
||||
observedAddr: Opt[MultiAddress],
|
||||
timeout = 10.minutes): T =
|
||||
|
||||
let stream = T(
|
||||
session: session,
|
||||
@@ -63,18 +64,23 @@ template mapExceptions(body: untyped) =
|
||||
try:
|
||||
body
|
||||
except AsyncStreamIncompleteError:
|
||||
raise newLPStreamEOFError()
|
||||
raise newLPStreamIncompleteError()
|
||||
except AsyncStreamLimitError:
|
||||
raise newLPStreamLimitError()
|
||||
except AsyncStreamUseClosedError:
|
||||
raise newLPStreamEOFError()
|
||||
except WSClosedError:
|
||||
raise newLPStreamEOFError()
|
||||
except AsyncStreamLimitError:
|
||||
raise newLPStreamLimitError()
|
||||
except WebSocketError:
|
||||
raise newLPStreamEOFError()
|
||||
except CatchableError:
|
||||
raise newLPStreamEOFError()
|
||||
|
||||
method readOnce*(
|
||||
s: WsStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int): Future[int] {.async.} =
|
||||
s: WsStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int
|
||||
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
let res = mapExceptions(await s.session.recv(pbytes, nbytes))
|
||||
|
||||
if res == 0 and s.session.readyState == ReadyState.Closed:
|
||||
@@ -83,13 +89,17 @@ method readOnce*(
|
||||
return res
|
||||
|
||||
method write*(
|
||||
s: WsStream,
|
||||
msg: seq[byte]): Future[void] {.async.} =
|
||||
s: WsStream,
|
||||
msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
mapExceptions(await s.session.send(msg, Opcode.Binary))
|
||||
s.activity = true # reset activity flag
|
||||
|
||||
method closeImpl*(s: WsStream): Future[void] {.async.} =
|
||||
await s.session.close()
|
||||
method closeImpl*(s: WsStream): Future[void] {.async: (raises: []).} =
|
||||
try:
|
||||
await s.session.close()
|
||||
except CatchableError:
|
||||
discard
|
||||
await procCall Connection(s).closeImpl()
|
||||
|
||||
method getWrapped*(s: WsStream): Connection = nil
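
A hypothetical helper mirroring the `write` override above, to show the intended use of `mapExceptions`: any AsyncStream or WebSocket failure is rethrown as an `LPStreamError` subtype, so the typed raises list stays small.

```nim
proc sendBinary(s: WsStream, msg: seq[byte]) {.async: (raises: [
    CancelledError, LPStreamError]).} =
  # websock/asyncstream errors are translated into LPStream errors here
  mapExceptions(await s.session.send(msg, Opcode.Binary))
```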
|
||||
@@ -136,7 +146,7 @@ method start*(
|
||||
if WSS.match(ma):
|
||||
if self.secure: true
|
||||
else:
|
||||
warn "Trying to listen on a WSS address without setting the certificate!"
|
||||
warn "Trying to listen on a WSS address without setting certificate!"
|
||||
false
|
||||
else: false
|
||||
|
||||
@@ -173,7 +183,7 @@ method start*(
|
||||
|
||||
self.running = true
|
||||
|
||||
method stop*(self: WsTransport) {.async, gcsafe.} =
|
||||
method stop*(self: WsTransport) {.async.} =
|
||||
## stop the transport
|
||||
##
|
||||
|
||||
@@ -237,7 +247,7 @@ proc connHandler(self: WsTransport,
|
||||
asyncSpawn onClose()
|
||||
return conn
|
||||
|
||||
method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
|
||||
method accept*(self: WsTransport): Future[Connection] {.async.} =
|
||||
## accept a new WS connection
|
||||
##
|
||||
|
||||
@@ -276,6 +286,8 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
|
||||
debug "AsyncStream Error", exc = exc.msg
|
||||
except TransportTooManyError as exc:
|
||||
debug "Too many files opened", exc = exc.msg
|
||||
except TransportAbortedError as exc:
|
||||
debug "Connection aborted", exc = exc.msg
|
||||
except AsyncTimeoutError as exc:
|
||||
debug "Timed out", exc = exc.msg
|
||||
except TransportUseClosedError as exc:
|
||||
@@ -293,7 +305,7 @@ method dial*(
|
||||
self: WsTransport,
|
||||
hostname: string,
|
||||
address: MultiAddress,
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
|
||||
## dial a peer
|
||||
##
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -31,9 +31,8 @@ proc getMuxerByCodec(self: MuxedUpgrade, muxerName: string): MuxerProvider =
|
||||
return m
|
||||
|
||||
proc mux*(
|
||||
self: MuxedUpgrade,
|
||||
conn: Connection,
|
||||
direction: Direction): Future[Muxer] {.async, gcsafe.} =
|
||||
self: MuxedUpgrade,
|
||||
conn: Connection): Future[Muxer] {.async.} =
|
||||
## mux connection
|
||||
|
||||
trace "Muxing connection", conn
|
||||
@@ -42,7 +41,7 @@ proc mux*(
|
||||
return
|
||||
|
||||
let muxerName =
|
||||
if direction == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
|
||||
if conn.dir == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
|
||||
else: await MultistreamSelect.handle(conn, self.muxers.mapIt(it.codec))
|
||||
|
||||
if muxerName.len == 0 or muxerName == "na":
|
||||
@@ -60,18 +59,17 @@ proc mux*(
|
||||
return muxer
|
||||
|
||||
method upgrade*(
|
||||
self: MuxedUpgrade,
|
||||
conn: Connection,
|
||||
direction: Direction,
|
||||
peerId: Opt[PeerId]): Future[Muxer] {.async.} =
|
||||
trace "Upgrading connection", conn, direction
|
||||
self: MuxedUpgrade,
|
||||
conn: Connection,
|
||||
peerId: Opt[PeerId]): Future[Muxer] {.async.} =
|
||||
trace "Upgrading connection", conn, direction = conn.dir
|
||||
|
||||
let sconn = await self.secure(conn, direction, peerId) # secure the connection
|
||||
if isNil(sconn):
|
||||
let sconn = await self.secure(conn, peerId) # secure the connection
|
||||
if sconn == nil:
|
||||
raise newException(UpgradeFailedError,
|
||||
"unable to secure connection, stopping upgrade")
|
||||
|
||||
let muxer = await self.mux(sconn, direction) # mux it if possible
|
||||
let muxer = await self.mux(sconn) # mux it if possible
|
||||
if muxer == nil:
|
||||
raise newException(UpgradeFailedError,
|
||||
"a muxer is required for outgoing connections")
|
||||
@@ -84,27 +82,25 @@ method upgrade*(
|
||||
raise newException(UpgradeFailedError,
|
||||
"Connection closed or missing peer info, stopping upgrade")
|
||||
|
||||
trace "Upgraded connection", conn, sconn, direction
|
||||
trace "Upgraded connection", conn, sconn, direction = conn.dir
|
||||
return muxer
|
||||
|
||||
proc new*(
|
||||
T: type MuxedUpgrade,
|
||||
muxers: seq[MuxerProvider],
|
||||
secureManagers: openArray[Secure] = [],
|
||||
ms: MultistreamSelect): T =
|
||||
|
||||
T: type MuxedUpgrade,
|
||||
muxers: seq[MuxerProvider],
|
||||
secureManagers: openArray[Secure] = [],
|
||||
ms: MultistreamSelect): T =
|
||||
let upgrader = T(
|
||||
muxers: muxers,
|
||||
secureManagers: @secureManagers,
|
||||
ms: ms)
|
||||
|
||||
upgrader.streamHandler = proc(conn: Connection)
|
||||
{.async, gcsafe, raises: [].} =
|
||||
upgrader.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
|
||||
trace "Starting stream handler", conn
|
||||
try:
|
||||
await upgrader.ms.handle(conn) # handle incoming connection
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
return
|
||||
except CatchableError as exc:
|
||||
trace "exception in stream handler", conn, msg = exc.msg
|
||||
finally:
|
||||
|
||||
@@ -40,20 +40,18 @@ type
|
||||
method upgrade*(
|
||||
self: Upgrade,
|
||||
conn: Connection,
|
||||
direction: Direction,
|
||||
peerId: Opt[PeerId]): Future[Muxer] {.base.} =
|
||||
doAssert(false, "Not implemented!")
|
||||
|
||||
proc secure*(
|
||||
self: Upgrade,
|
||||
conn: Connection,
|
||||
direction: Direction,
|
||||
peerId: Opt[PeerId]): Future[Connection] {.async, gcsafe.} =
|
||||
peerId: Opt[PeerId]): Future[Connection] {.async.} =
|
||||
if self.secureManagers.len <= 0:
|
||||
raise newException(UpgradeFailedError, "No secure managers registered!")
|
||||
|
||||
let codec =
|
||||
if direction == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
|
||||
if conn.dir == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
|
||||
else: await MultistreamSelect.handle(conn, self.secureManagers.mapIt(it.codec))
|
||||
if codec.len == 0:
|
||||
raise newException(UpgradeFailedError, "Unable to negotiate a secure channel!")
|
||||
@@ -65,4 +63,4 @@ proc secure*(
|
||||
# let's avoid duplicating checks but detect if it fails to do it properly
|
||||
doAssert(secureProtocol.len > 0)
|
||||
|
||||
return await secureProtocol[0].secure(conn, direction == Out, peerId)
|
||||
return await secureProtocol[0].secure(conn, peerId)
|
||||
|
||||
@@ -89,8 +89,27 @@ template exceptionToAssert*(body: untyped): untyped =
|
||||
res
|
||||
|
||||
template withValue*[T](self: Opt[T] | Option[T], value, body: untyped): untyped =
|
||||
if self.isSome:
|
||||
let value {.inject.} = self.get()
|
||||
## This template provides a convenient way to work with `Option` types in Nim.
|
||||
## It allows you to execute a block of code (`body`) only when the `Option` is not empty.
|
||||
##
|
||||
## `self` is the `Option` instance being checked.
|
||||
## `value` is the variable name to be used within the `body` for the unwrapped value.
|
||||
## `body` is a block of code that is executed only if `self` contains a value.
|
||||
##
|
||||
## The `value` within `body` is automatically unwrapped from the `Option`, making it
|
||||
## simpler to work with without needing explicit checks or unwrapping.
|
||||
##
|
||||
## Example:
|
||||
## ```nim
|
||||
## let myOpt = Opt.some(5)
|
||||
## myOpt.withValue(value):
|
||||
## echo value # Will print 5
|
||||
## ```
|
||||
##
|
||||
## Note: This is a template, and it will be inlined at the call site, offering good performance.
|
||||
let temp = (self)
|
||||
if temp.isSome:
|
||||
let value {.inject.} = temp.get()
|
||||
body
|
||||
|
||||
macro withValue*[T](self: Opt[T] | Option[T], value, body, body2: untyped): untyped =
|
||||
|
||||
@@ -89,6 +89,7 @@ build_target() {
|
||||
mkdir "$CACHE_DIR"
|
||||
cp -a "$TARGET_DIR"/* "$CACHE_DIR"/
|
||||
fi
|
||||
echo "Binary built successfully."
|
||||
}
|
||||
|
||||
if target_needs_rebuilding; then
|
||||
|
||||
@@ -5,21 +5,21 @@ export unittest2, chronos
|
||||
template asyncTeardown*(body: untyped): untyped =
|
||||
teardown:
|
||||
waitFor((
|
||||
proc() {.async, gcsafe.} =
|
||||
proc() {.async.} =
|
||||
body
|
||||
)())
|
||||
|
||||
template asyncSetup*(body: untyped): untyped =
|
||||
setup:
|
||||
waitFor((
|
||||
proc() {.async, gcsafe.} =
|
||||
proc() {.async.} =
|
||||
body
|
||||
)())
|
||||
|
||||
template asyncTest*(name: string, body: untyped): untyped =
|
||||
test name:
|
||||
waitFor((
|
||||
proc() {.async, gcsafe.} =
|
||||
proc() {.async.} =
|
||||
body
|
||||
)())
|
||||
|
||||
@@ -31,7 +31,7 @@ template flakyAsyncTest*(name: string, attempts: int, body: untyped): untyped =
|
||||
inc attemptNumber
|
||||
try:
|
||||
waitFor((
|
||||
proc() {.async, gcsafe.} =
|
||||
proc() {.async.} =
|
||||
body
|
||||
)())
|
||||
except Exception as e:
|
||||
|
||||
@@ -20,7 +20,7 @@ proc writeLp(s: StreamTransport, msg: string | seq[byte]): Future[int] {.gcsafe.
|
||||
buf.finish()
|
||||
result = s.write(buf.buffer)
|
||||
|
||||
proc readLp(s: StreamTransport): Future[seq[byte]] {.async, gcsafe.} =
|
||||
proc readLp(s: StreamTransport): Future[seq[byte]] {.async.} =
|
||||
## read length prefixed msg
|
||||
var
|
||||
size: uint
|
||||
|
||||
@@ -30,7 +30,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
|
||||
|
||||
let transport2 = transpProvider()
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
let conn = await transport1.accept()
|
||||
if conn.observedAddr.isSome():
|
||||
check transport1.handles(conn.observedAddr.get())
|
||||
@@ -58,7 +58,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
|
||||
let transport1 = transpProvider()
|
||||
await transport1.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
let conn = await transport1.accept()
|
||||
await conn.write("Hello!")
|
||||
await conn.close()
|
||||
@@ -85,7 +85,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
|
||||
let transport1 = transpProvider()
|
||||
await transport1.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
let conn = await transport1.accept()
|
||||
var msg = newSeq[byte](6)
|
||||
await conn.readExactly(addr msg[0], 6)
|
||||
@@ -147,7 +147,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
|
||||
let transport1 = transpProvider()
|
||||
await transport1.start(addrs)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
while true:
|
||||
let conn = await transport1.accept()
|
||||
await conn.write(newSeq[byte](0))
|
||||
@@ -214,7 +214,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
|
||||
let transport1 = transpProvider()
|
||||
await transport1.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
let conn = await transport1.accept()
|
||||
await conn.close()
|
||||
|
||||
|
||||
tests/errorhelpers.nim (new file, 29 lines)
@@ -0,0 +1,29 @@
import
  std/sequtils,
  chronos

proc allFuturesThrowing*(args: varargs[FutureBase]): Future[void] =
  # This proc is only meant for use in tests / not suitable for general use.
  # - Swallowing errors arbitrarily instead of aggregating them is bad design
  # - It raises `CatchableError` instead of the union of the `futs` errors,
  #   inflating the caller's `raises` list unnecessarily. `macro` could fix it
  let futs = @args
  (proc() {.async: (raises: [CatchableError]).} =
    await allFutures(futs)
    var firstErr: ref CatchableError
    for fut in futs:
      if fut.failed:
        let err = fut.error()
        if err of CancelledError:
          raise err
        if firstErr == nil:
          firstErr = err
    if firstErr != nil:
      raise firstErr)()

proc allFuturesThrowing*[T](futs: varargs[Future[T]]): Future[void] =
  allFuturesThrowing(futs.mapIt(FutureBase(it)))

proc allFuturesThrowing*[T, E](
    futs: varargs[InternalRaisesFuture[T, E]]): Future[void] =
  allFuturesThrowing(futs.mapIt(FutureBase(it)))
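
A hypothetical test-side use of the helper above, mirroring how it is called in the pubsub tests later in this diff:

```nim
# Assumes std/sequtils and the Connection type from libp2p.
proc closeAll(conns: seq[Connection]) {.async.} =
  # Waits for every close; re-raises cancellation, otherwise the first failure.
  await allFuturesThrowing(conns.mapIt(it.close()))
```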
@@ -1,6 +1,7 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos
|
||||
import macros
|
||||
import algorithm
|
||||
|
||||
import ../libp2p/transports/tcptransport
|
||||
@@ -13,8 +14,8 @@ import ../libp2p/protocols/secure/secure
|
||||
import ../libp2p/switch
|
||||
import ../libp2p/nameresolving/[nameresolver, mockresolver]
|
||||
|
||||
import ./asyncunit
|
||||
export asyncunit, mockresolver
|
||||
import "."/[asyncunit, errorhelpers]
|
||||
export asyncunit, errorhelpers, mockresolver
|
||||
|
||||
const
|
||||
StreamTransportTrackerName = "stream.transport"
|
||||
@@ -34,25 +35,19 @@ const
|
||||
ChronosStreamTrackerName
|
||||
]
|
||||
|
||||
iterator testTrackers*(extras: openArray[string] = []): TrackerBase =
|
||||
for name in trackerNames:
|
||||
let t = getTracker(name)
|
||||
if not isNil(t): yield t
|
||||
for name in extras:
|
||||
let t = getTracker(name)
|
||||
if not isNil(t): yield t
|
||||
|
||||
template checkTracker*(name: string) =
|
||||
var tracker = getTracker(name)
|
||||
if tracker.isLeaked():
|
||||
checkpoint tracker.dump()
|
||||
if isCounterLeaked(name):
|
||||
let
|
||||
tracker = getTrackerCounter(name)
|
||||
trackerDescription =
|
||||
"Opened " & name & ": " & $tracker.opened & "\n" &
|
||||
"Closed " & name & ": " & $tracker.closed
|
||||
checkpoint trackerDescription
|
||||
fail()
|
||||
|
||||
template checkTrackers*() =
|
||||
for tracker in testTrackers():
|
||||
if tracker.isLeaked():
|
||||
checkpoint tracker.dump()
|
||||
fail()
|
||||
for name in trackerNames:
|
||||
checkTracker(name)
|
||||
# Also test the GC is not fooling with us
|
||||
when defined(nimHasWarnBareExcept):
|
||||
{.push warning[BareExcept]:off.}
|
||||
@@ -81,11 +76,18 @@ template rng*(): ref HmacDrbgContext =
|
||||
getRng()
|
||||
|
||||
type
|
||||
WriteHandler* = proc(data: seq[byte]): Future[void] {.gcsafe, raises: [].}
|
||||
WriteHandler* = proc(
|
||||
data: seq[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).}
|
||||
|
||||
TestBufferStream* = ref object of BufferStream
|
||||
writeHandler*: WriteHandler
|
||||
|
||||
method write*(s: TestBufferStream, msg: seq[byte]): Future[void] =
|
||||
method write*(
|
||||
s: TestBufferStream,
|
||||
msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
s.writeHandler(msg)
|
||||
|
||||
method getWrapped*(s: TestBufferStream): Connection = nil
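
A sketch of wiring up the test stream with the new handler type, as done with `noop` in the gossipsub tests below; the handler body is illustrative only and assumes the helpers module's imports:

```nim
let conn = TestBufferStream.new(
  proc(data: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
    discard  # a real test would forward or record `data` here
)
```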
|
||||
@@ -103,26 +105,94 @@ proc bridgedConnections*: (Connection, Connection) =
|
||||
connB.dir = Direction.In
|
||||
connA.initStream()
|
||||
connB.initStream()
|
||||
connA.writeHandler = proc(data: seq[byte]) {.async.} =
|
||||
await connB.pushData(data)
|
||||
connA.writeHandler =
|
||||
proc(data: seq[byte]) {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
connB.pushData(data)
|
||||
|
||||
connB.writeHandler = proc(data: seq[byte]) {.async.} =
|
||||
await connA.pushData(data)
|
||||
connB.writeHandler =
|
||||
proc(data: seq[byte]) {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
connA.pushData(data)
|
||||
return (connA, connB)
|
||||
|
||||
|
||||
proc checkExpiringInternal(cond: proc(): bool {.raises: [], gcsafe.} ): Future[bool] {.async, gcsafe.} =
|
||||
let start = Moment.now()
|
||||
while true:
|
||||
if Moment.now() > (start + chronos.seconds(5)):
|
||||
return false
|
||||
elif cond():
|
||||
return true
|
||||
macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
|
||||
## Periodically checks a given condition until it is true or a timeout occurs.
|
||||
##
|
||||
## `code`: untyped - A condition expression that should eventually evaluate to true.
|
||||
## `timeout`: Duration - The maximum duration to wait for the condition to be true.
|
||||
##
|
||||
## Examples:
|
||||
## ```nim
|
||||
## # Example 1:
|
||||
## asyncTest "checkUntilCustomTimeout should pass if the condition is true":
|
||||
## let a = 2
|
||||
## let b = 2
|
||||
## checkUntilCustomTimeout(2.seconds):
|
||||
## a == b
|
||||
##
|
||||
## # Example 2: Multiple conditions
|
||||
## asyncTest "checkUntilCustomTimeout should pass if the conditions are true":
|
||||
## let a = 2
|
||||
## let b = 2
|
||||
## checkUntilCustomTimeout(5.seconds):
|
||||
## a == b
|
||||
## a == 2
|
||||
## b == 1
|
||||
## ```
|
||||
# Helper proc to recursively build a combined boolean expression
|
||||
proc buildAndExpr(n: NimNode): NimNode =
|
||||
if n.kind == nnkStmtList and n.len > 0:
|
||||
var combinedExpr = n[0] # Start with the first expression
|
||||
for i in 1..<n.len:
|
||||
# Combine the current expression with the next using 'and'
|
||||
combinedExpr = newCall("and", combinedExpr, n[i])
|
||||
return combinedExpr
|
||||
else:
|
||||
await sleepAsync(1.millis)
|
||||
return n
|
||||
|
||||
template checkExpiring*(code: untyped): untyped =
|
||||
check await checkExpiringInternal(proc(): bool = code)
|
||||
# Build the combined expression
|
||||
let combinedBoolExpr = buildAndExpr(code)
|
||||
|
||||
result = quote do:
|
||||
proc checkExpiringInternal(): Future[void] {.gensym, async.} =
|
||||
let start = Moment.now()
|
||||
while true:
|
||||
if Moment.now() > (start + `timeout`):
|
||||
checkpoint("[TIMEOUT] Timeout was reached and the conditions were not true. Check if the code is working as " &
|
||||
"expected or consider increasing the timeout param.")
|
||||
check `code`
|
||||
return
|
||||
else:
|
||||
if `combinedBoolExpr`:
|
||||
return
|
||||
else:
|
||||
await sleepAsync(1.millis)
|
||||
await checkExpiringInternal()
|
||||
|
||||
macro checkUntilTimeout*(code: untyped): untyped =
|
||||
## Same as `checkUntilCustomTimeout` but with a default timeout of 10 seconds.
|
||||
##
|
||||
## Examples:
|
||||
## ```nim
|
||||
## # Example 1:
|
||||
## asyncTest "checkUntilTimeout should pass if the condition is true":
|
||||
## let a = 2
|
||||
## let b = 2
|
||||
## checkUntilTimeout:
|
||||
## a == b
|
||||
##
|
||||
## # Example 2: Multiple conditions
|
||||
## asyncTest "checkUntilTimeout should pass if the conditions are true":
|
||||
## let a = 2
|
||||
## let b = 2
|
||||
## checkUntilTimeout:
|
||||
## a == b
|
||||
## a == 2
|
||||
## b == 1
|
||||
## ```
|
||||
result = quote do:
|
||||
checkUntilCustomTimeout(10.seconds, `code`)
|
||||
|
||||
proc unorderedCompare*[T](a, b: seq[T]): bool =
|
||||
if a == b:
|
||||
@@ -146,8 +216,8 @@ proc default*(T: typedesc[MockResolver]): T =
|
||||
resolver.ipResponses[("localhost", true)] = @["::1"]
|
||||
resolver
|
||||
|
||||
proc setDNSAddr*(switch: Switch) {.gcsafe, async.} =
|
||||
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
|
||||
proc setDNSAddr*(switch: Switch) {.async.} =
|
||||
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
|
||||
return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
|
||||
switch.peerInfo.addressMappers.add(addressMapper)
|
||||
await switch.peerInfo.update()
|
||||
|
||||
tests/hole-punching-interop/Dockerfile (new file, 17 lines)
@@ -0,0 +1,17 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:1.6.16 as builder

WORKDIR /workspace

COPY .pinned libp2p.nimble nim-libp2p/

RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y

COPY . nim-libp2p/

RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./tests/hole-punching-interop/hole_punching.nim

FROM --platform=linux/amd64 debian:bookworm-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2
COPY --from=builder /workspace/nim-libp2p/hole-punching-tests /usr/bin/hole-punch-client
ENV RUST_BACKTRACE=1
tests/hole-punching-interop/hole_punching.nim (new file, 115 lines)
@@ -0,0 +1,115 @@
|
||||
import std/[os, options, strformat]
|
||||
import redis
|
||||
import chronos, chronicles
|
||||
import ../../libp2p/[builders,
|
||||
switch,
|
||||
observedaddrmanager,
|
||||
services/hpservice,
|
||||
services/autorelayservice,
|
||||
protocols/connectivity/autonat/client as aclient,
|
||||
protocols/connectivity/relay/client as rclient,
|
||||
protocols/connectivity/relay/relay,
|
||||
protocols/connectivity/autonat/service,
|
||||
protocols/ping]
|
||||
import ../stubs/autonatclientstub
|
||||
import ../errorhelpers
|
||||
|
||||
proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
|
||||
let rng = newRng()
|
||||
var builder = SwitchBuilder.new()
|
||||
.withRng(rng)
|
||||
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
|
||||
.withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
|
||||
.withTcpTransport({ServerFlags.TcpNoDelay})
|
||||
.withYamux()
|
||||
.withAutonat()
|
||||
.withNoise()
|
||||
|
||||
if hpService != nil:
|
||||
builder = builder.withServices(@[hpService])
|
||||
|
||||
if r != nil:
|
||||
builder = builder.withCircuitRelay(r)
|
||||
|
||||
let s = builder.build()
|
||||
s.mount(Ping.new(rng=rng))
|
||||
return s
|
||||
|
||||
proc main() {.async.} =
|
||||
try:
|
||||
let relayClient = RelayClient.new()
|
||||
let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
|
||||
let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
|
||||
autonatClientStub.answer = NotReachable
|
||||
let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
|
||||
let hpservice = HPService.new(autonatService, autoRelayService)
|
||||
|
||||
let
|
||||
isListener = getEnv("MODE") == "listen"
|
||||
switch = createSwitch(relayClient, hpservice)
|
||||
auxSwitch = createSwitch()
|
||||
redisClient = open("redis", 6379.Port)
|
||||
|
||||
debug "Connected to redis"
|
||||
|
||||
await switch.start()
|
||||
await auxSwitch.start()
|
||||
|
||||
let relayAddr =
|
||||
try:
|
||||
redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
|
||||
except Exception as e:
|
||||
raise newException(CatchableError, e.msg)
|
||||
|
||||
# This is necessary to make the autonat service work. It will ask this peer for our reachability which the autonat
|
||||
# client stub will answer NotReachable.
|
||||
await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)
|
||||
|
||||
# Wait for autonat to be NotReachable
|
||||
while autonatService.networkReachability != NetworkReachability.NotReachable:
|
||||
await sleepAsync(100.milliseconds)
|
||||
|
||||
# This will trigger the autonat relay service to make a reservation.
|
||||
let relayMA = MultiAddress.init(relayAddr[1]).tryGet()
|
||||
debug "Got relay address", relayMA
|
||||
let relayId = await switch.connect(relayMA)
|
||||
debug "Connected to relay", relayId
|
||||
|
||||
# Wait for our relay address to be published
|
||||
while switch.peerInfo.addrs.len == 0:
|
||||
await sleepAsync(100.milliseconds)
|
||||
|
||||
if isListener:
|
||||
let listenerPeerId = switch.peerInfo.peerId
|
||||
discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
|
||||
debug "Pushed listener client peer id to redis", listenerPeerId
|
||||
|
||||
# Nothing to do anymore, wait to be killed
|
||||
await sleepAsync(2.minutes)
|
||||
else:
|
||||
let listenerId =
|
||||
try:
|
||||
PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
|
||||
except Exception as e:
|
||||
raise newException(CatchableError, e.msg)
|
||||
|
||||
debug "Got listener peer id", listenerId
|
||||
let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
|
||||
|
||||
debug "Dialing listener relay address", listenerRelayAddr
|
||||
await switch.connect(listenerId, @[listenerRelayAddr])
|
||||
|
||||
# wait for hole-punching to complete in the background
|
||||
await sleepAsync(5000.milliseconds)
|
||||
|
||||
let conn = switch.connManager.selectMuxer(listenerId).connection
|
||||
let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
|
||||
let delay = await Ping.new().ping(channel)
|
||||
await allFuturesThrowing(channel.close(), conn.close(), switch.stop(), auxSwitch.stop())
|
||||
echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
|
||||
quit(0)
|
||||
except CatchableError as e:
|
||||
error "Unexpected error", msg = e.msg
|
||||
|
||||
discard waitFor(main().withTimeout(4.minutes))
|
||||
quit(1)
|
||||
tests/hole-punching-interop/version.json (new file, 7 lines)
@@ -0,0 +1,7 @@
{
  "id": "nim-libp2p-head",
  "containerImageID": "nim-libp2p-head",
  "transports": [
    "tcp"
  ]
}
@@ -26,7 +26,7 @@ import ../../libp2p/protocols/pubsub/errors as pubsub_errors
|
||||
|
||||
import ../helpers
|
||||
|
||||
proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
|
||||
proc waitSub(sender, receiver: auto; key: string) {.async.} =
|
||||
# turn things deterministic
|
||||
# this is for testing purposes only
|
||||
var ceil = 15
|
||||
@@ -43,7 +43,7 @@ suite "FloodSub":
|
||||
|
||||
asyncTest "FloodSub basic publish/subscribe A -> B":
|
||||
var completionFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
completionFut.complete(true)
|
||||
|
||||
@@ -81,7 +81,7 @@ suite "FloodSub":
|
||||
|
||||
asyncTest "FloodSub basic publish/subscribe B -> A":
|
||||
var completionFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
completionFut.complete(true)
|
||||
|
||||
@@ -113,7 +113,7 @@ suite "FloodSub":
|
||||
|
||||
asyncTest "FloodSub validation should succeed":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
@@ -151,7 +151,7 @@ suite "FloodSub":
|
||||
await allFuturesThrowing(nodesFut)
|
||||
|
||||
asyncTest "FloodSub validation should fail":
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check false # if we get here, it should fail
|
||||
|
||||
let
|
||||
@@ -186,7 +186,7 @@ suite "FloodSub":
|
||||
|
||||
asyncTest "FloodSub validation one fails and one succeeds":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foo"
|
||||
handlerFut.complete(true)
|
||||
|
||||
@@ -235,7 +235,7 @@ suite "FloodSub":
|
||||
counter = new int
|
||||
futs[i] = (
|
||||
fut,
|
||||
(proc(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
(proc(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
inc counter[]
|
||||
if counter[] == runs - 1:
|
||||
@@ -283,7 +283,7 @@ suite "FloodSub":
|
||||
counter = new int
|
||||
futs[i] = (
|
||||
fut,
|
||||
(proc(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
(proc(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
inc counter[]
|
||||
if counter[] == runs - 1:
|
||||
@@ -333,7 +333,7 @@ suite "FloodSub":
|
||||
|
||||
asyncTest "FloodSub message size validation":
|
||||
var messageReceived = 0
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check data.len < 50
|
||||
inc(messageReceived)
|
||||
|
||||
@@ -361,7 +361,7 @@ suite "FloodSub":
|
||||
check (await smallNode[0].publish("foo", smallMessage1)) > 0
|
||||
check (await bigNode[0].publish("foo", smallMessage2)) > 0
|
||||
|
||||
checkExpiring: messageReceived == 2
|
||||
checkUntilTimeout: messageReceived == 2
|
||||
|
||||
check (await smallNode[0].publish("foo", bigMessage)) > 0
|
||||
check (await bigNode[0].publish("foo", bigMessage)) > 0
|
||||
@@ -375,7 +375,7 @@ suite "FloodSub":
|
||||
|
||||
asyncTest "FloodSub message size validation 2":
|
||||
var messageReceived = 0
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
inc(messageReceived)
|
||||
|
||||
let
|
||||
@@ -396,7 +396,7 @@ suite "FloodSub":
|
||||
|
||||
check (await bigNode1[0].publish("foo", bigMessage)) > 0
|
||||
|
||||
checkExpiring: messageReceived == 1
|
||||
checkUntilTimeout: messageReceived == 1
|
||||
|
||||
await allFuturesThrowing(
|
||||
bigNode1[0].switch.stop(),
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -24,7 +24,8 @@ import utils
|
||||
|
||||
import ../helpers
|
||||
|
||||
proc noop(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc noop(data: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
|
||||
const MsgIdSuccess = "msg id gen success"
|
||||
|
||||
@@ -718,104 +719,6 @@ suite "GossipSub internal":
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "two IHAVEs should generate only one IWANT":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
var iwantCount = 0
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
|
||||
check false
|
||||
|
||||
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
|
||||
|
||||
let topic = "foobar"
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.subscribe(topic, handler2)
|
||||
|
||||
# Setup two connections and two peers
|
||||
var ihaveMessageId: string
|
||||
var firstPeer: PubSubPeer
|
||||
let seqno = @[0'u8, 1, 2, 3]
|
||||
for i in 0..<2:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
if isNil(firstPeer):
|
||||
firstPeer = peer
|
||||
ihaveMessageId = byteutils.toHex(seqno) & $firstPeer.peerId
|
||||
peer.handler = handler
|
||||
|
||||
# Simulate that each peer sends an IHAVE message to our node
|
||||
let msg = ControlIHave(
|
||||
topicID: topic,
|
||||
messageIDs: @[ihaveMessageId.toBytes()]
|
||||
)
|
||||
let iwants = gossipSub.handleIHave(peer, @[msg])
|
||||
if iwants.messageIds.len > 0:
|
||||
iwantCount += 1
|
||||
|
||||
# Verify that our node responds with only one IWANT message
|
||||
check: iwantCount == 1
|
||||
check: gossipSub.outstandingIWANTs.contains(ihaveMessageId.toBytes())
|
||||
|
||||
# Simulate that our node receives the RPCMsg in response to the IWANT
|
||||
let actualMessageData = "Hello, World!".toBytes
|
||||
let rpcMsg = RPCMsg(
|
||||
messages: @[Message(
|
||||
fromPeer: firstPeer.peerId,
|
||||
seqno: seqno,
|
||||
data: actualMessageData
|
||||
)]
|
||||
)
|
||||
await gossipSub.rpcHandler(firstPeer, encodeRpcMsg(rpcMsg, false))
|
||||
|
||||
check: not gossipSub.outstandingIWANTs.contains(ihaveMessageId.toBytes())
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "handle unanswered IWANT messages":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
gossipSub.parameters.heartbeatInterval = 50.milliseconds
|
||||
gossipSub.parameters.iwantTimeout = 10.milliseconds
|
||||
await gossipSub.start()
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} = discard
|
||||
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
|
||||
|
||||
let topic = "foobar"
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.subscribe(topic, handler2)
|
||||
|
||||
# Setup a connection and a peer
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
|
||||
# Simulate that the peer sends an IHAVE message to our node
|
||||
let ihaveMessageId = @[0'u8, 1, 2, 3]
|
||||
let ihaveMsg = ControlIHave(
|
||||
topicID: topic,
|
||||
messageIDs: @[ihaveMessageId]
|
||||
)
|
||||
discard gossipSub.handleIHave(peer, @[ihaveMsg])
|
||||
|
||||
check: gossipSub.outstandingIWANTs.contains(ihaveMessageId)
|
||||
check: peer.behaviourPenalty == 0.0
|
||||
|
||||
await sleepAsync(60.milliseconds)
|
||||
|
||||
check: not gossipSub.outstandingIWANTs.contains(ihaveMessageId)
|
||||
check: peer.behaviourPenalty == 0.1
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
proc setupTest(): Future[tuple[gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]]] {.async.} =
|
||||
let
|
||||
nodes = generateNodes(2, gossip = true, verifySignature = false)
|
||||
@@ -828,10 +731,10 @@ suite "GossipSub internal":
|
||||
|
||||
var receivedMessages = new(HashSet[seq[byte]])
|
||||
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async.} =
|
||||
receivedMessages[].incl(data)
|
||||
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
nodes[0].subscribe("foobar", handlerA)
|
||||
@@ -877,9 +780,9 @@ suite "GossipSub internal":
|
||||
|
||||
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
|
||||
ihave: @[ControlIHave(topicId: "foobar", messageIds: iwantMessageIds)]
|
||||
))))
|
||||
))), isHighPriority = false)
|
||||
|
||||
checkExpiring: receivedMessages[] == sentMessages
|
||||
checkUntilTimeout: receivedMessages[] == sentMessages
|
||||
check receivedMessages[].len == 2
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
@@ -894,10 +797,10 @@ suite "GossipSub internal":
|
||||
|
||||
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
|
||||
ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
|
||||
))))
|
||||
))), isHighPriority = false)
|
||||
|
||||
await sleepAsync(300.milliseconds)
|
||||
checkExpiring: receivedMessages[].len == 0
|
||||
checkUntilTimeout: receivedMessages[].len == 0
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
@@ -911,9 +814,9 @@ suite "GossipSub internal":
|
||||
|
||||
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
|
||||
ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
|
||||
))))
|
||||
))), isHighPriority = false)
|
||||
|
||||
checkExpiring: receivedMessages[] == sentMessages
|
||||
checkUntilTimeout: receivedMessages[] == sentMessages
|
||||
check receivedMessages[].len == 2
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
@@ -929,7 +832,7 @@ suite "GossipSub internal":
|
||||
|
||||
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
|
||||
ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
|
||||
))))
|
||||
))), isHighPriority = false)
|
||||
|
||||
var smallestSet: HashSet[seq[byte]]
|
||||
let seqs = toSeq(sentMessages)
|
||||
@@ -938,7 +841,7 @@ suite "GossipSub internal":
|
||||
else:
|
||||
smallestSet.incl(seqs[1])
|
||||
|
||||
checkExpiring: receivedMessages[] == smallestSet
|
||||
checkUntilTimeout: receivedMessages[] == smallestSet
|
||||
check receivedMessages[].len == 1
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
@@ -47,7 +47,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "GossipSub validation should succeed":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
@@ -92,7 +92,7 @@ suite "GossipSub":
|
||||
await allFuturesThrowing(nodesFut.concat())
|
||||
|
||||
asyncTest "GossipSub validation should fail (reject)":
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check false # if we get here, it should fail
|
||||
|
||||
let
|
||||
@@ -138,7 +138,7 @@ suite "GossipSub":
|
||||
await allFuturesThrowing(nodesFut.concat())
|
||||
|
||||
asyncTest "GossipSub validation should fail (ignore)":
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check false # if we get here, it should fail
|
||||
|
||||
let
|
||||
@@ -185,7 +185,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "GossipSub validation one fails and one succeeds":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foo"
|
||||
handlerFut.complete(true)
|
||||
|
||||
@@ -238,7 +238,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "GossipSub unsub - resub faster than backoff":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
@@ -289,7 +289,7 @@ suite "GossipSub":
|
||||
await allFuturesThrowing(nodesFut.concat())
|
||||
|
||||
asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let
|
||||
@@ -310,9 +310,9 @@ suite "GossipSub":
|
||||
let gossip1 = GossipSub(nodes[0])
|
||||
let gossip2 = GossipSub(nodes[1])
|
||||
|
||||
checkExpiring:
|
||||
"foobar" in gossip2.topics and
|
||||
"foobar" in gossip1.gossipsub and
|
||||
checkUntilTimeout:
|
||||
"foobar" in gossip2.topics
|
||||
"foobar" in gossip1.gossipsub
|
||||
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
await allFuturesThrowing(
|
||||
@@ -323,7 +323,7 @@ suite "GossipSub":
|
||||
await allFuturesThrowing(nodesFut.concat())
|
||||
|
||||
asyncTest "e2e - GossipSub should add remote peer topic subscriptions if both peers are subscribed":
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let
|
||||
@@ -374,7 +374,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "e2e - GossipSub send over fanout A -> B":
|
||||
var passed = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
passed.complete()
|
||||
|
||||
@@ -428,7 +428,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
|
||||
var passed = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
passed.complete()
|
||||
|
||||
@@ -454,9 +454,9 @@ suite "GossipSub":
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
let gsNode = GossipSub(nodes[1])
|
||||
checkExpiring:
|
||||
gsNode.mesh.getOrDefault("foobar").len == 0 and
|
||||
GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0 and
|
||||
checkUntilTimeout:
|
||||
gsNode.mesh.getOrDefault("foobar").len == 0
|
||||
GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0
|
||||
(
|
||||
GossipSub(nodes[0]).gossipsub.getOrDefault("foobar").len == 1 or
|
||||
GossipSub(nodes[0]).fanout.getOrDefault("foobar").len == 1
|
||||
@@ -481,7 +481,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "e2e - GossipSub send over mesh A -> B":
|
||||
var passed: Future[bool] = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
passed.complete(true)
|
||||
|
||||
@@ -548,11 +548,11 @@ suite "GossipSub":
|
||||
var
|
||||
aReceived = 0
|
||||
cReceived = 0
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async.} =
|
||||
inc aReceived
|
||||
check aReceived < 2
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} = discard
|
||||
proc handlerC(topic: string, data: seq[byte]) {.async.} =
|
||||
inc cReceived
|
||||
check cReceived < 2
|
||||
cRelayed.complete()
|
||||
@@ -572,16 +572,16 @@ suite "GossipSub":
    gossip1.seen = TimedCache[MessageId].init()
    gossip3.seen = TimedCache[MessageId].init()
    let msgId = toSeq(gossip2.validationSeen.keys)[0]
-   checkExpiring(try: gossip2.validationSeen[msgId].len > 0 except: false)
+   checkUntilTimeout(try: gossip2.validationSeen[msgId].len > 0 except: false)
    result = ValidationResult.Accept
    bFinished.complete()

    nodes[1].addValidator("foobar", slowValidator)

-   checkExpiring(
-     gossip1.mesh.getOrDefault("foobar").len == 2 and
-     gossip2.mesh.getOrDefault("foobar").len == 2 and
-     gossip3.mesh.getOrDefault("foobar").len == 2)
+   checkUntilTimeout:
+     gossip1.mesh.getOrDefault("foobar").len == 2
+     gossip2.mesh.getOrDefault("foobar").len == 2
+     gossip3.mesh.getOrDefault("foobar").len == 2
    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 2

    await bFinished
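The slowValidator above defers its verdict so the test can observe the message sitting in validationSeen before it is accepted. A hedged sketch of such a validator, with the signature inferred from how addValidator is used in these tests:

```nim
import chronos
import libp2p/protocols/pubsub/pubsub          # ValidationResult, addValidator
import libp2p/protocols/pubsub/rpc/messages    # Message

# Hedged sketch: accept every message, but only after an artificial delay.
proc slowValidator(topic: string,
                   message: Message): Future[ValidationResult] {.async.} =
  await sleepAsync(100.milliseconds)
  return ValidationResult.Accept

# registration mirrors the diff: nodes[1].addValidator("foobar", slowValidator)
```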
@@ -596,7 +596,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "e2e - GossipSub send over floodPublish A -> B":
|
||||
var passed: Future[bool] = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
passed.complete(true)
|
||||
|
||||
@@ -653,7 +653,7 @@ suite "GossipSub":
|
||||
)
|
||||
|
||||
proc connectNodes(nodes: seq[PubSub], target: PubSub) {.async.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
|
||||
for node in nodes:
|
||||
@@ -661,7 +661,7 @@ suite "GossipSub":
|
||||
await node.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)
|
||||
|
||||
proc baseTestProcedure(nodes: seq[PubSub], gossip1: GossipSub, numPeersFirstMsg: int, numPeersSecondMsg: int) {.async.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
|
||||
block setup:
|
||||
@@ -676,7 +676,7 @@ suite "GossipSub":

    # Now try with a mesh
    gossip1.subscribe("foobar", handler)
-   checkExpiring: gossip1.mesh.peers("foobar") > 5
+   checkUntilTimeout: gossip1.mesh.peers("foobar") > 5

    # use a different length so that the message is not equal to the last
    check (await nodes[0].publish("foobar", newSeq[byte](500_000))) == numPeersSecondMsg
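As the check above relies on, publish returns the number of peers the message was actually delivered to; the test also varies the payload length so the second message is not treated as a duplicate of the first. A hedged usage sketch, assuming a started GossipSub node (`node` is a placeholder):

```nim
# Hedged sketch: publish returns how many peers received the message,
# so 0 usually means the mesh/fanout for the topic is still empty.
let sent = await node.publish("foobar", "hello".toBytes())
if sent == 0:
  echo "no peers for topic yet - wait for the mesh to form"
```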
@@ -727,7 +727,7 @@ suite "GossipSub":
|
||||
var handler: TopicHandler
|
||||
closureScope:
|
||||
var peerName = $dialer.peerInfo.peerId
|
||||
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
|
||||
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
|
||||
if peerName notin seen:
|
||||
seen[peerName] = 0
|
||||
seen[peerName].inc
|
||||
@@ -778,7 +778,7 @@ suite "GossipSub":
|
||||
var handler: TopicHandler
|
||||
capture dialer, i:
|
||||
var peerName = $dialer.peerInfo.peerId
|
||||
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
|
||||
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
|
||||
if peerName notin seen:
|
||||
seen[peerName] = 0
|
||||
seen[peerName].inc
|
||||
@@ -819,7 +819,7 @@ suite "GossipSub":
|
||||
# PX to A & C
|
||||
#
|
||||
# C sent his SPR, not A
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard # not used in this test
|
||||
|
||||
let
|
||||
@@ -895,9 +895,9 @@ suite "GossipSub":
|
||||
await nodes[1].switch.connect(nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs)
|
||||
|
||||
let bFinished = newFuture[void]()
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = bFinished.complete()
|
||||
proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} = doAssert false
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async.} = discard
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} = bFinished.complete()
|
||||
proc handlerC(topic: string, data: seq[byte]) {.async.} = doAssert false
|
||||
|
||||
nodes[0].subscribe("foobar", handlerA)
|
||||
nodes[1].subscribe("foobar", handlerB)
|
||||
@@ -912,14 +912,14 @@ suite "GossipSub":
|
||||
|
||||
gossip3.broadcast(gossip3.mesh["foobar"], RPCMsg(control: some(ControlMessage(
|
||||
idontwant: @[ControlIWant(messageIds: @[newSeq[byte](10)])]
|
||||
))))
|
||||
checkExpiring: gossip2.mesh.getOrDefault("foobar").anyIt(it.heDontWants[^1].len == 1)
|
||||
))), isHighPriority = true)
|
||||
checkUntilTimeout: gossip2.mesh.getOrDefault("foobar").anyIt(it.heDontWants[^1].len == 1)
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
|
||||
|
||||
await bFinished
|
||||
|
||||
checkExpiring: toSeq(gossip3.mesh.getOrDefault("foobar")).anyIt(it.heDontWants[^1].len == 1)
|
||||
checkUntilTimeout: toSeq(gossip3.mesh.getOrDefault("foobar")).anyIt(it.heDontWants[^1].len == 1)
|
||||
check: toSeq(gossip1.mesh.getOrDefault("foobar")).anyIt(it.heDontWants[^1].len == 0)
|
||||
|
||||
await allFuturesThrowing(
|
||||
@@ -943,7 +943,7 @@ suite "GossipSub":
|
||||
|
||||
await subscribeNodes(nodes)
|
||||
|
||||
proc handle(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc handle(topic: string, data: seq[byte]) {.async.} = discard
|
||||
|
||||
let gossip0 = GossipSub(nodes[0])
|
||||
let gossip1 = GossipSub(nodes[1])
|
||||
@@ -952,6 +952,10 @@ suite "GossipSub":
|
||||
gossip1.subscribe("foobar", handle)
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
# Avoid being disconnected by failing signature verification
|
||||
gossip0.verifySignature = false
|
||||
gossip1.verifySignature = false
|
||||
|
||||
return (nodes, gossip0, gossip1)
|
||||
|
||||
proc currentRateLimitHits(): float64 =
|
||||
@@ -964,8 +968,10 @@ suite "GossipSub":
|
||||
let rateLimitHits = currentRateLimitHits()
|
||||
let (nodes, gossip0, gossip1) = await initializeGossipTest()
|
||||
|
||||
let msg = RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: "Valid data".toBytes)])
|
||||
gossip0.broadcast(gossip0.mesh["foobar"], msg)
|
||||
gossip0.broadcast(
|
||||
gossip0.mesh["foobar"],
|
||||
RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: newSeq[byte](10))]),
|
||||
isHighPriority = true)
|
||||
await sleepAsync(300.millis)
|
||||
|
||||
check currentRateLimitHits() == rateLimitHits
|
||||
@@ -973,9 +979,13 @@ suite "GossipSub":
|
||||
|
||||
# Disconnect peer when rate limiting is enabled
|
||||
gossip1.parameters.disconnectPeerAboveRateLimit = true
|
||||
gossip0.broadcast(gossip0.mesh["foobar"], msg)
|
||||
gossip0.broadcast(
|
||||
gossip0.mesh["foobar"],
|
||||
RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: newSeq[byte](12))]),
|
||||
isHighPriority = true)
|
||||
await sleepAsync(300.millis)
|
||||
|
||||
checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
|
||||
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
|
||||
check currentRateLimitHits() == rateLimitHits
|
||||
|
||||
await stopNodes(nodes)
|
||||
@@ -986,8 +996,7 @@ suite "GossipSub":
|
||||
let (nodes, gossip0, gossip1) = await initializeGossipTest()
|
||||
|
||||
# Simulate sending an undecodable message
|
||||
let msg = newSeqWith[byte](30, 1.byte)
|
||||
await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(msg)
|
||||
await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](33, 1.byte), isHighPriority = true)
|
||||
await sleepAsync(300.millis)
|
||||
|
||||
check currentRateLimitHits() == rateLimitHits + 1
|
||||
@@ -995,9 +1004,9 @@ suite "GossipSub":
|
||||
|
||||
# Disconnect peer when rate limiting is enabled
|
||||
gossip1.parameters.disconnectPeerAboveRateLimit = true
|
||||
await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(msg)
|
||||
await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](35, 1.byte), isHighPriority = true)
|
||||
|
||||
checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
|
||||
checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
|
||||
check currentRateLimitHits() == rateLimitHits + 2
|
||||
|
||||
await stopNodes(nodes)
|
||||
@@ -1008,11 +1017,10 @@ suite "GossipSub":
|
||||
|
||||
let msg = RPCMsg(control: some(ControlMessage(prune: @[
|
||||
ControlPrune(topicID: "foobar", peers: @[
|
||||
PeerInfoMsg(peerId: PeerId(data: newSeq[byte](30)))
|
||||
PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))
|
||||
], backoff: 123'u64)
|
||||
])))
|
||||
|
||||
gossip0.broadcast(gossip0.mesh["foobar"], msg)
|
||||
gossip0.broadcast(gossip0.mesh["foobar"], msg, isHighPriority = true)
|
||||
await sleepAsync(300.millis)
|
||||
|
||||
check currentRateLimitHits() == rateLimitHits + 1
|
||||
@@ -1020,9 +1028,47 @@ suite "GossipSub":
|
||||
|
||||
# Disconnect peer when rate limiting is enabled
|
||||
gossip1.parameters.disconnectPeerAboveRateLimit = true
|
||||
gossip0.broadcast(gossip0.mesh["foobar"], msg)
|
||||
let msg2 = RPCMsg(control: some(ControlMessage(prune: @[
|
||||
ControlPrune(topicID: "foobar", peers: @[
|
||||
PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))
|
||||
], backoff: 123'u64)
|
||||
])))
|
||||
gossip0.broadcast(gossip0.mesh["foobar"], msg2, isHighPriority = true)
|
||||
|
||||
checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
|
||||
checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
|
||||
check currentRateLimitHits() == rateLimitHits + 2
|
||||
|
||||
await stopNodes(nodes)
|
||||
|
||||
asyncTest "e2e - GossipSub should rate limit invalid messages above the size allowed":
|
||||
let rateLimitHits = currentRateLimitHits()
|
||||
let (nodes, gossip0, gossip1) = await initializeGossipTest()
|
||||
|
||||
let topic = "foobar"
|
||||
proc execValidator(topic: string, message: messages.Message): Future[ValidationResult] {.raises: [].} =
|
||||
let res = newFuture[ValidationResult]()
|
||||
res.complete(ValidationResult.Reject)
|
||||
res
|
||||
|
||||
gossip0.addValidator(topic, execValidator)
|
||||
gossip1.addValidator(topic, execValidator)
|
||||
|
||||
let msg = RPCMsg(messages: @[Message(topicIDs: @[topic], data: newSeq[byte](40))])
|
||||
|
||||
gossip0.broadcast(gossip0.mesh[topic], msg, isHighPriority = true)
|
||||
await sleepAsync(300.millis)
|
||||
|
||||
check currentRateLimitHits() == rateLimitHits + 1
|
||||
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
|
||||
|
||||
# Disconnect peer when rate limiting is enabled
|
||||
gossip1.parameters.disconnectPeerAboveRateLimit = true
|
||||
gossip0.broadcast(
|
||||
gossip0.mesh[topic],
|
||||
RPCMsg(messages: @[Message(topicIDs: @[topic], data: newSeq[byte](35))]),
|
||||
isHighPriority = true)
|
||||
|
||||
checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
|
||||
check currentRateLimitHits() == rateLimitHits + 2
|
||||
|
||||
await stopNodes(nodes)
|
||||
|
||||
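execValidator above avoids the async transform by handing back an already-completed future, which is what lets it carry the `{.raises: [].}` annotation. A hedged sketch of the equivalent written with `{.async.}`; both shapes satisfy the validator signature expected by addValidator:

```nim
import chronos
import libp2p/protocols/pubsub/pubsub          # ValidationResult
import libp2p/protocols/pubsub/rpc/messages    # Message

# Hedged sketch: an async reject-all validator equivalent to execValidator.
proc rejectValidator(topic: string,
                     message: Message): Future[ValidationResult] {.async.} =
  return ValidationResult.Reject
```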
@@ -59,7 +59,7 @@ suite "GossipSub":
|
||||
var handler: TopicHandler
|
||||
closureScope:
|
||||
var peerName = $dialer.peerInfo.peerId
|
||||
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
|
||||
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
|
||||
if peerName notin seen:
|
||||
seen[peerName] = 0
|
||||
seen[peerName].inc
|
||||
@@ -93,7 +93,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "GossipSub invalid topic subscription":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
@@ -155,7 +155,7 @@ suite "GossipSub":
|
||||
# DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
|
||||
### await subscribeNodes(nodes)
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} = discard
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await invalidDetected.wait(10.seconds)
|
||||
@@ -182,10 +182,10 @@ suite "GossipSub":
|
||||
await GossipSub(nodes[2]).addDirectPeer(nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs)
|
||||
|
||||
var handlerFut = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete()
|
||||
proc noop(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc noop(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
|
||||
nodes[0].subscribe("foobar", noop)
|
||||
@@ -226,7 +226,7 @@ suite "GossipSub":
|
||||
GossipSub(nodes[1]).parameters.graylistThreshold = 100000
|
||||
|
||||
var handlerFut = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete()
|
||||
|
||||
@@ -272,7 +272,7 @@ suite "GossipSub":
|
||||
var handler: TopicHandler
|
||||
closureScope:
|
||||
var peerName = $dialer.peerInfo.peerId
|
||||
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
|
||||
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
|
||||
if peerName notin seen:
|
||||
seen[peerName] = 0
|
||||
seen[peerName].inc
|
||||
@@ -324,7 +324,7 @@ suite "GossipSub":
|
||||
|
||||
# Adding again subscriptions
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
|
||||
for i in 0..<runs:
|
||||
@@ -368,7 +368,7 @@ suite "GossipSub":
|
||||
)
|
||||
|
||||
var handlerFut = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
handlerFut.complete()
|
||||
|
||||
await subscribeNodes(nodes)
|
||||
|
||||
@@ -128,7 +128,7 @@ proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
|
||||
await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
|
||||
dialed.add(node.peerInfo.peerId)
|
||||
|
||||
proc waitSub*(sender, receiver: auto; key: string) {.async, gcsafe.} =
|
||||
proc waitSub*(sender, receiver: auto; key: string) {.async.} =
|
||||
if sender == receiver:
|
||||
return
|
||||
let timeout = Moment.now() + 5.seconds
|
||||
@@ -148,7 +148,7 @@ proc waitSub*(sender, receiver: auto; key: string) {.async, gcsafe.} =
|
||||
await sleepAsync(5.milliseconds)
|
||||
doAssert Moment.now() < timeout, "waitSub timeout!"
|
||||
|
||||
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async, gcsafe.} =
|
||||
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
|
||||
let timeout = Moment.now() + 5.seconds
|
||||
while true:
|
||||
var
|
||||
|
||||
@@ -24,7 +24,7 @@ type
      addrs: seq[MultiAddress],
      forceDial = false,
      reuseConnection = true,
-     upgradeDir = Direction.Out): Future[void] {.gcsafe, async.}
+     dir = Direction.Out): Future[void] {.async.}

method connect*(
    self: SwitchStub,
@@ -32,11 +32,11 @@ method connect*(
    addrs: seq[MultiAddress],
    forceDial = false,
    reuseConnection = true,
-   upgradeDir = Direction.Out) {.async.} =
+   dir = Direction.Out) {.async.} =
  if (self.connectStub != nil):
-   await self.connectStub(self, peerId, addrs, forceDial, reuseConnection, upgradeDir)
+   await self.connectStub(self, peerId, addrs, forceDial, reuseConnection, dir)
  else:
-   await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
+   await self.switch.connect(peerId, addrs, forceDial, reuseConnection, dir)

proc new*(T: typedesc[SwitchStub], switch: Switch, connectStub: connectStubType = nil): T =
  return SwitchStub(
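The rename above (upgradeDir to dir) only touches the stub's parameter name; behaviour is unchanged. A hedged sketch of how such a stub is plugged in, using the SwitchStub.new and newStandardSwitch calls that appear elsewhere in these tests:

```nim
# Hedged sketch: a connect stub simulating a failing dial; the signature
# mirrors the connectStubType shown above with the renamed `dir` parameter.
proc failingConnect(self: SwitchStub,
                    peerId: PeerId,
                    addrs: seq[MultiAddress],
                    forceDial = false,
                    reuseConnection = true,
                    dir = Direction.Out): Future[void] {.async.} =
  raise newException(CatchableError, "simulated dial failure")

let stubbedSwitch = SwitchStub.new(newStandardSwitch(), failingConnect)
```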
@@ -39,7 +39,7 @@ proc createAutonatSwitch(nameResolver: NameResolver = nil): Switch =

proc makeAutonatServicePrivate(): Switch =
  var autonatProtocol = new LPProtocol
- autonatProtocol.handler = proc (conn: Connection, proto: string) {.async, gcsafe.} =
+ autonatProtocol.handler = proc (conn: Connection, proto: string) {.async.} =
    discard await conn.readLp(1024)
    await conn.writeLp(AutonatDialResponse(
      status: DialError,

@@ -87,7 +87,7 @@ suite "Autonat Service":

  let awaiter = newFuture[void]()

- proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
+ proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
    if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() >= 0.3:
      if not awaiter.finished:
        awaiter.complete()
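For context, this handler is registered on the AutonatService instance before the switch starts; once confidence in the Reachable verdict crosses the threshold, the awaited future completes. A hedged sketch, assuming the registration call used elsewhere in these autonat tests:

```nim
# Hedged sketch: wire the handler into the service and wait for a verdict.
autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler)

await switch1.start()
await awaiter.wait(1.minutes)   # fail the test if no verdict arrives in time
```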
@@ -131,7 +131,7 @@ suite "Autonat Service":
|
||||
|
||||
let awaiter = newFuture[void]()
|
||||
|
||||
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
|
||||
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
|
||||
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
|
||||
if not awaiter.finished:
|
||||
autonatClientStub.answer = Reachable
|
||||
@@ -173,7 +173,7 @@ suite "Autonat Service":
|
||||
|
||||
let awaiter = newFuture[void]()
|
||||
|
||||
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
|
||||
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
|
||||
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
|
||||
if not awaiter.finished:
|
||||
awaiter.complete()
|
||||
@@ -213,7 +213,7 @@ suite "Autonat Service":
|
||||
|
||||
let awaiter = newFuture[void]()
|
||||
|
||||
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
|
||||
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
|
||||
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
|
||||
if not awaiter.finished:
|
||||
autonatClientStub.answer = Unknown
|
||||
@@ -267,7 +267,7 @@ suite "Autonat Service":
|
||||
|
||||
let awaiter = newFuture[void]()
|
||||
|
||||
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
|
||||
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
|
||||
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
|
||||
if not awaiter.finished:
|
||||
awaiter.complete()
|
||||
@@ -302,12 +302,12 @@ suite "Autonat Service":
|
||||
let awaiter2 = newFuture[void]()
|
||||
let awaiter3 = newFuture[void]()
|
||||
|
||||
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
|
||||
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
|
||||
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
|
||||
if not awaiter1.finished:
|
||||
awaiter1.complete()
|
||||
|
||||
proc statusAndConfidenceHandler2(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
|
||||
proc statusAndConfidenceHandler2(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
|
||||
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
|
||||
if not awaiter2.finished:
|
||||
awaiter2.complete()
|
||||
@@ -345,7 +345,7 @@ suite "Autonat Service":
|
||||
|
||||
let awaiter1 = newFuture[void]()
|
||||
|
||||
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
|
||||
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
|
||||
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
|
||||
if not awaiter1.finished:
|
||||
awaiter1.complete()
|
||||
@@ -388,7 +388,7 @@ suite "Autonat Service":
|
||||
|
||||
var awaiter = newFuture[void]()
|
||||
|
||||
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
|
||||
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
|
||||
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
|
||||
if not awaiter.finished:
|
||||
awaiter.complete()
|
||||
@@ -428,7 +428,7 @@ suite "Autonat Service":
|
||||
let switch1 = createSwitch(autonatService)
|
||||
let switch2 = createSwitch()
|
||||
|
||||
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
|
||||
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
|
||||
fail()
|
||||
|
||||
check autonatService.networkReachability == NetworkReachability.Unknown
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{.used.}
|
||||
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -18,8 +18,7 @@ import ./helpers
|
||||
|
||||
suite "BufferStream":
|
||||
teardown:
|
||||
# echo getTracker(BufferStreamTrackerName).dump()
|
||||
check getTracker(BufferStreamTrackerName).isLeaked() == false
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "push data to buffer":
|
||||
let buff = BufferStream.new()
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{.used.}
|
||||
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -29,11 +29,12 @@ type
|
||||
peerId: PeerId
|
||||
|
||||
method newStream*(
|
||||
m: TestMuxer,
|
||||
name: string = "",
|
||||
lazy: bool = false):
|
||||
Future[Connection] {.async, gcsafe.} =
|
||||
result = Connection.new(m.peerId, Direction.Out, Opt.none(MultiAddress))
|
||||
m: TestMuxer,
|
||||
name: string = "",
|
||||
lazy: bool = false
|
||||
): Future[Connection] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MuxerError]).} =
|
||||
Connection.new(m.peerId, Direction.Out, Opt.none(MultiAddress))
|
||||
|
||||
suite "Connection Manager":
|
||||
teardown:
|
||||
@@ -215,7 +216,7 @@ suite "Connection Manager":
|
||||
|
||||
await connMngr.close()
|
||||
|
||||
checkExpiring: waitedConn3.cancelled()
|
||||
checkUntilTimeout: waitedConn3.cancelled()
|
||||
|
||||
await allFuturesThrowing(
|
||||
allFutures(muxs.mapIt( it.close() )))
|
||||
@@ -231,7 +232,7 @@ suite "Connection Manager":
|
||||
|
||||
await muxer.close()
|
||||
|
||||
checkExpiring: muxer notin connMngr
|
||||
checkUntilTimeout: muxer notin connMngr
|
||||
|
||||
await connMngr.close()
|
||||
|
||||
@@ -254,7 +255,7 @@ suite "Connection Manager":
|
||||
check peerId in connMngr
|
||||
await connMngr.dropPeer(peerId)
|
||||
|
||||
checkExpiring: peerId notin connMngr
|
||||
checkUntilTimeout: peerId notin connMngr
|
||||
check isNil(connMngr.selectMuxer(peerId, Direction.In))
|
||||
check isNil(connMngr.selectMuxer(peerId, Direction.Out))
|
||||
|
||||
|
||||
@@ -57,14 +57,15 @@ suite "Dcutr":
|
||||
for t in behindNATSwitch.transports:
|
||||
t.networkReachability = NetworkReachability.NotReachable
|
||||
|
||||
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
|
||||
.wait(300.millis)
|
||||
|
||||
checkExpiring:
|
||||
expect CatchableError:
|
||||
# we can't hole punch when both peers are in the same machine. This means that the simultaneous dialings will result
|
||||
# in two connections attemps, instead of one. The server dial is going to fail because it is acting as the
|
||||
# tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case, but the client
|
||||
# dial will succeed.
|
||||
# in two connections attemps, instead of one. This dial is going to fail because the dcutr client is acting as the
|
||||
# tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case.
|
||||
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
|
||||
.wait(300.millis)
|
||||
|
||||
checkUntilTimeout:
|
||||
# we still expect a new connection to be open by the receiver peer acting as the dcutr server
|
||||
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
|
||||
|
||||
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
|
||||
@@ -82,9 +83,9 @@ suite "Dcutr":
|
||||
|
||||
body
|
||||
|
||||
checkExpiring:
|
||||
# no connection will be open by the receiver peer acting as the dcutr server
|
||||
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1
|
||||
checkUntilTimeout:
|
||||
# we still expect a new connection to be open by the receiver peer acting as the dcutr server
|
||||
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
|
||||
|
||||
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
|
||||
|
||||
@@ -95,7 +96,7 @@ suite "Dcutr":
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out): Future[void] {.async.} =
|
||||
dir = Direction.Out): Future[void] {.async.} =
|
||||
await sleepAsync(100.millis)
|
||||
|
||||
let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectTimeoutProc)
|
||||
@@ -114,7 +115,7 @@ suite "Dcutr":
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out): Future[void] {.async.} =
|
||||
dir = Direction.Out): Future[void] {.async.} =
|
||||
raise newException(CatchableError, "error")
|
||||
|
||||
let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectErrorProc)
|
||||
@@ -142,13 +143,16 @@ suite "Dcutr":
|
||||
for t in behindNATSwitch.transports:
|
||||
t.networkReachability = NetworkReachability.NotReachable
|
||||
|
||||
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
|
||||
.wait(300.millis)
|
||||
|
||||
checkExpiring:
|
||||
expect CatchableError:
|
||||
# we can't hole punch when both peers are in the same machine. This means that the simultaneous dialings will result
|
||||
# in two connections attemps, instead of one. The server dial is going to fail, but the client dial will succeed.
|
||||
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
|
||||
# in two connections attemps, instead of one. This dial is going to fail because the dcutr client is acting as the
|
||||
# tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case.
|
||||
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
|
||||
.wait(300.millis)
|
||||
|
||||
checkUntilTimeout:
|
||||
# we still expect a new connection to be open by the receiver peer acting as the dcutr server
|
||||
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1
|
||||
|
||||
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
|
||||
|
||||
@@ -159,7 +163,7 @@ suite "Dcutr":
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out): Future[void] {.async.} =
|
||||
dir = Direction.Out): Future[void] {.async.} =
|
||||
await sleepAsync(100.millis)
|
||||
|
||||
await ductrServerTest(connectProc)
|
||||
@@ -171,7 +175,23 @@ suite "Dcutr":
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out): Future[void] {.async.} =
|
||||
dir = Direction.Out): Future[void] {.async.} =
|
||||
raise newException(CatchableError, "error")
|
||||
|
||||
await ductrServerTest(connectProc)
|
||||
|
||||
test "should return valid TCP/IP and TCP/DNS addresses only":
|
||||
let testAddrs = @[MultiAddress.init("/ip4/192.0.2.1/tcp/1234").tryGet(),
|
||||
MultiAddress.init("/ip4/203.0.113.5/tcp/5678/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
|
||||
MultiAddress.init("/ip6/::1/tcp/9012").tryGet(),
|
||||
MultiAddress.init("/dns4/example.com/tcp/3456/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
|
||||
MultiAddress.init("/ip4/198.51.100.42/udp/7890").tryGet()]
|
||||
|
||||
let expected = @[MultiAddress.init("/ip4/192.0.2.1/tcp/1234").tryGet(),
|
||||
MultiAddress.init("/ip4/203.0.113.5/tcp/5678").tryGet(),
|
||||
MultiAddress.init("/ip6/::1/tcp/9012").tryGet(),
|
||||
MultiAddress.init("/dns4/example.com/tcp/3456").tryGet()]
|
||||
|
||||
let result = getHolePunchableAddrs(testAddrs)
|
||||
|
||||
check result == expected
|
||||
@@ -22,13 +22,13 @@ when not defined(macosx):
  asyncTest "simple heartbeat":
    var i = 0
    proc t() {.async.} =
-     heartbeat "shouldn't see this", 30.milliseconds:
+     heartbeat "shouldn't see this", 50.milliseconds:
        i.inc()
    let hb = t()
-   await sleepAsync(300.milliseconds)
+   await sleepAsync(500.milliseconds)
    await hb.cancelAndWait()
    check:
-     i in 9..11
+     i in 9..12

  asyncTest "change heartbeat period on the fly":
    var i = 0
@@ -46,7 +46,7 @@ when not defined(macosx):
    # (500 ms - 120 ms) / 75ms = 5x 75ms
    # total 9
    check:
-     i in 8..10
+     i in 8..11

  asyncTest "catch up on slow heartbeat":
    var i = 0
@@ -63,4 +63,4 @@ when not defined(macosx):
    # 360ms remaining, / 30ms = 12x
    # total 15
    check:
-     i in 14..16
+     i in 14..17
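heartbeat here is the helper from libp2p/utils/heartbeat: it runs its body roughly once per interval and catches up when an iteration overruns, which is why these checks accept a small range of counts rather than an exact number. A minimal usage sketch (module path assumed from this repo's layout):

```nim
import chronos
import libp2p/utils/heartbeat

# Minimal sketch: count ticks for ~500 ms at a 50 ms interval, then cancel.
proc sampler(): Future[int] {.async.} =
  var ticks = 0
  proc loop() {.async.} =
    heartbeat "sampler", 50.milliseconds:
      ticks.inc()
  let fut = loop()
  await sleepAsync(500.milliseconds)
  await fut.cancelAndWait()
  return ticks

echo waitFor sampler()   # typically prints a value around 10
```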
42
tests/testhelpers.nim
Normal file
@@ -0,0 +1,42 @@
{.used.}

# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import ./helpers

suite "Helpers":

  asyncTest "checkUntilTimeout should pass if the condition is true":
    let a = 2
    let b = 2
    checkUntilTimeout:
      a == b

  asyncTest "checkUntilTimeout should pass if the conditions are true":
    let a = 2
    let b = 2
    checkUntilTimeout:
      a == b
      a == 2
      b == 2

  asyncTest "checkUntilCustomTimeout should pass when the condition is true":
    let a = 2
    let b = 2
    checkUntilCustomTimeout(2.seconds):
      a == b

  asyncTest "checkUntilCustomTimeout should pass when the conditions are true":
    let a = 2
    let b = 2
    checkUntilCustomTimeout(5.seconds):
      a == b
      a == 2
      b == 2
@@ -65,7 +65,7 @@ suite "Hole Punching":
|
||||
|
||||
let publicPeerSwitch = createSwitch(RelayClient.new())
|
||||
|
||||
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
|
||||
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
|
||||
return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
|
||||
publicPeerSwitch.peerInfo.addressMappers.add(addressMapper)
|
||||
await publicPeerSwitch.peerInfo.update()
|
||||
@@ -89,8 +89,8 @@ suite "Hole Punching":
|
||||
|
||||
await publicPeerSwitch.connect(privatePeerSwitch.peerInfo.peerId, (await privatePeerRelayAddr))
|
||||
|
||||
checkExpiring:
|
||||
privatePeerSwitch.connManager.connCount(publicPeerSwitch.peerInfo.peerId) == 1 and
|
||||
checkUntilTimeout:
|
||||
privatePeerSwitch.connManager.connCount(publicPeerSwitch.peerInfo.peerId) == 1
|
||||
not isRelayed(privatePeerSwitch.connManager.selectMuxer(publicPeerSwitch.peerInfo.peerId).connection)
|
||||
|
||||
await allFuturesThrowing(
|
||||
@@ -127,8 +127,8 @@ suite "Hole Punching":
|
||||
|
||||
await publicPeerSwitch.connect(privatePeerSwitch.peerInfo.peerId, (await privatePeerRelayAddr))
|
||||
|
||||
checkExpiring:
|
||||
privatePeerSwitch.connManager.connCount(publicPeerSwitch.peerInfo.peerId) == 1 and
|
||||
checkUntilTimeout:
|
||||
privatePeerSwitch.connManager.connCount(publicPeerSwitch.peerInfo.peerId) == 1
|
||||
not isRelayed(privatePeerSwitch.connManager.selectMuxer(publicPeerSwitch.peerInfo.peerId).connection)
|
||||
|
||||
await allFuturesThrowing(
|
||||
@@ -193,38 +193,24 @@ suite "Hole Punching":
|
||||
await privatePeerSwitch2.connect(privatePeerSwitch1.peerInfo.peerId, (await privatePeerRelayAddr1))
|
||||
privatePeerSwitch2.connectStub = rcvConnectStub
|
||||
|
||||
checkExpiring:
|
||||
# we can't hole punch when both peers are in the same machine. This means that the simultaneous dialings will result
|
||||
# in two connections attemps, instead of one. The server dial is going to fail because it is acting as the
|
||||
# tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case, but the client
|
||||
# dial will succeed.
|
||||
privatePeerSwitch1.connManager.connCount(privatePeerSwitch2.peerInfo.peerId) == 1 and
|
||||
not isRelayed(privatePeerSwitch1.connManager.selectMuxer(privatePeerSwitch2.peerInfo.peerId).connection)
|
||||
# wait for hole punching to finish in the background
|
||||
await sleepAsync(600.millis)
|
||||
|
||||
await allFuturesThrowing(
|
||||
privatePeerSwitch1.stop(), privatePeerSwitch2.stop(), switchRelay.stop(),
|
||||
switchAux.stop(), switchAux2.stop(), switchAux3.stop(), switchAux4.stop())
|
||||
|
||||
asyncTest "Hole punching when peers addresses are private":
|
||||
proc connectStub(self: SwitchStub,
|
||||
peerId: PeerId,
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out): Future[void] {.async.} =
|
||||
self.connectStub = nil # this stub should be called only once
|
||||
await sleepAsync(100.millis) # avoid simultaneous dialing that causes address in use error
|
||||
await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
|
||||
await holePunchingTest(nil, connectStub, NotReachable)
|
||||
await holePunchingTest(nil, nil, NotReachable)
|
||||
|
||||
asyncTest "Hole punching when there is an error during unilateral direct connection":
|
||||
asyncTest "Hole punching when peers addresses are private and there is an error in the initiator side":
|
||||
|
||||
proc connectStub(self: SwitchStub,
|
||||
peerId: PeerId,
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out): Future[void] {.async.} =
|
||||
dir = Direction.Out): Future[void] {.async.} =
|
||||
self.connectStub = nil # this stub should be called only once
|
||||
raise newException(CatchableError, "error")
|
||||
|
||||
|
||||
@@ -73,7 +73,7 @@ suite "Identify":
|
||||
|
||||
asyncTest "default agent version":
|
||||
msListen.addHandler(IdentifyCodec, identifyProto1)
|
||||
proc acceptHandler(): Future[void] {.async, gcsafe.} =
|
||||
proc acceptHandler(): Future[void] {.async.} =
|
||||
let c = await transport1.accept()
|
||||
await msListen.handle(c)
|
||||
|
||||
@@ -95,7 +95,7 @@ suite "Identify":
|
||||
remotePeerInfo.agentVersion = customAgentVersion
|
||||
msListen.addHandler(IdentifyCodec, identifyProto1)
|
||||
|
||||
proc acceptHandler(): Future[void] {.async, gcsafe.} =
|
||||
proc acceptHandler(): Future[void] {.async.} =
|
||||
let c = await transport1.accept()
|
||||
await msListen.handle(c)
|
||||
|
||||
@@ -136,7 +136,7 @@ suite "Identify":
|
||||
asyncTest "can send signed peer record":
|
||||
msListen.addHandler(IdentifyCodec, identifyProto1)
|
||||
identifyProto1.sendSignedPeerRecord = true
|
||||
proc acceptHandler(): Future[void] {.async, gcsafe.} =
|
||||
proc acceptHandler(): Future[void] {.async.} =
|
||||
let c = await transport1.accept()
|
||||
await msListen.handle(c)
|
||||
|
||||
@@ -219,8 +219,8 @@ suite "Identify":
|
||||
|
||||
await identifyPush2.push(switch2.peerInfo, conn)
|
||||
|
||||
checkExpiring: switch1.peerStore[ProtoBook][switch2.peerInfo.peerId] == switch2.peerInfo.protocols
|
||||
checkExpiring: switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs
|
||||
checkUntilTimeout: switch1.peerStore[ProtoBook][switch2.peerInfo.peerId] == switch2.peerInfo.protocols
|
||||
checkUntilTimeout: switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs
|
||||
|
||||
await closeAll()
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{.used.}
|
||||
|
||||
# Nim-Libp2p
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -32,7 +32,8 @@ suite "Mplex":

  suite "channel encoding":
    asyncTest "encode header with channel id 0":
-     proc encHandler(msg: seq[byte]) {.async.} =
+     proc encHandler(
+         msg: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
        check msg == fromHex("000873747265616d2031")

      let conn = TestBufferStream.new(encHandler)
@@ -40,7 +41,8 @@ suite "Mplex":
      await conn.close()

    asyncTest "encode header with channel id other than 0":
-     proc encHandler(msg: seq[byte]) {.async.} =
+     proc encHandler(
+         msg: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
        check msg == fromHex("88010873747265616d2031")

      let conn = TestBufferStream.new(encHandler)
@@ -48,7 +50,8 @@ suite "Mplex":
      await conn.close()

    asyncTest "encode header and body with channel id 0":
-     proc encHandler(msg: seq[byte]) {.async.} =
+     proc encHandler(
+         msg: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
        check msg == fromHex("020873747265616d2031")

      let conn = TestBufferStream.new(encHandler)
@@ -56,7 +59,8 @@ suite "Mplex":
      await conn.close()

    asyncTest "encode header and body with channel id other than 0":
-     proc encHandler(msg: seq[byte]) {.async.} =
+     proc encHandler(
+         msg: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
        check msg == fromHex("8a010873747265616d2031")

      let conn = TestBufferStream.new(encHandler)
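These handlers now use chronos's typed-raises form of async: the set of exceptions a proc may raise is declared up front and checked at compile time, and CancelledError must be listed for anything awaitable that can be cancelled. A minimal standalone sketch of the annotation, independent of the test harness:

```nim
import chronos

# Compiles: sleepAsync can only raise CancelledError, which is declared.
proc tick() {.async: (raises: [CancelledError]).} =
  await sleepAsync(10.milliseconds)

# Raising e.g. ValueError inside this proc would be a compile-time error,
# because ValueError is not part of the declared raises set.
proc strictTick() {.async: (raises: [CancelledError]).} =
  await tick()

waitFor strictTick()
```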
@@ -97,7 +101,10 @@ suite "Mplex":
|
||||
|
||||
suite "channel half-closed":
|
||||
asyncTest "(local close) - should close for write":
|
||||
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc writeHandler(
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
let
|
||||
conn = TestBufferStream.new(writeHandler)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -112,7 +119,9 @@ suite "Mplex":
|
||||
asyncTest "(local close) - should allow reads until remote closes":
|
||||
let
|
||||
conn = TestBufferStream.new(
|
||||
proc (data: seq[byte]) {.gcsafe, async.} =
|
||||
proc (
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard,
|
||||
)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -139,7 +148,9 @@ suite "Mplex":
|
||||
asyncTest "(remote close) - channel should close for reading by remote":
|
||||
let
|
||||
conn = TestBufferStream.new(
|
||||
proc (data: seq[byte]) {.gcsafe, async.} =
|
||||
proc (
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard,
|
||||
)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -162,7 +173,9 @@ suite "Mplex":
|
||||
let
|
||||
testData = "Hello!".toBytes
|
||||
conn = TestBufferStream.new(
|
||||
proc (data: seq[byte]) {.gcsafe, async.} =
|
||||
proc (
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -175,7 +188,10 @@ suite "Mplex":
|
||||
await conn.close()
|
||||
|
||||
asyncTest "should not allow pushing data to channel when remote end closed":
|
||||
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc writeHandler(
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
let
|
||||
conn = TestBufferStream.new(writeHandler)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -192,7 +208,10 @@ suite "Mplex":
|
||||
suite "channel reset":
|
||||
|
||||
asyncTest "channel should fail reading":
|
||||
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc writeHandler(
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
let
|
||||
conn = TestBufferStream.new(writeHandler)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -205,7 +224,10 @@ suite "Mplex":
|
||||
await conn.close()
|
||||
|
||||
asyncTest "reset should complete read":
|
||||
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc writeHandler(
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
let
|
||||
conn = TestBufferStream.new(writeHandler)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -220,7 +242,10 @@ suite "Mplex":
|
||||
await conn.close()
|
||||
|
||||
asyncTest "reset should complete pushData":
|
||||
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc writeHandler(
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
let
|
||||
conn = TestBufferStream.new(writeHandler)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -239,7 +264,10 @@ suite "Mplex":
|
||||
await conn.close()
|
||||
|
||||
asyncTest "reset should complete both read and push":
|
||||
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc writeHandler(
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
let
|
||||
conn = TestBufferStream.new(writeHandler)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -254,7 +282,10 @@ suite "Mplex":
|
||||
await conn.close()
|
||||
|
||||
asyncTest "reset should complete both read and pushes":
|
||||
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc writeHandler(
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
let
|
||||
conn = TestBufferStream.new(writeHandler)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -279,7 +310,10 @@ suite "Mplex":
|
||||
await conn.close()
|
||||
|
||||
asyncTest "reset should complete both read and push with cancel":
|
||||
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc writeHandler(
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
let
|
||||
conn = TestBufferStream.new(writeHandler)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -293,7 +327,10 @@ suite "Mplex":
|
||||
await conn.close()
|
||||
|
||||
asyncTest "should complete both read and push after reset":
|
||||
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc writeHandler(
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
let
|
||||
conn = TestBufferStream.new(writeHandler)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -311,7 +348,10 @@ suite "Mplex":
|
||||
await conn.close()
|
||||
|
||||
asyncTest "reset should complete ongoing push without reader":
|
||||
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc writeHandler(
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
let
|
||||
conn = TestBufferStream.new(writeHandler)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -323,7 +363,10 @@ suite "Mplex":
|
||||
await conn.close()
|
||||
|
||||
asyncTest "reset should complete ongoing read without a push":
|
||||
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc writeHandler(
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
let
|
||||
conn = TestBufferStream.new(writeHandler)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -335,7 +378,10 @@ suite "Mplex":
|
||||
await conn.close()
|
||||
|
||||
asyncTest "reset should allow all reads and pushes to complete":
|
||||
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc writeHandler(
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
let
|
||||
conn = TestBufferStream.new(writeHandler)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -364,7 +410,10 @@ suite "Mplex":
|
||||
await conn.close()
|
||||
|
||||
asyncTest "channel should fail writing":
|
||||
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc writeHandler(
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
let
|
||||
conn = TestBufferStream.new(writeHandler)
|
||||
chann = LPChannel.init(1, conn, true)
|
||||
@@ -376,7 +425,10 @@ suite "Mplex":
|
||||
await conn.close()
|
||||
|
||||
asyncTest "channel should reset on timeout":
|
||||
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc writeHandler(
|
||||
data: seq[byte]
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
let
|
||||
conn = TestBufferStream.new(writeHandler)
|
||||
chann = LPChannel.init(
|
||||
@@ -392,14 +444,18 @@ suite "Mplex":
    let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
    let listenFut = transport1.start(ma)

-   proc acceptHandler() {.async, gcsafe.} =
+   proc acceptHandler() {.async.} =
      let conn = await transport1.accept()
      let mplexListen = Mplex.new(conn)
-     mplexListen.streamHandler = proc(stream: Connection)
-       {.async, gcsafe.} =
-       let msg = await stream.readLp(1024)
-       check string.fromBytes(msg) == "HELLO"
-       await stream.close()
+     mplexListen.streamHandler =
+       proc(stream: Connection) {.async: (raises: []).} =
+         try:
+           let msg = await stream.readLp(1024)
+           check string.fromBytes(msg) == "HELLO"
+         except CancelledError, LPStreamError:
+           return
+         finally:
+           await stream.close()

      await mplexListen.handle()
      await mplexListen.close()
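The pattern repeated across these Mplex tests is that the stream handler is now declared as raising nothing, so each handler must absorb CancelledError/LPStreamError itself and close the stream in a finally block. A hedged sketch of a small template that factors that boilerplate out (not part of the repo, purely an illustration):

```nim
import chronos
import libp2p/stream/connection   # Connection
import libp2p/stream/lpstream     # LPStreamError

# Hedged sketch: wrap a handler body so it satisfies {.async: (raises: []).}.
# stream.close() itself does not raise, so it is safe in the finally clause.
template safeStream(stream: Connection, body: untyped): untyped =
  try:
    body
  except CancelledError, LPStreamError:
    discard
  finally:
    await stream.close()

# usage inside a streamHandler:
# mplexListen.streamHandler = proc(stream: Connection) {.async: (raises: []).} =
#   safeStream(stream):
#     let msg = await stream.readLp(1024)
#     check string.fromBytes(msg) == "HELLO"
```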
@@ -429,14 +485,18 @@ suite "Mplex":
|
||||
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
|
||||
let listenFut = transport1.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
let conn = await transport1.accept()
|
||||
let mplexListen = Mplex.new(conn)
|
||||
mplexListen.streamHandler = proc(stream: Connection)
|
||||
{.async, gcsafe.} =
|
||||
let msg = await stream.readLp(1024)
|
||||
check string.fromBytes(msg) == "HELLO"
|
||||
await stream.close()
|
||||
mplexListen.streamHandler =
|
||||
proc(stream: Connection) {.async: (raises: []).} =
|
||||
try:
|
||||
let msg = await stream.readLp(1024)
|
||||
check string.fromBytes(msg) == "HELLO"
|
||||
except CancelledError, LPStreamError:
|
||||
return
|
||||
finally:
|
||||
await stream.close()
|
||||
|
||||
await mplexListen.handle()
|
||||
await mplexListen.close()
|
||||
@@ -473,17 +533,21 @@ suite "Mplex":
|
||||
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
|
||||
let listenFut = transport1.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
try:
|
||||
let conn = await transport1.accept()
|
||||
let mplexListen = Mplex.new(conn)
|
||||
mplexListen.streamHandler = proc(stream: Connection)
|
||||
{.async, gcsafe.} =
|
||||
let msg = await stream.readLp(MaxMsgSize)
|
||||
check msg == bigseq
|
||||
trace "Bigseq check passed!"
|
||||
await stream.close()
|
||||
listenJob.complete()
|
||||
mplexListen.streamHandler =
|
||||
proc(stream: Connection) {.async: (raises: []).} =
|
||||
try:
|
||||
let msg = await stream.readLp(MaxMsgSize)
|
||||
check msg == bigseq
|
||||
trace "Bigseq check passed!"
|
||||
except CancelledError, LPStreamError:
|
||||
return
|
||||
finally:
|
||||
await stream.close()
|
||||
listenJob.complete()
|
||||
|
||||
await mplexListen.handle()
|
||||
await sleepAsync(1.seconds) # give chronos some slack to process things
|
||||
@@ -520,13 +584,17 @@ suite "Mplex":
|
||||
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
|
||||
let listenFut = transport1.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
let conn = await transport1.accept()
|
||||
let mplexListen = Mplex.new(conn)
|
||||
mplexListen.streamHandler = proc(stream: Connection)
|
||||
{.async, gcsafe.} =
|
||||
await stream.writeLp("Hello from stream!")
|
||||
await stream.close()
|
||||
mplexListen.streamHandler =
|
||||
proc(stream: Connection) {.async: (raises: []).} =
|
||||
try:
|
||||
await stream.writeLp("Hello from stream!")
|
||||
except CancelledError, LPStreamError:
|
||||
return
|
||||
finally:
|
||||
await stream.close()
|
||||
|
||||
await mplexListen.handle()
|
||||
await mplexListen.close()
|
||||
@@ -557,18 +625,25 @@ suite "Mplex":
|
||||
let listenFut = transport1.start(ma)
|
||||
|
||||
let done = newFuture[void]()
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
var count = 1
|
||||
let conn = await transport1.accept()
|
||||
let mplexListen = Mplex.new(conn)
|
||||
mplexListen.streamHandler = proc(stream: Connection)
|
||||
{.async, gcsafe.} =
|
||||
let msg = await stream.readLp(1024)
|
||||
check string.fromBytes(msg) == &"stream {count}!"
|
||||
count.inc
|
||||
if count == 11:
|
||||
done.complete()
|
||||
await stream.close()
|
||||
mplexListen.streamHandler =
|
||||
proc(stream: Connection) {.async: (raises: []).} =
|
||||
try:
|
||||
let msg = await stream.readLp(1024)
|
||||
try:
|
||||
check string.fromBytes(msg) == &"stream {count}!"
|
||||
except ValueError as exc:
|
||||
raiseAssert(exc.msg)
|
||||
count.inc
|
||||
if count == 11:
|
||||
done.complete()
|
||||
except CancelledError, LPStreamError:
|
||||
return
|
||||
finally:
|
||||
await stream.close()
|
||||
|
||||
await mplexListen.handle()
|
||||
await mplexListen.close()
|
||||
@@ -601,19 +676,26 @@ suite "Mplex":
|
||||
let listenFut = transport1.start(ma)
|
||||
|
||||
let done = newFuture[void]()
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
var count = 1
|
||||
let conn = await transport1.accept()
|
||||
let mplexListen = Mplex.new(conn)
|
||||
mplexListen.streamHandler = proc(stream: Connection)
|
||||
{.async, gcsafe.} =
|
||||
let msg = await stream.readLp(1024)
|
||||
check string.fromBytes(msg) == &"stream {count} from dialer!"
|
||||
await stream.writeLp(&"stream {count} from listener!")
|
||||
count.inc
|
||||
if count == 11:
|
||||
done.complete()
|
||||
await stream.close()
|
||||
mplexListen.streamHandler =
|
||||
proc(stream: Connection) {.async: (raises: []).} =
|
||||
try:
|
||||
let msg = await stream.readLp(1024)
|
||||
try:
|
||||
check string.fromBytes(msg) == &"stream {count} from dialer!"
|
||||
await stream.writeLp(&"stream {count} from listener!")
|
||||
except ValueError as exc:
|
||||
raiseAssert(exc.msg)
|
||||
count.inc
|
||||
if count == 11:
|
||||
done.complete()
|
||||
except CancelledError, LPStreamError:
|
||||
return
|
||||
finally:
|
||||
await stream.close()
|
||||
|
||||
await mplexListen.handle()
|
||||
await mplexListen.close()
|
||||
@@ -646,20 +728,23 @@ suite "Mplex":
|
||||
|
||||
let transport1 = TcpTransport.new(upgrade = Upgrade())
|
||||
var listenStreams: seq[Connection]
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
let conn = await transport1.accept()
|
||||
let mplexListen = Mplex.new(conn)
|
||||
|
||||
mplexListen.streamHandler = proc(stream: Connection)
|
||||
{.async, gcsafe.} =
|
||||
listenStreams.add(stream)
|
||||
try:
|
||||
discard await stream.readLp(1024)
|
||||
except LPStreamEOFError:
|
||||
await stream.close()
|
||||
return
|
||||
mplexListen.streamHandler =
|
||||
proc(stream: Connection) {.async: (raises: []).} =
|
||||
listenStreams.add(stream)
|
||||
try:
|
||||
discard await stream.readLp(1024)
|
||||
except LPStreamEOFError:
|
||||
return
|
||||
except CancelledError, LPStreamError:
|
||||
return
|
||||
finally:
|
||||
await stream.close()
|
||||
|
||||
check false
|
||||
check false
|
||||
|
||||
await mplexListen.handle()
|
||||
await mplexListen.close()
|
||||
@@ -697,17 +782,17 @@ suite "Mplex":
|
||||
var count = 0
|
||||
var done = newFuture[void]()
|
||||
var listenStreams: seq[Connection]
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
let conn = await transport1.accept()
|
||||
let mplexListen = Mplex.new(conn)
|
||||
mplexListen.streamHandler = proc(stream: Connection)
|
||||
{.async, gcsafe.} =
|
||||
listenStreams.add(stream)
|
||||
count.inc()
|
||||
if count == 10:
|
||||
done.complete()
|
||||
mplexListen.streamHandler =
|
||||
proc(stream: Connection) {.async: (raises: []).} =
|
||||
listenStreams.add(stream)
|
||||
count.inc()
|
||||
if count == 10:
|
||||
done.complete()
|
||||
|
||||
await stream.join()
|
||||
await noCancel stream.join()
|
||||
|
||||
await mplexListen.handle()
|
||||
await mplexListen.close()
|
||||
@@ -761,13 +846,13 @@ suite "Mplex":
|
||||
let transport1 = TcpTransport.new(upgrade = Upgrade())
|
||||
|
||||
var listenStreams: seq[Connection]
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
let conn = await transport1.accept()
|
||||
let mplexListen = Mplex.new(conn)
|
||||
mplexListen.streamHandler = proc(stream: Connection)
|
||||
{.async, gcsafe.} =
|
||||
mplexListen.streamHandler =
|
||||
proc(stream: Connection) {.async: (raises: []).} =
|
||||
listenStreams.add(stream)
|
||||
await stream.join()
|
||||
await noCancel stream.join()
|
||||
|
||||
await mplexListen.handle()
|
||||
await mplexListen.close()
|
||||
@@ -805,13 +890,13 @@ suite "Mplex":
|
||||
|
||||
var mplexListen: Mplex
|
||||
var listenStreams: seq[Connection]
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
let conn = await transport1.accept()
|
||||
mplexListen = Mplex.new(conn)
|
||||
mplexListen.streamHandler = proc(stream: Connection)
|
||||
{.async, gcsafe.} =
|
||||
mplexListen.streamHandler =
|
||||
proc(stream: Connection) {.async: (raises: []).} =
|
||||
listenStreams.add(stream)
|
||||
await stream.join()
|
||||
await noCancel stream.join()
|
||||
|
||||
await mplexListen.handle()
|
||||
await mplexListen.close()
|
||||
@@ -829,7 +914,7 @@ suite "Mplex":
|
||||
check:
|
||||
unorderedCompare(dialStreams, mplexDial.getStreams())
|
||||
|
||||
checkExpiring: listenStreams.len == 10 and dialStreams.len == 10
|
||||
checkUntilTimeout: listenStreams.len == 10 and dialStreams.len == 10
|
||||
|
||||
await mplexListen.close()
|
||||
await allFuturesThrowing(
|
||||
@@ -851,13 +936,13 @@ suite "Mplex":

      var mplexHandle: Future[void]
      var listenStreams: seq[Connection]
-      proc acceptHandler() {.async, gcsafe.} =
+      proc acceptHandler() {.async.} =
        let conn = await transport1.accept()
        let mplexListen = Mplex.new(conn)
-        mplexListen.streamHandler = proc(stream: Connection)
-          {.async, gcsafe.} =
+        mplexListen.streamHandler =
+          proc(stream: Connection) {.async: (raises: []).} =
            listenStreams.add(stream)
-            await stream.join()
+            await noCancel stream.join()

        mplexHandle = mplexListen.handle()
        await mplexHandle
@@ -876,7 +961,7 @@ suite "Mplex":
      check:
        unorderedCompare(dialStreams, mplexDial.getStreams())

-      checkExpiring: listenStreams.len == 10 and dialStreams.len == 10
+      checkUntilTimeout: listenStreams.len == 10 and dialStreams.len == 10

      mplexHandle.cancel()
      await allFuturesThrowing(
@@ -896,13 +981,13 @@ suite "Mplex":
      let transport1 = TcpTransport.new(upgrade = Upgrade())

      var listenStreams: seq[Connection]
-      proc acceptHandler() {.async, gcsafe.} =
+      proc acceptHandler() {.async.} =
        let conn = await transport1.accept()
        let mplexListen = Mplex.new(conn)
-        mplexListen.streamHandler = proc(stream: Connection)
-          {.async, gcsafe.} =
+        mplexListen.streamHandler =
+          proc(stream: Connection) {.async: (raises: []).} =
            listenStreams.add(stream)
-            await stream.join()
+            await noCancel stream.join()

        await mplexListen.handle()
        await mplexListen.close()
@@ -920,7 +1005,7 @@ suite "Mplex":
      check:
        unorderedCompare(dialStreams, mplexDial.getStreams())

-      checkExpiring: listenStreams.len == 10 and dialStreams.len == 10
+      checkUntilTimeout: listenStreams.len == 10 and dialStreams.len == 10

      await conn.close()
      await allFuturesThrowing(
@@ -943,13 +1028,13 @@ suite "Mplex":

      var listenConn: Connection
      var listenStreams: seq[Connection]
-      proc acceptHandler() {.async, gcsafe.} =
+      proc acceptHandler() {.async.} =
        listenConn = await transport1.accept()
        let mplexListen = Mplex.new(listenConn)
-        mplexListen.streamHandler = proc(stream: Connection)
-          {.async, gcsafe.} =
+        mplexListen.streamHandler =
+          proc(stream: Connection) {.async: (raises: []).} =
            listenStreams.add(stream)
-            await stream.join()
+            await noCancel stream.join()

        await mplexListen.handle()
        await mplexListen.close()
@@ -967,7 +1052,7 @@ suite "Mplex":
      check:
        unorderedCompare(dialStreams, mplexDial.getStreams())

-      checkExpiring: listenStreams.len == 10 and dialStreams.len == 10
+      checkUntilTimeout: listenStreams.len == 10 and dialStreams.len == 10

      await listenConn.closeWithEOF()
      await allFuturesThrowing(
@@ -992,18 +1077,20 @@ suite "Mplex":

      var complete = newFuture[void]()
      const MsgSize = 1024
-      proc acceptHandler() {.async, gcsafe.} =
+      proc acceptHandler() {.async.} =
        let conn = await transport1.accept()
        let mplexListen = Mplex.new(conn)
-        mplexListen.streamHandler = proc(stream: Connection)
-          {.async, gcsafe.} =
-          try:
-            let msg = await stream.readLp(MsgSize)
-            check msg.len == MsgSize
-          except CatchableError as e:
-            echo e.msg
-          await stream.close()
-          complete.complete()
+        mplexListen.streamHandler =
+          proc(stream: Connection) {.async: (raises: []).} =
+            try:
+              let msg = await stream.readLp(MsgSize)
+              check msg.len == MsgSize
+            except CancelledError as e:
+              echo e.msg
+            except LPStreamError as e:
+              echo e.msg
+            await stream.close()
+            complete.complete()

        await mplexListen.handle()
        await mplexListen.close()
@@ -1064,15 +1151,19 @@ suite "Mplex":

      var complete = newFuture[void]()
      const MsgSize = 512
-      proc acceptHandler() {.async, gcsafe.} =
+      proc acceptHandler() {.async.} =
        let conn = await transport1.accept()
        let mplexListen = Mplex.new(conn)
-        mplexListen.streamHandler = proc(stream: Connection)
-          {.async, gcsafe.} =
-          let msg = await stream.readLp(MsgSize)
-          check msg.len == MsgSize
-          await stream.close()
-          complete.complete()
+        mplexListen.streamHandler =
+          proc(stream: Connection) {.async: (raises: []).} =
+            try:
+              let msg = await stream.readLp(MsgSize)
+              check msg.len == MsgSize
+            except CancelledError, LPStreamError:
+              return
+            finally:
+              await stream.close()
+              complete.complete()

        await mplexListen.handle()
        await mplexListen.close()
Some files were not shown because too many files have changed in this diff.