Mirror of https://github.com/vacp2p/nim-libp2p.git
Synced 2026-01-10 17:18:27 -05:00

Compare commits: 95 commits (fix/ci-wor...penalty-no)
Commits (95), SHA1 only:

a16293bdca, 77d40c34f4, 2fa2c4425f, 47990be775, 0911cb20f4, 3ca49a2f40, 1b91b97499, 21cbe3a91a, 88e233db81, 84659af45b,
aef44ed1ce, 02c96fc003, c4da9be32c, 8014a848a0, 2b5319622c, 5cbb473d1b, 87110e551a, b30b2656d5, 917dbf83cd, 0c61f12b85,
a6237bd1c1, d9a60f339e, 89cad5a3ba, 09b3e11956, 03f67d3db5, bb97a9de79, 1a707e1264, 458b0885dd, a2027003cd, c5db35d9b0,
d1e51beb7f, 275d649287, 467b5b4f0c, fdf53d18cd, 48a3ac06ff, 49a92e5641, 08a48faf41, 61b299e411, ca01ee06a8, 6c43ab3fce,
ae13a0d583, 28609597d1, 8294d5b9df, 78e83889ee, 7603b8de5e, 8cccd54125, 18e00a741b, ee264fdf11, 9059a8aced, 0b753e7cf2,
d43c5feab0, 1609fd7197, 42cd78e95b, 44cada9c55, 6c873481ac, d08ce17144, bd6ead95ef, 53e3825e07, e9b456162a, 250024f6cc,
fec632d28d, 349496e40f, 7faa0fac23, c5e4f8e12d, fe4ff79885, aa4ebb0b3c, e0f70b7177, c1dfd58772, 04af0c4323, eb0890cd6f,
9bc5ec1566, 5594bcb33e, d46bcdb6ac, 9468bb6b4d, 2725be64ba, e3c967ad19, d2c98bd87d, 3011ba4326, c6566707fa, 3be681ec4d,
2ede0fa40c, 7c195ab927, 3230407ffe, deb72c8580, ce0685c272, 1f4b090227, fb05f5ae22, e12f65f193, 4b3bc4f819, 6791f5e7bb,
08d9c84aca, 4e7eaba67a, 5f7a3ab829, ebef85c9d7, 3fc1236659
.github/workflows/bumper.yml (vendored, 3 lines changed)

@@ -2,8 +2,7 @@ name: Bumper
on:
  push:
    branches:
      - unstable
      - bumper
      - master
  workflow_dispatch:

jobs:
.github/workflows/daily.yml (vendored, new file, 12 lines)

@@ -0,0 +1,12 @@
name: Daily
on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  call-multi-nim-common:
    uses: ./.github/workflows/daily_common.yml
    with:
      nim-branch: "['version-1-6','version-2-0']"
      cpu: "['amd64']"
.github/workflows/daily_common.yml (vendored, new file, 84 lines)

@@ -0,0 +1,84 @@
name: daily-common

on:
  workflow_call:
    inputs:
      nim-branch:
        description: 'Nim branch'
        required: true
        type: string
      cpu:
        description: 'CPU'
        required: true
        type: string
      exclude:
        description: 'Exclude matrix configurations'
        required: false
        type: string
        default: "[]"

jobs:
  delete-cache:
    runs-on: ubuntu-latest
    steps:
      - uses: snnaplab/delete-branch-cache-action@v1

  build:
    needs: delete-cache
    timeout-minutes: 120
    strategy:
      fail-fast: false
      matrix:
        platform:
          - os: linux
            builder: ubuntu-20
            shell: bash
          - os: macos
            builder: macos-12
            shell: bash
          - os: windows
            builder: windows-2019
            shell: msys2 {0}
        branch: ${{ fromJSON(inputs.nim-branch) }}
        cpu: ${{ fromJSON(inputs.cpu) }}
        exclude: ${{ fromJSON(inputs.exclude) }}

    defaults:
      run:
        shell: ${{ matrix.platform.shell }}

    name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.branch }})'
    runs-on: ${{ matrix.platform.builder }}
    continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          os: ${{ matrix.platform.os }}
          shell: ${{ matrix.platform.shell }}
          nim_branch: ${{ matrix.branch }}
          cpu: ${{ matrix.cpu }}

      - name: Setup Go
        uses: actions/setup-go@v4
        with:
          go-version: '~1.15.5'
          cache: false

      - name: Install p2pd
        run: |
          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3

      - name: Run tests
        run: |
          nim --version
          nimble --version
          nimble install -y --depsOnly
          NIMFLAGS="${NIMFLAGS} --mm:refc" nimble test
          if [[ "${{ matrix.branch }}" == "devel" ]]; then
            echo -e "\nTesting with '--mm:orc':\n"
            NIMFLAGS="${NIMFLAGS} --mm:orc" nimble test
          fi
.github/workflows/daily_i386.yml (vendored, new file, 13 lines)

@@ -0,0 +1,13 @@
name: Daily i386
on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  call-multi-nim-common:
    uses: ./.github/workflows/daily_common.yml
    with:
      nim-branch: "['version-1-6','version-2-0', 'devel']"
      cpu: "['i386']"
      exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"
.github/workflows/daily_nim_devel.yml (vendored, new file, 12 lines)

@@ -0,0 +1,12 @@
name: Daily Nim Devel
on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  call-multi-nim-common:
    uses: ./.github/workflows/daily_common.yml
    with:
      nim-branch: "['devel']"
      cpu: "['amd64']"
.github/workflows/doc.yml (vendored, 4 lines changed)

@@ -19,13 +19,13 @@ jobs:

      - uses: jiro4989/setup-nim-action@v1
        with:
          nim-version: 'stable'
          nim-version: '1.6.x'

      - name: Generate doc
        run: |
          nim --version
          nimble --version
          nimble install_pinned -y
          nimble install_pinned
          # nim doc can "fail", but the doc is still generated
          nim doc --git.url:https://github.com/status-im/nim-libp2p --git.commit:${GITHUB_REF##*/} --outdir:${GITHUB_REF##*/} --project libp2p || true
.github/workflows/interop.yml (vendored, 58 lines changed)

@@ -11,44 +11,30 @@ concurrency:
  cancel-in-progress: true

jobs:
  run-multidim-interop:
    name: Run multidimensional interoperability tests
  run-transport-interop:
    name: Run transport interoperability tests
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
        with:
          repository: libp2p/test-plans
          submodules: true
          fetch-depth: 0

      - uses: actions/checkout@v4
      - uses: docker/setup-buildx-action@v3
      - name: Build image
        run: >
          cd transport-interop/impl/nim/v1.0 &&
          make commitSha=$GITHUB_SHA image_name=nim-libp2p-head

      - name: Create ping-version.json
        run: >
          (cat << EOF
          {
            "id": "nim-libp2p-head",
            "containerImageID": "nim-libp2p-head",
            "transports": [
              "tcp",
              "ws"
            ],
            "secureChannels": [
              "noise"
            ],
            "muxers": [
              "mplex",
              "yamux"
            ]
          }
          EOF

          ) > ${{ github.workspace }}/test_head.json

      - uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
        run: docker buildx build --load -t nim-libp2p-head -f tests/transport-interop/Dockerfile .
      - name: Run tests
        uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
        with:
          test-filter: nim-libp2p-head
          extra-versions: ${{ github.workspace }}/test_head.json
          extra-versions: ${{ github.workspace }}/tests/transport-interop/version.json

  run-hole-punching-interop:
    name: Run hole-punching interoperability tests
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v4
      - uses: docker/setup-buildx-action@v3
      - name: Build image
        run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
      - name: Run tests
        uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
        with:
          test-filter: nim-libp2p-head
          extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json
.github/workflows/multi_nim.yml (vendored, 82 lines, file deleted)

@@ -1,82 +0,0 @@
name: Daily
on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  delete-cache:
    runs-on: ubuntu-latest
    steps:
      - uses: snnaplab/delete-branch-cache-action@v1

  build:
    needs: delete-cache
    timeout-minutes: 120
    strategy:
      fail-fast: false
      matrix:
        target:
          - os: linux
            cpu: amd64
          - os: linux
            cpu: i386
          - os: macos
            cpu: amd64
          - os: windows
            cpu: amd64
          #- os: windows
          #cpu: i386
        branch: [version-1-6, version-2-0, devel]
        include:
          - target:
              os: linux
            builder: ubuntu-20.04
            shell: bash
          - target:
              os: macos
            builder: macos-12
            shell: bash
          - target:
              os: windows
            builder: windows-2019
            shell: msys2 {0}

    defaults:
      run:
        shell: ${{ matrix.shell }}

    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
    runs-on: ${{ matrix.builder }}
    continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          os: ${{ matrix.target.os }}
          shell: ${{ matrix.shell }}
          nim_branch: ${{ matrix.branch }}
          cpu: ${{ matrix.target.cpu }}

      - name: Setup Go
        uses: actions/setup-go@v2
        with:
          go-version: '~1.15.5'

      - name: Install p2pd
        run: |
          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3

      - name: Run tests
        run: |
          nim --version
          nimble --version
          nimble install -y --depsOnly
          NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
          if [[ "${{ matrix.branch }}" == "devel" ]]; then
            echo -e "\nTesting with '--gc:orc':\n"
            NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
          fi
.gitignore (vendored, 1 line changed)

@@ -16,3 +16,4 @@ tests/pubsub/testgossipsub
examples/*.md
nimble.develop
nimble.paths
go-libp2p-daemon/
.pinned (2 lines changed)

@@ -1,6 +1,6 @@
bearssl;https://github.com/status-im/nim-bearssl@#e4157639db180e52727712a47deaefcbbac6ec86
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#ba143e029f35fd9b4cd3d89d007cc834d0d5ba3c
chronos;https://github.com/status-im/nim-chronos@#672db137b7cad9b384b8f4fb551fb6bbeaabfe1b
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
README.md (13 lines changed)

@@ -20,6 +20,7 @@
- [Background](#background)
- [Install](#install)
- [Getting Started](#getting-started)
- [Go-libp2p-daemon](#go-libp2p-daemon)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)

@@ -40,6 +41,8 @@ Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p'
## Install
**Prerequisite**
- [Nim](https://nim-lang.org/install.html)
> The currently supported Nim version is 1.6.18.

```
nimble install libp2p
```

@@ -47,11 +50,11 @@ nimble install libp2p
## Getting Started
You'll find the nim-libp2p documentation [here](https://status-im.github.io/nim-libp2p/docs/).

**Go Daemon:**
Please find the installation and usage instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
### Testing
Remember you'll need to build the `go-libp2p-daemon` binary to run the `nim-libp2p` tests.
To do so, please follow the installation instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).

## Modules

List of packages modules implemented in nim-libp2p:

| Name | Description |

@@ -68,7 +71,6 @@ List of packages modules implemented in nim-libp2p:
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
| [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
| **Secure Channels** | |
| [libp2p-secio](libp2p/protocols/secure/secio.nim) | Secio secure channel |
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
| **Stream Multiplexers** | |

@@ -132,7 +134,8 @@ The libp2p implementation in Nim is a work in progress. We welcome contributors
- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).

- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
- **Code format**. Please format code using [nph](https://github.com/arnetheduck/nph).
The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).

### Contributors
@@ -7,7 +7,6 @@ if dirExists("nimbledeps/pkgs2"):
switch("warning", "CaseTransition:off")
switch("warning", "ObservableStores:off")
switch("warning", "LockLevel:off")
--define:chronosStrictException
--styleCheck:usages
switch("warningAsError", "UseBase:on")
--styleCheck:error
@@ -1,6 +1,8 @@
# Table of Contents
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Script](#script)
- [Usage](#usage)
- [Example](#example)
- [Getting Started](#getting-started)

@@ -8,26 +10,29 @@
# Introduction
This is a libp2p-backed daemon wrapping the functionalities of go-libp2p for use in Nim. <br>
For more information about the go daemon, check out [this repository](https://github.com/libp2p/go-libp2p-daemon).
> **Required only** for running the tests.

# Prerequisites
Go with version `1.15.15`.
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.

# Installation
Follow one of the methods below:

## Script
Run the build script while having the `go` command pointing to the correct Go version.
We recommend using `1.15.15`, as previously stated.
```sh
# clone and install dependencies
git clone https://github.com/status-im/nim-libp2p
cd nim-libp2p
nimble install

# perform unit tests
nimble test

# update the git submodule to install the go daemon
git submodule update --init --recursive
go version
git clone https://github.com/libp2p/go-libp2p-daemon
cd go-libp2p-daemon
git checkout v0.0.1
go install ./...
cd ..
./scripts/build_p2pd.sh
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the correct directory.
If you find any issues, please head into our Discord and ask for our assistance.

After successfully building the binary, remember to add it to your PATH so it can be found. You can do that by running:
```sh
export PATH="$PATH:$HOME/go/bin"
```
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.

# Usage
@@ -13,14 +13,14 @@ type
|
||||
proc new(T: typedesc[TestProto]): T =
|
||||
|
||||
# every incoming connection will be handled in this closure
|
||||
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||
proc handle(conn: Connection, proto: string) {.async.} =
|
||||
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
|
||||
await conn.writeLp("Roger p2p!")
|
||||
|
||||
# We must close the connections ourselves when we're done with it
|
||||
await conn.close()
|
||||
|
||||
return T(codecs: @[TestCodec], handler: handle)
|
||||
return T.new(codecs = @[TestCodec], handler = handle)
|
||||
|
||||
##
|
||||
# Helper to create a switch/node
|
||||
@@ -40,7 +40,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
|
||||
##
|
||||
# The actual application
|
||||
##
|
||||
proc main() {.async, gcsafe.} =
|
||||
proc main() {.async.} =
|
||||
let
|
||||
rng = newRng() # Single random number source for the whole application
|
||||
# port 0 will take a random available port
|
||||
|
||||
@@ -53,7 +53,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
|
||||
##
|
||||
##
|
||||
## Let's now start to create our main procedure:
|
||||
proc main() {.async, gcsafe.} =
|
||||
proc main() {.async.} =
|
||||
let
|
||||
rng = newRng()
|
||||
localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
|
||||
|
||||
@@ -25,7 +25,7 @@ type TestProto = ref object of LPProtocol
|
||||
|
||||
proc new(T: typedesc[TestProto]): T =
|
||||
# every incoming connection will be handled in this closure
|
||||
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||
proc handle(conn: Connection, proto: string) {.async.} =
|
||||
# Read up to 1024 bytes from this connection, and transform them into
|
||||
# a string
|
||||
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
|
||||
@@ -44,7 +44,7 @@ proc hello(p: TestProto, conn: Connection) {.async.} =
|
||||
## Again, pretty straight-forward, we just send a message on the connection.
|
||||
##
|
||||
## We can now create our main procedure:
|
||||
proc main() {.async, gcsafe.} =
|
||||
proc main() {.async.} =
|
||||
let
|
||||
rng = newRng()
|
||||
testProto = TestProto.new()
|
||||
|
||||
@@ -108,7 +108,7 @@ type
|
||||
|
||||
proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
|
||||
var res: MetricProto
|
||||
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||
proc handle(conn: Connection, proto: string) {.async.} =
|
||||
let
|
||||
metrics = await res.metricGetter()
|
||||
asProtobuf = metrics.encode()
|
||||
@@ -126,7 +126,7 @@ proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
|
||||
return MetricList.decode(protobuf).tryGet()
|
||||
|
||||
## We can now create our main procedure:
|
||||
proc main() {.async, gcsafe.} =
|
||||
proc main() {.async.} =
|
||||
let rng = newRng()
|
||||
proc randomMetricGenerator: Future[MetricList] {.async.} =
|
||||
let metricCount = rng[].generate(uint32) mod 16
|
||||
|
||||
@@ -33,7 +33,7 @@ proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
|
||||
const DumbCodec = "/dumb/proto/1.0.0"
|
||||
type DumbProto = ref object of LPProtocol
|
||||
proc new(T: typedesc[DumbProto], nodeNumber: int): T =
|
||||
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||
proc handle(conn: Connection, proto: string) {.async.} =
|
||||
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
|
||||
await conn.close()
|
||||
return T.new(codecs = @[DumbCodec], handler = handle)
|
||||
@@ -49,7 +49,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
|
||||
## (rendezvous in this case) as a bootnode. For this example, we'll
|
||||
## create a bootnode, and then every peer will advertise itself on the
|
||||
## bootnode, and use it to find other peers
|
||||
proc main() {.async, gcsafe.} =
|
||||
proc main() {.async.} =
|
||||
let bootNode = createSwitch()
|
||||
await bootNode.start()
|
||||
|
||||
|
||||
@@ -143,7 +143,7 @@ proc draw(g: Game) =
|
||||
## peer know that we are available, check that he is also available,
|
||||
## and launch the game.
|
||||
proc new(T: typedesc[GameProto], g: Game): T =
|
||||
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||
proc handle(conn: Connection, proto: string) {.async.} =
|
||||
defer: await conn.closeWithEof()
|
||||
if g.peerFound.finished or g.hasCandidate:
|
||||
await conn.close()
|
||||
|
||||
@@ -12,12 +12,12 @@ requires "nim >= 1.6.0",
|
||||
"dnsclient >= 0.3.0 & < 0.4.0",
|
||||
"bearssl >= 0.1.4",
|
||||
"chronicles >= 0.10.2",
|
||||
"chronos >= 3.0.6",
|
||||
"chronos >= 4.0.0",
|
||||
"metrics",
|
||||
"secp256k1",
|
||||
"stew#head",
|
||||
"websock",
|
||||
"unittest2 >= 0.0.5 & <= 0.1.0"
|
||||
"unittest2"
|
||||
|
||||
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
|
||||
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
|
||||
|
||||
@@ -25,7 +25,7 @@ import
|
||||
muxers/[muxer, mplex/mplex, yamux/yamux],
|
||||
protocols/[identify, secure/secure, secure/noise, rendezvous],
|
||||
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
|
||||
connmanager, upgrademngrs/muxedupgrade,
|
||||
connmanager, upgrademngrs/muxedupgrade, observedaddrmanager,
|
||||
nameresolving/nameresolver,
|
||||
errors, utility
|
||||
|
||||
@@ -36,8 +36,7 @@ type
|
||||
TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [].}
|
||||
|
||||
SecureProtocol* {.pure.} = enum
|
||||
Noise,
|
||||
Secio {.deprecated.}
|
||||
Noise
|
||||
|
||||
SwitchBuilder* = ref object
|
||||
privKey: Option[PrivateKey]
|
||||
@@ -59,6 +58,7 @@ type
|
||||
circuitRelay: Relay
|
||||
rdv: RendezVous
|
||||
services: seq[Service]
|
||||
observedAddrManager: ObservedAddrManager
|
||||
|
||||
proc new*(T: type[SwitchBuilder]): T {.public.} =
|
||||
## Creates a SwitchBuilder
|
||||
@@ -121,8 +121,12 @@ proc withMplex*(
|
||||
b.muxers.add(MuxerProvider.new(newMuxer, MplexCodec))
|
||||
b
|
||||
|
||||
proc withYamux*(b: SwitchBuilder): SwitchBuilder =
|
||||
proc newMuxer(conn: Connection): Muxer = Yamux.new(conn)
|
||||
proc withYamux*(b: SwitchBuilder,
|
||||
windowSize: int = YamuxDefaultWindowSize,
|
||||
inTimeout: Duration = 5.minutes,
|
||||
outTimeout: Duration = 5.minutes): SwitchBuilder =
|
||||
proc newMuxer(conn: Connection): Muxer =
|
||||
Yamux.new(conn, windowSize, inTimeout = inTimeout, outTimeout = outTimeout)
|
||||
|
||||
assert b.muxers.countIt(it.codec == YamuxCodec) == 0, "Yamux build multiple times"
|
||||
b.muxers.add(MuxerProvider.new(newMuxer, YamuxCodec))
|
||||
@@ -201,6 +205,10 @@ proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
|
||||
b.services = services
|
||||
b
|
||||
|
||||
proc withObservedAddrManager*(b: SwitchBuilder, observedAddrManager: ObservedAddrManager): SwitchBuilder =
|
||||
b.observedAddrManager = observedAddrManager
|
||||
b
|
||||
|
||||
proc build*(b: SwitchBuilder): Switch
|
||||
{.raises: [LPError], public.} =
|
||||
|
||||
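The two builder additions above (configurable Yamux limits and an injectable ObservedAddrManager) are easiest to see together. Below is a minimal sketch of how the extended API might be wired up; the window size, timeouts, and the surrounding transport/security choices are illustrative assumptions, not part of this diff:

```nim
import chronos
import libp2p
import libp2p/observedaddrmanager

proc buildTunedSwitch(): Switch =
  # Example values only; the withYamux/withObservedAddrManager signatures
  # are the ones introduced in the hunks above.
  let rng = newRng()
  SwitchBuilder
    .new()
    .withRng(rng)
    .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
    .withTcpTransport()
    .withNoise()
    .withYamux(
      windowSize = 512 * 1024,   # per-stream receive window
      inTimeout = 2.minutes,     # inbound stream inactivity timeout
      outTimeout = 2.minutes)    # outbound stream inactivity timeout
    .withObservedAddrManager(ObservedAddrManager.new())
    .build()
```

Note that the builder asserts if withYamux is called more than once, so any tuning has to happen in the single call shown here.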
@@ -223,8 +231,13 @@ proc build*(b: SwitchBuilder): Switch
|
||||
protoVersion = b.protoVersion,
|
||||
agentVersion = b.agentVersion)
|
||||
|
||||
let identify =
|
||||
if b.observedAddrManager != nil:
|
||||
Identify.new(peerInfo, b.sendSignedPeerRecord, b.observedAddrManager)
|
||||
else:
|
||||
Identify.new(peerInfo, b.sendSignedPeerRecord)
|
||||
|
||||
let
|
||||
identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
|
||||
connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
|
||||
ms = MultistreamSelect.new()
|
||||
muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)
|
||||
@@ -277,29 +290,25 @@ proc build*(b: SwitchBuilder): Switch
|
||||
return switch
|
||||
|
||||
proc newStandardSwitch*(
|
||||
privKey = none(PrivateKey),
|
||||
addrs: MultiAddress | seq[MultiAddress] = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
|
||||
secureManagers: openArray[SecureProtocol] = [
|
||||
privKey = none(PrivateKey),
|
||||
addrs: MultiAddress | seq[MultiAddress] =
|
||||
MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("valid address"),
|
||||
secureManagers: openArray[SecureProtocol] = [
|
||||
SecureProtocol.Noise,
|
||||
],
|
||||
transportFlags: set[ServerFlags] = {},
|
||||
rng = newRng(),
|
||||
inTimeout: Duration = 5.minutes,
|
||||
outTimeout: Duration = 5.minutes,
|
||||
maxConnections = MaxConnections,
|
||||
maxIn = -1,
|
||||
maxOut = -1,
|
||||
maxConnsPerPeer = MaxConnectionsPerPeer,
|
||||
nameResolver: NameResolver = nil,
|
||||
sendSignedPeerRecord = false,
|
||||
peerStoreCapacity = 1000): Switch
|
||||
{.raises: [LPError], public.} =
|
||||
transportFlags: set[ServerFlags] = {},
|
||||
rng = newRng(),
|
||||
inTimeout: Duration = 5.minutes,
|
||||
outTimeout: Duration = 5.minutes,
|
||||
maxConnections = MaxConnections,
|
||||
maxIn = -1,
|
||||
maxOut = -1,
|
||||
maxConnsPerPeer = MaxConnectionsPerPeer,
|
||||
nameResolver: NameResolver = nil,
|
||||
sendSignedPeerRecord = false,
|
||||
peerStoreCapacity = 1000
|
||||
): Switch {.raises: [LPError], public.} =
|
||||
## Helper for common switch configurations.
|
||||
{.push warning[Deprecated]:off.}
|
||||
if SecureProtocol.Secio in secureManagers:
|
||||
quit("Secio is deprecated!") # use of secio is unsafe
|
||||
{.pop.}
|
||||
|
||||
let addrs = when addrs is MultiAddress: @[addrs] else: addrs
|
||||
var b = SwitchBuilder
|
||||
.new()
|
||||
|
||||
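For callers that do not need the builder, the reflowed newStandardSwitch helper keeps the same defaults. A small usage sketch with an illustrative listen address and connection limit (everything else left at the defaults listed in the signature above):

```nim
import chronos
import libp2p

proc startNode() {.async.} =
  let switch = newStandardSwitch(
    addrs = MultiAddress.init("/ip4/127.0.0.1/tcp/4040").expect("valid address"),
    maxConnections = 250,          # example limit; default is MaxConnections
    sendSignedPeerRecord = true)
  await switch.start()
  echo "listening on ", switch.peerInfo.addrs
```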
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -261,12 +261,6 @@ proc write*(vb: var VBuffer, cid: Cid) {.inline.} =
|
||||
## Write CID value ``cid`` to buffer ``vb``.
|
||||
vb.writeArray(cid.data.buffer)
|
||||
|
||||
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
|
||||
cid: Cid): string {.inline.} =
|
||||
## Get MultiBase encoded representation of ``cid`` using encoding
|
||||
## ``encoding``.
|
||||
result = MultiBase.encode(encoding, cid.data.buffer).tryGet()
|
||||
|
||||
proc hash*(cid: Cid): Hash {.inline.} =
|
||||
hash(cid.data.buffer)
|
||||
|
||||
|
||||
@@ -128,7 +128,7 @@ proc removeConnEventHandler*(c: ConnManager,
|
||||
|
||||
proc triggerConnEvent*(c: ConnManager,
|
||||
peerId: PeerId,
|
||||
event: ConnEvent) {.async, gcsafe.} =
|
||||
event: ConnEvent) {.async.} =
|
||||
try:
|
||||
trace "About to trigger connection events", peer = peerId
|
||||
if c.connEvents[event.kind].len() > 0:
|
||||
@@ -160,7 +160,7 @@ proc removePeerEventHandler*(c: ConnManager,
|
||||
|
||||
proc triggerPeerEvents*(c: ConnManager,
|
||||
peerId: PeerId,
|
||||
event: PeerEvent) {.async, gcsafe.} =
|
||||
event: PeerEvent) {.async.} =
|
||||
|
||||
trace "About to trigger peer events", peer = peerId
|
||||
if c.peerEvents[event.kind].len == 0:
|
||||
@@ -311,12 +311,14 @@ proc storeMuxer*(c: ConnManager,
|
||||
|
||||
raise newTooManyConnectionsError()
|
||||
|
||||
assert muxer notin c.muxed.getOrDefault(peerId)
|
||||
|
||||
let
|
||||
newPeer = peerId notin c.muxed
|
||||
assert newPeer or c.muxed[peerId].len > 0
|
||||
c.muxed.mgetOrPut(peerId, newSeq[Muxer]()).add(muxer)
|
||||
var newPeer = false
|
||||
c.muxed.withValue(peerId, muxers):
|
||||
doAssert muxers[].len > 0
|
||||
doAssert muxer notin muxers[]
|
||||
muxers[].add(muxer)
|
||||
do:
|
||||
c.muxed[peerId] = @[muxer]
|
||||
newPeer = true
|
||||
libp2p_peers.set(c.muxed.len.int64)
|
||||
|
||||
asyncSpawn c.triggerConnEvent(
|
||||
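The rewritten bookkeeping above relies on tables.withValue with an else branch (`do:`) to distinguish "known peer" from "new peer" in a single lookup. A tiny self-contained sketch of that pattern with an illustrative table, separate from the connection manager itself:

```nim
import std/tables

var streamsPerPeer = initTable[string, seq[int]]()
streamsPerPeer["peerA"] = @[1]

proc track(peer: string, stream: int) =
  # Append when the peer is already tracked, otherwise create the entry,
  # mirroring how storeMuxer() above decides whether this is a new peer.
  streamsPerPeer.withValue(peer, existing):
    existing[].add(stream)
  do:
    streamsPerPeer[peer] = @[stream]

track("peerA", 2)
track("peerB", 7)
echo streamsPerPeer.len   # 2 peers tracked
```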
@@ -379,7 +381,7 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
|
||||
cs.trackConnection(mux.connection)
|
||||
|
||||
proc getStream*(c: ConnManager,
|
||||
muxer: Muxer): Future[Connection] {.async, gcsafe.} =
|
||||
muxer: Muxer): Future[Connection] {.async.} =
|
||||
## get a muxed stream for the passed muxer
|
||||
##
|
||||
|
||||
@@ -387,7 +389,7 @@ proc getStream*(c: ConnManager,
|
||||
return await muxer.newStream()
|
||||
|
||||
proc getStream*(c: ConnManager,
|
||||
peerId: PeerId): Future[Connection] {.async, gcsafe.} =
|
||||
peerId: PeerId): Future[Connection] {.async.} =
|
||||
## get a muxed stream for the passed peer from any connection
|
||||
##
|
||||
|
||||
@@ -395,7 +397,7 @@ proc getStream*(c: ConnManager,
|
||||
|
||||
proc getStream*(c: ConnManager,
|
||||
peerId: PeerId,
|
||||
dir: Direction): Future[Connection] {.async, gcsafe.} =
|
||||
dir: Direction): Future[Connection] {.async.} =
|
||||
## get a muxed stream for the passed peer from a connection with `dir`
|
||||
##
|
||||
|
||||
|
||||
@@ -924,59 +924,6 @@ proc selectBest*(order: int, p1, p2: string): string =
|
||||
if felement == selement:
|
||||
return felement
|
||||
|
||||
proc createProposal*(nonce, pubkey: openArray[byte],
|
||||
exchanges, ciphers, hashes: string): seq[byte] =
|
||||
## Create SecIO proposal message using random ``nonce``, local public key
|
||||
## ``pubkey``, comma-delimieted list of supported exchange schemes
|
||||
## ``exchanges``, comma-delimeted list of supported ciphers ``ciphers`` and
|
||||
## comma-delimeted list of supported hashes ``hashes``.
|
||||
var msg = initProtoBuffer({WithUint32BeLength})
|
||||
msg.write(1, nonce)
|
||||
msg.write(2, pubkey)
|
||||
msg.write(3, exchanges)
|
||||
msg.write(4, ciphers)
|
||||
msg.write(5, hashes)
|
||||
msg.finish()
|
||||
msg.buffer
|
||||
|
||||
proc decodeProposal*(message: seq[byte], nonce, pubkey: var seq[byte],
|
||||
exchanges, ciphers, hashes: var string): bool =
|
||||
## Parse incoming proposal message and decode remote random nonce ``nonce``,
|
||||
## remote public key ``pubkey``, comma-delimieted list of supported exchange
|
||||
## schemes ``exchanges``, comma-delimeted list of supported ciphers
|
||||
## ``ciphers`` and comma-delimeted list of supported hashes ``hashes``.
|
||||
##
|
||||
## Procedure returns ``true`` on success and ``false`` on error.
|
||||
var pb = initProtoBuffer(message)
|
||||
let r1 = pb.getField(1, nonce)
|
||||
let r2 = pb.getField(2, pubkey)
|
||||
let r3 = pb.getField(3, exchanges)
|
||||
let r4 = pb.getField(4, ciphers)
|
||||
let r5 = pb.getField(5, hashes)
|
||||
|
||||
r1.get(false) and r2.get(false) and r3.get(false) and
|
||||
r4.get(false) and r5.get(false)
|
||||
|
||||
proc createExchange*(epubkey, signature: openArray[byte]): seq[byte] =
|
||||
## Create SecIO exchange message using ephemeral public key ``epubkey`` and
|
||||
## signature of proposal blocks ``signature``.
|
||||
var msg = initProtoBuffer({WithUint32BeLength})
|
||||
msg.write(1, epubkey)
|
||||
msg.write(2, signature)
|
||||
msg.finish()
|
||||
msg.buffer
|
||||
|
||||
proc decodeExchange*(message: seq[byte],
|
||||
pubkey, signature: var seq[byte]): bool =
|
||||
## Parse incoming exchange message and decode remote ephemeral public key
|
||||
## ``pubkey`` and signature ``signature``.
|
||||
##
|
||||
## Procedure returns ``true`` on success and ``false`` on error.
|
||||
var pb = initProtoBuffer(message)
|
||||
let r1 = pb.getField(1, pubkey)
|
||||
let r2 = pb.getField(2, signature)
|
||||
r1.get(false) and r2.get(false)
|
||||
|
||||
## Serialization/Deserialization helpers
|
||||
|
||||
proc write*(vb: var VBuffer, pubkey: PublicKey) {.
|
||||
|
||||
@@ -553,7 +553,7 @@ proc getSocket(pattern: string,
|
||||
closeSocket(sock)
|
||||
|
||||
# This is forward declaration needed for newDaemonApi()
|
||||
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async, gcsafe.}
|
||||
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.}
|
||||
|
||||
proc copyEnv(): StringTableRef =
|
||||
## This procedure copy all environment variables into StringTable.
|
||||
@@ -755,7 +755,7 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},
|
||||
|
||||
# Starting daemon process
|
||||
# echo "Starting ", cmd, " ", args.join(" ")
|
||||
api.process =
|
||||
api.process =
|
||||
exceptionToAssert:
|
||||
startProcess(cmd, "", args, env, {poParentStreams})
|
||||
# Waiting until daemon will not be bound to control socket.
|
||||
@@ -1032,7 +1032,7 @@ proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
|
||||
var value: seq[byte]
|
||||
if pbDhtResponse.getRequiredField(3, value).isErr():
|
||||
raise newException(DaemonLocalError, "Missing required DHT field `value`!")
|
||||
|
||||
|
||||
return initProtoBuffer(value)
|
||||
else:
|
||||
raise newException(DaemonLocalError, "Wrong message type!")
|
||||
|
||||
@@ -247,7 +247,7 @@ proc toString*(msg: ProtoMessage, dump = true): string =
|
||||
else: "[REMOTE]"
|
||||
local & direction & remote
|
||||
let seqid = block:
|
||||
msg.seqID.wihValue(seqid): "seqID = " & $seqid & " "
|
||||
msg.seqID.withValue(seqid): "seqID = " & $seqid & " "
|
||||
else: ""
|
||||
let mtype = block:
|
||||
msg.mtype.withValue(typ): "type = " & $typ & " "
|
||||
|
||||
@@ -26,7 +26,7 @@ method connect*(
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out) {.async, base.} =
|
||||
dir = Direction.Out) {.async, base.} =
|
||||
## connect remote peer without negotiating
|
||||
## a protocol
|
||||
##
|
||||
|
||||
@@ -53,7 +53,7 @@ proc dialAndUpgrade(
|
||||
peerId: Opt[PeerId],
|
||||
hostname: string,
|
||||
address: MultiAddress,
|
||||
upgradeDir = Direction.Out):
|
||||
dir = Direction.Out):
|
||||
Future[Muxer] {.async.} =
|
||||
|
||||
for transport in self.transports: # for each transport
|
||||
@@ -75,23 +75,29 @@ proc dialAndUpgrade(
|
||||
|
||||
let mux =
|
||||
try:
|
||||
dialed.transportDir = upgradeDir
|
||||
await transport.upgrade(dialed, upgradeDir, peerId)
|
||||
# This is for the very specific case of a simultaneous dial during DCUtR. In this case, both sides will have
|
||||
# an Outbound direction at the transport level. Therefore we update the DCUtR initiator transport direction to Inbound.
|
||||
# The if below is more general and might handle other use cases in the future.
|
||||
if dialed.dir != dir:
|
||||
dialed.dir = dir
|
||||
await transport.upgrade(dialed, peerId)
|
||||
except CancelledError as exc:
|
||||
await dialed.close()
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
# If we failed to establish the connection through one transport,
|
||||
# we won't succeed through another - no use in trying again
|
||||
await dialed.close()
|
||||
debug "Upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
|
||||
if exc isnot CancelledError:
|
||||
if upgradeDir == Direction.Out:
|
||||
libp2p_failed_upgrades_outgoing.inc()
|
||||
else:
|
||||
libp2p_failed_upgrades_incoming.inc()
|
||||
debug "Connection upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
|
||||
if dialed.dir == Direction.Out:
|
||||
libp2p_failed_upgrades_outgoing.inc()
|
||||
else:
|
||||
libp2p_failed_upgrades_incoming.inc()
|
||||
|
||||
# Try other address
|
||||
return nil
|
||||
|
||||
doAssert not isNil(mux), "connection died after upgrade " & $upgradeDir
|
||||
doAssert not isNil(mux), "connection died after upgrade " & $dialed.dir
|
||||
debug "Dial successful", peerId = mux.connection.peerId
|
||||
return mux
|
||||
return nil
|
||||
@@ -128,7 +134,7 @@ proc dialAndUpgrade(
|
||||
self: Dialer,
|
||||
peerId: Opt[PeerId],
|
||||
addrs: seq[MultiAddress],
|
||||
upgradeDir = Direction.Out):
|
||||
dir = Direction.Out):
|
||||
Future[Muxer] {.async.} =
|
||||
|
||||
debug "Dialing peer", peerId = peerId.get(default(PeerId))
|
||||
@@ -146,7 +152,7 @@ proc dialAndUpgrade(
|
||||
else: await self.nameResolver.resolveMAddress(expandedAddress)
|
||||
|
||||
for resolvedAddress in resolvedAddresses:
|
||||
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, upgradeDir)
|
||||
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
|
||||
if not isNil(result):
|
||||
return result
|
||||
|
||||
@@ -164,7 +170,7 @@ proc internalConnect(
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial: bool,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out):
|
||||
dir = Direction.Out):
|
||||
Future[Muxer] {.async.} =
|
||||
if Opt.some(self.localPeerId) == peerId:
|
||||
raise newException(CatchableError, "can't dial self!")
|
||||
@@ -182,7 +188,7 @@ proc internalConnect(
|
||||
let slot = self.connManager.getOutgoingSlot(forceDial)
|
||||
let muxed =
|
||||
try:
|
||||
await self.dialAndUpgrade(peerId, addrs, upgradeDir)
|
||||
await self.dialAndUpgrade(peerId, addrs, dir)
|
||||
except CatchableError as exc:
|
||||
slot.release()
|
||||
raise exc
|
||||
@@ -209,7 +215,7 @@ method connect*(
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out) {.async.} =
|
||||
dir = Direction.Out) {.async.} =
|
||||
## connect remote peer without negotiating
|
||||
## a protocol
|
||||
##
|
||||
@@ -217,7 +223,7 @@ method connect*(
|
||||
if self.connManager.connCount(peerId) > 0 and reuseConnection:
|
||||
return
|
||||
|
||||
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, upgradeDir)
|
||||
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, dir)
|
||||
|
||||
method connect*(
|
||||
self: Dialer,
|
||||
|
||||
@@ -19,7 +19,8 @@ func toException*(e: string): ref LPError =
|
||||
# sadly nim needs more love for hygienic templates
|
||||
# so here goes the macro, its based on the proc/template version
|
||||
# and uses quote do so it's quite readable
|
||||
macro checkFutures*[T](futs: seq[Future[T]], exclude: untyped = []): untyped =
|
||||
# TODO https://github.com/nim-lang/Nim/issues/22936
|
||||
macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
|
||||
let nexclude = exclude.len
|
||||
case nexclude
|
||||
of 0:
|
||||
@@ -43,34 +44,3 @@ macro checkFutures*[T](futs: seq[Future[T]], exclude: untyped = []): untyped =
|
||||
# We still don't abort but warn
|
||||
debug "A future has failed, enable trace logging for details", error=exc.name
|
||||
trace "Exception details", msg=exc.msg
|
||||
|
||||
proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] =
|
||||
var futs: seq[Future[T]]
|
||||
for fut in args:
|
||||
futs &= fut
|
||||
proc call() {.async.} =
|
||||
var first: ref CatchableError = nil
|
||||
futs = await allFinished(futs)
|
||||
for fut in futs:
|
||||
if fut.failed:
|
||||
let err = fut.readError()
|
||||
if err of Defect:
|
||||
raise err
|
||||
else:
|
||||
if err of CancelledError:
|
||||
raise err
|
||||
if isNil(first):
|
||||
first = err
|
||||
if not isNil(first):
|
||||
raise first
|
||||
|
||||
return call()
|
||||
|
||||
template tryAndWarn*(message: static[string]; body: untyped): untyped =
|
||||
try:
|
||||
body
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
debug "An exception has ocurred, enable trace logging for details", name = exc.name, msg = message
|
||||
trace "Exception details", exc = exc.msg
|
||||
|
||||
@@ -13,8 +13,8 @@
|
||||
{.push public.}
|
||||
|
||||
import pkg/chronos, chronicles
|
||||
import std/[nativesockets, hashes]
|
||||
import tables, strutils, sets, stew/shims/net
|
||||
import std/[nativesockets, net, hashes]
|
||||
import tables, strutils, sets
|
||||
import multicodec, multihash, multibase, transcoder, vbuffer, peerid,
|
||||
protobuf/minprotobuf, errors, utility
|
||||
import stew/[base58, base32, endians2, results]
|
||||
@@ -119,23 +119,46 @@ proc ip6VB(vb: var VBuffer): bool =
|
||||
if vb.readArray(a.address_v6) == 16:
|
||||
result = true
|
||||
|
||||
proc ip6zoneStB(s: string, vb: var VBuffer): bool =
|
||||
## IPv6 stringToBuffer() implementation.
|
||||
template pathStringToBuffer(s: string, vb: var VBuffer): bool =
|
||||
if len(s) > 0:
|
||||
vb.writeSeq(s)
|
||||
result = true
|
||||
true
|
||||
else:
|
||||
false
|
||||
|
||||
template pathBufferToString(vb: var VBuffer, s: var string): bool =
|
||||
s = ""
|
||||
if (vb.readSeq(s) > 0) and (len(s) > 0):
|
||||
true
|
||||
else:
|
||||
false
|
||||
|
||||
template pathBufferToStringNoSlash(vb: var VBuffer, s: var string): bool =
|
||||
s = ""
|
||||
if (vb.readSeq(s) > 0) and (len(s) > 0) and (s.find('/') == -1):
|
||||
true
|
||||
else:
|
||||
false
|
||||
|
||||
template pathValidateBuffer(vb: var VBuffer): bool =
|
||||
var s = ""
|
||||
pathBufferToString(vb, s)
|
||||
|
||||
template pathValidateBufferNoSlash(vb: var VBuffer): bool =
|
||||
var s = ""
|
||||
pathBufferToStringNoSlash(vb, s)
|
||||
|
||||
proc ip6zoneStB(s: string, vb: var VBuffer): bool =
|
||||
## IPv6 stringToBuffer() implementation.
|
||||
pathStringToBuffer(s, vb)
|
||||
|
||||
proc ip6zoneBtS(vb: var VBuffer, s: var string): bool =
|
||||
## IPv6 bufferToString() implementation.
|
||||
if vb.readSeq(s) > 0:
|
||||
result = true
|
||||
pathBufferToStringNoSlash(vb, s)
|
||||
|
||||
proc ip6zoneVB(vb: var VBuffer): bool =
|
||||
## IPv6 validateBuffer() implementation.
|
||||
var s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
if s.find('/') == -1:
|
||||
result = true
|
||||
pathValidateBufferNoSlash(vb)
|
||||
|
||||
proc portStB(s: string, vb: var VBuffer): bool =
|
||||
## Port number stringToBuffer() implementation.
|
||||
@@ -154,7 +177,8 @@ proc portBtS(vb: var VBuffer, s: var string): bool =
|
||||
## Port number bufferToString() implementation.
|
||||
var port: array[2, byte]
|
||||
if vb.readArray(port) == 2:
|
||||
var nport = (safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
|
||||
let nport =
|
||||
(safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
|
||||
s = $nport
|
||||
result = true
|
||||
|
||||
@@ -214,7 +238,8 @@ proc onionBtS(vb: var VBuffer, s: var string): bool =
|
||||
## ONION address bufferToString() implementation.
|
||||
var buf: array[12, byte]
|
||||
if vb.readArray(buf) == 12:
|
||||
var nport = (safeConvert[uint16](buf[10]) shl 8) or safeConvert[uint16](buf[11])
|
||||
let nport =
|
||||
(safeConvert[uint16](buf[10]) shl 8) or safeConvert[uint16](buf[11])
|
||||
s = Base32Lower.encode(buf.toOpenArray(0, 9))
|
||||
s.add(":")
|
||||
s.add($nport)
|
||||
@@ -248,7 +273,8 @@ proc onion3BtS(vb: var VBuffer, s: var string): bool =
|
||||
## ONION address bufferToString() implementation.
|
||||
var buf: array[37, byte]
|
||||
if vb.readArray(buf) == 37:
|
||||
var nport = (safeConvert[uint16](buf[35]) shl 8) or safeConvert[uint16](buf[36])
|
||||
var nport =
|
||||
(safeConvert[uint16](buf[35]) shl 8) or safeConvert[uint16](buf[36])
|
||||
s = Base32Lower.encode(buf.toOpenArray(0, 34))
|
||||
s.add(":")
|
||||
s.add($nport)
|
||||
@@ -262,40 +288,27 @@ proc onion3VB(vb: var VBuffer): bool =
|
||||
|
||||
proc unixStB(s: string, vb: var VBuffer): bool =
|
||||
## Unix socket name stringToBuffer() implementation.
|
||||
if len(s) > 0:
|
||||
vb.writeSeq(s)
|
||||
result = true
|
||||
pathStringToBuffer(s, vb)
|
||||
|
||||
proc unixBtS(vb: var VBuffer, s: var string): bool =
|
||||
## Unix socket name bufferToString() implementation.
|
||||
s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
result = true
|
||||
pathBufferToString(vb, s)
|
||||
|
||||
proc unixVB(vb: var VBuffer): bool =
|
||||
## Unix socket name validateBuffer() implementation.
|
||||
var s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
result = true
|
||||
pathValidateBuffer(vb)
|
||||
|
||||
proc dnsStB(s: string, vb: var VBuffer): bool =
|
||||
## DNS name stringToBuffer() implementation.
|
||||
if len(s) > 0:
|
||||
vb.writeSeq(s)
|
||||
result = true
|
||||
pathStringToBuffer(s, vb)
|
||||
|
||||
proc dnsBtS(vb: var VBuffer, s: var string): bool =
|
||||
## DNS name bufferToString() implementation.
|
||||
s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
result = true
|
||||
pathBufferToStringNoSlash(vb, s)
|
||||
|
||||
proc dnsVB(vb: var VBuffer): bool =
|
||||
## DNS name validateBuffer() implementation.
|
||||
var s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
if s.find('/') == -1:
|
||||
result = true
|
||||
pathValidateBufferNoSlash(vb)
|
||||
|
||||
proc mapEq*(codec: string): MaPattern =
|
||||
## ``Equal`` operator for pattern
|
||||
@@ -398,6 +411,9 @@ const
|
||||
MAProtocol(
|
||||
mcodec: multiCodec("quic"), kind: Marker, size: 0
|
||||
),
|
||||
MAProtocol(
|
||||
mcodec: multiCodec("quic-v1"), kind: Marker, size: 0
|
||||
),
|
||||
MAProtocol(
|
||||
mcodec: multiCodec("ip6zone"), kind: Length, size: 0,
|
||||
coder: TranscoderIP6Zone
|
||||
@@ -657,7 +673,8 @@ proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
|
||||
inc(offset)
|
||||
ok(res)
|
||||
|
||||
proc getParts[U, V](ma: MultiAddress, slice: HSlice[U, V]): MaResult[MultiAddress] =
|
||||
proc getParts[U, V](ma: MultiAddress,
|
||||
slice: HSlice[U, V]): MaResult[MultiAddress] =
|
||||
when slice.a is BackwardsIndex or slice.b is BackwardsIndex:
|
||||
let maLength = ? len(ma)
|
||||
template normalizeIndex(index): int =
|
||||
@@ -671,7 +688,8 @@ proc getParts[U, V](ma: MultiAddress, slice: HSlice[U, V]): MaResult[MultiAddres
|
||||
? res.append(? ma[i])
|
||||
ok(res)
|
||||
|
||||
proc `[]`*(ma: MultiAddress, i: int | BackwardsIndex): MaResult[MultiAddress] {.inline.} =
|
||||
proc `[]`*(ma: MultiAddress,
|
||||
i: int | BackwardsIndex): MaResult[MultiAddress] {.inline.} =
|
||||
## Returns part with index ``i`` of MultiAddress ``ma``.
|
||||
when i is BackwardsIndex:
|
||||
let maLength = ? len(ma)
|
||||
@@ -766,7 +784,7 @@ proc toString*(value: MultiAddress): MaResult[string] =
|
||||
if not proto.coder.bufferToString(vb.data, part):
|
||||
return err("multiaddress: Decoding protocol error")
|
||||
parts.add($(proto.mcodec))
|
||||
if proto.kind == Path and part[0] == '/':
|
||||
if len(part) > 0 and (proto.kind == Path) and (part[0] == '/'):
|
||||
parts.add(part[1..^1])
|
||||
else:
|
||||
parts.add(part)
|
||||
@@ -955,7 +973,7 @@ proc init*(mtype: typedesc[MultiAddress]): MultiAddress =
|
||||
## Initialize empty MultiAddress.
|
||||
result.data = initVBuffer()
|
||||
|
||||
proc init*(mtype: typedesc[MultiAddress], address: ValidIpAddress,
|
||||
proc init*(mtype: typedesc[MultiAddress], address: IpAddress,
|
||||
protocol: IpTransportProtocol, port: Port): MultiAddress =
|
||||
var res: MultiAddress
|
||||
res.data = initVBuffer()
|
||||
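The hunk above replaces stew's ValidIpAddress with std/net's IpAddress in the MultiAddress constructor. A short sketch of the updated overload; the tcpProtocol enum value is assumed to come from chronos' IpTransportProtocol, and the address and port are illustrative:

```nim
import std/net
import chronos
import libp2p/multiaddress

let
  ip = parseIpAddress("127.0.0.1")                     # plain std/net IpAddress
  ma = MultiAddress.init(ip, tcpProtocol, Port(4040))  # overload changed above
echo $ma   # expected to print /ip4/127.0.0.1/tcp/4040
```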
@@ -1117,16 +1135,20 @@ proc getField*(pb: ProtoBuffer, field: int,
|
||||
if not(res):
|
||||
ok(false)
|
||||
else:
|
||||
value = MultiAddress.init(buffer).valueOr: return err(ProtoError.IncorrectBlob)
|
||||
value = MultiAddress.init(buffer).valueOr:
|
||||
return err(ProtoError.IncorrectBlob)
|
||||
ok(true)
|
||||
|
||||
proc getRepeatedField*(pb: ProtoBuffer, field: int,
|
||||
value: var seq[MultiAddress]): ProtoResult[bool] {.
|
||||
inline.} =
|
||||
## Read repeated field from protobuf message. ``field`` is field number. If the message is malformed, an error is returned.
|
||||
## If field is not present in message, then ``ok(false)`` is returned and value is empty. If field is present,
|
||||
## but no items could be parsed, then ``err(ProtoError.IncorrectBlob)`` is returned and value is empty.
|
||||
## If field is present and some item could be parsed, then ``true`` is returned and value contains the parsed values.
|
||||
## Read repeated field from protobuf message. ``field`` is field number.
|
||||
## If the message is malformed, an error is returned. If field is not present
|
||||
## in message, then ``ok(false)`` is returned and value is empty. If field is
|
||||
## present, but no items could be parsed, then
|
||||
## ``err(ProtoError.IncorrectBlob)`` is returned and value is empty.
|
||||
## If field is present and some item could be parsed, then ``true`` is
|
||||
## returned and value contains the parsed values.
|
||||
var items: seq[seq[byte]]
|
||||
value.setLen(0)
|
||||
let res = ? pb.getRepeatedField(field, items)
|
||||
|
||||
@@ -193,6 +193,7 @@ const MultiCodecList = [
|
||||
("https", 0x01BB),
|
||||
("tls", 0x01C0),
|
||||
("quic", 0x01CC),
|
||||
("quic-v1", 0x01CD),
|
||||
("ws", 0x01DD),
|
||||
("wss", 0x01DE),
|
||||
("p2p-websocket-star", 0x01DF), # not in multicodec list
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -45,15 +45,18 @@ proc new*(T: typedesc[MultistreamSelect]): T =
|
||||
)
|
||||
|
||||
template validateSuffix(str: string): untyped =
|
||||
if str.endsWith("\n"):
|
||||
str.removeSuffix("\n")
|
||||
else:
|
||||
raise newException(MultiStreamError, "MultistreamSelect failed, malformed message")
|
||||
if str.endsWith("\n"):
|
||||
str.removeSuffix("\n")
|
||||
else:
|
||||
raise (ref MultiStreamError)(msg:
|
||||
"MultistreamSelect failed, malformed message")
|
||||
|
||||
proc select*(_: MultistreamSelect | type MultistreamSelect,
|
||||
conn: Connection,
|
||||
proto: seq[string]):
|
||||
Future[string] {.async.} =
|
||||
proc select*(
|
||||
_: MultistreamSelect | type MultistreamSelect,
|
||||
conn: Connection,
|
||||
proto: seq[string]
|
||||
): Future[string] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MultiStreamError]).} =
|
||||
trace "initiating handshake", conn, codec = Codec
|
||||
## select a remote protocol
|
||||
await conn.writeLp(Codec & "\n") # write handshake
|
||||
@@ -66,7 +69,7 @@ proc select*(_: MultistreamSelect | type MultistreamSelect,
|
||||
|
||||
if s != Codec:
|
||||
notice "handshake failed", conn, codec = s
|
||||
raise newException(MultiStreamError, "MultistreamSelect handshake failed")
|
||||
raise (ref MultiStreamError)(msg: "MultistreamSelect handshake failed")
|
||||
else:
|
||||
trace "multistream handshake success", conn
|
||||
|
||||
@@ -98,19 +101,29 @@ proc select*(_: MultistreamSelect | type MultistreamSelect,
|
||||
# No alternatives, fail
|
||||
return ""
|
||||
|
||||
proc select*(_: MultistreamSelect | type MultistreamSelect,
|
||||
conn: Connection,
|
||||
proto: string): Future[bool] {.async.} =
|
||||
proc select*(
|
||||
_: MultistreamSelect | type MultistreamSelect,
|
||||
conn: Connection,
|
||||
proto: string
|
||||
): Future[bool] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MultiStreamError]).} =
|
||||
if proto.len > 0:
|
||||
return (await MultistreamSelect.select(conn, @[proto])) == proto
|
||||
(await MultistreamSelect.select(conn, @[proto])) == proto
|
||||
else:
|
||||
return (await MultistreamSelect.select(conn, @[])) == Codec
|
||||
(await MultistreamSelect.select(conn, @[])) == Codec
|
||||
|
||||
proc select*(m: MultistreamSelect, conn: Connection): Future[bool] =
|
||||
proc select*(
|
||||
m: MultistreamSelect,
|
||||
conn: Connection
|
||||
): Future[bool] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MultiStreamError], raw: true).} =
|
||||
m.select(conn, "")
|
||||
|
||||
proc list*(m: MultistreamSelect,
|
||||
conn: Connection): Future[seq[string]] {.async.} =
|
||||
proc list*(
|
||||
m: MultistreamSelect,
|
||||
conn: Connection
|
||||
): Future[seq[string]] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MultiStreamError]).} =
|
||||
## list remote protos requests on connection
|
||||
if not await m.select(conn):
|
||||
return
|
||||
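The select overloads above move from plain {.async.} to chronos' typed async raises tracking. A minimal, standalone sketch of that annotation style (the proc and error type are made up for illustration):

```nim
import chronos

type ExampleError = object of CatchableError

proc fetchGreeting(fail: bool): Future[string] {.async: (raises: [
    CancelledError, ExampleError]).} =
  # Only CancelledError and ExampleError may escape this proc;
  # raising anything else here would be rejected at compile time.
  if fail:
    raise (ref ExampleError)(msg: "greeting unavailable")
  return "hello"

when isMainModule:
  echo waitFor fetchGreeting(false)
```

The raw: true variant used for the one-line select(m, conn) wrapper appears to skip the async transformation and simply forwards the already-typed future.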
@@ -126,12 +139,13 @@ proc list*(m: MultistreamSelect,
|
||||
result = list
|
||||
|
||||
proc handle*(
|
||||
_: type MultistreamSelect,
|
||||
conn: Connection,
|
||||
protos: seq[string],
|
||||
matchers = newSeq[Matcher](),
|
||||
active: bool = false,
|
||||
): Future[string] {.async, gcsafe.} =
|
||||
_: type MultistreamSelect,
|
||||
conn: Connection,
protos: seq[string],
matchers = newSeq[Matcher](),
active: bool = false
): Future[string] {.async: (raises: [
CancelledError, LPStreamError, MultiStreamError]).} =
trace "Starting multistream negotiation", conn, handshaked = active
var handshaked = active
while not conn.atEof:
@@ -140,8 +154,8 @@ proc handle*(

if not handshaked and ms != Codec:
debug "expected handshake message", conn, instead=ms
raise newException(CatchableError,
"MultistreamSelect handling failed, invalid first message")
raise (ref MultiStreamError)(msg:
"MultistreamSelect handling failed, invalid first message")

trace "handle: got request", conn, ms
if ms.len() <= 0:
@@ -172,14 +186,16 @@ proc handle*(
trace "no handlers", conn, protocol = ms
await conn.writeLp(Na)

proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
proc handle*(
m: MultistreamSelect,
conn: Connection,
active: bool = false) {.async: (raises: [CancelledError]).} =
trace "Starting multistream handler", conn, handshaked = active
var
handshaked = active
protos: seq[string]
matchers: seq[Matcher]
for h in m.handlers:
if not isNil(h.match):
if h.match != nil:
matchers.add(h.match)
for proto in h.protos:
protos.add(proto)
@@ -187,12 +203,13 @@ proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.asy
try:
let ms = await MultistreamSelect.handle(conn, protos, matchers, active)
for h in m.handlers:
if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
if (h.match != nil and h.match(ms)) or h.protos.contains(ms):
trace "found handler", conn, protocol = ms

var protocolHolder = h
let maxIncomingStreams = protocolHolder.protocol.maxIncomingStreams
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >= maxIncomingStreams:
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >=
maxIncomingStreams:
debug "Max streams for protocol reached, blocking new stream",
conn, protocol = ms, maxIncomingStreams
return
@@ -229,10 +246,14 @@ proc addHandler*(m: MultistreamSelect,
matcher: Matcher = nil) =
addHandler(m, @[codec], protocol, matcher)

proc addHandler*(m: MultistreamSelect,
codec: string,
handler: LPProtoHandler,
matcher: Matcher = nil) =
proc addHandler*[E](
m: MultistreamSelect,
codec: string,
handler: LPProtoHandler |
proc (
conn: Connection,
proto: string): InternalRaisesFuture[void, E],
matcher: Matcher = nil) =
## helper to allow registering pure handlers
trace "registering proto handler", proto = codec
let protocol = new LPProtocol
@@ -243,8 +264,34 @@ proc addHandler*(m: MultistreamSelect,
protocol: protocol,
match: matcher))

proc start*(m: MultistreamSelect) {.async.} =
await allFutures(m.handlers.mapIt(it.protocol.start()))
proc start*(m: MultistreamSelect) {.async: (raises: [CancelledError]).} =
# Nim 1.6.18: Using `mapIt` results in a seq of `.Raising([])`
# TODO https://github.com/nim-lang/Nim/issues/23445
var futs = newSeqOfCap[Future[void].Raising([CancelledError])](m.handlers.len)
for it in m.handlers:
futs.add it.protocol.start()
try:
await allFutures(futs)
for fut in futs:
await fut
except CancelledError as exc:
var pending: seq[Future[void].Raising([])]
doAssert m.handlers.len == futs.len, "Handlers modified while starting"
for i, fut in futs:
if not fut.finished:
pending.add fut.cancelAndWait()
elif fut.completed:
pending.add m.handlers[i].protocol.stop()
else:
static: doAssert typeof(fut).E is (CancelledError,)
await noCancel allFutures(pending)
raise exc

proc stop*(m: MultistreamSelect) {.async.} =
await allFutures(m.handlers.mapIt(it.protocol.stop()))
proc stop*(m: MultistreamSelect) {.async: (raises: []).} =
# Nim 1.6.18: Using `mapIt` results in a seq of `.Raising([CancelledError])`
var futs = newSeqOfCap[Future[void].Raising([])](m.handlers.len)
for it in m.handlers:
futs.add it.protocol.stop()
await noCancel allFutures(futs)
for fut in futs:
await fut

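For orientation, this is roughly how a handler is registered through the typed addHandler overload above once it carries an explicit raises annotation. The codec string and the handler body below are illustrative assumptions, not part of this change:

# Sketch only: an exception-annotated stream handler registered on an
# existing MultistreamSelect instance `ms`.
proc echoHandler(conn: Connection, proto: string) {.async: (raises: []).} =
  try:
    let msg = await conn.readLp(1024)   # hypothetical max payload size
    await conn.writeLp(msg)
  except CancelledError, LPStreamError:
    discard
  finally:
    await conn.close()

ms.addHandler("/echo/1.0.0", echoHandler)
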
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -42,7 +42,10 @@ const MaxMsgSize* = 1 shl 20 # 1mb
proc newInvalidMplexMsgType*(): ref InvalidMplexMsgType =
newException(InvalidMplexMsgType, "invalid message type")

proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
proc readMsg*(
conn: Connection
): Future[Msg] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
let header = await conn.readVarint()
trace "read header varint", varint = header, conn

@@ -55,10 +58,13 @@ proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =

return (header shr 3, MessageType(msgType), data)

proc writeMsg*(conn: Connection,
id: uint64,
msgType: MessageType,
data: seq[byte] = @[]): Future[void] =
proc writeMsg*(
conn: Connection,
id: uint64,
msgType: MessageType,
data: seq[byte] = @[]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
var
left = data.len
offset = 0
@@ -84,8 +90,11 @@ proc writeMsg*(conn: Connection,
# message gets written before some of the chunks
conn.write(buf.buffer)

proc writeMsg*(conn: Connection,
id: uint64,
msgType: MessageType,
data: string): Future[void] =
proc writeMsg*(
conn: Connection,
id: uint64,
msgType: MessageType,
data: string
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
conn.writeMsg(id, msgType, data.toBytes())

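As a reference point for the framing handled by readMsg/writeMsg above: per the mplex spec, every frame starts with a varint header that packs the channel id and the message type, followed by a length-prefixed payload. A small illustration of the header arithmetic, with made-up values:

# id and message type share one varint: header = (id shl 3) or ord(msgType),
# which is why readMsg above recovers the id with `header shr 3`.
let
  id = 5'u64
  msgType = 2'u64                  # e.g. MsgOut, assuming the usual enum order
  header = (id shl 3) or msgType
assert header shr 3 == id
assert (header and 0x7) == msgType
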
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -28,7 +28,8 @@ when defined(libp2p_mplex_metrics):
declareHistogram libp2p_mplex_qtime, "message queuing time"

when defined(libp2p_network_protocols_metrics):
declareCounter libp2p_protocols_bytes, "total sent or received bytes", ["protocol", "direction"]
declareCounter libp2p_protocols_bytes,
"total sent or received bytes", ["protocol", "direction"]

## Channel half-closed states
##
@@ -64,16 +65,16 @@ type

func shortLog*(s: LPChannel): auto =
try:
if s.isNil: "LPChannel(nil)"
if s == nil: "LPChannel(nil)"
elif s.name != $s.oid and s.name.len > 0:
&"{shortLog(s.conn.peerId)}:{s.oid}:{s.name}"
else: &"{shortLog(s.conn.peerId)}:{s.oid}"
except ValueError as exc:
raise newException(Defect, exc.msg)
raiseAssert(exc.msg)

chronicles.formatIt(LPChannel): shortLog(it)

proc open*(s: LPChannel) {.async, gcsafe.} =
proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
trace "Opening channel", s, conn = s.conn
if s.conn.isClosed:
return
@@ -82,20 +83,20 @@ proc open*(s: LPChannel) {.async, gcsafe.} =
s.isOpen = true
except CancelledError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
await s.conn.close()
raise exc

method closed*(s: LPChannel): bool =
s.closedLocal

proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
proc closeUnderlying(s: LPChannel): Future[void] {.async: (raises: []).} =
## Channels may be closed for reading and writing in any order - we'll close
## the underlying bufferstream when both directions are closed
if s.closedLocal and s.atEof():
await procCall BufferStream(s).close()

proc reset*(s: LPChannel) {.async, gcsafe.} =
proc reset*(s: LPChannel) {.async: (raises: []).} =
if s.isClosed:
trace "Already closed", s
return
@@ -108,22 +109,21 @@ proc reset*(s: LPChannel) {.async, gcsafe.} =

if s.isOpen and not s.conn.isClosed:
# If the connection is still active, notify the other end
proc resetMessage() {.async.} =
proc resetMessage() {.async: (raises: []).} =
try:
trace "sending reset message", s, conn = s.conn
await s.conn.writeMsg(s.id, s.resetCode) # write reset
except CatchableError as exc:
# No cancellations
await s.conn.close()
await noCancel s.conn.writeMsg(s.id, s.resetCode) # write reset
except LPStreamError as exc:
trace "Can't send reset message", s, conn = s.conn, msg = exc.msg
await s.conn.close()

asyncSpawn resetMessage()

await s.closeImpl() # noraises, nocancels
await s.closeImpl()

trace "Channel reset", s

method close*(s: LPChannel) {.async, gcsafe.} =
method close*(s: LPChannel) {.async: (raises: []).} =
## Close channel for writing - a message will be sent to the other peer
## informing them that the channel is closed and that we're waiting for
## their acknowledgement.
@@ -137,10 +137,9 @@ method close*(s: LPChannel) {.async, gcsafe.} =
if s.isOpen and not s.conn.isClosed:
try:
await s.conn.writeMsg(s.id, s.closeCode) # write close
except CancelledError as exc:
except CancelledError:
await s.conn.close()
raise exc
except CatchableError as exc:
except LPStreamError as exc:
# It's harmless that close message cannot be sent - the connection is
# likely down already
await s.conn.close()
@@ -154,16 +153,17 @@ method initStream*(s: LPChannel) =
if s.objName.len == 0:
s.objName = LPChannelTrackerName

s.timeoutHandler = proc(): Future[void] {.gcsafe.} =
s.timeoutHandler = proc(): Future[void] {.async: (raises: [], raw: true).} =
trace "Idle timeout expired, resetting LPChannel", s
s.reset()

procCall BufferStream(s).initStream()

method readOnce*(s: LPChannel,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
method readOnce*(
s: LPChannel,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
## Mplex relies on reading being done regularly from every channel, or all
## channels are blocked - in particular, this means that reading from one
## channel must not be done from within a callback / read handler of another
@@ -186,15 +186,19 @@ method readOnce*(s: LPChannel,
if bytes == 0:
await s.closeUnderlying()
return bytes
except CatchableError as exc:
# readOnce in BufferStream generally raises on EOF or cancellation - for
# the former, resetting is harmless, for the latter it's necessary because
# data has been lost in s.readBuf and there's no way to gracefully recover /
# use the channel any more
except CancelledError as exc:
await s.reset()
raise exc
except LPStreamError as exc:
# Resetting is necessary because data has been lost in s.readBuf and
# there's no way to gracefully recover / use the channel any more
await s.reset()
raise newLPStreamConnDownError(exc)

proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
proc prepareWrite(
s: LPChannel,
msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
# prepareWrite is the slow path of writing a message - see conditions in
# write
if s.remoteReset:
@@ -222,7 +226,10 @@ proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
await s.conn.writeMsg(s.id, s.msgCode, msg)

proc completeWrite(
s: LPChannel, fut: Future[void], msgLen: int): Future[void] {.async.} =
s: LPChannel,
fut: Future[void].Raising([CancelledError, LPStreamError]),
msgLen: int
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
try:
s.writes += 1

@@ -235,7 +242,10 @@ proc completeWrite(

when defined(libp2p_network_protocols_metrics):
if s.protocol.len > 0:
libp2p_protocols_bytes.inc(msgLen.int64, labelValues=[s.protocol, "out"])
# This crashes on Nim 2.0.2 with `--mm:orc` during `nimble test`
# https://github.com/status-im/nim-metrics/issues/79
libp2p_protocols_bytes.inc(
msgLen.int64, labelValues = [s.protocol, "out"])

s.activity = true
except CancelledError as exc:
@@ -247,7 +257,7 @@ proc completeWrite(
raise exc
except LPStreamEOFError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
trace "exception in lpchannel write handler", s, msg = exc.msg
await s.reset()
await s.conn.close()
@@ -255,7 +265,11 @@ proc completeWrite(
finally:
s.writes -= 1

method write*(s: LPChannel, msg: seq[byte]): Future[void] =
method write*(
s: LPChannel,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
## Write to mplex channel - there may be up to MaxWrite concurrent writes
## pending after which the peer is disconnected

@@ -276,13 +290,12 @@ method write*(s: LPChannel, msg: seq[byte]): Future[void] =
method getWrapped*(s: LPChannel): Connection = s.conn

proc init*(
L: type LPChannel,
id: uint64,
conn: Connection,
initiator: bool,
name: string = "",
timeout: Duration = DefaultChanTimeout): LPChannel =

L: type LPChannel,
id: uint64,
conn: Connection,
initiator: bool,
name: string = "",
timeout: Duration = DefaultChanTimeout): LPChannel =
let chann = L(
id: id,
name: name,

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -56,7 +56,7 @@ proc newTooManyChannels(): ref TooManyChannels =
proc newInvalidChannelIdError(): ref InvalidChannelIdError =
newException(InvalidChannelIdError, "max allowed channel count exceeded")

proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
proc cleanupChann(m: Mplex, chann: LPChannel) {.async: (raises: []), inline.} =
## remove the local channel from the internal tables
##
try:
@@ -68,19 +68,19 @@ proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
libp2p_mplex_channels.set(
m.channels[chann.initiator].len.int64,
labelValues = [$chann.initiator, $m.connection.peerId])
except CatchableError as exc:
except CancelledError as exc:
warn "Error cleaning up mplex channel", m, chann, msg = exc.msg

proc newStreamInternal*(m: Mplex,
initiator: bool = true,
chanId: uint64 = 0,
name: string = "",
timeout: Duration): LPChannel
{.gcsafe, raises: [InvalidChannelIdError].} =
proc newStreamInternal*(
m: Mplex,
initiator: bool = true,
chanId: uint64 = 0,
name: string = "",
timeout: Duration): LPChannel {.gcsafe, raises: [InvalidChannelIdError].} =
## create new channel/stream
##
let id = if initiator:
m.currentId.inc(); m.currentId
let id =
if initiator: m.currentId.inc(); m.currentId
else: chanId

if id in m.channels[initiator]:
@@ -111,18 +111,14 @@ proc newStreamInternal*(m: Mplex,
m.channels[initiator].len.int64,
labelValues = [$initiator, $m.connection.peerId])

proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
proc handleStream(m: Mplex, chann: LPChannel) {.async: (raises: []).} =
## call the muxer stream handler for this channel
##
try:
await m.streamHandler(chann)
trace "finished handling stream", m, chann
doAssert(chann.closed, "connection not closed by handler!")
except CatchableError as exc:
trace "Exception in mplex stream handler", m, chann, msg = exc.msg
await chann.reset()
await m.streamHandler(chann)
trace "finished handling stream", m, chann
doAssert(chann.closed, "connection not closed by handler!")

method handle*(m: Mplex) {.async, gcsafe.} =
method handle*(m: Mplex) {.async: (raises: []).} =
trace "Starting mplex handler", m
try:
while not m.connection.atEof:
@@ -150,7 +146,7 @@ method handle*(m: Mplex) {.async, gcsafe.} =
else:
if m.channels[false].len > m.maxChannCount - 1:
warn "too many channels created by remote peer",
allowedMax = MaxChannelCount, m
allowedMax = MaxChannelCount, m
raise newTooManyChannels()

let name = string.fromBytes(data)
@@ -159,59 +155,65 @@ method handle*(m: Mplex) {.async, gcsafe.} =
trace "Processing channel message", m, channel, data = data.shortLog

case msgType:
of MessageType.New:
trace "created channel", m, channel
of MessageType.New:
trace "created channel", m, channel

if not isNil(m.streamHandler):
# Launch handler task
# All the errors are handled inside `handleStream()` procedure.
asyncSpawn m.handleStream(channel)
if m.streamHandler != nil:
# Launch handler task
# All the errors are handled inside `handleStream()` procedure.
asyncSpawn m.handleStream(channel)

of MessageType.MsgIn, MessageType.MsgOut:
if data.len > MaxMsgSize:
warn "attempting to send a packet larger than allowed",
allowed = MaxMsgSize, channel
raise newLPStreamLimitError()
of MessageType.MsgIn, MessageType.MsgOut:
if data.len > MaxMsgSize:
warn "attempting to send a packet larger than allowed",
allowed = MaxMsgSize, channel
raise newLPStreamLimitError()

trace "pushing data to channel", m, channel, len = data.len
try:
await channel.pushData(data)
trace "pushed data to channel", m, channel, len = data.len
except LPStreamClosedError as exc:
# Channel is being closed, but `cleanupChann` was not yet triggered.
trace "pushing data to channel failed", m, channel, len = data.len,
msg = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.
trace "pushing data to channel", m, channel, len = data.len
try:
await channel.pushData(data)
trace "pushed data to channel", m, channel, len = data.len
except LPStreamClosedError as exc:
# Channel is being closed, but `cleanupChann` was not yet triggered.
trace "pushing data to channel failed", m, channel, len = data.len,
msg = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.

of MessageType.CloseIn, MessageType.CloseOut:
await channel.pushEof()
of MessageType.ResetIn, MessageType.ResetOut:
channel.remoteReset = true
await channel.reset()
of MessageType.CloseIn, MessageType.CloseOut:
await channel.pushEof()
of MessageType.ResetIn, MessageType.ResetOut:
channel.remoteReset = true
await channel.reset()
except CancelledError:
debug "Unexpected cancellation in mplex handler", m
except LPStreamEOFError as exc:
trace "Stream EOF", m, msg = exc.msg
except CatchableError as exc:
debug "Unexpected exception in mplex read loop", m, msg = exc.msg
except LPStreamError as exc:
debug "Unexpected stream exception in mplex read loop", m, msg = exc.msg
except MuxerError as exc:
debug "Unexpected muxer exception in mplex read loop", m, msg = exc.msg
finally:
await m.close()
trace "Stopped mplex handler", m

proc new*(M: type Mplex,
conn: Connection,
inTimeout: Duration = DefaultChanTimeout,
outTimeout: Duration = DefaultChanTimeout,
maxChannCount: int = MaxChannelCount): Mplex =
proc new*(
M: type Mplex,
conn: Connection,
inTimeout: Duration = DefaultChanTimeout,
outTimeout: Duration = DefaultChanTimeout,
maxChannCount: int = MaxChannelCount): Mplex =
M(connection: conn,
inChannTimeout: inTimeout,
outChannTimeout: outTimeout,
oid: genOid(),
maxChannCount: maxChannCount)

method newStream*(m: Mplex,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
method newStream*(
m: Mplex,
name: string = "",
lazy: bool = false
): Future[Connection] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
let channel = m.newStreamInternal(timeout = m.inChannTimeout)

if not lazy:
@@ -219,7 +221,7 @@ method newStream*(m: Mplex,

return Connection(channel)

method close*(m: Mplex) {.async, gcsafe.} =
method close*(m: Mplex) {.async: (raises: []).} =
if m.isClosed:
trace "Already closed", m
return

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -23,16 +23,17 @@ type
MuxerError* = object of LPError
TooManyChannels* = object of MuxerError

StreamHandler* = proc(conn: Connection): Future[void] {.gcsafe, raises: [].}
MuxerHandler* = proc(muxer: Muxer): Future[void] {.gcsafe, raises: [].}
StreamHandler* = proc(conn: Connection): Future[void] {.async: (raises: []).}
MuxerHandler* = proc(muxer: Muxer): Future[void] {.async: (raises: []).}

Muxer* = ref object of RootObj
streamHandler*: StreamHandler
handler*: Future[void]
handler*: Future[void].Raising([])
connection*: Connection

# user provider proc that returns a constructed Muxer
MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [].}
MuxerConstructor* =
proc(conn: Connection): Muxer {.gcsafe, closure, raises: [].}

# this wraps a creator proc that knows how to make muxers
MuxerProvider* = object
@@ -40,24 +41,32 @@ type
codec*: string

func shortLog*(m: Muxer): auto =
if isNil(m): "nil"
if m == nil: "nil"
else: shortLog(m.connection)

chronicles.formatIt(Muxer): shortLog(it)

# muxer interface
method newStream*(m: Muxer, name: string = "", lazy: bool = false):
Future[Connection] {.base, async, gcsafe.} = discard
method close*(m: Muxer) {.base, async, gcsafe.} =
if not isNil(m.connection):
method newStream*(
m: Muxer,
name: string = "",
lazy: bool = false
): Future[Connection] {.base, async: (raises: [
CancelledError, LPStreamError, MuxerError], raw: true).} =
raiseAssert("Not implemented!")

method close*(m: Muxer) {.base, async: (raises: []).} =
if m.connection != nil:
await m.connection.close()
method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard

method handle*(m: Muxer): Future[void] {.base, async: (raises: []).} = discard

proc new*(
T: typedesc[MuxerProvider],
creator: MuxerConstructor,
codec: string): T {.gcsafe.} =

T: typedesc[MuxerProvider],
creator: MuxerConstructor,
codec: string): T {.gcsafe.} =
let muxerProvider = T(newMuxer: creator, codec: codec)
muxerProvider

method getStreams*(m: Muxer): seq[Connection] {.base.} = doAssert false, "not implemented"
method getStreams*(m: Muxer): seq[Connection] {.base.} =
raiseAssert("Not implemented!")

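A rough sketch of how the constructor above is typically used to wire a muxer into the upgrade path; Mplex and MplexCodec are assumed to be imported and are only one possible choice:

# Sketch only: expose mplex through a MuxerProvider.
proc mplexCreator(conn: Connection): Muxer {.gcsafe, raises: [].} =
  Mplex.new(conn)

let provider = MuxerProvider.new(mplexCreator, MplexCodec)
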
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -22,18 +22,22 @@ logScope:
const
YamuxCodec* = "/yamux/1.0.0"
YamuxVersion = 0.uint8
DefaultWindowSize = 256000
YamuxDefaultWindowSize* = 256000
MaxSendQueueSize = 256000
MaxChannelCount = 200

when defined(libp2p_yamux_metrics):
declareGauge(libp2p_yamux_channels, "yamux channels", labels = ["initiator", "peer"])
declareHistogram libp2p_yamux_send_queue, "message send queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
declareHistogram libp2p_yamux_recv_queue, "message recv queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
declareGauge libp2p_yamux_channels,
"yamux channels", labels = ["initiator", "peer"]
declareHistogram libp2p_yamux_send_queue,
"message send queue length (in byte)", buckets = [
0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
declareHistogram libp2p_yamux_recv_queue,
"message recv queue length (in byte)", buckets = [
0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]

type
YamuxError* = object of CatchableError
YamuxError* = object of MuxerError

MsgType = enum
Data = 0x0
@@ -59,7 +63,10 @@ type
streamId: uint32
length: uint32

proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
proc readHeader(
conn: LPStream
): Future[YamuxHeader] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
var buffer: array[12, byte]
await conn.readExactly(addr buffer[0], 12)

@@ -73,10 +80,10 @@ proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
return result

proc `$`(header: YamuxHeader): string =
result = "{" & $header.msgType & ", "
result &= "{" & header.flags.foldl(if a != "": a & ", " & $b else: $b, "") & "}, "
result &= "streamId: " & $header.streamId & ", "
result &= "length: " & $header.length & "}"
"{" & $header.msgType & ", " &
"{" & header.flags.foldl(if a != "": a & ", " & $b else: $b, "") & "}, " &
"streamId: " & $header.streamId & ", " &
"length: " & $header.length & "}"

proc encode(header: YamuxHeader): array[12, byte] =
result[0] = header.version
@@ -85,10 +92,14 @@ proc encode(header: YamuxHeader): array[12, byte] =
result[4..7] = toBytesBE(header.streamId)
result[8..11] = toBytesBE(header.length)

proc write(conn: LPStream, header: YamuxHeader): Future[void] {.gcsafe.} =
proc write(
conn: LPStream,
header: YamuxHeader
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
trace "write directly on stream", h = $header
var buffer = header.encode()
return conn.write(@buffer)
conn.write(@buffer)

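For reference, the 12-byte header written above follows the yamux spec: version, message type, two bytes of flags, then big-endian stream id and length. Bytes 1-3 are not visible in this hunk, so the sketch below leans on the spec rather than the diff, and assumes stew/endians2 (already used for toBytesBE) for the reverse conversion:

# Illustrative check of the wire layout produced by encode():
let raw = YamuxHeader.data(7, 100).encode()
assert raw[0] == 0'u8                           # version
assert raw[1] == ord(MsgType.Data).uint8        # message type (per the spec)
assert uint32.fromBytesBE(raw[4 .. 7]) == 7     # stream id, big endian
assert uint32.fromBytesBE(raw[8 .. 11]) == 100  # length, big endian
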
proc ping(T: type[YamuxHeader], flag: MsgFlags, pingData: uint32): T =
T(
@@ -106,11 +117,10 @@ proc goAway(T: type[YamuxHeader], status: GoAwayStatus): T =
)

proc data(
T: type[YamuxHeader],
streamId: uint32,
length: uint32 = 0,
flags: set[MsgFlags] = {},
): T =
T: type[YamuxHeader],
streamId: uint32,
length: uint32 = 0,
flags: set[MsgFlags] = {}): T =
T(
version: YamuxVersion,
msgType: MsgType.Data,
@@ -120,11 +130,10 @@ proc data(
)

proc windowUpdate(
T: type[YamuxHeader],
streamId: uint32,
delta: uint32,
flags: set[MsgFlags] = {},
): T =
T: type[YamuxHeader],
streamId: uint32,
delta: uint32,
flags: set[MsgFlags] = {}): T =
T(
version: YamuxVersion,
msgType: MsgType.WindowUpdate,
@@ -137,12 +146,13 @@ type
ToSend = tuple
data: seq[byte]
sent: int
fut: Future[void]
fut: Future[void].Raising([CancelledError, LPStreamError])
YamuxChannel* = ref object of Connection
id: uint32
recvWindow: int
sendWindow: int
maxRecvWindow: int
maxSendQueueSize: int
conn: Connection
isSrc: bool
opened: bool
@@ -151,16 +161,15 @@ type
recvQueue: seq[byte]
isReset: bool
remoteReset: bool
closedRemotely: Future[void]
closedRemotely: Future[void].Raising([])
closedLocally: bool
receivedData: AsyncEvent
returnedEof: bool

proc `$`(channel: YamuxChannel): string =
result = if channel.conn.dir == Out: "=> " else: "<= "
result &= $channel.id
var s: seq[string] = @[]
if channel.closedRemotely.done():
if channel.closedRemotely.completed():
s.add("ClosedRemotely")
if channel.closedLocally:
s.add("ClosedLocally")
@@ -169,29 +178,44 @@ proc `$`(channel: YamuxChannel): string =
if s.len > 0:
result &= " {" & s.foldl(if a != "": a & ", " & b else: b, "") & "}"

proc sendQueueBytes(channel: YamuxChannel, limit: bool = false): int =
for (elem, sent, _) in channel.sendQueue:
result.inc(min(elem.len - sent, if limit: channel.maxRecvWindow div 3 else: elem.len - sent))
proc lengthSendQueue(channel: YamuxChannel): int =
## Returns the length of what remains to be sent
##
channel.sendQueue.foldl(a + b.data.len - b.sent, 0)

proc actuallyClose(channel: YamuxChannel) {.async.} =
proc lengthSendQueueWithLimit(channel: YamuxChannel): int =
## Returns the length of what remains to be sent, but limit the size of big messages.
##
# For leniency, limit big messages size to the third of maxSendQueueSize
# This value is arbitrary, it's not in the specs, it permits to store up to
# 3 big messages if the peer is stalling.
channel.sendQueue.foldl(a + min(b.data.len - b.sent, channel.maxSendQueueSize div 3), 0)

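To make the capping concrete with the defaults from this file, here is a hypothetical queue of stalled 200 kB messages (numbers are made up):

let
  maxSendQueueSize = 256_000                          # MaxSendQueueSize default
  bigMsg = 200_000                                    # made-up message size
  cappedPerMsg = min(bigMsg, maxSendQueueSize div 3)  # 85_333
assert 3 * cappedPerMsg <= maxSendQueueSize  # up to three big messages are tolerated
assert 4 * cappedPerMsg > maxSendQueueSize   # a fourth exceeds the limit checked in trySend
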
proc actuallyClose(channel: YamuxChannel) {.async: (raises: []).} =
if channel.closedLocally and channel.sendQueue.len == 0 and
channel.closedRemotely.done():
channel.closedRemotely.completed():
await procCall Connection(channel).closeImpl()

proc remoteClosed(channel: YamuxChannel) {.async.} =
if not channel.closedRemotely.done():
proc remoteClosed(channel: YamuxChannel) {.async: (raises: []).} =
if not channel.closedRemotely.completed():
channel.closedRemotely.complete()
await channel.actuallyClose()

method closeImpl*(channel: YamuxChannel) {.async, gcsafe.} =
method closeImpl*(channel: YamuxChannel) {.async: (raises: []).} =
if not channel.closedLocally:
trace "Closing yamux channel locally", streamId = channel.id, conn = channel.conn
channel.closedLocally = true

if channel.isReset == false and channel.sendQueue.len == 0:
await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
if not channel.isReset and channel.sendQueue.len == 0:
try: await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
except CancelledError, LPStreamError: discard
await channel.actuallyClose()

proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
proc reset(
channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).} =
# If we reset locally, we want to flush up to a maximum of recvWindow
# bytes. It's because the peer we're connected to can send us data before
# it receives the reset.
if channel.isReset:
return
trace "Reset channel"
@@ -203,20 +227,24 @@ proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
channel.recvQueue = @[]
channel.sendWindow = 0
if not channel.closedLocally:
if isLocal:
if isLocal and not channel.isSending:
try: await channel.conn.write(YamuxHeader.data(channel.id, 0, {Rst}))
except LPStreamEOFError as exc: discard
except LPStreamClosedError as exc: discard
except CancelledError, LPStreamError: discard
await channel.close()
if not channel.closedRemotely.done():
if not channel.closedRemotely.completed():
await channel.remoteClosed()
channel.receivedData.fire()
if not isLocal:
# If we reset locally, we want to flush up to a maximum of recvWindow
# bytes. We use the recvWindow in the proc cleanupChann.
# If the reset is remote, there's no reason to flush anything.
channel.recvWindow = 0

proc updateRecvWindow(channel: YamuxChannel) {.async.} =
proc updateRecvWindow(
channel: YamuxChannel
) {.async: (raises: [CancelledError, LPStreamError]).} =
## Send to the peer a window update when the recvWindow is empty enough
##
# In order to avoid spamming a window update everytime a byte is read,
# we send it everytime half of the maxRecvWindow is read.
let inWindow = channel.recvWindow + channel.recvQueue.len
if inWindow > channel.maxRecvWindow div 2:
return
@@ -230,31 +258,36 @@ proc updateRecvWindow(channel: YamuxChannel) {.async.} =
trace "increasing the recvWindow", delta

method readOnce*(
channel: YamuxChannel,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
channel: YamuxChannel,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
## Read from a yamux channel

if channel.isReset:
raise if channel.remoteReset:
raise
if channel.remoteReset:
newLPStreamResetError()
elif channel.closedLocally:
newLPStreamClosedError()
else:
newLPStreamConnDownError()
if channel.returnedEof:
if channel.isEof:
raise newLPStreamRemoteClosedError()
if channel.recvQueue.len == 0:
channel.receivedData.clear()
await channel.closedRemotely or channel.receivedData.wait()
if channel.closedRemotely.done() and channel.recvQueue.len == 0:
channel.returnedEof = true
return 0
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(channel.closedRemotely, channel.receivedData.wait())
except ValueError: raiseAssert("Futures list is not empty")
if channel.closedRemotely.completed() and channel.recvQueue.len == 0:
channel.isEof = true
return 0 # we return 0 to indicate that the channel is closed for reading from now on

let toRead = min(channel.recvQueue.len, nbytes)

var p = cast[ptr UncheckedArray[byte]](pbytes)
toOpenArray(p, 0, nbytes - 1)[0..<toRead] = channel.recvQueue.toOpenArray(0, toRead - 1)
toOpenArray(p, 0, nbytes - 1)[0..<toRead] =
channel.recvQueue.toOpenArray(0, toRead - 1)
channel.recvQueue = channel.recvQueue[toRead..^1]

# We made some room in the recv buffer let the peer know
@@ -262,7 +295,9 @@ method readOnce*(
channel.activity = true
return toRead

proc gotDataFromRemote(channel: YamuxChannel, b: seq[byte]) {.async.} =
proc gotDataFromRemote(
channel: YamuxChannel,
b: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
channel.recvWindow -= b.len
channel.recvQueue = channel.recvQueue.concat(b)
channel.receivedData.fire()
@@ -273,26 +308,27 @@ proc gotDataFromRemote(channel: YamuxChannel, b: seq[byte]) {.async.} =
proc setMaxRecvWindow*(channel: YamuxChannel, maxRecvWindow: int) =
channel.maxRecvWindow = maxRecvWindow

proc trySend(channel: YamuxChannel) {.async.} =
proc trySend(
channel: YamuxChannel
) {.async: (raises: [CancelledError, LPStreamError]).} =
if channel.isSending:
return
channel.isSending = true
defer: channel.isSending = false

while channel.sendQueue.len != 0:
channel.sendQueue.keepItIf(not (it.fut.cancelled() and it.sent == 0))
if channel.sendWindow == 0:
trace "send window empty"
if channel.sendQueueBytes(true) > channel.maxRecvWindow:
debug "channel send queue too big, resetting", maxSendWindow=channel.maxRecvWindow,
currentQueueSize = channel.sendQueueBytes(true)
try:
await channel.reset(true)
except CatchableError as exc:
debug "failed to reset", msg=exc.msg
trace "trying to send while the sendWindow is empty"
if channel.lengthSendQueueWithLimit() > channel.maxSendQueueSize:
trace "channel send queue too big, resetting",
maxSendQueueSize = channel.maxSendQueueSize,
currentQueueSize = channel.lengthSendQueueWithLimit()
await channel.reset(isLocal = true)
break

let
bytesAvailable = channel.sendQueueBytes()
bytesAvailable = channel.lengthSendQueue()
toSend = min(channel.sendWindow, bytesAvailable)
var
sendBuffer = newSeqUninitialized[byte](toSend + 12)
@@ -305,22 +341,33 @@ proc trySend(channel: YamuxChannel) {.async.} =

sendBuffer[0..<12] = header.encode()

var futures: seq[Future[void]]
var futures: seq[Future[void].Raising([CancelledError, LPStreamError])]
while inBuffer < toSend:
# concatenate the different message we try to send into one buffer
let (data, sent, fut) = channel.sendQueue[0]
let bufferToSend = min(data.len - sent, toSend - inBuffer)
sendBuffer.toOpenArray(12, 12 + toSend - 1)[inBuffer..<(inBuffer+bufferToSend)] =
channel.sendQueue[0].data.toOpenArray(sent, sent + bufferToSend - 1)
channel.sendQueue[0].sent.inc(bufferToSend)
if channel.sendQueue[0].sent >= data.len:
# if every byte of the message is in the buffer, add the write future to the
# sequence of futures to be completed (or failed) when the buffer is sent
futures.add(fut)
channel.sendQueue.delete(0)
inBuffer.inc(bufferToSend)

trace "build send buffer", h = $header, msg=string.fromBytes(sendBuffer[12..^1])
trace "try to send the buffer", h = $header
channel.sendWindow.dec(toSend)
try: await channel.conn.write(sendBuffer)
except CatchableError as exc:
try:
await channel.conn.write(sendBuffer)
except CancelledError:
trace "cancelled sending the buffer"
for fut in futures.items():
fut.cancelSoon()
await channel.reset()
break
except LPStreamError as exc:
trace "failed to send the buffer"
let connDown = newLPStreamConnDownError(exc)
for fut in futures.items():
fut.fail(connDown)
@@ -330,7 +377,13 @@ proc trySend(channel: YamuxChannel) {.async.} =
fut.complete()
channel.activity = true

method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
method write*(
channel: YamuxChannel,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
## Write to yamux channel
##
result = newFuture[void]("Yamux Send")
if channel.remoteReset:
result.fail(newLPStreamResetError())
@@ -343,15 +396,22 @@ method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
return result
channel.sendQueue.add((msg, 0, result))
when defined(libp2p_yamux_metrics):
libp2p_yamux_recv_queue.observe(channel.sendQueueBytes().int64)
libp2p_yamux_send_queue.observe(channel.lengthSendQueue().int64)
asyncSpawn channel.trySend()

proc open*(channel: YamuxChannel) {.async, gcsafe.} =
proc open(
channel: YamuxChannel
) {.async: (raises: [CancelledError, LPStreamError]).} =
## Open a yamux channel by sending a window update with Syn or Ack flag
##
if channel.opened:
trace "Try to open channel twice"
return
channel.opened = true
await channel.conn.write(YamuxHeader.data(channel.id, 0, {if channel.isSrc: Syn else: Ack}))
await channel.conn.write(YamuxHeader.windowUpdate(
channel.id,
uint32(max(channel.maxRecvWindow - YamuxDefaultWindowSize, 0)),
{if channel.isSrc: Syn else: Ack}))

method getWrapped*(channel: YamuxChannel): Connection = channel.conn

@@ -362,48 +422,73 @@ type
currentId: uint32
isClosed: bool
maxChannCount: int
windowSize: int
maxSendQueueSize: int
inTimeout: Duration
outTimeout: Duration

proc lenBySrc(m: Yamux, isSrc: bool): int =
for v in m.channels.values():
if v.isSrc == isSrc: result += 1

proc cleanupChann(m: Yamux, channel: YamuxChannel) {.async.} =
await channel.join()
proc cleanupChannel(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
try:
await channel.join()
except CancelledError:
discard
m.channels.del(channel.id)
when defined(libp2p_yamux_metrics):
libp2p_yamux_channels.set(m.lenBySrc(channel.isSrc).int64, [$channel.isSrc, $channel.peerId])
libp2p_yamux_channels.set(
m.lenBySrc(channel.isSrc).int64, [$channel.isSrc, $channel.peerId])
if channel.isReset and channel.recvWindow > 0:
m.flushed[channel.id] = channel.recvWindow

proc createStream(m: Yamux, id: uint32, isSrc: bool): YamuxChannel =
result = YamuxChannel(
proc createStream(
m: Yamux, id: uint32, isSrc: bool,
recvWindow: int, maxSendQueueSize: int): YamuxChannel =
# During initialization, recvWindow can be larger than maxRecvWindow.
# This is because the peer we're connected to will always assume
# that the initial recvWindow is 256k.
# To solve this contradiction, no updateWindow will be sent until
# recvWindow is less than maxRecvWindow
proc newClosedRemotelyFut(): Future[void] {.async: (raises: [], raw: true).} =
newFuture[void]()
var stream = YamuxChannel(
id: id,
maxRecvWindow: DefaultWindowSize,
recvWindow: DefaultWindowSize,
sendWindow: DefaultWindowSize,
maxRecvWindow: recvWindow,
recvWindow: if recvWindow > YamuxDefaultWindowSize: recvWindow else: YamuxDefaultWindowSize,
sendWindow: YamuxDefaultWindowSize,
maxSendQueueSize: maxSendQueueSize,
isSrc: isSrc,
conn: m.connection,
receivedData: newAsyncEvent(),
closedRemotely: newFuture[void]()
closedRemotely: newClosedRemotelyFut()
)
result.objName = "YamuxStream"
result.dir = if isSrc: Direction.Out else: Direction.In
result.timeoutHandler = proc(): Future[void] {.gcsafe.} =
trace "Idle timeout expired, resetting YamuxChannel"
result.reset()
result.initStream()
result.peerId = m.connection.peerId
result.observedAddr = m.connection.observedAddr
result.transportDir = m.connection.transportDir
stream.objName = "YamuxStream"
if isSrc:
stream.dir = Direction.Out
stream.timeout = m.outTimeout
else:
stream.dir = Direction.In
stream.timeout = m.inTimeout
stream.timeoutHandler =
proc(): Future[void] {.async: (raises: [], raw: true).} =
trace "Idle timeout expired, resetting YamuxChannel"
stream.reset(isLocal = true)
stream.initStream()
stream.peerId = m.connection.peerId
stream.observedAddr = m.connection.observedAddr
stream.transportDir = m.connection.transportDir
when defined(libp2p_agents_metrics):
result.shortAgent = m.connection.shortAgent
m.channels[id] = result
asyncSpawn m.cleanupChann(result)
stream.shortAgent = m.connection.shortAgent
m.channels[id] = stream
asyncSpawn m.cleanupChannel(stream)
trace "created channel", id, pid=m.connection.peerId
when defined(libp2p_yamux_metrics):
libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $result.peerId])
libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $stream.peerId])
return stream

method close*(m: Yamux) {.async.} =
method close*(m: Yamux) {.async: (raises: []).} =
if m.isClosed == true:
trace "Already closed"
return
@@ -412,24 +497,21 @@ method close*(m: Yamux) {.async.} =
trace "Closing yamux"
let channels = toSeq(m.channels.values())
for channel in channels:
await channel.reset(true)
await channel.reset(isLocal = true)
try: await m.connection.write(YamuxHeader.goAway(NormalTermination))
except CatchableError as exc: trace "failed to send goAway", msg=exc.msg
except CancelledError as exc: trace "cancelled sending goAway", msg = exc.msg
except LPStreamError as exc: trace "failed to send goAway", msg = exc.msg
await m.connection.close()
trace "Closed yamux"

proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
## call the muxer stream handler for this channel
proc handleStream(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
## Call the muxer stream handler for this channel
##
try:
await m.streamHandler(channel)
trace "finished handling stream"
doAssert(channel.isClosed, "connection not closed by handler!")
except CatchableError as exc:
trace "Exception in yamux stream handler", msg = exc.msg
await channel.reset()
await m.streamHandler(channel)
trace "finished handling stream"
doAssert(channel.isClosed, "connection not closed by handler!")

method handle*(m: Yamux) {.async, gcsafe.} =
method handle*(m: Yamux) {.async: (raises: []).} =
trace "Starting yamux handler", pid=m.connection.peerId
try:
while not m.connection.atEof:
@@ -453,28 +535,39 @@ method handle*(m: Yamux) {.async, gcsafe.} =
else:
if header.streamId in m.flushed:
m.flushed.del(header.streamId)

if header.streamId mod 2 == m.currentId mod 2:
debug "Peer used our reserved stream id, skipping", id=header.streamId, currentId=m.currentId, peerId=m.connection.peerId
raise newException(YamuxError, "Peer used our reserved stream id")
let newStream = m.createStream(header.streamId, false)
let newStream = m.createStream(header.streamId, false, m.windowSize, m.maxSendQueueSize)
if m.channels.len >= m.maxChannCount:
await newStream.reset()
continue
await newStream.open()
asyncSpawn m.handleStream(newStream)
elif header.streamId notin m.channels:
if header.streamId notin m.flushed:
raise newException(YamuxError, "Unknown stream ID: " & $header.streamId)
elif header.msgType == Data:
# Flush the data
m.flushed[header.streamId].dec(int(header.length))
if m.flushed[header.streamId] < 0:
raise newException(YamuxError, "Peer exhausted the recvWindow after reset")
if header.length > 0:
var buffer = newSeqUninitialized[byte](header.length)
await m.connection.readExactly(addr buffer[0], int(header.length))
# Flush the data
m.flushed.withValue(header.streamId, flushed):
if header.msgType == Data:
flushed[].dec(int(header.length))
if flushed[] < 0:
raise newException(YamuxError,
"Peer exhausted the recvWindow after reset")
if header.length > 0:
var buffer = newSeqUninitialized[byte](header.length)
await m.connection.readExactly(
addr buffer[0], int(header.length))
do:
raise newException(YamuxError,
"Unknown stream ID: " & $header.streamId)
continue

let channel = m.channels[header.streamId]
let channel =
try:
m.channels[header.streamId]
except KeyError:
raise newException(YamuxError,
"Stream was cleaned up before handling data: " & $header.streamId)

if header.msgType == WindowUpdate:
channel.sendWindow += int(header.length)
@@ -487,7 +580,7 @@ method handle*(m: Yamux) {.async, gcsafe.} =
if header.length > 0:
var buffer = newSeqUninitialized[byte](header.length)
await m.connection.readExactly(addr buffer[0], int(header.length))
trace "Msg Rcv", msg=string.fromBytes(buffer)
trace "Msg Rcv", msg=shortLog(buffer)
await channel.gotDataFromRemote(buffer)

if MsgFlags.Fin in header.flags:
@@ -496,11 +589,24 @@ method handle*(m: Yamux) {.async, gcsafe.} =
if MsgFlags.Rst in header.flags:
trace "remote reset channel"
await channel.reset()
except CancelledError as exc:
debug "Unexpected cancellation in yamux handler", msg = exc.msg
except LPStreamEOFError as exc:
trace "Stream EOF", msg = exc.msg
except LPStreamError as exc:
debug "Unexpected stream exception in yamux read loop", msg = exc.msg
except YamuxError as exc:
trace "Closing yamux connection", error=exc.msg
await m.connection.write(YamuxHeader.goAway(ProtocolError))
try:
await m.connection.write(YamuxHeader.goAway(ProtocolError))
except CancelledError, LPStreamError:
discard
except MuxerError as exc:
debug "Unexpected muxer exception in yamux read loop", msg = exc.msg
try:
await m.connection.write(YamuxHeader.goAway(ProtocolError))
except CancelledError, LPStreamError:
discard
finally:
await m.close()
trace "Stopped yamux handler"
@@ -509,21 +615,32 @@ method getStreams*(m: Yamux): seq[Connection] =
for c in m.channels.values: result.add(c)

method newStream*(
m: Yamux,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =

m: Yamux,
name: string = "",
lazy: bool = false
): Future[Connection] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
if m.channels.len > m.maxChannCount - 1:
raise newException(TooManyChannels, "max allowed channel count exceeded")
let stream = m.createStream(m.currentId, true)
let stream = m.createStream(m.currentId, true, m.windowSize, m.maxSendQueueSize)
m.currentId += 2
if not lazy:
await stream.open()
return stream

proc new*(T: type[Yamux], conn: Connection, maxChannCount: int = MaxChannelCount): T =
proc new*(
T: type[Yamux], conn: Connection,
maxChannCount: int = MaxChannelCount,
windowSize: int = YamuxDefaultWindowSize,
maxSendQueueSize: int = MaxSendQueueSize,
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes): T =
T(
connection: conn,
currentId: if conn.dir == Out: 1 else: 2,
maxChannCount: maxChannCount
maxChannCount: maxChannCount,
windowSize: windowSize,
maxSendQueueSize: maxSendQueueSize,
inTimeout: inTimeout,
outTimeout: outTimeout
)

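A minimal usage sketch for the extended constructor above; the parameter values are arbitrary and `conn` is assumed to be an already established Connection:

# Sketch only: a yamux muxer with a larger receive window and shorter idle timeouts.
let muxer = Yamux.new(
  conn,
  windowSize = 1_000_000,
  maxSendQueueSize = 512_000,
  inTimeout = 2.minutes,
  outTimeout = 2.minutes)
asyncSpawn muxer.handle()   # read loop, normally driven by the switch
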
@@ -52,7 +52,7 @@ proc resolveOneAddress(
ma: MultiAddress,
domain: Domain = Domain.AF_UNSPEC,
prefix = ""): Future[seq[MultiAddress]]
{.async, raises: [MaError, TransportAddressError].} =
{.async.} =
#Resolve a single address
var pbuf: array[2, byte]


@@ -140,7 +140,7 @@ proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[voi

proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
raise newException(AutonatError, "Received malformed message")

@@ -162,7 +162,7 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy
proc addressMapper(
self: AutonatService,
peerStore: PeerStore,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =

if self.networkReachability != NetworkReachability.Reachable:
return listenAddrs
@@ -179,7 +179,7 @@ proc addressMapper(
return addrs

method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, switch.peerStore, listenAddrs)

info "Setting up AutonatService"

@@ -66,7 +66,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:

if peerDialableAddrs.len > self.maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<self.maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.In))
try:
discard await anyCompleted(futs).wait(self.connectTimeout)
debug "Dcutr initiator has directly connected to the remote peer."

@@ -56,5 +56,10 @@ proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
await conn.writeLp(pb.buffer)

proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] =
addrs.filterIt(TCP.match(it))
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] {.raises: [LPError]} =
var result = newSeq[MultiAddress]()
for a in addrs:
# This is necessary to also accept addrs like /ip4/198.51.100/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N
if [TCP, mapAnd(TCP_DNS, P2PPattern), mapAnd(TCP_IP, P2PPattern)].anyIt(it.match(a)):
result.add(a[0..1].tryGet())
return result

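To illustrate what the relaxed matcher above accepts, with made-up addresses (only the plain TCP part is kept; non-TCP transports are dropped):

let candidates = @[
  MultiAddress.init("/ip4/192.0.2.1/tcp/4001").tryGet(),
  MultiAddress.init("/ip4/192.0.2.1/tcp/4001/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
  MultiAddress.init("/ip4/192.0.2.1/udp/4001/quic-v1").tryGet()]
# Expected: the first two reduce to /ip4/192.0.2.1/tcp/4001, the QUIC one is dropped.
assert getHolePunchableAddrs(candidates).len == 2
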
@@ -19,7 +19,6 @@ import ../../protocol,
|
||||
../../../switch,
|
||||
../../../utils/future
|
||||
|
||||
export DcutrError
|
||||
export chronicles
|
||||
|
||||
type Dcutr* = ref object of LPProtocol
|
||||
@@ -29,7 +28,7 @@ logScope:
|
||||
|
||||
proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDialableAddrs = 8): T =
|
||||
|
||||
proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} =
|
||||
proc handleStream(stream: Connection, proto: string) {.async.} =
|
||||
var peerDialableAddrs: seq[MultiAddress]
|
||||
try:
|
||||
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
|
||||
@@ -56,7 +55,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
|
||||
|
||||
if peerDialableAddrs.len > maxDialableAddrs:
|
||||
peerDialableAddrs = peerDialableAddrs[0..<maxDialableAddrs]
|
||||
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, upgradeDir = Direction.In))
|
||||
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.Out))
|
||||
try:
|
||||
discard await anyCompleted(futs).wait(connectTimeout)
|
||||
debug "Dcutr receiver has directly connected to the remote peer."
|
||||
@@ -65,14 +64,14 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
|
||||
except CancelledError as err:
|
||||
raise err
|
||||
except AllFuturesFailedError as err:
|
||||
debug "Dcutr receiver could not connect to the remote peer, all connect attempts failed", peerDialableAddrs, msg = err.msg
|
||||
raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts failed", err)
|
||||
debug "Dcutr receiver could not connect to the remote peer, " &
|
||||
"all connect attempts failed", peerDialableAddrs, msg = err.msg
|
||||
except AsyncTimeoutError as err:
|
||||
debug "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg
|
||||
raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", err)
|
||||
debug "Dcutr receiver could not connect to the remote peer, " &
|
||||
"all connect attempts timed out", peerDialableAddrs, msg = err.msg
|
||||
except CatchableError as err:
|
||||
warn "Unexpected error when Dcutr receiver tried to connect to the remote peer", msg = err.msg
|
||||
raise newException(DcutrError, "Unexpected error when Dcutr receiver tried to connect to the remote peer", err)
|
||||
warn "Unexpected error when Dcutr receiver tried to connect " &
|
||||
"to the remote peer", msg = err.msg
|
||||
|
||||
let self = T()
|
||||
self.handler = handleStream
|
||||
|
||||
@@ -189,7 +189,7 @@ proc dialPeerV2*(
    conn.limitData = msgRcvFromRelay.limit.data
    return conn

proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
  let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
    await sendHopStatus(conn, MalformedMessage)
    return
@@ -201,7 +201,7 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
    trace "Unexpected client / relayv2 handshake", msgType=msg.msgType
    await sendStopError(conn, MalformedMessage)

proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, gcsafe.} =
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
  let src = msg.srcPeer.valueOr:
    await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
    return
@@ -226,7 +226,7 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, g
  if cl.onNewConnection != nil: await cl.onNewConnection(conn, 0, 0)
  else: await conn.close()

proc handleStreamV1(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
  let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
    await sendStatus(conn, StatusV1.MalformedMessage)
    return
@@ -266,7 +266,7 @@ proc new*(T: typedesc[RelayClient], canHop: bool = false,
    maxCircuitPerPeer: maxCircuitPerPeer,
    msgSize: msgSize,
    isCircuitRelayV1: circuitRelayV1)
  proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
  proc handleStream(conn: Connection, proto: string) {.async.} =
    try:
      case proto:
      of RelayV1Codec: await cl.handleStreamV1(conn)

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -23,11 +23,15 @@ type
method readOnce*(
    self: RelayConnection,
    pbytes: pointer,
    nbytes: int): Future[int] {.async.} =
    nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
  self.activity = true
  return await self.conn.readOnce(pbytes, nbytes)
  self.conn.readOnce(pbytes, nbytes)

method write*(self: RelayConnection, msg: seq[byte]): Future[void] {.async.} =
method write*(
    self: RelayConnection,
    msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
  self.dataSent.inc(msg.len)
  if self.limitData != 0 and self.dataSent > self.limitData:
    await self.close()
@@ -35,24 +39,25 @@ method write*(self: RelayConnection, msg: seq[byte]): Future[void] {.async.} =
  self.activity = true
  await self.conn.write(msg)

method closeImpl*(self: RelayConnection): Future[void] {.async.} =
method closeImpl*(self: RelayConnection): Future[void] {.async: (raises: []).} =
  await self.conn.close()
  await procCall Connection(self).closeImpl()

method getWrapped*(self: RelayConnection): Connection = self.conn

proc new*(
  T: typedesc[RelayConnection],
  conn: Connection,
  limitDuration: uint32,
  limitData: uint64): T =
    T: typedesc[RelayConnection],
    conn: Connection,
    limitDuration: uint32,
    limitData: uint64): T =
  let rc = T(conn: conn, limitDuration: limitDuration, limitData: limitData)
  rc.dir = conn.dir
  rc.initStream()
  if limitDuration > 0:
    proc checkDurationConnection() {.async.} =
      let sleep = sleepAsync(limitDuration.seconds())
      await sleep or conn.join()
      if sleep.finished: await conn.close()
      else: sleep.cancel()
    proc checkDurationConnection() {.async: (raises: []).} =
      try:
        await noCancel conn.join().wait(limitDuration.seconds())
      except AsyncTimeoutError:
        await conn.close()
    asyncSpawn checkDurationConnection()
  return rc

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -105,7 +105,7 @@ proc isRelayed*(conn: Connection): bool =
    wrappedConn = wrappedConn.getWrapped()
  return false

proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleReserve(r: Relay, conn: Connection) {.async.} =
  if conn.isRelayed():
    trace "reservation attempt over relay connection", pid = conn.peerId
    await sendHopStatus(conn, PermissionDenied)
@@ -128,7 +128,7 @@ proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =

proc handleConnect(r: Relay,
                   connSrc: Connection,
                   msg: HopMessage) {.async, gcsafe.} =
                   msg: HopMessage) {.async.} =
  if connSrc.isRelayed():
    trace "connection attempt over relay connection"
    await sendHopStatus(connSrc, PermissionDenied)
@@ -200,7 +200,7 @@ proc handleConnect(r: Relay,
      await rconnDst.close()
  await bridge(rconnSrc, rconnDst)

proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
  let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
    await sendHopStatus(conn, MalformedMessage)
    return
@@ -214,7 +214,7 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =

# Relay V1

proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsafe.} =
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
  r.streamCount.inc()
  defer: r.streamCount.dec()
  if r.streamCount + r.rsvp.len() >= r.maxCircuit:
@@ -293,7 +293,7 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
  trace "relaying connection", src, dst
  await bridge(connSrc, connDst)

proc handleStreamV1(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
  let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
    await sendStatus(conn, StatusV1.MalformedMessage)
    return
@@ -336,7 +336,7 @@ proc new*(T: typedesc[Relay],
    msgSize: msgSize,
    isCircuitRelayV1: circuitRelayV1)

  proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
  proc handleStream(conn: Connection, proto: string) {.async.} =
    try:
      case proto:
      of RelayV2HopCodec: await r.handleHopStreamV2(conn)
@@ -361,17 +361,25 @@ proc deletesReservation(r: Relay) {.async.} =
    if n > r.rsvp[k]:
      r.rsvp.del(k)

method start*(r: Relay) {.async.} =
method start*(
    r: Relay
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
  let fut = newFuture[void]()
  fut.complete()
  if not r.reservationLoop.isNil:
    warn "Starting relay twice"
    return
    return fut
  r.reservationLoop = r.deletesReservation()
  r.started = true
  fut

method stop*(r: Relay) {.async.} =
method stop*(r: Relay): Future[void] {.async: (raises: [], raw: true).} =
  let fut = newFuture[void]()
  fut.complete()
  if r.reservationLoop.isNil:
    warn "Stopping relay without starting it"
    return
    return fut
  r.started = false
  r.reservationLoop.cancel()
  r.reservationLoop = nil
  fut

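The start/stop rewrites above follow a chronos idiom: with raw: true the body is not transformed into an async state machine, so it must hand back an already-completed future rather than awaiting anything. A minimal sketch of the pattern, using a hypothetical service type:

  method start*(s: MyService): Future[void] {.async: (raises: [CancelledError], raw: true).} =
    let fut = newFuture[void]()
    fut.complete()            # nothing to await; return a finished future
    if s.started:
      warn "already started"  # early exits must still return the future
      return fut
    s.started = true
    fut
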
@@ -37,24 +37,24 @@ method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
  self.client.onNewConnection = proc(
      conn: Connection,
      duration: uint32 = 0,
      data: uint64 = 0) {.async, gcsafe, raises: [].} =
      data: uint64 = 0) {.async.} =
    await self.queue.addLast(RelayConnection.new(conn, duration, data))
    await conn.join()
  self.selfRunning = true
  await procCall Transport(self).start(ma)
  trace "Starting Relay transport"

method stop*(self: RelayTransport) {.async, gcsafe.} =
method stop*(self: RelayTransport) {.async.} =
  self.running = false
  self.selfRunning = false
  self.client.onNewConnection = nil
  while not self.queue.empty():
    await self.queue.popFirstNoWait().close()

method accept*(self: RelayTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: RelayTransport): Future[Connection] {.async.} =
  result = await self.queue.popFirst()

proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async, gcsafe.} =
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
  let
    sma = toSeq(ma.items())
    relayAddrs = sma[0..sma.len-4].mapIt(it.tryGet()).foldl(a & b)
@@ -90,7 +90,7 @@ method dial*(
  self: RelayTransport,
  hostname: string,
  ma: MultiAddress,
  peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
  peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
  peerId.withValue(pid):
    let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
    result = await self.dial(address)

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -21,63 +21,77 @@ const
  RelayV2HopCodec* = "/libp2p/circuit/relay/0.2.0/hop"
  RelayV2StopCodec* = "/libp2p/circuit/relay/0.2.0/stop"

proc sendStatus*(conn: Connection, code: StatusV1) {.async, gcsafe.} =
proc sendStatus*(
    conn: Connection,
    code: StatusV1
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
  trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
  let
    msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
    msg = RelayMessage(
      msgType: Opt.some(RelayType.Status), status: Opt.some(code))
    pb = encode(msg)
  await conn.writeLp(pb.buffer)
  conn.writeLp(pb.buffer)

proc sendHopStatus*(conn: Connection, code: StatusV2) {.async, gcsafe.} =
proc sendHopStatus*(
    conn: Connection,
    code: StatusV2
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
  trace "send hop relay/v2 status", status = $code & "(" & $ord(code) & ")"
  let
    msg = HopMessage(msgType: HopMessageType.Status, status: Opt.some(code))
    pb = encode(msg)
  await conn.writeLp(pb.buffer)
  conn.writeLp(pb.buffer)

proc sendStopStatus*(conn: Connection, code: StatusV2) {.async.} =
proc sendStopStatus*(
    conn: Connection,
    code: StatusV2
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
  trace "send stop relay/v2 status", status = $code & " (" & $ord(code) & ")"
  let
    msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
    pb = encode(msg)
  await conn.writeLp(pb.buffer)
  conn.writeLp(pb.buffer)

proc bridge*(connSrc: Connection, connDst: Connection) {.async.} =
proc bridge*(
    connSrc: Connection,
    connDst: Connection) {.async: (raises: [CancelledError]).} =
  const bufferSize = 4096
  var
    bufSrcToDst: array[bufferSize, byte]
    bufDstToSrc: array[bufferSize, byte]
    futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.high + 1)
    futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.high + 1)
    bytesSendFromSrcToDst = 0
    bytesSendFromDstToSrc = 0
    futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.len)
    futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.len)
    bytesSentFromSrcToDst = 0
    bytesSentFromDstToSrc = 0
    bufRead: int

  try:
    while not connSrc.closed() and not connDst.closed():
      await futSrc or futDst
      try: # https://github.com/status-im/nim-chronos/issues/516
        discard await race(futSrc, futDst)
      except ValueError: raiseAssert("Futures list is not empty")
      if futSrc.finished():
        bufRead = await futSrc
        if bufRead > 0:
          bytesSendFromSrcToDst.inc(bufRead)
          await connDst.write(@bufSrcToDst[0..<bufRead])
          zeroMem(addr(bufSrcToDst), bufSrcToDst.high + 1)
          futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.high + 1)
          bytesSentFromSrcToDst.inc(bufRead)
          await connDst.write(@bufSrcToDst[0 ..< bufRead])
          zeroMem(addr bufSrcToDst[0], bufSrcToDst.len)
          futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.len)
      if futDst.finished():
        bufRead = await futDst
        if bufRead > 0:
          bytesSendFromDstToSrc += bufRead
          await connSrc.write(bufDstToSrc[0..<bufRead])
          zeroMem(addr(bufDstToSrc), bufDstToSrc.high + 1)
          futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.high + 1)
          bytesSentFromDstToSrc += bufRead
          await connSrc.write(bufDstToSrc[0 ..< bufRead])
          zeroMem(addr bufDstToSrc[0], bufDstToSrc.len)
          futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.len)
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
  except LPStreamError as exc:
    if connSrc.closed() or connSrc.atEof():
      trace "relay src closed connection", src = connSrc.peerId
    if connDst.closed() or connDst.atEof():
      trace "relay dst closed connection", dst = connDst.peerId
    trace "relay error", exc=exc.msg
  trace "end relaying", bytesSendFromSrcToDst, bytesSendFromDstToSrc
  trace "end relaying", bytesSentFromSrcToDst, bytesSentFromDstToSrc
  await futSrc.cancelAndWait()
  await futDst.cancelAndWait()

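One detail worth noting in the buffer handling above: for a fixed-size array, high + 1 and len are the same value, so switching the readOnce and zeroMem calls from buf.high + 1 to buf.len only changes readability, not behaviour. A tiny self-check:

  var buf: array[4096, byte]
  doAssert buf.high + 1 == buf.len  # both evaluate to 4096
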
@@ -21,6 +21,7 @@ import ../protobuf/minprotobuf,
       ../peerid,
       ../crypto/crypto,
       ../multiaddress,
       ../multicodec,
       ../protocols/protocol,
       ../utility,
       ../errors,
@@ -77,7 +78,7 @@ chronicles.expandIt(IdentifyInfo):
  signedPeerRecord =
    # The SPR contains the same data as the identify message
    # would be cumbersome to log
    if iinfo.signedPeerRecord.isSome(): "Some"
    if it.signedPeerRecord.isSome(): "Some"
    else: "None"

proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool): ProtoBuffer
@@ -133,24 +134,24 @@ proc decodeMsg*(buf: seq[byte]): Opt[IdentifyInfo] =
  if ? pb.getField(6, agentVersion).toOpt():
    iinfo.agentVersion = some(agentVersion)

  debug "decodeMsg: decoded identify", iinfo
  Opt.some(iinfo)

proc new*(
  T: typedesc[Identify],
  peerInfo: PeerInfo,
  sendSignedPeerRecord = false
  sendSignedPeerRecord = false,
  observedAddrManager = ObservedAddrManager.new(),
  ): T =
  let identify = T(
    peerInfo: peerInfo,
    sendSignedPeerRecord: sendSignedPeerRecord,
    observedAddrManager: ObservedAddrManager.new(),
    observedAddrManager: observedAddrManager,
  )
  identify.init()
  identify

method init*(p: Identify) =
  proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
  proc handle(conn: Connection, proto: string) {.async.} =
    try:
      trace "handling identify request", conn
      var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
@@ -168,7 +169,7 @@ method init*(p: Identify) =

proc identify*(self: Identify,
               conn: Connection,
               remotePeerId: PeerId): Future[IdentifyInfo] {.async, gcsafe.} =
               remotePeerId: PeerId): Future[IdentifyInfo] {.async.} =
  trace "initiating identify", conn
  var message = await conn.readLp(64*1024)
  if len(message) == 0:
@@ -176,6 +177,7 @@ proc identify*(self: Identify,
    raise newException(IdentityInvalidMsgError, "Empty message received!")

  var info = decodeMsg(message).valueOr: raise newException(IdentityInvalidMsgError, "Incorrect message received!")
  debug "identify: decoded message", conn, info
  let
    pubkey = info.pubkey.valueOr: raise newException(IdentityInvalidMsgError, "No pubkey in identify")
    peer = PeerId.init(pubkey).valueOr: raise newException(IdentityInvalidMsgError, $error)
@@ -186,8 +188,12 @@ proc identify*(self: Identify,
  info.peerId = peer

  info.observedAddr.withValue(observed):
    if not self.observedAddrManager.addObservation(observed):
      debug "Observed address is not valid", observedAddr = observed
    # Currently, we use the ObservedAddrManager only to find our dialable external NAT address. Therefore, addresses
    # like "...\p2p-circuit\p2p\..." and "\p2p\..." are not useful to us.
    if observed.contains(multiCodec("p2p-circuit")).get(false) or P2PPattern.matchPartial(observed):
      trace "Not adding address to ObservedAddrManager.", observed
    elif not self.observedAddrManager.addObservation(observed):
      trace "Observed address is not valid.", observedAddr = observed
  return info

proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} =
@@ -198,13 +204,14 @@ proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.pu
  identifypush

proc init*(p: IdentifyPush) =
  proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
  proc handle(conn: Connection, proto: string) {.async.} =
    trace "handling identify push", conn
    try:
      var message = await conn.readLp(64*1024)

      var identInfo = decodeMsg(message).valueOr:
        raise newException(IdentityInvalidMsgError, "Incorrect message received!")
      debug "identify push: decoded message", conn, identInfo

      identInfo.pubkey.withValue(pubkey):
        let receivedPeerId = PeerId.init(pubkey).tryGet()

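The new observedAddrManager parameter shown above lets one manager instance be shared. A minimal construction sketch (the surrounding PeerInfo setup is assumed and not part of this changeset):

  let
    oam = ObservedAddrManager.new()
    identify = Identify.new(peerInfo, sendSignedPeerRecord = false, observedAddrManager = oam)
  # other services that track our dialable external addresses could be handed the same oam
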
@@ -27,7 +27,7 @@ type Perf* = ref object of LPProtocol

proc new*(T: typedesc[Perf]): T {.public.} =
  var p = T()
  proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
  proc handle(conn: Connection, proto: string) {.async.} =
    var bytesRead = 0
    try:
      trace "Received benchmark performance check", conn

@@ -51,12 +51,12 @@ proc new*(T: typedesc[Ping], handler: PingHandler = nil, rng: ref HmacDrbgContex
  ping

method init*(p: Ping) =
  proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
  proc handle(conn: Connection, proto: string) {.async.} =
    try:
      trace "handling ping", conn
      var buf: array[PingSize, byte]
      await conn.readExactly(addr buf[0], PingSize)
      trace "echoing ping", conn
      trace "echoing ping", conn, pingData = @buf
      await conn.write(@buf)
      if not isNil(p.pingHandler):
        await p.pingHandler(conn.peerId)
@@ -71,7 +71,7 @@ method init*(p: Ping) =
proc ping*(
  p: Ping,
  conn: Connection,
  ): Future[Duration] {.async, gcsafe, public.} =
  ): Future[Duration] {.async, public.} =
  ## Sends ping to `conn`, returns the delay

  trace "initiating ping", conn

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -19,20 +19,29 @@ const

type
  LPProtoHandler* = proc (
    conn: Connection,
    proto: string):
    Future[void]
    {.gcsafe, raises: [].}
      conn: Connection,
      proto: string): Future[void] {.async.}

  LPProtocol* = ref object of RootObj
    codecs*: seq[string]
    handler*: LPProtoHandler ## this handler gets invoked by the protocol negotiator
    handlerImpl: LPProtoHandler ## invoked by the protocol negotiator
    started*: bool
    maxIncomingStreams: Opt[int]

method init*(p: LPProtocol) {.base, gcsafe.} = discard
method start*(p: LPProtocol) {.async, base.} = p.started = true
method stop*(p: LPProtocol) {.async, base.} = p.started = false

method start*(
    p: LPProtocol) {.async: (raises: [CancelledError], raw: true), base.} =
  let fut = newFuture[void]()
  fut.complete()
  p.started = true
  fut

method stop*(p: LPProtocol) {.async: (raises: [], raw: true), base.} =
  let fut = newFuture[void]()
  fut.complete()
  p.started = false
  fut

proc maxIncomingStreams*(p: LPProtocol): int =
  p.maxIncomingStreams.get(DefaultMaxIncomingStreams)
@@ -41,7 +50,7 @@ proc `maxIncomingStreams=`*(p: LPProtocol, val: int) =
  p.maxIncomingStreams = Opt.some(val)

func codec*(p: LPProtocol): string =
  assert(p.codecs.len > 0, "Codecs sequence was empty!")
  doAssert(p.codecs.len > 0, "Codecs sequence was empty!")
  p.codecs[0]

func `codec=`*(p: LPProtocol, codec: string) =
@@ -49,15 +58,51 @@ func `codec=`*(p: LPProtocol, codec: string) =
  # if we use this abstraction
  p.codecs.insert(codec, 0)

template `handler`*(p: LPProtocol): LPProtoHandler =
  p.handlerImpl

template `handler`*(
    p: LPProtocol, conn: Connection, proto: string): Future[void] =
  p.handlerImpl(conn, proto)

func `handler=`*(p: LPProtocol, handler: LPProtoHandler) =
  p.handlerImpl = handler

# Callbacks that are annotated with `{.async: (raises).}` explicitly
# document the types of errors that they may raise, but are not compatible
# with `LPProtoHandler` and need to use a custom `proc` type.
# They are internally wrapped into a `LPProtoHandler`, but still allow the
# compiler to check that their `{.async: (raises).}` annotation is correct.
# https://github.com/nim-lang/Nim/issues/23432
func `handler=`*[E](
    p: LPProtocol,
    handler: proc (
      conn: Connection,
      proto: string): InternalRaisesFuture[void, E]) =
  proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
    await handler(conn, proto)
  p.handlerImpl = wrap

proc new*(
  T: type LPProtocol,
  codecs: seq[string],
  handler: LPProtoHandler,
  maxIncomingStreams: Opt[int] | int = Opt.none(int)): T =
    T: type LPProtocol,
    codecs: seq[string],
    handler: LPProtoHandler,
    maxIncomingStreams: Opt[int] | int = Opt.none(int)): T =
  T(
    codecs: codecs,
    handler: handler,
    handlerImpl: handler,
    maxIncomingStreams:
      when maxIncomingStreams is int: Opt.some(maxIncomingStreams)
      else: maxIncomingStreams
  )

proc new*[E](
    T: type LPProtocol,
    codecs: seq[string],
    handler: proc (
      conn: Connection,
      proto: string): InternalRaisesFuture[void, E],
    maxIncomingStreams: Opt[int] | int = Opt.none(int)): T =
  proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
    await handler(conn, proto)
  T.new(codec, wrap, maxIncomingStreams)

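A sketch of how a handler with an explicit raises annotation can now be attached, relying on the wrapping `handler=` / `new` overloads above (codec string and handler body are illustrative, not from this changeset):

  proc closeHandler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    # the compiler verifies that nothing other than CancelledError can escape
    await conn.close()

  let proto = LPProtocol.new(@["/example/1.0.0"], closeHandler)
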
@@ -16,6 +16,7 @@ import ./pubsub,
       ./timedcache,
       ./peertable,
       ./rpc/[message, messages, protobuf],
       nimcrypto/[hash, sha2],
       ../../crypto/crypto,
       ../../stream/connection,
       ../../peerid,
@@ -32,25 +33,34 @@ const FloodSubCodec* = "/floodsub/1.0.0"
type
  FloodSub* {.public.} = ref object of PubSub
    floodsub*: PeerTable # topic to remote peer map
    seen*: TimedCache[MessageId] # message id:s already seen on the network
    seenSalt*: seq[byte]
    seen*: TimedCache[SaltedId]
      # Early filter for messages recently observed on the network
      # We use a salted id because the messages in this cache have not yet
      # been validated meaning that an attacker has greater control over the
      # hash key and therefore could poison the table
    seenSalt*: sha256
      # The salt in this case is a partially updated SHA256 context pre-seeded
      # with some random data

proc hasSeen*(f: FloodSub, msgId: MessageId): bool =
  f.seenSalt & msgId in f.seen
proc salt*(f: FloodSub, msgId: MessageId): SaltedId =
  var tmp = f.seenSalt
  tmp.update(msgId)
  SaltedId(data: tmp.finish())

proc addSeen*(f: FloodSub, msgId: MessageId): bool =
  # Salting the seen hash helps avoid attacks against the hash function used
  # in the nim hash table
proc hasSeen*(f: FloodSub, saltedId: SaltedId): bool =
  saltedId in f.seen

proc addSeen*(f: FloodSub, saltedId: SaltedId): bool =
  # Return true if the message has already been seen
  f.seen.put(f.seenSalt & msgId)
  f.seen.put(saltedId)

proc firstSeen*(f: FloodSub, msgId: MessageId): Moment =
  f.seen.addedAt(f.seenSalt & msgId)
proc firstSeen*(f: FloodSub, saltedId: SaltedId): Moment =
  f.seen.addedAt(saltedId)

proc handleSubscribe*(f: FloodSub,
                      peer: PubSubPeer,
                      topic: string,
                      subscribe: bool) =
proc handleSubscribe(f: FloodSub,
                     peer: PubSubPeer,
                     topic: string,
                     subscribe: bool) =
  logScope:
    peer
    topic
@@ -96,10 +106,9 @@ method unsubscribePeer*(f: FloodSub, peer: PeerId) =
method rpcHandler*(f: FloodSub,
                   peer: PubSubPeer,
                   data: seq[byte]) {.async.} =

  var rpcMsg = decodeRpcMsg(data).valueOr:
    debug "failed to decode msg from peer", peer, err = error
    raise newException(CatchableError, "")
    raise newException(CatchableError, "Peer msg couldn't be decoded")

  trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
  # trigger hooks
@@ -117,9 +126,11 @@ method rpcHandler*(f: FloodSub,
      # TODO: descore peers due to error during message validation (malicious?)
      continue

    let msgId = msgIdResult.get
    let
      msgId = msgIdResult.get
      saltedId = f.salt(msgId)

    if f.addSeen(msgId):
    if f.addSeen(saltedId):
      trace "Dropping already-seen message", msgId, peer
      continue

@@ -148,16 +159,19 @@ method rpcHandler*(f: FloodSub,
        discard

    var toSendPeers = initHashSet[PubSubPeer]()
    for t in msg.topicIds: # for every topic in the message
      if t notin f.topics:
        continue
      f.floodsub.withValue(t, peers): toSendPeers.incl(peers[])
    let topic = msg.topic
    if topic notin f.topics:
      debug "Dropping message due to topic not in floodsub topics", topic, msgId, peer
      continue

      await handleData(f, t, msg.data)
    f.floodsub.withValue(topic, peers):
      toSendPeers.incl(peers[])

    await handleData(f, topic, msg.data)

    # In theory, if topics are the same in all messages, we could batch - we'd
    # also have to be careful to only include validated messages
    f.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
    f.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)
    trace "Forwared message to peers", peers = toSendPeers.len

  f.updateMetrics(rpcMsg)
@@ -213,13 +227,13 @@ method publish*(f: FloodSub,
  trace "Created new message",
    msg = shortLog(msg), peers = peers.len, topic, msgId

  if f.addSeen(msgId):
  if f.addSeen(f.salt(msgId)):
    # custom msgid providers might cause this
    trace "Dropping already-seen message", msgId, topic
    return 0

  # Try to send to all peers that are known to be interested
  f.broadcast(peers, RPCMsg(messages: @[msg]))
  f.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)

  when defined(libp2p_expensive_metrics):
    libp2p_pubsub_messages_published.inc(labelValues = [topic])
@@ -231,8 +245,11 @@ method publish*(f: FloodSub,
method initPubSub*(f: FloodSub)
  {.raises: [InitializationError].} =
  procCall PubSub(f).initPubSub()
  f.seen = TimedCache[MessageId].init(2.minutes)
  f.seenSalt = newSeqUninitialized[byte](sizeof(Hash))
  hmacDrbgGenerate(f.rng[], f.seenSalt)
  f.seen = TimedCache[SaltedId].init(2.minutes)
  f.seenSalt.init()

  var tmp: array[32, byte]
  hmacDrbgGenerate(f.rng[], tmp)
  f.seenSalt.update(tmp)

  f.init()

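A small sketch of how the salted cache above is meant to be used on the receive path (the message id is illustrative): the SHA-256 context pre-seeded with random data is copied, updated with the raw message id, and the digest becomes the cache key.

  let
    msgId: MessageId = @[byte 1, 2, 3]
    saltedId = f.salt(msgId)          # SHA-256 over the random seed plus the id
  if f.addSeen(saltedId):
    # duplicate: we can also see how long ago it was first observed
    let delay = Moment.now() - f.firstSeen(saltedId)
    discard delay
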
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -46,42 +46,82 @@ declareCounter(libp2p_gossipsub_saved_bytes, "bytes saved by gossipsub optimizat
declareCounter(libp2p_gossipsub_duplicate, "number of duplicates received")
declareCounter(libp2p_gossipsub_received, "number of messages received (deduplicated)")

proc init*(_: type[GossipSubParams]): GossipSubParams =
when defined(libp2p_expensive_metrics):
  declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])

proc init*(
    _: type[GossipSubParams],
    pruneBackoff = 1.minutes,
    unsubscribeBackoff = 5.seconds,
    floodPublish = true,
    gossipFactor: float64 = 0.25,
    d = GossipSubD,
    dLow = GossipSubDlo,
    dHigh = GossipSubDhi,
    dScore = GossipSubDlo,
    dOut = GossipSubDlo - 1, # DLow - 1
    dLazy = GossipSubD, # Like D,
    heartbeatInterval = GossipSubHeartbeatInterval,
    historyLength = GossipSubHistoryLength,
    historyGossip = GossipSubHistoryGossip,
    fanoutTTL = GossipSubFanoutTTL,
    seenTTL = 2.minutes,
    gossipThreshold = -100.0,
    publishThreshold = -1000.0,
    graylistThreshold = -10000.0,
    opportunisticGraftThreshold = 0.0,
    decayInterval = 1.seconds,
    decayToZero = 0.01,
    retainScore = 2.minutes,
    appSpecificWeight = 0.0,
    ipColocationFactorWeight = 0.0,
    ipColocationFactorThreshold = 1.0,
    behaviourPenaltyWeight = -1.0,
    behaviourPenaltyDecay = 0.999,
    directPeers = initTable[PeerId, seq[MultiAddress]](),
    disconnectBadPeers = false,
    enablePX = false,
    bandwidthEstimatebps = 100_000_000, # 100 Mbps or 12.5 MBps
    overheadRateLimit = Opt.none(tuple[bytes: int, interval: Duration]),
    disconnectPeerAboveRateLimit = false,
    maxNumElementsInNonPriorityQueue = DefaultMaxNumElementsInNonPriorityQueue): GossipSubParams =

  GossipSubParams(
    explicit: true,
    pruneBackoff: 1.minutes,
    unsubscribeBackoff: 5.seconds,
    floodPublish: true,
    gossipFactor: 0.25,
    d: GossipSubD,
    dLow: GossipSubDlo,
    dHigh: GossipSubDhi,
    dScore: GossipSubDlo,
    dOut: GossipSubDlo - 1, # DLow - 1
    dLazy: GossipSubD, # Like D
    heartbeatInterval: GossipSubHeartbeatInterval,
    historyLength: GossipSubHistoryLength,
    historyGossip: GossipSubHistoryGossip,
    fanoutTTL: GossipSubFanoutTTL,
    seenTTL: 2.minutes,
    gossipThreshold: -100,
    publishThreshold: -1000,
    graylistThreshold: -10000,
    opportunisticGraftThreshold: 0,
    decayInterval: 1.seconds,
    decayToZero: 0.01,
    retainScore: 2.minutes,
    appSpecificWeight: 0.0,
    ipColocationFactorWeight: 0.0,
    ipColocationFactorThreshold: 1.0,
    behaviourPenaltyWeight: -1.0,
    behaviourPenaltyDecay: 0.999,
    disconnectBadPeers: false,
    enablePX: false,
    bandwidthEstimatebps: 100_000_000, # 100 Mbps or 12.5 MBps
    iwantTimeout: 3 * GossipSubHeartbeatInterval,
    overheadRateLimit: Opt.none(tuple[bytes: int, interval: Duration]),
    disconnectPeerAboveRateLimit: false
    pruneBackoff: pruneBackoff,
    unsubscribeBackoff: unsubscribeBackoff,
    floodPublish: floodPublish,
    gossipFactor: gossipFactor,
    d: d,
    dLow: dLow,
    dHigh: dHigh,
    dScore: dScore,
    dOut: dOut,
    dLazy: dLazy,
    heartbeatInterval: heartbeatInterval,
    historyLength: historyLength,
    historyGossip: historyGossip,
    fanoutTTL: fanoutTTL,
    seenTTL: seenTTL,
    gossipThreshold: gossipThreshold,
    publishThreshold: publishThreshold,
    graylistThreshold: graylistThreshold,
    opportunisticGraftThreshold: opportunisticGraftThreshold,
    decayInterval: decayInterval,
    decayToZero: decayToZero,
    retainScore: retainScore,
    appSpecificWeight: appSpecificWeight,
    ipColocationFactorWeight: ipColocationFactorWeight,
    ipColocationFactorThreshold: ipColocationFactorThreshold,
    behaviourPenaltyWeight: behaviourPenaltyWeight,
    behaviourPenaltyDecay: behaviourPenaltyDecay,
    directPeers: directPeers,
    disconnectBadPeers: disconnectBadPeers,
    enablePX: enablePX,
    bandwidthEstimatebps: bandwidthEstimatebps,
    overheadRateLimit: overheadRateLimit,
    disconnectPeerAboveRateLimit: disconnectPeerAboveRateLimit,
    maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue
  )

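With the keyword defaults introduced above, a caller only names the knobs it wants to change; everything else keeps the default listed in init. An illustrative sketch (values are arbitrary):

  let params = GossipSubParams.init(
    floodPublish = false,
    seenTTL = 5.minutes,
    maxNumElementsInNonPriorityQueue = 2048)
  # validateParameters(params) below can then be used to sanity-check the result
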
proc validateParameters*(parameters: GossipSubParams): Result[void, cstring] =
|
||||
@@ -112,6 +152,8 @@ proc validateParameters*(parameters: GossipSubParams): Result[void, cstring] =
|
||||
err("gossipsub: behaviourPenaltyWeight parameter error, Must be negative")
|
||||
elif parameters.behaviourPenaltyDecay < 0 or parameters.behaviourPenaltyDecay >= 1:
|
||||
err("gossipsub: behaviourPenaltyDecay parameter error, Must be between 0 and 1")
|
||||
elif parameters.maxNumElementsInNonPriorityQueue <= 0:
|
||||
err("gossipsub: maxNumElementsInNonPriorityQueue parameter error, Must be > 0")
|
||||
else:
|
||||
ok()
|
||||
|
||||
@@ -170,10 +212,10 @@ method onNewPeer*(g: GossipSub, peer: PubSubPeer) =
|
||||
|
||||
method onPubSubPeerEvent*(p: GossipSub, peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
|
||||
case event.kind
|
||||
of PubSubPeerEventKind.Connected:
|
||||
of PubSubPeerEventKind.StreamOpened:
|
||||
discard
|
||||
of PubSubPeerEventKind.Disconnected:
|
||||
# If a send connection is lost, it's better to remove peer from the mesh -
|
||||
of PubSubPeerEventKind.StreamClosed:
|
||||
# If a send stream is lost, it's better to remove peer from the mesh -
|
||||
# if it gets reestablished, the peer will be readded to the mesh, and if it
|
||||
# doesn't, well.. then we hope the peer is going away!
|
||||
for topic, peers in p.mesh.mpairs():
|
||||
@@ -181,6 +223,8 @@ method onPubSubPeerEvent*(p: GossipSub, peer: PubSubPeer, event: PubSubPeerEvent
|
||||
peers.excl(peer)
|
||||
for _, peers in p.fanout.mpairs():
|
||||
peers.excl(peer)
|
||||
of PubSubPeerEventKind.DisconnectionRequested:
|
||||
asyncSpawn p.disconnectPeer(peer) # this should unsubscribePeer the peer too
|
||||
|
||||
procCall FloodSub(p).onPubSubPeerEvent(peer, event)
|
||||
|
||||
@@ -218,12 +262,14 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =
|
||||
for topic, info in stats[].topicInfos.mpairs:
|
||||
info.firstMessageDeliveries = 0
|
||||
|
||||
pubSubPeer.stopSendNonPriorityTask()
|
||||
|
||||
procCall FloodSub(g).unsubscribePeer(peer)
|
||||
|
||||
proc handleSubscribe*(g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
topic: string,
|
||||
subscribe: bool) =
|
||||
proc handleSubscribe(g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
topic: string,
|
||||
subscribe: bool) =
|
||||
logScope:
|
||||
peer
|
||||
topic
|
||||
@@ -272,46 +318,55 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
|
||||
var respControl: ControlMessage
|
||||
g.handleIDontWant(peer, control.idontwant)
|
||||
let iwant = g.handleIHave(peer, control.ihave)
|
||||
if iwant.messageIds.len > 0:
|
||||
if iwant.messageIDs.len > 0:
|
||||
respControl.iwant.add(iwant)
|
||||
respControl.prune.add(g.handleGraft(peer, control.graft))
|
||||
let messages = g.handleIWant(peer, control.iwant)
|
||||
|
||||
if
|
||||
respControl.prune.len > 0 or
|
||||
respControl.iwant.len > 0 or
|
||||
messages.len > 0:
|
||||
# iwant and prunes from here, also messages
|
||||
let
|
||||
isPruneNotEmpty = respControl.prune.len > 0
|
||||
isIWantNotEmpty = respControl.iwant.len > 0
|
||||
|
||||
for smsg in messages:
|
||||
for topic in smsg.topicIds:
|
||||
if g.knownTopics.contains(topic):
|
||||
libp2p_pubsub_broadcast_messages.inc(labelValues = [topic])
|
||||
if isPruneNotEmpty or isIWantNotEmpty:
|
||||
|
||||
if isIWantNotEmpty:
|
||||
libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)
|
||||
|
||||
if isPruneNotEmpty:
|
||||
for prune in respControl.prune:
|
||||
if g.knownTopics.contains(prune.topicID):
|
||||
libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicID])
|
||||
else:
|
||||
libp2p_pubsub_broadcast_messages.inc(labelValues = ["generic"])
|
||||
|
||||
libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)
|
||||
|
||||
for prune in respControl.prune:
|
||||
if g.knownTopics.contains(prune.topicId):
|
||||
libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicId])
|
||||
else:
|
||||
libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])
|
||||
libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])
|
||||
|
||||
trace "sending control message", msg = shortLog(respControl), peer
|
||||
g.send(
|
||||
peer,
|
||||
RPCMsg(control: some(respControl), messages: messages))
|
||||
RPCMsg(control: some(respControl)), isHighPriority = true)
|
||||
|
||||
if messages.len > 0:
|
||||
for smsg in messages:
|
||||
let topic = smsg.topic
|
||||
if g.knownTopics.contains(topic):
|
||||
libp2p_pubsub_broadcast_messages.inc(labelValues = [topic])
|
||||
else:
|
||||
libp2p_pubsub_broadcast_messages.inc(labelValues = ["generic"])
|
||||
|
||||
# iwant replies have lower priority
|
||||
trace "sending iwant reply messages", peer
|
||||
g.send(
|
||||
peer,
|
||||
RPCMsg(messages: messages), isHighPriority = false)
|
||||
|
||||
proc validateAndRelay(g: GossipSub,
|
||||
msg: Message,
|
||||
msgId, msgIdSalted: MessageId,
|
||||
msgId: MessageId, saltedId: SaltedId,
|
||||
peer: PubSubPeer) {.async.} =
|
||||
try:
|
||||
let validation = await g.validate(msg)
|
||||
|
||||
var seenPeers: HashSet[PubSubPeer]
|
||||
discard g.validationSeen.pop(msgIdSalted, seenPeers)
|
||||
discard g.validationSeen.pop(saltedId, seenPeers)
|
||||
libp2p_gossipsub_duplicate_during_validation.inc(seenPeers.len.int64)
|
||||
libp2p_gossipsub_saved_bytes.inc((msg.data.len * seenPeers.len).int64, labelValues = ["validation_duplicate"])
|
||||
|
||||
@@ -319,7 +374,7 @@ proc validateAndRelay(g: GossipSub,
|
||||
of ValidationResult.Reject:
|
||||
debug "Dropping message after validation, reason: reject",
|
||||
msgId = shortLog(msgId), peer
|
||||
g.punishInvalidMessage(peer, msg)
|
||||
await g.punishInvalidMessage(peer, msg)
|
||||
return
|
||||
of ValidationResult.Ignore:
|
||||
debug "Dropping message after validation, reason: ignore",
|
||||
@@ -331,18 +386,16 @@ proc validateAndRelay(g: GossipSub,
|
||||
# store in cache only after validation
|
||||
g.mcache.put(msgId, msg)
|
||||
|
||||
g.rewardDelivered(peer, msg.topicIds, true)
|
||||
let topic = msg.topic
|
||||
g.rewardDelivered(peer, topic, true)
|
||||
|
||||
var toSendPeers = HashSet[PubSubPeer]()
|
||||
for t in msg.topicIds: # for every topic in the message
|
||||
if t notin g.topics:
|
||||
continue
|
||||
if topic notin g.topics:
|
||||
return
|
||||
|
||||
g.floodsub.withValue(t, peers): toSendPeers.incl(peers[])
|
||||
g.mesh.withValue(t, peers): toSendPeers.incl(peers[])
|
||||
|
||||
# add direct peers
|
||||
toSendPeers.incl(g.subscribedDirectPeers.getOrDefault(t))
|
||||
g.floodsub.withValue(topic, peers): toSendPeers.incl(peers[])
|
||||
g.mesh.withValue(topic, peers): toSendPeers.incl(peers[])
|
||||
g.subscribedDirectPeers.withValue(topic, peers): toSendPeers.incl(peers[])
|
||||
|
||||
# Don't send it to source peer, or peers that
|
||||
# sent it during validation
|
||||
@@ -353,66 +406,55 @@ proc validateAndRelay(g: GossipSub,
|
||||
# bigger than the messageId
|
||||
if msg.data.len > msgId.len * 10:
|
||||
g.broadcast(toSendPeers, RPCMsg(control: some(ControlMessage(
|
||||
idontwant: @[ControlIWant(messageIds: @[msgId])]
|
||||
))))
|
||||
idontwant: @[ControlIWant(messageIDs: @[msgId])]
|
||||
))), isHighPriority = true)
|
||||
|
||||
for peer in toSendPeers:
|
||||
for heDontWant in peer.heDontWants:
|
||||
if msgId in heDontWant:
|
||||
if saltedId in heDontWant:
|
||||
seenPeers.incl(peer)
|
||||
libp2p_gossipsub_idontwant_saved_messages.inc
|
||||
libp2p_gossipsub_saved_bytes.inc(msg.data.len.int64, labelValues = ["idontwant"])
|
||||
break
|
||||
toSendPeers.excl(seenPeers)
|
||||
|
||||
|
||||
# In theory, if topics are the same in all messages, we could batch - we'd
|
||||
# also have to be careful to only include validated messages
|
||||
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
|
||||
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)
|
||||
trace "forwarded message to peers", peers = toSendPeers.len, msgId, peer
|
||||
for topic in msg.topicIds:
|
||||
if topic notin g.topics: continue
|
||||
|
||||
if g.knownTopics.contains(topic):
|
||||
libp2p_pubsub_messages_rebroadcasted.inc(toSendPeers.len.int64, labelValues = [topic])
|
||||
else:
|
||||
libp2p_pubsub_messages_rebroadcasted.inc(toSendPeers.len.int64, labelValues = ["generic"])
|
||||
if g.knownTopics.contains(topic):
|
||||
libp2p_pubsub_messages_rebroadcasted.inc(toSendPeers.len.int64, labelValues = [topic])
|
||||
else:
|
||||
libp2p_pubsub_messages_rebroadcasted.inc(toSendPeers.len.int64, labelValues = ["generic"])
|
||||
|
||||
await handleData(g, topic, msg.data)
|
||||
await handleData(g, topic, msg.data)
|
||||
except CatchableError as exc:
|
||||
info "validateAndRelay failed", msg=exc.msg
|
||||
|
||||
proc dataAndTopicsIdSize(msgs: seq[Message]): int =
|
||||
msgs.mapIt(it.data.len + it.topicIds.mapIt(it.len).foldl(a + b, 0)).foldl(a + b, 0)
|
||||
msgs.mapIt(it.data.len + it.topic.len).foldl(a + b, 0)
|
||||
|
||||
proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.raises:[PeerRateLimitError, CatchableError], async.} =
|
||||
proc messageOverhead(g: GossipSub, msg: RPCMsg, msgSize: int): int =
|
||||
# In this way we count even ignored fields by protobuf
|
||||
let
|
||||
payloadSize =
|
||||
if g.verifySignature:
|
||||
byteSize(msg.messages)
|
||||
else:
|
||||
dataAndTopicsIdSize(msg.messages)
|
||||
controlSize = msg.control.withValue(control):
|
||||
byteSize(control.ihave) + byteSize(control.iwant)
|
||||
do: # no control message
|
||||
0
|
||||
|
||||
var rmsg = rpcMsgOpt.valueOr:
|
||||
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
|
||||
if not overheadRateLimit.tryConsume(msgSize):
|
||||
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
|
||||
debug "Peer sent a msg that couldn't be decoded and it's above rate limit.", peer, uselessAppBytesNum = msgSize
|
||||
if g.parameters.disconnectPeerAboveRateLimit:
|
||||
await g.disconnectPeer(peer)
|
||||
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
|
||||
|
||||
raise newException(CatchableError, "Peer msg couldn't be decoded")
|
||||
|
||||
let usefulMsgBytesNum =
|
||||
if g.verifySignature:
|
||||
byteSize(rmsg.messages)
|
||||
else:
|
||||
dataAndTopicsIdSize(rmsg.messages)
|
||||
|
||||
var uselessAppBytesNum = msgSize - usefulMsgBytesNum
|
||||
rmsg.control.withValue(control):
|
||||
uselessAppBytesNum -= (byteSize(control.ihave) + byteSize(control.iwant))
|
||||
msgSize - payloadSize - controlSize
|
||||
|
||||
proc rateLimit*(g: GossipSub, peer: PubSubPeer, overhead: int) {.async.} =
|
||||
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
|
||||
if not overheadRateLimit.tryConsume(uselessAppBytesNum):
|
||||
if not overheadRateLimit.tryConsume(overhead):
|
||||
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
|
||||
debug "Peer sent too much useless application data and it's above rate limit.", peer, msgSize, uselessAppBytesNum, rmsg
|
||||
debug "Peer sent too much useless application data and it's above rate limit.", peer, overhead
|
||||
if g.parameters.disconnectPeerAboveRateLimit:
|
||||
await g.disconnectPeer(peer)
|
||||
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
|
||||
@@ -420,22 +462,31 @@ proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize:
|
||||
method rpcHandler*(g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
data: seq[byte]) {.async.} =
|
||||
|
||||
let msgSize = data.len
|
||||
var rpcMsg = decodeRpcMsg(data).valueOr:
|
||||
debug "failed to decode msg from peer", peer, err = error
|
||||
await rateLimit(g, peer, Opt.none(RPCMsg), msgSize)
|
||||
return
|
||||
await rateLimit(g, peer, msgSize)
|
||||
# Raising in the handler closes the gossipsub connection (but doesn't
|
||||
# disconnect the peer!)
|
||||
# TODO evaluate behaviour penalty values
|
||||
peer.behaviourPenalty += 0.1
|
||||
|
||||
raise newException(CatchableError, "Peer msg couldn't be decoded")
|
||||
|
||||
when defined(libp2p_expensive_metrics):
|
||||
for m in rpcMsg.messages:
|
||||
libp2p_pubsub_received_messages.inc(labelValues = [$peer.peerId, m.topic])
|
||||
|
||||
trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
|
||||
await rateLimit(g, peer, Opt.some(rpcMsg), msgSize)
|
||||
await rateLimit(g, peer, g.messageOverhead(rpcMsg, msgSize))
|
||||
|
||||
# trigger hooks
|
||||
# trigger hooks - these may modify the message
|
||||
peer.recvObservers(rpcMsg)
|
||||
|
||||
if rpcMsg.ping.len in 1..<64 and peer.pingBudget > 0:
|
||||
g.send(peer, RPCMsg(pong: rpcMsg.ping))
|
||||
g.send(peer, RPCMsg(pong: rpcMsg.ping), isHighPriority = true)
|
||||
peer.pingBudget.dec
|
||||
|
||||
for i in 0..<min(g.topicsHigh, rpcMsg.subscriptions.len):
|
||||
template sub: untyped = rpcMsg.subscriptions[i]
|
||||
g.handleSubscribe(peer, sub.topic, sub.subscribe)
|
||||
@@ -455,19 +506,15 @@ method rpcHandler*(g: GossipSub,
|
||||
if msgIdResult.isErr:
|
||||
debug "Dropping message due to failed message id generation",
|
||||
error = msgIdResult.error
|
||||
# TODO: descore peers due to error during message validation (malicious?)
|
||||
await g.punishInvalidMessage(peer, msg)
|
||||
continue
|
||||
|
||||
let
|
||||
msgId = msgIdResult.get
|
||||
msgIdSalted = msgId & g.seenSalt
|
||||
g.outstandingIWANTs.withValue(msgId, iwantRequest):
|
||||
if iwantRequest.peer.peerId == peer.peerId:
|
||||
g.outstandingIWANTs.del(msgId)
|
||||
msgIdSalted = g.salt(msgId)
|
||||
topic = msg.topic
|
||||
|
||||
# addSeen adds salt to msgId to avoid
|
||||
# remote attacking the hash function
|
||||
if g.addSeen(msgId):
|
||||
if g.addSeen(msgIdSalted):
|
||||
trace "Dropping already-seen message", msgId = shortLog(msgId), peer
|
||||
|
||||
var alreadyReceived = false
|
||||
@@ -477,8 +524,8 @@ method rpcHandler*(g: GossipSub,
|
||||
alreadyReceived = true
|
||||
|
||||
if not alreadyReceived:
|
||||
let delay = Moment.now() - g.firstSeen(msgId)
|
||||
g.rewardDelivered(peer, msg.topicIds, false, delay)
|
||||
let delay = Moment.now() - g.firstSeen(msgIdSalted)
|
||||
g.rewardDelivered(peer, topic, false, delay)
|
||||
|
||||
libp2p_gossipsub_duplicate.inc()
|
||||
|
||||
@@ -488,7 +535,7 @@ method rpcHandler*(g: GossipSub,
|
||||
libp2p_gossipsub_received.inc()
|
||||
|
||||
# avoid processing messages we are not interested in
|
||||
if msg.topicIds.allIt(it notin g.topics):
|
||||
if topic notin g.topics:
|
||||
debug "Dropping message of topic without subscription", msgId = shortLog(msgId), peer
|
||||
continue
|
||||
|
||||
@@ -496,14 +543,14 @@ method rpcHandler*(g: GossipSub,
|
||||
# always validate if signature is present or required
|
||||
debug "Dropping message due to failed signature verification",
|
||||
msgId = shortLog(msgId), peer
|
||||
g.punishInvalidMessage(peer, msg)
|
||||
await g.punishInvalidMessage(peer, msg)
|
||||
continue
|
||||
|
||||
if msg.seqno.len > 0 and msg.seqno.len != 8:
|
||||
# if we have seqno should be 8 bytes long
|
||||
debug "Dropping message due to invalid seqno length",
|
||||
msgId = shortLog(msgId), peer
|
||||
g.punishInvalidMessage(peer, msg)
|
||||
await g.punishInvalidMessage(peer, msg)
|
||||
continue
|
||||
|
||||
# g.anonymize needs no evaluation when receiving messages
|
||||
@@ -547,32 +594,31 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
|
||||
topicID: topic,
|
||||
peers: g.peerExchangeList(topic),
|
||||
backoff: g.parameters.unsubscribeBackoff.seconds.uint64)])))
|
||||
g.broadcast(mpeers, msg)
|
||||
g.broadcast(mpeers, msg, isHighPriority = true)
|
||||
|
||||
for peer in mpeers:
|
||||
g.pruned(peer, topic, backoff = some(g.parameters.unsubscribeBackoff))
|
||||
|
||||
g.mesh.del(topic)
|
||||
|
||||
|
||||
# Send unsubscribe (in reverse order to sub/graft)
|
||||
procCall PubSub(g).onTopicSubscription(topic, subscribed)
|
||||
|
||||
method publish*(g: GossipSub,
|
||||
topic: string,
|
||||
data: seq[byte]): Future[int] {.async.} =
|
||||
# base returns always 0
|
||||
discard await procCall PubSub(g).publish(topic, data)
|
||||
|
||||
logScope:
|
||||
topic
|
||||
|
||||
trace "Publishing message on topic", data = data.shortLog
|
||||
|
||||
if topic.len <= 0: # data could be 0/empty
|
||||
debug "Empty topic, skipping publish"
|
||||
return 0
|
||||
|
||||
# base returns always 0
|
||||
discard await procCall PubSub(g).publish(topic, data)
|
||||
|
||||
trace "Publishing message on topic", data = data.shortLog
|
||||
|
||||
var peers: HashSet[PubSubPeer]
|
||||
|
||||
# add always direct peers
|
||||
@@ -585,38 +631,39 @@ method publish*(g: GossipSub,
|
||||
# With flood publishing enabled, the mesh is used when propagating messages from other peers,
|
||||
# but a peer's own messages will always be published to all known peers in the topic, limited
|
||||
# to the amount of peers we can send it to in one heartbeat
|
||||
var maxPeersToFlodOpt: Opt[int64]
|
||||
if g.parameters.bandwidthEstimatebps > 0:
|
||||
let
|
||||
bandwidth = (g.parameters.bandwidthEstimatebps) div 8 div 1000 # Divisions are to convert it to Bytes per ms TODO replace with bandwidth estimate
|
||||
msToTransmit = max(data.len div bandwidth, 1)
|
||||
maxPeersToFlodOpt = Opt.some(max(g.parameters.heartbeatInterval.milliseconds div msToTransmit, g.parameters.dLow))
|
||||
|
||||
let maxPeersToFlood =
|
||||
if g.parameters.bandwidthEstimatebps > 0:
|
||||
let
|
||||
bandwidth = (g.parameters.bandwidthEstimatebps) div 8 div 1000 # Divisions are to convert it to Bytes per ms TODO replace with bandwidth estimate
|
||||
msToTransmit = max(data.len div bandwidth, 1)
|
||||
max(g.parameters.heartbeatInterval.milliseconds div msToTransmit, g.parameters.dLow)
|
||||
else:
|
||||
int.high() # unlimited
|
||||
|
||||
for peer in g.gossipsub.getOrDefault(topic):
|
||||
maxPeersToFlodOpt.withValue(maxPeersToFlod):
|
||||
if peers.len >= maxPeersToFlod: break
|
||||
if peers.len >= maxPeersToFlood: break
|
||||
|
||||
if peer.score >= g.parameters.publishThreshold:
|
||||
trace "publish: including flood/high score peer", peer
|
||||
peers.incl(peer)
|
||||
|
||||
if peers.len < g.parameters.dLow:
|
||||
# not subscribed, or bad mesh, send to fanout peers
|
||||
var fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
|
||||
if fanoutPeers.len < g.parameters.dLow:
|
||||
g.replenishFanout(topic)
|
||||
fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
|
||||
elif peers.len < g.parameters.dLow:
|
||||
# not subscribed or bad mesh, send to fanout peers
|
||||
# when flood-publishing, fanout won't help since all potential peers have
|
# already been added

g.replenishFanout(topic) # Make sure fanout is populated

var fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
g.rng.shuffle(fanoutPeers)

for fanPeer in fanoutPeers:
peers.incl(fanPeer)
if peers.len > g.parameters.d: break

# even if we couldn't publish,
# we still attempted to publish
# on the topic, so it makes sense
# to update the last topic publish
# time
# Attempting to publish counts as fanout send (even if the message
# ultimately is not sent)
g.lastFanoutPubSub[topic] = Moment.fromNow(g.parameters.fanoutTTL)

if peers.len == 0:
@@ -644,14 +691,16 @@ method publish*(g: GossipSub,

trace "Created new message", msg = shortLog(msg), peers = peers.len

if g.addSeen(msgId):
# custom msgid providers might cause this
if g.addSeen(g.salt(msgId)):
# If the message was received or published recently, don't re-publish it -
# this might happen when not using sequence id:s and / or with a custom
# message id provider
trace "Dropping already-seen message"
return 0

g.mcache.put(msgId, msg)

g.broadcast(peers, RPCMsg(messages: @[msg]))
g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)

if g.knownTopics.contains(topic):
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = [topic])
@@ -686,30 +735,40 @@ proc maintainDirectPeers(g: GossipSub) {.async.} =

for id, addrs in g.parameters.directPeers:
await g.addDirectPeer(id, addrs)

method start*(g: GossipSub) {.async.} =
method start*(
g: GossipSub
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()
fut.complete()

trace "gossipsub start"

if not g.heartbeatFut.isNil:
warn "Starting gossipsub twice"
return
return fut

g.heartbeatFut = g.heartbeat()
g.scoringHeartbeatFut = g.scoringHeartbeat()
g.directPeersLoop = g.maintainDirectPeers()
g.started = true
fut

method stop*(g: GossipSub): Future[void] {.async: (raises: [], raw: true).} =
let fut = newFuture[void]()
fut.complete()

method stop*(g: GossipSub) {.async.} =
trace "gossipsub stop"
g.started = false
if g.heartbeatFut.isNil:
warn "Stopping gossipsub without starting it"
return
return fut

# stop heartbeat interval
g.directPeersLoop.cancel()
g.scoringHeartbeatFut.cancel()
g.heartbeatFut.cancel()
g.heartbeatFut = nil
fut

method initPubSub*(g: GossipSub)
{.raises: [InitializationError].} =
@@ -723,7 +782,7 @@ method initPubSub*(g: GossipSub)
raise newException(InitializationError, $validationRes.error)

# init the floodsub stuff here, we customize timedcache in gossip!
g.seen = TimedCache[MessageId].init(g.parameters.seenTTL)
g.seen = TimedCache[SaltedId].init(g.parameters.seenTTL)

# init gossip stuff
g.mcache = MCache.init(g.parameters.historyGossip, g.parameters.historyLength)
@@ -736,4 +795,5 @@ method getOrCreatePeer*(

let peer = procCall PubSub(g).getOrCreatePeer(peerId, protos)
g.parameters.overheadRateLimit.withValue(overheadRateLimit):
peer.overheadRateLimitOpt = Opt.some(TokenBucket.new(overheadRateLimit.bytes, overheadRateLimit.interval))
peer.maxNumElementsInNonPriorityQueue = g.parameters.maxNumElementsInNonPriorityQueue
return peer
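The seen-cache changes above (g.addSeen(g.salt(msgId)), TimedCache[SaltedId]) key the cache on a salted digest rather than the raw, variable-length message id. The salt implementation itself is not part of this diff; the following is only a rough sketch of how such a key could be derived, assuming nimcrypto's sha256 and a random per-instance salt:

import nimcrypto/[hash, sha2]

type SaltedId = object
  data: MDigest[256]

proc salt(seenSalt: openArray[byte], msgId: openArray[byte]): SaltedId =
  # Hash the node-local salt together with the message id so cache lookups use
  # a fixed-size key an attacker cannot predict (hash-poisoning defence).
  var ctx: sha256
  ctx.init()
  ctx.update(seenSalt)
  ctx.update(msgId)
  result = SaltedId(data: ctx.finish())
  ctx.clear()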
@@ -30,7 +30,7 @@ declareGauge(libp2p_gossipsub_healthy_peers_topics, "number of topics in mesh wi
declareCounter(libp2p_gossipsub_above_dhigh_condition, "number of above dhigh pruning branches ran", labels = ["topic"])
declareGauge(libp2p_gossipsub_received_iwants, "received iwants", labels = ["kind"])

proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) {.raises: [].} =
proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) =
g.withPeerStats(p.peerId) do (stats: var PeerStats):
var info = stats.topicInfos.getOrDefault(topic)
info.graftTime = Moment.now()
@@ -46,7 +46,7 @@ proc pruned*(g: GossipSub,
p: PubSubPeer,
topic: string,
setBackoff: bool = true,
backoff = none(Duration)) {.raises: [].} =
backoff = none(Duration)) =
if setBackoff:
let
backoffDuration = backoff.get(g.parameters.pruneBackoff)
@@ -70,7 +70,7 @@ proc pruned*(g: GossipSub,

trace "pruned", peer=p, topic

proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [].} =
proc handleBackingOff*(t: var BackoffTable, topic: string) =
let now = Moment.now()
var expired = toSeq(t.getOrDefault(topic).pairs())
expired.keepIf do (pair: tuple[peer: PeerId, expire: Moment]) -> bool:
@@ -79,7 +79,7 @@ proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [].} =
t.withValue(topic, v):
v[].del(peer)

proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] {.raises: [].} =
proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] =
if not g.parameters.enablePX:
return @[]
var peers = g.gossipsub.getOrDefault(topic, initHashSet[PubSubPeer]()).toSeq()
@@ -100,11 +100,11 @@ proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] {.raises:

proc handleGraft*(g: GossipSub,
peer: PubSubPeer,
grafts: seq[ControlGraft]): seq[ControlPrune] = # {.raises: [Defect].} TODO chronicles exception on windows
grafts: seq[ControlGraft]): seq[ControlPrune] =
var prunes: seq[ControlPrune]
for graft in grafts:
let topic = graft.topicId
trace "peer grafted topic", peer, topic
let topic = graft.topicID
trace "peer grafted topicID", peer, topic

# It is an error to GRAFT on a direct peer
if peer.peerId in g.parameters.directPeers:
@@ -204,12 +204,11 @@ proc getPeers(prune: ControlPrune, peer: PubSubPeer): seq[(PeerId, Option[PeerRe

routingRecords

proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.raises: [].} =
proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) =
for prune in prunes:
let topic = prune.topicId
let topic = prune.topicID

trace "peer pruned topic", peer, topic
trace "peer pruned topicID", peer, topic

# add peer backoff
if prune.backoff > 0:
@@ -239,7 +238,7 @@ proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.r

proc handleIHave*(g: GossipSub,
peer: PubSubPeer,
ihaves: seq[ControlIHave]): ControlIWant {.raises: [].} =
ihaves: seq[ControlIHave]): ControlIWant =
var res: ControlIWant
if peer.score < g.parameters.gossipThreshold:
trace "ihave: ignoring low score peer", peer, score = peer.score
@@ -248,34 +247,32 @@ proc handleIHave*(g: GossipSub,
else:
for ihave in ihaves:
trace "peer sent ihave",
peer, topic = ihave.topicId, msgs = ihave.messageIds
if ihave.topicId in g.topics:
for msgId in ihave.messageIds:
if not g.hasSeen(msgId):
peer, topicID = ihave.topicID, msgs = ihave.messageIDs
if ihave.topicID in g.topics:
for msgId in ihave.messageIDs:
if not g.hasSeen(g.salt(msgId)):
if peer.iHaveBudget <= 0:
break
elif msgId notin res.messageIds and msgId notin g.outstandingIWANTs:
g.outstandingIWANTs[msgId] = IWANTRequest(messageId: msgId, peer: peer, timestamp: Moment.now())
res.messageIds.add(msgId)
elif msgId notin res.messageIDs:
res.messageIDs.add(msgId)
dec peer.iHaveBudget
trace "requested message via ihave", messageID=msgId
# shuffling res.messageIDs before sending it out to increase the likelihood
# of getting an answer if the peer truncates the list due to internal size restrictions.
g.rng.shuffle(res.messageIds)
g.rng.shuffle(res.messageIDs)
return res

proc handleIDontWant*(g: GossipSub,
peer: PubSubPeer,
iDontWants: seq[ControlIWant]) =
for dontWant in iDontWants:
for messageId in dontWant.messageIds:
for messageId in dontWant.messageIDs:
if peer.heDontWants[^1].len > 1000: break
if messageId.len > 100: continue
peer.heDontWants[^1].incl(messageId)
peer.heDontWants[^1].incl(g.salt(messageId))

proc handleIWant*(g: GossipSub,
peer: PubSubPeer,
iwants: seq[ControlIWant]): seq[Message] {.raises: [].} =
iwants: seq[ControlIWant]): seq[Message] =
var
messages: seq[Message]
invalidRequests = 0
@@ -283,7 +280,7 @@ proc handleIWant*(g: GossipSub,
trace "iwant: ignoring low score peer", peer, score = peer.score
else:
for iwant in iwants:
for mid in iwant.messageIds:
for mid in iwant.messageIDs:
trace "peer sent iwant", peer, messageID = mid
# canAskIWant will only return true once for a specific message
if not peer.canAskIWant(mid):
@@ -301,18 +298,7 @@ proc handleIWant*(g: GossipSub,
messages.add(msg)
return messages

proc checkIWANTTimeouts(g: GossipSub, timeoutDuration: Duration) {.raises: [].} =
let currentTime = Moment.now()
var idsToRemove = newSeq[MessageId]()
for msgId, request in g.outstandingIWANTs.pairs():
if currentTime - request.timestamp > timeoutDuration:
trace "IWANT request timed out", messageID=msgId, peer=request.peer
request.peer.behaviourPenalty += 0.1
idsToRemove.add(msgId)
for msgId in idsToRemove:
g.outstandingIWANTs.del(msgId)

proc commitMetrics(metrics: var MeshMetrics) {.raises: [].} =
proc commitMetrics(metrics: var MeshMetrics) =
libp2p_gossipsub_low_peers_topics.set(metrics.lowPeersTopics)
libp2p_gossipsub_no_peers_topics.set(metrics.noPeersTopics)
libp2p_gossipsub_under_dout_topics.set(metrics.underDoutTopics)
@@ -321,7 +307,7 @@ proc commitMetrics(metrics: var MeshMetrics) {.raises: [].} =
libp2p_gossipsub_peers_per_topic_fanout.set(metrics.otherPeersPerTopicFanout, labelValues = ["other"])
libp2p_gossipsub_peers_per_topic_mesh.set(metrics.otherPeersPerTopicMesh, labelValues = ["other"])

proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) {.raises: [].} =
proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) =
logScope:
topic
mesh = g.mesh.peers(topic)
@@ -542,16 +528,16 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
# Send changes to peers after table updates to avoid stale state
if grafts.len > 0:
let graft = RPCMsg(control: some(ControlMessage(graft: @[ControlGraft(topicID: topic)])))
g.broadcast(grafts, graft)
g.broadcast(grafts, graft, isHighPriority = true)
if prunes.len > 0:
let prune = RPCMsg(control: some(ControlMessage(
prune: @[ControlPrune(
topicID: topic,
peers: g.peerExchangeList(topic),
backoff: g.parameters.pruneBackoff.seconds.uint64)])))
g.broadcast(prunes, prune)
g.broadcast(prunes, prune, isHighPriority = true)

proc dropFanoutPeers*(g: GossipSub) {.raises: [].} =
proc dropFanoutPeers*(g: GossipSub) =
# drop peers that we haven't published to in
# GossipSubFanoutTTL seconds
let now = Moment.now()
@@ -564,7 +550,7 @@ proc dropFanoutPeers*(g: GossipSub) {.raises: [].} =
for topic in drops:
g.lastFanoutPubSub.del topic
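For orientation, publish (earlier in this diff) refreshes g.lastFanoutPubSub[topic] with Moment.fromNow(g.parameters.fanoutTTL), and dropFanoutPeers above deletes fanout state once that moment has passed. A stripped-down sketch of the expiry check (names other than lastFanoutPubSub are illustrative):

import std/tables
import chronos  # Moment

proc expiredFanoutTopics(lastFanoutPubSub: Table[string, Moment]): seq[string] =
  # Collect topics whose publish deadline has lapsed; the caller then drops
  # the fanout peers and the lastFanoutPubSub entry for each of them.
  let now = Moment.now()
  for topic, expiry in lastFanoutPubSub:
    if now > expiry:
      result.add(topic)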
proc replenishFanout*(g: GossipSub, topic: string) {.raises: [].} =
proc replenishFanout*(g: GossipSub, topic: string) =
## get fanout peers for a topic
logScope: topic
trace "about to replenish fanout"
@@ -580,7 +566,7 @@ proc replenishFanout*(g: GossipSub, topic: string) {.raises: [].} =

trace "fanout replenished with peers", peers = g.fanout.peers(topic)

proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises: [].} =
proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] =
## gossip iHave messages to peers
##

@@ -591,7 +577,7 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises:
trace "getting gossip peers (iHave)", ntopics=topics.len
for topic in topics:
if topic notin g.gossipsub:
trace "topic not in gossip array, skipping", topicID = topic
trace "topic not in gossip array, skipping", topic = topic
continue

let mids = g.mcache.window(topic)
@@ -624,26 +610,25 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises:
x notin gossipPeers and
x.score >= g.parameters.gossipThreshold

var target = g.parameters.dLazy
let factor = (g.parameters.gossipFactor.float * allPeers.len.float).int
if factor > target:
target = min(factor, allPeers.len)
# https://github.com/libp2p/specs/blob/98c5aa9421703fc31b0833ad8860a55db15be063/pubsub/gossipsub/gossipsub-v1.1.md#adaptive-gossip-dissemination
let
factor = (g.parameters.gossipFactor.float * allPeers.len.float).int
target = max(g.parameters.dLazy, factor)

if target < allPeers.len:
g.rng.shuffle(allPeers)
allPeers.setLen(target)

let msgIdsAsSet = ihave.messageIds.toHashSet()

for peer in allPeers:
control.mgetOrPut(peer, ControlMessage()).ihave.add(ihave)
peer.sentIHaves[^1].incl(msgIdsAsSet)
for msgId in ihave.messageIDs:
peer.sentIHaves[^1].incl(msgId)

libp2p_gossipsub_cache_window_size.set(cacheWindowSize.int64)

return control
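The rewritten block follows the adaptive gossip dissemination rule referenced above: the gossip target is the larger of dLazy and gossipFactor times the number of candidates, then capped by the candidate count. A small worked sketch of that arithmetic:

proc gossipTargetCount(dLazy: int, gossipFactor: float, candidates: int): int =
  # e.g. dLazy = 6, gossipFactor = 0.25, candidates = 60:
  #   factor = int(0.25 * 60) = 15, target = max(6, 15) = 15, capped at 60 -> 15
  let factor = int(gossipFactor * candidates.float)
  min(max(dLazy, factor), candidates)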
proc onHeartbeat(g: GossipSub) {.raises: [].} =
|
||||
proc onHeartbeat(g: GossipSub) =
|
||||
# reset IWANT budget
|
||||
# reset IHAVE cap
|
||||
block:
|
||||
@@ -651,7 +636,7 @@ proc onHeartbeat(g: GossipSub) {.raises: [].} =
|
||||
peer.sentIHaves.addFirst(default(HashSet[MessageId]))
|
||||
if peer.sentIHaves.len > g.parameters.historyLength:
|
||||
discard peer.sentIHaves.popLast()
|
||||
peer.heDontWants.addFirst(default(HashSet[MessageId]))
|
||||
peer.heDontWants.addFirst(default(HashSet[SaltedId]))
|
||||
if peer.heDontWants.len > g.parameters.historyLength:
|
||||
discard peer.heDontWants.popLast()
|
||||
peer.iHaveBudget = IHavePeerBudget
|
||||
@@ -675,13 +660,14 @@ proc onHeartbeat(g: GossipSub) {.raises: [].} =
|
||||
g.pruned(peer, t)
|
||||
g.mesh.removePeer(t, peer)
|
||||
prunes &= peer
|
||||
peer.clearNonPriorityQueue()
|
||||
if prunes.len > 0:
|
||||
let prune = RPCMsg(control: some(ControlMessage(
|
||||
prune: @[ControlPrune(
|
||||
topicID: t,
|
||||
peers: g.peerExchangeList(t),
|
||||
backoff: g.parameters.pruneBackoff.seconds.uint64)])))
|
||||
g.broadcast(prunes, prune)
|
||||
g.broadcast(prunes, prune, isHighPriority = true)
|
||||
|
||||
# pass by ptr in order to both signal we want to update metrics
|
||||
# and as well update the struct for each topic during this iteration
|
||||
@@ -699,16 +685,14 @@ proc onHeartbeat(g: GossipSub) {.raises: [].} =
|
||||
for peer, control in peers:
|
||||
# only ihave from here
|
||||
for ihave in control.ihave:
|
||||
if g.knownTopics.contains(ihave.topicId):
|
||||
libp2p_pubsub_broadcast_ihave.inc(labelValues = [ihave.topicId])
|
||||
if g.knownTopics.contains(ihave.topicID):
|
||||
libp2p_pubsub_broadcast_ihave.inc(labelValues = [ihave.topicID])
|
||||
else:
|
||||
libp2p_pubsub_broadcast_ihave.inc(labelValues = ["generic"])
|
||||
g.send(peer, RPCMsg(control: some(control)))
|
||||
g.send(peer, RPCMsg(control: some(control)), isHighPriority = true)
|
||||
|
||||
g.mcache.shift() # shift the cache
|
||||
|
||||
# {.pop.} # raises []
|
||||
|
||||
proc heartbeat*(g: GossipSub) {.async.} =
|
||||
heartbeat "GossipSub", g.parameters.heartbeatInterval:
|
||||
trace "running heartbeat", instance = cast[int](g)
|
||||
@@ -717,5 +701,3 @@ proc heartbeat*(g: GossipSub) {.async.} =
|
||||
for trigger in g.heartbeatEvents:
|
||||
trace "firing heartbeat event", instance = cast[int](g)
|
||||
trigger.fire()
|
||||
|
||||
checkIWANTTimeouts(g, g.parameters.iwantTimeout)
|
||||
|
||||
@@ -87,8 +87,6 @@ proc colocationFactor(g: GossipSub, peer: PubSubPeer): float64 =
|
||||
else:
|
||||
0.0
|
||||
|
||||
{.pop.}
|
||||
|
||||
proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async.} =
|
||||
try:
|
||||
await g.switch.disconnect(peer.peerId)
|
||||
@@ -240,53 +238,52 @@ proc scoringHeartbeat*(g: GossipSub) {.async.} =
|
||||
trace "running scoring heartbeat", instance = cast[int](g)
|
||||
g.updateScores()
|
||||
|
||||
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) =
|
||||
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) {.async.} =
|
||||
let uselessAppBytesNum = msg.data.len
|
||||
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
|
||||
if not overheadRateLimit.tryConsume(uselessAppBytesNum):
|
||||
debug "Peer sent invalid message and it's above rate limit", peer, uselessAppBytesNum
|
||||
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
|
||||
# discard g.disconnectPeer(peer)
|
||||
# debug "Peer disconnected", peer, uselessAppBytesNum
|
||||
# raise newException(PeerRateLimitError, "Peer sent invalid message and it's above rate limit")
|
||||
if g.parameters.disconnectPeerAboveRateLimit:
|
||||
await g.disconnectPeer(peer)
|
||||
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
|
||||
|
||||
let topic = msg.topic
|
||||
if topic notin g.topics:
|
||||
return
|
||||
|
||||
for tt in msg.topicIds:
|
||||
let t = tt
|
||||
if t notin g.topics:
|
||||
continue
|
||||
|
||||
let tt = t
|
||||
# update stats
|
||||
g.withPeerStats(peer.peerId) do (stats: var PeerStats):
|
||||
stats.topicInfos.mgetOrPut(tt, TopicInfo()).invalidMessageDeliveries += 1
|
||||
# update stats
|
||||
g.withPeerStats(peer.peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos.mgetOrPut(topic, TopicInfo()).invalidMessageDeliveries += 1
|
||||
|
||||
proc addCapped*[T](stat: var T, diff, cap: T) =
|
||||
stat += min(diff, cap - stat)
|
||||
|
||||
proc rewardDelivered*(
|
||||
g: GossipSub, peer: PubSubPeer, topics: openArray[string], first: bool, delay = ZeroDuration) =
|
||||
for tt in topics:
|
||||
let t = tt
|
||||
if t notin g.topics:
|
||||
continue
|
||||
g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
topic: string,
|
||||
first: bool,
|
||||
delay = ZeroDuration,
|
||||
) =
|
||||
if topic notin g.topics:
|
||||
return
|
||||
|
||||
let tt = t
|
||||
let topicParams = g.topicParams.mgetOrPut(t, TopicParams.init())
|
||||
# if in mesh add more delivery score
|
||||
let topicParams = g.topicParams.mgetOrPut(topic, TopicParams.init())
|
||||
# if in mesh add more delivery score
|
||||
|
||||
if delay > topicParams.meshMessageDeliveriesWindow:
|
||||
# Too old
|
||||
continue
|
||||
if delay > topicParams.meshMessageDeliveriesWindow:
|
||||
# Too old
|
||||
return
|
||||
|
||||
g.withPeerStats(peer.peerId) do (stats: var PeerStats):
|
||||
stats.topicInfos.withValue(tt, tstats):
|
||||
if first:
|
||||
tstats[].firstMessageDeliveries.addCapped(
|
||||
1, topicParams.firstMessageDeliveriesCap)
|
||||
g.withPeerStats(peer.peerId) do (stats: var PeerStats):
|
||||
stats.topicInfos.withValue(topic, tstats):
|
||||
if first:
|
||||
tstats[].firstMessageDeliveries.addCapped(
|
||||
1, topicParams.firstMessageDeliveriesCap)
|
||||
|
||||
if tstats[].inMesh:
|
||||
tstats[].meshMessageDeliveries.addCapped(
|
||||
1, topicParams.meshMessageDeliveriesCap)
|
||||
do: # make sure we don't loose this information
|
||||
stats.topicInfos[tt] = TopicInfo(meshMessageDeliveries: 1)
|
||||
if tstats[].inMesh:
|
||||
tstats[].meshMessageDeliveries.addCapped(
|
||||
1, topicParams.meshMessageDeliveriesCap)
|
||||
do: # make sure we don't lose this information
|
||||
stats.topicInfos[topic] = TopicInfo(meshMessageDeliveries: 1)
|
||||
|
||||
@@ -102,6 +102,11 @@ type
|
||||
behaviourPenalty*: float64 # the eventual penalty score
|
||||
|
||||
GossipSubParams* {.public.} = object
|
||||
# explicit is used to check if the GossipSubParams instance was created by the user either passing params to GossipSubParams(...)
|
||||
# or GossipSubParams.init(...). In the first case explicit should be set to true when calling the Nim constructor.
|
||||
# In the second case, the param isn't necessary and should be always be set to true by init.
|
||||
# If none of those options were used, it means the instance was created using Nim default values.
|
||||
# In this case, GossipSubParams.init() should be called when initing GossipSub to set the values to their default value defined by nim-libp2p.
|
||||
explicit*: bool
|
||||
pruneBackoff*: Duration
|
||||
unsubscribeBackoff*: Duration
|
||||
@@ -143,13 +148,15 @@ type
|
||||
enablePX*: bool
|
||||
|
||||
bandwidthEstimatebps*: int # This is currently used only for limting flood publishing. 0 disables flood-limiting completely
|
||||
iwantTimeout*: Duration
|
||||
|
||||
overheadRateLimit*: Opt[tuple[bytes: int, interval: Duration]]
|
||||
disconnectPeerAboveRateLimit*: bool
|
||||
|
||||
# Max number of elements allowed in the non-priority queue. When this limit has been reached, the peer will be disconnected.
|
||||
maxNumElementsInNonPriorityQueue*: int
|
||||
|
||||
BackoffTable* = Table[string, Table[PeerId, Moment]]
|
||||
ValidationSeenTable* = Table[MessageId, HashSet[PubSubPeer]]
|
||||
ValidationSeenTable* = Table[SaltedId, HashSet[PubSubPeer]]
|
||||
|
||||
RoutingRecordsPair* = tuple[id: PeerId, record: Option[PeerRecord]]
|
||||
RoutingRecordsHandler* =
|
||||
@@ -165,8 +172,6 @@ type
|
||||
subscribedDirectPeers*: PeerTable # directpeers that we keep alive
|
||||
backingOff*: BackoffTable # peers to backoff from when replenishing the mesh
|
||||
lastFanoutPubSub*: Table[string, Moment] # last publish time for fanout topics
|
||||
gossip*: Table[string, seq[ControlIHave]] # pending gossip
|
||||
control*: Table[string, ControlMessage] # pending control messages
|
||||
mcache*: MCache # messages cache
|
||||
validationSeen*: ValidationSeenTable # peers who sent us message in validation
|
||||
heartbeatFut*: Future[void] # cancellation future for heartbeat interval
|
||||
@@ -181,7 +186,6 @@ type
|
||||
routingRecordsHandler*: seq[RoutingRecordsHandler] # Callback for peer exchange
|
||||
|
||||
heartbeatEvents*: seq[AsyncEvent]
|
||||
outstandingIWANTs*: Table[MessageId, IWANTRequest]
|
||||
|
||||
MeshMetrics* = object
|
||||
# scratch buffers for metrics
|
||||
@@ -192,8 +196,3 @@ type
|
||||
lowPeersTopics*: int64 # npeers < dlow
|
||||
healthyPeersTopics*: int64 # npeers >= dlow
|
||||
underDoutTopics*: int64
|
||||
|
||||
IWANTRequest* = object
|
||||
messageId*: MessageId
|
||||
peer*: PubSubPeer
|
||||
timestamp*: Moment
|
||||
|
||||
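Putting the new parameters together, a configuration sketch (values are illustrative, not library defaults). Note the comment on explicit above: parameters built through GossipSubParams.init() are marked explicit, whereas a plain default-initialised object is replaced by init() defaults when GossipSub itself is initialised.

import chronos

# assumes the gossipsub parameter types above are in scope
var params = GossipSubParams.init()
params.overheadRateLimit = Opt.some((bytes: 128 * 1024, interval: 1.seconds))
params.disconnectPeerAboveRateLimit = true
params.maxNumElementsInNonPriorityQueue = 2048
# getOrCreatePeer (earlier in this diff) then equips each peer with
# TokenBucket.new(128 * 1024, 1.seconds); punishInvalidMessage charges that
# bucket via tryConsume(msg.data.len) and, once it is exhausted, records a
# rate-limit hit and - with the flag above - disconnects the peer.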
@@ -9,52 +9,57 @@

{.push raises: [].}

import std/[sets, tables, options]
import std/[sets, tables]
import rpc/[messages]
import results

export sets, tables, messages, options
export sets, tables, messages, results

type
CacheEntry* = object
mid*: MessageId
topicIds*: seq[string]
msgId*: MessageId
topic*: string

MCache* = object of RootObj
msgs*: Table[MessageId, Message]
history*: seq[seq[CacheEntry]]
pos*: int
windowSize*: Natural

func get*(c: MCache, mid: MessageId): Option[Message] =
if mid in c.msgs:
try: some(c.msgs[mid])
func get*(c: MCache, msgId: MessageId): Opt[Message] =
if msgId in c.msgs:
try: Opt.some(c.msgs[msgId])
except KeyError: raiseAssert "checked"
else:
none(Message)
Opt.none(Message)

func contains*(c: MCache, mid: MessageId): bool =
mid in c.msgs
func contains*(c: MCache, msgId: MessageId): bool =
msgId in c.msgs

func put*(c: var MCache, msgId: MessageId, msg: Message) =
if not c.msgs.hasKeyOrPut(msgId, msg):
# Only add cache entry if the message was not already in the cache
c.history[0].add(CacheEntry(mid: msgId, topicIds: msg.topicIds))
c.history[c.pos].add(CacheEntry(msgId: msgId, topic: msg.topic))

func window*(c: MCache, topic: string): HashSet[MessageId] =
let
len = min(c.windowSize, c.history.len)

for i in 0..<len:
for entry in c.history[i]:
for t in entry.topicIds:
if t == topic:
result.incl(entry.mid)
break
# Work backwards from `pos` in the circular buffer
for entry in c.history[(c.pos + c.history.len - i) mod c.history.len]:
if entry.topic == topic:
result.incl(entry.msgId)

func shift*(c: var MCache) =
for entry in c.history.pop():
c.msgs.del(entry.mid)
# Shift circular buffer to write to a new position, clearing it from past
# iterations
c.pos = (c.pos + 1) mod c.history.len

c.history.insert(@[])
for entry in c.history[c.pos]:
c.msgs.del(entry.msgId)

reset(c.history[c.pos])

func init*(T: type MCache, window, history: Natural): T =
T(
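A short usage sketch of the rewritten cache, using only the API shown above (init's body is truncated in this diff, so the exact field initialisation is assumed): put() records a message in the current slot, window() looks back over windowSize slots, and shift() advances the circular buffer, evicting whatever the reused slot still held.

var cache = MCache.init(window = 3, history = 5)

let
  msgId: MessageId = @[1'u8, 2, 3]
  msg = Message(data: @[42'u8], topic: "test-topic")

cache.put(msgId, msg)
assert msgId in cache
assert msgId in cache.window("test-topic")

for _ in 0 ..< 5:   # one full trip around a 5-slot history
  cache.shift()
assert msgId notin cache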
@@ -30,7 +30,6 @@ import ./errors as pubsub_errors,
../../errors,
../../utility

import metrics
import stew/results
export results

@@ -138,18 +137,34 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =

libp2p_pubsub_peers.set(p.peers.len.int64)

proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg) {.raises: [].} =
## Attempt to send `msg` to remote peer
proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to the specified remote peer in the PubSub network.
##
## Parameters:
## - `p`: The `PubSub` instance.
## - `peer`: An instance of `PubSubPeer` representing the peer to whom the message should be sent.
## - `msg`: The `RPCMsg` instance that contains the message to be sent.
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.

trace "sending pubsub message to peer", peer, msg = shortLog(msg)
peer.send(msg, p.anonymize)
peer.send(msg, p.anonymize, isHighPriority)

proc broadcast*(
p: PubSub,
sendPeers: auto, # Iteratble[PubSubPeer]
msg: RPCMsg) {.raises: [].} =
## Attempt to send `msg` to the given peers
msg: RPCMsg,
isHighPriority: bool) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to a specified group of peers in the PubSub network.
##
## Parameters:
## - `p`: The `PubSub` instance.
## - `sendPeers`: An iterable of `PubSubPeer` instances representing the peers to whom the message should be sent.
## - `msg`: The `RPCMsg` instance that contains the message to be broadcast.
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.

let npeers = sendPeers.len.int64
for sub in msg.subscriptions:
@@ -165,28 +180,28 @@ proc broadcast*(
libp2p_pubsub_broadcast_unsubscriptions.inc(npeers, labelValues = ["generic"])

for smsg in msg.messages:
for topic in smsg.topicIds:
if p.knownTopics.contains(topic):
libp2p_pubsub_broadcast_messages.inc(npeers, labelValues = [topic])
else:
libp2p_pubsub_broadcast_messages.inc(npeers, labelValues = ["generic"])
let topic = smsg.topic
if p.knownTopics.contains(topic):
libp2p_pubsub_broadcast_messages.inc(npeers, labelValues = [topic])
else:
libp2p_pubsub_broadcast_messages.inc(npeers, labelValues = ["generic"])

msg.control.withValue(control):
libp2p_pubsub_broadcast_iwant.inc(npeers * control.iwant.len.int64)

for ihave in control.ihave:
if p.knownTopics.contains(ihave.topicId):
libp2p_pubsub_broadcast_ihave.inc(npeers, labelValues = [ihave.topicId])
if p.knownTopics.contains(ihave.topicID):
libp2p_pubsub_broadcast_ihave.inc(npeers, labelValues = [ihave.topicID])
else:
libp2p_pubsub_broadcast_ihave.inc(npeers, labelValues = ["generic"])
for graft in control.graft:
if p.knownTopics.contains(graft.topicId):
libp2p_pubsub_broadcast_graft.inc(npeers, labelValues = [graft.topicId])
if p.knownTopics.contains(graft.topicID):
libp2p_pubsub_broadcast_graft.inc(npeers, labelValues = [graft.topicID])
else:
libp2p_pubsub_broadcast_graft.inc(npeers, labelValues = ["generic"])
for prune in control.prune:
if p.knownTopics.contains(prune.topicId):
libp2p_pubsub_broadcast_prune.inc(npeers, labelValues = [prune.topicId])
if p.knownTopics.contains(prune.topicID):
libp2p_pubsub_broadcast_prune.inc(npeers, labelValues = [prune.topicID])
else:
libp2p_pubsub_broadcast_prune.inc(npeers, labelValues = ["generic"])

@@ -195,19 +210,19 @@ proc broadcast*(

if anyIt(sendPeers, it.hasObservers):
for peer in sendPeers:
p.send(peer, msg)
p.send(peer, msg, isHighPriority)
else:
# Fast path that only encodes message once
let encoded = encodeRpcMsg(msg, p.anonymize)
for peer in sendPeers:
asyncSpawn peer.sendEncoded(encoded)
asyncSpawn peer.sendEncoded(encoded, isHighPriority)
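As the RpcMessageQueue comments later in this diff spell out, the new flag separates a peer's own publishes and control traffic (sent immediately) from relayed messages and IWANT replies (queued). A condensed usage sketch with the same call shapes as above (variable names are illustrative):

# own publish and GRAFT/PRUNE/subscription control: skip the non-priority queue
g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)

# forwarding a message received from another peer: queued behind priority sends
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)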
proc sendSubs*(p: PubSub,
peer: PubSubPeer,
topics: openArray[string],
subscribe: bool) =
## send subscriptions to remote peer
p.send(peer, RPCMsg.withSubs(topics, subscribe))
p.send(peer, RPCMsg.withSubs(topics, subscribe), isHighPriority = true)

for topic in topics:
if subscribe:
@@ -236,29 +251,27 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
libp2p_pubsub_received_unsubscriptions.inc(labelValues = ["generic"])

for i in 0..<rpcMsg.messages.len():
template smsg: untyped = rpcMsg.messages[i]
for j in 0..<smsg.topicIds.len():
template topic: untyped = smsg.topicIds[j]
if p.knownTopics.contains(topic):
libp2p_pubsub_received_messages.inc(labelValues = [topic])
else:
libp2p_pubsub_received_messages.inc(labelValues = ["generic"])
let topic = rpcMsg.messages[i].topic
if p.knownTopics.contains(topic):
libp2p_pubsub_received_messages.inc(labelValues = [topic])
else:
libp2p_pubsub_received_messages.inc(labelValues = ["generic"])

rpcMsg.control.withValue(control):
libp2p_pubsub_received_iwant.inc(control.iwant.len.int64)
for ihave in control.ihave:
if p.knownTopics.contains(ihave.topicId):
libp2p_pubsub_received_ihave.inc(labelValues = [ihave.topicId])
if p.knownTopics.contains(ihave.topicID):
libp2p_pubsub_received_ihave.inc(labelValues = [ihave.topicID])
else:
libp2p_pubsub_received_ihave.inc(labelValues = ["generic"])
for graft in control.graft:
if p.knownTopics.contains(graft.topicId):
libp2p_pubsub_received_graft.inc(labelValues = [graft.topicId])
if p.knownTopics.contains(graft.topicID):
libp2p_pubsub_received_graft.inc(labelValues = [graft.topicID])
else:
libp2p_pubsub_received_graft.inc(labelValues = ["generic"])
for prune in control.prune:
if p.knownTopics.contains(prune.topicId):
libp2p_pubsub_received_prune.inc(labelValues = [prune.topicId])
if p.knownTopics.contains(prune.topicID):
libp2p_pubsub_received_prune.inc(labelValues = [prune.topicID])
else:
libp2p_pubsub_received_prune.inc(labelValues = ["generic"])

@@ -273,11 +286,14 @@ method onNewPeer(p: PubSub, peer: PubSubPeer) {.base, gcsafe.} = discard
method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubSubPeerEvent) {.base, gcsafe.} =
# Peer event is raised for the send connection in particular
case event.kind
of PubSubPeerEventKind.Connected:
of PubSubPeerEventKind.StreamOpened:
if p.topics.len > 0:
p.sendSubs(peer, toSeq(p.topics.keys), true)
of PubSubPeerEventKind.Disconnected:
of PubSubPeerEventKind.StreamClosed:
discard
of PubSubPeerEventKind.DisconnectionRequested:
discard

method getOrCreatePeer*(
p: PubSub,
@@ -501,7 +517,7 @@ method addValidator*(p: PubSub,
## will be sent to `hook`. `hook` can return either `Accept`,
## `Ignore` or `Reject` (which can descore the peer)
for t in topic:
trace "adding validator for topic", topicId = t
trace "adding validator for topic", topic = t
p.validators.mgetOrPut(t, HashSet[ValidatorHandler]()).incl(hook)

method removeValidator*(p: PubSub,
@@ -516,13 +532,13 @@ method removeValidator*(p: PubSub,
method validate*(p: PubSub, message: Message): Future[ValidationResult] {.async, base.} =
var pending: seq[Future[ValidationResult]]
trace "about to validate message"
for topic in message.topicIds:
trace "looking for validators on topic", topicId = topic,
registered = toSeq(p.validators.keys)
if topic in p.validators:
trace "running validators for topic", topicId = topic
for validator in p.validators[topic]:
pending.add(validator(topic, message))
let topic = message.topic
trace "looking for validators on topic",
topic = topic, registered = toSeq(p.validators.keys)
if topic in p.validators:
trace "running validators for topic", topic = topic
for validator in p.validators[topic]:
pending.add(validator(topic, message))

result = ValidationResult.Accept
let futs = await allFinished(pending)
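With validate() now reading a single message.topic, a per-topic validator keeps the same shape as before; a minimal sketch of registering one (handler and topic names are illustrative):

proc denyEmptyPayloads(topic: string, message: Message): Future[ValidationResult] {.async.} =
  # Reject empty payloads; Reject may descore the sending peer, Ignore does not.
  if message.data.len == 0:
    return ValidationResult.Reject
  return ValidationResult.Accept

p.addValidator("my-topic", denyEmptyPayloads)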
@@ -28,10 +28,18 @@ logScope:

when defined(libp2p_expensive_metrics):
declareCounter(libp2p_pubsub_sent_messages, "number of messages sent", labels = ["id", "topic"])
declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])
declareCounter(libp2p_pubsub_skipped_received_messages, "number of received skipped messages", labels = ["id"])
declareCounter(libp2p_pubsub_skipped_sent_messages, "number of sent skipped messages", labels = ["id"])

when defined(pubsubpeer_queue_metrics):
declareGauge(libp2p_gossipsub_priority_queue_size, "the number of messages in the priority queue", labels = ["id"])
declareGauge(libp2p_gossipsub_non_priority_queue_size, "the number of messages in the non-priority queue", labels = ["id"])

const
DefaultMaxNumElementsInNonPriorityQueue* = 1024
BehaviourPenaltyFoNonPriorityQueueOverLimit = 0.0001 # this value is quite arbitrary and was found empirically
# to result in a behaviourPenalty around [0.1, 0.2] when the score is updated.

type
PeerRateLimitError* = object of CatchableError

@@ -40,8 +48,9 @@ type
onSend*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}

PubSubPeerEventKind* {.pure.} = enum
Connected
Disconnected
StreamOpened
StreamClosed
DisconnectionRequested # tells gossipsub that the transport connection to the peer should be closed

PubSubPeerEvent* = object
kind*: PubSubPeerEventKind
@@ -50,6 +59,14 @@ type
DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [].} # have to pass peer as it's unknown during init
OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [].}

RpcMessageQueue* = ref object
# Tracks async tasks for sending high-priority peer-published messages.
sendPriorityQueue: Deque[Future[void]]
# Queue for lower-priority messages, like "IWANT" replies and relay messages.
nonPriorityQueue: AsyncQueue[seq[byte]]
# Task for processing non-priority message queue.
sendNonPriorityTask: Future[void]

PubSubPeer* = ref object of RootObj
getConn*: GetConn # callback to establish a new send connection
onEvent*: OnEvent # Connectivity updates for peer
@@ -63,7 +80,10 @@ type

score*: float64
sentIHaves*: Deque[HashSet[MessageId]]
heDontWants*: Deque[HashSet[MessageId]]
heDontWants*: Deque[HashSet[SaltedId]]
## IDONTWANT contains unvalidated message id:s which may be long and/or
## expensive to look up, so we apply the same salting to them as during
## unvalidated message processing
iHaveBudget*: int
pingBudget*: int
maxMessageSize: int
@@ -71,6 +91,9 @@ type
behaviourPenalty*: float64 # the eventual penalty score
overheadRateLimitOpt*: Opt[TokenBucket]

rpcmessagequeue: RpcMessageQueue
maxNumElementsInNonPriorityQueue*: int # The max number of elements allowed in the non-priority queue.

RPCHandler* = proc(peer: PubSubPeer, data: seq[byte]): Future[void]
{.gcsafe, raises: [].}

@@ -83,6 +106,16 @@ when defined(libp2p_agents_metrics):
#so we have to read the parents short agent..
p.sendConn.getWrapped().shortAgent

proc getAgent*(peer: PubSubPeer): string =
return
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"

func hash*(p: PubSubPeer): Hash =
p.peerId.hash

@@ -138,12 +171,6 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
conn, peer = p, closed = conn.closed,
data = data.shortLog

when defined(libp2p_expensive_metrics):
for m in rmsg.messages:
for t in m.topicIDs:
# metrics
libp2p_pubsub_received_messages.inc(labelValues = [$p.peerId, t])

await p.handler(p, data)
data = newSeq[byte]() # Release memory
except PeerRateLimitError as exc:
@@ -164,6 +191,24 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
debug "exiting pubsub read loop",
conn, peer = p, closed = conn.closed

proc closeSendConn(p: PubSubPeer, event: PubSubPeerEventKind) {.async.} =
if p.sendConn != nil:
debug "Removing send connection", p, conn = p.sendConn
await p.sendConn.close()
p.sendConn = nil

if not p.connectedFut.finished:
p.connectedFut.complete()

try:
if p.onEvent != nil:
p.onEvent(p, PubSubPeerEvent(kind: event))
except CancelledError as exc:
raise exc
except CatchableError as exc:
debug "Errors during diconnection events", error = exc.msg
# don't cleanup p.address else we leak some gossip stat table

proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
try:
if p.connectedFut.finished:
@@ -176,7 +221,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
# remote peer - if we had multiple channels up and one goes down, all
# stop working so we make an effort to only keep a single channel alive

trace "Get new send connection", p, newConn
debug "Get new send connection", p, newConn

# Careful to race conditions here.
# Topic subscription relies on either connectedFut
@@ -186,37 +231,21 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
p.address = if p.sendConn.observedAddr.isSome: some(p.sendConn.observedAddr.get) else: none(MultiAddress)

if p.onEvent != nil:
p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.Connected))
p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.StreamOpened))

await handle(p, newConn)
finally:
if p.sendConn != nil:
trace "Removing send connection", p, conn = p.sendConn
await p.sendConn.close()
p.sendConn = nil
await p.closeSendConn(PubSubPeerEventKind.StreamClosed)

if not p.connectedFut.finished:
p.connectedFut.complete()

try:
if p.onEvent != nil:
p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.Disconnected))
except CancelledError as exc:
raise exc
except CatchableError as exc:
debug "Errors during diconnection events", error = exc.msg

# don't cleanup p.address else we leak some gossip stat table

proc connectImpl(p: PubSubPeer) {.async.} =
proc connectImpl(peer: PubSubPeer) {.async.} =
try:
# Keep trying to establish a connection while it's possible to do so - the
# send connection might get disconnected due to a timeout or an unrelated
# issue so we try to get a new on
while true:
await connectOnce(p)
await connectOnce(peer)
except CatchableError as exc: # never cancelled
debug "Could not establish send connection", msg = exc.msg
debug "Could not establish send connection", peer, msg = exc.msg

proc connect*(p: PubSubPeer) =
if p.connected:
@@ -230,35 +259,37 @@ proc hasSendConn*(p: PubSubPeer): bool =
template sendMetrics(msg: RPCMsg): untyped =
when defined(libp2p_expensive_metrics):
for x in msg.messages:
for t in x.topicIDs:
# metrics
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])
# metrics
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, x.topic])

proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [], async.} =
doAssert(not isNil(p), "pubsubpeer nil!")
proc clearSendPriorityQueue(p: PubSubPeer) =
if p.rpcmessagequeue.sendPriorityQueue.len == 0:
return # fast path

if msg.len <= 0:
debug "empty message, skipping", p, msg = shortLog(msg)
return
while p.rpcmessagequeue.sendPriorityQueue.len > 0 and
p.rpcmessagequeue.sendPriorityQueue[0].finished:
discard p.rpcmessagequeue.sendPriorityQueue.popFirst()

if msg.len > p.maxMessageSize:
info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
return
while p.rpcmessagequeue.sendPriorityQueue.len > 0 and
p.rpcmessagequeue.sendPriorityQueue[^1].finished:
discard p.rpcmessagequeue.sendPriorityQueue.popLast()

if p.sendConn == nil:
# Wait for a send conn to be setup. `connectOnce` will
# complete this even if the sendConn setup failed
await p.connectedFut
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_priority_queue_size.set(
value = p.rpcmessagequeue.sendPriorityQueue.len.int64,
labelValues = [$p.peerId])

var conn = p.sendConn
if conn == nil or conn.closed():
debug "No send connection", p, msg = shortLog(msg)
return
proc clearNonPriorityQueue*(p: PubSubPeer) =
if len(p.rpcmessagequeue.nonPriorityQueue) > 0:
p.rpcmessagequeue.nonPriorityQueue.clear()

trace "sending encoded msgs to peer", conn, encoded = shortLog(msg)
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_non_priority_queue_size.set(labelValues = [$p.peerId], value = 0)

proc sendMsgContinue(conn: Connection, msgFut: Future[void]) {.async.} =
# Continuation for a pending `sendMsg` future from below
try:
await conn.writeLp(msg)
await msgFut
trace "sent pubsub message to remote", conn
except CatchableError as exc: # never cancelled
# Because we detach the send call from the currently executing task using
@@ -269,6 +300,80 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [], async.} =

await conn.close() # This will clean up the send connection

proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async.} =
# Slow path of `sendMsg` where msg is held in memory while send connection is
# being set up
if p.sendConn == nil:
# Wait for a send conn to be setup. `connectOnce` will
# complete this even if the sendConn setup failed
discard await race(p.connectedFut)

var conn = p.sendConn
if conn == nil or conn.closed():
debug "No send connection", p, msg = shortLog(msg)
return

trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
await sendMsgContinue(conn, conn.writeLp(msg))

proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] =
if p.sendConn != nil and not p.sendConn.closed():
# Fast path that avoids copying msg (which happens for {.async.})
let conn = p.sendConn

trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
let f = conn.writeLp(msg)
if not f.completed():
sendMsgContinue(conn, f)
else:
f
else:
sendMsgSlow(p, msg)

proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[void] =
## Asynchronously sends an encoded message to a specified `PubSubPeer`.
##
## Parameters:
## - `p`: The `PubSubPeer` instance to which the message is to be sent.
## - `msg`: The message to be sent, encoded as a sequence of bytes (`seq[byte]`).
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.
doAssert(not isNil(p), "pubsubpeer nil!")

p.clearSendPriorityQueue()

# When queues are empty, skipping the non-priority queue for low priority
# messages reduces latency
let emptyQueues =
(p.rpcmessagequeue.sendPriorityQueue.len() +
p.rpcmessagequeue.nonPriorityQueue.len()) == 0

if msg.len <= 0:
debug "empty message, skipping", peer = p, msg = shortLog(msg)
Future[void].completed()
elif msg.len > p.maxMessageSize:
info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
Future[void].completed()
elif isHighPriority or emptyQueues:
let f = p.sendMsg(msg)
if not f.finished:
p.rpcmessagequeue.sendPriorityQueue.addLast(f)
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_priority_queue_size.inc(labelValues = [$p.peerId])
f
else:
if len(p.rpcmessagequeue.nonPriorityQueue) >= p.maxNumElementsInNonPriorityQueue:
p.behaviourPenalty += BehaviourPenaltyFoNonPriorityQueueOverLimit
trace "Peer has reached maxNumElementsInNonPriorityQueue. Discarding message and applying behaviour penalty.", peer = p, score = p.score,
behaviourPenalty = p.behaviourPenalty, agent = p.getAgent()
Future[void].completed()
else:
let f = p.rpcmessagequeue.nonPriorityQueue.addLast(msg)
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_non_priority_queue_size.inc(labelValues = [$p.peerId])
f

iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize: bool): seq[byte] =
## This iterator takes an `RPCMsg` and sequentially repackages its Messages into new `RPCMsg` instances.
## Each new `RPCMsg` accumulates Messages until reaching the specified `maxSize`. If a single Message
@@ -304,7 +409,16 @@ iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize:
else:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)

proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [].} =
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool) {.raises: [].} =
## Asynchronously sends an `RPCMsg` to a specified `PubSubPeer` with an option for anonymization.
##
## Parameters:
## - `p`: The `PubSubPeer` instance to which the message is to be sent.
## - `msg`: The `RPCMsg` instance representing the message to be sent.
## - `anonymize`: A boolean flag indicating whether the message should be sent with anonymization.
## - `isHighPriority`: A boolean flag indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.
# When sending messages, we take care to re-encode them with the right
# anonymization flag to ensure that we're not penalized for sending invalid
# or malicious data on the wire - in particular, re-encoding protects against
@@ -324,11 +438,11 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [].} =

if encoded.len > p.maxMessageSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
asyncSpawn p.sendEncoded(encodedSplitMsg)
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority)
else:
# If the message size is within limits, send it as is
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
asyncSpawn p.sendEncoded(encoded)
asyncSpawn p.sendEncoded(encoded, isHighPriority)

proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
for sentIHave in p.sentIHaves.mitems():
@@ -337,6 +451,45 @@ proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
return true
return false

proc sendNonPriorityTask(p: PubSubPeer) {.async.} =
while true:
# we send non-priority messages only if there are no pending priority messages
let msg = await p.rpcmessagequeue.nonPriorityQueue.popFirst()
while p.rpcmessagequeue.sendPriorityQueue.len > 0:
p.clearSendPriorityQueue()
# waiting for the last future minimizes the number of times we have to
# wait for something (each wait = performance cost) -
# clearSendPriorityQueue ensures we're not waiting for an already-finished
# future
if p.rpcmessagequeue.sendPriorityQueue.len > 0:
# `race` prevents `p.rpcmessagequeue.sendPriorityQueue[^1]` from being
# cancelled when this task is cancelled
discard await race(p.rpcmessagequeue.sendPriorityQueue[^1])
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_non_priority_queue_size.dec(labelValues = [$p.peerId])
await p.sendMsg(msg)

proc startSendNonPriorityTask(p: PubSubPeer) =
debug "starting sendNonPriorityTask", p
if p.rpcmessagequeue.sendNonPriorityTask.isNil:
p.rpcmessagequeue.sendNonPriorityTask = p.sendNonPriorityTask()

proc stopSendNonPriorityTask*(p: PubSubPeer) =
if not p.rpcmessagequeue.sendNonPriorityTask.isNil:
debug "stopping sendNonPriorityTask", p
p.rpcmessagequeue.sendNonPriorityTask.cancelSoon()
p.rpcmessagequeue.sendNonPriorityTask = nil
p.rpcmessagequeue.sendPriorityQueue.clear()
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_priority_queue_size.set(labelValues = [$p.peerId], value = 0)
p.clearNonPriorityQueue()
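One detail worth noting in sendNonPriorityTask above: it waits on the newest priority-queue future through race() so that cancelling the task itself does not cancel the send it happens to be observing. A stripped-down sketch of that pattern:

import chronos

proc waitWithoutCancelling(pending: Future[void]) {.async.} =
  # Completes when `pending` does; cancelling this proc's future leaves
  # `pending` running, unlike a plain `await pending`, which would forward
  # the cancellation to it.
  discard await race(pending)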
||||
proc new(T: typedesc[RpcMessageQueue]): T =
|
||||
return T(
|
||||
sendPriorityQueue: initDeque[Future[void]](),
|
||||
nonPriorityQueue: newAsyncQueue[seq[byte]]()
|
||||
)
|
||||
|
||||
proc new*(
|
||||
T: typedesc[PubSubPeer],
|
||||
peerId: PeerId,
|
||||
@@ -344,6 +497,7 @@ proc new*(
|
||||
onEvent: OnEvent,
|
||||
codec: string,
|
||||
maxMessageSize: int,
|
||||
maxNumElementsInNonPriorityQueue: int = DefaultMaxNumElementsInNonPriorityQueue,
|
||||
overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket)): T =
|
||||
|
||||
result = T(
|
||||
@@ -353,17 +507,10 @@ proc new*(
|
||||
peerId: peerId,
|
||||
connectedFut: newFuture[void](),
|
||||
maxMessageSize: maxMessageSize,
|
||||
overheadRateLimitOpt: overheadRateLimitOpt
|
||||
overheadRateLimitOpt: overheadRateLimitOpt,
|
||||
rpcmessagequeue: RpcMessageQueue.new(),
|
||||
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue
|
||||
)
|
||||
result.sentIHaves.addFirst(default(HashSet[MessageId]))
|
||||
result.heDontWants.addFirst(default(HashSet[MessageId]))
|
||||
|
||||
proc getAgent*(peer: PubSubPeer): string =
|
||||
return
|
||||
when defined(libp2p_agents_metrics):
|
||||
if peer.shortAgent.len > 0:
|
||||
peer.shortAgent
|
||||
else:
|
||||
"unknown"
|
||||
else:
|
||||
"unknown"
|
||||
result.heDontWants.addFirst(default(HashSet[SaltedId]))
|
||||
result.startSendNonPriorityTask()
|
||||
|
||||
@@ -63,7 +63,7 @@ proc init*(
|
||||
seqno: Option[uint64],
|
||||
sign: bool = true): Message
|
||||
{.gcsafe, raises: [LPError].} =
|
||||
var msg = Message(data: data, topicIDs: @[topic])
|
||||
var msg = Message(data: data, topic: topic)
|
||||
|
||||
# order matters, we want to include seqno in the signature
|
||||
seqno.withValue(seqn):
|
||||
@@ -87,7 +87,7 @@ proc init*(
|
||||
topic: string,
|
||||
seqno: Option[uint64]): Message
|
||||
{.gcsafe, raises: [LPError].} =
|
||||
var msg = Message(data: data, topicIDs: @[topic])
|
||||
var msg = Message(data: data, topic: topic)
|
||||
msg.fromPeer = peerId
|
||||
|
||||
seqno.withValue(seqn):
|
||||
|
||||
@@ -9,8 +9,8 @@
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import options, sequtils, sugar
|
||||
import "../../.."/[
|
||||
import options, sequtils
|
||||
import ../../../[
|
||||
peerid,
|
||||
routing_record,
|
||||
utility
|
||||
@@ -27,52 +27,58 @@ proc expectedFields[T](t: typedesc[T], existingFieldNames: seq[string]) {.raises
|
||||
raise newException(CatchableError, $T & " fields changed, please search for and revise all relevant procs. New fields: " & $fieldNames)
|
||||
|
||||
type
|
||||
PeerInfoMsg* = object
|
||||
peerId*: PeerId
|
||||
signedPeerRecord*: seq[byte]
|
||||
PeerInfoMsg* = object
|
||||
peerId*: PeerId
|
||||
signedPeerRecord*: seq[byte]
|
||||
|
||||
SubOpts* = object
|
||||
subscribe*: bool
|
||||
topic*: string
|
||||
SubOpts* = object
|
||||
subscribe*: bool
|
||||
topic*: string
|
||||
|
||||
MessageId* = seq[byte]
|
||||
MessageId* = seq[byte]
|
||||
|
||||
Message* = object
|
||||
fromPeer*: PeerId
|
||||
data*: seq[byte]
|
||||
seqno*: seq[byte]
|
||||
topicIds*: seq[string]
|
||||
signature*: seq[byte]
|
||||
key*: seq[byte]
|
||||
SaltedId* = object
|
||||
# Salted hash of message ID - used instead of the ordinary message ID to
|
||||
# avoid hash poisoning attacks and to make memory usage more predictable
|
||||
# with respect to the variable-length message id
|
||||
data*: MDigest[256]
|
||||
|
||||
ControlMessage* = object
|
||||
ihave*: seq[ControlIHave]
|
||||
iwant*: seq[ControlIWant]
|
||||
graft*: seq[ControlGraft]
|
||||
prune*: seq[ControlPrune]
|
||||
idontwant*: seq[ControlIWant]
|
||||
Message* = object
|
||||
fromPeer*: PeerId
|
||||
data*: seq[byte]
|
||||
seqno*: seq[byte]
|
||||
topic*: string
|
||||
signature*: seq[byte]
|
||||
key*: seq[byte]
|
||||
|
||||
ControlIHave* = object
|
||||
topicId*: string
|
||||
messageIds*: seq[MessageId]
|
||||
ControlMessage* = object
|
||||
ihave*: seq[ControlIHave]
|
||||
iwant*: seq[ControlIWant]
|
||||
graft*: seq[ControlGraft]
|
||||
prune*: seq[ControlPrune]
|
||||
idontwant*: seq[ControlIWant]
|
||||
|
||||
ControlIWant* = object
|
||||
messageIds*: seq[MessageId]
|
||||
ControlIHave* = object
|
||||
topicID*: string
|
||||
messageIDs*: seq[MessageId]
|
||||
|
||||
ControlGraft* = object
|
||||
topicId*: string
|
||||
ControlIWant* = object
|
||||
messageIDs*: seq[MessageId]
|
||||
|
||||
ControlPrune* = object
|
||||
topicId*: string
|
||||
peers*: seq[PeerInfoMsg]
|
||||
backoff*: uint64
|
||||
ControlGraft* = object
|
||||
topicID*: string
|
||||
|
||||
RPCMsg* = object
|
||||
subscriptions*: seq[SubOpts]
|
||||
messages*: seq[Message]
|
||||
control*: Option[ControlMessage]
|
||||
ping*: seq[byte]
|
||||
pong*: seq[byte]
|
||||
ControlPrune* = object
|
||||
topicID*: string
|
||||
peers*: seq[PeerInfoMsg]
|
||||
backoff*: uint64
|
||||
|
||||
RPCMsg* = object
|
||||
subscriptions*: seq[SubOpts]
|
||||
messages*: seq[Message]
|
||||
control*: Option[ControlMessage]
|
||||
ping*: seq[byte]
|
||||
pong*: seq[byte]
|
||||
|
||||
func withSubs*(
T: type RPCMsg, topics: openArray[string], subscribe: bool): T =
@@ -81,23 +87,23 @@ func withSubs*(

func shortLog*(s: ControlIHave): auto =
(
topicId: s.topicId.shortLog,
messageIds: mapIt(s.messageIds, it.shortLog)
topic: s.topicID.shortLog,
messageIDs: mapIt(s.messageIDs, it.shortLog)
)

func shortLog*(s: ControlIWant): auto =
(
messageIds: mapIt(s.messageIds, it.shortLog)
messageIDs: mapIt(s.messageIDs, it.shortLog)
)

func shortLog*(s: ControlGraft): auto =
(
topicId: s.topicId.shortLog
topic: s.topicID.shortLog
)

func shortLog*(s: ControlPrune): auto =
(
topicId: s.topicId.shortLog
topic: s.topicID.shortLog
)

func shortLog*(c: ControlMessage): auto =
@@ -113,7 +119,7 @@ func shortLog*(msg: Message): auto =
fromPeer: msg.fromPeer.shortLog,
data: msg.data.shortLog,
seqno: msg.seqno.shortLog,
topicIds: $msg.topicIds,
topic: msg.topic,
signature: msg.signature.shortLog,
key: msg.key.shortLog
)
@@ -133,35 +139,35 @@ static: expectedFields(SubOpts, @["subscribe", "topic"])
proc byteSize(subOpts: SubOpts): int =
1 + subOpts.topic.len # 1 byte for the bool

static: expectedFields(Message, @["fromPeer", "data", "seqno", "topicIds", "signature", "key"])
static: expectedFields(Message, @["fromPeer", "data", "seqno", "topic", "signature", "key"])
proc byteSize*(msg: Message): int =
msg.fromPeer.len + msg.data.len + msg.seqno.len +
msg.signature.len + msg.key.len + msg.topicIds.foldl(a + b.len, 0)
msg.fromPeer.len + msg.data.len + msg.seqno.len + msg.signature.len + msg.key.len +
msg.topic.len

proc byteSize*(msgs: seq[Message]): int =
msgs.foldl(a + b.byteSize, 0)

static: expectedFields(ControlIHave, @["topicId", "messageIds"])
static: expectedFields(ControlIHave, @["topicID", "messageIDs"])
proc byteSize(controlIHave: ControlIHave): int =
controlIHave.topicId.len + controlIHave.messageIds.foldl(a + b.len, 0)
controlIHave.topicID.len + controlIHave.messageIDs.foldl(a + b.len, 0)

proc byteSize*(ihaves: seq[ControlIHave]): int =
ihaves.foldl(a + b.byteSize, 0)

static: expectedFields(ControlIWant, @["messageIds"])
static: expectedFields(ControlIWant, @["messageIDs"])
proc byteSize(controlIWant: ControlIWant): int =
controlIWant.messageIds.foldl(a + b.len, 0)
controlIWant.messageIDs.foldl(a + b.len, 0)

proc byteSize*(iwants: seq[ControlIWant]): int =
iwants.foldl(a + b.byteSize, 0)

static: expectedFields(ControlGraft, @["topicId"])
static: expectedFields(ControlGraft, @["topicID"])
proc byteSize(controlGraft: ControlGraft): int =
controlGraft.topicId.len
controlGraft.topicID.len

static: expectedFields(ControlPrune, @["topicId", "peers", "backoff"])
static: expectedFields(ControlPrune, @["topicID", "peers", "backoff"])
proc byteSize(controlPrune: ControlPrune): int =
controlPrune.topicId.len + controlPrune.peers.foldl(a + b.byteSize, 0) + 8 # 8 bytes for uint64
controlPrune.topicID.len + controlPrune.peers.foldl(a + b.byteSize, 0) + 8 # 8 bytes for uint64

static: expectedFields(ControlMessage, @["ihave", "iwant", "graft", "prune", "idontwant"])
proc byteSize(control: ControlMessage): int =

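The byteSize helpers above just sum field lengths with foldl. A minimal self-contained sketch of the same pattern (the IHaveExample type and its values are illustrative, not the library's own):

import std/sequtils

type
  MessageId = seq[byte]
  IHaveExample = object
    topicID: string
    messageIDs: seq[MessageId]

proc byteSize(c: IHaveExample): int =
  # topic length plus the length of every announced message id
  c.topicID.len + c.messageIDs.foldl(a + b.len, 0)

when isMainModule:
  let ex = IHaveExample(topicID: "test", messageIDs: @[@[1'u8, 2, 3]])
  assert ex.byteSize == 4 + 3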
@@ -29,7 +29,7 @@ when defined(libp2p_protobuf_metrics):

proc write*(pb: var ProtoBuffer, field: int, graft: ControlGraft) =
var ipb = initProtoBuffer()
ipb.write(1, graft.topicId)
ipb.write(1, graft.topicID)
ipb.finish()
pb.write(field, ipb)

@@ -45,7 +45,7 @@ proc write*(pb: var ProtoBuffer, field: int, infoMsg: PeerInfoMsg) =

proc write*(pb: var ProtoBuffer, field: int, prune: ControlPrune) =
var ipb = initProtoBuffer()
ipb.write(1, prune.topicId)
ipb.write(1, prune.topicID)
for peer in prune.peers:
ipb.write(2, peer)
ipb.write(3, prune.backoff)
@@ -57,8 +57,8 @@ proc write*(pb: var ProtoBuffer, field: int, prune: ControlPrune) =

proc write*(pb: var ProtoBuffer, field: int, ihave: ControlIHave) =
var ipb = initProtoBuffer()
ipb.write(1, ihave.topicId)
for mid in ihave.messageIds:
ipb.write(1, ihave.topicID)
for mid in ihave.messageIDs:
ipb.write(2, mid)
ipb.finish()
pb.write(field, ipb)
@@ -68,7 +68,7 @@ proc write*(pb: var ProtoBuffer, field: int, ihave: ControlIHave) =

proc write*(pb: var ProtoBuffer, field: int, iwant: ControlIWant) =
var ipb = initProtoBuffer()
for mid in iwant.messageIds:
for mid in iwant.messageIDs:
ipb.write(1, mid)
if len(ipb.buffer) > 0:
ipb.finish()
@@ -110,8 +110,7 @@ proc encodeMessage*(msg: Message, anonymize: bool): seq[byte] =
pb.write(2, msg.data)
if len(msg.seqno) > 0 and not anonymize:
pb.write(3, msg.seqno)
for topic in msg.topicIds:
pb.write(4, topic)
pb.write(4, msg.topic)
if len(msg.signature) > 0 and not anonymize:
pb.write(5, msg.signature)
if len(msg.key) > 0 and not anonymize:
@@ -133,10 +132,10 @@ proc decodeGraft*(pb: ProtoBuffer): ProtoResult[ControlGraft] {.

trace "decodeGraft: decoding message"
var control = ControlGraft()
if ? pb.getField(1, control.topicId):
trace "decodeGraft: read topicId", topic_id = control.topicId
if ? pb.getField(1, control.topicID):
trace "decodeGraft: read topicID", topicID = control.topicID
else:
trace "decodeGraft: topicId is missing"
trace "decodeGraft: topicID is missing"
ok(control)

proc decodePeerInfoMsg*(pb: ProtoBuffer): ProtoResult[PeerInfoMsg] {.
@@ -160,10 +159,10 @@ proc decodePrune*(pb: ProtoBuffer): ProtoResult[ControlPrune] {.

trace "decodePrune: decoding message"
var control = ControlPrune()
if ? pb.getField(1, control.topicId):
trace "decodePrune: read topicId", topic_id = control.topicId
if ? pb.getField(1, control.topicID):
trace "decodePrune: read topicID", topic = control.topicID
else:
trace "decodePrune: topicId is missing"
trace "decodePrune: topicID is missing"
var bpeers: seq[seq[byte]]
if ? pb.getRepeatedField(2, bpeers):
for bpeer in bpeers:
@@ -179,12 +178,12 @@ proc decodeIHave*(pb: ProtoBuffer): ProtoResult[ControlIHave] {.

trace "decodeIHave: decoding message"
var control = ControlIHave()
if ? pb.getField(1, control.topicId):
trace "decodeIHave: read topicId", topic_id = control.topicId
if ? pb.getField(1, control.topicID):
trace "decodeIHave: read topicID", topic = control.topicID
else:
trace "decodeIHave: topicId is missing"
if ? pb.getRepeatedField(2, control.messageIds):
trace "decodeIHave: read messageIDs", message_ids = control.messageIds
trace "decodeIHave: topicID is missing"
if ? pb.getRepeatedField(2, control.messageIDs):
trace "decodeIHave: read messageIDs", message_ids = control.messageIDs
else:
trace "decodeIHave: no messageIDs"
ok(control)
@@ -195,8 +194,8 @@ proc decodeIWant*(pb: ProtoBuffer): ProtoResult[ControlIWant] {.inline.} =

trace "decodeIWant: decoding message"
var control = ControlIWant()
if ? pb.getRepeatedField(1, control.messageIds):
trace "decodeIWant: read messageIDs", message_ids = control.messageIds
if ? pb.getRepeatedField(1, control.messageIDs):
trace "decodeIWant: read messageIDs", message_ids = control.messageIDs
else:
trace "decodeIWant: no messageIDs"
ok(control)
@@ -286,10 +285,11 @@ proc decodeMessage*(pb: ProtoBuffer): ProtoResult[Message] {.inline.} =
trace "decodeMessage: read seqno", seqno = msg.seqno
else:
trace "decodeMessage: seqno is missing"
if ? pb.getRepeatedField(4, msg.topicIds):
trace "decodeMessage: read topics", topic_ids = msg.topicIds
if ?pb.getField(4, msg.topic):
trace "decodeMessage: read topic", topic = msg.topic
else:
trace "decodeMessage: topics are missing"
trace "decodeMessage: topic is required"
return err(ProtoError.RequiredFieldMissing)
if ? pb.getField(5, msg.signature):
trace "decodeMessage: read signature", signature = msg.signature.shortLog()
else:

@@ -9,12 +9,13 @@

{.push raises: [].}

import std/[tables]

import std/[hashes, sets]
import chronos/timer, stew/results

import ../../utility

export results

const Timeout* = 10.seconds # default timeout in ms

type
@@ -26,20 +27,38 @@ type

TimedCache*[K] = object of RootObj
head, tail: TimedEntry[K] # nim linked list doesn't allow inserting at pos
entries: Table[K, TimedEntry[K]]
entries: HashSet[TimedEntry[K]]
timeout: Duration

func `==`*[E](a, b: TimedEntry[E]): bool =
if isNil(a) == isNil(b):
isNil(a) or a.key == b.key
else:
false

func hash*(a: TimedEntry): Hash =
if isNil(a):
default(Hash)
else:
hash(a[].key)

func expire*(t: var TimedCache, now: Moment = Moment.now()) =
while t.head != nil and t.head.expiresAt < now:
t.entries.del(t.head.key)
t.entries.excl(t.head)
t.head.prev = nil
t.head = t.head.next
if t.head == nil: t.tail = nil

func del*[K](t: var TimedCache[K], key: K): Opt[TimedEntry[K]] =
# Removes existing key from cache, returning the previous value if present
var item: TimedEntry[K]
if t.entries.pop(key, item):
let tmp = TimedEntry[K](key: key)
if tmp in t.entries:
let item = try:
t.entries[tmp] # use the shared instance in the set
except KeyError:
raiseAssert "just checked"
t.entries.excl(item)

if t.head == item: t.head = item.next
if t.tail == item: t.tail = item.prev

@@ -55,14 +74,14 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
# refreshed.
t.expire(now)

var previous = t.del(k) # Refresh existing item

var addedAt = now
previous.withValue(previous):
addedAt = previous.addedAt
let
previous = t.del(k) # Refresh existing item
addedAt = if previous.isSome():
previous[].addedAt
else:
now

let node = TimedEntry[K](key: k, addedAt: addedAt, expiresAt: now + t.timeout)

if t.head == nil:
t.tail = node
t.head = t.tail
@@ -83,16 +102,24 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
if cur == t.tail:
t.tail = node

t.entries[k] = node
t.entries.incl(node)

previous.isSome()

func contains*[K](t: TimedCache[K], k: K): bool =
k in t.entries
let tmp = TimedEntry[K](key: k)
tmp in t.entries

func addedAt*[K](t: TimedCache[K], k: K): Moment =
t.entries.getOrDefault(k).addedAt
func addedAt*[K](t: var TimedCache[K], k: K): Moment =
let tmp = TimedEntry[K](key: k)
try:
if tmp in t.entries: # raising is slow
# Use shared instance from entries
return t.entries[tmp][].addedAt
except KeyError:
raiseAssert "just checked"

default(Moment)

func init*[K](T: type TimedCache[K], timeout: Duration = Timeout): T =
T(

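The move from Table to HashSet works because entries hash and compare by key only, so a throwaway sentinel entry can stand in for the key during lookups. A minimal sketch of that pattern, assuming only std/hashes and std/sets (Entry here is illustrative, not the real TimedEntry):

import std/[hashes, sets]

type
  Entry = ref object
    key: string
    value: int

func `==`(a, b: Entry): bool =
  # entries are equal when both are nil or both carry the same key
  if isNil(a) == isNil(b): isNil(a) or a.key == b.key
  else: false

func hash(a: Entry): Hash =
  if isNil(a): default(Hash) else: hash(a.key)

var entries: HashSet[Entry]
entries.incl Entry(key: "k", value: 42)

let sentinel = Entry(key: "k") # other fields are irrelevant for lookup
assert sentinel in entries
assert entries[sentinel].value == 42 # returns the shared instance stored in the set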
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -636,7 +636,7 @@ proc new*(T: typedesc[RendezVous],
sema: newAsyncSemaphore(SemaphoreDefaultSize)
)
logScope: topics = "libp2p discovery rendezvous"
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let
buf = await conn.readLp(4096)
@@ -678,17 +678,25 @@ proc deletesRegister(rdv: RendezVous) {.async.} =
libp2p_rendezvous_registered.set(int64(total))
libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))

method start*(rdv: RendezVous) {.async.} =
method start*(
rdv: RendezVous
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if not rdv.registerDeletionLoop.isNil:
warn "Starting rendezvous twice"
return
return fut
rdv.registerDeletionLoop = rdv.deletesRegister()
rdv.started = true
fut

method stop*(rdv: RendezVous) {.async.} =
method stop*(rdv: RendezVous): Future[void] {.async: (raises: [], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if rdv.registerDeletionLoop.isNil:
warn "Stopping rendezvous without starting it"
return
return fut
rdv.started = false
rdv.registerDeletionLoop.cancel()
rdv.registerDeletionLoop = nil
fut

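The start/stop rewrite above returns an already-completed future from a `raw: true` async proc instead of awaiting anything. A reduced sketch of that shape, assuming chronos (the proc name is illustrative):

import chronos

proc ping(): Future[void] {.async: (raises: [CancelledError], raw: true).} =
  # raw procs build and return the future themselves; nothing is awaited here
  let fut = newFuture[void]()
  fut.complete()
  fut

when isMainModule:
  waitFor ping()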
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -89,7 +89,7 @@ type
readCs: CipherState
writeCs: CipherState

NoiseError* = object of LPError
NoiseError* = object of LPStreamError
NoiseHandshakeError* = object of NoiseError
NoiseDecryptTagError* = object of NoiseError
NoiseOversizedPayloadError* = object of NoiseError
@@ -99,10 +99,10 @@ type

func shortLog*(conn: NoiseConnection): auto =
try:
if conn.isNil: "NoiseConnection(nil)"
if conn == nil: "NoiseConnection(nil)"
else: &"{shortLog(conn.peerId)}:{conn.oid}"
except ValueError as exc:
raise newException(Defect, exc.msg)
raiseAssert(exc.msg)

chronicles.formatIt(NoiseConnection): shortLog(it)

@@ -112,7 +112,7 @@ proc genKeyPair(rng: var HmacDrbgContext): KeyPair =

proc hashProtocol(name: string): MDigest[256] =
# If protocol_name is less than or equal to HASHLEN bytes in length,
# sets h equal to protocol_name with zero bytes appended to make HASHLEN bytes.
# sets h to protocol_name with zero bytes appended to make HASHLEN bytes.
# Otherwise sets h = HASH(protocol_name).

if name.len <= 32:
@@ -142,7 +142,7 @@ proc encrypt(

inc state.n
if state.n > NonceMax:
raise newException(NoiseNonceMaxError, "Noise max nonce value reached")
raise (ref NoiseNonceMaxError)(msg: "Noise max nonce value reached")

proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
{.raises: [NoiseNonceMaxError].} =
@@ -168,10 +168,11 @@ proc decryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
trace "decryptWithAd", tagIn = tagIn.shortLog, tagOut = tagOut.shortLog, nonce = state.n
if tagIn != tagOut:
debug "decryptWithAd failed", data = shortLog(data)
raise newException(NoiseDecryptTagError, "decryptWithAd failed tag authentication.")
raise (ref NoiseDecryptTagError)(msg:
"decryptWithAd failed tag authentication.")
inc state.n
if state.n > NonceMax:
raise newException(NoiseNonceMaxError, "Noise max nonce value reached")
raise (ref NoiseNonceMaxError)(msg: "Noise max nonce value reached")

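The raise sites above switch from newException to constructing the exception ref directly; both forms raise the same typed error. A small standalone sketch (MyError is illustrative):

type MyError = object of CatchableError

proc viaNewException() {.raises: [MyError].} =
  raise newException(MyError, "something went wrong")

proc viaRefConstruction() {.raises: [MyError].} =
  # equivalent effect, constructing the exception object directly
  raise (ref MyError)(msg: "something went wrong")

when isMainModule:
  try:
    viaRefConstruction()
  except MyError as exc:
    assert exc.msg == "something went wrong"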
# Symmetricstate

@@ -181,8 +182,7 @@ proc init(_: type[SymmetricState]): SymmetricState =
result.cs = CipherState(k: EmptyKey)

proc mixKey(ss: var SymmetricState, ikm: ChaChaPolyKey) =
var
temp_keys: array[2, ChaChaPolyKey]
var temp_keys: array[2, ChaChaPolyKey]
sha256.hkdf(ss.ck, ikm, [], temp_keys)
ss.ck = temp_keys[0]
ss.cs = CipherState(k: temp_keys[1])
@@ -198,8 +198,7 @@ proc mixHash(ss: var SymmetricState, data: openArray[byte]) =

# We might use this for other handshake patterns/tokens
proc mixKeyAndHash(ss: var SymmetricState, ikm: openArray[byte]) {.used.} =
var
temp_keys: array[3, ChaChaPolyKey]
var temp_keys: array[3, ChaChaPolyKey]
sha256.hkdf(ss.ck, ikm, [], temp_keys)
ss.ck = temp_keys[0]
ss.mixHash(temp_keys[1])
@@ -234,7 +233,8 @@ proc init(_: type[HandshakeState]): HandshakeState =

template write_e: untyped =
trace "noise write e"
# Sets e (which must be empty) to GENERATE_KEYPAIR(). Appends e.public_key to the buffer. Calls MixHash(e.public_key).
# Sets e (which must be empty) to GENERATE_KEYPAIR().
# Appends e.public_key to the buffer. Calls MixHash(e.public_key).
hs.e = genKeyPair(p.rng[])
msg.add hs.e.publicKey
hs.ss.mixHash(hs.e.publicKey)
@@ -275,33 +275,37 @@ template read_e: untyped =
trace "noise read e", size = msg.len

if msg.len < Curve25519Key.len:
raise newException(NoiseHandshakeError, "Noise E, expected more data")
raise (ref NoiseHandshakeError)(msg: "Noise E, expected more data")

# Sets re (which must be empty) to the next DHLEN bytes from the message. Calls MixHash(re.public_key).
# Sets re (which must be empty) to the next DHLEN bytes from the message.
# Calls MixHash(re.public_key).
hs.re[0..Curve25519Key.high] = msg.toOpenArray(0, Curve25519Key.high)
msg.consume(Curve25519Key.len)
hs.ss.mixHash(hs.re)

template read_s: untyped =
trace "noise read s", size = msg.len
# Sets temp to the next DHLEN + 16 bytes of the message if HasKey() == True, or to the next DHLEN bytes otherwise.
# Sets temp to the next DHLEN + 16 bytes of the message if HasKey() == True,
# or to the next DHLEN bytes otherwise.
# Sets rs (which must be empty) to DecryptAndHash(temp).
let
rsLen =
if hs.ss.cs.hasKey:
if msg.len < Curve25519Key.len + ChaChaPolyTag.len:
raise newException(NoiseHandshakeError, "Noise S, expected more data")
raise (ref NoiseHandshakeError)(msg: "Noise S, expected more data")
Curve25519Key.len + ChaChaPolyTag.len
else:
if msg.len < Curve25519Key.len:
raise newException(NoiseHandshakeError, "Noise S, expected more data")
raise (ref NoiseHandshakeError)(msg: "Noise S, expected more data")
Curve25519Key.len
hs.rs[0..Curve25519Key.high] =
hs.ss.decryptAndHash(msg.toOpenArray(0, rsLen - 1))

msg.consume(rsLen)

proc readFrame(sconn: Connection): Future[seq[byte]] {.async.} =
proc readFrame(
sconn: Connection
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
var besize {.noinit.}: array[2, byte]
await sconn.readExactly(addr besize[0], besize.len)
let size = uint16.fromBytesBE(besize).int
@@ -313,7 +317,11 @@ proc readFrame(sconn: Connection): Future[seq[byte]] {.async.} =
await sconn.readExactly(addr buffer[0], buffer.len)
return buffer

proc writeFrame(sconn: Connection, buf: openArray[byte]): Future[void] =
proc writeFrame(
sconn: Connection,
buf: openArray[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
doAssert buf.len <= uint16.high.int
var
lesize = buf.len.uint16
@@ -324,13 +332,24 @@ proc writeFrame(sconn: Connection, buf: openArray[byte]): Future[void] =
outbuf &= buf
sconn.write(outbuf)

proc receiveHSMessage(sconn: Connection): Future[seq[byte]] = readFrame(sconn)
proc sendHSMessage(sconn: Connection, buf: openArray[byte]): Future[void] =
proc receiveHSMessage(
sconn: Connection
): Future[seq[byte]] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
readFrame(sconn)

proc sendHSMessage(
sconn: Connection,
buf: openArray[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
writeFrame(sconn, buf)

proc handshakeXXOutbound(
p: Noise, conn: Connection,
p2pSecret: seq[byte]): Future[HandshakeResult] {.async.} =
p2pSecret: seq[byte]
): Future[HandshakeResult] {.async: (raises: [
CancelledError, LPStreamError]).} =
const initiator = true
var
hs = HandshakeState.init()
@@ -372,13 +391,16 @@ proc handshakeXXOutbound(
await conn.sendHSMessage(msg.data)

let (cs1, cs2) = hs.ss.split()
return HandshakeResult(cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
return HandshakeResult(
cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
finally:
burnMem(hs)

proc handshakeXXInbound(
p: Noise, conn: Connection,
p2pSecret: seq[byte]): Future[HandshakeResult] {.async.} =
p2pSecret: seq[byte]
): Future[HandshakeResult] {.async: (raises: [
CancelledError, LPStreamError]).} =
const initiator = false

var
@@ -422,11 +444,14 @@ proc handshakeXXInbound(
let
remoteP2psecret = hs.ss.decryptAndHash(msg.data)
(cs1, cs2) = hs.ss.split()
return HandshakeResult(cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
return HandshakeResult(
cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
finally:
burnMem(hs)

method readMessage*(sconn: NoiseConnection): Future[seq[byte]] {.async.} =
method readMessage*(
sconn: NoiseConnection
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
while true: # Discard 0-length payloads
let frame = await sconn.stream.readFrame()
sconn.activity = true
@@ -458,7 +483,11 @@ proc encryptFrame(

cipherFrame[2 + src.len()..<cipherFrame.len] = tag

method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] =
method write*(
sconn: NoiseConnection,
message: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
# Fast path: `{.async.}` would introduce a copy of `message`
const FramingSize = 2 + sizeof(ChaChaPolyTag)

@@ -478,7 +507,8 @@ method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] =
try:
encryptFrame(
sconn,
cipherFrames.toOpenArray(woffset, woffset + chunkSize + FramingSize - 1),
cipherFrames.toOpenArray(
woffset, woffset + chunkSize + FramingSize - 1),
message.toOpenArray(offset, offset + chunkSize - 1))
except NoiseNonceMaxError as exc:
debug "Noise nonce exceeded"
@@ -501,21 +531,28 @@ method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] =
# sequencing issues
sconn.stream.write(cipherFrames)

method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerId]): Future[SecureConn] {.async.} =
method handshake*(
p: Noise,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]
): Future[SecureConn] {.async: (raises: [CancelledError, LPStreamError]).} =
trace "Starting Noise handshake", conn, initiator

let timeout = conn.timeout
conn.timeout = HandshakeTimeout

# https://github.com/libp2p/specs/tree/master/noise#libp2p-data-in-handshake-messages
let
signedPayload = p.localPrivateKey.sign(
PayloadString & p.noiseKeys.publicKey.getBytes).tryGet()
let signedPayload = p.localPrivateKey.sign(
PayloadString & p.noiseKeys.publicKey.getBytes)
if signedPayload.isErr():
raise (ref NoiseHandshakeError)(msg:
"Failed to sign public key: " & $signedPayload.error())

var
libp2pProof = initProtoBuffer()
libp2pProof.write(1, p.localPublicKey)
libp2pProof.write(2, signedPayload.getBytes())
libp2pProof.write(2, signedPayload.get().getBytes())
# data field also there but not used!
libp2pProof.finish()

@@ -534,29 +571,38 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI
remoteSigBytes: seq[byte]

if not remoteProof.getField(1, remotePubKeyBytes).valueOr(false):
raise newException(NoiseHandshakeError, "Failed to deserialize remote public key bytes. (initiator: " & $initiator & ")")
raise (ref NoiseHandshakeError)(msg:
"Failed to deserialize remote public key bytes. (initiator: " &
$initiator & ")")
if not remoteProof.getField(2, remoteSigBytes).valueOr(false):
raise newException(NoiseHandshakeError, "Failed to deserialize remote signature bytes. (initiator: " & $initiator & ")")
raise (ref NoiseHandshakeError)(msg:
"Failed to deserialize remote signature bytes. (initiator: " &
$initiator & ")")

if not remotePubKey.init(remotePubKeyBytes):
raise newException(NoiseHandshakeError, "Failed to decode remote public key. (initiator: " & $initiator & ")")
raise (ref NoiseHandshakeError)(msg:
"Failed to decode remote public key. (initiator: " & $initiator & ")")
if not remoteSig.init(remoteSigBytes):
raise newException(NoiseHandshakeError, "Failed to decode remote signature. (initiator: " & $initiator & ")")
raise (ref NoiseHandshakeError)(msg:
"Failed to decode remote signature. (initiator: " & $initiator & ")")

let verifyPayload = PayloadString & handshakeRes.rs.getBytes
if not remoteSig.verify(verifyPayload, remotePubKey):
raise newException(NoiseHandshakeError, "Noise handshake signature verify failed.")
raise (ref NoiseHandshakeError)(msg:
"Noise handshake signature verify failed.")
else:
trace "Remote signature verified", conn

let pid = PeerId.init(remotePubKey).valueOr:
raise newException(NoiseHandshakeError, "Invalid remote peer id: " & $error)
raise (ref NoiseHandshakeError)(msg:
"Invalid remote peer id: " & $error)

trace "Remote peer id", pid = $pid

peerId.withValue(targetPid):
if not targetPid.validate():
raise newException(NoiseHandshakeError, "Failed to validate expected peerId.")
raise (ref NoiseHandshakeError)(msg:
"Failed to validate expected peerId.")

if pid != targetPid:
var
@@ -566,7 +612,8 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI
initiator, dealt_peer = conn,
dealt_key = $failedKey, received_peer = $pid,
received_key = $remotePubKey
raise newException(NoiseHandshakeError, "Noise handshake, peer id don't match! " & $pid & " != " & $targetPid)
raise (ref NoiseHandshakeError)(msg:
"Noise handshake, peer id don't match! " & $pid & " != " & $targetPid)
conn.peerId = pid

var tmp = NoiseConnection.new(conn, conn.peerId, conn.observedAddr)
@@ -586,7 +633,7 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI

return secure

method closeImpl*(s: NoiseConnection) {.async.} =
method closeImpl*(s: NoiseConnection) {.async: (raises: []).} =
await procCall SecureConn(s).closeImpl()

burnMem(s.readCs)
@@ -597,15 +644,14 @@ method init*(p: Noise) {.gcsafe.} =
p.codec = NoiseCodec

proc new*(
T: typedesc[Noise],
rng: ref HmacDrbgContext,
privateKey: PrivateKey,
outgoing: bool = true,
commonPrologue: seq[byte] = @[]): T =

T: typedesc[Noise],
rng: ref HmacDrbgContext,
privateKey: PrivateKey,
outgoing: bool = true,
commonPrologue: seq[byte] = @[]): T =
let pkBytes = privateKey.getPublicKey()
.expect("Expected valid Private Key")
.getBytes().expect("Couldn't get public Key bytes")
.expect("Expected valid Private Key")
.getBytes().expect("Couldn't get public Key bytes")

var noise = Noise(
rng: rng,

@@ -19,7 +19,7 @@ type

method init(p: PlainText) {.gcsafe.} =
proc handle(conn: Connection, proto: string)
{.async, gcsafe.} = discard
{.async.} = discard
## plain text doesn't do anything

p.codec = PlainTextCodec

@@ -1,444 +0,0 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import std/[oids, strformat]
import bearssl/rand
import chronos, chronicles, stew/endians2
import nimcrypto/[hmac, sha2, sha, hash, rijndael, twofish, bcmode]
import secure,
../../stream/connection,
../../peerinfo,
../../crypto/crypto,
../../crypto/ecnist,
../../peerid,
../../utility,
../../errors

export hmac, sha2, sha, hash, rijndael, bcmode

logScope:
topics = "libp2p secio"

const
SecioCodec* = "/secio/1.0.0"
SecioMaxMessageSize = 8 * 1024 * 1024 ## 8mb
SecioMaxMacSize = sha512.sizeDigest
SecioNonceSize = 16
SecioExchanges = "P-256,P-384,P-521"
SecioCiphers = "TwofishCTR,AES-256,AES-128"
SecioHashes = "SHA256,SHA512"

type
Secio* = ref object of Secure
rng: ref HmacDrbgContext
localPrivateKey: PrivateKey
localPublicKey: PublicKey
remotePublicKey: PublicKey

SecureCipherType {.pure.} = enum
Aes128, Aes256, Twofish

SecureMacType {.pure.} = enum
Sha1, Sha256, Sha512

SecureCipher = object
case kind: SecureCipherType
of SecureCipherType.Aes128:
ctxaes128: CTR[aes128]
of SecureCipherType.Aes256:
ctxaes256: CTR[aes256]
of SecureCipherType.Twofish:
ctxtwofish256: CTR[twofish256]

SecureMac = object
case kind: SecureMacType
of SecureMacType.Sha256:
ctxsha256: HMAC[sha256]
of SecureMacType.Sha512:
ctxsha512: HMAC[sha512]
of SecureMacType.Sha1:
ctxsha1: HMAC[sha1]

SecioConn = ref object of SecureConn
writerMac: SecureMac
readerMac: SecureMac
writerCoder: SecureCipher
readerCoder: SecureCipher

SecioError* = object of LPError

func shortLog*(conn: SecioConn): auto =
try:
if conn.isNil: "SecioConn(nil)"
else: &"{shortLog(conn.peerId)}:{conn.oid}"
except ValueError as exc:
raise newException(Defect, exc.msg)

chronicles.formatIt(SecioConn): shortLog(it)

proc init(mac: var SecureMac, hash: string, key: openArray[byte]) =
if hash == "SHA256":
mac = SecureMac(kind: SecureMacType.Sha256)
mac.ctxsha256.init(key)
elif hash == "SHA512":
mac = SecureMac(kind: SecureMacType.Sha512)
mac.ctxsha512.init(key)
elif hash == "SHA1":
mac = SecureMac(kind: SecureMacType.Sha1)
mac.ctxsha1.init(key)

proc update(mac: var SecureMac, data: openArray[byte]) =
case mac.kind
of SecureMacType.Sha256:
update(mac.ctxsha256, data)
of SecureMacType.Sha512:
update(mac.ctxsha512, data)
of SecureMacType.Sha1:
update(mac.ctxsha1, data)

proc sizeDigest(mac: SecureMac): int {.inline.} =
case mac.kind
of SecureMacType.Sha256:
result = int(mac.ctxsha256.sizeDigest())
of SecureMacType.Sha512:
result = int(mac.ctxsha512.sizeDigest())
of SecureMacType.Sha1:
result = int(mac.ctxsha1.sizeDigest())

proc finish(mac: var SecureMac, data: var openArray[byte]) =
case mac.kind
of SecureMacType.Sha256:
discard finish(mac.ctxsha256, data)
of SecureMacType.Sha512:
discard finish(mac.ctxsha512, data)
of SecureMacType.Sha1:
discard finish(mac.ctxsha1, data)

proc reset(mac: var SecureMac) =
case mac.kind
of SecureMacType.Sha256:
reset(mac.ctxsha256)
of SecureMacType.Sha512:
reset(mac.ctxsha512)
of SecureMacType.Sha1:
reset(mac.ctxsha1)

proc init(sc: var SecureCipher, cipher: string, key: openArray[byte],
iv: openArray[byte]) {.inline.} =
if cipher == "AES-128":
sc = SecureCipher(kind: SecureCipherType.Aes128)
sc.ctxaes128.init(key, iv)
elif cipher == "AES-256":
sc = SecureCipher(kind: SecureCipherType.Aes256)
sc.ctxaes256.init(key, iv)
elif cipher == "TwofishCTR":
sc = SecureCipher(kind: SecureCipherType.Twofish)
sc.ctxtwofish256.init(key, iv)

proc encrypt(cipher: var SecureCipher, input: openArray[byte],
output: var openArray[byte]) {.inline.} =
case cipher.kind
of SecureCipherType.Aes128:
cipher.ctxaes128.encrypt(input, output)
of SecureCipherType.Aes256:
cipher.ctxaes256.encrypt(input, output)
of SecureCipherType.Twofish:
cipher.ctxtwofish256.encrypt(input, output)

proc decrypt(cipher: var SecureCipher, input: openArray[byte],
output: var openArray[byte]) {.inline.} =
case cipher.kind
of SecureCipherType.Aes128:
cipher.ctxaes128.decrypt(input, output)
of SecureCipherType.Aes256:
cipher.ctxaes256.decrypt(input, output)
of SecureCipherType.Twofish:
cipher.ctxtwofish256.decrypt(input, output)

proc macCheckAndDecode(sconn: SecioConn, data: var seq[byte]): bool =
## This procedure checks MAC of recieved message ``data`` and if message is
## authenticated, then decrypt message.
##
## Procedure returns ``false`` if message is too short or MAC verification
## failed.
var macData: array[SecioMaxMacSize, byte]
let macsize = sconn.readerMac.sizeDigest()
if len(data) < macsize:
trace "Message is shorter then MAC size", message_length = len(data),
mac_size = macsize
return false
let mark = len(data) - macsize
sconn.readerMac.update(data.toOpenArray(0, mark - 1))
sconn.readerMac.finish(macData)
sconn.readerMac.reset()
if not equalMem(addr data[mark], addr macData[0], macsize):
trace "Invalid MAC",
calculated = toHex(macData.toOpenArray(0, macsize - 1)),
stored = toHex(data.toOpenArray(mark, data.high))
return false

sconn.readerCoder.decrypt(data.toOpenArray(0, mark - 1),
data.toOpenArray(0, mark - 1))
data.setLen(mark)
result = true

proc readRawMessage(conn: Connection): Future[seq[byte]] {.async.} =
while true: # Discard 0-length payloads
var lengthBuf: array[4, byte]
await conn.readExactly(addr lengthBuf[0], lengthBuf.len)
let length = uint32.fromBytesBE(lengthBuf)

trace "Recieved message header", header = lengthBuf.shortLog, length = length

if length > SecioMaxMessageSize: # Verify length before casting!
trace "Received size of message exceed limits", conn, length = length
raise (ref SecioError)(msg: "Message exceeds maximum length")

if length > 0:
var buf = newSeq[byte](int(length))
await conn.readExactly(addr buf[0], buf.len)
trace "Received message body",
conn, length = buf.len, buff = buf.shortLog
return buf

trace "Discarding 0-length payload", conn

method readMessage*(sconn: SecioConn): Future[seq[byte]] {.async.} =
## Read message from channel secure connection ``sconn``.
when chronicles.enabledLogLevel == LogLevel.TRACE:
logScope:
stream_oid = $sconn.stream.oid
var buf = await sconn.stream.readRawMessage()
if sconn.macCheckAndDecode(buf):
result = buf
else:
trace "Message MAC verification failed", buf = buf.shortLog
raise (ref SecioError)(msg: "message failed MAC verification")

method write*(sconn: SecioConn, message: seq[byte]) {.async.} =
## Write message ``message`` to secure connection ``sconn``.
if message.len == 0:
return

var
left = message.len
offset = 0
while left > 0:
let
chunkSize = if left > SecioMaxMessageSize - 64: SecioMaxMessageSize - 64 else: left
macsize = sconn.writerMac.sizeDigest()
length = chunkSize + macsize

var msg = newSeq[byte](chunkSize + 4 + macsize)
msg[0..<4] = uint32(length).toBytesBE()

sconn.writerCoder.encrypt(message.toOpenArray(offset, offset + chunkSize - 1),
msg.toOpenArray(4, 4 + chunkSize - 1))
left = left - chunkSize
offset = offset + chunkSize
let mo = 4 + chunkSize
sconn.writerMac.update(msg.toOpenArray(4, 4 + chunkSize - 1))
sconn.writerMac.finish(msg.toOpenArray(mo, mo + macsize - 1))
sconn.writerMac.reset()

trace "Writing message", message = msg.shortLog, left, offset
await sconn.stream.write(msg)
sconn.activity = true

proc newSecioConn(conn: Connection,
hash: string,
cipher: string,
secrets: Secret,
order: int,
remotePubKey: PublicKey): SecioConn
{.raises: [LPError].} =
## Create new secure stream/lpstream, using specified hash algorithm ``hash``,
## cipher algorithm ``cipher``, stretched keys ``secrets`` and order
## ``order``.

result = SecioConn.new(conn, conn.peerId, conn.observedAddr)

let i0 = if order < 0: 1 else: 0
let i1 = if order < 0: 0 else: 1

trace "Writer credentials", mackey = secrets.macOpenArray(i0).shortLog,
enckey = secrets.keyOpenArray(i0).shortLog,
iv = secrets.ivOpenArray(i0).shortLog
trace "Reader credentials", mackey = secrets.macOpenArray(i1).shortLog,
enckey = secrets.keyOpenArray(i1).shortLog,
iv = secrets.ivOpenArray(i1).shortLog
result.writerMac.init(hash, secrets.macOpenArray(i0))
result.readerMac.init(hash, secrets.macOpenArray(i1))
result.writerCoder.init(cipher, secrets.keyOpenArray(i0),
secrets.ivOpenArray(i0))
result.readerCoder.init(cipher, secrets.keyOpenArray(i1),
secrets.ivOpenArray(i1))

proc transactMessage(conn: Connection,
msg: seq[byte]): Future[seq[byte]] {.async.} =
trace "Sending message", message = msg.shortLog, length = len(msg)
await conn.write(msg)
return await conn.readRawMessage()

method handshake*(s: Secio, conn: Connection, initiator: bool, peerId: Opt[PeerId]): Future[SecureConn] {.async.} =
var
localNonce: array[SecioNonceSize, byte]
remoteNonce: seq[byte]
remoteBytesPubkey: seq[byte]
remoteEBytesPubkey: seq[byte]
remoteEBytesSig: seq[byte]
remotePubkey: PublicKey
remoteEPubkey: ecnist.EcPublicKey
remoteESignature: Signature
remoteExchanges: string
remoteCiphers: string
remoteHashes: string
remotePeerId: PeerId
localPeerId: PeerId
localBytesPubkey = s.localPublicKey.getBytes().tryGet()

hmacDrbgGenerate(s.rng[], localNonce)

var request = createProposal(localNonce,
localBytesPubkey,
SecioExchanges,
SecioCiphers,
SecioHashes)

localPeerId = PeerId.init(s.localPublicKey).tryGet()

trace "Local proposal", schemes = SecioExchanges,
ciphers = SecioCiphers,
hashes = SecioHashes,
pubkey = localBytesPubkey.shortLog,
peer = localPeerId

var answer = await transactMessage(conn, request)

if len(answer) == 0:
trace "Proposal exchange failed", conn
raise (ref SecioError)(msg: "Proposal exchange failed")

if not decodeProposal(answer, remoteNonce, remoteBytesPubkey, remoteExchanges,
remoteCiphers, remoteHashes):
trace "Remote proposal decoding failed", conn
raise (ref SecioError)(msg: "Remote proposal decoding failed")

if not remotePubkey.init(remoteBytesPubkey):
trace "Remote public key incorrect or corrupted",
pubkey = remoteBytesPubkey.shortLog
raise (ref SecioError)(msg: "Remote public key incorrect or corrupted")

remotePeerId = PeerId.init(remotePubkey).tryGet()

peerId.withValue(targetPid):
if not targetPid.validate():
raise newException(SecioError, "Failed to validate expected peerId.")

if remotePeerId != targetPid:
raise newException(SecioError, "Peer ids don't match!")
conn.peerId = remotePeerId
let order = getOrder(remoteBytesPubkey, localNonce, localBytesPubkey,
remoteNonce).tryGet()
trace "Remote proposal", schemes = remoteExchanges, ciphers = remoteCiphers,
hashes = remoteHashes,
pubkey = remoteBytesPubkey.shortLog, order = order,
peer = remotePeerId

let scheme = selectBest(order, SecioExchanges, remoteExchanges)
let cipher = selectBest(order, SecioCiphers, remoteCiphers)
let hash = selectBest(order, SecioHashes, remoteHashes)
if len(scheme) == 0 or len(cipher) == 0 or len(hash) == 0:
trace "No algorithms in common", peer = remotePeerId
raise (ref SecioError)(msg: "No algorithms in common")

trace "Encryption scheme selected", scheme = scheme, cipher = cipher,
hash = hash

var ekeypair = ephemeral(scheme, s.rng[]).tryGet()
# We need EC public key in raw binary form
var epubkey = ekeypair.pubkey.getRawBytes().tryGet()
var localCorpus = request[4..^1] & answer & epubkey
var signature = s.localPrivateKey.sign(localCorpus).tryGet()

var localExchange = createExchange(epubkey, signature.getBytes())
var remoteExchange = await transactMessage(conn, localExchange)
if len(remoteExchange) == 0:
trace "Corpus exchange failed", conn
raise (ref SecioError)(msg: "Corpus exchange failed")

if not decodeExchange(remoteExchange, remoteEBytesPubkey, remoteEBytesSig):
trace "Remote exchange decoding failed", conn
raise (ref SecioError)(msg: "Remote exchange decoding failed")

if not remoteESignature.init(remoteEBytesSig):
trace "Remote signature incorrect or corrupted", signature = remoteEBytesSig.shortLog
raise (ref SecioError)(msg: "Remote signature incorrect or corrupted")

var remoteCorpus = answer & request[4..^1] & remoteEBytesPubkey
if not remoteESignature.verify(remoteCorpus, remotePubkey):
trace "Signature verification failed", scheme = $remotePubkey.scheme,
signature = $remoteESignature,
pubkey = $remotePubkey,
corpus = $remoteCorpus
raise (ref SecioError)(msg: "Signature verification failed")

trace "Signature verified", scheme = remotePubkey.scheme

if not remoteEPubkey.initRaw(remoteEBytesPubkey):
trace "Remote ephemeral public key incorrect or corrupted",
pubkey = toHex(remoteEBytesPubkey)
raise (ref SecioError)(msg: "Remote ephemeral public key incorrect or corrupted")

var secret = getSecret(remoteEPubkey, ekeypair.seckey)
if len(secret) == 0:
trace "Shared secret could not be created"
raise (ref SecioError)(msg: "Shared secret could not be created")

trace "Shared secret calculated", secret = secret.shortLog

var keys = stretchKeys(cipher, hash, secret)

trace "Authenticated encryption parameters",
iv0 = toHex(keys.ivOpenArray(0)), key0 = keys.keyOpenArray(0).shortLog,
mac0 = keys.macOpenArray(0).shortLog,
iv1 = keys.ivOpenArray(1).shortLog, key1 = keys.keyOpenArray(1).shortLog,
mac1 = keys.macOpenArray(1).shortLog

# Perform Nonce exchange over encrypted channel.

var secioConn = newSecioConn(conn, hash, cipher, keys, order, remotePubkey)
result = secioConn
await secioConn.write(remoteNonce)
var res = await secioConn.readMessage()

if res != @localNonce:
trace "Nonce verification failed", receivedNonce = res.shortLog,
localNonce = localNonce.shortLog
raise (ref SecioError)(msg: "Nonce verification failed")
else:
trace "Secure handshake succeeded"

method init(s: Secio) {.gcsafe.} =
procCall Secure(s).init()
s.codec = SecioCodec

proc new*(
T: typedesc[Secio],
rng: ref HmacDrbgContext,
localPrivateKey: PrivateKey): T =
let secio = Secio(
rng: rng,
localPrivateKey: localPrivateKey,
localPublicKey: localPrivateKey.getPublicKey().expect("Invalid private key"),
)
secio.init()
secio
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -37,18 +37,19 @@ type

func shortLog*(conn: SecureConn): auto =
try:
if conn.isNil: "SecureConn(nil)"
if conn == nil: "SecureConn(nil)"
else: &"{shortLog(conn.peerId)}:{conn.oid}"
except ValueError as exc:
raise newException(Defect, exc.msg)
raiseAssert(exc.msg)

chronicles.formatIt(SecureConn): shortLog(it)

proc new*(T: type SecureConn,
conn: Connection,
peerId: PeerId,
observedAddr: Opt[MultiAddress],
timeout: Duration = DefaultConnectionTimeout): T =
proc new*(
T: type SecureConn,
conn: Connection,
peerId: PeerId,
observedAddr: Opt[MultiAddress],
timeout: Duration = DefaultConnectionTimeout): T =
result = T(stream: conn,
peerId: peerId,
observedAddr: observedAddr,
@@ -63,55 +64,72 @@ method initStream*(s: SecureConn) =

procCall Connection(s).initStream()

method closeImpl*(s: SecureConn) {.async.} =
method closeImpl*(s: SecureConn) {.async: (raises: []).} =
trace "Closing secure conn", s, dir = s.dir
if not(isNil(s.stream)):
if s.stream != nil:
await s.stream.close()

await procCall Connection(s).closeImpl()

method readMessage*(c: SecureConn): Future[seq[byte]] {.async, base.} =
doAssert(false, "Not implemented!")
method readMessage*(
c: SecureConn
): Future[seq[byte]] {.async: (raises: [
CancelledError, LPStreamError], raw: true), base.} =
raiseAssert("Not implemented!")

method getWrapped*(s: SecureConn): Connection = s.stream

method handshake*(s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]): Future[SecureConn] {.async, base.} =
doAssert(false, "Not implemented!")
method handshake*(
s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]
): Future[SecureConn] {.async: (raises: [
CancelledError, LPStreamError], raw: true), base.} =
raiseAssert("Not implemented!")

proc handleConn(s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]): Future[Connection] {.async.} =
proc handleConn(
s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]
): Future[Connection] {.async: (raises: [CancelledError, LPStreamError]).} =
var sconn = await s.handshake(conn, initiator, peerId)
# mark connection bottom level transport direction
# this is the safest place to do this
# we require this information in for example gossipsub
sconn.transportDir = if initiator: Direction.Out else: Direction.In

proc cleanup() {.async.} =
proc cleanup() {.async: (raises: []).} =
try:
let futs = [conn.join(), sconn.join()]
await futs[0] or futs[1]
for f in futs:
if not f.finished: await f.cancelAndWait() # cancel outstanding join()
block:
let
fut1 = conn.join()
fut2 = sconn.join()
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(fut1, fut2)
except ValueError: raiseAssert("Futures list is not empty")
# at least one join() completed, cancel pending one, if any
if not fut1.finished: await fut1.cancelAndWait()
if not fut2.finished: await fut2.cancelAndWait()
block:
let
fut1 = sconn.close()
fut2 = conn.close()
await allFutures(fut1, fut2)
static: doAssert typeof(fut1).E is void # Cannot fail
static: doAssert typeof(fut2).E is void # Cannot fail

await allFuturesThrowing(
sconn.close(), conn.close())
except CancelledError:
# This is top-level procedure which will work as separate task, so it
# do not need to propagate CancelledError.
discard
except CatchableError as exc:
debug "error cleaning up secure connection", err = exc.msg, sconn

if not isNil(sconn):
if sconn != nil:
# All the errors are handled inside `cleanup()` procedure.
asyncSpawn cleanup()

return sconn
sconn

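The cleanup rewrite above waits for whichever join finishes first via chronos race and then cancels the one still pending. A reduced sketch of that flow, assuming chronos (the two sleeps stand in for the join futures):

import chronos

proc watchdog() {.async.} =
  let
    fut1 = sleepAsync(10.milliseconds)
    fut2 = sleepAsync(10.seconds)
  try: # race only raises ValueError when given an empty futures list
    discard await race(fut1, fut2)
  except ValueError: raiseAssert("Futures list is not empty")
  # one future has finished; cancel whichever is still pending
  if not fut1.finished: await fut1.cancelAndWait()
  if not fut2.finished: await fut2.cancelAndWait()

when isMainModule:
  waitFor watchdog()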
method init*(s: Secure) =
procCall LPProtocol(s).init()
@@ -127,23 +145,25 @@ method init*(s: Secure) =
warn "securing connection canceled", conn
await conn.close()
raise exc
except CatchableError as exc:
except LPStreamError as exc:
warn "securing connection failed", err = exc.msg, conn
await conn.close()

s.handler = handle

method secure*(s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]):
Future[Connection] {.base.} =
s.handleConn(conn, initiator, peerId)
method secure*(
s: Secure,
conn: Connection,
peerId: Opt[PeerId]
): Future[Connection] {.async: (raises: [
CancelledError, LPStreamError], raw: true), base.} =
s.handleConn(conn, conn.dir == Direction.Out, peerId)

method readOnce*(s: SecureConn,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
method readOnce*(
s: SecureConn,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
doAssert(nbytes > 0, "nbytes must be positive integer")

if s.isEof:
@@ -160,7 +180,7 @@ method readOnce*(s: SecureConn,
raise err
except CancelledError as exc:
raise exc
except CatchableError as err:
except LPStreamError as err:
debug "Error while reading message from secure connection, closing.",
error = err.name,
message = err.msg,

@@ -37,7 +37,7 @@ proc isRunning*(self: AutoRelayService): bool =

proc addressMapper(
self: AutoRelayService,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return concat(toSeq(self.relayAddresses.values))

proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch) {.async.} =
@@ -58,8 +58,8 @@ proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch)
self.onReservation(concat(toSeq(self.relayAddresses.values)))
await sleepAsync chronos.seconds(ttl - 30)

method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, listenAddrs)

let hasBeenSetUp = await procCall Service(self).setup(switch)
@@ -83,7 +83,7 @@ proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
self.backingOff.keepItIf(it != pid)
self.peerAvailable.fire()

proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
while true:
# Remove relayPeers that failed
let peers = toSeq(self.relayPeers.keys())
@@ -116,14 +116,14 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
await self.peerAvailable.wait()
await sleepAsync(200.millis)

method run*(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
method run*(self: AutoRelayService, switch: Switch) {.async.} =
if self.running:
trace "Autorelay is already running"
return
self.running = true
self.runner = self.innerRun(switch)

method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
self.running = false

@@ -94,7 +94,7 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =

switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)

self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and not self.autoRelayService.isRunning():
discard await self.autoRelayService.setup(switch)
elif networkReachability == NetworkReachability.Reachable and self.autoRelayService.isRunning():

@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -34,10 +34,10 @@ type
|
||||
|
||||
func shortLog*(s: BufferStream): auto =
|
||||
try:
|
||||
if s.isNil: "BufferStream(nil)"
|
||||
if s == nil: "BufferStream(nil)"
|
||||
else: &"{shortLog(s.peerId)}:{s.oid}"
|
||||
except ValueError as exc:
|
||||
raise newException(Defect, exc.msg)
|
||||
raiseAssert(exc.msg)
|
||||
|
||||
chronicles.formatIt(BufferStream): shortLog(it)
|
||||
|
||||
@@ -55,14 +55,16 @@ method initStream*(s: BufferStream) =
|
||||
trace "BufferStream created", s
|
||||
|
||||
proc new*(
|
||||
T: typedesc[BufferStream],
|
||||
timeout: Duration = DefaultConnectionTimeout): T =
|
||||
|
||||
T: typedesc[BufferStream],
|
||||
timeout: Duration = DefaultConnectionTimeout): T =
|
||||
let bufferStream = T(timeout: timeout)
|
||||
bufferStream.initStream()
|
||||
bufferStream
|
||||
|
||||
method pushData*(s: BufferStream, data: seq[byte]) {.base, async.} =
|
||||
method pushData*(
|
||||
s: BufferStream,
|
||||
data: seq[byte]
|
||||
) {.base, async: (raises: [CancelledError, LPStreamError]).} =
|
||||
## Write bytes to internal read buffer, use this to fill up the
|
||||
## buffer with data.
|
||||
##
|
||||
@@ -70,7 +72,7 @@ method pushData*(s: BufferStream, data: seq[byte]) {.base, async.} =
|
||||
##
|
||||
|
||||
doAssert(not s.pushing,
|
||||
&"Only one concurrent push allowed for stream {s.shortLog()}")
|
||||
"Only one concurrent push allowed for stream " & s.shortLog())
|
||||
|
||||
if s.isClosed or s.pushedEof:
|
||||
raise newLPStreamClosedError()
|
||||
@@ -87,12 +89,14 @@ method pushData*(s: BufferStream, data: seq[byte]) {.base, async.} =
|
||||
finally:
|
||||
s.pushing = false
|
||||
|
||||
method pushEof*(s: BufferStream) {.base, async.} =
|
||||
method pushEof*(
|
||||
s: BufferStream
|
||||
) {.base, async: (raises: [CancelledError, LPStreamError]).} =
|
||||
if s.pushedEof:
|
||||
return
|
||||
|
||||
doAssert(not s.pushing,
|
||||
&"Only one concurrent push allowed for stream {s.shortLog()}")
|
||||
"Only one concurrent push allowed for stream " & s.shortLog())
|
||||
|
||||
s.pushedEof = true
|
||||
|
||||
@@ -108,13 +112,14 @@ method pushEof*(s: BufferStream) {.base, async.} =
|
||||
method atEof*(s: BufferStream): bool =
|
||||
s.isEof and s.readBuf.len == 0
|
||||
|
||||
method readOnce*(s: BufferStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int):
|
||||
Future[int] {.async.} =
|
||||
method readOnce*(
|
||||
s: BufferStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int
|
||||
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
doAssert(nbytes > 0, "nbytes must be positive integer")
|
||||
doAssert(not s.reading,
|
||||
&"Only one concurrent read allowed for stream {s.shortLog()}")
|
||||
"Only one concurrent read allowed for stream " & s.shortLog())
|
||||
|
||||
if s.returnedEof:
|
||||
raise newLPStreamEOFError()
|
||||
@@ -135,13 +140,6 @@ method readOnce*(s: BufferStream,
|
||||
# Not very efficient, but shouldn't happen often
|
||||
s.readBuf.assign(@(p.toOpenArray(0, rbytes - 1)) & @(s.readBuf.data))
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
# When an exception happens here, the Bufferstream is effectively
|
||||
# broken and no more reads will be valid - for now, return EOF if it's
|
||||
# called again, though this is not completely true - EOF represents an
|
||||
# "orderly" shutdown and that's not what happened here..
|
||||
s.returnedEof = true
|
||||
raise exc
|
||||
finally:
|
||||
s.reading = false
|
||||
|
||||
@@ -173,7 +171,8 @@ method readOnce*(s: BufferStream,
|
||||
|
||||
return rbytes
|
||||
|
||||
method closeImpl*(s: BufferStream): Future[void] =
|
||||
method closeImpl*(
|
||||
s: BufferStream): Future[void] {.async: (raises: [], raw: true).} =
|
||||
## close the stream and clear the buffer
|
||||
trace "Closing BufferStream", s, len = s.len
|
||||
|
||||
@@ -209,8 +208,8 @@ method closeImpl*(s: BufferStream): Future[void] =
|
||||
if not s.readQueue.empty():
|
||||
discard s.readQueue.popFirstNoWait()
|
||||
except AsyncQueueFullError, AsyncQueueEmptyError:
|
||||
raise newException(Defect, getCurrentExceptionMsg())
|
||||
raiseAssert(getCurrentExceptionMsg())
|
||||
|
||||
trace "Closed BufferStream", s
|
||||
|
||||
procCall Connection(s).closeImpl() # noraises, nocancels
|
||||
procCall Connection(s).closeImpl()
|
||||
|
||||
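For context on how the reworked `pushData`/`pushEof`/`readOnce` trio is typically exercised, here is a hedged usage sketch; the driver and module path are assumptions, only the calls shown in the hunks above are taken from the diff:

```nim
import chronos
import libp2p/stream/bufferstream

proc demo() {.async.} =
  let bs = BufferStream.new()            # new() also calls initStream()
  await bs.pushData(@[1'u8, 2, 3])       # writer side fills the read buffer
  var buf: array[3, byte]
  let n = await bs.readOnce(addr buf[0], buf.len)  # reader drains it
  echo "read ", n, " bytes"
  await bs.pushEof()                     # signal orderly end of stream
  await bs.close()

waitFor demo()
```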
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -31,18 +31,22 @@ type
|
||||
tracked: bool
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
declareGauge(libp2p_peers_identity, "peers identities", labels = ["agent"])
|
||||
declareCounter(libp2p_peers_traffic_read, "incoming traffic", labels = ["agent"])
|
||||
declareCounter(libp2p_peers_traffic_write, "outgoing traffic", labels = ["agent"])
|
||||
declareGauge libp2p_peers_identity,
|
||||
"peers identities", labels = ["agent"]
|
||||
declareCounter libp2p_peers_traffic_read,
|
||||
"incoming traffic", labels = ["agent"]
|
||||
declareCounter libp2p_peers_traffic_write,
|
||||
"outgoing traffic", labels = ["agent"]
|
||||
|
||||
declareCounter(libp2p_network_bytes, "total traffic", labels = ["direction"])
|
||||
declareCounter libp2p_network_bytes,
|
||||
"total traffic", labels = ["direction"]
|
||||
|
||||
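The parentheses-free `declareGauge`/`declareCounter` calls above are the nim-metrics template form. A small standalone sketch of declaring and updating a labelled counter (the metric name and labels here are illustrative only):

```nim
import metrics

declareCounter example_bytes_total,
  "bytes moved by the example", labels = ["direction"]

proc record(n: int, dir: string) =
  # increments the counter for the given label value; a no-op build unless
  # the project is compiled with metrics collection enabled
  example_bytes_total.inc(n.int64, labelValues = [dir])

record(128, "in")
record(256, "out")
```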
func shortLog*(conn: ChronosStream): auto =
|
||||
try:
|
||||
if conn.isNil: "ChronosStream(nil)"
|
||||
if conn == nil: "ChronosStream(nil)"
|
||||
else: &"{shortLog(conn.peerId)}:{conn.oid}"
|
||||
except ValueError as exc:
|
||||
raise newException(Defect, exc.msg)
|
||||
raiseAssert(exc.msg)
|
||||
|
||||
chronicles.formatIt(ChronosStream): shortLog(it)
|
||||
|
||||
@@ -50,17 +54,18 @@ method initStream*(s: ChronosStream) =
|
||||
if s.objName.len == 0:
|
||||
s.objName = ChronosStreamTrackerName
|
||||
|
||||
s.timeoutHandler = proc() {.async, gcsafe.} =
|
||||
s.timeoutHandler = proc(): Future[void] {.async: (raises: [], raw: true).} =
|
||||
trace "Idle timeout expired, closing ChronosStream", s
|
||||
await s.close()
|
||||
s.close()
|
||||
|
||||
procCall Connection(s).initStream()
|
||||
|
||||
proc init*(C: type ChronosStream,
|
||||
client: StreamTransport,
|
||||
dir: Direction,
|
||||
timeout = DefaultChronosStreamTimeout,
|
||||
observedAddr: Opt[MultiAddress]): ChronosStream =
|
||||
proc init*(
|
||||
C: type ChronosStream,
|
||||
client: StreamTransport,
|
||||
dir: Direction,
|
||||
timeout = DefaultChronosStreamTimeout,
|
||||
observedAddr: Opt[MultiAddress]): ChronosStream =
|
||||
result = C(client: client,
|
||||
timeout: timeout,
|
||||
dir: dir,
|
||||
@@ -94,7 +99,11 @@ when defined(libp2p_agents_metrics):
|
||||
libp2p_peers_identity.dec(labelValues = [s.shortAgent])
|
||||
s.tracked = false
|
||||
|
||||
method readOnce*(s: ChronosStream, pbytes: pointer, nbytes: int): Future[int] {.async.} =
|
||||
method readOnce*(
|
||||
s: ChronosStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int
|
||||
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
if s.atEof:
|
||||
raise newLPStreamEOFError()
|
||||
withExceptions:
|
||||
@@ -107,7 +116,10 @@ method readOnce*(s: ChronosStream, pbytes: pointer, nbytes: int): Future[int] {.
|
||||
libp2p_peers_traffic_read.inc(result.int64, labelValues = [s.shortAgent])
|
||||
|
||||
proc completeWrite(
|
||||
s: ChronosStream, fut: Future[int], msgLen: int): Future[void] {.async.} =
|
||||
s: ChronosStream,
|
||||
fut: Future[int].Raising([TransportError, CancelledError]),
|
||||
msgLen: int
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
withExceptions:
|
||||
# StreamTransport will only return written < msg.len on fatal failures where
|
||||
# further writing is not possible - in such cases, we'll raise here,
|
||||
@@ -124,7 +136,11 @@ proc completeWrite(
|
||||
if s.tracked:
|
||||
libp2p_peers_traffic_write.inc(msgLen.int64, labelValues = [s.shortAgent])
|
||||
|
||||
method write*(s: ChronosStream, msg: seq[byte]): Future[void] =
|
||||
method write*(
|
||||
s: ChronosStream,
|
||||
msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
# Avoid a copy of msg being kept in the closure created by `{.async.}` as this
|
||||
# drives up memory usage
|
||||
if msg.len == 0:
|
||||
@@ -145,19 +161,14 @@ method closed*(s: ChronosStream): bool =
|
||||
method atEof*(s: ChronosStream): bool =
|
||||
s.client.atEof()
|
||||
|
||||
method closeImpl*(s: ChronosStream) {.async.} =
|
||||
try:
|
||||
trace "Shutting down chronos stream", address = $s.client.remoteAddress(), s
|
||||
method closeImpl*(
|
||||
s: ChronosStream) {.async: (raises: []).} =
|
||||
trace "Shutting down chronos stream", address = $s.client.remoteAddress(), s
|
||||
|
||||
if not s.client.closed():
|
||||
await s.client.closeWait()
|
||||
if not s.client.closed():
|
||||
await s.client.closeWait()
|
||||
|
||||
trace "Shutdown chronos stream", address = $s.client.remoteAddress(), s
|
||||
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
trace "Error closing chronosstream", s, msg = exc.msg
|
||||
trace "Shutdown chronos stream", address = $s.client.remoteAddress(), s
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
# do this after closing!
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -27,25 +27,25 @@ const
|
||||
DefaultConnectionTimeout* = 5.minutes
|
||||
|
||||
type
|
||||
TimeoutHandler* = proc(): Future[void] {.gcsafe, raises: [].}
|
||||
TimeoutHandler* = proc(): Future[void] {.async: (raises: []).}
|
||||
|
||||
Connection* = ref object of LPStream
|
||||
activity*: bool # reset every time data is sent or received
|
||||
timeout*: Duration # channel timeout if no activity
|
||||
timerTaskFut: Future[void] # the current timer instance
|
||||
activity*: bool # reset every time data is sent or received
|
||||
timeout*: Duration # channel timeout if no activity
|
||||
timerTaskFut: Future[void].Raising([]) # the current timer instance
|
||||
timeoutHandler*: TimeoutHandler # timeout handler
|
||||
peerId*: PeerId
|
||||
observedAddr*: Opt[MultiAddress]
|
||||
protocol*: string # protocol used by the connection, used as tag for metrics
|
||||
transportDir*: Direction # The bottom level transport (generally the socket) direction
|
||||
protocol*: string # protocol used by the connection, used as metrics tag
|
||||
transportDir*: Direction # underlying transport (usually socket) direction
|
||||
when defined(libp2p_agents_metrics):
|
||||
shortAgent*: string
|
||||
|
||||
proc timeoutMonitor(s: Connection) {.async, gcsafe.}
|
||||
proc timeoutMonitor(s: Connection) {.async: (raises: []).}
|
||||
|
||||
func shortLog*(conn: Connection): string =
|
||||
try:
|
||||
if conn.isNil: "Connection(nil)"
|
||||
if conn == nil: "Connection(nil)"
|
||||
else: &"{shortLog(conn.peerId)}:{conn.oid}"
|
||||
except ValueError as exc:
|
||||
raiseAssert(exc.msg)
|
||||
@@ -58,23 +58,28 @@ method initStream*(s: Connection) =
|
||||
|
||||
procCall LPStream(s).initStream()
|
||||
|
||||
doAssert(isNil(s.timerTaskFut))
|
||||
doAssert(s.timerTaskFut == nil)
|
||||
|
||||
if s.timeout > 0.millis:
|
||||
trace "Monitoring for timeout", s, timeout = s.timeout
|
||||
|
||||
s.timerTaskFut = s.timeoutMonitor()
|
||||
if isNil(s.timeoutHandler):
|
||||
s.timeoutHandler = proc(): Future[void] =
|
||||
trace "Idle timeout expired, closing connection", s
|
||||
s.close()
|
||||
if s.timeoutHandler == nil:
|
||||
s.timeoutHandler =
|
||||
proc(): Future[void] {.async: (raises: [], raw: true).} =
|
||||
trace "Idle timeout expired, closing connection", s
|
||||
s.close()
|
||||
|
||||
method closeImpl*(s: Connection): Future[void] =
|
||||
method closeImpl*(s: Connection): Future[void] {.async: (raises: []).} =
|
||||
# Cleanup timeout timer
|
||||
trace "Closing connection", s
|
||||
|
||||
if not isNil(s.timerTaskFut) and not s.timerTaskFut.finished:
|
||||
s.timerTaskFut.cancel()
|
||||
if s.timerTaskFut != nil and not s.timerTaskFut.finished:
|
||||
# Don't `cancelAndWait` here to avoid risking deadlock in this scenario:
|
||||
# - `pollActivity` is waiting for `s.timeoutHandler` to complete.
|
||||
# - `s.timeoutHandler` may have triggered `closeImpl` and we are now here.
|
||||
# In this situation, we have to return for `s.timerTaskFut` to complete.
|
||||
s.timerTaskFut.cancelSoon()
|
||||
s.timerTaskFut = nil
|
||||
|
||||
trace "Closed connection", s
|
||||
@@ -84,7 +89,7 @@ method closeImpl*(s: Connection): Future[void] =
|
||||
func hash*(p: Connection): Hash =
|
||||
cast[pointer](p).hash
|
||||
|
||||
proc pollActivity(s: Connection): Future[bool] {.async.} =
|
||||
proc pollActivity(s: Connection): Future[bool] {.async: (raises: []).} =
|
||||
if s.closed and s.atEof:
|
||||
return false # Done, no more monitoring
|
||||
|
||||
@@ -95,22 +100,13 @@ proc pollActivity(s: Connection): Future[bool] {.async.} =
|
||||
# Inactivity timeout happened, call timeout monitor
|
||||
|
||||
trace "Connection timed out", s
|
||||
if not(isNil(s.timeoutHandler)):
|
||||
if s.timeoutHandler != nil:
|
||||
trace "Calling timeout handler", s
|
||||
|
||||
try:
|
||||
await s.timeoutHandler()
|
||||
except CancelledError:
|
||||
# timeoutHandler is expected to be fast, but it's still possible that
|
||||
# cancellation will happen here - no need to warn about it - we do want to
|
||||
# stop the polling however
|
||||
debug "Timeout handler cancelled", s
|
||||
except CatchableError as exc: # Shouldn't happen
|
||||
warn "exception in timeout handler", s, exc = exc.msg
|
||||
await s.timeoutHandler()
|
||||
|
||||
return false
|
||||
|
||||
proc timeoutMonitor(s: Connection) {.async, gcsafe.} =
|
||||
proc timeoutMonitor(s: Connection) {.async: (raises: []).} =
|
||||
## monitor the channel for inactivity
|
||||
##
|
||||
## if the timeout was hit, it means that
|
||||
@@ -129,21 +125,22 @@ proc timeoutMonitor(s: Connection) {.async, gcsafe.} =
|
||||
return
|
||||
|
||||
method getWrapped*(s: Connection): Connection {.base.} =
|
||||
doAssert(false, "not implemented!")
|
||||
raiseAssert("Not implemented!")
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
proc setShortAgent*(s: Connection, shortAgent: string) =
|
||||
var conn = s
|
||||
while not isNil(conn):
|
||||
while conn != nil:
|
||||
conn.shortAgent = shortAgent
|
||||
conn = conn.getWrapped()
|
||||
|
||||
proc new*(C: type Connection,
|
||||
peerId: PeerId,
|
||||
dir: Direction,
|
||||
observedAddr: Opt[MultiAddress],
|
||||
timeout: Duration = DefaultConnectionTimeout,
|
||||
timeoutHandler: TimeoutHandler = nil): Connection =
|
||||
proc new*(
|
||||
C: type Connection,
|
||||
peerId: PeerId,
|
||||
dir: Direction,
|
||||
observedAddr: Opt[MultiAddress],
|
||||
timeout: Duration = DefaultConnectionTimeout,
|
||||
timeoutHandler: TimeoutHandler = nil): Connection =
|
||||
result = C(peerId: peerId,
|
||||
dir: dir,
|
||||
timeout: timeout,
|
||||
|
||||
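The `TimeoutHandler` type above is what `Connection.new` accepts for its optional `timeoutHandler` argument: a non-raising async proc. A hedged, self-contained sketch of a conforming handler (the local type alias simply mirrors the definition in the diff):

```nim
import chronos

# Mirrors the TimeoutHandler type from the diff.
type TimeoutHandler = proc(): Future[void] {.async: (raises: []).}

# Hypothetical handler: it must not raise, and it should be quick, since the
# timeout monitor awaits it before giving up on the connection.
proc logAndDrop(): Future[void] {.async: (raises: []).} =
  echo "idle timeout expired"

let handler: TimeoutHandler = logAndDrop
waitFor handler()
```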
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -23,8 +23,8 @@ import ../varint,
|
||||
|
||||
export errors
|
||||
|
||||
declareGauge(libp2p_open_streams,
|
||||
"open stream instances", labels = ["type", "dir"])
|
||||
declareGauge libp2p_open_streams,
|
||||
"open stream instances", labels = ["type", "dir"]
|
||||
|
||||
export oids
|
||||
|
||||
@@ -50,12 +50,7 @@ type
|
||||
|
||||
LPStreamError* = object of LPError
|
||||
LPStreamIncompleteError* = object of LPStreamError
|
||||
LPStreamIncorrectDefect* = object of Defect
|
||||
LPStreamLimitError* = object of LPStreamError
|
||||
LPStreamReadError* = object of LPStreamError
|
||||
par*: ref CatchableError
|
||||
LPStreamWriteError* = object of LPStreamError
|
||||
par*: ref CatchableError
|
||||
LPStreamEOFError* = object of LPStreamError
|
||||
|
||||
# X | Read | Write
|
||||
@@ -77,54 +72,12 @@ type
|
||||
opened*: uint64
|
||||
closed*: uint64
|
||||
|
||||
proc setupStreamTracker*(name: string): StreamTracker =
|
||||
let tracker = new StreamTracker
|
||||
|
||||
proc dumpTracking(): string {.gcsafe.} =
|
||||
return "Opened " & tracker.id & ": " & $tracker.opened & "\n" &
|
||||
"Closed " & tracker.id & ": " & $tracker.closed
|
||||
|
||||
proc leakTransport(): bool {.gcsafe.} =
|
||||
return (tracker.opened != tracker.closed)
|
||||
|
||||
tracker.id = name
|
||||
tracker.opened = 0
|
||||
tracker.closed = 0
|
||||
tracker.dump = dumpTracking
|
||||
tracker.isLeaked = leakTransport
|
||||
addTracker(name, tracker)
|
||||
|
||||
return tracker
|
||||
|
||||
proc getStreamTracker(name: string): StreamTracker {.gcsafe.} =
|
||||
result = cast[StreamTracker](getTracker(name))
|
||||
if isNil(result):
|
||||
result = setupStreamTracker(name)
|
||||
|
||||
proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError =
|
||||
var w = newException(LPStreamReadError, "Read stream failed")
|
||||
w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
|
||||
w.par = p
|
||||
result = w
|
||||
|
||||
proc newLPStreamReadError*(msg: string): ref LPStreamReadError =
|
||||
newException(LPStreamReadError, msg)
|
||||
|
||||
proc newLPStreamWriteError*(p: ref CatchableError): ref LPStreamWriteError =
|
||||
var w = newException(LPStreamWriteError, "Write stream failed")
|
||||
w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
|
||||
w.par = p
|
||||
result = w
|
||||
|
||||
proc newLPStreamIncompleteError*(): ref LPStreamIncompleteError =
|
||||
result = newException(LPStreamIncompleteError, "Incomplete data received")
|
||||
|
||||
proc newLPStreamLimitError*(): ref LPStreamLimitError =
|
||||
result = newException(LPStreamLimitError, "Buffer limit reached")
|
||||
|
||||
proc newLPStreamIncorrectDefect*(m: string): ref LPStreamIncorrectDefect =
|
||||
result = newException(LPStreamIncorrectDefect, m)
|
||||
|
||||
proc newLPStreamEOFError*(): ref LPStreamEOFError =
|
||||
result = newException(LPStreamEOFError, "Stream EOF!")
|
||||
|
||||
@@ -145,8 +98,9 @@ proc newLPStreamConnDownError*(
|
||||
parentException)
|
||||
|
||||
func shortLog*(s: LPStream): auto =
|
||||
if s.isNil: "LPStream(nil)"
|
||||
if s == nil: "LPStream(nil)"
|
||||
else: $s.oid
|
||||
|
||||
chronicles.formatIt(LPStream): shortLog(it)
|
||||
|
||||
method initStream*(s: LPStream) {.base.} =
|
||||
@@ -157,10 +111,12 @@ method initStream*(s: LPStream) {.base.} =
|
||||
s.oid = genOid()
|
||||
|
||||
libp2p_open_streams.inc(labelValues = [s.objName, $s.dir])
|
||||
inc getStreamTracker(s.objName).opened
|
||||
trackCounter(s.objName)
|
||||
trace "Stream created", s, objName = s.objName, dir = $s.dir
|
||||
|
||||
proc join*(s: LPStream): Future[void] {.public.} =
|
||||
proc join*(
|
||||
s: LPStream
|
||||
): Future[void] {.async: (raises: [CancelledError], raw: true), public.} =
|
||||
## Wait for the stream to be closed
|
||||
s.closeEvent.wait()
|
||||
|
||||
@@ -171,19 +127,21 @@ method atEof*(s: LPStream): bool {.base, public.} =
|
||||
s.isEof
|
||||
|
||||
method readOnce*(
|
||||
s: LPStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int):
|
||||
Future[int] {.base, async, public.} =
|
||||
s: LPStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int
|
||||
): Future[int] {.base, async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true), public.} =
|
||||
## Reads whatever is available in the stream,
|
||||
## up to `nbytes`. Will block if nothing is
|
||||
## available
|
||||
doAssert(false, "not implemented!")
|
||||
raiseAssert("Not implemented!")
|
||||
|
||||
proc readExactly*(s: LPStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int):
|
||||
Future[void] {.async, public.} =
|
||||
proc readExactly*(
|
||||
s: LPStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
## Waits for `nbytes` to be available, then read
|
||||
## them and return them
|
||||
if s.atEof:
|
||||
@@ -217,10 +175,11 @@ proc readExactly*(s: LPStream,
|
||||
trace "couldn't read all bytes, incomplete data", s, nbytes, read
|
||||
raise newLPStreamIncompleteError()
|
||||
|
||||
proc readLine*(s: LPStream,
|
||||
limit = 0,
|
||||
sep = "\r\n"): Future[string]
|
||||
{.async, public.} =
|
||||
proc readLine*(
|
||||
s: LPStream,
|
||||
limit = 0,
|
||||
sep = "\r\n"
|
||||
): Future[string] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
## Read until a `sep` is found or until `limit` bytes have been read
|
||||
# TODO replace with something that exploits buffering better
|
||||
var lim = if limit <= 0: -1 else: limit
|
||||
@@ -246,7 +205,9 @@ proc readLine*(s: LPStream,
|
||||
if len(result) == lim:
|
||||
break
|
||||
|
||||
proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
|
||||
proc readVarint*(
|
||||
conn: LPStream
|
||||
): Future[uint64] {.async: (raises: [CancelledError, LPStreamError]), public.} =
|
||||
var
|
||||
buffer: array[10, byte]
|
||||
|
||||
@@ -264,7 +225,11 @@ proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
|
||||
if true: # can't end with a raise apparently
|
||||
raise (ref InvalidVarintError)(msg: "Cannot parse varint")
|
||||
|
||||
proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, gcsafe, public.} =
|
||||
proc readLp*(
|
||||
s: LPStream,
|
||||
maxSize: int
|
||||
): Future[seq[byte]] {.async: (raises: [
|
||||
CancelledError, LPStreamError]), public.} =
|
||||
## read length prefixed msg, with the length encoded as a varint
|
||||
let
|
||||
length = await s.readVarint()
|
||||
@@ -278,13 +243,21 @@ proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, gcsafe, publ
|
||||
|
||||
var res = newSeqUninitialized[byte](length)
|
||||
await s.readExactly(addr res[0], res.len)
|
||||
return res
|
||||
res
|
||||
|
||||
method write*(s: LPStream, msg: seq[byte]): Future[void] {.base, public.} =
|
||||
method write*(
|
||||
s: LPStream,
|
||||
msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true), base, public.} =
|
||||
# Write `msg` to stream, waiting for the write to be finished
|
||||
doAssert(false, "not implemented!")
|
||||
raiseAssert("Not implemented!")
|
||||
|
||||
proc writeLp*(s: LPStream, msg: openArray[byte]): Future[void] {.public.} =
|
||||
proc writeLp*(
|
||||
s: LPStream,
|
||||
msg: openArray[byte]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true), public.} =
|
||||
## Write `msg` with a varint-encoded length prefix
|
||||
let vbytes = PB.toBytes(msg.len().uint64)
|
||||
var buf = newSeqUninitialized[byte](msg.len() + vbytes.len)
|
||||
@@ -292,35 +265,53 @@ proc writeLp*(s: LPStream, msg: openArray[byte]): Future[void] {.public.} =
|
||||
buf[vbytes.len..<buf.len] = msg
|
||||
s.write(buf)
|
||||
|
||||
proc writeLp*(s: LPStream, msg: string): Future[void] {.public.} =
|
||||
proc writeLp*(
|
||||
s: LPStream,
|
||||
msg: string
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true), public.} =
|
||||
writeLp(s, msg.toOpenArrayByte(0, msg.high))
|
||||
|
||||
proc write*(s: LPStream, msg: string): Future[void] {.public.} =
|
||||
proc write*(
|
||||
s: LPStream,
|
||||
msg: string
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true), public.} =
|
||||
s.write(msg.toBytes())
|
||||
|
||||
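`writeLp`/`readLp` frame every message with a varint length prefix. A hedged sketch of the read side over a `BufferStream`; for payloads under 128 bytes the varint prefix is a single byte, so the frame can be built by hand (the driver is illustrative, not part of the diff):

```nim
import chronos
import libp2p/stream/[bufferstream, lpstream]

proc demo() {.async.} =
  let stream = BufferStream.new()
  let payload = @[byte 1, 2, 3]
  # single-byte varint length prefix, valid only because payload.len < 128
  await stream.pushData(@[byte(payload.len)] & payload)
  let msg = await stream.readLp(1024)   # rejects frames larger than maxSize
  doAssert msg == payload
  await stream.close()

waitFor demo()
```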
method closeImpl*(s: LPStream): Future[void] {.async, base.} =
|
||||
method closeImpl*(
|
||||
s: LPStream
|
||||
): Future[void] {.async: (raises: [], raw: true), base.} =
|
||||
## Implementation of close - called only once
|
||||
trace "Closing stream", s, objName = s.objName, dir = $s.dir
|
||||
libp2p_open_streams.dec(labelValues = [s.objName, $s.dir])
|
||||
inc getStreamTracker(s.objName).closed
|
||||
untrackCounter(s.objName)
|
||||
s.closeEvent.fire()
|
||||
trace "Closed stream", s, objName = s.objName, dir = $s.dir
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
fut
|
||||
|
||||
method close*(s: LPStream): Future[void] {.base, async, public.} = # {.raises [Defect].}
|
||||
method close*(
|
||||
s: LPStream
|
||||
): Future[void] {.async: (raises: [], raw: true), base, public.} =
|
||||
## close the stream - this may block, but will not raise exceptions
|
||||
##
|
||||
if s.isClosed:
|
||||
trace "Already closed", s
|
||||
return
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
return fut
|
||||
|
||||
s.isClosed = true # Set flag before performing virtual close
|
||||
|
||||
# An separate implementation method is used so that even when derived types
|
||||
# A separate implementation method is used so that even when derived types
|
||||
# override `closeImpl`, it is called only once - anyone overriding `close`
|
||||
# itself must implement this - once-only check as well, with their own field
|
||||
await closeImpl(s)
|
||||
closeImpl(s)
|
||||
|
||||
proc closeWithEOF*(s: LPStream): Future[void] {.async, public.} =
|
||||
proc closeWithEOF*(
|
||||
s: LPStream): Future[void] {.async: (raises: []), public.} =
|
||||
## Close the stream and wait for EOF - use this with half-closed streams where
|
||||
## an EOF is expected to arrive from the other end.
|
||||
##
|
||||
@@ -349,9 +340,9 @@ proc closeWithEOF*(s: LPStream): Future[void] {.async, public.} =
|
||||
var buf: array[8, byte]
|
||||
if (await readOnce(s, addr buf[0], buf.len)) != 0:
|
||||
debug "Unexpected bytes while waiting for EOF", s
|
||||
except CancelledError:
|
||||
discard
|
||||
except LPStreamEOFError:
|
||||
trace "Expected EOF came", s
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
except LPStreamError as exc:
|
||||
debug "Unexpected error while waiting for EOF", s, msg = exc.msg
|
||||
|
||||
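`closeWithEOF` is intended for half-closed exchanges where our side finishes writing first and the peer is expected to answer with EOF. A hedged usage sketch (the surrounding protocol is invented; `conn` is assumed to be an established stream):

```nim
import chronos
import libp2p/stream/[connection, lpstream]

# Hypothetical sender side of a half-closed exchange.
proc finishRequest(conn: Connection) {.async.} =
  await conn.writeLp("last request")  # final bytes from our side
  await conn.closeWithEOF()           # close write side, then wait for remote EOF
```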
@@ -71,17 +71,17 @@ type
|
||||
inUse: bool
|
||||
|
||||
|
||||
method setup*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
|
||||
method setup*(self: Service, switch: Switch): Future[bool] {.base, async.} =
|
||||
if self.inUse:
|
||||
warn "service setup has already been called"
|
||||
return false
|
||||
self.inUse = true
|
||||
return true
|
||||
|
||||
method run*(self: Service, switch: Switch) {.base, async, gcsafe.} =
|
||||
method run*(self: Service, switch: Switch) {.base, async.} =
|
||||
doAssert(false, "Not implemented!")
|
||||
|
||||
method stop*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
|
||||
method stop*(self: Service, switch: Switch): Future[bool] {.base, async.} =
|
||||
if not self.inUse:
|
||||
warn "service is already stopped"
|
||||
return false
|
||||
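The `Service` base above (now without the redundant `gcsafe`, which `async` already implies) is what switch extensions derive from. A hedged sketch of a minimal service; the type and log messages are invented for illustration, the base calls come from the diff:

```nim
import chronos, chronicles
import libp2p/switch

type HeartbeatService* = ref object of Service
  beats: int

method setup*(self: HeartbeatService, switch: Switch): Future[bool] {.async.} =
  let started = await procCall Service(self).setup(switch)  # sets the inUse flag
  if started:
    info "heartbeat service installed"
  return started

method run*(self: HeartbeatService, switch: Switch) {.async.} =
  # called periodically by the switch's service scheduler
  self.beats.inc
  trace "heartbeat", count = self.beats

method stop*(self: HeartbeatService, switch: Switch): Future[bool] {.async.} =
  return await procCall Service(self).stop(switch)
```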
@@ -141,10 +141,10 @@ method connect*(
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out): Future[void] {.public.} =
|
||||
dir = Direction.Out): Future[void] {.public.} =
|
||||
## Connects to a peer without opening a stream to it
|
||||
|
||||
s.dialer.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
|
||||
s.dialer.connect(peerId, addrs, forceDial, reuseConnection, dir)
|
||||
|
||||
method connect*(
|
||||
s: Switch,
|
||||
@@ -213,7 +213,7 @@ proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil)
|
||||
s.peerInfo.protocols.add(proto.codec)
|
||||
|
||||
proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} =
|
||||
let muxed = await trans.upgrade(conn, Direction.In, Opt.none(PeerId))
|
||||
let muxed = await trans.upgrade(conn, Opt.none(PeerId))
|
||||
switch.connManager.storeMuxer(muxed)
|
||||
await switch.peerStore.identify(muxed)
|
||||
trace "Connection upgrade succeeded"
|
||||
@@ -273,6 +273,7 @@ proc accept(s: Switch, transport: Transport) {.async.} = # noraises
|
||||
except CancelledError as exc:
|
||||
trace "releasing semaphore on cancellation"
|
||||
upgrades.release() # always release the slot
|
||||
return
|
||||
except CatchableError as exc:
|
||||
error "Exception in accept loop, exiting", exc = exc.msg
|
||||
upgrades.release() # always release the slot
|
||||
@@ -288,6 +289,12 @@ proc stop*(s: Switch) {.async, public.} =
|
||||
|
||||
s.started = false
|
||||
|
||||
try:
|
||||
# Stop accepting incoming connections
|
||||
await allFutures(s.acceptFuts.mapIt(it.cancelAndWait())).wait(1.seconds)
|
||||
except CatchableError as exc:
|
||||
debug "Cannot cancel accepts", error = exc.msg
|
||||
|
||||
for service in s.services:
|
||||
discard await service.stop(s)
|
||||
|
||||
@@ -302,18 +309,6 @@ proc stop*(s: Switch) {.async, public.} =
|
||||
except CatchableError as exc:
|
||||
warn "error cleaning up transports", msg = exc.msg
|
||||
|
||||
try:
|
||||
await allFutures(s.acceptFuts)
|
||||
.wait(1.seconds)
|
||||
except CatchableError as exc:
|
||||
trace "Exception while stopping accept loops", exc = exc.msg
|
||||
|
||||
# check that all futures were properly
|
||||
# stopped and otherwise cancel them
|
||||
for a in s.acceptFuts:
|
||||
if not a.finished:
|
||||
a.cancel()
|
||||
|
||||
for service in s.services:
|
||||
discard await service.stop(s)
|
||||
|
||||
@@ -321,7 +316,7 @@ proc stop*(s: Switch) {.async, public.} =
|
||||
|
||||
trace "Switch stopped"
|
||||
|
||||
proc start*(s: Switch) {.async, gcsafe, public.} =
|
||||
proc start*(s: Switch) {.async, public.} =
|
||||
## Start listening on every transport
|
||||
|
||||
if s.started:
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -12,274 +12,327 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[sequtils]
|
||||
import stew/results
|
||||
import chronos, chronicles
|
||||
import transport,
|
||||
../errors,
|
||||
../wire,
|
||||
../multicodec,
|
||||
../connmanager,
|
||||
../multiaddress,
|
||||
../stream/connection,
|
||||
../stream/chronosstream,
|
||||
../upgrademngrs/upgrade,
|
||||
../utility
|
||||
import
|
||||
./transport,
|
||||
../wire,
|
||||
../multiaddress,
|
||||
../stream/connection,
|
||||
../stream/chronosstream,
|
||||
../upgrademngrs/upgrade,
|
||||
../utility
|
||||
|
||||
logScope:
|
||||
topics = "libp2p tcptransport"
|
||||
|
||||
export transport, results
|
||||
export transport, connection, upgrade
|
||||
|
||||
const
|
||||
TcpTransportTrackerName* = "libp2p.tcptransport"
|
||||
const TcpTransportTrackerName* = "libp2p.tcptransport"
|
||||
|
||||
type
|
||||
AcceptFuture = typeof(default(StreamServer).accept())
|
||||
|
||||
TcpTransport* = ref object of Transport
|
||||
servers*: seq[StreamServer]
|
||||
clients: array[Direction, seq[StreamTransport]]
|
||||
flags: set[ServerFlags]
|
||||
clientFlags: set[SocketFlags]
|
||||
acceptFuts: seq[Future[StreamTransport]]
|
||||
acceptFuts: seq[AcceptFuture]
|
||||
connectionsTimeout: Duration
|
||||
|
||||
TcpTransportTracker* = ref object of TrackerBase
|
||||
opened*: uint64
|
||||
closed*: uint64
|
||||
stopping: bool
|
||||
|
||||
TcpTransportError* = object of transport.TransportError
|
||||
|
||||
proc setupTcpTransportTracker(): TcpTransportTracker {.gcsafe, raises: [].}
|
||||
|
||||
proc getTcpTransportTracker(): TcpTransportTracker {.gcsafe.} =
|
||||
result = cast[TcpTransportTracker](getTracker(TcpTransportTrackerName))
|
||||
if isNil(result):
|
||||
result = setupTcpTransportTracker()
|
||||
|
||||
proc dumpTracking(): string {.gcsafe.} =
|
||||
var tracker = getTcpTransportTracker()
|
||||
result = "Opened tcp transports: " & $tracker.opened & "\n" &
|
||||
"Closed tcp transports: " & $tracker.closed
|
||||
|
||||
proc leakTransport(): bool {.gcsafe.} =
|
||||
var tracker = getTcpTransportTracker()
|
||||
result = (tracker.opened != tracker.closed)
|
||||
|
||||
proc setupTcpTransportTracker(): TcpTransportTracker =
|
||||
result = new TcpTransportTracker
|
||||
result.opened = 0
|
||||
result.closed = 0
|
||||
result.dump = dumpTracking
|
||||
result.isLeaked = leakTransport
|
||||
addTracker(TcpTransportTrackerName, result)
|
||||
|
||||
proc connHandler*(self: TcpTransport,
|
||||
client: StreamTransport,
|
||||
observedAddr: Opt[MultiAddress],
|
||||
dir: Direction): Future[Connection] {.async.} =
|
||||
|
||||
trace "Handling tcp connection", address = $observedAddr,
|
||||
dir = $dir,
|
||||
clients = self.clients[Direction.In].len +
|
||||
self.clients[Direction.Out].len
|
||||
proc connHandler*(
|
||||
self: TcpTransport,
|
||||
client: StreamTransport,
|
||||
observedAddr: Opt[MultiAddress],
|
||||
dir: Direction,
|
||||
): Connection =
|
||||
trace "Handling tcp connection",
|
||||
address = $observedAddr,
|
||||
dir = $dir,
|
||||
clients = self.clients[Direction.In].len + self.clients[Direction.Out].len
|
||||
|
||||
let conn = Connection(
|
||||
ChronosStream.init(
|
||||
client = client,
|
||||
dir = dir,
|
||||
observedAddr = observedAddr,
|
||||
timeout = self.connectionsTimeout
|
||||
))
|
||||
timeout = self.connectionsTimeout,
|
||||
)
|
||||
)
|
||||
|
||||
proc onClose() {.async.} =
|
||||
try:
|
||||
let futs = @[client.join(), conn.join()]
|
||||
await futs[0] or futs[1]
|
||||
for f in futs:
|
||||
if not f.finished: await f.cancelAndWait() # cancel outstanding join()
|
||||
proc onClose() {.async: (raises: []).} =
|
||||
await noCancel client.join()
|
||||
|
||||
trace "Cleaning up client", addrs = $client.remoteAddress,
|
||||
conn
|
||||
trace "Cleaning up client", addrs = $client.remoteAddress, conn
|
||||
|
||||
self.clients[dir].keepItIf( it != client )
|
||||
await allFuturesThrowing(
|
||||
conn.close(), client.closeWait())
|
||||
self.clients[dir].keepItIf(it != client)
|
||||
|
||||
trace "Cleaned up client", addrs = $client.remoteAddress,
|
||||
conn
|
||||
# Propagate the chronos client being closed to the connection
|
||||
# TODO This is somewhat dubious since it's the connection that owns the
|
||||
# client, but it allows the transport to close all connections when
|
||||
# shutting down (also dubious! it would make more sense that the owner
|
||||
# of all connections closes them, or the next read detects the closed
|
||||
# socket and does the right thing..)
|
||||
|
||||
except CatchableError as exc:
|
||||
let useExc {.used.} = exc
|
||||
debug "Error cleaning up client", errMsg = exc.msg, conn
|
||||
await conn.close()
|
||||
|
||||
trace "Cleaned up client", addrs = $client.remoteAddress, conn
|
||||
|
||||
self.clients[dir].add(client)
|
||||
|
||||
asyncSpawn onClose()
|
||||
|
||||
return conn
|
||||
|
||||
proc new*(
|
||||
T: typedesc[TcpTransport],
|
||||
flags: set[ServerFlags] = {},
|
||||
upgrade: Upgrade,
|
||||
connectionsTimeout = 10.minutes): T {.public.} =
|
||||
T: typedesc[TcpTransport],
|
||||
flags: set[ServerFlags] = {},
|
||||
upgrade: Upgrade,
|
||||
connectionsTimeout = 10.minutes,
|
||||
): T {.public.} =
|
||||
T(
|
||||
flags: flags,
|
||||
clientFlags:
|
||||
if ServerFlags.TcpNoDelay in flags:
|
||||
{SocketFlags.TcpNoDelay}
|
||||
else:
|
||||
default(set[SocketFlags])
|
||||
,
|
||||
upgrader: upgrade,
|
||||
networkReachability: NetworkReachability.Unknown,
|
||||
connectionsTimeout: connectionsTimeout,
|
||||
)
|
||||
|
||||
let
|
||||
transport = T(
|
||||
flags: flags,
|
||||
clientFlags:
|
||||
if ServerFlags.TcpNoDelay in flags:
|
||||
compilesOr:
|
||||
{SocketFlags.TcpNoDelay}
|
||||
do:
|
||||
doAssert(false)
|
||||
default(set[SocketFlags])
|
||||
else:
|
||||
default(set[SocketFlags]),
|
||||
upgrader: upgrade,
|
||||
networkReachability: NetworkReachability.Unknown,
|
||||
connectionsTimeout: connectionsTimeout)
|
||||
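How the reworked constructor is typically called, as a hedged sketch: the bare `Upgrade()` is only a placeholder for illustration, in a real node the upgrade manager comes from the switch builder.

```nim
import chronos
import libp2p/transports/tcptransport
import libp2p/upgrademngrs/upgrade

let transport = TcpTransport.new(
  flags = {ServerFlags.TcpNoDelay},   # also enables TcpNoDelay on dialed sockets
  upgrade = Upgrade(),                # placeholder upgrader, illustration only
  connectionsTimeout = 5.minutes)
```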
method start*(self: TcpTransport, addrs: seq[MultiAddress]): Future[void] =
|
||||
## Start transport listening to the given addresses - for dial-only transports,
|
||||
## start with an empty list
|
||||
|
||||
return transport
|
||||
# TODO remove `impl` indirection throughout when `raises` is added to base
|
||||
|
||||
method start*(
|
||||
self: TcpTransport,
|
||||
addrs: seq[MultiAddress]) {.async.} =
|
||||
## listen on the transport
|
||||
##
|
||||
|
||||
if self.running:
|
||||
warn "TCP transport already running"
|
||||
return
|
||||
|
||||
await procCall Transport(self).start(addrs)
|
||||
trace "Starting TCP transport"
|
||||
inc getTcpTransportTracker().opened
|
||||
|
||||
for i, ma in addrs:
|
||||
if not self.handles(ma):
|
||||
trace "Invalid address detected, skipping!", address = ma
|
||||
continue
|
||||
|
||||
self.flags.incl(ServerFlags.ReusePort)
|
||||
let server = createStreamServer(
|
||||
ma = ma,
|
||||
flags = self.flags,
|
||||
udata = self)
|
||||
|
||||
# always get the resolved address in case we're bound to 0.0.0.0:0
|
||||
self.addrs[i] = MultiAddress.init(
|
||||
server.sock.getLocalAddress()
|
||||
).tryGet()
|
||||
|
||||
self.servers &= server
|
||||
|
||||
trace "Listening on", address = ma
|
||||
|
||||
method stop*(self: TcpTransport) {.async, gcsafe.} =
|
||||
## stop the transport
|
||||
##
|
||||
try:
|
||||
trace "Stopping TCP transport"
|
||||
|
||||
checkFutures(
|
||||
await allFinished(
|
||||
self.clients[Direction.In].mapIt(it.closeWait()) &
|
||||
self.clients[Direction.Out].mapIt(it.closeWait())))
|
||||
|
||||
if not self.running:
|
||||
warn "TCP transport already stopped"
|
||||
proc impl(
|
||||
self: TcpTransport, addrs: seq[MultiAddress]
|
||||
): Future[void] {.async: (raises: [transport.TransportError, CancelledError]).} =
|
||||
if self.running:
|
||||
warn "TCP transport already running"
|
||||
return
|
||||
|
||||
await procCall Transport(self).stop() # call base
|
||||
var toWait: seq[Future[void]]
|
||||
for fut in self.acceptFuts:
|
||||
if not fut.finished:
|
||||
toWait.add(fut.cancelAndWait())
|
||||
elif fut.done:
|
||||
toWait.add(fut.read().closeWait())
|
||||
trace "Starting TCP transport"
|
||||
|
||||
for server in self.servers:
|
||||
server.stop()
|
||||
toWait.add(server.closeWait())
|
||||
self.flags.incl(ServerFlags.ReusePort)
|
||||
|
||||
await allFutures(toWait)
|
||||
var supported: seq[MultiAddress]
|
||||
var initialized = false
|
||||
try:
|
||||
for i, ma in addrs:
|
||||
if not self.handles(ma):
|
||||
trace "Invalid address detected, skipping!", address = ma
|
||||
continue
|
||||
|
||||
self.servers = @[]
|
||||
let
|
||||
ta = initTAddress(ma).expect("valid address per handles check above")
|
||||
server =
|
||||
try:
|
||||
createStreamServer(ta, flags = self.flags)
|
||||
except common.TransportError as exc:
|
||||
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
|
||||
|
||||
trace "Transport stopped"
|
||||
inc getTcpTransportTracker().closed
|
||||
except CatchableError as exc:
|
||||
trace "Error shutting down tcp transport", exc = exc.msg
|
||||
self.servers &= server
|
||||
|
||||
method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
|
||||
## accept a new TCP connection
|
||||
trace "Listening on", address = ma
|
||||
supported.add(
|
||||
MultiAddress.init(server.sock.getLocalAddress()).expect(
|
||||
"Can init from local address"
|
||||
)
|
||||
)
|
||||
|
||||
initialized = true
|
||||
finally:
|
||||
if not initialized:
|
||||
# Clean up partial success on exception
|
||||
await noCancel allFutures(self.servers.mapIt(it.closeWait()))
|
||||
reset(self.servers)
|
||||
|
||||
try:
|
||||
await procCall Transport(self).start(supported)
|
||||
except CatchableError:
|
||||
raiseAssert "Base method does not raise"
|
||||
|
||||
trackCounter(TcpTransportTrackerName)
|
||||
|
||||
impl(self, addrs)
|
||||
|
||||
method stop*(self: TcpTransport): Future[void] =
|
||||
## Stop the transport and close all connections it created
|
||||
proc impl(self: TcpTransport) {.async: (raises: []).} =
|
||||
trace "Stopping TCP transport"
|
||||
self.stopping = true
|
||||
defer:
|
||||
self.stopping = false
|
||||
|
||||
if self.running:
|
||||
# Reset the running flag
|
||||
try:
|
||||
await noCancel procCall Transport(self).stop()
|
||||
except CatchableError: # TODO remove when `accept` is annotated with raises
|
||||
raiseAssert "doesn't actually raise"
|
||||
|
||||
# Stop each server by closing the socket - this will cause all accept loops
|
||||
# to fail - since the running flag has been reset, it's also safe to close
|
||||
# all known clients since no more of them will be added
|
||||
await noCancel allFutures(
|
||||
self.servers.mapIt(it.closeWait()) &
|
||||
self.clients[Direction.In].mapIt(it.closeWait()) &
|
||||
self.clients[Direction.Out].mapIt(it.closeWait())
|
||||
)
|
||||
|
||||
self.servers = @[]
|
||||
|
||||
for acceptFut in self.acceptFuts:
|
||||
if acceptFut.completed():
|
||||
await acceptFut.value().closeWait()
|
||||
self.acceptFuts = @[]
|
||||
|
||||
if self.clients[Direction.In].len != 0 or self.clients[Direction.Out].len != 0:
|
||||
# Future updates could consider turning this warn into an assert since
|
||||
# it should never happen if the shutdown code is correct
|
||||
warn "Couldn't clean up clients",
|
||||
len = self.clients[Direction.In].len + self.clients[Direction.Out].len
|
||||
|
||||
trace "Transport stopped"
|
||||
untrackCounter(TcpTransportTrackerName)
|
||||
else:
|
||||
# For legacy reasons, `stop` on a transpart that wasn't started is
|
||||
# expected to close outgoing connections created by the transport
|
||||
warn "TCP transport already stopped"
|
||||
|
||||
doAssert self.clients[Direction.In].len == 0,
|
||||
"No incoming connections possible without start"
|
||||
await noCancel allFutures(self.clients[Direction.Out].mapIt(it.closeWait()))
|
||||
|
||||
impl(self)
|
||||
|
||||
method accept*(self: TcpTransport): Future[Connection] =
|
||||
## accept a new TCP connection, returning nil on non-fatal errors
|
||||
##
|
||||
## Raises an exception when the transport is broken and cannot be used for
|
||||
## accepting further connections
|
||||
# TODO returning nil for non-fatal errors is problematic in that error
|
||||
# information is lost and must be logged here instead of being
|
||||
# available to the caller - further refactoring should propagate errors
|
||||
# to the caller instead
|
||||
proc impl(
|
||||
self: TcpTransport
|
||||
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
|
||||
if not self.running:
|
||||
raise newTransportClosedError()
|
||||
|
||||
if not self.running:
|
||||
raise newTransportClosedError()
|
||||
|
||||
try:
|
||||
if self.acceptFuts.len <= 0:
|
||||
self.acceptFuts = self.servers.mapIt(it.accept())
|
||||
|
||||
if self.acceptFuts.len <= 0:
|
||||
return
|
||||
|
||||
let
|
||||
finished = await one(self.acceptFuts)
|
||||
finished =
|
||||
try:
|
||||
await one(self.acceptFuts)
|
||||
except ValueError:
|
||||
raise (ref TcpTransportError)(msg: "No listeners configured")
|
||||
|
||||
index = self.acceptFuts.find(finished)
|
||||
transp =
|
||||
try:
|
||||
await finished
|
||||
except TransportTooManyError as exc:
|
||||
debug "Too many files opened", exc = exc.msg
|
||||
return nil
|
||||
except TransportAbortedError as exc:
|
||||
debug "Connection aborted", exc = exc.msg
|
||||
return nil
|
||||
except TransportUseClosedError as exc:
|
||||
raise newTransportClosedError(exc)
|
||||
except TransportOsError as exc:
|
||||
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
|
||||
except common.TransportError as exc: # Needed for chronos 4.0.0 support
|
||||
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
|
||||
if not self.running: # Stopped while waiting
|
||||
await transp.closeWait()
|
||||
raise newTransportClosedError()
|
||||
|
||||
self.acceptFuts[index] = self.servers[index].accept()
|
||||
|
||||
let transp = await finished
|
||||
try:
|
||||
let observedAddr = MultiAddress.init(transp.remoteAddress).tryGet()
|
||||
return await self.connHandler(transp, Opt.some(observedAddr), Direction.In)
|
||||
except CancelledError as exc:
|
||||
transp.close()
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
debug "Failed to handle connection", exc = exc.msg
|
||||
transp.close()
|
||||
except TransportTooManyError as exc:
|
||||
debug "Too many files opened", exc = exc.msg
|
||||
except TransportAbortedError as exc:
|
||||
debug "Connection aborted", exc = exc.msg
|
||||
except TransportUseClosedError as exc:
|
||||
debug "Server was closed", exc = exc.msg
|
||||
raise newTransportClosedError(exc)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except TransportOsError as exc:
|
||||
info "OS Error", exc = exc.msg
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
info "Unexpected error accepting connection", exc = exc.msg
|
||||
raise exc
|
||||
let remote =
|
||||
try:
|
||||
transp.remoteAddress
|
||||
except TransportOsError as exc:
|
||||
# The connection had errors / was closed before `await` returned control
|
||||
await transp.closeWait()
|
||||
debug "Cannot read remote address", exc = exc.msg
|
||||
return nil
|
||||
|
||||
let observedAddr =
|
||||
MultiAddress.init(remote).expect("Can initialize from remote address")
|
||||
self.connHandler(transp, Opt.some(observedAddr), Direction.In)
|
||||
|
||||
impl(self)
|
||||
|
||||
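The accept loop above keeps one pending `accept()` future per listening server and races them with chronos `one`, re-arming whichever listener fired. A hedged standalone sketch of that pattern using plain chronos types, no libp2p involved (names are illustrative):

```nim
import chronos, sequtils

type Acceptor = ref object
  servers: seq[StreamServer]
  pending: seq[Future[StreamTransport]]

# Hypothetical helper mirroring the loop in the diff.
proc acceptAny(a: Acceptor): Future[StreamTransport] {.async.} =
  if a.pending.len == 0:
    a.pending = a.servers.mapIt(it.accept())
  let finished = await one(a.pending)          # first accept future to complete
  let index = a.pending.find(finished)
  a.pending[index] = a.servers[index].accept() # re-arm that listener
  return await finished
```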
method dial*(
|
||||
self: TcpTransport,
|
||||
hostname: string,
|
||||
address: MultiAddress,
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
|
||||
self: TcpTransport,
|
||||
hostname: string,
|
||||
address: MultiAddress,
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId),
|
||||
): Future[Connection] =
|
||||
## dial a peer
|
||||
##
|
||||
proc impl(
|
||||
self: TcpTransport, hostname: string, address: MultiAddress, peerId: Opt[PeerId]
|
||||
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
|
||||
if self.stopping:
|
||||
raise newTransportClosedError()
|
||||
|
||||
trace "Dialing remote peer", address = $address
|
||||
let transp =
|
||||
if self.networkReachability == NetworkReachability.NotReachable and self.addrs.len > 0:
|
||||
self.clientFlags.incl(SocketFlags.ReusePort)
|
||||
await connect(address, flags = self.clientFlags, localAddress = Opt.some(self.addrs[0]))
|
||||
else:
|
||||
await connect(address, flags = self.clientFlags)
|
||||
let ta = initTAddress(address).valueOr:
|
||||
raise (ref TcpTransportError)(msg: "Unsupported address: " & $address)
|
||||
|
||||
try:
|
||||
let observedAddr = MultiAddress.init(transp.remoteAddress).tryGet()
|
||||
return await self.connHandler(transp, Opt.some(observedAddr), Direction.Out)
|
||||
except CatchableError as err:
|
||||
await transp.closeWait()
|
||||
raise err
|
||||
trace "Dialing remote peer", address = $address
|
||||
let transp =
|
||||
try:
|
||||
await(
|
||||
if self.networkReachability == NetworkReachability.NotReachable and
|
||||
self.addrs.len > 0:
|
||||
let local = initTAddress(self.addrs[0]).expect("self address is valid")
|
||||
self.clientFlags.incl(SocketFlags.ReusePort)
|
||||
connect(ta, flags = self.clientFlags, localAddress = local)
|
||||
else:
|
||||
connect(ta, flags = self.clientFlags)
|
||||
)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
|
||||
|
||||
method handles*(t: TcpTransport, address: MultiAddress): bool {.gcsafe.} =
|
||||
# If `stop` is called after `connect` but before `await` returns, we might
|
||||
# end up with a race condition where `stop` returns but not all connections
|
||||
# have been closed - we drop connections in this case in order not to leak
|
||||
# them
|
||||
if self.stopping:
|
||||
# Stopped while waiting for new connection
|
||||
await transp.closeWait()
|
||||
raise newTransportClosedError()
|
||||
|
||||
let observedAddr =
|
||||
try:
|
||||
MultiAddress.init(transp.remoteAddress).expect("remote address is valid")
|
||||
except TransportOsError as exc:
|
||||
await transp.closeWait()
|
||||
raise (ref TcpTransportError)(msg: exc.msg)
|
||||
|
||||
self.connHandler(transp, Opt.some(observedAddr), Direction.Out)
|
||||
|
||||
impl(self, hostname, address, peerId)
|
||||
|
||||
method handles*(t: TcpTransport, address: MultiAddress): bool =
|
||||
if procCall Transport(t).handles(address):
|
||||
if address.protocols.isOk:
|
||||
return TCP.match(address)
|
||||
return TCP.match(address)
|
||||
@@ -82,7 +82,7 @@ proc handlesStart(address: MultiAddress): bool {.gcsafe.} =
|
||||
return TcpOnion3.match(address)
|
||||
|
||||
proc connectToTorServer(
|
||||
transportAddress: TransportAddress): Future[StreamTransport] {.async, gcsafe.} =
|
||||
transportAddress: TransportAddress): Future[StreamTransport] {.async.} =
|
||||
let transp = await connect(transportAddress)
|
||||
try:
|
||||
discard await transp.write(@[Socks5ProtocolVersion, NMethods, Socks5AuthMethod.NoAuth.byte])
|
||||
@@ -99,7 +99,7 @@ proc connectToTorServer(
|
||||
await transp.closeWait()
|
||||
raise err
|
||||
|
||||
proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
|
||||
proc readServerReply(transp: StreamTransport) {.async.} =
|
||||
## The specification for this code is defined on
|
||||
## [link text](https://www.rfc-editor.org/rfc/rfc1928#section-5)
|
||||
## and [link text](https://www.rfc-editor.org/rfc/rfc1928#section-6).
|
||||
@@ -121,7 +121,7 @@ proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
|
||||
let atyp = firstFourOctets[3]
|
||||
case atyp:
|
||||
of Socks5AddressType.IPv4.byte:
|
||||
discard await transp.read(ipV4NumOctets + portNumOctets)
|
||||
discard await transp.read(ipV4NumOctets + portNumOctets)
|
||||
of Socks5AddressType.FQDN.byte:
|
||||
let fqdnNumOctets = await transp.read(1)
|
||||
discard await transp.read(int(uint8.fromBytes(fqdnNumOctets)) + portNumOctets)
|
||||
@@ -166,7 +166,7 @@ proc parseDnsTcp(address: MultiAddress):
|
||||
(Socks5AddressType.FQDN.byte, dstAddr, dstPort)
|
||||
|
||||
proc dialPeer(
|
||||
transp: StreamTransport, address: MultiAddress) {.async, gcsafe.} =
|
||||
transp: StreamTransport, address: MultiAddress) {.async.} =
|
||||
let (atyp, dstAddr, dstPort) =
|
||||
if Onion3.match(address):
|
||||
parseOnion3(address)
|
||||
@@ -190,7 +190,7 @@ method dial*(
|
||||
self: TorTransport,
|
||||
hostname: string,
|
||||
address: MultiAddress,
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
|
||||
## dial a peer
|
||||
##
|
||||
if not handlesDial(address):
|
||||
@@ -200,7 +200,7 @@ method dial*(
|
||||
|
||||
try:
|
||||
await dialPeer(transp, address)
|
||||
return await self.tcpTransport.connHandler(transp, Opt.none(MultiAddress), Direction.Out)
|
||||
return self.tcpTransport.connHandler(transp, Opt.none(MultiAddress), Direction.Out)
|
||||
except CatchableError as err:
|
||||
await transp.closeWait()
|
||||
raise err
|
||||
@@ -229,14 +229,14 @@ method start*(
|
||||
else:
|
||||
raise newException(TransportStartError, "Tor Transport couldn't start, no supported addr was provided.")
|
||||
|
||||
method accept*(self: TorTransport): Future[Connection] {.async, gcsafe.} =
|
||||
method accept*(self: TorTransport): Future[Connection] {.async.} =
|
||||
## accept a new Tor connection
|
||||
##
|
||||
let conn = await self.tcpTransport.accept()
|
||||
conn.observedAddr = Opt.none(MultiAddress)
|
||||
return conn
|
||||
|
||||
method stop*(self: TorTransport) {.async, gcsafe.} =
|
||||
method stop*(self: TorTransport) {.async.} =
|
||||
## stop the transport
|
||||
##
|
||||
await procCall Transport(self).stop() # call base
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -35,7 +35,7 @@ type
|
||||
upgrader*: Upgrade
|
||||
networkReachability*: NetworkReachability
|
||||
|
||||
proc newTransportClosedError*(parent: ref Exception = nil): ref LPError =
|
||||
proc newTransportClosedError*(parent: ref Exception = nil): ref TransportError =
|
||||
newException(TransportClosedError,
|
||||
"Transport closed, no more connections!", parent)
|
||||
|
||||
@@ -81,26 +81,25 @@ proc dial*(
|
||||
self.dial("", address)
|
||||
|
||||
method upgrade*(
|
||||
self: Transport,
|
||||
conn: Connection,
|
||||
direction: Direction,
|
||||
peerId: Opt[PeerId]): Future[Muxer] {.base, gcsafe.} =
|
||||
self: Transport,
|
||||
conn: Connection,
|
||||
peerId: Opt[PeerId]
|
||||
): Future[Muxer] {.base, async: (raises: [
|
||||
CancelledError, LPError], raw: true).} =
|
||||
## base upgrade method that the transport uses to perform
|
||||
## transport specific upgrades
|
||||
##
|
||||
|
||||
self.upgrader.upgrade(conn, direction, peerId)
|
||||
self.upgrader.upgrade(conn, peerId)
|
||||
|
||||
method handles*(
|
||||
self: Transport,
|
||||
address: MultiAddress): bool {.base, gcsafe.} =
|
||||
self: Transport,
|
||||
address: MultiAddress): bool {.base, gcsafe.} =
|
||||
## check if transport supports the multiaddress
|
||||
##
|
||||
|
||||
# by default we skip circuit addresses to avoid
|
||||
# having to repeat the check in every transport
|
||||
let protocols = address.protocols.valueOr: return false
|
||||
return protocols
|
||||
protocols
|
||||
.filterIt(
|
||||
it == multiCodec("p2p-circuit")
|
||||
).len == 0
|
||||
|
||||
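`handles` is the cheap pre-dial filter shown above: the base implementation skips circuit-relay addresses, and each transport then matches its own protocols. A hedged sketch of the expected behaviour for a TCP transport (addresses and the placeholder upgrader are examples, not from the diff):

```nim
import stew/results
import libp2p/transports/tcptransport
import libp2p/[multiaddress, upgrademngrs/upgrade]

let t = TcpTransport.new(upgrade = Upgrade())

doAssert t.handles(MultiAddress.init("/ip4/127.0.0.1/tcp/4001").tryGet())
# circuit relay addresses are filtered out by the base implementation
doAssert not t.handles(MultiAddress.init("/p2p-circuit").tryGet())
```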
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -44,11 +44,12 @@ method initStream*(s: WsStream) =
|
||||
|
||||
procCall Connection(s).initStream()
|
||||
|
||||
proc new*(T: type WsStream,
|
||||
session: WSSession,
|
||||
dir: Direction,
|
||||
observedAddr: Opt[MultiAddress],
|
||||
timeout = 10.minutes): T =
|
||||
proc new*(
|
||||
T: type WsStream,
|
||||
session: WSSession,
|
||||
dir: Direction,
|
||||
observedAddr: Opt[MultiAddress],
|
||||
timeout = 10.minutes): T =
|
||||
|
||||
let stream = T(
|
||||
session: session,
|
||||
@@ -63,18 +64,23 @@ template mapExceptions(body: untyped) =
|
||||
try:
|
||||
body
|
||||
except AsyncStreamIncompleteError:
|
||||
raise newLPStreamEOFError()
|
||||
raise newLPStreamIncompleteError()
|
||||
except AsyncStreamLimitError:
|
||||
raise newLPStreamLimitError()
|
||||
except AsyncStreamUseClosedError:
|
||||
raise newLPStreamEOFError()
|
||||
except WSClosedError:
|
||||
raise newLPStreamEOFError()
|
||||
except AsyncStreamLimitError:
|
||||
raise newLPStreamLimitError()
|
||||
except WebSocketError:
|
||||
raise newLPStreamEOFError()
|
||||
except CatchableError:
|
||||
raise newLPStreamEOFError()
|
||||
|
||||
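The `mapExceptions` template above converts websocket and asyncstream failures into the LPStream error hierarchy at each call site. A hedged standalone sketch of the same template technique with invented error types:

```nim
# Hypothetical: translate low-level errors into a module's own error type at
# the point of use, the way mapExceptions does for WsStream.
type AppError = object of CatchableError

template mapErrors(body: untyped): untyped =
  try:
    body
  except IOError as exc:
    raise newException(AppError, "io failure: " & exc.msg)
  except OSError as exc:
    raise newException(AppError, "os failure: " & exc.msg)

proc readConfig(path: string): string =
  mapErrors(readFile(path))
```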
method readOnce*(
|
||||
s: WsStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int): Future[int] {.async.} =
|
||||
s: WsStream,
|
||||
pbytes: pointer,
|
||||
nbytes: int
|
||||
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
let res = mapExceptions(await s.session.recv(pbytes, nbytes))
|
||||
|
||||
if res == 0 and s.session.readyState == ReadyState.Closed:
|
||||
@@ -83,13 +89,17 @@ method readOnce*(
|
||||
return res
|
||||
|
||||
method write*(
|
||||
s: WsStream,
|
||||
msg: seq[byte]): Future[void] {.async.} =
|
||||
s: WsStream,
|
||||
msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
mapExceptions(await s.session.send(msg, Opcode.Binary))
|
||||
s.activity = true # reset activity flag
|
||||
|
||||
method closeImpl*(s: WsStream): Future[void] {.async.} =
|
||||
await s.session.close()
|
||||
method closeImpl*(s: WsStream): Future[void] {.async: (raises: []).} =
|
||||
try:
|
||||
await s.session.close()
|
||||
except CatchableError:
|
||||
discard
|
||||
await procCall Connection(s).closeImpl()
|
||||
|
||||
method getWrapped*(s: WsStream): Connection = nil
|
||||
@@ -136,7 +146,7 @@ method start*(
|
||||
if WSS.match(ma):
|
||||
if self.secure: true
|
||||
else:
|
||||
warn "Trying to listen on a WSS address without setting the certificate!"
|
||||
warn "Trying to listen on a WSS address without setting certificate!"
|
||||
false
|
||||
else: false
|
||||
|
||||
@@ -173,7 +183,7 @@ method start*(
|
||||
|
||||
self.running = true
|
||||
|
||||
method stop*(self: WsTransport) {.async, gcsafe.} =
|
||||
method stop*(self: WsTransport) {.async.} =
|
||||
## stop the transport
|
||||
##
|
||||
|
||||
@@ -237,7 +247,7 @@ proc connHandler(self: WsTransport,
|
||||
asyncSpawn onClose()
|
||||
return conn
|
||||
|
||||
method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
|
||||
method accept*(self: WsTransport): Future[Connection] {.async.} =
|
||||
## accept a new WS connection
|
||||
##
|
||||
|
||||
@@ -295,7 +305,7 @@ method dial*(
|
||||
self: WsTransport,
|
||||
hostname: string,
|
||||
address: MultiAddress,
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
|
||||
## dial a peer
|
||||
##
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -25,55 +25,61 @@ type
|
||||
muxers*: seq[MuxerProvider]
|
||||
streamHandler*: StreamHandler
|
||||
|
||||
proc getMuxerByCodec(self: MuxedUpgrade, muxerName: string): MuxerProvider =
|
||||
func getMuxerByCodec(
|
||||
self: MuxedUpgrade, muxerName: string): Opt[MuxerProvider] =
|
||||
if muxerName.len == 0 or muxerName == "na":
|
||||
return Opt.none(MuxerProvider)
|
||||
for m in self.muxers:
|
||||
if muxerName == m.codec:
|
||||
return m
|
||||
return Opt.some(m)
|
||||
Opt.none(MuxerProvider)
|
||||
|
||||
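`getMuxerByCodec` now returns an `Opt` and callers unwrap it with `valueOr`, the pattern used throughout this diff. A minimal standalone sketch of that pattern (stew/results assumed; codec strings are examples):

```nim
import stew/results

proc findCodec(codecs: seq[string], wanted: string): Opt[string] =
  for c in codecs:
    if c == wanted:
      return Opt.some(c)
  Opt.none(string)

# valueOr runs its block only when no value is present
let chosen = findCodec(@["/mplex/6.7.0", "/yamux/1.0.0"], "/yamux/1.0.0").valueOr:
  quit "no muxer negotiated"
echo chosen
```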
proc mux*(
|
||||
self: MuxedUpgrade,
|
||||
conn: Connection,
|
||||
direction: Direction): Future[Muxer] {.async, gcsafe.} =
|
||||
proc mux(
|
||||
self: MuxedUpgrade,
|
||||
conn: Connection
|
||||
): Future[Opt[Muxer]] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MultiStreamError]).} =
|
||||
## mux connection
|
||||
|
||||
trace "Muxing connection", conn
|
||||
if self.muxers.len == 0:
|
||||
warn "no muxers registered, skipping upgrade flow", conn
|
||||
return
|
||||
return Opt.none(Muxer)
|
||||
|
||||
let muxerName =
|
||||
if direction == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
|
||||
else: await MultistreamSelect.handle(conn, self.muxers.mapIt(it.codec))
|
||||
|
||||
if muxerName.len == 0 or muxerName == "na":
|
||||
debug "no muxer available, early exit", conn
|
||||
return
|
||||
let
|
||||
muxerName =
|
||||
case conn.dir
|
||||
of Direction.Out:
|
||||
await self.ms.select(conn, self.muxers.mapIt(it.codec))
|
||||
of Direction.In:
|
||||
await MultistreamSelect.handle(conn, self.muxers.mapIt(it.codec))
|
||||
muxerProvider = self.getMuxerByCodec(muxerName).valueOr:
|
||||
debug "no muxer available, early exit", conn, muxerName
|
||||
return Opt.none(Muxer)
|
||||
|
||||
trace "Found a muxer", conn, muxerName
|
||||
|
||||
# create new muxer for connection
|
||||
let muxer = self.getMuxerByCodec(muxerName).newMuxer(conn)
|
||||
let muxer = muxerProvider.newMuxer(conn)
|
||||
|
||||
# install stream handler
|
||||
muxer.streamHandler = self.streamHandler
|
||||
muxer.handler = muxer.handle()
|
||||
return muxer
|
||||
Opt.some(muxer)
|
||||
|
||||
method upgrade*(
|
||||
self: MuxedUpgrade,
|
||||
conn: Connection,
|
||||
direction: Direction,
|
||||
peerId: Opt[PeerId]): Future[Muxer] {.async.} =
|
||||
trace "Upgrading connection", conn, direction
|
||||
self: MuxedUpgrade,
|
||||
conn: Connection,
|
||||
peerId: Opt[PeerId]
|
||||
): Future[Muxer] {.async: (raises: [CancelledError, LPError]).} =
|
||||
trace "Upgrading connection", conn, direction = conn.dir
|
||||
|
||||
let sconn = await self.secure(conn, direction, peerId) # secure the connection
|
||||
if isNil(sconn):
|
||||
raise newException(UpgradeFailedError,
|
||||
let sconn = await self.secure(conn, peerId) # secure the connection
|
||||
if sconn == nil:
|
||||
raise (ref UpgradeFailedError)(msg:
|
||||
"unable to secure connection, stopping upgrade")
|
||||
|
||||
let muxer = await self.mux(sconn, direction) # mux it if possible
|
||||
if muxer == nil:
|
||||
raise newException(UpgradeFailedError,
|
||||
let muxer = (await self.mux(sconn)).valueOr: # mux it if possible
|
||||
raise (ref UpgradeFailedError)(msg:
|
||||
"a muxer is required for outgoing connections")
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
@@ -81,32 +87,28 @@ method upgrade*(
|
||||
|
||||
if sconn.closed():
|
||||
await sconn.close()
|
||||
raise newException(UpgradeFailedError,
|
||||
raise (ref UpgradeFailedError)(msg:
|
||||
"Connection closed or missing peer info, stopping upgrade")
|
||||
|
||||
trace "Upgraded connection", conn, sconn, direction
|
||||
return muxer
|
||||
trace "Upgraded connection", conn, sconn, direction = conn.dir
|
||||
muxer
|
||||
|
||||
proc new*(
|
||||
T: type MuxedUpgrade,
|
||||
muxers: seq[MuxerProvider],
|
||||
secureManagers: openArray[Secure] = [],
|
||||
ms: MultistreamSelect): T =
|
||||
|
||||
T: type MuxedUpgrade,
|
||||
muxers: seq[MuxerProvider],
|
||||
secureManagers: openArray[Secure] = [],
|
||||
ms: MultistreamSelect): T =
|
||||
let upgrader = T(
|
||||
muxers: muxers,
|
||||
secureManagers: @secureManagers,
|
||||
ms: ms)
|
||||
|
||||
upgrader.streamHandler = proc(conn: Connection)
|
||||
{.async, gcsafe, raises: [].} =
|
||||
upgrader.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
|
||||
trace "Starting stream handler", conn
|
||||
try:
|
||||
await upgrader.ms.handle(conn) # handle incoming connection
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
trace "exception in stream handler", conn, msg = exc.msg
|
||||
return
|
||||
finally:
|
||||
await conn.closeWithEOF()
|
||||
trace "Stream handler done", conn
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -24,8 +24,10 @@ import ../stream/connection,
|
||||
|
||||
export connmanager, connection, identify, secure, multistream
|
||||
|
||||
declarePublicCounter(libp2p_failed_upgrades_incoming, "incoming connections failed upgrades")
|
||||
declarePublicCounter(libp2p_failed_upgrades_outgoing, "outgoing connections failed upgrades")
|
||||
declarePublicCounter(libp2p_failed_upgrades_incoming,
|
||||
"incoming connections failed upgrades")
|
||||
declarePublicCounter(libp2p_failed_upgrades_outgoing,
|
||||
"outgoing connections failed upgrades")
|
||||
|
||||
logScope:
|
||||
topics = "libp2p upgrade"
|
||||
@@ -38,25 +40,28 @@ type
|
||||
secureManagers*: seq[Secure]
|
||||
|
||||
method upgrade*(
|
||||
self: Upgrade,
|
||||
conn: Connection,
|
||||
direction: Direction,
|
||||
peerId: Opt[PeerId]): Future[Muxer] {.base.} =
|
||||
doAssert(false, "Not implemented!")
|
||||
self: Upgrade,
|
||||
conn: Connection,
|
||||
peerId: Opt[PeerId]
|
||||
): Future[Muxer] {.async: (raises: [
|
||||
CancelledError, LPError], raw: true), base.} =
|
||||
raiseAssert("Not implemented!")
|
||||
|
||||
proc secure*(
|
||||
self: Upgrade,
|
||||
conn: Connection,
|
||||
direction: Direction,
|
||||
peerId: Opt[PeerId]): Future[Connection] {.async, gcsafe.} =
|
||||
self: Upgrade,
|
||||
conn: Connection,
|
||||
peerId: Opt[PeerId]
|
||||
): Future[Connection] {.async: (raises: [CancelledError, LPError]).} =
|
||||
if self.secureManagers.len <= 0:
|
||||
raise newException(UpgradeFailedError, "No secure managers registered!")
|
||||
raise (ref UpgradeFailedError)(msg: "No secure managers registered!")
|
||||
|
||||
let codec =
|
||||
if direction == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
|
||||
else: await MultistreamSelect.handle(conn, self.secureManagers.mapIt(it.codec))
|
||||
if conn.dir == Out:
|
||||
await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
|
||||
else:
|
||||
await MultistreamSelect.handle(conn, self.secureManagers.mapIt(it.codec))
|
||||
if codec.len == 0:
|
||||
raise newException(UpgradeFailedError, "Unable to negotiate a secure channel!")
|
||||
raise (ref UpgradeFailedError)(msg: "Unable to negotiate a secure channel!")
|
||||
|
||||
trace "Securing connection", conn, codec
|
||||
let secureProtocol = self.secureManagers.filterIt(it.codec == codec)
|
||||
@@ -65,4 +70,4 @@ proc secure*(
|
||||
# let's avoid duplicating checks but detect if it fails to do it properly
|
||||
doAssert(secureProtocol.len > 0)
|
||||
|
||||
return await secureProtocol[0].secure(conn, direction == Out, peerId)
|
||||
await secureProtocol[0].secure(conn, peerId)
|
||||
|
||||
@@ -89,28 +89,50 @@ template exceptionToAssert*(body: untyped): untyped =
|
||||
res
|
||||
|
||||
template withValue*[T](self: Opt[T] | Option[T], value, body: untyped): untyped =
|
||||
if self.isSome:
|
||||
let value {.inject.} = self.get()
|
||||
## This template provides a convenient way to work with `Option` types in Nim.
|
||||
## It allows you to execute a block of code (`body`) only when the `Option` is not empty.
|
||||
##
|
||||
## `self` is the `Option` instance being checked.
|
||||
## `value` is the variable name to be used within the `body` for the unwrapped value.
|
||||
## `body` is a block of code that is executed only if `self` contains a value.
|
||||
##
|
||||
## The `value` within `body` is automatically unwrapped from the `Option`, making it
|
||||
## simpler to work with without needing explicit checks or unwrapping.
|
||||
##
|
||||
## Example:
|
||||
## ```nim
|
||||
## let myOpt = Opt.some(5)
|
||||
## myOpt.withValue(value):
|
||||
## echo value # Will print 5
|
||||
## ```
|
||||
##
|
||||
## Note: This is a template, and it will be inlined at the call site, offering good performance.
|
||||
let temp = (self)
|
||||
if temp.isSome:
|
||||
let value {.inject.} = temp.get()
|
||||
body
|
||||
|
||||
macro withValue*[T](self: Opt[T] | Option[T], value, body, body2: untyped): untyped =
|
||||
let elseBody = body2[0]
|
||||
macro withValue*[T](self: Opt[T] | Option[T], value, body, elseStmt: untyped): untyped =
|
||||
let elseBody = elseStmt[0]
|
||||
quote do:
|
||||
if `self`.isSome:
|
||||
let `value` {.inject.} = `self`.get()
|
||||
let temp = (`self`)
|
||||
if temp.isSome:
|
||||
let `value` {.inject.} = temp.get()
|
||||
`body`
|
||||
else:
|
||||
`elseBody`
|
||||
|
||||
template valueOr*[T](self: Option[T], body: untyped): untyped =
|
||||
if self.isSome:
|
||||
self.get()
|
||||
let temp = (self)
|
||||
if temp.isSome:
|
||||
temp.get()
|
||||
else:
|
||||
body
|
||||
|
||||
template toOpt*[T, E](self: Result[T, E]): Opt[T] =
|
||||
if self.isOk:
|
||||
let temp = (self)
|
||||
if temp.isOk:
|
||||
when T is void: Result[void, void].ok()
|
||||
else: Opt.some(self.unsafeGet())
|
||||
else: Opt.some(temp.unsafeGet())
|
||||
else:
|
||||
Opt.none(type(T))
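A short sketch of the helpers above in use; the option value is made up for illustration:

```nim
# Illustrative only: a fabricated Opt value.
let maybePort = Opt.some(4001)

maybePort.withValue(port):
  echo "dialing on port ", port   # body runs only when the Opt holds a value

let port = maybePort.valueOr:     # stew's valueOr for Opt; the Option[T]
  4001                            # overload above behaves the same way
```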
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
import chronos, stew/endians2
|
||||
import multiaddress, multicodec, errors, utility
|
||||
|
||||
export multiaddress, chronos
|
||||
|
||||
when defined(windows):
|
||||
import winlean
|
||||
else:
|
||||
@@ -30,7 +32,6 @@ const
|
||||
UDP,
|
||||
)
|
||||
|
||||
|
||||
proc initTAddress*(ma: MultiAddress): MaResult[TransportAddress] =
|
||||
## Initialize ``TransportAddress`` with MultiAddress ``ma``.
|
||||
##
|
||||
@@ -76,7 +77,7 @@ proc connect*(
|
||||
child: StreamTransport = nil,
|
||||
flags = default(set[SocketFlags]),
|
||||
localAddress: Opt[MultiAddress] = Opt.none(MultiAddress)): Future[StreamTransport]
|
||||
{.raises: [LPError, MaInvalidAddress].} =
|
||||
{.async.} =
|
||||
## Open new connection to remote peer with address ``ma`` and create
|
||||
## new transport object ``StreamTransport`` for established connection.
|
||||
## ``bufferSize`` is size of internal buffer for transport.
|
||||
@@ -88,12 +89,12 @@ proc connect*(
|
||||
let transportAddress = initTAddress(ma).tryGet()
|
||||
|
||||
compilesOr:
|
||||
return connect(transportAddress, bufferSize, child,
|
||||
return await connect(transportAddress, bufferSize, child,
|
||||
if localAddress.isSome(): initTAddress(localAddress.expect("just checked")).tryGet() else: TransportAddress(),
|
||||
flags)
|
||||
do:
|
||||
# support for older chronos versions
|
||||
return connect(transportAddress, bufferSize, child)
|
||||
return await connect(transportAddress, bufferSize, child)
|
||||
|
||||
proc createStreamServer*[T](ma: MultiAddress,
|
||||
cbproc: StreamCallback,
|
||||
|
||||
@@ -89,6 +89,7 @@ build_target() {
|
||||
mkdir "$CACHE_DIR"
|
||||
cp -a "$TARGET_DIR"/* "$CACHE_DIR"/
|
||||
fi
|
||||
echo "Binary built successfully."
|
||||
}
|
||||
|
||||
if target_needs_rebuilding; then
|
||||
|
||||
@@ -5,21 +5,21 @@ export unittest2, chronos
|
||||
template asyncTeardown*(body: untyped): untyped =
|
||||
teardown:
|
||||
waitFor((
|
||||
proc() {.async, gcsafe.} =
|
||||
proc() {.async.} =
|
||||
body
|
||||
)())
|
||||
|
||||
template asyncSetup*(body: untyped): untyped =
|
||||
setup:
|
||||
waitFor((
|
||||
proc() {.async, gcsafe.} =
|
||||
proc() {.async.} =
|
||||
body
|
||||
)())
|
||||
|
||||
template asyncTest*(name: string, body: untyped): untyped =
|
||||
test name:
|
||||
waitFor((
|
||||
proc() {.async, gcsafe.} =
|
||||
proc() {.async.} =
|
||||
body
|
||||
)())
|
||||
|
||||
@@ -31,7 +31,7 @@ template flakyAsyncTest*(name: string, attempts: int, body: untyped): untyped =
|
||||
inc attemptNumber
|
||||
try:
|
||||
waitFor((
|
||||
proc() {.async, gcsafe.} =
|
||||
proc() {.async.} =
|
||||
body
|
||||
)())
|
||||
except Exception as e:
|
||||
|
||||
@@ -20,7 +20,7 @@ proc writeLp(s: StreamTransport, msg: string | seq[byte]): Future[int] {.gcsafe.
|
||||
buf.finish()
|
||||
result = s.write(buf.buffer)
|
||||
|
||||
proc readLp(s: StreamTransport): Future[seq[byte]] {.async, gcsafe.} =
|
||||
proc readLp(s: StreamTransport): Future[seq[byte]] {.async.} =
|
||||
## read length prefixed msg
|
||||
var
|
||||
size: uint
|
||||
|
||||
@@ -30,7 +30,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
|
||||
|
||||
let transport2 = transpProvider()
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
let conn = await transport1.accept()
|
||||
if conn.observedAddr.isSome():
|
||||
check transport1.handles(conn.observedAddr.get())
|
||||
@@ -58,7 +58,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
|
||||
let transport1 = transpProvider()
|
||||
await transport1.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
let conn = await transport1.accept()
|
||||
await conn.write("Hello!")
|
||||
await conn.close()
|
||||
@@ -85,7 +85,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
|
||||
let transport1 = transpProvider()
|
||||
await transport1.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
let conn = await transport1.accept()
|
||||
var msg = newSeq[byte](6)
|
||||
await conn.readExactly(addr msg[0], 6)
|
||||
@@ -147,7 +147,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
|
||||
let transport1 = transpProvider()
|
||||
await transport1.start(addrs)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
while true:
|
||||
let conn = await transport1.accept()
|
||||
await conn.write(newSeq[byte](0))
|
||||
@@ -214,7 +214,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
|
||||
let transport1 = transpProvider()
|
||||
await transport1.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
proc acceptHandler() {.async.} =
|
||||
let conn = await transport1.accept()
|
||||
await conn.close()
|
||||
|
||||
|
||||
29  tests/errorhelpers.nim  Normal file
@@ -0,0 +1,29 @@
import
  std/sequtils,
  chronos

proc allFuturesThrowing*(args: varargs[FutureBase]): Future[void] =
  # This proc is only meant for use in tests / not suitable for general use.
  # - Swallowing errors arbitrarily instead of aggregating them is bad design
  # - It raises `CatchableError` instead of the union of the `futs` errors,
  #   inflating the caller's `raises` list unnecessarily. `macro` could fix it
  let futs = @args
  (proc() {.async: (raises: [CatchableError]).} =
    await allFutures(futs)
    var firstErr: ref CatchableError
    for fut in futs:
      if fut.failed:
        let err = fut.error()
        if err of CancelledError:
          raise err
        if firstErr == nil:
          firstErr = err
    if firstErr != nil:
      raise firstErr)()

proc allFuturesThrowing*[T](futs: varargs[Future[T]]): Future[void] =
  allFuturesThrowing(futs.mapIt(FutureBase(it)))

proc allFuturesThrowing*[T, E]( # https://github.com/nim-lang/Nim/issues/23432
    futs: varargs[InternalRaisesFuture[T, E]]): Future[void] =
  allFuturesThrowing(futs.mapIt(FutureBase(it)))
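A minimal usage sketch for the helper above, assuming two hypothetical `Connection` objects created in a test body:

```nim
# Sketch only: connA/connB are placeholders for futures created in a test.
# Waits for both closes; re-raises cancellation immediately, otherwise the
# first recorded failure once all futures have finished.
await allFuturesThrowing(connA.close(), connB.close())
```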
|
||||
@@ -1,6 +1,7 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos
|
||||
import macros
|
||||
import algorithm
|
||||
|
||||
import ../libp2p/transports/tcptransport
|
||||
@@ -13,8 +14,8 @@ import ../libp2p/protocols/secure/secure
|
||||
import ../libp2p/switch
|
||||
import ../libp2p/nameresolving/[nameresolver, mockresolver]
|
||||
|
||||
import ./asyncunit
|
||||
export asyncunit, mockresolver
|
||||
import "."/[asyncunit, errorhelpers]
|
||||
export asyncunit, errorhelpers, mockresolver
|
||||
|
||||
const
|
||||
StreamTransportTrackerName = "stream.transport"
|
||||
@@ -34,25 +35,19 @@ const
|
||||
ChronosStreamTrackerName
|
||||
]
|
||||
|
||||
iterator testTrackers*(extras: openArray[string] = []): TrackerBase =
|
||||
for name in trackerNames:
|
||||
let t = getTracker(name)
|
||||
if not isNil(t): yield t
|
||||
for name in extras:
|
||||
let t = getTracker(name)
|
||||
if not isNil(t): yield t
|
||||
|
||||
template checkTracker*(name: string) =
|
||||
var tracker = getTracker(name)
|
||||
if tracker.isLeaked():
|
||||
checkpoint tracker.dump()
|
||||
if isCounterLeaked(name):
|
||||
let
|
||||
tracker = getTrackerCounter(name)
|
||||
trackerDescription =
|
||||
"Opened " & name & ": " & $tracker.opened & "\n" &
|
||||
"Closed " & name & ": " & $tracker.closed
|
||||
checkpoint trackerDescription
|
||||
fail()
|
||||
|
||||
template checkTrackers*() =
|
||||
for tracker in testTrackers():
|
||||
if tracker.isLeaked():
|
||||
checkpoint tracker.dump()
|
||||
fail()
|
||||
for name in trackerNames:
|
||||
checkTracker(name)
|
||||
# Also test the GC is not fooling with us
|
||||
when defined(nimHasWarnBareExcept):
|
||||
{.push warning[BareExcept]:off.}
|
||||
@@ -81,11 +76,18 @@ template rng*(): ref HmacDrbgContext =
|
||||
getRng()
|
||||
|
||||
type
|
||||
WriteHandler* = proc(data: seq[byte]): Future[void] {.gcsafe, raises: [].}
|
||||
WriteHandler* = proc(
|
||||
data: seq[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).}
|
||||
|
||||
TestBufferStream* = ref object of BufferStream
|
||||
writeHandler*: WriteHandler
|
||||
|
||||
method write*(s: TestBufferStream, msg: seq[byte]): Future[void] =
|
||||
method write*(
|
||||
s: TestBufferStream,
|
||||
msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
s.writeHandler(msg)
|
||||
|
||||
method getWrapped*(s: TestBufferStream): Connection = nil
|
||||
@@ -103,26 +105,94 @@ proc bridgedConnections*: (Connection, Connection) =
|
||||
connB.dir = Direction.In
|
||||
connA.initStream()
|
||||
connB.initStream()
|
||||
connA.writeHandler = proc(data: seq[byte]) {.async.} =
|
||||
await connB.pushData(data)
|
||||
connA.writeHandler =
|
||||
proc(data: seq[byte]) {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
connB.pushData(data)
|
||||
|
||||
connB.writeHandler = proc(data: seq[byte]) {.async.} =
|
||||
await connA.pushData(data)
|
||||
connB.writeHandler =
|
||||
proc(data: seq[byte]) {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
connA.pushData(data)
|
||||
return (connA, connB)
|
||||
|
||||
|
||||
proc checkExpiringInternal(cond: proc(): bool {.raises: [], gcsafe.} ): Future[bool] {.async, gcsafe.} =
|
||||
let start = Moment.now()
|
||||
while true:
|
||||
if Moment.now() > (start + chronos.seconds(5)):
|
||||
return false
|
||||
elif cond():
|
||||
return true
|
||||
macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
|
||||
## Periodically checks a given condition until it is true or a timeout occurs.
|
||||
##
|
||||
## `code`: untyped - A condition expression that should eventually evaluate to true.
|
||||
## `timeout`: Duration - The maximum duration to wait for the condition to be true.
|
||||
##
|
||||
## Examples:
|
||||
## ```nim
|
||||
## # Example 1:
|
||||
## asyncTest "checkUntilCustomTimeout should pass if the condition is true":
|
||||
## let a = 2
|
||||
## let b = 2
|
||||
## checkUntilCustomTimeout(2.seconds):
|
||||
## a == b
|
||||
##
|
||||
## # Example 2: Multiple conditions
|
||||
## asyncTest "checkUntilCustomTimeout should pass if the conditions are true":
|
||||
## let a = 2
|
||||
## let b = 2
|
||||
## checkUntilCustomTimeout(5.seconds):
|
||||
## a == b
|
||||
## a == 2
|
||||
## b == 1
|
||||
## ```
|
||||
# Helper proc to recursively build a combined boolean expression
|
||||
proc buildAndExpr(n: NimNode): NimNode =
|
||||
if n.kind == nnkStmtList and n.len > 0:
|
||||
var combinedExpr = n[0] # Start with the first expression
|
||||
for i in 1..<n.len:
|
||||
# Combine the current expression with the next using 'and'
|
||||
combinedExpr = newCall("and", combinedExpr, n[i])
|
||||
return combinedExpr
|
||||
else:
|
||||
await sleepAsync(1.millis)
|
||||
return n
|
||||
|
||||
template checkExpiring*(code: untyped): untyped =
|
||||
check await checkExpiringInternal(proc(): bool = code)
|
||||
# Build the combined expression
|
||||
let combinedBoolExpr = buildAndExpr(code)
|
||||
|
||||
result = quote do:
|
||||
proc checkExpiringInternal(): Future[void] {.gensym, async.} =
|
||||
let start = Moment.now()
|
||||
while true:
|
||||
if Moment.now() > (start + `timeout`):
|
||||
checkpoint("[TIMEOUT] Timeout was reached and the conditions were not true. Check if the code is working as " &
|
||||
"expected or consider increasing the timeout param.")
|
||||
check `code`
|
||||
return
|
||||
else:
|
||||
if `combinedBoolExpr`:
|
||||
return
|
||||
else:
|
||||
await sleepAsync(1.millis)
|
||||
await checkExpiringInternal()
|
||||
|
||||
macro checkUntilTimeout*(code: untyped): untyped =
|
||||
## Same as `checkUntilCustomTimeout` but with a default timeout of 10 seconds.
|
||||
##
|
||||
## Examples:
|
||||
## ```nim
|
||||
## # Example 1:
|
||||
## asyncTest "checkUntilTimeout should pass if the condition is true":
|
||||
## let a = 2
|
||||
## let b = 2
|
||||
## checkUntilTimeout:
|
||||
## a == b
|
||||
##
|
||||
## # Example 2: Multiple conditions
|
||||
## asyncTest "checkUntilTimeout should pass if the conditions are true":
|
||||
## let a = 2
|
||||
## let b = 2
|
||||
## checkUntilTimeout:
|
||||
## a == b
|
||||
## a == 2
|
||||
## b == 1
|
||||
## ```
|
||||
result = quote do:
|
||||
checkUntilCustomTimeout(10.seconds, `code`)
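As in the doc-comment examples, a typical call site inside an `asyncTest` just states the condition; `messageReceived` here is a hypothetical test-local counter:

```nim
# Polls the condition roughly every millisecond until it holds, failing the
# test after the default 10-second timeout.
checkUntilTimeout:
  messageReceived == 2
```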
|
||||
|
||||
proc unorderedCompare*[T](a, b: seq[T]): bool =
|
||||
if a == b:
|
||||
@@ -146,8 +216,8 @@ proc default*(T: typedesc[MockResolver]): T =
|
||||
resolver.ipResponses[("localhost", true)] = @["::1"]
|
||||
resolver
|
||||
|
||||
proc setDNSAddr*(switch: Switch) {.gcsafe, async.} =
|
||||
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
|
||||
proc setDNSAddr*(switch: Switch) {.async.} =
|
||||
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
|
||||
return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
|
||||
switch.peerInfo.addressMappers.add(addressMapper)
|
||||
await switch.peerInfo.update()
|
||||
|
||||
17  tests/hole-punching-interop/Dockerfile  Normal file
@@ -0,0 +1,17 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:1.6.16 as builder

WORKDIR /workspace

COPY .pinned libp2p.nimble nim-libp2p/

RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y

COPY . nim-libp2p/

RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./tests/hole-punching-interop/hole_punching.nim

FROM --platform=linux/amd64 debian:bookworm-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2
COPY --from=builder /workspace/nim-libp2p/hole-punching-tests /usr/bin/hole-punch-client
ENV RUST_BACKTRACE=1
|
||||
115  tests/hole-punching-interop/hole_punching.nim  Normal file
@@ -0,0 +1,115 @@
|
||||
import std/[os, options, strformat]
|
||||
import redis
|
||||
import chronos, chronicles
|
||||
import ../../libp2p/[builders,
|
||||
switch,
|
||||
observedaddrmanager,
|
||||
services/hpservice,
|
||||
services/autorelayservice,
|
||||
protocols/connectivity/autonat/client as aclient,
|
||||
protocols/connectivity/relay/client as rclient,
|
||||
protocols/connectivity/relay/relay,
|
||||
protocols/connectivity/autonat/service,
|
||||
protocols/ping]
|
||||
import ../stubs/autonatclientstub
|
||||
import ../errorhelpers
|
||||
|
||||
proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
|
||||
let rng = newRng()
|
||||
var builder = SwitchBuilder.new()
|
||||
.withRng(rng)
|
||||
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
|
||||
.withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
|
||||
.withTcpTransport({ServerFlags.TcpNoDelay})
|
||||
.withYamux()
|
||||
.withAutonat()
|
||||
.withNoise()
|
||||
|
||||
if hpService != nil:
|
||||
builder = builder.withServices(@[hpService])
|
||||
|
||||
if r != nil:
|
||||
builder = builder.withCircuitRelay(r)
|
||||
|
||||
let s = builder.build()
|
||||
s.mount(Ping.new(rng=rng))
|
||||
return s
|
||||
|
||||
proc main() {.async.} =
|
||||
try:
|
||||
let relayClient = RelayClient.new()
|
||||
let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
|
||||
let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
|
||||
autonatClientStub.answer = NotReachable
|
||||
let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
|
||||
let hpservice = HPService.new(autonatService, autoRelayService)
|
||||
|
||||
let
|
||||
isListener = getEnv("MODE") == "listen"
|
||||
switch = createSwitch(relayClient, hpservice)
|
||||
auxSwitch = createSwitch()
|
||||
redisClient = open("redis", 6379.Port)
|
||||
|
||||
debug "Connected to redis"
|
||||
|
||||
await switch.start()
|
||||
await auxSwitch.start()
|
||||
|
||||
let relayAddr =
|
||||
try:
|
||||
redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
|
||||
except Exception as e:
|
||||
raise newException(CatchableError, e.msg)
|
||||
|
||||
# This is necessary to make the autonat service work. It will ask this peer for our reachability which the autonat
|
||||
# client stub will answer NotReachable.
|
||||
await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)
|
||||
|
||||
# Wait for autonat to be NotReachable
|
||||
while autonatService.networkReachability != NetworkReachability.NotReachable:
|
||||
await sleepAsync(100.milliseconds)
|
||||
|
||||
# This will trigger the autonat relay service to make a reservation.
|
||||
let relayMA = MultiAddress.init(relayAddr[1]).tryGet()
|
||||
debug "Got relay address", relayMA
|
||||
let relayId = await switch.connect(relayMA)
|
||||
debug "Connected to relay", relayId
|
||||
|
||||
# Wait for our relay address to be published
|
||||
while switch.peerInfo.addrs.len == 0:
|
||||
await sleepAsync(100.milliseconds)
|
||||
|
||||
if isListener:
|
||||
let listenerPeerId = switch.peerInfo.peerId
|
||||
discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
|
||||
debug "Pushed listener client peer id to redis", listenerPeerId
|
||||
|
||||
# Nothing to do anymore, wait to be killed
|
||||
await sleepAsync(2.minutes)
|
||||
else:
|
||||
let listenerId =
|
||||
try:
|
||||
PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
|
||||
except Exception as e:
|
||||
raise newException(CatchableError, e.msg)
|
||||
|
||||
debug "Got listener peer id", listenerId
|
||||
let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
|
||||
|
||||
debug "Dialing listener relay address", listenerRelayAddr
|
||||
await switch.connect(listenerId, @[listenerRelayAddr])
|
||||
|
||||
# wait for hole-punching to complete in the background
|
||||
await sleepAsync(5000.milliseconds)
|
||||
|
||||
let conn = switch.connManager.selectMuxer(listenerId).connection
|
||||
let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
|
||||
let delay = await Ping.new().ping(channel)
|
||||
await allFuturesThrowing(channel.close(), conn.close(), switch.stop(), auxSwitch.stop())
|
||||
echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
|
||||
quit(0)
|
||||
except CatchableError as e:
|
||||
error "Unexpected error", msg = e.msg
|
||||
|
||||
discard waitFor(main().withTimeout(4.minutes))
|
||||
quit(1)
|
||||
7  tests/hole-punching-interop/version.json  Normal file
@@ -0,0 +1,7 @@
{
  "id": "nim-libp2p-head",
  "containerImageID": "nim-libp2p-head",
  "transports": [
    "tcp"
  ]
}
|
||||
@@ -26,7 +26,7 @@ import ../../libp2p/protocols/pubsub/errors as pubsub_errors
|
||||
|
||||
import ../helpers
|
||||
|
||||
proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
|
||||
proc waitSub(sender, receiver: auto; key: string) {.async.} =
|
||||
# turn things deterministic
|
||||
# this is for testing purposes only
|
||||
var ceil = 15
|
||||
@@ -43,7 +43,7 @@ suite "FloodSub":
|
||||
|
||||
asyncTest "FloodSub basic publish/subscribe A -> B":
|
||||
var completionFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
completionFut.complete(true)
|
||||
|
||||
@@ -81,7 +81,7 @@ suite "FloodSub":
|
||||
|
||||
asyncTest "FloodSub basic publish/subscribe B -> A":
|
||||
var completionFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
completionFut.complete(true)
|
||||
|
||||
@@ -113,7 +113,7 @@ suite "FloodSub":
|
||||
|
||||
asyncTest "FloodSub validation should succeed":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
@@ -151,7 +151,7 @@ suite "FloodSub":
|
||||
await allFuturesThrowing(nodesFut)
|
||||
|
||||
asyncTest "FloodSub validation should fail":
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check false # if we get here, it should fail
|
||||
|
||||
let
|
||||
@@ -186,7 +186,7 @@ suite "FloodSub":
|
||||
|
||||
asyncTest "FloodSub validation one fails and one succeeds":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foo"
|
||||
handlerFut.complete(true)
|
||||
|
||||
@@ -235,7 +235,7 @@ suite "FloodSub":
|
||||
counter = new int
|
||||
futs[i] = (
|
||||
fut,
|
||||
(proc(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
(proc(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
inc counter[]
|
||||
if counter[] == runs - 1:
|
||||
@@ -283,7 +283,7 @@ suite "FloodSub":
|
||||
counter = new int
|
||||
futs[i] = (
|
||||
fut,
|
||||
(proc(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
(proc(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
inc counter[]
|
||||
if counter[] == runs - 1:
|
||||
@@ -333,7 +333,7 @@ suite "FloodSub":
|
||||
|
||||
asyncTest "FloodSub message size validation":
|
||||
var messageReceived = 0
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check data.len < 50
|
||||
inc(messageReceived)
|
||||
|
||||
@@ -361,7 +361,7 @@ suite "FloodSub":
|
||||
check (await smallNode[0].publish("foo", smallMessage1)) > 0
|
||||
check (await bigNode[0].publish("foo", smallMessage2)) > 0
|
||||
|
||||
checkExpiring: messageReceived == 2
|
||||
checkUntilTimeout: messageReceived == 2
|
||||
|
||||
check (await smallNode[0].publish("foo", bigMessage)) > 0
|
||||
check (await bigNode[0].publish("foo", bigMessage)) > 0
|
||||
@@ -375,7 +375,7 @@ suite "FloodSub":
|
||||
|
||||
asyncTest "FloodSub message size validation 2":
|
||||
var messageReceived = 0
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
inc(messageReceived)
|
||||
|
||||
let
|
||||
@@ -396,7 +396,7 @@ suite "FloodSub":
|
||||
|
||||
check (await bigNode1[0].publish("foo", bigMessage)) > 0
|
||||
|
||||
checkExpiring: messageReceived == 1
|
||||
checkUntilTimeout: messageReceived == 1
|
||||
|
||||
await allFuturesThrowing(
|
||||
bigNode1[0].switch.stop(),
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -24,7 +24,8 @@ import utils
|
||||
|
||||
import ../helpers
|
||||
|
||||
proc noop(data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc noop(data: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
|
||||
const MsgIdSuccess = "msg id gen success"
|
||||
|
||||
@@ -524,6 +525,17 @@ suite "GossipSub internal":
|
||||
await conn.close()
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "invalid message bytes":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
expect(CatchableError):
|
||||
await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])
|
||||
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "rebalanceMesh fail due to backoff":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
let topic = "foobar"
|
||||
@@ -680,7 +692,7 @@ suite "GossipSub internal":
|
||||
)
|
||||
peer.iHaveBudget = 0
|
||||
let iwants = gossipSub.handleIHave(peer, @[msg])
|
||||
check: iwants.messageIds.len == 0
|
||||
check: iwants.messageIDs.len == 0
|
||||
|
||||
block:
|
||||
# given duplicate ihave should generate only one iwant
|
||||
@@ -695,7 +707,7 @@ suite "GossipSub internal":
|
||||
messageIDs: @[id, id, id]
|
||||
)
|
||||
let iwants = gossipSub.handleIHave(peer, @[msg])
|
||||
check: iwants.messageIds.len == 1
|
||||
check: iwants.messageIDs.len == 1
|
||||
|
||||
block:
|
||||
# given duplicate iwant should generate only one message
|
||||
@@ -718,104 +730,6 @@ suite "GossipSub internal":
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "two IHAVEs should generate only one IWANT":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
var iwantCount = 0
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
|
||||
check false
|
||||
|
||||
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
|
||||
|
||||
let topic = "foobar"
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.subscribe(topic, handler2)
|
||||
|
||||
# Setup two connections and two peers
|
||||
var ihaveMessageId: string
|
||||
var firstPeer: PubSubPeer
|
||||
let seqno = @[0'u8, 1, 2, 3]
|
||||
for i in 0..<2:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
if isNil(firstPeer):
|
||||
firstPeer = peer
|
||||
ihaveMessageId = byteutils.toHex(seqno) & $firstPeer.peerId
|
||||
peer.handler = handler
|
||||
|
||||
# Simulate that each peer sends an IHAVE message to our node
|
||||
let msg = ControlIHave(
|
||||
topicID: topic,
|
||||
messageIDs: @[ihaveMessageId.toBytes()]
|
||||
)
|
||||
let iwants = gossipSub.handleIHave(peer, @[msg])
|
||||
if iwants.messageIds.len > 0:
|
||||
iwantCount += 1
|
||||
|
||||
# Verify that our node responds with only one IWANT message
|
||||
check: iwantCount == 1
|
||||
check: gossipSub.outstandingIWANTs.contains(ihaveMessageId.toBytes())
|
||||
|
||||
# Simulate that our node receives the RPCMsg in response to the IWANT
|
||||
let actualMessageData = "Hello, World!".toBytes
|
||||
let rpcMsg = RPCMsg(
|
||||
messages: @[Message(
|
||||
fromPeer: firstPeer.peerId,
|
||||
seqno: seqno,
|
||||
data: actualMessageData
|
||||
)]
|
||||
)
|
||||
await gossipSub.rpcHandler(firstPeer, encodeRpcMsg(rpcMsg, false))
|
||||
|
||||
check: not gossipSub.outstandingIWANTs.contains(ihaveMessageId.toBytes())
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "handle unanswered IWANT messages":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
gossipSub.parameters.heartbeatInterval = 50.milliseconds
|
||||
gossipSub.parameters.iwantTimeout = 10.milliseconds
|
||||
await gossipSub.start()
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} = discard
|
||||
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
|
||||
|
||||
let topic = "foobar"
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.subscribe(topic, handler2)
|
||||
|
||||
# Setup a connection and a peer
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
|
||||
# Simulate that the peer sends an IHAVE message to our node
|
||||
let ihaveMessageId = @[0'u8, 1, 2, 3]
|
||||
let ihaveMsg = ControlIHave(
|
||||
topicID: topic,
|
||||
messageIDs: @[ihaveMessageId]
|
||||
)
|
||||
discard gossipSub.handleIHave(peer, @[ihaveMsg])
|
||||
|
||||
check: gossipSub.outstandingIWANTs.contains(ihaveMessageId)
|
||||
check: peer.behaviourPenalty == 0.0
|
||||
|
||||
await sleepAsync(60.milliseconds)
|
||||
|
||||
check: not gossipSub.outstandingIWANTs.contains(ihaveMessageId)
|
||||
check: peer.behaviourPenalty == 0.1
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
proc setupTest(): Future[tuple[gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]]] {.async.} =
|
||||
let
|
||||
nodes = generateNodes(2, gossip = true, verifySignature = false)
|
||||
@@ -828,10 +742,10 @@ suite "GossipSub internal":
|
||||
|
||||
var receivedMessages = new(HashSet[seq[byte]])
|
||||
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async.} =
|
||||
receivedMessages[].incl(data)
|
||||
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
nodes[0].subscribe("foobar", handlerA)
|
||||
@@ -876,10 +790,10 @@ suite "GossipSub internal":
|
||||
let (iwantMessageIds, sentMessages) = createMessages(gossip0, gossip1, messageSize, messageSize)
|
||||
|
||||
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
|
||||
ihave: @[ControlIHave(topicId: "foobar", messageIds: iwantMessageIds)]
|
||||
))))
|
||||
ihave: @[ControlIHave(topicID: "foobar", messageIDs: iwantMessageIds)]
|
||||
))), isHighPriority = false)
|
||||
|
||||
checkExpiring: receivedMessages[] == sentMessages
|
||||
checkUntilTimeout: receivedMessages[] == sentMessages
|
||||
check receivedMessages[].len == 2
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
@@ -893,11 +807,11 @@ suite "GossipSub internal":
|
||||
let (bigIWantMessageIds, sentMessages) = createMessages(gossip0, gossip1, messageSize, messageSize)
|
||||
|
||||
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
|
||||
ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
|
||||
))))
|
||||
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
|
||||
))), isHighPriority = false)
|
||||
|
||||
await sleepAsync(300.milliseconds)
|
||||
checkExpiring: receivedMessages[].len == 0
|
||||
checkUntilTimeout: receivedMessages[].len == 0
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
@@ -910,10 +824,10 @@ suite "GossipSub internal":
|
||||
let (bigIWantMessageIds, sentMessages) = createMessages(gossip0, gossip1, size1, size2)
|
||||
|
||||
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
|
||||
ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
|
||||
))))
|
||||
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
|
||||
))), isHighPriority = false)
|
||||
|
||||
checkExpiring: receivedMessages[] == sentMessages
|
||||
checkUntilTimeout: receivedMessages[] == sentMessages
|
||||
check receivedMessages[].len == 2
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
@@ -928,8 +842,8 @@ suite "GossipSub internal":
|
||||
let (bigIWantMessageIds, sentMessages) = createMessages(gossip0, gossip1, size1, size2)
|
||||
|
||||
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
|
||||
ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
|
||||
))))
|
||||
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
|
||||
))), isHighPriority = false)
|
||||
|
||||
var smallestSet: HashSet[seq[byte]]
|
||||
let seqs = toSeq(sentMessages)
|
||||
@@ -938,7 +852,7 @@ suite "GossipSub internal":
|
||||
else:
|
||||
smallestSet.incl(seqs[1])
|
||||
|
||||
checkExpiring: receivedMessages[] == smallestSet
|
||||
checkUntilTimeout: receivedMessages[] == smallestSet
|
||||
check receivedMessages[].len == 1
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
@@ -47,7 +47,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "GossipSub validation should succeed":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
@@ -92,7 +92,7 @@ suite "GossipSub":
|
||||
await allFuturesThrowing(nodesFut.concat())
|
||||
|
||||
asyncTest "GossipSub validation should fail (reject)":
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check false # if we get here, it should fail
|
||||
|
||||
let
|
||||
@@ -138,7 +138,7 @@ suite "GossipSub":
|
||||
await allFuturesThrowing(nodesFut.concat())
|
||||
|
||||
asyncTest "GossipSub validation should fail (ignore)":
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check false # if we get here, it should fail
|
||||
|
||||
let
|
||||
@@ -185,7 +185,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "GossipSub validation one fails and one succeeds":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foo"
|
||||
handlerFut.complete(true)
|
||||
|
||||
@@ -238,7 +238,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "GossipSub unsub - resub faster than backoff":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
@@ -289,7 +289,7 @@ suite "GossipSub":
|
||||
await allFuturesThrowing(nodesFut.concat())
|
||||
|
||||
asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let
|
||||
@@ -310,9 +310,9 @@ suite "GossipSub":
|
||||
let gossip1 = GossipSub(nodes[0])
|
||||
let gossip2 = GossipSub(nodes[1])
|
||||
|
||||
checkExpiring:
|
||||
"foobar" in gossip2.topics and
|
||||
"foobar" in gossip1.gossipsub and
|
||||
checkUntilTimeout:
|
||||
"foobar" in gossip2.topics
|
||||
"foobar" in gossip1.gossipsub
|
||||
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
await allFuturesThrowing(
|
||||
@@ -323,7 +323,7 @@ suite "GossipSub":
|
||||
await allFuturesThrowing(nodesFut.concat())
|
||||
|
||||
asyncTest "e2e - GossipSub should add remote peer topic subscriptions if both peers are subscribed":
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let
|
||||
@@ -374,7 +374,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "e2e - GossipSub send over fanout A -> B":
|
||||
var passed = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
passed.complete()
|
||||
|
||||
@@ -428,7 +428,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
|
||||
var passed = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
passed.complete()
|
||||
|
||||
@@ -454,9 +454,9 @@ suite "GossipSub":
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
let gsNode = GossipSub(nodes[1])
|
||||
checkExpiring:
|
||||
gsNode.mesh.getOrDefault("foobar").len == 0 and
|
||||
GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0 and
|
||||
checkUntilTimeout:
|
||||
gsNode.mesh.getOrDefault("foobar").len == 0
|
||||
GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0
|
||||
(
|
||||
GossipSub(nodes[0]).gossipsub.getOrDefault("foobar").len == 1 or
|
||||
GossipSub(nodes[0]).fanout.getOrDefault("foobar").len == 1
|
||||
@@ -481,7 +481,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "e2e - GossipSub send over mesh A -> B":
|
||||
var passed: Future[bool] = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
passed.complete(true)
|
||||
|
||||
@@ -548,11 +548,11 @@ suite "GossipSub":
|
||||
var
|
||||
aReceived = 0
|
||||
cReceived = 0
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async.} =
|
||||
inc aReceived
|
||||
check aReceived < 2
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} = discard
|
||||
proc handlerC(topic: string, data: seq[byte]) {.async.} =
|
||||
inc cReceived
|
||||
check cReceived < 2
|
||||
cRelayed.complete()
|
||||
@@ -569,19 +569,19 @@ suite "GossipSub":
|
||||
proc slowValidator(topic: string, message: Message): Future[ValidationResult] {.async.} =
|
||||
await cRelayed
|
||||
# Empty A & C caches to detect duplicates
|
||||
gossip1.seen = TimedCache[MessageId].init()
|
||||
gossip3.seen = TimedCache[MessageId].init()
|
||||
gossip1.seen = TimedCache[SaltedId].init()
|
||||
gossip3.seen = TimedCache[SaltedId].init()
|
||||
let msgId = toSeq(gossip2.validationSeen.keys)[0]
|
||||
checkExpiring(try: gossip2.validationSeen[msgId].len > 0 except: false)
|
||||
checkUntilTimeout(try: gossip2.validationSeen[msgId].len > 0 except: false)
|
||||
result = ValidationResult.Accept
|
||||
bFinished.complete()
|
||||
|
||||
nodes[1].addValidator("foobar", slowValidator)
|
||||
|
||||
checkExpiring(
|
||||
gossip1.mesh.getOrDefault("foobar").len == 2 and
|
||||
gossip2.mesh.getOrDefault("foobar").len == 2 and
|
||||
gossip3.mesh.getOrDefault("foobar").len == 2)
|
||||
checkUntilTimeout:
|
||||
gossip1.mesh.getOrDefault("foobar").len == 2
|
||||
gossip2.mesh.getOrDefault("foobar").len == 2
|
||||
gossip3.mesh.getOrDefault("foobar").len == 2
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 2
|
||||
|
||||
await bFinished
|
||||
@@ -596,7 +596,7 @@ suite "GossipSub":
|
||||
|
||||
asyncTest "e2e - GossipSub send over floodPublish A -> B":
|
||||
var passed: Future[bool] = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
passed.complete(true)
|
||||
|
||||
@@ -653,7 +653,7 @@ suite "GossipSub":
|
||||
)
|
||||
|
||||
proc connectNodes(nodes: seq[PubSub], target: PubSub) {.async.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
|
||||
for node in nodes:
|
||||
@@ -661,7 +661,7 @@ suite "GossipSub":
|
||||
await node.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)
|
||||
|
||||
proc baseTestProcedure(nodes: seq[PubSub], gossip1: GossipSub, numPeersFirstMsg: int, numPeersSecondMsg: int) {.async.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
|
||||
block setup:
|
||||
@@ -676,7 +676,7 @@ suite "GossipSub":
|
||||
|
||||
# Now try with a mesh
|
||||
gossip1.subscribe("foobar", handler)
|
||||
checkExpiring: gossip1.mesh.peers("foobar") > 5
|
||||
checkUntilTimeout: gossip1.mesh.peers("foobar") > 5
|
||||
|
||||
# use a different length so that the message is not equal to the last
|
||||
check (await nodes[0].publish("foobar", newSeq[byte](500_000))) == numPeersSecondMsg
|
||||
@@ -727,7 +727,7 @@ suite "GossipSub":
|
||||
var handler: TopicHandler
|
||||
closureScope:
|
||||
var peerName = $dialer.peerInfo.peerId
|
||||
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
|
||||
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
|
||||
if peerName notin seen:
|
||||
seen[peerName] = 0
|
||||
seen[peerName].inc
|
||||
@@ -778,7 +778,7 @@ suite "GossipSub":
|
||||
var handler: TopicHandler
|
||||
capture dialer, i:
|
||||
var peerName = $dialer.peerInfo.peerId
|
||||
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
|
||||
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
|
||||
if peerName notin seen:
|
||||
seen[peerName] = 0
|
||||
seen[peerName].inc
|
||||
@@ -819,7 +819,7 @@ suite "GossipSub":
|
||||
# PX to A & C
|
||||
#
|
||||
# C sent his SPR, not A
|
||||
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard # not used in this test
|
||||
|
||||
let
|
||||
@@ -895,9 +895,9 @@ suite "GossipSub":
|
||||
await nodes[1].switch.connect(nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs)
|
||||
|
||||
let bFinished = newFuture[void]()
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = bFinished.complete()
|
||||
proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} = doAssert false
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async.} = discard
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} = bFinished.complete()
|
||||
proc handlerC(topic: string, data: seq[byte]) {.async.} = doAssert false
|
||||
|
||||
nodes[0].subscribe("foobar", handlerA)
|
||||
nodes[1].subscribe("foobar", handlerB)
|
||||
@@ -911,15 +911,15 @@ suite "GossipSub":
|
||||
check: gossip3.mesh.peers("foobar") == 1
|
||||
|
||||
gossip3.broadcast(gossip3.mesh["foobar"], RPCMsg(control: some(ControlMessage(
|
||||
idontwant: @[ControlIWant(messageIds: @[newSeq[byte](10)])]
|
||||
))))
|
||||
checkExpiring: gossip2.mesh.getOrDefault("foobar").anyIt(it.heDontWants[^1].len == 1)
|
||||
idontwant: @[ControlIWant(messageIDs: @[newSeq[byte](10)])]
|
||||
))), isHighPriority = true)
|
||||
checkUntilTimeout: gossip2.mesh.getOrDefault("foobar").anyIt(it.heDontWants[^1].len == 1)
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
|
||||
|
||||
await bFinished
|
||||
|
||||
checkExpiring: toSeq(gossip3.mesh.getOrDefault("foobar")).anyIt(it.heDontWants[^1].len == 1)
|
||||
checkUntilTimeout: toSeq(gossip3.mesh.getOrDefault("foobar")).anyIt(it.heDontWants[^1].len == 1)
|
||||
check: toSeq(gossip1.mesh.getOrDefault("foobar")).anyIt(it.heDontWants[^1].len == 0)
|
||||
|
||||
await allFuturesThrowing(
|
||||
@@ -943,7 +943,7 @@ suite "GossipSub":
|
||||
|
||||
await subscribeNodes(nodes)
|
||||
|
||||
proc handle(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
|
||||
proc handle(topic: string, data: seq[byte]) {.async.} = discard
|
||||
|
||||
let gossip0 = GossipSub(nodes[0])
|
||||
let gossip1 = GossipSub(nodes[1])
|
||||
@@ -952,6 +952,10 @@ suite "GossipSub":
|
||||
gossip1.subscribe("foobar", handle)
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
# Avoid being disconnected by failing signature verification
gossip0.verifySignature = false
gossip1.verifySignature = false

return (nodes, gossip0, gossip1)

proc currentRateLimitHits(): float64 =
@@ -964,8 +968,11 @@ suite "GossipSub":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()

let msg = RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: "Valid data".toBytes)])
gossip0.broadcast(gossip0.mesh["foobar"], msg)
gossip0.broadcast(
gossip0.mesh["foobar"],
RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](10))]),
isHighPriority = true,
)
await sleepAsync(300.millis)

check currentRateLimitHits() == rateLimitHits
@@ -973,9 +980,14 @@ suite "GossipSub":

# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
gossip0.broadcast(gossip0.mesh["foobar"], msg)
gossip0.broadcast(
gossip0.mesh["foobar"],
RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](12))]),
isHighPriority = true,
)
await sleepAsync(300.millis)

checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
check currentRateLimitHits() == rateLimitHits

await stopNodes(nodes)
@@ -986,8 +998,7 @@ suite "GossipSub":
let (nodes, gossip0, gossip1) = await initializeGossipTest()

# Simulate sending an undecodable message
let msg = newSeqWith[byte](30, 1.byte)
await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(msg)
await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](33, 1.byte), isHighPriority = true)
await sleepAsync(300.millis)

check currentRateLimitHits() == rateLimitHits + 1
@@ -995,9 +1006,9 @@ suite "GossipSub":

# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(msg)
await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](35, 1.byte), isHighPriority = true)

checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2

await stopNodes(nodes)
@@ -1008,11 +1019,10 @@ suite "GossipSub":

let msg = RPCMsg(control: some(ControlMessage(prune: @[
ControlPrune(topicID: "foobar", peers: @[
PeerInfoMsg(peerId: PeerId(data: newSeq[byte](30)))
PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))
], backoff: 123'u64)
])))

gossip0.broadcast(gossip0.mesh["foobar"], msg)
gossip0.broadcast(gossip0.mesh["foobar"], msg, isHighPriority = true)
await sleepAsync(300.millis)

check currentRateLimitHits() == rateLimitHits + 1
@@ -1020,9 +1030,48 @@ suite "GossipSub":

# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
gossip0.broadcast(gossip0.mesh["foobar"], msg)
let msg2 = RPCMsg(control: some(ControlMessage(prune: @[
ControlPrune(topicID: "foobar", peers: @[
PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))
], backoff: 123'u64)
])))
gossip0.broadcast(gossip0.mesh["foobar"], msg2, isHighPriority = true)

checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2

await stopNodes(nodes)

asyncTest "e2e - GossipSub should rate limit invalid messages above the size allowed":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()

let topic = "foobar"
proc execValidator(topic: string, message: messages.Message): Future[ValidationResult] {.raises: [].} =
let res = newFuture[ValidationResult]()
res.complete(ValidationResult.Reject)
res

gossip0.addValidator(topic, execValidator)
gossip1.addValidator(topic, execValidator)

let msg = RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](40))])

gossip0.broadcast(gossip0.mesh[topic], msg, isHighPriority = true)
await sleepAsync(300.millis)

check currentRateLimitHits() == rateLimitHits + 1
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
gossip0.broadcast(
gossip0.mesh[topic],
RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](35))]),
isHighPriority = true,
)

checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2

await stopNodes(nodes)
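
# The rate-limit tests above all follow the same pattern; a minimal sketch of
# the behaviour they assert, assuming only the names already used in these
# tests (currentRateLimitHits, disconnectPeerAboveRateLimit, checkUntilTimeout).
# This is an illustration, not part of the diff:
#
#   # with disconnection left disabled, an oversized / undecodable / invalid
#   # RPC increments currentRateLimitHits() but the peer stays connected
#   gossip1.parameters.disconnectPeerAboveRateLimit = true
#   # the next offending RPC increments the counter again and the connection
#   # is dropped, which checkUntilTimeout waits for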

@@ -59,7 +59,7 @@ suite "GossipSub":
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@@ -93,7 +93,7 @@ suite "GossipSub":

asyncTest "GossipSub invalid topic subscription":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)

@@ -155,7 +155,7 @@ suite "GossipSub":
# DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
### await subscribeNodes(nodes)

proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handler(topic: string, data: seq[byte]) {.async.} = discard
nodes[1].subscribe("foobar", handler)

await invalidDetected.wait(10.seconds)
@@ -182,10 +182,10 @@ suite "GossipSub":
await GossipSub(nodes[2]).addDirectPeer(nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs)

var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
proc noop(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc noop(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"

nodes[0].subscribe("foobar", noop)
@@ -226,7 +226,7 @@ suite "GossipSub":
GossipSub(nodes[1]).parameters.graylistThreshold = 100000

var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()

@@ -272,7 +272,7 @@ suite "GossipSub":
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@@ -324,7 +324,7 @@ suite "GossipSub":

# Adding again subscriptions

proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"

for i in 0..<runs:
@@ -368,7 +368,7 @@ suite "GossipSub":
)

var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
handlerFut.complete()

await subscribeNodes(nodes)

@@ -1,12 +1,11 @@
{.used.}

import unittest2, options, sets, sequtils
import unittest2, sequtils
import stew/byteutils
import ../../libp2p/[peerid,
crypto/crypto,
protocols/pubsub/mcache,
protocols/pubsub/rpc/messages]
import ./utils
protocols/pubsub/rpc/message]

var rng = newRng()

@@ -27,48 +26,48 @@ suite "MCache":
var mCache = MCache.init(3, 5)

for i in 0..<3:
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["foo"])
var
msg =
Message(fromPeer: randomPeerId(), seqno: "12345".toBytes(), topic: "foo")
mCache.put(defaultMsgIdProvider(msg).expect(MsgIdGenSuccess), msg)

for i in 0..<5:
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["bar"])
var
msg =
Message(fromPeer: randomPeerId(), seqno: "12345".toBytes(), topic: "bar")
mCache.put(defaultMsgIdProvider(msg).expect(MsgIdGenSuccess), msg)

var mids = mCache.window("foo")
check mids.len == 3

var id = toSeq(mids)[0]
check mCache.get(id).get().topicIds[0] == "foo"
check mCache.get(id).get().topic == "foo"

test "shift - shift 1 window at a time":
var mCache = MCache.init(1, 5)

for i in 0..<3:
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["foo"])
var
msg =
Message(fromPeer: randomPeerId(), seqno: "12345".toBytes(), topic: "foo")
mCache.put(defaultMsgIdProvider(msg).expect(MsgIdGenSuccess), msg)

mCache.shift()
check mCache.window("foo").len == 0

for i in 0..<3:
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["bar"])
var
msg =
Message(fromPeer: randomPeerId(), seqno: "12345".toBytes(), topic: "bar")
mCache.put(defaultMsgIdProvider(msg).expect(MsgIdGenSuccess), msg)

mCache.shift()
check mCache.window("bar").len == 0

for i in 0..<3:
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["baz"])
var
msg =
Message(fromPeer: randomPeerId(), seqno: "12345".toBytes(), topic: "baz")
mCache.put(defaultMsgIdProvider(msg).expect(MsgIdGenSuccess), msg)

mCache.shift()
@@ -78,21 +77,21 @@ suite "MCache":
var mCache = MCache.init(1, 5)

for i in 0..<3:
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["foo"])
var
msg =
Message(fromPeer: randomPeerId(), seqno: "12345".toBytes(), topic: "foo")
mCache.put(defaultMsgIdProvider(msg).expect(MsgIdGenSuccess), msg)

for i in 0..<3:
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["bar"])
var
msg =
Message(fromPeer: randomPeerId(), seqno: "12345".toBytes(), topic: "bar")
mCache.put(defaultMsgIdProvider(msg).expect(MsgIdGenSuccess), msg)

for i in 0..<3:
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["baz"])
var
msg =
Message(fromPeer: randomPeerId(), seqno: "12345".toBytes(), topic: "baz")
mCache.put(defaultMsgIdProvider(msg).expect(MsgIdGenSuccess), msg)

mCache.shift()

@@ -75,14 +75,17 @@ suite "Message":
msgIdResult.error == ValidationResult.Reject

test "byteSize for RPCMsg":
var msg = Message(
fromPeer: PeerId(data: @['a'.byte, 'b'.byte]), # 2 bytes
data: @[1'u8, 2, 3], # 3 bytes
seqno: @[4'u8, 5], # 2 bytes
signature: @['c'.byte, 'd'.byte], # 2 bytes
key: @[6'u8, 7], # 2 bytes
topicIds: @["abc", "defgh"] # 3 + 5 = 8 bytes
)
var
msg =
Message(
fromPeer: PeerId(data: @['a'.byte, 'b'.byte]), # 2 bytes
data: @[1'u8, 2, 3], # 3 bytes
seqno: @[4'u8, 5], # 2 bytes
signature: @['c'.byte, 'd'.byte], # 2 bytes
key: @[6'u8, 7], # 2 bytes
topic: "abcde" # 5 bytes
,
)

var peerInfo = PeerInfoMsg(
peerId: PeerId(data: @['e'.byte]), # 1 byte
@@ -90,20 +93,20 @@ suite "Message":
)

var controlIHave = ControlIHave(
topicId: "ijk", # 3 bytes
messageIds: @[ @['l'.byte], @['m'.byte, 'n'.byte] ] # 1 + 2 = 3 bytes
topicID: "ijk", # 3 bytes
messageIDs: @[ @['l'.byte], @['m'.byte, 'n'.byte] ] # 1 + 2 = 3 bytes
)

var controlIWant = ControlIWant(
messageIds: @[ @['o'.byte, 'p'.byte], @['q'.byte] ] # 2 + 1 = 3 bytes
messageIDs: @[ @['o'.byte, 'p'.byte], @['q'.byte] ] # 2 + 1 = 3 bytes
)

var controlGraft = ControlGraft(
topicId: "rst" # 3 bytes
topicID: "rst" # 3 bytes
)

var controlPrune = ControlPrune(
topicId: "uvw", # 3 bytes
topicID: "uvw", # 3 bytes
peers: @[peerInfo, peerInfo], # (1 + 2) * 2 = 6 bytes
backoff: 12345678 # 8 bytes for uint64
)
@@ -118,10 +121,10 @@ suite "Message":

var rpcMsg = RPCMsg(
subscriptions: @[SubOpts(subscribe: true, topic: "a".repeat(12)), SubOpts(subscribe: false, topic: "b".repeat(14))], # 1 + 12 + 1 + 14 = 28 bytes
messages: @[msg, msg], # 19 * 2 = 38 bytes
messages: @[msg, msg], # 16 * 2 = 32 bytes
ping: @[1'u8, 2], # 2 bytes
pong: @[3'u8, 4], # 2 bytes
control: some(control) # 12 + 3 + 3 + 17 + 3 = 38 bytes
)

check byteSize(rpcMsg) == 28 + 38 + 2 + 2 + 38 # Total: 108 bytes
check byteSize(rpcMsg) == 28 + 32 + 2 + 2 + 38 # Total: 102 bytes
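# A hedged breakdown of the updated total, restating the per-field sizes from
# the comments above (illustration only, not part of the diff):
#   subscriptions: (1 + 12) + (1 + 14)         = 28 bytes
#   messages:      2 * (2 + 3 + 2 + 2 + 2 + 5) = 32 bytes
#   ping:                                         2 bytes
#   pong:                                         2 bytes
#   control:       12 + 3 + 3 + 17 + 3          = 38 bytes
#   total:         28 + 32 + 2 + 2 + 38         = 102 bytes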

@@ -24,6 +24,8 @@ suite "TimedCache":
2 in cache
3 in cache

cache.addedAt(2) == now + 3.seconds

check:
cache.put(2, now + 7.seconds) # refreshes 2
not cache.put(4, now + 12.seconds) # expires 3
@@ -33,6 +35,23 @@ suite "TimedCache":
3 notin cache
4 in cache

check:
cache.del(4).isSome()
4 notin cache

check:
not cache.put(100, now + 100.seconds) # expires everything
100 in cache
2 notin cache

test "enough items to force cache heap storage growth":
var cache = TimedCache[int].init(5.seconds)

let now = Moment.now()
for i in 101..100000:
check:
not cache.put(i, now)

for i in 101..100000:
check:
i in cache
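
# A hedged reading of TimedCache.put as exercised above (inferred from these
# checks, not a statement of the API contract): put appears to return true when
# the key was already present (the "refreshes 2" case) and false when it is new,
# so `not cache.put(i, now)` asserts that each freshly inserted key was unseen.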
Some files were not shown because too many files have changed in this diff.