Mirror of https://github.com/vacp2p/nim-libp2p.git, synced 2026-01-10 12:28:18 -05:00

Compare commits: unused-dep...test-inter (14 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 75b05fa6ba |  |
|  | 9997f3e3d3 |  |
|  | 4d0b4ecc22 |  |
|  | ccb24b5f1f |  |
|  | 5cb493439d |  |
|  | 24b284240a |  |
|  | b0f77d24f9 |  |
|  | e32ac492d3 |  |
|  | 470a7f8cc5 |  |
|  | b269fce289 |  |
|  | bc4febe92c |  |
|  | b5f9bfe0f4 |  |
|  | 4ce1e8119b |  |
|  | 65136b38e2 |  |
.github/CODEOWNERS (vendored, new file, 1 line)

@@ -0,0 +1 @@
+* @vacp2p/p2p
.github/workflows/auto_assign_pr.yml (vendored, deleted, 12 lines)

@@ -1,12 +0,0 @@
name: Auto Assign PR to Creator

on:
  pull_request:
    types:
      - opened

jobs:
  assign_creator:
    runs-on: ubuntu-latest
    steps:
      - uses: toshimaru/auto-author-assign@v1.6.2
.github/workflows/ci.yml (vendored, deleted, 120 lines)

@@ -1,120 +0,0 @@
name: Continuous Integration

on:
  push:
    branches:
      - master
  pull_request:
  merge_group:
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  test:
    timeout-minutes: 90
    strategy:
      fail-fast: false
      matrix:
        platform:
          - os: linux
            cpu: amd64
          - os: linux
            cpu: i386
          - os: linux-gcc-14
            cpu: amd64
          - os: macos
            cpu: amd64
          - os: macos-14
            cpu: arm64
          - os: windows
            cpu: amd64
        nim:
          - ref: version-1-6
            memory_management: refc
          - ref: version-2-0
            memory_management: refc
        include:
          - platform:
              os: linux
            builder: ubuntu-22.04
            shell: bash
          - platform:
              os: linux-gcc-14
            builder: ubuntu-24.04
            shell: bash
          - platform:
              os: macos
            builder: macos-13
            shell: bash
          - platform:
              os: macos-14
            builder: macos-14
            shell: bash
          - platform:
              os: windows
            builder: windows-2022
            shell: msys2 {0}

    defaults:
      run:
        shell: ${{ matrix.shell }}

    name: '${{ matrix.platform.os }}-${{ matrix.platform.cpu }} (Nim ${{ matrix.nim.ref }})'
    runs-on: ${{ matrix.builder }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: true

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          os: ${{ matrix.platform.os }}
          cpu: ${{ matrix.platform.cpu }}
          shell: ${{ matrix.shell }}
          nim_ref: ${{ matrix.nim.ref }}

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: '~1.16.0' # That's the minimum Go version that works with arm.

      - name: Install p2pd
        run: |
          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3

      - name: Restore deps from cache
        id: deps-cache
        uses: actions/cache@v3
        with:
          path: nimbledeps
          # Using nim.ref as a simple way to differentiate between nimble using the "pkgs" or "pkgs2" directories.
          # The change happened on Nimble v0.14.0. Also forcing the deps to be reinstalled on each os and cpu.
          key: nimbledeps-${{ matrix.nim.ref }}-${{ matrix.builder }}-${{ matrix.platform.cpu }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows

      - name: Install deps
        if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
        run: |
          nimble install_pinned

      - name: Use gcc 14
        if: ${{ matrix.platform.os == 'linux-gcc-14' }}
        run: |
          # Add GCC-14 to alternatives
          sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14

          # Set GCC-14 as the default
          sudo update-alternatives --set gcc /usr/bin/gcc-14

      - name: Run tests
        run: |
          nim --version
          nimble --version
          gcc --version

          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
          nimble test
.github/workflows/coverage.yml (vendored, deleted, 70 lines)

@@ -1,70 +0,0 @@
name: Coverage

on:
  # On push to common branches, this computes the coverage that PRs will use for diff
  push:
    branches:
      - master
  pull_request:
  merge_group:
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  codecov:
    name: Run coverage and upload to codecov
    runs-on: ubuntu-22.04
    env:
      CICOV: YES
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          os: linux
          cpu: amd64
          shell: bash

      - name: Restore deps from cache
        id: deps-cache
        uses: actions/cache@v4
        with:
          path: nimbledeps
          key: nimbledeps-${{ hashFiles('.pinned') }}

      - name: Install deps
        if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
        run: |
          nimble install_pinned

      - name: Setup coverage
        run: |
          sudo apt-get update
          sudo apt-get install -y lcov build-essential git curl
          mkdir coverage

      - name: Run test suite with coverage flags
        run: |
          export NIMFLAGS="--lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage"
          nimble testnative
          nimble testpubsub
          nimble testfilter

      - name: Run coverage
        run: |
          find nimcache -name *.c -delete
          lcov --capture --directory nimcache --output-file coverage/coverage.info
          shopt -s globstar
          ls `pwd`/libp2p/{*,**/*}.nim
          lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
          genhtml coverage/coverage.f.info --output-directory coverage/output

      - name: Upload coverage to codecov
        run: |
          bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
.github/workflows/daily_amd64.yml (vendored, deleted, 14 lines)

@@ -1,14 +0,0 @@
name: Daily amd64

on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  test_amd64:
    name: Daily amd64
    uses: ./.github/workflows/daily_common.yml
    with:
      nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': 'version-2-0', 'memory_management': 'refc'}]"
      cpu: "['amd64']"
.github/workflows/daily_common.yml (vendored, deleted, 101 lines)

@@ -1,101 +0,0 @@
name: Daily Common
# Serves as base workflow for daily tasks, it's not run by itself.

on:
  workflow_call:
    inputs:
      nim:
        description: 'Nim Configuration'
        required: true
        type: string # Following this format: [{"ref": ..., "memory_management": ...}, ...]
      cpu:
        description: 'CPU'
        required: true
        type: string
      exclude:
        description: 'Exclude matrix configurations'
        required: false
        type: string
        default: "[]"
      use_sat_solver:
        description: 'Install dependencies with SAT Solver'
        required: false
        type: boolean
        default: false

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  delete_cache:
    name: Delete github action's branch cache
    runs-on: ubuntu-latest
    steps:
      - uses: snnaplab/delete-branch-cache-action@v1

  test:
    needs: delete_cache
    timeout-minutes: 90
    strategy:
      fail-fast: false
      matrix:
        platform:
          - os: linux
            builder: ubuntu-22.04
            shell: bash
          - os: macos
            builder: macos-13
            shell: bash
          - os: windows
            builder: windows-2022
            shell: msys2 {0}
        nim: ${{ fromJSON(inputs.nim) }}
        cpu: ${{ fromJSON(inputs.cpu) }}
        exclude: ${{ fromJSON(inputs.exclude) }}

    defaults:
      run:
        shell: ${{ matrix.platform.shell }}

    name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.nim.ref }})'
    runs-on: ${{ matrix.platform.builder }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          os: ${{ matrix.platform.os }}
          shell: ${{ matrix.platform.shell }}
          nim_ref: ${{ matrix.nim.ref }}
          cpu: ${{ matrix.cpu }}

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: '~1.16.0'
          cache: false

      - name: Install p2pd
        run: |
          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3

      - name: Install dependencies
        run: |
          nimble install -y --depsOnly

      - name: Run tests
        run: |
          nim --version
          nimble --version

          if [[ "${{ inputs.use_sat_solver }}" == "true" ]]; then
            dependency_solver="sat"
          else
            dependency_solver="legacy"
          fi

          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }} --solver:${dependency_solver}"
          nimble test
.github/workflows/daily_devel.yml (vendored, deleted, 14 lines)

@@ -1,14 +0,0 @@
name: Daily Nim Devel

on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  test_nim_devel:
    name: Daily Nim Devel
    uses: ./.github/workflows/daily_common.yml
    with:
      nim: "[{'ref': 'devel', 'memory_management': 'orc'}]"
      cpu: "['amd64']"
.github/workflows/daily_i386.yml (vendored, deleted, 15 lines)

@@ -1,15 +0,0 @@
name: Daily i386

on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  test_i386:
    name: Daily i386 (Linux)
    uses: ./.github/workflows/daily_common.yml
    with:
      nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': 'version-2-0', 'memory_management': 'refc'}, {'ref': 'devel', 'memory_management': 'orc'}]"
      cpu: "['i386']"
      exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"
.github/workflows/daily_sat.yml (vendored, deleted, 15 lines)

@@ -1,15 +0,0 @@
name: Daily SAT

on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  test_amd64:
    name: Daily SAT
    uses: ./.github/workflows/daily_common.yml
    with:
      nim: "[{'ref': 'version-2-0', 'memory_management': 'refc'}]"
      cpu: "['amd64']"
      use_sat_solver: true
.github/workflows/dependencies.yml (vendored, deleted, 53 lines)

@@ -1,53 +0,0 @@
name: Dependencies

on:
  push:
    branches:
      - master
  workflow_dispatch:

jobs:
  bumper:
    # Pushes new refs to interested external repositories, so they can do early testing against libp2p's newer versions
    runs-on: ubuntu-latest
    name: Bump libp2p's version for ${{ matrix.target.repository }}:${{ matrix.target.ref }}
    strategy:
      fail-fast: false
      matrix:
        target:
          - repository: status-im/nimbus-eth2
            ref: unstable
            token: ${{ secrets.ACTIONS_GITHUB_NIMBUS_ETH2 }}
          - repository: waku-org/nwaku
            ref: master
            token: ${{ secrets.ACTIONS_GITHUB_NWAKU }}
          - repository: codex-storage/nim-codex
            ref: master
            token: ${{ secrets.ACTIONS_GITHUB_NIM_CODEX }}
    steps:
      - name: Clone target repository
        uses: actions/checkout@v4
        with:
          repository: ${{ matrix.target.repository }}
          ref: ${{ matrix.target.ref }}
          path: nbc
          fetch-depth: 0
          token: ${{ matrix.target.token }}

      - name: Checkout this ref in target repository
        run: |
          cd nbc
          git submodule update --init vendor/nim-libp2p
          cd vendor/nim-libp2p
          git checkout $GITHUB_SHA

      - name: Push this ref to target repository
        run: |
          cd nbc
          git config --global user.email "${{ github.actor }}@users.noreply.github.com"
          git config --global user.name "${{ github.actor }}"
          git commit --allow-empty -a -m "auto-bump nim-libp2p"
          git branch -D nim-libp2p-auto-bump-${{ matrix.target.ref }} || true
          git switch -c nim-libp2p-auto-bump-${{ matrix.target.ref }}
          git push -f origin nim-libp2p-auto-bump-${{ matrix.target.ref }}
.github/workflows/documentation.yml (vendored, deleted, 111 lines)

@@ -1,111 +0,0 @@
name: Documentation Generation And Publishing

on:
  push:
    branches:
      - master
  workflow_dispatch:

jobs:
  build:
    timeout-minutes: 20

    name: 'Generate & upload documentation'
    runs-on: ubuntu-latest
    continue-on-error: true
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: true

      - uses: jiro4989/setup-nim-action@v1
        with:
          nim-version: '1.6.x'

      - name: Generate doc
        run: |
          nim --version
          nimble --version
          nimble install_pinned
          # nim doc can "fail", but the doc is still generated
          nim doc --git.url:https://github.com/vacp2p/nim-libp2p --git.commit:${GITHUB_REF##*/} --outdir:${GITHUB_REF##*/} --project libp2p || true

          # check that the folder exists
          ls ${GITHUB_REF##*/}

      - name: Clone the gh-pages branch
        uses: actions/checkout@v4
        with:
          repository: vacp2p/nim-libp2p
          ref: gh-pages
          path: subdoc
          submodules: true
          fetch-depth: 0

      - name: Commit & push
        run: |
          cd subdoc

          # Update / create this branch doc
          rm -rf ${GITHUB_REF##*/}
          mv ../${GITHUB_REF##*/} .

          # Remove .idx files
          # NOTE: git also uses idx files in its
          # internal folder, hence the `*` instead of `.`
          find * -name "*.idx" -delete
          git add .
          git config --global user.email "${{ github.actor }}@users.noreply.github.com"
          git config --global user.name "${{ github.actor }}"
          git commit -a -m "update docs for ${GITHUB_REF##*/}"
          git push origin gh-pages

  update_site:
    name: 'Rebuild website'
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - uses: actions/setup-python@v2
        with:
          python-version: 3.x

      - uses: jiro4989/setup-nim-action@v1
        with:
          nim-version: 'stable'

      - name: Generate website
        run: pip install mkdocs-material && nimble -y website

      - name: Clone the gh-pages branch
        uses: actions/checkout@v4
        with:
          repository: vacp2p/nim-libp2p
          ref: gh-pages
          path: subdoc
          fetch-depth: 0

      - name: Commit & push
        run: |
          cd subdoc

          # Ensure the latest changes are fetched and reset to the remote branch
          git fetch origin gh-pages
          git reset --hard origin/gh-pages

          rm -rf docs
          mv ../site docs

          git add .

          if git diff-index --quiet HEAD --; then
            echo "No changes to commit"
          else
            git config --global user.email "${{ github.actor }}@users.noreply.github.com"
            git config --global user.name "${{ github.actor }}"

            git commit -m "update website"
            git push origin gh-pages
          fi
.github/workflows/examples.yml (vendored, deleted, 60 lines)

@@ -1,60 +0,0 @@
name: Examples

on:
  push:
    branches:
      - master
  pull_request:
  merge_group:
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  examples:
    timeout-minutes: 30
    strategy:
      fail-fast: false

    defaults:
      run:
        shell: bash

    name: "Build Examples"
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: true

      - name: Setup Nim
        uses: "./.github/actions/install_nim"
        with:
          shell: bash
          os: linux
          cpu: amd64
          nim_ref: version-1-6

      - name: Restore deps from cache
        id: deps-cache
        uses: actions/cache@v3
        with:
          path: nimbledeps
          key: nimbledeps-${{ hashFiles('.pinned') }}

      - name: Install deps
        if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
        run: |
          nimble install_pinned

      - name: Build and run examples
        run: |
          nim --version
          nimble --version
          gcc --version

          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
          nimble examples
.github/workflows/linters.yml (vendored, deleted, 27 lines)

@@ -1,27 +0,0 @@
name: Linters

on:
  pull_request:
  merge_group:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  nph:
    name: NPH
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 2 # In PR, has extra merge commit: ^1 = PR, ^2 = base

      - name: Check `nph` formatting
        uses: arnetheduck/nph-action@v1
        with:
          version: 0.6.1
          options: "examples libp2p tests tools *.nim*"
          fail: true
          suggest: true
.github/workflows/pr_lint.yml (vendored, deleted, 35 lines)

@@ -1,35 +0,0 @@
name: "Conventional Commits"

on:
  pull_request:
    types:
      - opened
      - edited
      - reopened
      - synchronize
jobs:
  main:
    name: Validate PR title
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
    steps:
      - uses: amannn/action-semantic-pull-request@v5
        id: lint_pr_title
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - uses: marocchino/sticky-pull-request-comment@v2
        # When the previous step fails, the workflow would stop. By adding this
        # condition you can continue the execution with the populated error message.
        if: always() && (steps.lint_pr_title.outputs.error_message != null)
        with:
          header: pr-title-lint-error
          message: |
            Pull request titles must follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/)

      # Delete a previous comment when the issue has been resolved
      - if: ${{ steps.lint_pr_title.outputs.error_message == null }}
        uses: marocchino/sticky-pull-request-comment@v2
        with:
          header: pr-title-lint-error
          delete: true
.gitignore (vendored, 13 lines changed)

@@ -18,9 +18,10 @@ nimble.develop
 nimble.paths
 go-libp2p-daemon/

-# Ignore all test build files in tests folder (auto generated when running tests),
-# by ignoring anything that does not have following file name scheme:
-# has extension or is Dockerfile...
-/tests/*
-!/tests/*.*
-!/tests/Dockerfile
+# Ignore all test build files in tests folder (auto generated when running tests).
+# First rule (`tests/**/test*[^.]*`) ignores all binaries: prefix "test" and no dot in the name.
+# Second and third rules un-ignore all files with an extension, and Dockerfiles,
+# because it appears that VS Code skips text search in some test files without these rules.
+tests/**/test*[^.]*
+!tests/**/*.*
+!tests/**/Dockerfile
@@ -149,6 +149,7 @@ The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-st
     <tr>
       <td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
       <td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
+      <td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
     </tr>
   </tbody>
 </table>
@@ -1,7 +1,7 @@
 mode = ScriptMode.Verbose

 packageName = "libp2p"
-version = "1.9.0"
+version = "1.10.0"
 author = "Status Research & Development GmbH"
 description = "LibP2P implementation"
 license = "MIT"

@@ -10,7 +10,7 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
 requires "nim >= 1.6.0",
   "nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
   "chronicles >= 0.10.2", "chronos >= 4.0.3", "metrics", "secp256k1", "stew#head",
-  "websock", "unittest2",
+  "websock", "unittest2", "results",
   "https://github.com/status-im/nim-quic.git#d54e8f0f2e454604b767fadeae243d95c30c383f"

 let nimc = getEnv("NIMC", "nim") # Which nim compiler to use

@@ -56,7 +56,6 @@ task testinterop, "Runs interop tests":
   runTest("testinterop")

 task testpubsub, "Runs pubsub tests":
-  runTest("pubsub/testgossipinternal")
   runTest("pubsub/testpubsub")

 task testfilter, "Run PKI filter test":
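The `results` package now ships separately from `stew`, which is why it is added here as its own dependency and why the `stew/results` imports throughout this diff migrate to a bare `import results`. A minimal sketch of the `Result` API those imports rely on (the parsing helper and its error strings are illustrative, not from the codebase):

import std/strutils
import results

# Illustrative helper: a fallible parse that returns a Result instead of raising.
proc parsePort(s: string): Result[uint16, string] =
  var v: int
  try:
    v = parseInt(s)
  except ValueError:
    return err("not a number: " & s)
  if v < 1 or v > 65535:
    return err("out of range: " & s)
  ok(uint16(v))

# `valueOr` unwraps the value or runs the block with `error` injected.
let port = parsePort("4001").valueOr:
  quit("bad port: " & error)
echo port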
@@ -262,6 +262,10 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
   let pkRes = PrivateKey.random(b.rng[])
   let seckey = b.privKey.get(otherwise = pkRes.expect("Expected default Private Key"))

+  if b.secureManagers.len == 0:
+    debug "no secure managers defined. Adding noise by default"
+    b.secureManagers.add(SecureProtocol.Noise)
+
   var secureManagerInstances: seq[Secure]
   if SecureProtocol.Noise in b.secureManagers:
     secureManagerInstances.add(Noise.new(b.rng, seckey).Secure)
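With this change, `build()` falls back to Noise when no secure manager was configured. A hedged sketch of what that enables (builder calls as commonly used in nim-libp2p; treat the exact chain as an assumption):

import chronos
import libp2p

proc main() {.async.} =
  # Note: no .withNoise() here; with this change build() adds Noise itself.
  let switch = SwitchBuilder
    .new()
    .withRng(newRng())
    .withAddress(MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet())
    .withTcpTransport()
    .withMplex()
    .build()
  await switch.start()
  echo "listening on ", switch.peerInfo.addrs
  await switch.stop()

waitFor main()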
@@ -12,8 +12,8 @@
 {.push raises: [].}

 import tables, hashes
-import multibase, multicodec, multihash, vbuffer, varint
-import stew/[base58, results]
+import multibase, multicodec, multihash, vbuffer, varint, results
+import stew/base58

 export results

@@ -41,6 +41,7 @@ const ContentIdsList = [
   multiCodec("dag-pb"),
   multiCodec("dag-cbor"),
   multiCodec("dag-json"),
+  multiCodec("libp2p-key"),
   multiCodec("git-raw"),
   multiCodec("eth-block"),
   multiCodec("eth-block-list"),

@@ -76,7 +76,7 @@ import nimcrypto/[rijndael, twofish, sha2, hash, hmac]
 # We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
 import nimcrypto/utils as ncrutils
 import ../utility
-import stew/results
+import results
 export results, utility

 # This is workaround for Nim's `import` bug

@@ -18,7 +18,7 @@
 {.push raises: [].}

 import bearssl/[ec, rand]
-import stew/results
+import results
 from stew/assign2 import assign
 export results

@@ -21,7 +21,8 @@ import bearssl/[ec, rand, hash]
 import nimcrypto/utils as ncrutils
 import minasn1
 export minasn1.Asn1Error
-import stew/[results, ctops]
+import stew/ctops
+import results

 import ../utility

@@ -18,7 +18,8 @@ import constants
 import nimcrypto/[hash, sha2]
 # We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
 import nimcrypto/utils as ncrutils
-import stew/[results, ctops]
+import results
+import stew/ctops

 import ../../utility

@@ -11,7 +11,8 @@

 {.push raises: [].}

-import stew/[endians2, results, ctops]
+import stew/[endians2, ctops]
+import results
 export results
 # We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
 import nimcrypto/utils as ncrutils

@@ -291,28 +292,6 @@ proc asn1EncodeBitString*(
     dest[2 + lenlen + bytelen - 1] = lastbyte and mask
   res

-proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte], value: T): int =
-  var v = value
-  if value <= cast[T](0x7F):
-    if len(dest) >= 1:
-      dest[0] = cast[byte](value)
-    1
-  else:
-    var s = 0
-    var res = 0
-    while v != 0:
-      v = v shr 7
-      s += 7
-      inc(res)
-    if len(dest) >= res:
-      var k = 0
-      while s != 0:
-        s -= 7
-        dest[k] = cast[byte](((value shr s) and cast[T](0x7F)) or cast[T](0x80))
-        inc(k)
-      dest[k - 1] = dest[k - 1] and 0x7F'u8
-    res
-
 proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
   ## Encode array of bytes ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and return
   ## number of bytes (octets) used.

@@ -665,9 +644,6 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
         return ok(field)
       else:
         return err(Asn1Error.NoSupport)
-
-      inclass = false
-      ttag = 0
     else:
       return err(Asn1Error.NoSupport)

@@ -17,7 +17,8 @@

 import bearssl/[rsa, rand, hash]
 import minasn1
-import stew/[results, ctops]
+import results
+import stew/ctops
 # We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
 import nimcrypto/utils as ncrutils

@@ -10,7 +10,7 @@
 {.push raises: [].}

 import bearssl/rand
-import secp256k1, stew/[byteutils, results], nimcrypto/[hash, sha2]
+import secp256k1, results, stew/byteutils, nimcrypto/[hash, sha2]

 export sha2, results, rand

@@ -10,7 +10,7 @@
 {.push raises: [].}

 import chronos
-import stew/results
+import results
 import peerid, stream/connection, transports/transport

 export results

@@ -9,8 +9,7 @@

 import std/tables

-import stew/results
-import pkg/[chronos, chronicles, metrics]
+import pkg/[chronos, chronicles, metrics, results]

 import
   dial,

@@ -10,7 +10,7 @@
 {.push raises: [].}

 import std/sequtils
-import chronos, chronicles, stew/results
+import chronos, chronicles, results
 import ../errors

 type

@@ -16,7 +16,8 @@
 {.push raises: [].}

 import tables
-import stew/[base32, base58, base64, results]
+import results
+import stew/[base32, base58, base64]

 type
   MultiBaseStatus* {.pure.} = enum

@@ -13,7 +13,7 @@

 import tables, hashes
 import vbuffer
-import stew/results
+import results
 export results

 ## List of officially supported codecs can BE found here

@@ -404,6 +404,7 @@ const MultiCodecList = [
   # IPLD formats
   ("dag-pb", 0x70),
   ("dag-cbor", 0x71),
+  ("libp2p-key", 0x72),
   ("dag-json", 0x129),
   ("git-raw", 0x78),
   ("eth-block", 0x90),
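`libp2p-key` (code 0x72) joins both the codec table above and the CID content-type whitelist earlier in this diff. A small sketch of looking codecs up (`multiCodec` is the lookup used throughout these files; the print behavior is an assumption):

import libp2p/multicodec

# Resolves the registered codec name; unknown names are rejected.
let keyCodec = multiCodec("libp2p-key")
assert keyCodec != multiCodec("dag-json")
echo keyCodec # codecs display as their registered name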
@@ -27,7 +27,7 @@ import tables
 import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
 import varint, vbuffer, multicodec, multibase
 import stew/base58
-import stew/results
+import results
 export results
 # This is workaround for Nim `import` bug.
 export sha, sha2, keccak, blake2, hash, utils

@@ -14,7 +14,8 @@

 import
   std/[hashes, strutils],
-  stew/[base58, results],
+  stew/base58,
+  results,
   chronicles,
   nimcrypto/utils,
   utility,

@@ -11,7 +11,7 @@
 {.push public.}

 import std/sequtils
-import pkg/[chronos, chronicles, stew/results]
+import pkg/[chronos, chronicles, results]
 import peerid, multiaddress, multicodec, crypto/crypto, routing_record, errors, utility

 export peerid, multiaddress, crypto, routing_record, errors, results

@@ -160,10 +160,10 @@ proc updatePeerInfo*(
     peerStore[KeyBook][info.peerId] = pubkey

   info.agentVersion.withValue(agentVersion):
-    peerStore[AgentBook][info.peerId] = agentVersion.string
+    peerStore[AgentBook][info.peerId] = agentVersion

   info.protoVersion.withValue(protoVersion):
-    peerStore[ProtoVersionBook][info.peerId] = protoVersion.string
+    peerStore[ProtoVersionBook][info.peerId] = protoVersion

   if info.protos.len > 0:
     peerStore[ProtoBook][info.peerId] = info.protos

@@ -11,7 +11,7 @@

 {.push raises: [].}

-import ../varint, ../utility, stew/[endians2, results]
+import ../varint, ../utility, stew/endians2, results
 export results, utility

 {.push public.}

@@ -9,8 +9,8 @@

 {.push raises: [].}

-import stew/[results, objects]
-import chronos, chronicles
+import stew/objects
+import results, chronos, chronicles
 import ../../../multiaddress, ../../../peerid, ../../../errors
 import ../../../protobuf/minprotobuf

@@ -10,7 +10,7 @@
 {.push raises: [].}

 import std/[sets, sequtils]
-import stew/results
+import results
 import chronos, chronicles
 import
   ../../protocol,

@@ -10,7 +10,8 @@
 {.push raises: [].}

 import macros
-import stew/[objects, results]
+import stew/objects
+import results
 import ../../../peerinfo, ../../../signed_envelope
 import ../../../protobuf/minprotobuf

@@ -13,8 +13,7 @@
 {.push raises: [].}

 import std/[sequtils, options, strutils, sugar]
-import stew/results
-import chronos, chronicles
+import results, chronos, chronicles
 import
   ../protobuf/minprotobuf,
   ../peerinfo,

@@ -9,7 +9,7 @@

 {.push raises: [].}

-import chronos, stew/results
+import chronos, results
 import ../stream/connection

 export results

@@ -589,7 +589,7 @@ method addValidator*(

 method removeValidator*(
     p: PubSub, topic: varargs[string], hook: ValidatorHandler
-) {.base, public.} =
+) {.base, public, gcsafe.} =
   for t in topic:
     p.validators.withValue(t, validators):
       validators[].excl(hook)
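`removeValidator` now carries the same `gcsafe` annotation as `addValidator`. A hedged usage sketch; the `(topic, message) -> Future[ValidationResult]` handler shape follows this module, while the pubsub instance itself is assumed:

import chronos
import libp2p/protocols/pubsub/pubsub
import libp2p/protocols/pubsub/rpc/messages

proc onlyNonEmpty(
    topic: string, message: Message
): Future[ValidationResult] {.async.} =
  # Accept only messages that actually carry a payload.
  if message.data.len > 0:
    return ValidationResult.Accept
  return ValidationResult.Reject

# assuming `p` is a started PubSub/GossipSub instance:
# p.addValidator("foobar", onlyNonEmpty)
# ...later, pass the same hook value to unregister it:
# p.removeValidator("foobar", onlyNonEmpty)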
@@ -20,7 +20,6 @@ import ../../peerid
 import ../../peerinfo
 import ../../protobuf/minprotobuf
 import ../../utility
-import ../../errors

 import secure, ../../crypto/[crypto, chacha20poly1305, curve25519, hkdf]

@@ -11,15 +11,14 @@
 {.push raises: [].}

 import std/[strformat]
-import stew/results
+import results
 import chronos, chronicles
 import
   ../protocol,
   ../../stream/streamseq,
   ../../stream/connection,
   ../../multiaddress,
-  ../../peerinfo,
-  ../../errors
+  ../../peerinfo

 export protocol, results

@@ -12,7 +12,7 @@
 {.push raises: [].}

 import std/[sequtils, times]
-import pkg/stew/results
+import pkg/results
 import multiaddress, multicodec, peerid, protobuf/minprotobuf, signed_envelope

 export peerid, multiaddress, signed_envelope

@@ -10,8 +10,8 @@
 {.push raises: [].}

 import std/sequtils
-import stew/[byteutils, results, endians2]
-import chronos, chronos/transports/[osnet, ipnet], chronicles
+import stew/endians2
+import chronos, chronos/transports/[osnet, ipnet], chronicles, results
 import ../[multiaddress, multicodec]
 import ../switch

@@ -73,7 +73,6 @@ proc new*(
     return T(networkInterfaceProvider: networkInterfaceProvider)

 proc getProtocolArgument*(ma: MultiAddress, codec: MultiCodec): MaResult[seq[byte]] =
-  var buffer: seq[byte]
   for item in ma:
     let
       ritem = ?item

@@ -12,7 +12,7 @@
 {.push raises: [].}

 import std/sugar
-import pkg/stew/[results, byteutils]
+import pkg/stew/byteutils, pkg/results
 import multicodec, crypto/crypto, protobuf/minprotobuf, vbuffer

 export crypto

@@ -10,7 +10,7 @@
 {.push raises: [].}

 import std/[strformat]
-import stew/results
+import results
 import chronos, chronicles, metrics
 import connection
 import ../utility

@@ -10,7 +10,7 @@
 {.push raises: [].}

 import std/[hashes, oids, strformat]
-import stew/results
+import results
 import chronicles, chronos, metrics
 import lpstream, ../multiaddress, ../peerinfo, ../errors

@@ -9,15 +9,12 @@

 import locks
 import tables
 import std/sequtils
 import stew/byteutils
 import pkg/chronos
 import pkg/chronicles
 import ./transport
 import ../multiaddress
 import ../stream/connection
 import ../stream/bridgestream
 import ../muxers/muxer

 type
   MemoryTransportError* = object of transport.TransportError

@@ -17,7 +17,6 @@ import ../multiaddress
 import ../stream/connection
 import ../crypto/crypto
 import ../upgrademngrs/upgrade
 import ../muxers/muxer
 import ./memorymanager

 export connection

@@ -959,4 +959,133 @@ void cert_free_key(cert_key_t key) {
  struct cert_key_s *k = (struct cert_key_s *)key;
  EVP_PKEY_free(k->pkey);
  free(k);
}

// Function to check if a Common Name is correct
// each label should have <= 63 characters
// the whole CN should have <= 253 characters
cert_error_t check_cn(const char *cn) {
  cert_error_t ret_code = CERT_SUCCESS;

  if (!cn || strlen(cn) == 0) {
    return CERT_ERROR_CN_EMPTY;
  }
  if (strlen(cn) > 253) {
    return CERT_ERROR_CN_TOO_LONG;
  }

  char *cn_copy = strdup(cn);
  char *cn_copy_orig = cn_copy;

  // trim trailing dot if any before checking
  size_t len = strlen(cn_copy);
  if (len > 0 && cn_copy[len - 1] == '.') {
    cn_copy[len - 1] = '\0';
  }

  char *label;
  char *last = NULL;
  char *ptr = cn_copy;

  while ((label = strtok(ptr, ".")) != NULL) {
    if (last && last + strlen(last) + 1 != label) {
      // empty label (e.g., "example..com")
      ret_code = CERT_ERROR_CN_EMPTY_LABEL;
      break;
    }
    if (strlen(label) > 63) {
      ret_code = CERT_ERROR_CN_LABEL_TOO_LONG;
      break;
    }
    last = label;
    ptr = NULL;
  }

  free(cn_copy_orig);
  return ret_code;
}

cert_error_t cert_signing_req(const char *cn, cert_key_t key, cert_buffer **csr_buffer) {
  cert_error_t ret_code = CERT_SUCCESS;
  X509_REQ *x509_req = NULL;
  X509_NAME *name = NULL;
  X509_EXTENSION *ext = NULL;
  X509V3_CTX ctx;
  STACK_OF(X509_EXTENSION) *exts = NULL;
  unsigned char *der = NULL;
  size_t der_len = 0;

  ret_code = check_cn(cn);
  if (ret_code != CERT_SUCCESS) {
    goto cleanup;
  }

  if (!key || !(key->pkey)) {
    ret_code = CERT_ERROR_NO_PUBKEY;
    goto cleanup;
  }
  EVP_PKEY *pkey = key->pkey;

  x509_req = X509_REQ_new();
  if (!x509_req) {
    ret_code = CERT_ERROR_X509_REQ_GEN;
    goto cleanup;
  }

  if (!X509_REQ_set_pubkey(x509_req, pkey)) {
    ret_code = CERT_ERROR_PUBKEY_SET;
    goto cleanup;
  }

  // Build SAN extension
  X509V3_set_ctx(&ctx, NULL, NULL, x509_req, NULL, 0);
  char san_str[258]; // max of 253 from cn + 4 "DNS:" + \0
  snprintf(san_str, sizeof(san_str), "DNS:%s", cn);

  ext = X509V3_EXT_conf_nid(NULL, &ctx, NID_subject_alt_name, san_str);
  if (!ext) {
    ret_code = CERT_ERROR_X509_SAN;
    goto cleanup;
  }

  exts = sk_X509_EXTENSION_new_null();
  if (!exts || !sk_X509_EXTENSION_push(exts, ext)) {
    ret_code = CERT_ERROR_X509_SAN;
    goto cleanup;
  }

  if (!X509_REQ_add_extensions(x509_req, exts)) {
    ret_code = CERT_ERROR_X509_SAN;
    goto cleanup;
  }

  if (!X509_REQ_sign(x509_req, pkey, EVP_sha256())) {
    ret_code = CERT_ERROR_SIGN;
    goto cleanup;
  }

  der_len = i2d_X509_REQ(x509_req, &der);
  if (der_len < 0) {
    ret_code = CERT_ERROR_X509_REQ_DER;
    goto cleanup;
  }

  ret_code = init_cert_buffer(csr_buffer, der, der_len);
  if (ret_code < 0) {
    goto cleanup;
  }

cleanup:
  if (exts)
    sk_X509_EXTENSION_pop_free(exts, X509_EXTENSION_free);
  if (x509_req)
    X509_REQ_free(x509_req);
  if (der)
    OPENSSL_free(der);
  if (ret_code != CERT_SUCCESS && csr_buffer) {
    cert_free_buffer(*csr_buffer);
    *csr_buffer = NULL;
  }

  return ret_code;
}

@@ -54,6 +54,14 @@ typedef int32_t cert_error_t;
 #define CERT_ERROR_PUBKEY_DER_CONV -41
 #define CERT_ERROR_INIT_KEYGEN -42
 #define CERT_ERROR_SET_CURVE -43
+#define CERT_ERROR_X509_REQ_GEN -44
+#define CERT_ERROR_X509_REQ_DER -45
+#define CERT_ERROR_NO_PUBKEY -46
+#define CERT_ERROR_X509_SAN -47
+#define CERT_ERROR_CN_TOO_LONG -48
+#define CERT_ERROR_CN_LABEL_TOO_LONG -49
+#define CERT_ERROR_CN_EMPTY_LABEL -50
+#define CERT_ERROR_CN_EMPTY -51

 typedef enum { CERT_FORMAT_DER = 0, CERT_FORMAT_PEM = 1 } cert_format_t;

@@ -184,4 +192,15 @@ void cert_free_key(cert_key_t key);
  */
 void cert_free_buffer(cert_buffer *buffer);

+/**
+ * Create a X.509 certificate request
+ *
+ * @param cn Domain for which we're requesting the certificate
+ * @param key Public key of the requesting client
+ * @param csr_buffer Pointer to the buffer that will be set to the CSR in DER format
+ *
+ * @return CERT_SUCCESS on successful execution, an error code otherwise
+ */
+cert_error_t cert_signing_req(const char *cn, cert_key_t key, cert_buffer **csr_buffer);
+
 #endif /* LIBP2P_CERT_H */

@@ -55,10 +55,10 @@ type EncodingFormat* = enum
 proc cert_format_t(self: EncodingFormat): cert_format_t =
   if self == EncodingFormat.DER: CERT_FORMAT_DER else: CERT_FORMAT_PEM

-proc toCertBuffer(self: seq[uint8]): cert_buffer =
+proc toCertBuffer*(self: seq[uint8]): cert_buffer =
   cert_buffer(data: self[0].unsafeAddr, length: self.len.csize_t)

-proc toSeq(self: ptr cert_buffer): seq[byte] =
+proc toSeq*(self: ptr cert_buffer): seq[byte] =
   toOpenArray(cast[ptr UncheckedArray[byte]](self.data), 0, self.length.int - 1).toSeq()

 # Initialize entropy and DRBG contexts at the module level

@@ -79,3 +79,7 @@ proc cert_free_buffer*(
 proc cert_free_parsed*(
     cert: ptr cert_parsed
 ): void {.cdecl, importc: "cert_free_parsed".}
+
+proc cert_signing_req*(
+    cn: cstring, key: cert_key_t, csr_buffer: ptr ptr cert_buffer
+): cert_error_t {.cdecl, importc: "cert_signing_req".}
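A hedged sketch of driving the new binding from Nim. Only `cert_signing_req`, `toSeq`, and `cert_free_buffer` come from this diff; the `key` value and the zero success code are assumptions (CERT_SUCCESS is 0 per convention in the C header):

# `key` is assumed to come from the existing certificate key API (cert_key_t).
var csr: ptr cert_buffer
let rc = cert_signing_req("example.org".cstring, key, addr csr)
if rc == 0: # CERT_SUCCESS
  let der = csr.toSeq() # copy the DER-encoded CSR into a Nim seq[byte]
  cert_free_buffer(csr)
  echo "CSR is ", der.len, " bytes"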
@@ -11,9 +11,8 @@

 {.push raises: [].}

 import std/strformat
-import chronos, chronicles, strutils
-import stew/[byteutils, endians2, results, objects]
+import chronos, chronicles, strutils, results
+import stew/[byteutils, endians2, objects]
 import ../multicodec
 import
   transport,

@@ -10,7 +10,8 @@
 {.push raises: [].}

 import std/[sets, options, macros]
-import stew/[byteutils, results]
+import stew/byteutils
+import results

 export results

@@ -18,7 +18,8 @@

 {.push raises: [].}

-import stew/[byteutils, leb128, results]
+import stew/[byteutils, leb128]
+import results
 export leb128, results

 type

@@ -1,15 +1,9 @@
 {.used.}

-import chronos, stew/[byteutils, results]
+import chronos, results, stew/byteutils
 import
-  ../libp2p/
-    [
-      stream/connection,
-      transports/transport,
-      upgrademngrs/upgrade,
-      multiaddress,
-      errors,
-    ]
+  ../libp2p/[stream/connection, transports/transport, upgrademngrs/upgrade, multiaddress]

 import ./helpers

@@ -12,7 +12,7 @@ import ../libp2p/stream/chronosstream
 import ../libp2p/muxers/mplex/lpchannel
 import ../libp2p/protocols/secure/secure
 import ../libp2p/switch
-import ../libp2p/nameresolving/[nameresolver, mockresolver]
+import ../libp2p/nameresolving/mockresolver

 import errorhelpers
 import utils/async_tests
@@ -1,925 +0,0 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.used.}
|
||||
|
||||
import std/[options, deques, sequtils, enumerate, algorithm]
|
||||
import stew/byteutils
|
||||
import ../../libp2p/builders
|
||||
import ../../libp2p/errors
|
||||
import ../../libp2p/crypto/crypto
|
||||
import ../../libp2p/stream/bufferstream
|
||||
import ../../libp2p/protocols/pubsub/[pubsub, gossipsub, mcache, mcache, peertable]
|
||||
import ../../libp2p/protocols/pubsub/rpc/[message, messages]
|
||||
import ../../libp2p/switch
|
||||
import ../../libp2p/muxers/muxer
|
||||
import ../../libp2p/protocols/pubsub/rpc/protobuf
|
||||
import utils
|
||||
|
||||
import ../helpers
|
||||
|
||||
proc noop(data: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
discard
|
||||
|
||||
const MsgIdSuccess = "msg id gen success"
|
||||
|
||||
suite "GossipSub internal":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "subscribe/unsubscribeAll":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(topic: string, data: seq[byte]): Future[void] {.gcsafe, raises: [].} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# test via dynamic dispatch
|
||||
gossipSub.PubSub.subscribe(topic, handler)
|
||||
|
||||
check:
|
||||
gossipSub.topics.contains(topic)
|
||||
gossipSub.gossipsub[topic].len() > 0
|
||||
gossipSub.mesh[topic].len() > 0
|
||||
|
||||
# test via dynamic dispatch
|
||||
gossipSub.PubSub.unsubscribeAll(topic)
|
||||
|
||||
check:
|
||||
topic notin gossipSub.topics # not in local topics
|
||||
topic notin gossipSub.mesh # not in mesh
|
||||
topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "topic params":
|
||||
let params = TopicParams.init()
|
||||
params.validateParameters().tryGet()
|
||||
|
||||
asyncTest "`rebalanceMesh` Degree Lo":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len == gossipSub.parameters.d
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "rebalanceMesh - bad peers":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
var scoreLow = -11'f64
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
peer.score = scoreLow
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
scoreLow += 1.0
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# low score peers should not be in mesh, that's why the count must be 4
|
||||
check gossipSub.mesh[topic].len == 4
|
||||
for peer in gossipSub.mesh[topic]:
|
||||
check peer.score >= 0.0
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`rebalanceMesh` Degree Hi":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
check gossipSub.mesh[topic].len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len ==
|
||||
gossipSub.parameters.d + gossipSub.parameters.dScore
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`replenishFanout` Degree Lo":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
var peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
check gossipSub.gossipsub[topic].len == 15
|
||||
gossipSub.replenishFanout(topic)
|
||||
check gossipSub.fanout[topic].len == gossipSub.parameters.d
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`dropFanoutPeers` drop expired fanout topics":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
|
||||
await sleepAsync(5.millis) # allow the topic to expire
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 6:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
|
||||
check gossipSub.fanout[topic].len == gossipSub.parameters.d
|
||||
|
||||
gossipSub.dropFanoutPeers()
|
||||
check topic notin gossipSub.fanout
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic1 = "foobar1"
|
||||
let topic2 = "foobar2"
|
||||
gossipSub.topicParams[topic1] = TopicParams.init()
|
||||
gossipSub.topicParams[topic2] = TopicParams.init()
|
||||
gossipSub.fanout[topic1] = initHashSet[PubSubPeer]()
|
||||
gossipSub.fanout[topic2] = initHashSet[PubSubPeer]()
|
||||
gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
|
||||
gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
|
||||
await sleepAsync(5.millis) # allow the topic to expire
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 6:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
gossipSub.fanout[topic1].incl(peer)
|
||||
gossipSub.fanout[topic2].incl(peer)
|
||||
|
||||
check gossipSub.fanout[topic1].len == gossipSub.parameters.d
|
||||
check gossipSub.fanout[topic2].len == gossipSub.parameters.d
|
||||
|
||||
gossipSub.dropFanoutPeers()
|
||||
check topic1 notin gossipSub.fanout
|
||||
check topic2 in gossipSub.fanout
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
var conns = newSeq[Connection]()
|
||||
|
||||
# generate mesh and fanout peers
|
||||
for i in 0 ..< 30:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
if i mod 2 == 0:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
else:
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
# generate gossipsub (free standing) peers
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
inc seqno
|
||||
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
check gossipSub.fanout[topic].len == 15
|
||||
check gossipSub.mesh[topic].len == 15
|
||||
check gossipSub.gossipsub[topic].len == 15
|
||||
|
||||
let peers = gossipSub.getGossipPeers()
|
||||
check peers.len == gossipSub.parameters.d
|
||||
for p in peers.keys:
|
||||
check not gossipSub.fanout.hasPeerId(topic, p.peerId)
|
||||
check not gossipSub.mesh.hasPeerId(topic, p.peerId)
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
discard
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 30:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
if i mod 2 == 0:
|
||||
gossipSub.fanout[topic].incl(peer)
|
||||
else:
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
inc seqno
|
||||
let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
|
||||
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
|
||||
|
||||
let peers = gossipSub.getGossipPeers()
|
||||
check peers.len == gossipSub.parameters.d
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
  asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      discard

    let topic = "foobar"
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      if i mod 2 == 0:
        gossipSub.mesh[topic].incl(peer)
        gossipSub.grafted(peer, topic)
      else:
        gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      inc seqno
      let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let peers = gossipSub.getGossipPeers()
    check peers.len == gossipSub.parameters.d

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      discard

    let topic = "foobar"
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      if i mod 2 == 0:
        gossipSub.mesh[topic].incl(peer)
        gossipSub.grafted(peer, topic)
      else:
        gossipSub.fanout[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      inc seqno
      let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let peers = gossipSub.getGossipPeers()
    check peers.len == 0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "Drop messages of topics without subscription":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      check false

    let topic = "foobar"
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      inc seqno
      let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
      await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))

    check gossipSub.mcache.msgs.len == 0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "Disconnect bad peers":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    gossipSub.parameters.disconnectBadPeers = true
    gossipSub.parameters.appSpecificWeight = 1.0
    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      check false

    let topic = "foobar"
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      peer.handler = handler
      peer.appScore = gossipSub.parameters.graylistThreshold - 1
      gossipSub.gossipsub.mgetOrPut(topic, initHashSet[PubSubPeer]()).incl(peer)
      gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))

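    # With appSpecificWeight = 1.0 the app-specific term alone puts every
    # peer's score just below graylistThreshold, so updateScores should mark
    # them all as bad and, with disconnectBadPeers set, kick them.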
    gossipSub.updateScores()

    await sleepAsync(100.millis)

    check:
      # test our disconnect mechanics
      gossipSub.gossipsub.peers(topic) == 0
      # also ensure we properly clean up the peersInIP table
      gossipSub.peersInIP.len == 0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "subscription limits":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    gossipSub.topicsHigh = 10

    var tooManyTopics: seq[string]
    for i in 0 .. gossipSub.topicsHigh + 10:
      tooManyTopics &= "topic" & $i
    let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)

    let conn = TestBufferStream.new(noop)
    let peerId = randomPeerId()
    conn.peerId = peerId
    let peer = gossipSub.getPubSubPeer(peerId)

    await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))

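    # 21 subscriptions arrived but only the first topicsHigh (10) are
    # accepted; exceeding the limit counts as misbehaviour, so the peer's
    # behaviourPenalty must rise above zero.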
    check:
      gossipSub.gossipsub.len == gossipSub.topicsHigh
      peer.behaviourPenalty > 0.0

    await conn.close()
    await gossipSub.switch.stop()

  asyncTest "invalid message bytes":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    let peerId = randomPeerId()
    let peer = gossipSub.getPubSubPeer(peerId)

    expect(CatchableError):
      await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])

    await gossipSub.switch.stop()

  asyncTest "rebalanceMesh fail due to backoff":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      gossipSub.gossipsub[topic].incl(peer)

      gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
        peerId, Moment.now() + 1.hours
      )
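      # While a peer is backing off for a topic it must not be re-grafted
      # before the deadline; a GRAFT received during backoff is answered with
      # a PRUNE (checked below) and rebalanceMesh keeps skipping the peer.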
      let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
      # there must be a control prune due to violation of backoff
      check prunes.len != 0

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    # expect 0 since they are all backing off
    check gossipSub.mesh[topic].len == 0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "rebalanceMesh fail due to backoff - remote":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      gossipSub.gossipsub[topic].incl(peer)
      gossipSub.mesh[topic].incl(peer)

    check gossipSub.peers.len == 15
    gossipSub.rebalanceMesh(topic)
    check gossipSub.mesh[topic].len != 0

    for i in 0 ..< 15:
      let peerId = conns[i].peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      gossipSub.handlePrune(
        peer,
        @[
          ControlPrune(
            topicID: topic,
            peers: @[],
            backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
          )
        ],
      )

    # expect topic cleaned up since they are all pruned
    check topic notin gossipSub.mesh

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "rebalanceMesh Degree Hi - audit scenario":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.parameters.dScore = 4
    gossipSub.parameters.d = 6
    gossipSub.parameters.dOut = 3
    gossipSub.parameters.dHigh = 12
    gossipSub.parameters.dLow = 4

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0 ..< 6:
      let conn = TestBufferStream.new(noop)
      conn.transportDir = Direction.In
      conns &= conn
      let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.score = 40.0
      peer.sendConn = conn
      gossipSub.grafted(peer, topic)
      gossipSub.mesh[topic].incl(peer)

    for i in 0 ..< 7:
      let conn = TestBufferStream.new(noop)
      conn.transportDir = Direction.Out
      conns &= conn
      let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.score = 10.0
      peer.sendConn = conn
      gossipSub.grafted(peer, topic)
      gossipSub.mesh[topic].incl(peer)

    check gossipSub.mesh[topic].len == 13
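    # 6 inbound + 7 outbound = 13 mesh peers, which is above dHigh (12), so
    # the next rebalanceMesh must prune back towards d (6) while keeping the
    # best-scoring peers and at least dOut (3) outbound slots.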
    gossipSub.rebalanceMesh(topic)
    # ensure we are above dlow
    check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
    var outbound = 0
    for peer in gossipSub.mesh[topic]:
      if peer.sendConn.transportDir == Direction.Out:
        inc outbound
    # ensure we give priority and keep at least dOut outbound peers
    check outbound >= gossipSub.parameters.dOut

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "handleIHave/Iwant tests":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      check false

    proc handler2(topic: string, data: seq[byte]) {.async.} =
      discard

    let topic = "foobar"
    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.subscribe(topic, handler2)

    # Instantiates 30 peers and connects all of them to the previously defined `gossipSub`
    for i in 0 ..< 30:
      # Define a new connection
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      # Graft the peer and add it to the `gossipSub.mesh` table for the topic
      gossipSub.grafted(peer, topic)
      gossipSub.mesh[topic].incl(peer)

    # Peers with no budget should not request messages
    block:
      # Define a new connection
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      # Add message to `gossipSub`'s message cache
      let id = @[0'u8, 1, 2, 3]
      gossipSub.mcache.put(id, Message())
      peer.sentIHaves[^1].incl(id)
      # Build an IHAVE message that contains the same message ID three times
      let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
      # Given the peer has no budget to request messages
      peer.iHaveBudget = 0
      # When a peer makes an IHAVE request for a message that `gossipSub` has
      let iwants = gossipSub.handleIHave(peer, @[msg])
      # Then `gossipSub` should not generate an IWant message for it
      check:
        iwants.messageIDs.len == 0

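    # Note: iHaveBudget is assumed to be the per-heartbeat cap on how many
    # IHAVE-advertised message IDs a peer may request back via IWANT; with it
    # forced to 0, the IHAVE above is ignored entirely.
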
    # Peers with budget should request messages. If ids are repeated, only one request should be generated
    block:
      # Define a new connection
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      let id = @[0'u8, 1, 2, 3]
      # Build an IHAVE message that contains the same message ID three times
      let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
      # Given the budget is not 0 (because it's not been overridden)
      # When a peer makes an IHAVE request for a message that `gossipSub` does not have
      let iwants = gossipSub.handleIHave(peer, @[msg])
      # Then `gossipSub` should generate an IWant message for the message
      check:
        iwants.messageIDs.len == 1

    # An IWANT that repeats the same message ID should produce a single reply
    block:
      # Define a new connection
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      # Add message to `gossipSub`'s message cache
      let id = @[0'u8, 1, 2, 3]
      gossipSub.mcache.put(id, Message())
      peer.sentIHaves[^1].incl(id)
      # Build an IWANT message that contains the same message ID three times
      let msg = ControlIWant(messageIDs: @[id, id, id])
      # When a peer makes an IWANT request for a message that `gossipSub` has
      let genmsg = gossipSub.handleIWant(peer, @[msg])
      # Then `gossipSub` should return the message
      check:
        genmsg.len == 1

    check gossipSub.mcache.msgs.len == 1

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  proc setupTest(): Future[
      tuple[
        gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]
      ]
  ] {.async.} =
    let nodes = generateNodes(2, gossip = true, verifySignature = false)
    discard await allFinished(nodes[0].switch.start(), nodes[1].switch.start())

    await nodes[1].switch.connect(
      nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
    )

    var receivedMessages = new(HashSet[seq[byte]])

    proc handlerA(topic: string, data: seq[byte]) {.async.} =
      receivedMessages[].incl(data)

    proc handlerB(topic: string, data: seq[byte]) {.async.} =
      discard

    nodes[0].subscribe("foobar", handlerA)
    nodes[1].subscribe("foobar", handlerB)
    await waitSubGraph(nodes, "foobar")

    var gossip0: GossipSub = GossipSub(nodes[0])
    var gossip1: GossipSub = GossipSub(nodes[1])

    return (gossip0, gossip1, receivedMessages)

  proc teardownTest(gossip0: GossipSub, gossip1: GossipSub) {.async.} =
    await allFuturesThrowing(gossip0.switch.stop(), gossip1.switch.stop())

  proc createMessages(
      gossip0: GossipSub, gossip1: GossipSub, size1: int, size2: int
  ): tuple[iwantMessageIds: seq[MessageId], sentMessages: HashSet[seq[byte]]] =
    var iwantMessageIds = newSeq[MessageId]()
    var sentMessages = initHashSet[seq[byte]]()

    for i, size in enumerate([size1, size2]):
      let data = newSeqWith(size, i.byte)
      sentMessages.incl(data)

      let msg =
        Message.init(gossip1.peerInfo.peerId, data, "foobar", some(uint64(i + 1)))
      let iwantMessageId = gossip1.msgIdProvider(msg).expect(MsgIdSuccess)
      iwantMessageIds.add(iwantMessageId)
      gossip1.mcache.put(iwantMessageId, msg)

      let peer = gossip1.peers[(gossip0.peerInfo.peerId)]
      peer.sentIHaves[^1].incl(iwantMessageId)

    return (iwantMessageIds, sentMessages)

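  # The IWANT-reply tests below call createMessages with different size1 and
  # size2 values relative to maxMessageSize, to exercise how gossip1's IWANT
  # replies are split across RPCs, sent whole, or dropped.
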
  asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
    # This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
    # Expected: Both messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()

    let messageSize = gossip1.maxMessageSize div 2 + 1
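    # Each message fits individually, but 2 * (maxMessageSize div 2 + 1) >
    # maxMessageSize, so a single IWANT reply cannot carry both.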
    let (iwantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, messageSize, messageSize)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: iwantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    checkUntilTimeout:
      receivedMessages[] == sentMessages
    check receivedMessages[].len == 2

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
    # This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
    # Expected: No messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()

    let messageSize = gossip1.maxMessageSize + 10
    let (bigIWantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, messageSize, messageSize)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    await sleepAsync(300.milliseconds)
    checkUntilTimeout:
      receivedMessages[].len == 0

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
    # This test checks if two messages, both below the maxSize, are correctly processed and sent.
    # Expected: Both messages should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
    let size1 = gossip1.maxMessageSize div 2
    let size2 = gossip1.maxMessageSize div 3
    let (bigIWantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, size1, size2)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    checkUntilTimeout:
      receivedMessages[] == sentMessages
    check receivedMessages[].len == 2

    await teardownTest(gossip0, gossip1)

  asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
    # This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
    # Expected: Only the smaller message should be received.
    let (gossip0, gossip1, receivedMessages) = await setupTest()
    let maxSize = gossip1.maxMessageSize
    let size1 = maxSize div 2
    let size2 = maxSize + 10
    let (bigIWantMessageIds, sentMessages) =
      createMessages(gossip0, gossip1, size1, size2)

    gossip1.broadcast(
      gossip1.mesh["foobar"],
      RPCMsg(
        control: some(
          ControlMessage(
            ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
          )
        )
      ),
      isHighPriority = false,
    )

    var smallestSet: HashSet[seq[byte]]
    let seqs = toSeq(sentMessages)
    if seqs[0] < seqs[1]:
      smallestSet.incl(seqs[0])
    else:
      smallestSet.incl(seqs[1])

    checkUntilTimeout:
      receivedMessages[] == smallestSet
    check receivedMessages[].len == 1

    await teardownTest(gossip0, gossip1)

File diff suppressed because it is too large
@@ -1,394 +0,0 @@

# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import sequtils, options, tables, sets
import chronos, stew/byteutils, chronicles
import
  utils,
  ../../libp2p/[
    errors,
    peerid,
    peerinfo,
    stream/connection,
    stream/bufferstream,
    crypto/crypto,
    protocols/pubsub/pubsub,
    protocols/pubsub/gossipsub,
    protocols/pubsub/pubsubpeer,
    protocols/pubsub/peertable,
    protocols/pubsub/rpc/messages,
  ],
  ../utils/[futures, async_tests],
  ../helpers

template tryPublish(
    call: untyped, require: int, wait = 10.milliseconds, timeout = 10.seconds
): untyped =
  var
    expiration = Moment.now() + timeout
    pubs = 0
  while pubs < require and Moment.now() < expiration:
    pubs = pubs + call
    await sleepAsync(wait)

  doAssert pubs >= require, "Failed to publish!"

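# Usage (see the tests below): `tryPublish await node.publish(topic, msg), 1`
# re-invokes the publish expression every `wait` until it has reached at
# least `require` peers in total, failing the test once `timeout` elapses.
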
suite "GossipSub":
  teardown:
    checkTrackers()

  asyncTest "e2e - GossipSub with multiple peers - control deliver (sparse)":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)

    startNodesAndDeferStop(nodes)
    await connectNodesSparse(nodes)

    var seen: Table[string, int]
    var seenFut = newFuture[void]()
    for i in 0 ..< nodes.len:
      let dialer = nodes[i]
      let dgossip = GossipSub(dialer)
      dgossip.parameters.dHigh = 2
      dgossip.parameters.dLow = 1
      dgossip.parameters.d = 1
      dgossip.parameters.dOut = 1
      var handler: TopicHandler
      closureScope:
        var peerName = $dialer.peerInfo.peerId
        handler = proc(topic: string, data: seq[byte]) {.async.} =
          seen.mgetOrPut(peerName, 0).inc()
          info "seen up", count = seen.len
          check topic == "foobar"
          if not seenFut.finished() and seen.len >= runs:
            seenFut.complete()

      dialer.subscribe("foobar", handler)
      await waitSub(nodes[0], dialer, "foobar")

    # we want to test ping-pong deliveries via IHAVE/IWANT control messages,
    # so we publish from a single node only
    let publishedTo = nodes[0].publish(
        "foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)
      ).await
    check:
      publishedTo != 0
      publishedTo != runs

    await wait(seenFut, 5.minutes)
    check:
      seen.len >= runs
    for k, v in seen.pairs:
      check:
        v >= 1

  asyncTest "GossipSub invalid topic subscription":
    var handlerFut = newFuture[bool]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      handlerFut.complete(true)

    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)

    # We must subscribe before setting the validator
    nodes[0].subscribe("foobar", handler)

    var gossip = GossipSub(nodes[0])
    let invalidDetected = newFuture[void]()
    gossip.subscriptionValidator = proc(topic: string): bool =
      if topic == "foobar":
        try:
          invalidDetected.complete()
        except:
          raise newException(Defect, "Exception during subscriptionValidator")
        false
      else:
        true

    await connectNodesStar(nodes)

    nodes[1].subscribe("foobar", handler)

    await invalidDetected.wait(10.seconds)

  asyncTest "GossipSub test directPeers":
    let nodes = generateNodes(2, gossip = true)
    startNodesAndDeferStop(nodes)

    await GossipSub(nodes[0]).addDirectPeer(
      nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
    )

    let invalidDetected = newFuture[void]()
    GossipSub(nodes[0]).subscriptionValidator = proc(topic: string): bool =
      if topic == "foobar":
        try:
          invalidDetected.complete()
        except:
          raise newException(Defect, "Exception during subscriptionValidator")
        false
      else:
        true

    # DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
    ### await connectNodesStar(nodes)

    proc handler(topic: string, data: seq[byte]) {.async.} =
      discard

    nodes[1].subscribe("foobar", handler)

    await invalidDetected.wait(10.seconds)

  asyncTest "GossipSub directPeers: always forward messages":
    let nodes = generateNodes(3, gossip = true)

    startNodesAndDeferStop(nodes)

    await GossipSub(nodes[0]).addDirectPeer(
      nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
    )
    await GossipSub(nodes[1]).addDirectPeer(
      nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
    )
    await GossipSub(nodes[1]).addDirectPeer(
      nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs
    )
    await GossipSub(nodes[2]).addDirectPeer(
      nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
    )

    var handlerFut = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      handlerFut.complete()

    proc noop(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"

    nodes[0].subscribe("foobar", noop)
    nodes[1].subscribe("foobar", noop)
    nodes[2].subscribe("foobar", handler)

    tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1

    await handlerFut.wait(2.seconds)

    # direct peers are never grafted, so no mesh should have formed
    check "foobar" notin GossipSub(nodes[0]).mesh
    check "foobar" notin GossipSub(nodes[1]).mesh
    check "foobar" notin GossipSub(nodes[2]).mesh

  asyncTest "GossipSub directPeers: don't kick direct peer with low score":
    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)

    await GossipSub(nodes[0]).addDirectPeer(
      nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
    )
    await GossipSub(nodes[1]).addDirectPeer(
      nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
    )

    GossipSub(nodes[1]).parameters.disconnectBadPeers = true
    GossipSub(nodes[1]).parameters.graylistThreshold = 100000

    var handlerFut = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      handlerFut.complete()

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1

    await handlerFut

    GossipSub(nodes[1]).updateScores()
    # the peer's score is well below the (deliberately huge) graylist threshold
    check:
      GossipSub(nodes[1]).peerStats[nodes[0].switch.peerInfo.peerId].score <
        GossipSub(nodes[1]).parameters.graylistThreshold
    GossipSub(nodes[1]).updateScores()

    handlerFut = newFuture[void]()
    tryPublish await nodes[0].publish("foobar", toBytes("hellow2")), 1

    # Without directPeers, this would fail
    await handlerFut.wait(1.seconds)

  asyncTest "GossipSub directPeers: send message to unsubscribed direct peer":
    # Given 2 nodes
    let
      numberOfNodes = 2
      nodes = generateNodes(numberOfNodes, gossip = true)
      node0 = nodes[0]
      node1 = nodes[1]
      g0 = GossipSub(node0)
      g1 = GossipSub(node1)

    startNodesAndDeferStop(nodes)

    # With message observers
    var
      messageReceived0 = newFuture[bool]()
      messageReceived1 = newFuture[bool]()

    proc observer0(peer: PubSubPeer, msgs: var RPCMsg) =
      for message in msgs.messages:
        if message.topic == "foobar":
          messageReceived0.complete(true)

    proc observer1(peer: PubSubPeer, msgs: var RPCMsg) =
      for message in msgs.messages:
        if message.topic == "foobar":
          messageReceived1.complete(true)

    node0.addObserver(PubSubObserver(onRecv: observer0))
    node1.addObserver(PubSubObserver(onRecv: observer1))

    # Connect them as direct peers
    await g0.addDirectPeer(node1.peerInfo.peerId, node1.peerInfo.addrs)
    await g1.addDirectPeer(node0.peerInfo.peerId, node0.peerInfo.addrs)

    # When node 0 sends a message
    let message = "Hello!".toBytes()
    let publishResult = await node0.publish("foobar", message)

    # None should receive the message as they are not subscribed to the topic
    let results = await waitForStates(@[messageReceived0, messageReceived1])
    check:
      publishResult == 0
      results[0].isPending()
      results[1].isPending()

  asyncTest "GossipSub peers disconnections mechanics":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    var seen: Table[string, int]
    var seenFut = newFuture[void]()
    for i in 0 ..< nodes.len:
      let dialer = nodes[i]
      var handler: TopicHandler
      closureScope:
        var peerName = $dialer.peerInfo.peerId
        handler = proc(topic: string, data: seq[byte]) {.async.} =
          seen.mgetOrPut(peerName, 0).inc()
          check topic == "foobar"
          if not seenFut.finished() and seen.len >= runs:
            seenFut.complete()

      dialer.subscribe("foobar", handler)

    await waitSubGraph(nodes, "foobar")

    # ensure peer stats are stored and retained properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

    tryPublish await wait(
      nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
      1.minutes,
    ), 1, 5.seconds, 3.minutes

    await wait(seenFut, 5.minutes)
    check:
      seen.len >= runs
    for k, v in seen.pairs:
      check:
        v >= 1

    for node in nodes:
      var gossip = GossipSub(node)
      check:
        "foobar" in gossip.gossipsub
        gossip.fanout.len == 0
        gossip.mesh["foobar"].len > 0

    # Removing some subscriptions

    for i in 0 ..< runs:
      if i mod 3 != 0:
        nodes[i].unsubscribeAll("foobar")

    # Waiting 2 heartbeats

    for _ in 0 .. 1:
      let evnt = newAsyncEvent()
      GossipSub(nodes[0]).heartbeatEvents &= evnt
      await evnt.wait()

    # ensure peer stats are stored and retained properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

    # Adding subscriptions again

    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"

    for i in 0 ..< runs:
      if i mod 3 != 0:
        nodes[i].subscribe("foobar", handler)

    # Waiting 2 heartbeats

    for _ in 0 .. 1:
      let evnt = newAsyncEvent()
      GossipSub(nodes[0]).heartbeatEvents &= evnt
      await evnt.wait()

    # ensure peer stats are stored and retained properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

  asyncTest "GossipSub scoring - decayInterval":
    let nodes = generateNodes(2, gossip = true)

    var gossip = GossipSub(nodes[0])
    # macOS has some nasty jitter when sleeping (up to 7 ms),
    # so we need some pretty long sleeps to be safe here
    gossip.parameters.decayInterval = 300.milliseconds

    startNodesAndDeferStop(nodes)

    var handlerFut = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      handlerFut.complete()

    await connectNodesStar(nodes)

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    tryPublish await nodes[0].publish("foobar", toBytes("hello")), 1

    await handlerFut

    gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries =
      100
    gossip.topicParams["foobar"].meshMessageDeliveriesDecay = 0.9
    await sleepAsync(1500.milliseconds)

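    # 1500 ms at a 300 ms decayInterval gives ~5 decay rounds, and
    # 100 * 0.9^4 ≈ 65.6, 100 * 0.9^5 ≈ 59.0, 100 * 0.9^6 ≈ 53.1,
    # which is why the acceptance window below is 50.0 .. 66.0.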
    # We should have decayed 5 times, though allowing 4..6
    check:
      gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries in
        50.0 .. 66.0

196
tests/pubsub/testgossipsubfanout.nim
Normal file
@@ -0,0 +1,196 @@

# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils]
import stew/byteutils
import chronicles
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../helpers

suite "GossipSub Fanout Management":
  teardown:
    checkTrackers()

  asyncTest "`replenishFanout` Degree Lo":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      discard

    let topic = "foobar"
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      var peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      gossipSub.gossipsub[topic].incl(peer)

    check gossipSub.gossipsub[topic].len == 15
    gossipSub.replenishFanout(topic)
    check gossipSub.fanout[topic].len == gossipSub.parameters.d

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "`dropFanoutPeers` drop expired fanout topics":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      discard

    let topic = "foobar"
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
    gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
    await sleepAsync(5.millis) # allow the topic to expire

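    # dropFanoutPeers evicts fanout entries whose lastFanoutPubSub deadline
    # has passed; backdating the deadline above triggers that path without
    # waiting for the real fanout TTL.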
    var conns = newSeq[Connection]()
    for i in 0 ..< 6:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      gossipSub.fanout[topic].incl(peer)

    check gossipSub.fanout[topic].len == gossipSub.parameters.d

    gossipSub.dropFanoutPeers()
    check topic notin gossipSub.fanout

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      discard

    let topic1 = "foobar1"
    let topic2 = "foobar2"
    gossipSub.topicParams[topic1] = TopicParams.init()
    gossipSub.topicParams[topic2] = TopicParams.init()
    gossipSub.fanout[topic1] = initHashSet[PubSubPeer]()
    gossipSub.fanout[topic2] = initHashSet[PubSubPeer]()
    gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
    gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
    await sleepAsync(5.millis) # allow the first topic to expire

    var conns = newSeq[Connection]()
    for i in 0 ..< 6:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      gossipSub.fanout[topic1].incl(peer)
      gossipSub.fanout[topic2].incl(peer)

    check gossipSub.fanout[topic1].len == gossipSub.parameters.d
    check gossipSub.fanout[topic2].len == gossipSub.parameters.d

    gossipSub.dropFanoutPeers()
    check topic1 notin gossipSub.fanout
    check topic2 in gossipSub.fanout

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "e2e - GossipSub send over fanout A -> B":
    var passed = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      passed.complete()

    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    nodes[1].subscribe("foobar", handler)
    await waitSub(nodes[0], nodes[1], "foobar")

    var observed = 0
    let
      obs1 = PubSubObserver(
        onRecv: proc(peer: PubSubPeer, msgs: var RPCMsg) =
          inc observed
      )
      obs2 = PubSubObserver(
        onSend: proc(peer: PubSubPeer, msgs: var RPCMsg) =
          inc observed
      )

    nodes[1].addObserver(obs1)
    nodes[0].addObserver(obs2)

    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1

    var gossip1: GossipSub = GossipSub(nodes[0])
    var gossip2: GossipSub = GossipSub(nodes[1])

    check:
      "foobar" in gossip1.gossipsub
      gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
      not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)

    await passed.wait(2.seconds)

    check observed == 2

  asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
    var passed = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      passed.complete()

    let nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 10.minutes)

    startNodesAndDeferStop(nodes)

    GossipSub(nodes[1]).parameters.d = 0
    GossipSub(nodes[1]).parameters.dHigh = 0
    GossipSub(nodes[1]).parameters.dLow = 0

    await connectNodesStar(nodes)

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    let gsNode = GossipSub(nodes[1])
    checkUntilTimeout:
      gsNode.mesh.getOrDefault("foobar").len == 0
      GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0
      (
        GossipSub(nodes[0]).gossipsub.getOrDefault("foobar").len == 1 or
        GossipSub(nodes[0]).fanout.getOrDefault("foobar").len == 1
      )

    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1

    check:
      GossipSub(nodes[0]).fanout.getOrDefault("foobar").len > 0
      GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0

    await passed.wait(2.seconds)

    trace "test done, stopping..."

844
tests/pubsub/testgossipsubgossip.nim
Normal file
@@ -0,0 +1,844 @@

# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils]
import stew/byteutils
import chronicles
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../libp2p/protocols/pubsub/rpc/[message]
import ../helpers, ../utils/[futures]

const MsgIdSuccess = "msg id gen success"

suite "GossipSub Gossip Protocol":
  teardown:
    checkTrackers()

  asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      discard

    let topic = "foobar"
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    var conns = newSeq[Connection]()

    # generate mesh and fanout peers
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      if i mod 2 == 0:
        gossipSub.fanout[topic].incl(peer)
      else:
        gossipSub.grafted(peer, topic)
        gossipSub.mesh[topic].incl(peer)

    # generate gossipsub (free standing) peers
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      inc seqno
      let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    check gossipSub.fanout[topic].len == 15
    check gossipSub.mesh[topic].len == 15
    check gossipSub.gossipsub[topic].len == 15

    let peers = gossipSub.getGossipPeers()
    check peers.len == gossipSub.parameters.d
    for p in peers.keys:
      check not gossipSub.fanout.hasPeerId(topic, p.peerId)
      check not gossipSub.mesh.hasPeerId(topic, p.peerId)

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      discard

    let topic = "foobar"
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      if i mod 2 == 0:
        gossipSub.fanout[topic].incl(peer)
      else:
        gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      inc seqno
      let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let peers = gossipSub.getGossipPeers()
    check peers.len == gossipSub.parameters.d

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      discard

    let topic = "foobar"
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      if i mod 2 == 0:
        gossipSub.mesh[topic].incl(peer)
        gossipSub.grafted(peer, topic)
      else:
        gossipSub.gossipsub[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      inc seqno
      let msg = Message.init(peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let peers = gossipSub.getGossipPeers()
    check peers.len == gossipSub.parameters.d

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      discard

    let topic = "foobar"
    gossipSub.topicParams[topic] = TopicParams.init()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      if i mod 2 == 0:
        gossipSub.mesh[topic].incl(peer)
        gossipSub.grafted(peer, topic)
      else:
        gossipSub.fanout[topic].incl(peer)

    # generate messages
    var seqno = 0'u64
    for i in 0 .. 5:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      inc seqno
      let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
      gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)

    let peers = gossipSub.getGossipPeers()
    check peers.len == 0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "handleIHave/Iwant tests":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      check false

    proc handler2(topic: string, data: seq[byte]) {.async.} =
      discard

    let topic = "foobar"
    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.subscribe(topic, handler2)

    # Instantiates 30 peers and connects all of them to the previously defined `gossipSub`
    for i in 0 ..< 30:
      # Define a new connection
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.handler = handler
      # Graft the peer and add it to the `gossipSub.mesh` table for the topic
      gossipSub.grafted(peer, topic)
      gossipSub.mesh[topic].incl(peer)

    # Peers with no budget should not request messages
    block:
      # Define a new connection
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      # Add message to `gossipSub`'s message cache
      let id = @[0'u8, 1, 2, 3]
      gossipSub.mcache.put(id, Message())
      peer.sentIHaves[^1].incl(id)
      # Build an IHAVE message that contains the same message ID three times
      let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
      # Given the peer has no budget to request messages
      peer.iHaveBudget = 0
      # When a peer makes an IHAVE request for a message that `gossipSub` has
      let iwants = gossipSub.handleIHave(peer, @[msg])
      # Then `gossipSub` should not generate an IWant message for it
      check:
        iwants.messageIDs.len == 0

    # Peers with budget should request messages. If ids are repeated, only one request should be generated
    block:
      # Define a new connection
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      let id = @[0'u8, 1, 2, 3]
      # Build an IHAVE message that contains the same message ID three times
      let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
      # Given the budget is not 0 (because it's not been overridden)
      # When a peer makes an IHAVE request for a message that `gossipSub` does not have
      let iwants = gossipSub.handleIHave(peer, @[msg])
      # Then `gossipSub` should generate an IWant message for the message
      check:
        iwants.messageIDs.len == 1

    # An IWANT that repeats the same message ID should produce a single reply
    block:
      # Define a new connection
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      # Add message to `gossipSub`'s message cache
      let id = @[0'u8, 1, 2, 3]
      gossipSub.mcache.put(id, Message())
      peer.sentIHaves[^1].incl(id)
      # Build an IWANT message that contains the same message ID three times
      let msg = ControlIWant(messageIDs: @[id, id, id])
      # When a peer makes an IWANT request for a message that `gossipSub` has
      let genmsg = gossipSub.handleIWant(peer, @[msg])
      # Then `gossipSub` should return the message
      check:
        genmsg.len == 1

    check gossipSub.mcache.msgs.len == 1

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "messages sent to peers not in the mesh are propagated via gossip":
    let
      numberOfNodes = 5
      topic = "foobar"
      dValues = DValues(dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1))
      nodes = generateNodes(numberOfNodes, gossip = true, dValues = some(dValues))

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iHave messages
    var receivedIHavesRef = new seq[int]
    addIHaveObservers(nodes, topic, receivedIHavesRef)

    # And are interconnected
    await connectNodesStar(nodes)

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForPeersInTable(
      nodes, topic, newSeqWith(numberOfNodes, 4), PeerTableType.Gossipsub
    )

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) > 0
    await waitForHeartbeat()

    # At least one of the nodes should have received an iHave message
    # The check is made this way because the mesh structure changes from run to run
    let receivedIHaves = receivedIHavesRef[]
    check:
      anyIt(receivedIHaves, it > 0)

  asyncTest "adaptive gossip dissemination, dLazy and gossipFactor to 0":
    let
      numberOfNodes = 20
      topic = "foobar"
      dValues = DValues(
        dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(0)
      )
      nodes = generateNodes(
        numberOfNodes,
        gossip = true,
        dValues = some(dValues),
        gossipFactor = some(0.float),
      )

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iHave messages
    var receivedIHavesRef = new seq[int]
    addIHaveObservers(nodes, topic, receivedIHavesRef)

    # And are connected to node 0
    for i in 1 ..< numberOfNodes:
      await connectNodes(nodes[0], nodes[i])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitForHeartbeat()

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) == 3
    await waitForHeartbeat()

    # None of the nodes should have received an iHave message
    let receivedIHaves = receivedIHavesRef[]
    check:
      filterIt(receivedIHaves, it > 0).len == 0

asyncTest "adaptive gossip dissemination, with gossipFactor priority":
|
||||
let
|
||||
numberOfNodes = 20
|
||||
topic = "foobar"
|
||||
dValues = DValues(
|
||||
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(4)
|
||||
)
|
||||
nodes = generateNodes(
|
||||
numberOfNodes, gossip = true, dValues = some(dValues), gossipFactor = some(0.5)
|
||||
)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# All nodes are checking for iHave messages
|
||||
var receivedIHavesRef = new seq[int]
|
||||
addIHaveObservers(nodes, topic, receivedIHavesRef)
|
||||
|
||||
# And are connected to node 0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(nodes[0], nodes[i])
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForPeersInTable(@[nodes[0]], topic, @[19], PeerTableType.Gossipsub)
|
||||
|
||||
# When node 0 sends a message
|
||||
check (await nodes[0].publish(topic, "Hello!".toBytes())) in 2 .. 3
|
||||
await waitForHeartbeat(2)
|
||||
|
||||
# At least 8 of the nodes should have received an iHave message
|
||||
# That's because the gossip factor is 0.5 over 16 available nodes
|
||||
let receivedIHaves = receivedIHavesRef[]
|
||||
check:
|
||||
filterIt(receivedIHaves, it > 0).len >= 8
|
||||
|
||||
asyncTest "adaptive gossip dissemination, with dLazy priority":
|
||||
let
|
||||
numberOfNodes = 20
|
||||
topic = "foobar"
|
||||
dValues = DValues(
|
||||
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(6)
|
||||
)
|
||||
nodes = generateNodes(
|
||||
numberOfNodes,
|
||||
gossip = true,
|
||||
dValues = some(dValues),
|
||||
gossipFactor = some(0.float),
|
||||
)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# All nodes are checking for iHave messages
|
||||
var receivedIHavesRef = new seq[int]
|
||||
addIHaveObservers(nodes, topic, receivedIHavesRef)
|
||||
|
||||
# And are connected to node 0
|
||||
for i in 1 ..< numberOfNodes:
|
||||
await connectNodes(nodes[0], nodes[i])
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForPeersInTable(@[nodes[0]], topic, @[19], PeerTableType.Gossipsub)
|
||||
|
||||
# When node 0 sends a message
|
||||
check (await nodes[0].publish(topic, "Hello!".toBytes())) in 2 .. 3
|
||||
await waitForHeartbeat(2)
|
||||
|
||||
# At least 6 of the nodes should have received an iHave message
|
||||
# That's because the dLazy is 6
|
||||
let receivedIHaves = receivedIHavesRef[]
|
||||
check:
|
||||
filterIt(receivedIHaves, it > 0).len >= dValues.dLazy.get()
|
||||
|
||||
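  # Worked numbers for the dLazy-priority test above: with gossipFactor = 0
  # the factor term contributes nothing, so dLazy = 6 sets the floor.
  doAssert gossipTargetCount(6, 0.0, 16) == 6
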
asyncTest "iDontWant messages are broadcast immediately after receiving the first message instance":
|
||||
let
|
||||
numberOfNodes = 3
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# All nodes are checking for iDontWant messages
|
||||
var receivedIDontWantsRef = new seq[int]
|
||||
addIDontWantObservers(nodes, receivedIDontWantsRef)
|
||||
|
||||
# And are connected in a line
|
||||
await connectNodes(nodes[0], nodes[1])
|
||||
await connectNodes(nodes[1], nodes[2])
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForPeersInTable(nodes, topic, @[1, 2, 1], PeerTableType.Gossipsub)
|
||||
|
||||
# When node 0 sends a large message
|
||||
let largeMsg = newSeq[byte](1000)
|
||||
check (await nodes[0].publish(topic, largeMsg)) == 1
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Only node 2 should have received the iDontWant message
|
||||
let receivedIDontWants = receivedIDontWantsRef[]
|
||||
check:
|
||||
receivedIDontWants[0] == 0
|
||||
receivedIDontWants[1] == 0
|
||||
receivedIDontWants[2] == 1
|
||||
|
||||
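  # On the wire, IDONTWANT is just a control message listing message IDs the
  # sender has already seen; peers holding those IDs skip forwarding them back.
  # A minimal sketch of building one by hand (the same shape is broadcast in
  # the "e2e - iDontWant" test below; the 10-byte ID matches its
  # dumbMsgIdProvider):
  let idontwantSketch = RPCMsg(
    control: some(
      ControlMessage(idontwant: @[ControlIWant(messageIDs: @[newSeq[byte](10)])])
    )
  )
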
asyncTest "e2e - GossipSub peer exchange":
|
||||
# A, B & C are subscribed to something
|
||||
# B unsubcribe from it, it should send
|
||||
# PX to A & C
|
||||
#
|
||||
# C sent his SPR, not A
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard # not used in this test
|
||||
|
||||
let nodes =
|
||||
generateNodes(2, gossip = true, enablePX = true) &
|
||||
generateNodes(1, gossip = true, sendSignedPeerRecord = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
var
|
||||
gossip0 = GossipSub(nodes[0])
|
||||
gossip1 = GossipSub(nodes[1])
|
||||
gossip2 = GossipSub(nodes[2])
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
nodes[2].subscribe("foobar", handler)
|
||||
for x in 0 ..< 3:
|
||||
for y in 0 ..< 3:
|
||||
if x != y:
|
||||
await waitSub(nodes[x], nodes[y], "foobar")
|
||||
|
||||
# Setup record handlers for all nodes
|
||||
var
|
||||
passed0: Future[void] = newFuture[void]()
|
||||
passed2: Future[void] = newFuture[void]()
|
||||
gossip0.routingRecordsHandler.add(
|
||||
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
|
||||
check:
|
||||
tag == "foobar"
|
||||
peers.len == 2
|
||||
peers[0].record.isSome() xor peers[1].record.isSome()
|
||||
passed0.complete()
|
||||
)
|
||||
gossip1.routingRecordsHandler.add(
|
||||
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
|
||||
raiseAssert "should not get here"
|
||||
)
|
||||
gossip2.routingRecordsHandler.add(
|
||||
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
|
||||
check:
|
||||
tag == "foobar"
|
||||
peers.len == 2
|
||||
peers[0].record.isSome() xor peers[1].record.isSome()
|
||||
passed2.complete()
|
||||
)
|
||||
|
||||
# Unsubscribe from the topic
|
||||
nodes[1].unsubscribe("foobar", handler)
|
||||
|
||||
# Then verify what nodes receive the PX
|
||||
let results = await waitForStates(@[passed0, passed2], HEARTBEAT_TIMEOUT)
|
||||
check:
|
||||
results[0].isCompleted()
|
||||
results[1].isCompleted()
|
||||
|
||||
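  # A PRUNE carrying peer exchange lists candidate peers to reconnect to; each
  # RoutingRecordsPair may carry a signed peer record (SPR) when the remote
  # opted in via sendSignedPeerRecord. A minimal helper sketch (not part of
  # the library API) that counts how many PX candidates carried a record:
  proc countSignedRecords(peers: seq[RoutingRecordsPair]): int =
    for p in peers:
      if p.record.isSome():
        inc result
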
asyncTest "e2e - iDontWant":
|
||||
# 3 nodes: A <=> B <=> C
|
||||
# (A & C are NOT connected). We pre-emptively send a dontwant from C to B,
|
||||
# and check that B doesn't relay the message to C.
|
||||
# We also check that B sends IDONTWANT to C, but not A
|
||||
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
|
||||
ok(newSeq[byte](10))
|
||||
let nodes = generateNodes(3, gossip = true, msgIdProvider = dumbMsgIdProvider)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await nodes[0].switch.connect(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
await nodes[1].switch.connect(
|
||||
nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
let bFinished = newFuture[void]()
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} =
|
||||
bFinished.complete()
|
||||
|
||||
proc handlerC(topic: string, data: seq[byte]) {.async.} =
|
||||
doAssert false
|
||||
|
||||
nodes[0].subscribe("foobar", handlerA)
|
||||
nodes[1].subscribe("foobar", handlerB)
|
||||
nodes[2].subscribe("foobar", handlerB)
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
var gossip1: GossipSub = GossipSub(nodes[0])
|
||||
var gossip2: GossipSub = GossipSub(nodes[1])
|
||||
var gossip3: GossipSub = GossipSub(nodes[2])
|
||||
|
||||
check:
|
||||
gossip3.mesh.peers("foobar") == 1
|
||||
|
||||
gossip3.broadcast(
|
||||
gossip3.mesh["foobar"],
|
||||
RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(idontwant: @[ControlIWant(messageIDs: @[newSeq[byte](10)])])
|
||||
)
|
||||
),
|
||||
isHighPriority = true,
|
||||
)
|
||||
checkUntilTimeout:
|
||||
gossip2.mesh.getOrDefault("foobar").anyIt(it.iDontWants[^1].len == 1)
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
|
||||
|
||||
await bFinished
|
||||
|
||||
checkUntilTimeout:
|
||||
toSeq(gossip3.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 1)
|
||||
check:
|
||||
toSeq(gossip1.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
|
||||
|
||||
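  # Note on `iDontWants[^1]`: each peer appears to keep a short rolling
  # history of IDONTWANT id sets, one slot per heartbeat window, with `[^1]`
  # selecting the most recent slot; the checks above therefore assert on what
  # arrived since the last heartbeat rotation.
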
asyncTest "e2e - iDontWant is broadcasted on publish":
|
||||
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
|
||||
ok(newSeq[byte](10))
|
||||
let nodes = generateNodes(
|
||||
2, gossip = true, msgIdProvider = dumbMsgIdProvider, sendIDontWantOnPublish = true
|
||||
)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await nodes[0].switch.connect(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
proc handlerA(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
nodes[0].subscribe("foobar", handlerA)
|
||||
nodes[1].subscribe("foobar", handlerB)
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
var gossip2: GossipSub = GossipSub(nodes[1])
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
|
||||
|
||||
checkUntilTimeout:
|
||||
gossip2.mesh.getOrDefault("foobar").anyIt(it.iDontWants[^1].len == 1)
|
||||
|
||||
asyncTest "e2e - iDontWant is sent only for 1.2":
|
||||
# 3 nodes: A <=> B <=> C
|
||||
# (A & C are NOT connected). We pre-emptively send a dontwant from C to B,
|
||||
# and check that B doesn't relay the message to C.
|
||||
# We also check that B sends IDONTWANT to C, but not A
|
||||
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
|
||||
ok(newSeq[byte](10))
|
||||
let
|
||||
nodeA = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
|
||||
nodeB = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
|
||||
nodeC = generateNodes(
|
||||
1,
|
||||
gossip = true,
|
||||
msgIdProvider = dumbMsgIdProvider,
|
||||
gossipSubVersion = GossipSubCodec_11,
|
||||
)[0]
|
||||
|
||||
startNodesAndDeferStop(@[nodeA, nodeB, nodeC])
|
||||
|
||||
await nodeA.switch.connect(
|
||||
nodeB.switch.peerInfo.peerId, nodeB.switch.peerInfo.addrs
|
||||
)
|
||||
await nodeB.switch.connect(
|
||||
nodeC.switch.peerInfo.peerId, nodeC.switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
let bFinished = newFuture[void]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
proc handlerB(topic: string, data: seq[byte]) {.async.} =
|
||||
bFinished.complete()
|
||||
|
||||
nodeA.subscribe("foobar", handler)
|
||||
nodeB.subscribe("foobar", handlerB)
|
||||
nodeC.subscribe("foobar", handler)
|
||||
await waitSubGraph(@[nodeA, nodeB, nodeC], "foobar")
|
||||
|
||||
var gossipA: GossipSub = GossipSub(nodeA)
|
||||
var gossipB: GossipSub = GossipSub(nodeB)
|
||||
var gossipC: GossipSub = GossipSub(nodeC)
|
||||
|
||||
check:
|
||||
gossipC.mesh.peers("foobar") == 1
|
||||
|
||||
tryPublish await nodeA.publish("foobar", newSeq[byte](10000)), 1
|
||||
|
||||
await bFinished
|
||||
|
||||
# "check" alone isn't suitable for testing that a condition is true after some time has passed. Below we verify that
|
||||
# peers A and C haven't received an IDONTWANT message from B, but we need wait some time for potential in flight messages to arrive.
|
||||
await waitForHeartbeat()
|
||||
check:
|
||||
toSeq(gossipC.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
|
||||
toSeq(gossipA.mesh.getOrDefault("foobar")).anyIt(it.iDontWants[^1].len == 0)
|
||||
|
||||
asyncTest "Peer must send right gosspipsub version":
|
||||
func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
|
||||
ok(newSeq[byte](10))
|
||||
let node0 = generateNodes(1, gossip = true, msgIdProvider = dumbMsgIdProvider)[0]
|
||||
let node1 = generateNodes(
|
||||
1,
|
||||
gossip = true,
|
||||
msgIdProvider = dumbMsgIdProvider,
|
||||
gossipSubVersion = GossipSubCodec_10,
|
||||
)[0]
|
||||
|
||||
startNodesAndDeferStop(@[node0, node1])
|
||||
|
||||
await node0.switch.connect(
|
||||
node1.switch.peerInfo.peerId, node1.switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
node0.subscribe("foobar", handler)
|
||||
node1.subscribe("foobar", handler)
|
||||
await waitSubGraph(@[node0, node1], "foobar")
|
||||
|
||||
var gossip0: GossipSub = GossipSub(node0)
|
||||
var gossip1: GossipSub = GossipSub(node1)
|
||||
|
||||
checkUntilTimeout:
|
||||
gossip0.mesh.getOrDefault("foobar").toSeq[0].codec == GossipSubCodec_10
|
||||
checkUntilTimeout:
|
||||
gossip1.mesh.getOrDefault("foobar").toSeq[0].codec == GossipSubCodec_10
|
||||
|
||||
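  # Protocol negotiation settles on the highest GossipSub codec both sides
  # support, so the v1.0-only node1 forces GossipSubCodec_10 in both
  # directions, which is exactly what the two checks above assert.
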
asyncTest "IHAVE messages correctly advertise message ID to peers":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
messageID = @[0'u8, 1, 2, 3]
|
||||
ihaveMessage =
|
||||
ControlMessage(ihave: @[ControlIHave(topicID: topic, messageIDs: @[messageID])])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Given node1 has an IHAVE observer
|
||||
var receivedIHave = newFuture[(string, seq[MessageId])]()
|
||||
let checkForIhaves = proc(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
if msgs.control.isSome:
|
||||
for msg in msgs.control.get.ihave:
|
||||
receivedIHave.complete((msg.topicID, msg.messageIDs))
|
||||
n1.addObserver(PubSubObserver(onRecv: checkForIhaves))
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When an IHAVE message is sent
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(ihaveMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer has the message ID
|
||||
let r = await receivedIHave.waitForState(HEARTBEAT_TIMEOUT)
|
||||
check:
|
||||
r.isCompleted((topic, @[messageID]))
|
||||
|
||||
asyncTest "IWANT messages correctly request messages by their IDs":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
messageID = @[0'u8, 1, 2, 3]
|
||||
iwantMessage = ControlMessage(iwant: @[ControlIWant(messageIDs: @[messageID])])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Given node1 has an IWANT observer
|
||||
var receivedIWant = newFuture[seq[MessageId]]()
|
||||
let checkForIwants = proc(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
if msgs.control.isSome:
|
||||
for msg in msgs.control.get.iwant:
|
||||
receivedIWant.complete(msg.messageIDs)
|
||||
n1.addObserver(PubSubObserver(onRecv: checkForIwants))
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When an IWANT message is sent
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(iwantMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer has the message ID
|
||||
let r = await receivedIWant.waitForState(HEARTBEAT_TIMEOUT)
|
||||
check:
|
||||
r.isCompleted(@[messageID])
|
||||
|
||||
asyncTest "IHAVE for message not held by peer triggers IWANT response to sender":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
messageID = @[0'u8, 1, 2, 3]
|
||||
ihaveMessage =
|
||||
ControlMessage(ihave: @[ControlIHave(topicID: topic, messageIDs: @[messageID])])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# Given node1 has an IWANT observer
|
||||
var receivedIWant = newFuture[seq[MessageId]]()
|
||||
let checkForIwants = proc(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
if msgs.control.isSome:
|
||||
for msg in msgs.control.get.iwant:
|
||||
receivedIWant.complete(msg.messageIDs)
|
||||
n0.addObserver(PubSubObserver(onRecv: checkForIwants))
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both nodes subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# When an IHAVE message is sent from node0
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(ihaveMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then node0 should receive an IWANT message from node1 (as node1 doesn't have the message)
|
||||
let iWantResult = await receivedIWant.waitForState(HEARTBEAT_TIMEOUT)
|
||||
check:
|
||||
iWantResult.isCompleted(@[messageID])
|
||||

tests/pubsub/testgossipsubmeshmanagement.nim (new file, 719 lines)
@@ -0,0 +1,719 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils]
import stew/byteutils
import utils
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../helpers, ../utils/[futures]

suite "GossipSub Mesh Management":
  teardown:
    checkTrackers()

  asyncTest "topic params":
    let params = TopicParams.init()
    params.validateParameters().tryGet()

  asyncTest "subscribe/unsubscribeAll":
    let gossipSub = TestGossipSub.init(newStandardSwitch())

    proc handler(topic: string, data: seq[byte]): Future[void] {.gcsafe, raises: [].} =
      discard

    let topic = "foobar"
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
    gossipSub.topicParams[topic] = TopicParams.init()

    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0 ..< 15:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      gossipSub.gossipsub[topic].incl(peer)

    # test via dynamic dispatch
    gossipSub.PubSub.subscribe(topic, handler)

    check:
      gossipSub.topics.contains(topic)
      gossipSub.gossipsub[topic].len() > 0
      gossipSub.mesh[topic].len() > 0

    # test via dynamic dispatch
    gossipSub.PubSub.unsubscribeAll(topic)

    check:
      topic notin gossipSub.topics # not in local topics
      topic notin gossipSub.mesh # not in mesh
      topic in gossipSub.gossipsub # but still in gossipsub table (for fanning out)

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

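  # The peer tables checked above play distinct roles: `topics` holds our own
  # local subscriptions, `mesh` holds the GRAFTed full-message peers per
  # topic, and `gossipsub` tracks every peer known to subscribe to a topic,
  # which is why it survives unsubscribeAll (it still feeds fanout publishes).
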
asyncTest "`rebalanceMesh` Degree Lo":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len == gossipSub.parameters.d
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "rebalanceMesh - bad peers":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
var scoreLow = -11'f64
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
peer.score = scoreLow
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
scoreLow += 1.0
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# low score peers should not be in mesh, that's why the count must be 4
|
||||
check gossipSub.mesh[topic].len == 4
|
||||
for peer in gossipSub.mesh[topic]:
|
||||
check peer.score >= 0.0
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
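  # The expected 4 above falls out of the score ramp: the 15 peers are scored
  # -11, -10, ..., +3, and only the four non-negative scores (0, 1, 2, 3)
  # remain eligible for the mesh.
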
asyncTest "`rebalanceMesh` Degree Hi":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
check gossipSub.mesh[topic].len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len ==
|
||||
gossipSub.parameters.d + gossipSub.parameters.dScore
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
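  # Why d + dScore: when trimming an oversized mesh, rebalanceMesh appears to
  # keep the dScore best-scoring peers in addition to the d regular slots, so
  # the 15 peers shrink to parameters.d + parameters.dScore rather than plain d.
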
asyncTest "rebalanceMesh fail due to backoff":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
|
||||
gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
|
||||
peerId, Moment.now() + 1.hours
|
||||
)
|
||||
let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
|
||||
# there must be a control prune due to violation of backoff
|
||||
check prunes.len != 0
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# expect 0 since they are all backing off
|
||||
check gossipSub.mesh[topic].len == 0
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
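  # Backoff works in both directions here: a GRAFT from a peer in `backingOff`
  # is answered with a PRUNE (the `prunes.len != 0` check), and rebalanceMesh
  # itself skips backed-off peers, leaving the mesh empty until the 1-hour
  # backoff expires.
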
asyncTest "rebalanceMesh fail due to backoff - remote":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 15:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.sendConn = conn
|
||||
gossipSub.gossipsub[topic].incl(peer)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
check gossipSub.peers.len == 15
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
check gossipSub.mesh[topic].len != 0
|
||||
|
||||
for i in 0 ..< 15:
|
||||
let peerId = conns[i].peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
gossipSub.handlePrune(
|
||||
peer,
|
||||
@[
|
||||
ControlPrune(
|
||||
topicID: topic,
|
||||
peers: @[],
|
||||
backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
# expect topic cleaned up since they are all pruned
|
||||
check topic notin gossipSub.mesh
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "rebalanceMesh Degree Hi - audit scenario":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
let topic = "foobar"
|
||||
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
gossipSub.topicParams[topic] = TopicParams.init()
|
||||
gossipSub.parameters.dScore = 4
|
||||
gossipSub.parameters.d = 6
|
||||
gossipSub.parameters.dOut = 3
|
||||
gossipSub.parameters.dHigh = 12
|
||||
gossipSub.parameters.dLow = 4
|
||||
|
||||
var conns = newSeq[Connection]()
|
||||
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0 ..< 6:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conn.transportDir = Direction.In
|
||||
conns &= conn
|
||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.score = 40.0
|
||||
peer.sendConn = conn
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
for i in 0 ..< 7:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conn.transportDir = Direction.Out
|
||||
conns &= conn
|
||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.score = 10.0
|
||||
peer.sendConn = conn
|
||||
gossipSub.grafted(peer, topic)
|
||||
gossipSub.mesh[topic].incl(peer)
|
||||
|
||||
check gossipSub.mesh[topic].len == 13
|
||||
gossipSub.rebalanceMesh(topic)
|
||||
# ensure we are above dlow
|
||||
check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
|
||||
var outbound = 0
|
||||
for peer in gossipSub.mesh[topic]:
|
||||
if peer.sendConn.transportDir == Direction.Out:
|
||||
inc outbound
|
||||
# ensure we give priority and keep at least dOut outbound peers
|
||||
check outbound >= gossipSub.parameters.dOut
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
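  # The audit scenario above pins the outbound quota: even though the six
  # inbound peers score far higher (40.0 vs 10.0), trimming must still keep
  # at least dOut = 3 outbound connections to resist inbound-only eclipse
  # attempts.
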
asyncTest "dont prune peers if mesh len is less than d_high":
|
||||
let
|
||||
numberOfNodes = 5
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
let expectedNumberOfPeers = numberOfNodes - 1
|
||||
await waitForPeersInTable(
|
||||
nodes,
|
||||
topic,
|
||||
newSeqWith(numberOfNodes, expectedNumberOfPeers),
|
||||
PeerTableType.Gossipsub,
|
||||
)
|
||||
|
||||
for i in 0 ..< numberOfNodes:
|
||||
var gossip = GossipSub(nodes[i])
|
||||
check:
|
||||
gossip.gossipsub[topic].len == expectedNumberOfPeers
|
||||
gossip.mesh[topic].len == expectedNumberOfPeers
|
||||
gossip.fanout.len == 0
|
||||
|
||||
asyncTest "prune peers if mesh len is higher than d_high":
|
||||
let
|
||||
numberOfNodes = 15
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
|
||||
let
|
||||
expectedNumberOfPeers = numberOfNodes - 1
|
||||
dHigh = 12
|
||||
d = 6
|
||||
dLow = 4
|
||||
|
||||
await waitForPeersInTable(
|
||||
nodes,
|
||||
topic,
|
||||
newSeqWith(numberOfNodes, expectedNumberOfPeers),
|
||||
PeerTableType.Gossipsub,
|
||||
)
|
||||
|
||||
for i in 0 ..< numberOfNodes:
|
||||
var gossip = GossipSub(nodes[i])
|
||||
|
||||
check:
|
||||
gossip.gossipsub[topic].len == expectedNumberOfPeers
|
||||
gossip.mesh[topic].len >= dLow and gossip.mesh[topic].len <= dHigh
|
||||
gossip.fanout.len == 0
|
||||
|
||||
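  # The dLow = 4, d = 6, dHigh = 12 values asserted above mirror the default
  # GossipSub mesh degrees: rebalancing only prunes once the mesh exceeds
  # dHigh and only grafts once it drops below dLow.
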
asyncTest "GossipSub unsub - resub faster than backoff":
|
||||
# For this test to work we'd require a way to disable fanout.
|
||||
# There's not a way to toggle it, and mocking it didn't work as there's not a reliable mock available.
|
||||
skip()
|
||||
return
|
||||
|
||||
# Instantiate handlers and validators
|
||||
var handlerFut0 = newFuture[bool]()
|
||||
proc handler0(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut0.complete(true)
|
||||
|
||||
var handlerFut1 = newFuture[bool]()
|
||||
proc handler1(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut1.complete(true)
|
||||
|
||||
var validatorFut = newFuture[bool]()
|
||||
proc validator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
check topic == "foobar"
|
||||
validatorFut.complete(true)
|
||||
result = ValidationResult.Accept
|
||||
|
||||
# Setup nodes and start switches
|
||||
let
|
||||
nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 5.seconds)
|
||||
topic = "foobar"
|
||||
|
||||
# Connect nodes
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# Subscribe both nodes to the topic and node1 (receiver) to the validator
|
||||
nodes[0].subscribe(topic, handler0)
|
||||
nodes[1].subscribe(topic, handler1)
|
||||
nodes[1].addValidator("foobar", validator)
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
|
||||
# Wait for both nodes to verify others' subscription
|
||||
var subs: seq[Future[void]]
|
||||
subs &= waitSub(nodes[1], nodes[0], topic)
|
||||
subs &= waitSub(nodes[0], nodes[1], topic)
|
||||
await allFuturesThrowing(subs)
|
||||
|
||||
# When unsubscribing and resubscribing in a short time frame, the backoff period should be triggered
|
||||
nodes[1].unsubscribe(topic, handler1)
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
nodes[1].subscribe(topic, handler1)
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
|
||||
# Backoff is set to 5 seconds, and the amount of sleeping time since the unsubsribe until now is 3-4s~
|
||||
# Meaning, the subscription shouldn't have been processed yet because it's still in backoff period
|
||||
# When publishing under this condition
|
||||
discard await nodes[0].publish("foobar", "Hello!".toBytes())
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
|
||||
# Then the message should not be received:
|
||||
check:
|
||||
validatorFut.toState().isPending()
|
||||
handlerFut1.toState().isPending()
|
||||
handlerFut0.toState().isPending()
|
||||
|
||||
validatorFut.reset()
|
||||
handlerFut0.reset()
|
||||
handlerFut1.reset()
|
||||
|
||||
# If we wait backoff period to end, around 1-2s
|
||||
await waitForMesh(nodes[0], nodes[1], topic, 3.seconds)
|
||||
|
||||
discard await nodes[0].publish("foobar", "Hello!".toBytes())
|
||||
await sleepAsync(DURATION_TIMEOUT)
|
||||
|
||||
# Then the message should be received
|
||||
check:
|
||||
validatorFut.toState().isCompleted()
|
||||
handlerFut1.toState().isCompleted()
|
||||
handlerFut0.toState().isPending()
|
||||
|
||||
asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
let gossip1 = GossipSub(nodes[0])
|
||||
let gossip2 = GossipSub(nodes[1])
|
||||
|
||||
checkUntilTimeout:
|
||||
"foobar" in gossip2.topics
|
||||
"foobar" in gossip1.gossipsub
|
||||
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
asyncTest "e2e - GossipSub should add remote peer topic subscriptions if both peers are subscribed":
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
var subs: seq[Future[void]]
|
||||
subs &= waitSub(nodes[1], nodes[0], "foobar")
|
||||
subs &= waitSub(nodes[0], nodes[1], "foobar")
|
||||
|
||||
await allFuturesThrowing(subs)
|
||||
|
||||
let
|
||||
gossip1 = GossipSub(nodes[0])
|
||||
gossip2 = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
"foobar" in gossip1.topics
|
||||
"foobar" in gossip2.topics
|
||||
|
||||
"foobar" in gossip1.gossipsub
|
||||
"foobar" in gossip2.gossipsub
|
||||
|
||||
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId) or
|
||||
gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
|
||||
gossip2.gossipsub.hasPeerId("foobar", gossip1.peerInfo.peerId) or
|
||||
gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
|
||||
|
||||
asyncTest "GossipSub invalid topic subscription":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# We must subscribe before setting the validator
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
|
||||
var gossip = GossipSub(nodes[0])
|
||||
let invalidDetected = newFuture[void]()
|
||||
gossip.subscriptionValidator = proc(topic: string): bool =
|
||||
if topic == "foobar":
|
||||
try:
|
||||
invalidDetected.complete()
|
||||
except:
|
||||
raise newException(Defect, "Exception during subscriptionValidator")
|
||||
false
|
||||
else:
|
||||
true
|
||||
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await invalidDetected.wait(10.seconds)
|
||||
|
||||
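  # subscriptionValidator gates remote subscriptions per topic: returning
  # false makes the node reject the peer's subscription to that topic. The
  # test only cares that the callback fired for the "invalid" topic, hence
  # the Future completed inside it.
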
asyncTest "GossipSub test directPeers":
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
await GossipSub(nodes[0]).addDirectPeer(
|
||||
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
|
||||
)
|
||||
|
||||
let invalidDetected = newFuture[void]()
|
||||
GossipSub(nodes[0]).subscriptionValidator = proc(topic: string): bool =
|
||||
if topic == "foobar":
|
||||
try:
|
||||
invalidDetected.complete()
|
||||
except:
|
||||
raise newException(Defect, "Exception during subscriptionValidator")
|
||||
false
|
||||
else:
|
||||
true
|
||||
|
||||
# DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
|
||||
### await connectNodesStar(nodes)
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await invalidDetected.wait(10.seconds)
|
||||
|
||||
asyncTest "GRAFT messages correctly add peers to mesh":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foobar"
|
||||
graftMessage = ControlMessage(graft: @[ControlGraft(topicID: topic)])
|
||||
numberOfNodes = 2
|
||||
# First part of the hack: Weird dValues so peers are not GRAFTed automatically
|
||||
dValues = DValues(dLow: some(0), dHigh: some(0), d: some(0), dOut: some(-1))
|
||||
nodes = generateNodes(
|
||||
numberOfNodes, gossip = true, verifySignature = false, dValues = some(dValues)
|
||||
)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Because of the hack-ish dValues, the peers are added to gossipsub but not GRAFTed to mesh
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# Stop both nodes in order to prevent GRAFT message to be sent by heartbeat
|
||||
await n0.stop()
|
||||
await n1.stop()
|
||||
|
||||
# Second part of the hack
|
||||
# Set values so peers can be GRAFTed
|
||||
let newDValues =
|
||||
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(1)))
|
||||
n0.parameters.applyDValues(newDValues)
|
||||
n1.parameters.applyDValues(newDValues)
|
||||
|
||||
# When a GRAFT message is sent
|
||||
let p0 = n1.getOrCreatePeer(n0.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
|
||||
n1.broadcast(@[p0], RPCMsg(control: some(graftMessage)), isHighPriority = false)
|
||||
|
||||
await waitForPeersInTable(
|
||||
nodes, topic, newSeqWith(numberOfNodes, 1), PeerTableType.Mesh
|
||||
)
|
||||
|
||||
# Then the peers are GRAFTed
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
asyncTest "Received GRAFT for non-subscribed topic":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
graftMessage = ControlMessage(graft: @[ControlGraft(topicID: topic)])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And only node0 subscribes to the topic
|
||||
nodes[0].subscribe(topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.topics.hasKey(topic)
|
||||
not n1.topics.hasKey(topic)
|
||||
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When a GRAFT message is sent
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer is not GRAFTed
|
||||
check:
|
||||
n0.topics.hasKey(topic)
|
||||
not n1.topics.hasKey(topic)
|
||||
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
asyncTest "PRUNE messages correctly removes peers from mesh":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
backoff = 1
|
||||
pruneMessage = ControlMessage(
|
||||
prune: @[ControlPrune(topicID: topic, peers: @[], backoff: uint64(backoff))]
|
||||
)
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And both subscribe to the topic
|
||||
subscribeAllNodes(nodes, topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When a PRUNE message is sent
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer is PRUNEd
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When another PRUNE message is sent
|
||||
let p0 = n1.getOrCreatePeer(n0.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n1.broadcast(@[p0], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer is PRUNEd
|
||||
check:
|
||||
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
asyncTest "Received PRUNE for non-subscribed topic":
|
||||
# Given 2 nodes
|
||||
let
|
||||
topic = "foo"
|
||||
pruneMessage =
|
||||
ControlMessage(prune: @[ControlPrune(topicID: topic, peers: @[], backoff: 1)])
|
||||
numberOfNodes = 2
|
||||
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
|
||||
.toGossipSub()
|
||||
n0 = nodes[0]
|
||||
n1 = nodes[1]
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
# And the nodes are connected
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
# And only node0 subscribes to the topic
|
||||
n0.subscribe(topic, voidTopicHandler)
|
||||
await waitForHeartbeat()
|
||||
|
||||
check:
|
||||
n0.topics.hasKey(topic)
|
||||
not n1.topics.hasKey(topic)
|
||||
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
|
||||
# When a PRUNE message is sent
|
||||
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
|
||||
n0.broadcast(@[p1], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Then the peer is not PRUNEd
|
||||
check:
|
||||
n0.topics.hasKey(topic)
|
||||
not n1.topics.hasKey(topic)
|
||||
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
|
||||
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
|
||||
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
|
||||

tests/pubsub/testgossipsubmessagehandling.nim (new file, 861 lines)
@@ -0,0 +1,861 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils, enumerate]
import stew/byteutils
import utils
import sugar
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, timedcache]
import ../../libp2p/protocols/pubsub/rpc/[message, protobuf]
import ../../libp2p/muxers/muxer
import ../helpers, ../utils/[futures]

const MsgIdSuccess = "msg id gen success"

proc setupTest(): Future[
    tuple[
      gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]
    ]
] {.async.} =
  let nodes = generateNodes(2, gossip = true, verifySignature = false)
  discard await allFinished(nodes[0].switch.start(), nodes[1].switch.start())

  await nodes[1].switch.connect(
    nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
  )

  var receivedMessages = new(HashSet[seq[byte]])

  proc handlerA(topic: string, data: seq[byte]) {.async.} =
    receivedMessages[].incl(data)

  proc handlerB(topic: string, data: seq[byte]) {.async.} =
    discard

  nodes[0].subscribe("foobar", handlerA)
  nodes[1].subscribe("foobar", handlerB)
  await waitSubGraph(nodes, "foobar")

  var gossip0: GossipSub = GossipSub(nodes[0])
  var gossip1: GossipSub = GossipSub(nodes[1])

  return (gossip0, gossip1, receivedMessages)

proc teardownTest(gossip0: GossipSub, gossip1: GossipSub) {.async.} =
  await allFuturesThrowing(gossip0.switch.stop(), gossip1.switch.stop())

proc createMessages(
    gossip0: GossipSub, gossip1: GossipSub, size1: int, size2: int
): tuple[iwantMessageIds: seq[MessageId], sentMessages: HashSet[seq[byte]]] =
  var iwantMessageIds = newSeq[MessageId]()
  var sentMessages = initHashSet[seq[byte]]()

  for i, size in enumerate([size1, size2]):
    let data = newSeqWith(size, i.byte)
    sentMessages.incl(data)

    let msg = Message.init(gossip1.peerInfo.peerId, data, "foobar", some(uint64(i + 1)))
    let iwantMessageId = gossip1.msgIdProvider(msg).expect(MsgIdSuccess)
    iwantMessageIds.add(iwantMessageId)
    gossip1.mcache.put(iwantMessageId, msg)

    let peer = gossip1.peers[(gossip0.peerInfo.peerId)]
    peer.sentIHaves[^1].incl(iwantMessageId)

  return (iwantMessageIds, sentMessages)

suite "GossipSub Message Handling":
|
||||
teardown:
|
||||
checkTrackers()
|
||||
|
||||
asyncTest "Drop messages of topics without subscription":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
|
||||
check false
|
||||
|
||||
let topic = "foobar"
|
||||
var conns = newSeq[Connection]()
|
||||
for i in 0 ..< 30:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
peer.handler = handler
|
||||
|
||||
# generate messages
|
||||
var seqno = 0'u64
|
||||
for i in 0 .. 5:
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
inc seqno
|
||||
let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
|
||||
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
|
||||
|
||||
check gossipSub.mcache.msgs.len == 0
|
||||
|
||||
await allFuturesThrowing(conns.mapIt(it.close()))
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "subscription limits":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
gossipSub.topicsHigh = 10
|
||||
|
||||
var tooManyTopics: seq[string]
|
||||
for i in 0 .. gossipSub.topicsHigh + 10:
|
||||
tooManyTopics &= "topic" & $i
|
||||
let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)
|
||||
|
||||
let conn = TestBufferStream.new(noop)
|
||||
let peerId = randomPeerId()
|
||||
conn.peerId = peerId
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))
|
||||
|
||||
check:
|
||||
gossipSub.gossipsub.len == gossipSub.topicsHigh
|
||||
peer.behaviourPenalty > 0.0
|
||||
|
||||
await conn.close()
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
asyncTest "invalid message bytes":
|
||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||
|
||||
let peerId = randomPeerId()
|
||||
let peer = gossipSub.getPubSubPeer(peerId)
|
||||
|
||||
expect(CatchableError):
|
||||
await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])
|
||||
|
||||
await gossipSub.switch.stop()
|
||||
|
||||
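  # The four IWANT-reply tests below share one shape: createMessages seeds
  # gossip1's mcache (and its sentIHaves window) with two payloads, gossip1
  # advertises them via IHAVE, and the resulting IWANT from gossip0 is
  # answered only with whatever fits the maxMessageSize budget per RPC.
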
asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
|
||||
# This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
|
||||
# Expected: Both messages should be received.
|
||||
let (gossip0, gossip1, receivedMessages) = await setupTest()
|
||||
|
||||
let messageSize = gossip1.maxMessageSize div 2 + 1
|
||||
let (iwantMessageIds, sentMessages) =
|
||||
createMessages(gossip0, gossip1, messageSize, messageSize)
|
||||
|
||||
gossip1.broadcast(
|
||||
gossip1.mesh["foobar"],
|
||||
RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(
|
||||
ihave: @[ControlIHave(topicID: "foobar", messageIDs: iwantMessageIds)]
|
||||
)
|
||||
)
|
||||
),
|
||||
isHighPriority = false,
|
||||
)
|
||||
|
||||
checkUntilTimeout:
|
||||
receivedMessages[] == sentMessages
|
||||
check receivedMessages[].len == 2
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
|
||||
# This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
|
||||
# Expected: No messages should be received.
|
||||
let (gossip0, gossip1, receivedMessages) = await setupTest()
|
||||
|
||||
let messageSize = gossip1.maxMessageSize + 10
|
||||
let (bigIWantMessageIds, sentMessages) =
|
||||
createMessages(gossip0, gossip1, messageSize, messageSize)
|
||||
|
||||
gossip1.broadcast(
|
||||
gossip1.mesh["foobar"],
|
||||
RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(
|
||||
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
|
||||
)
|
||||
)
|
||||
),
|
||||
isHighPriority = false,
|
||||
)
|
||||
|
||||
await sleepAsync(300.milliseconds)
|
||||
checkUntilTimeout:
|
||||
receivedMessages[].len == 0
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
|
||||
# This test checks if two messages, both below the maxSize, are correctly processed and sent.
|
||||
# Expected: Both messages should be received.
|
||||
let (gossip0, gossip1, receivedMessages) = await setupTest()
|
||||
let size1 = gossip1.maxMessageSize div 2
|
||||
let size2 = gossip1.maxMessageSize div 3
|
||||
let (bigIWantMessageIds, sentMessages) =
|
||||
createMessages(gossip0, gossip1, size1, size2)
|
||||
|
||||
gossip1.broadcast(
|
||||
gossip1.mesh["foobar"],
|
||||
RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(
|
||||
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
|
||||
)
|
||||
)
|
||||
),
|
||||
isHighPriority = false,
|
||||
)
|
||||
|
||||
checkUntilTimeout:
|
||||
receivedMessages[] == sentMessages
|
||||
check receivedMessages[].len == 2
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
|
||||
# This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
|
||||
# Expected: Only the smaller message should be received.
|
||||
let (gossip0, gossip1, receivedMessages) = await setupTest()
|
||||
let maxSize = gossip1.maxMessageSize
|
||||
let size1 = maxSize div 2
|
||||
let size2 = maxSize + 10
|
||||
let (bigIWantMessageIds, sentMessages) =
|
||||
createMessages(gossip0, gossip1, size1, size2)
|
||||
|
||||
gossip1.broadcast(
|
||||
gossip1.mesh["foobar"],
|
||||
RPCMsg(
|
||||
control: some(
|
||||
ControlMessage(
|
||||
ihave: @[ControlIHave(topicID: "foobar", messageIDs: bigIWantMessageIds)]
|
||||
)
|
||||
)
|
||||
),
|
||||
isHighPriority = false,
|
||||
)
|
||||
|
||||
var smallestSet: HashSet[seq[byte]]
|
||||
let seqs = toSeq(sentMessages)
|
||||
if seqs[0] < seqs[1]:
|
||||
smallestSet.incl(seqs[0])
|
||||
else:
|
||||
smallestSet.incl(seqs[1])
|
||||
|
||||
checkUntilTimeout:
|
||||
receivedMessages[] == smallestSet
|
||||
check receivedMessages[].len == 1
|
||||
|
||||
await teardownTest(gossip0, gossip1)
|
||||
|
||||
asyncTest "messages are not sent back to source or forwarding peer":
|
||||
let
|
||||
numberOfNodes = 3
|
||||
topic = "foobar"
|
||||
nodes = generateNodes(numberOfNodes, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
|
||||
let (handlerFut0, handler0) = createCompleteHandler()
|
||||
let (handlerFut1, handler1) = createCompleteHandler()
|
||||
let (handlerFut2, handler2) = createCompleteHandler()
|
||||
|
||||
# Nodes are connected in a ring
|
||||
await connectNodes(nodes[0], nodes[1])
|
||||
await connectNodes(nodes[1], nodes[2])
|
||||
await connectNodes(nodes[2], nodes[0])
|
||||
|
||||
# And subscribed to the same topic
|
||||
subscribeAllNodes(nodes, topic, @[handler0, handler1, handler2])
|
||||
await waitForPeersInTable(
|
||||
nodes, topic, newSeqWith(numberOfNodes, 2), PeerTableType.Mesh
|
||||
)
|
||||
|
||||
# When node 0 sends a message
|
||||
check (await nodes[0].publish(topic, "Hello!".toBytes())) == 2
|
||||
await waitForHeartbeat()
|
||||
|
||||
# Nodes 1 and 2 should receive the message, but node 0 shouldn't receive it back
|
||||
let results =
|
||||
await waitForStates(@[handlerFut0, handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
|
||||
check:
|
||||
results[0].isPending()
|
||||
results[1].isCompleted()
|
||||
results[2].isCompleted()
|
||||
|
||||
asyncTest "GossipSub validation should succeed":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
handlerFut.complete(true)
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
var subs: seq[Future[void]]
|
||||
subs &= waitSub(nodes[1], nodes[0], "foobar")
|
||||
subs &= waitSub(nodes[0], nodes[1], "foobar")
|
||||
|
||||
await allFuturesThrowing(subs)
|
||||
|
||||
var validatorFut = newFuture[bool]()
|
||||
proc validator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
check topic == "foobar"
|
||||
validatorFut.complete(true)
|
||||
result = ValidationResult.Accept
|
||||
|
||||
nodes[1].addValidator("foobar", validator)
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
|
||||
|
||||
check (await validatorFut) and (await handlerFut)
|
||||
|
||||
asyncTest "GossipSub validation should fail (reject)":
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check false # if we get here, it should fail
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
let gossip1 = GossipSub(nodes[0])
|
||||
let gossip2 = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
gossip1.mesh["foobar"].len == 1 and "foobar" notin gossip1.fanout
|
||||
gossip2.mesh["foobar"].len == 1 and "foobar" notin gossip2.fanout
|
||||
|
||||
var validatorFut = newFuture[bool]()
|
||||
proc validator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
result = ValidationResult.Reject
|
||||
validatorFut.complete(true)
|
||||
|
||||
nodes[1].addValidator("foobar", validator)
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
|
||||
|
||||
check (await validatorFut) == true
|
||||
|
||||
asyncTest "GossipSub validation should fail (ignore)":
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check false # if we get here, it should fail
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
|
||||
await waitSubGraph(nodes, "foobar")
|
||||
|
||||
let gossip1 = GossipSub(nodes[0])
|
||||
let gossip2 = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
gossip1.mesh["foobar"].len == 1 and "foobar" notin gossip1.fanout
|
||||
gossip2.mesh["foobar"].len == 1 and "foobar" notin gossip2.fanout
|
||||
|
||||
var validatorFut = newFuture[bool]()
|
||||
proc validator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
result = ValidationResult.Ignore
|
||||
validatorFut.complete(true)
|
||||
|
||||
nodes[1].addValidator("foobar", validator)
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
|
||||
|
||||
check (await validatorFut) == true
|
||||
|
||||
asyncTest "GossipSub validation one fails and one succeeds":
|
||||
var handlerFut = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foo"
|
||||
handlerFut.complete(true)
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[1].subscribe("foo", handler)
|
||||
nodes[1].subscribe("bar", handler)
|
||||
|
||||
var passed, failed: Future[bool] = newFuture[bool]()
|
||||
proc validator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
result =
|
||||
if topic == "foo":
|
||||
passed.complete(true)
|
||||
ValidationResult.Accept
|
||||
else:
|
||||
failed.complete(true)
|
||||
ValidationResult.Reject
|
||||
|
||||
nodes[1].addValidator("foo", "bar", validator)
|
||||
tryPublish await nodes[0].publish("foo", "Hello!".toBytes()), 1
|
||||
tryPublish await nodes[0].publish("bar", "Hello!".toBytes()), 1
|
||||
|
||||
check ((await passed) and (await failed) and (await handlerFut))
|
||||
|
||||
let gossip1 = GossipSub(nodes[0])
|
||||
let gossip2 = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
"foo" notin gossip1.mesh and gossip1.fanout["foo"].len == 1
|
||||
"foo" notin gossip2.mesh and "foo" notin gossip2.fanout
|
||||
"bar" notin gossip1.mesh and gossip1.fanout["bar"].len == 1
|
||||
"bar" notin gossip2.mesh and "bar" notin gossip2.fanout
|
||||
|
||||
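  # The mesh/fanout split asserted above: publishing to a topic we are not
  # subscribed to goes through `fanout` (node 0 publishes "foo"/"bar" without
  # subscribing), while `mesh` is reserved for topics we have joined.
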
asyncTest "GossipSub's observers should run after message is sent, received and validated":
|
||||
var
|
||||
recvCounter = 0
|
||||
sendCounter = 0
|
||||
validatedCounter = 0
|
||||
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
discard
|
||||
|
||||
proc onRecv(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
inc recvCounter
|
||||
|
||||
proc onSend(peer: PubSubPeer, msgs: var RPCMsg) =
|
||||
inc sendCounter
|
||||
|
||||
proc onValidated(peer: PubSubPeer, msg: Message, msgId: MessageId) =
|
||||
inc validatedCounter
|
||||
|
||||
let obs0 = PubSubObserver(onSend: onSend)
|
||||
let obs1 = PubSubObserver(onRecv: onRecv, onValidated: onValidated)
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].addObserver(obs0)
|
||||
nodes[1].addObserver(obs1)
|
||||
nodes[1].subscribe("foo", handler)
|
||||
nodes[1].subscribe("bar", handler)
|
||||
|
||||
proc validator(
|
||||
topic: string, message: Message
|
||||
): Future[ValidationResult] {.async.} =
|
||||
result = if topic == "foo": ValidationResult.Accept else: ValidationResult.Reject
|
||||
|
||||
nodes[1].addValidator("foo", "bar", validator)
|
||||
|
||||
# Send message that will be accepted by the receiver's validator
|
||||
tryPublish await nodes[0].publish("foo", "Hello!".toBytes()), 1
|
||||
|
||||
check:
|
||||
recvCounter == 1
|
||||
validatedCounter == 1
|
||||
sendCounter == 1
|
||||
|
||||
# Send message that will be rejected by the receiver's validator
|
||||
tryPublish await nodes[0].publish("bar", "Hello!".toBytes()), 1
|
||||
|
||||
check:
|
||||
recvCounter == 2
|
||||
validatedCounter == 1
|
||||
sendCounter == 2
|
||||
|
||||
asyncTest "e2e - GossipSub send over mesh A -> B":
|
||||
var passed: Future[bool] = newFuture[bool]()
|
||||
proc handler(topic: string, data: seq[byte]) {.async.} =
|
||||
check topic == "foobar"
|
||||
passed.complete(true)
|
||||
|
||||
let nodes = generateNodes(2, gossip = true)
|
||||
|
||||
startNodesAndDeferStop(nodes)
|
||||
await connectNodesStar(nodes)
|
||||
|
||||
nodes[0].subscribe("foobar", handler)
|
||||
nodes[1].subscribe("foobar", handler)
|
||||
await waitSub(nodes[0], nodes[1], "foobar")
|
||||
|
||||
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
|
||||
|
||||
check await passed
|
||||
|
||||
var gossip1: GossipSub = GossipSub(nodes[0])
|
||||
var gossip2: GossipSub = GossipSub(nodes[1])
|
||||
|
||||
check:
|
||||
"foobar" in gossip1.gossipsub
|
||||
"foobar" in gossip2.gossipsub
|
||||
gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
not gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||
gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
|
||||
not gossip2.fanout.hasPeerId("foobar", gossip1.peerInfo.peerId)
|
||||
|
||||
asyncTest "e2e - GossipSub should not send to source & peers who already seen":
|
||||
# 3 nodes: A, B, C
|
||||
# A publishes, C relays, B is having a long validation
|
||||
# so B should not send to anyone

    let nodes = generateNodes(3, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    var cRelayed: Future[void] = newFuture[void]()
    var bFinished: Future[void] = newFuture[void]()
    var
      aReceived = 0
      cReceived = 0
    proc handlerA(topic: string, data: seq[byte]) {.async.} =
      inc aReceived
      check aReceived < 2

    proc handlerB(topic: string, data: seq[byte]) {.async.} =
      discard

    proc handlerC(topic: string, data: seq[byte]) {.async.} =
      inc cReceived
      check cReceived < 2
      cRelayed.complete()

    nodes[0].subscribe("foobar", handlerA)
    nodes[1].subscribe("foobar", handlerB)
    nodes[2].subscribe("foobar", handlerC)
    await waitSubGraph(nodes, "foobar")

    var gossip1: GossipSub = GossipSub(nodes[0])
    var gossip2: GossipSub = GossipSub(nodes[1])
    var gossip3: GossipSub = GossipSub(nodes[2])

    proc slowValidator(
        topic: string, message: Message
    ): Future[ValidationResult] {.async.} =
      try:
        await cRelayed
        # Empty A & C caches to detect duplicates
        gossip1.seen = TimedCache[SaltedId].init()
        gossip3.seen = TimedCache[SaltedId].init()
        let msgId = toSeq(gossip2.validationSeen.keys)[0]
        checkUntilTimeout(
          try:
            gossip2.validationSeen[msgId].len > 0
          except KeyError:
            false
        )
        result = ValidationResult.Accept
        bFinished.complete()
      except CatchableError:
        raiseAssert "err on slowValidator"

    nodes[1].addValidator("foobar", slowValidator)

    checkUntilTimeout:
      gossip1.mesh.getOrDefault("foobar").len == 2
      gossip2.mesh.getOrDefault("foobar").len == 2
      gossip3.mesh.getOrDefault("foobar").len == 2
    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 2

    await bFinished
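
  # A stand-alone sketch (hypothetical helper, not part of this suite) of the
  # dedup rule the test above exercises: a message is relayed only on its
  # first sighting, which is why emptying the `seen` caches above is what
  # lets duplicates be detected at A and C.
  proc firstSighting(seenIds: var seq[string], msgId: string): bool =
    if msgId in seenIds:
      false # duplicate: drop instead of relaying
    else:
      seenIds.add(msgId)
      true # first sighting: relay to mesh peers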

  asyncTest "e2e - GossipSub send over floodPublish A -> B":
    var passed: Future[bool] = newFuture[bool]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      passed.complete(true)

    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)

    var gossip1: GossipSub = GossipSub(nodes[0])
    gossip1.parameters.floodPublish = true
    var gossip2: GossipSub = GossipSub(nodes[1])
    gossip2.parameters.floodPublish = true

    await connectNodesStar(nodes)

    # nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)
    await waitSub(nodes[0], nodes[1], "foobar")

    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1

    check await passed.wait(10.seconds)

    check:
      "foobar" in gossip1.gossipsub
      "foobar" notin gossip2.gossipsub
      not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)

  asyncTest "e2e - GossipSub floodPublish limit":
    let
      nodes = setupNodes(20)
      gossip1 = GossipSub(nodes[0])

    gossip1.parameters.floodPublish = true
    gossip1.parameters.heartbeatInterval = milliseconds(700)

    startNodesAndDeferStop(nodes)
    await connectNodes(nodes[1 ..^ 1], nodes[0])
    await baseTestProcedure(nodes, gossip1, gossip1.parameters.dLow, 17)

  asyncTest "e2e - GossipSub floodPublish limit with bandwidthEstimatebps = 0":
    let
      nodes = setupNodes(20)
      gossip1 = GossipSub(nodes[0])

    gossip1.parameters.floodPublish = true
    gossip1.parameters.heartbeatInterval = milliseconds(700)
    gossip1.parameters.bandwidthEstimatebps = 0

    startNodesAndDeferStop(nodes)
    await connectNodes(nodes[1 ..^ 1], nodes[0])
    await baseTestProcedure(nodes, gossip1, nodes.len - 1, nodes.len - 1)

  asyncTest "e2e - GossipSub with multiple peers":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    var seen: Table[string, int]
    var seenFut = newFuture[void]()
    for i in 0 ..< nodes.len:
      let dialer = nodes[i]
      var handler: TopicHandler
      closureScope:
        var peerName = $dialer.peerInfo.peerId
        handler = proc(topic: string, data: seq[byte]) {.async.} =
          seen.mgetOrPut(peerName, 0).inc()
          check topic == "foobar"
          if not seenFut.finished() and seen.len >= runs:
            seenFut.complete()

      dialer.subscribe("foobar", handler)
    await waitSubGraph(nodes, "foobar")

    tryPublish await wait(
      nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
      1.minutes,
    ), 1

    await wait(seenFut, 1.minutes)
    check:
      seen.len >= runs
    for k, v in seen.pairs:
      check:
        v >= 1

    for node in nodes:
      var gossip = GossipSub(node)

      check:
        "foobar" in gossip.gossipsub

  asyncTest "e2e - GossipSub with multiple peers (sparse)":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)

    startNodesAndDeferStop(nodes)
    await connectNodesSparse(nodes)

    var seen: Table[string, int]
    var seenFut = newFuture[void]()

    for i in 0 ..< nodes.len:
      let dialer = nodes[i]
      var handler: TopicHandler
      capture dialer, i:
        var peerName = $dialer.peerInfo.peerId
        handler = proc(topic: string, data: seq[byte]) {.async.} =
          try:
            if peerName notin seen:
              seen[peerName] = 0
            seen[peerName].inc
          except KeyError:
            raiseAssert "seen checked before"
          check topic == "foobar"
          if not seenFut.finished() and seen.len >= runs:
            seenFut.complete()

      dialer.subscribe("foobar", handler)

    await waitSubGraph(nodes, "foobar")
    tryPublish await wait(
      nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
      1.minutes,
    ), 1

    await wait(seenFut, 60.seconds)
    check:
      seen.len >= runs
    for k, v in seen.pairs:
      check:
        v >= 1

    for node in nodes:
      var gossip = GossipSub(node)
      check:
        "foobar" in gossip.gossipsub
        gossip.fanout.len == 0
        gossip.mesh["foobar"].len > 0

  asyncTest "e2e - GossipSub with multiple peers - control deliver (sparse)":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)

    startNodesAndDeferStop(nodes)
    await connectNodesSparse(nodes)

    var seen: Table[string, int]
    var seenFut = newFuture[void]()
    for i in 0 ..< nodes.len:
      let dialer = nodes[i]
      let dgossip = GossipSub(dialer)
      dgossip.parameters.dHigh = 2
      dgossip.parameters.dLow = 1
      dgossip.parameters.d = 1
      dgossip.parameters.dOut = 1
      var handler: TopicHandler
      closureScope:
        var peerName = $dialer.peerInfo.peerId
        handler = proc(topic: string, data: seq[byte]) {.async.} =
          seen.mgetOrPut(peerName, 0).inc()
          info "seen up", count = seen.len
          check topic == "foobar"
          if not seenFut.finished() and seen.len >= runs:
            seenFut.complete()

      dialer.subscribe("foobar", handler)
      await waitSub(nodes[0], dialer, "foobar")

    # we want to test ping-pong deliveries via control IHave/IWant,
    # so we publish to only a few peers
    let publishedTo = nodes[0].publish(
      "foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)
    ).await
    check:
      publishedTo != 0
      publishedTo != runs

    await wait(seenFut, 5.minutes)
    check:
      seen.len >= runs
    for k, v in seen.pairs:
      check:
        v >= 1
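
  # Rough sketch (hypothetical helper, not the library's API) of the IWant
  # selection behind the ping-pong deliveries above: a peer replies with
  # IWant only for advertised message ids it has not seen yet.
  proc wantList(advertised, seenIds: seq[string]): seq[string] =
    for id in advertised:
      if id notin seenIds:
        result.add(id) # ids to request via IWant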

  asyncTest "GossipSub directPeers: always forward messages":
    let nodes = generateNodes(3, gossip = true)

    startNodesAndDeferStop(nodes)

    await GossipSub(nodes[0]).addDirectPeer(
      nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
    )
    await GossipSub(nodes[1]).addDirectPeer(
      nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
    )
    await GossipSub(nodes[1]).addDirectPeer(
      nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs
    )
    await GossipSub(nodes[2]).addDirectPeer(
      nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
    )

    var handlerFut = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      handlerFut.complete()

    proc noop(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"

    nodes[0].subscribe("foobar", noop)
    nodes[1].subscribe("foobar", noop)
    nodes[2].subscribe("foobar", handler)

    tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1

    await handlerFut.wait(2.seconds)

    # direct peers are not part of the mesh, so the topic
    # shouldn't appear in any node's mesh table
    check "foobar" notin GossipSub(nodes[0]).mesh
    check "foobar" notin GossipSub(nodes[1]).mesh
    check "foobar" notin GossipSub(nodes[2]).mesh

  asyncTest "GossipSub directPeers: send message to unsubscribed direct peer":
    # Given 2 nodes
    let
      numberOfNodes = 2
      nodes = generateNodes(numberOfNodes, gossip = true)
      node0 = nodes[0]
      node1 = nodes[1]
      g0 = GossipSub(node0)
      g1 = GossipSub(node1)

    startNodesAndDeferStop(nodes)

    # With message observers
    var
      messageReceived0 = newFuture[bool]()
      messageReceived1 = newFuture[bool]()

    proc observer0(peer: PubSubPeer, msgs: var RPCMsg) =
      for message in msgs.messages:
        if message.topic == "foobar":
          messageReceived0.complete(true)

    proc observer1(peer: PubSubPeer, msgs: var RPCMsg) =
      for message in msgs.messages:
        if message.topic == "foobar":
          messageReceived1.complete(true)

    node0.addObserver(PubSubObserver(onRecv: observer0))
    node1.addObserver(PubSubObserver(onRecv: observer1))

    # Connect them as direct peers
    await g0.addDirectPeer(node1.peerInfo.peerId, node1.peerInfo.addrs)
    await g1.addDirectPeer(node0.peerInfo.peerId, node0.peerInfo.addrs)

    # When node 0 sends a message
    let message = "Hello!".toBytes()
    let publishResult = await node0.publish("foobar", message)

    # Neither node should receive the message, as neither is subscribed to the topic
    let results = await waitForStates(@[messageReceived0, messageReceived1])
    check:
      publishResult == 0
      results[0].isPending()
      results[1].isPending()
@@ -1,361 +0,0 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import sequtils, tables, sets, sugar
import chronos, stew/byteutils
import chronicles
import metrics
import
  utils,
  ../../libp2p/[
    protocols/pubsub/pubsub,
    protocols/pubsub/gossipsub,
    protocols/pubsub/peertable,
    protocols/pubsub/rpc/messages,
  ]
import ../helpers, ../utils/[futures]
from ../../libp2p/protocols/pubsub/mcache import window

proc voidTopicHandler(topic: string, data: seq[byte]) {.async.} =
  discard

proc createCompleteHandler(): (
  Future[bool], proc(topic: string, data: seq[byte]) {.async.}
) =
  var fut = newFuture[bool]()
  proc handler(topic: string, data: seq[byte]) {.async.} =
    fut.complete(true)

  return (fut, handler)

proc addIHaveObservers(nodes: seq[auto], topic: string, receivedIHaves: ref seq[int]) =
  let numberOfNodes = nodes.len
  receivedIHaves[] = repeat(0, numberOfNodes)

  for i in 0 ..< numberOfNodes:
    var pubsubObserver: PubSubObserver
    capture i:
      let checkForIhaves = proc(peer: PubSubPeer, msgs: var RPCMsg) =
        if msgs.control.isSome:
          let iHave = msgs.control.get.ihave
          if iHave.len > 0:
            for msg in iHave:
              if msg.topicID == topic:
                receivedIHaves[i] += 1
      pubsubObserver = PubSubObserver(onRecv: checkForIhaves)
    nodes[i].addObserver(pubsubObserver)

proc addIDontWantObservers(nodes: seq[auto], receivedIDontWants: ref seq[int]) =
  let numberOfNodes = nodes.len
  receivedIDontWants[] = repeat(0, numberOfNodes)

  for i in 0 ..< numberOfNodes:
    var pubsubObserver: PubSubObserver
    capture i:
      let checkForIDontWant = proc(peer: PubSubPeer, msgs: var RPCMsg) =
        if msgs.control.isSome:
          let iDontWant = msgs.control.get.idontwant
          if iDontWant.len > 0:
            receivedIDontWants[i] += 1
      pubsubObserver = PubSubObserver(onRecv: checkForIDontWant)
    nodes[i].addObserver(pubsubObserver)

suite "Gossipsub Parameters":
  teardown:
    checkTrackers()

  asyncTest "dont prune peers if mesh len is less than d_high":
    let
      numberOfNodes = 5
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitSubAllNodes(nodes, topic)

    let expectedNumberOfPeers = numberOfNodes - 1
    for i in 0 ..< numberOfNodes:
      var gossip = GossipSub(nodes[i])
      check:
        gossip.gossipsub[topic].len == expectedNumberOfPeers
        gossip.mesh[topic].len == expectedNumberOfPeers
        gossip.fanout.len == 0

  asyncTest "prune peers if mesh len is higher than d_high":
    let
      numberofNodes = 15
      topic = "foobar"
      nodes = generateNodes(numberofNodes, gossip = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await waitSubAllNodes(nodes, topic)

    # Give it time for a heartbeat
    await sleepAsync(DURATION_TIMEOUT_EXTENDED)

    let
      expectedNumberOfPeers = numberofNodes - 1
      dHigh = 12
      d = 6
      dLow = 4

    for i in 0 ..< numberofNodes:
      var gossip = GossipSub(nodes[i])

      check:
        gossip.gossipsub[topic].len == expectedNumberOfPeers
        gossip.mesh[topic].len >= dLow and gossip.mesh[topic].len <= dHigh
        gossip.fanout.len == 0

  asyncTest "messages sent to peers not in the mesh are propagated via gossip":
    let
      numberOfNodes = 5
      topic = "foobar"
      dValues = DValues(dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1))
      nodes = generateNodes(numberOfNodes, gossip = true, dValues = some(dValues))

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iHave messages
    var receivedIHavesRef = new seq[int]
    addIHaveObservers(nodes, topic, receivedIHavesRef)

    # And are interconnected
    await connectNodesStar(nodes)

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await sleepAsync(DURATION_TIMEOUT)

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) > 0
    await sleepAsync(DURATION_TIMEOUT)

    # At least one of the nodes should have received an iHave message
    # The check is made this way because the mesh structure changes from run to run
    let receivedIHaves = receivedIHavesRef[]
    check:
      anyIt(receivedIHaves, it > 0)

  asyncTest "messages are not sent back to source or forwarding peer":
    let
      numberOfNodes = 3
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true)

    startNodesAndDeferStop(nodes)

    let (handlerFut0, handler0) = createCompleteHandler()
    let (handlerFut1, handler1) = createCompleteHandler()
    let (handlerFut2, handler2) = createCompleteHandler()

    # Nodes are connected in a ring
    await connectNodes(nodes[0], nodes[1])
    await connectNodes(nodes[1], nodes[2])
    await connectNodes(nodes[2], nodes[0])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, @[handler0, handler1, handler2])
    await sleepAsync(DURATION_TIMEOUT)

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) == 2
    await sleepAsync(DURATION_TIMEOUT)

    # Nodes 1 and 2 should receive the message, but node 0 shouldn't receive it back
    let results = await waitForStates(@[handlerFut0, handlerFut1, handlerFut2])
    check:
      results[0].isPending()
      results[1].isCompleted()
      results[2].isCompleted()

  asyncTest "flood publish to all peers with score above threshold, regardless of subscription":
    let
      numberOfNodes = 3
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true, floodPublish = true)
      g0 = GossipSub(nodes[0])

    startNodesAndDeferStop(nodes)

    # Nodes 1 and 2 are connected to node 0
    await connectNodes(nodes[0], nodes[1])
    await connectNodes(nodes[0], nodes[2])

    let (handlerFut1, handler1) = createCompleteHandler()
    let (handlerFut2, handler2) = createCompleteHandler()

    # Nodes are subscribed to the same topic
    nodes[1].subscribe(topic, handler1)
    nodes[2].subscribe(topic, handler2)
    await sleepAsync(1.seconds)

    # Given node 2's score is below the threshold
    for peer in g0.gossipsub.getOrDefault(topic):
      if peer.peerId == nodes[2].peerInfo.peerId:
        peer.score = (g0.parameters.publishThreshold - 1)

    # When node 0 publishes a message to topic "foobar"
    let message = "Hello!".toBytes()
    check (await nodes[0].publish(topic, message)) == 1
    await sleepAsync(3.seconds)

    # Then only node 1 should receive the message
    let results = await waitForStates(@[handlerFut1, handlerFut2])
    check:
      results[0].isCompleted(true)
      results[1].isPending()

  asyncTest "adaptive gossip dissemination, dLazy and gossipFactor to 0":
    let
      numberOfNodes = 20
      topic = "foobar"
      dValues = DValues(
        dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(0)
      )
      nodes = generateNodes(
        numberOfNodes,
        gossip = true,
        dValues = some(dValues),
        gossipFactor = some(0.float),
      )

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iHave messages
    var receivedIHavesRef = new seq[int]
    addIHaveObservers(nodes, topic, receivedIHavesRef)

    # And are connected to node 0
    for i in 1 ..< numberOfNodes:
      await connectNodes(nodes[0], nodes[i])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await sleepAsync(DURATION_TIMEOUT)

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) == 3
    await sleepAsync(DURATION_TIMEOUT)

    # None of the nodes should have received an iHave message
    let receivedIHaves = receivedIHavesRef[]
    check:
      filterIt(receivedIHaves, it > 0).len == 0

  asyncTest "adaptive gossip dissemination, with gossipFactor priority":
    let
      numberOfNodes = 20
      topic = "foobar"
      dValues = DValues(
        dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(4)
      )
      nodes = generateNodes(
        numberOfNodes, gossip = true, dValues = some(dValues), gossipFactor = some(0.5)
      )

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iHave messages
    var receivedIHavesRef = new seq[int]
    addIHaveObservers(nodes, topic, receivedIHavesRef)

    # And are connected to node 0
    for i in 1 ..< numberOfNodes:
      await connectNodes(nodes[0], nodes[i])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await sleepAsync(DURATION_TIMEOUT)

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) == 3
    await sleepAsync(DURATION_TIMEOUT)

    # At least 8 of the nodes should have received an iHave message
    # That's because the gossip factor is 0.5 over 16 available nodes
    let receivedIHaves = receivedIHavesRef[]
    check:
      filterIt(receivedIHaves, it > 0).len >= 8

  asyncTest "adaptive gossip dissemination, with dLazy priority":
    let
      numberOfNodes = 20
      topic = "foobar"
      dValues = DValues(
        dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(6)
      )
      nodes = generateNodes(
        numberOfNodes,
        gossip = true,
        dValues = some(dValues),
        gossipFactor = some(0.float),
      )

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iHave messages
    var receivedIHavesRef = new seq[int]
    addIHaveObservers(nodes, topic, receivedIHavesRef)

    # And are connected to node 0
    for i in 1 ..< numberOfNodes:
      await connectNodes(nodes[0], nodes[i])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await sleepAsync(DURATION_TIMEOUT)

    # When node 0 sends a message
    check (await nodes[0].publish(topic, "Hello!".toBytes())) == 3
    await sleepAsync(DURATION_TIMEOUT)

    # Exactly 6 of the nodes should have received an iHave message,
    # because dLazy is 6 and gossipFactor is 0
    let receivedIHaves = receivedIHavesRef[]
    check:
      filterIt(receivedIHaves, it > 0).len == dValues.dLazy.get()
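
  # Sketch of the adaptive gossip rule (GossipSub v1.1) that the three tests
  # above triangulate: per heartbeat, iHave is emitted to
  # max(dLazy, gossipFactor * candidate peers) targets, so whichever knob is
  # larger dominates.
  proc gossipTargetCount(dLazy: int, gossipFactor: float, candidates: int): int =
    max(dLazy, int(gossipFactor * float(candidates)))

  # dLazy = 0, factor = 0.0, 16 candidates -> 0 targets (first test)
  # dLazy = 4, factor = 0.5, 16 candidates -> 8 targets (second test)
  # dLazy = 6, factor = 0.0, 16 candidates -> 6 targets (third test)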

  asyncTest "iDontWant messages are broadcast immediately after receiving the first message instance":
    let
      numberOfNodes = 3
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true)

    startNodesAndDeferStop(nodes)

    # All nodes are checking for iDontWant messages
    var receivedIDontWantsRef = new seq[int]
    addIDontWantObservers(nodes, receivedIDontWantsRef)

    # And are connected in a line
    await connectNodes(nodes[0], nodes[1])
    await connectNodes(nodes[1], nodes[2])

    # And subscribed to the same topic
    subscribeAllNodes(nodes, topic, voidTopicHandler)
    await sleepAsync(DURATION_TIMEOUT)

    # When node 0 sends a large message
    let largeMsg = newSeq[byte](1000)
    check (await nodes[0].publish(topic, largeMsg)) == 1
    await sleepAsync(DURATION_TIMEOUT)

    # Only node 2 should have received the iDontWant message
    let receivedIDontWants = receivedIDontWantsRef[]
    check:
      receivedIDontWants[0] == 0
      receivedIDontWants[1] == 0
      receivedIDontWants[2] == 1
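
  # Sketch (assumed semantics, GossipSub v1.2) of the iDontWant recipient
  # selection behind the counters above: on first receipt of a message above
  # the size threshold, a node notifies every mesh peer except the sender,
  # so in the 0-1-2 line node 1 only tells node 2.
  proc idontwantTargets(meshPeers: seq[string], sender: string): seq[string] =
    for p in meshPeers:
      if p != sender:
        result.add(p)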
418
tests/pubsub/testgossipsubscoring.nim
Normal file
@@ -0,0 +1,418 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.used.}

import std/[sequtils]
import stew/byteutils
import metrics
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../../libp2p/muxers/muxer
import ../helpers, ../utils/[futures]

suite "GossipSub Scoring":
  teardown:
    checkTrackers()

  asyncTest "Disconnect bad peers":
    let gossipSub = TestGossipSub.init(newStandardSwitch())
    gossipSub.parameters.disconnectBadPeers = true
    gossipSub.parameters.appSpecificWeight = 1.0
    proc handler(peer: PubSubPeer, data: seq[byte]) {.async: (raises: []).} =
      check false

    let topic = "foobar"
    var conns = newSeq[Connection]()
    for i in 0 ..< 30:
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerId = randomPeerId()
      conn.peerId = peerId
      let peer = gossipSub.getPubSubPeer(peerId)
      peer.sendConn = conn
      peer.handler = handler
      peer.appScore = gossipSub.parameters.graylistThreshold - 1
      gossipSub.gossipsub.mgetOrPut(topic, initHashSet[PubSubPeer]()).incl(peer)
      gossipSub.switch.connManager.storeMuxer(Muxer(connection: conn))

    gossipSub.updateScores()

    await sleepAsync(100.millis)

    check:
      # test our disconnect mechanics
      gossipSub.gossipsub.peers(topic) == 0
      # also ensure we properly clean up the peersInIP table
      gossipSub.peersInIP.len == 0

    await allFuturesThrowing(conns.mapIt(it.close()))
    await gossipSub.switch.stop()

  asyncTest "flood publish to all peers with score above threshold, regardless of subscription":
    let
      numberOfNodes = 3
      topic = "foobar"
      nodes = generateNodes(numberOfNodes, gossip = true, floodPublish = true)
      g0 = GossipSub(nodes[0])

    startNodesAndDeferStop(nodes)

    # Nodes 1 and 2 are connected to node 0
    await connectNodes(nodes[0], nodes[1])
    await connectNodes(nodes[0], nodes[2])

    let (handlerFut1, handler1) = createCompleteHandler()
    let (handlerFut2, handler2) = createCompleteHandler()

    # Nodes are subscribed to the same topic
    nodes[1].subscribe(topic, handler1)
    nodes[2].subscribe(topic, handler2)
    await waitForHeartbeat()

    # Given node 2's score is below the threshold
    for peer in g0.gossipsub.getOrDefault(topic):
      if peer.peerId == nodes[2].peerInfo.peerId:
        peer.score = (g0.parameters.publishThreshold - 1)

    # When node 0 publishes a message to topic "foobar"
    let message = "Hello!".toBytes()
    check (await nodes[0].publish(topic, message)) == 1
    await waitForHeartbeat(2)

    # Then only node 1 should receive the message
    let results = await waitForStates(@[handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
    check:
      results[0].isCompleted(true)
      results[1].isPending()

  proc initializeGossipTest(): Future[(seq[PubSub], GossipSub, GossipSub)] {.async.} =
    let nodes =
      generateNodes(2, gossip = true, overheadRateLimit = Opt.some((20, 1.millis)))

    await startNodes(nodes)
    await connectNodesStar(nodes)

    proc handle(topic: string, data: seq[byte]) {.async.} =
      discard

    let gossip0 = GossipSub(nodes[0])
    let gossip1 = GossipSub(nodes[1])

    gossip0.subscribe("foobar", handle)
    gossip1.subscribe("foobar", handle)
    await waitSubGraph(nodes, "foobar")

    # Avoid being disconnected by failing signature verification
    gossip0.verifySignature = false
    gossip1.verifySignature = false

    return (nodes, gossip0, gossip1)

  proc currentRateLimitHits(): float64 =
    try:
      libp2p_gossipsub_peers_rate_limit_hits.valueByName(
        "libp2p_gossipsub_peers_rate_limit_hits_total", @["nim-libp2p"]
      )
    except KeyError:
      0

  asyncTest "e2e - GossipSub should not rate limit decodable messages below the size allowed":
    let rateLimitHits = currentRateLimitHits()
    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    gossip0.broadcast(
      gossip0.mesh["foobar"],
      RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](10))]),
      isHighPriority = true,
    )
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    gossip0.broadcast(
      gossip0.mesh["foobar"],
      RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](12))]),
      isHighPriority = true,
    )
    await waitForHeartbeat()

    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
    check currentRateLimitHits() == rateLimitHits

    await stopNodes(nodes)
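
  # Toy model (assumed semantics, not the library's implementation) of the
  # (20, 1.millis) overhead budget set in initializeGossipTest: bytes that do
  # not decode into useful traffic draw down the budget within each window,
  # so the 10- and 12-byte valid payloads above never register a hit, while
  # the 33+ byte garbage frames in the following tests do.
  proc overBudget(overheadBytes, budgetBytes: int): bool =
    overheadBytes > budgetBytes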

  asyncTest "e2e - GossipSub should rate limit undecodable messages above the size allowed":
    let rateLimitHits = currentRateLimitHits()

    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    # Simulate sending an undecodable message
    await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(
      newSeqWith(33, 1.byte), isHighPriority = true
    )
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits + 1
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(
      newSeqWith(35, 1.byte), isHighPriority = true
    )

    checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
    check currentRateLimitHits() == rateLimitHits + 2

    await stopNodes(nodes)

  asyncTest "e2e - GossipSub should rate limit decodable messages above the size allowed":
    let rateLimitHits = currentRateLimitHits()
    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    let msg = RPCMsg(
      control: some(
        ControlMessage(
          prune:
            @[
              ControlPrune(
                topicID: "foobar",
                peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))],
                backoff: 123'u64,
              )
            ]
        )
      )
    )
    gossip0.broadcast(gossip0.mesh["foobar"], msg, isHighPriority = true)
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits + 1
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    let msg2 = RPCMsg(
      control: some(
        ControlMessage(
          prune:
            @[
              ControlPrune(
                topicID: "foobar",
                peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))],
                backoff: 123'u64,
              )
            ]
        )
      )
    )
    gossip0.broadcast(gossip0.mesh["foobar"], msg2, isHighPriority = true)

    checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
    check currentRateLimitHits() == rateLimitHits + 2

    await stopNodes(nodes)

  asyncTest "e2e - GossipSub should rate limit invalid messages above the size allowed":
    let rateLimitHits = currentRateLimitHits()
    let (nodes, gossip0, gossip1) = await initializeGossipTest()

    let topic = "foobar"
    proc execValidator(
        topic: string, message: messages.Message
    ): Future[ValidationResult] {.async: (raw: true).} =
      let res = newFuture[ValidationResult]()
      res.complete(ValidationResult.Reject)
      res

    gossip0.addValidator(topic, execValidator)
    gossip1.addValidator(topic, execValidator)

    let msg = RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](40))])

    gossip0.broadcast(gossip0.mesh[topic], msg, isHighPriority = true)
    await waitForHeartbeat()

    check currentRateLimitHits() == rateLimitHits + 1
    check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true

    # Disconnect peer when rate limiting is enabled
    gossip1.parameters.disconnectPeerAboveRateLimit = true
    gossip0.broadcast(
      gossip0.mesh[topic],
      RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](35))]),
      isHighPriority = true,
    )

    checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
    check currentRateLimitHits() == rateLimitHits + 2

    await stopNodes(nodes)

  asyncTest "GossipSub directPeers: don't kick direct peer with low score":
    let nodes = generateNodes(2, gossip = true)

    startNodesAndDeferStop(nodes)

    await GossipSub(nodes[0]).addDirectPeer(
      nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
    )
    await GossipSub(nodes[1]).addDirectPeer(
      nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs
    )

    GossipSub(nodes[1]).parameters.disconnectBadPeers = true
    GossipSub(nodes[1]).parameters.graylistThreshold = 100000

    var handlerFut = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"
      handlerFut.complete()

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1

    await handlerFut

    GossipSub(nodes[1]).updateScores()
    # node 0's score should now be below the graylist threshold
    check:
      GossipSub(nodes[1]).peerStats[nodes[0].switch.peerInfo.peerId].score <
        GossipSub(nodes[1]).parameters.graylistThreshold
    GossipSub(nodes[1]).updateScores()

    handlerFut = newFuture[void]()
    tryPublish await nodes[0].publish("foobar", toBytes("hellow2")), 1

    # Without directPeers, this would fail
    await handlerFut.wait(1.seconds)

  asyncTest "GossipSub peers disconnections mechanics":
    var runs = 10

    let nodes = generateNodes(runs, gossip = true, triggerSelf = true)

    startNodesAndDeferStop(nodes)
    await connectNodesStar(nodes)

    var seen: Table[string, int]
    var seenFut = newFuture[void]()
    for i in 0 ..< nodes.len:
      let dialer = nodes[i]
      var handler: TopicHandler
      closureScope:
        var peerName = $dialer.peerInfo.peerId
        handler = proc(topic: string, data: seq[byte]) {.async.} =
          seen.mgetOrPut(peerName, 0).inc()
          check topic == "foobar"
          if not seenFut.finished() and seen.len >= runs:
            seenFut.complete()

      dialer.subscribe("foobar", handler)

    await waitSubGraph(nodes, "foobar")

    # ensure peer stats are stored and kept properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

    tryPublish await wait(
      nodes[0].publish("foobar", toBytes("from node " & $nodes[0].peerInfo.peerId)),
      1.minutes,
    ), 1, 5.seconds, 3.minutes

    await wait(seenFut, 5.minutes)
    check:
      seen.len >= runs
    for k, v in seen.pairs:
      check:
        v >= 1

    for node in nodes:
      var gossip = GossipSub(node)
      check:
        "foobar" in gossip.gossipsub
        gossip.fanout.len == 0
        gossip.mesh["foobar"].len > 0

    # Removing some subscriptions

    for i in 0 ..< runs:
      if i mod 3 != 0:
        nodes[i].unsubscribeAll("foobar")

    # Waiting 2 heartbeats

    for _ in 0 .. 1:
      let evnt = newAsyncEvent()
      GossipSub(nodes[0]).heartbeatEvents &= evnt
      await evnt.wait()

    # ensure peer stats are stored and kept properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

    # Adding subscriptions again

    proc handler(topic: string, data: seq[byte]) {.async.} =
      check topic == "foobar"

    for i in 0 ..< runs:
      if i mod 3 != 0:
        nodes[i].subscribe("foobar", handler)

    # Waiting 2 heartbeats

    for _ in 0 .. 1:
      let evnt = newAsyncEvent()
      GossipSub(nodes[0]).heartbeatEvents &= evnt
      await evnt.wait()

    # ensure peer stats are stored and kept properly
    check:
      GossipSub(nodes[0]).peerStats.len == runs - 1 # minus self

  asyncTest "GossipSub scoring - decayInterval":
    let nodes = generateNodes(2, gossip = true)

    var gossip = GossipSub(nodes[0])
    const testDecayInterval = 50.milliseconds
    gossip.parameters.decayInterval = testDecayInterval

    startNodesAndDeferStop(nodes)

    var handlerFut = newFuture[void]()
    proc handler(topic: string, data: seq[byte]) {.async.} =
      handlerFut.complete()

    await connectNodesStar(nodes)

    nodes[0].subscribe("foobar", handler)
    nodes[1].subscribe("foobar", handler)

    tryPublish await nodes[0].publish("foobar", toBytes("hello")), 1

    await handlerFut

    gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries =
      100
    gossip.topicParams["foobar"].meshMessageDeliveriesDecay = 0.9

    # We should have decayed about 5 times; allow 4..6
    await sleepAsync(testDecayInterval * 5)
    check:
      gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries in
        50.0 .. 66.0
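
  # Worked numbers behind the 50.0 .. 66.0 window above: starting from 100
  # with a 0.9 decay per interval,
  #   100 * 0.9^4 ~= 65.61 (only 4 decays)
  #   100 * 0.9^5 ~= 59.05 (the expected 5 decays)
  #   100 * 0.9^6 ~= 53.14 (6 decays, still above 50.0)
  # so one interval of scheduling jitter either way stays inside the range.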
@@ -1,5 +1,6 @@
{.used.}

import
  testfloodsub, testgossipsub, testgossipsub2, testgossipsubparameters, testmcache,
  testgossipsubfanout, testgossipsubgossip, testgossipsubmeshmanagement,
  testgossipsubmessagehandling, testgossipsubscoring, testfloodsub, testmcache,
  testtimedcache, testmessage

@@ -4,7 +4,7 @@ const
  libp2p_pubsub_verify {.booldefine.} = true
  libp2p_pubsub_anonymize {.booldefine.} = false

import hashes, random, tables, sets, sequtils
import hashes, random, tables, sets, sequtils, sugar
import chronos, stew/[byteutils, results], chronos/ratelimit
import
  ../../libp2p/[
@@ -12,18 +12,26 @@ import
    protocols/pubsub/errors,
    protocols/pubsub/pubsub,
    protocols/pubsub/pubsubpeer,
    protocols/pubsub/peertable,
    protocols/pubsub/gossipsub,
    protocols/pubsub/floodsub,
    protocols/pubsub/rpc/messages,
    protocols/secure/secure,
  ]
import ../helpers
import ../helpers, ../utils/futures
import chronicles

export builders

randomize()

const TEST_GOSSIPSUB_HEARTBEAT_INTERVAL* = 60.milliseconds
const HEARTBEAT_TIMEOUT* = # TEST_GOSSIPSUB_HEARTBEAT_INTERVAL + 20%
  int64(float64(TEST_GOSSIPSUB_HEARTBEAT_INTERVAL.milliseconds) * 1.2).milliseconds

proc waitForHeartbeat*(multiplier: int = 1) {.async.} =
  await sleepAsync(HEARTBEAT_TIMEOUT * multiplier)
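
# Worked example for the constants above (values follow from the defaults):
#   HEARTBEAT_TIMEOUT         = 60 ms * 1.2 = 72 ms
#   await waitForHeartbeat()  # sleeps 72 ms: one heartbeat plus 20% slack
#   await waitForHeartbeat(2) # sleeps 144 ms: covers two heartbeats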

type
  TestGossipSub* = ref object of GossipSub
  DValues* = object
@@ -70,7 +78,7 @@ func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
      $m.data.hash & $m.topic.hash
  ok mid.toBytes()

proc applyDValues(parameters: var GossipSubParams, dValues: Option[DValues]) =
proc applyDValues*(parameters: var GossipSubParams, dValues: Option[DValues]) =
  if dValues.isNone:
    return
  let values = dValues.get
@@ -105,6 +113,7 @@ proc generateNodes*(
      Opt.none(tuple[bytes: int, interval: Duration]),
    gossipSubVersion: string = "",
    sendIDontWantOnPublish: bool = false,
    heartbeatInterval: Duration = TEST_GOSSIPSUB_HEARTBEAT_INTERVAL,
    floodPublish: bool = false,
    dValues: Option[DValues] = DValues.none(),
    gossipFactor: Option[float] = float.none(),
@@ -125,6 +134,7 @@ proc generateNodes*(
        maxMessageSize = maxMessageSize,
        parameters = (
          var p = GossipSubParams.init()
          p.heartbeatInterval = heartbeatInterval
          p.floodPublish = floodPublish
          p.historyLength = 20
          p.historyGossip = 20
@@ -158,18 +168,21 @@ proc generateNodes*(
    switch.mount(pubsub)
    result.add(pubsub)

proc connectNodes*(dialer: PubSub, target: PubSub) {.async.} =
proc toGossipSub*(nodes: seq[PubSub]): seq[GossipSub] =
  return nodes.mapIt(GossipSub(it))

proc connectNodes*[T: PubSub](dialer: T, target: T) {.async.} =
  doAssert dialer.switch.peerInfo.peerId != target.switch.peerInfo.peerId,
    "Could not connect same peer"
  await dialer.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)

proc connectNodesStar*(nodes: seq[PubSub]) {.async.} =
proc connectNodesStar*[T: PubSub](nodes: seq[T]) {.async.} =
  for dialer in nodes:
    for node in nodes:
      if dialer.switch.peerInfo.peerId != node.switch.peerInfo.peerId:
        await connectNodes(dialer, node)

proc connectNodesSparse*(nodes: seq[PubSub], degree: int = 2) {.async.} =
proc connectNodesSparse*[T: PubSub](nodes: seq[T], degree: int = 2) {.async.} =
  if nodes.len < degree:
    raise
      (ref CatchableError)(msg: "nodes count needs to be greater or equal to degree!")
@@ -260,26 +273,183 @@ proc waitForMesh*(
    trace "waitForMesh sleeping..."
    await activeWait(5.milliseconds, timeoutMoment, "waitForMesh timeout!")

proc startNodes*(nodes: seq[PubSub]) {.async.} =
type PeerTableType* {.pure.} = enum
  Gossipsub = "gossipsub"
  Mesh = "mesh"
  Fanout = "fanout"

proc waitForPeersInTable*(
    nodes: seq[auto],
    topic: string,
    peerCounts: seq[int],
    table: PeerTableType,
    timeout = 5.seconds,
) {.async.} =
  ## Wait until each node in `nodes` has at least the corresponding number of peers from `peerCounts`
  ## in the specified table (mesh, gossipsub, or fanout) for the given topic

  doAssert nodes.len == peerCounts.len, "Node count must match peer count expectations"

  # Helper proc to check current state and update satisfaction status
  proc checkState(
      nodes: seq[auto],
      topic: string,
      peerCounts: seq[int],
      table: PeerTableType,
      satisfied: var seq[bool],
  ): bool =
    for i in 0 ..< nodes.len:
      if not satisfied[i]:
        let fsub = GossipSub(nodes[i])
        let currentCount =
          case table
          of PeerTableType.Mesh:
            fsub.mesh.getOrDefault(topic).len
          of PeerTableType.Gossipsub:
            fsub.gossipsub.getOrDefault(topic).len
          of PeerTableType.Fanout:
            fsub.fanout.getOrDefault(topic).len
        satisfied[i] = currentCount >= peerCounts[i]
    return satisfied.allIt(it)

  let timeoutMoment = Moment.now() + timeout
  var
    satisfied = newSeq[bool](nodes.len)
    allSatisfied = false

  allSatisfied = checkState(nodes, topic, peerCounts, table, satisfied) # Initial check
  # Continue checking until all requirements are met or timeout
  while not allSatisfied:
    await activeWait(
      5.milliseconds,
      timeoutMoment,
      "Timeout waiting for peer counts in " & $table & " for topic " & topic,
    )
    allSatisfied = checkState(nodes, topic, peerCounts, table, satisfied)
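
# Usage sketch (hypothetical expectations): block until node 0 has at least
# 2 mesh peers and node 1 at least 1 on "foobar", or fail after the timeout:
#   await waitForPeersInTable(nodes, "foobar", @[2, 1], PeerTableType.Mesh)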

proc startNodes*[T: PubSub](nodes: seq[T]) {.async.} =
  await allFuturesThrowing(nodes.mapIt(it.switch.start()))

proc stopNodes*(nodes: seq[PubSub]) {.async.} =
proc stopNodes*[T: PubSub](nodes: seq[T]) {.async.} =
  await allFuturesThrowing(nodes.mapIt(it.switch.stop()))

template startNodesAndDeferStop*(nodes: seq[PubSub]): untyped =
template startNodesAndDeferStop*[T: PubSub](nodes: seq[T]): untyped =
  await startNodes(nodes)
  defer:
    await stopNodes(nodes)

proc subscribeAllNodes*(nodes: seq[PubSub], topic: string, topicHandler: TopicHandler) =
proc subscribeAllNodes*[T: PubSub](
    nodes: seq[T], topic: string, topicHandler: TopicHandler
) =
  for node in nodes:
    node.subscribe(topic, topicHandler)

proc subscribeAllNodes*(
    nodes: seq[PubSub], topic: string, topicHandlers: seq[TopicHandler]
proc subscribeAllNodes*[T: PubSub](
    nodes: seq[T], topic: string, topicHandlers: seq[TopicHandler]
) =
  if nodes.len != topicHandlers.len:
    raise (ref CatchableError)(msg: "nodes and topicHandlers count needs to match!")

  for i in 0 ..< nodes.len:
    nodes[i].subscribe(topic, topicHandlers[i])

template tryPublish*(
    call: untyped, require: int, wait = 10.milliseconds, timeout = 10.seconds
): untyped =
  var
    expiration = Moment.now() + timeout
    pubs = 0
  while pubs < require and Moment.now() < expiration:
    pubs = pubs + call
    await sleepAsync(wait)

  doAssert pubs >= require, "Failed to publish!"
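
# Usage sketch (values hypothetical): retry until at least one peer has
# accepted the publish, polling every 10 ms for up to 10 s:
#   tryPublish await nodes[0].publish("foobar", "hi".toBytes()), 1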

proc noop*(data: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
  discard

proc voidTopicHandler*(topic: string, data: seq[byte]) {.async.} =
  discard

proc createCompleteHandler*(): (
  Future[bool], proc(topic: string, data: seq[byte]) {.async.}
) =
  var fut = newFuture[bool]()
  proc handler(topic: string, data: seq[byte]) {.async.} =
    fut.complete(true)

  return (fut, handler)

proc addIHaveObservers*(nodes: seq[auto], topic: string, receivedIHaves: ref seq[int]) =
  let numberOfNodes = nodes.len
  receivedIHaves[] = repeat(0, numberOfNodes)

  for i in 0 ..< numberOfNodes:
    var pubsubObserver: PubSubObserver
    capture i:
      let checkForIhaves = proc(peer: PubSubPeer, msgs: var RPCMsg) =
        if msgs.control.isSome:
          let iHave = msgs.control.get.ihave
          if iHave.len > 0:
            for msg in iHave:
              if msg.topicID == topic:
                receivedIHaves[i] += 1
      pubsubObserver = PubSubObserver(onRecv: checkForIhaves)
    nodes[i].addObserver(pubsubObserver)

proc addIDontWantObservers*(nodes: seq[auto], receivedIDontWants: ref seq[int]) =
  let numberOfNodes = nodes.len
  receivedIDontWants[] = repeat(0, numberOfNodes)

  for i in 0 ..< numberOfNodes:
    var pubsubObserver: PubSubObserver
    capture i:
      let checkForIDontWant = proc(peer: PubSubPeer, msgs: var RPCMsg) =
        if msgs.control.isSome:
          let iDontWant = msgs.control.get.idontwant
          if iDontWant.len > 0:
            receivedIDontWants[i] += 1
      pubsubObserver = PubSubObserver(onRecv: checkForIDontWant)
    nodes[i].addObserver(pubsubObserver)

# TODO: refactor helper methods from testgossipsub.nim
proc setupNodes*(count: int): seq[PubSub] =
  generateNodes(count, gossip = true)

proc connectNodes*(nodes: seq[PubSub], target: PubSub) {.async.} =
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"

  for node in nodes:
    node.subscribe("foobar", handler)
    await node.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)

proc baseTestProcedure*(
    nodes: seq[PubSub],
    gossip1: GossipSub,
    numPeersFirstMsg: int,
    numPeersSecondMsg: int,
) {.async.} =
  proc handler(topic: string, data: seq[byte]) {.async.} =
    check topic == "foobar"

  block setup:
    for i in 0 ..< 50:
      if (await nodes[0].publish("foobar", ("Hello!" & $i).toBytes())) == 19:
        break setup
      await sleepAsync(10.milliseconds)
    check false

  check (await nodes[0].publish("foobar", newSeq[byte](2_500_000))) == numPeersFirstMsg
  check (await nodes[0].publish("foobar", newSeq[byte](500_001))) == numPeersSecondMsg

  # Now try with a mesh
  gossip1.subscribe("foobar", handler)
  checkUntilTimeout:
    gossip1.mesh.peers("foobar") > 5

  # use a different length so that the message is not equal to the last
  check (await nodes[0].publish("foobar", newSeq[byte](500_000))) == numPeersSecondMsg

proc `$`*(peer: PubSubPeer): string =
  shortLog(peer)

@@ -1,4 +1,3 @@
{.used.}

import
  testnative, testdaemon, ./pubsub/testgossipinternal, ./pubsub/testpubsub, testinterop
import testnative, testdaemon, ./pubsub/testpubsub, testinterop

@@ -12,7 +12,7 @@
import unittest2
import nimcrypto/utils
import ../libp2p/crypto/[crypto, ecnist]
import stew/results
import results

const
  TestsCount = 10 # number of random tests

@@ -12,7 +12,7 @@
import unittest2
import ../libp2p/protobuf/minprotobuf
import ../libp2p/varint
import stew/byteutils, strutils, sequtils
import stew/byteutils, strutils

suite "MinProtobuf test suite":
  const VarintVectors = [

@@ -11,7 +11,7 @@

import unittest2
import ../libp2p/multibase
import stew/results
import results

const GoTestVectors = [
  ["identity", "\x00Decentralize everything!!!", "Decentralize everything!!!"],

@@ -42,6 +42,10 @@ proc main() {.async.} =
    discard switchBuilder.withTcpTransport().withAddress(
      MultiAddress.init("/ip4/" & ip & "/tcp/0").tryGet()
    )
  of "quic-v1":
    discard switchBuilder.withQuicTransport().withAddress(
      MultiAddress.init("/ip4/" & ip & "/udp/0/quic-v1").tryGet()
    )
  of "ws":
    discard switchBuilder
    .withTransport(
@@ -55,16 +59,12 @@ proc main() {.async.} =
  case secureChannel
  of "noise":
    discard switchBuilder.withNoise()
  else:
    doAssert false

  case muxer
  of "yamux":
    discard switchBuilder.withYamux()
  of "mplex":
    discard switchBuilder.withMplex()
  else:
    doAssert false

  let
    rng = newRng()

@@ -3,7 +3,8 @@
  "containerImageID": "nim-libp2p-head",
  "transports": [
    "tcp",
    "ws"
    "ws",
    "quic-v1"
  ],
  "secureChannels": [
    "noise"

@@ -1,7 +1,8 @@
import unittest2

import times
import times, base64
import ../../../libp2p/transports/tls/certificate
import ../../../libp2p/transports/tls/certificate_ffi
import ../../../libp2p/crypto/crypto
import ../../../libp2p/peerid

@@ -107,6 +108,56 @@ suite "Test vectors":
    # should not verify
    check not cert.verify()

  test "CSR generation":
    var certKey: cert_key_t
    var certCtx: cert_context_t
    var derCSR: ptr cert_buffer = nil

    check cert_init_drbg("seed".cstring, 4, certCtx.addr) == CERT_SUCCESS
    check cert_generate_key(certCtx, certKey.addr) == CERT_SUCCESS

    check cert_signing_req("my.domain.string".cstring, certKey, derCSR.addr) ==
      CERT_SUCCESS

    check cert_signing_req(
      "my.big.domain.string.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.aaaaaaaa".cstring,
      # 253 characters, no labels longer than 63, okay
      certKey,
      derCSR.addr,
    ) == CERT_SUCCESS

    check cert_signing_req(
      "my.domain.".cstring, # domain ending in ".", okay
      certKey,
      derCSR.addr,
    ) == CERT_SUCCESS

    check cert_signing_req(
      "my.big.domain.string.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa".cstring,
      # 254 characters, too long
      certKey,
      derCSR.addr,
    ) == -48 # CERT_ERROR_CN_TOO_LONG

    check cert_signing_req(
      "my.big.domain.string.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa".cstring,
      # 64 character label, too long
      certKey,
      derCSR.addr,
    ) == -49 # CERT_ERROR_CN_LABEL_TOO_LONG

    check cert_signing_req(
      "my..empty.label.domain".cstring, # domain with empty label
      certKey,
      derCSR.addr,
    ) == -50 # CERT_ERROR_CN_EMPTY_LABEL

    check cert_signing_req(
      "".cstring, # domain with empty cn
      certKey,
      derCSR.addr,
    ) == -51 # CERT_ERROR_CN_EMPTY

suite "utilities test":
  test "parseCertTime":
    var dt = parseCertTime("Mar 19 11:54:31 2025 GMT")