Compare commits


2 Commits

Author  SHA1        Message           Date
Diego   06b472d30c  Add run and stop  2022-11-21 10:40:54 +01:00
Diego   1e46642c96  Add services      2022-11-21 10:27:08 +01:00
35 changed files with 432 additions and 804 deletions

View File

@@ -1,131 +0,0 @@
name: Install Nim
inputs:
os:
description: "Operating system to build for"
required: true
cpu:
description: "CPU to build for"
default: "amd64"
nim_branch:
description: "Nim version"
default: "version-1-6"
shell:
description: "Shell to run commands in"
default: "bash --noprofile --norc -e -o pipefail"
runs:
using: "composite"
steps:
- name: Install build dependencies (Linux i386)
shell: ${{ inputs.shell }}
if: inputs.os == 'Linux' && inputs.cpu == 'i386'
run: |
sudo dpkg --add-architecture i386
sudo apt-get update -qq
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
--no-install-recommends -yq gcc-multilib g++-multilib \
libssl-dev:i386
mkdir -p external/bin
cat << EOF > external/bin/gcc
#!/bin/bash
exec $(which gcc) -m32 "\$@"
EOF
cat << EOF > external/bin/g++
#!/bin/bash
exec $(which g++) -m32 "\$@"
EOF
chmod 755 external/bin/gcc external/bin/g++
echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH
- name: MSYS2 (Windows i386)
if: inputs.os == 'Windows' && inputs.cpu == 'i386'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
msystem: MINGW32
install: >-
base-devel
git
mingw-w64-i686-toolchain
- name: MSYS2 (Windows amd64)
if: inputs.os == 'Windows' && inputs.cpu == 'amd64'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
install: >-
base-devel
git
mingw-w64-x86_64-toolchain
- name: Restore Nim DLLs dependencies (Windows) from cache
if: inputs.os == 'Windows'
id: windows-dlls-cache
uses: actions/cache@v3
with:
path: external/dlls
key: 'dlls'
- name: Install DLL dependencies (Windows)
shell: ${{ inputs.shell }}
if: >
steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
inputs.os == 'Windows'
run: |
mkdir external
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
7z x external/windeps.zip -oexternal/dlls
- name: Path to cached dependencies (Windows)
shell: ${{ inputs.shell }}
if: >
inputs.os == 'Windows'
run: |
echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
- name: Derive environment variables
shell: ${{ inputs.shell }}
run: |
if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
PLATFORM=x64
else
PLATFORM=x86
fi
echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
ncpu=
MAKE_CMD="make"
case '${{ inputs.os }}' in
'Linux')
ncpu=$(nproc)
;;
'macOS')
ncpu=$(sysctl -n hw.ncpu)
;;
'Windows')
ncpu=$NUMBER_OF_PROCESSORS
MAKE_CMD="mingw32-make"
;;
esac
[[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
echo "ncpu=$ncpu" >> $GITHUB_ENV
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH
- name: Restore Nim from cache
id: nim-cache
uses: actions/cache@v3
with:
path: '${{ github.workspace }}/nim'
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}
- name: Build Nim and Nimble
shell: ${{ inputs.shell }}
if: ${{ steps.nim-cache.outputs.cache-hit != 'true' }}
run: |
# We don't want partial matches of the cache restored
rm -rf nim
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_branch }} \
QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
bash build_nim.sh nim csources dist/nimble NimBinaries

View File

@@ -7,10 +7,6 @@ on:
pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
build:
timeout-minutes: 90
@@ -49,20 +45,111 @@ jobs:
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'devel' }}
continue-on-error: ${{ matrix.branch == 'version-1-6' || matrix.branch == 'devel' }}
steps:
- name: Checkout
uses: actions/checkout@v2
with:
submodules: true
- name: Setup Nim
uses: "./.github/actions/install_nim"
- name: Install build dependencies (Linux i386)
if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
run: |
sudo dpkg --add-architecture i386
sudo apt-get update -qq
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
--no-install-recommends -yq gcc-multilib g++-multilib \
libssl-dev:i386
mkdir -p external/bin
cat << EOF > external/bin/gcc
#!/bin/bash
exec $(which gcc) -m32 "\$@"
EOF
cat << EOF > external/bin/g++
#!/bin/bash
exec $(which g++) -m32 "\$@"
EOF
chmod 755 external/bin/gcc external/bin/g++
echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH
- name: MSYS2 (Windows i386)
if: runner.os == 'Windows' && matrix.target.cpu == 'i386'
uses: msys2/setup-msys2@v2
with:
os: ${{ matrix.target.os }}
cpu: ${{ matrix.target.cpu }}
shell: ${{ matrix.shell }}
nim_branch: ${{ matrix.branch }}
path-type: inherit
msystem: MINGW32
install: >-
base-devel
git
mingw-w64-i686-toolchain
- name: MSYS2 (Windows amd64)
if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
install: >-
base-devel
git
mingw-w64-x86_64-toolchain
- name: Restore Nim DLLs dependencies (Windows) from cache
if: runner.os == 'Windows'
id: windows-dlls-cache
uses: actions/cache@v2
with:
path: external/dlls
key: 'dlls'
- name: Install DLL dependencies (Windows)
if: >
steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
runner.os == 'Windows'
run: |
mkdir external
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
7z x external/windeps.zip -oexternal/dlls
- name: Path to cached dependencies (Windows)
if: >
runner.os == 'Windows'
run: |
echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
- name: Derive environment variables
run: |
if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
PLATFORM=x64
else
PLATFORM=x86
fi
echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
ncpu=
MAKE_CMD="make"
case '${{ runner.os }}' in
'Linux')
ncpu=$(nproc)
;;
'macOS')
ncpu=$(sysctl -n hw.ncpu)
;;
'Windows')
ncpu=$NUMBER_OF_PROCESSORS
MAKE_CMD="mingw32-make"
;;
esac
[[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
echo "ncpu=$ncpu" >> $GITHUB_ENV
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
- name: Build Nim and Nimble
run: |
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ matrix.branch }} \
QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
bash build_nim.sh nim csources dist/nimble NimBinaries
echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH
- name: Setup Go
uses: actions/setup-go@v2
@@ -73,20 +160,9 @@ jobs:
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v3
with:
path: nimbledeps
key: nimbledeps-${{ hashFiles('.pinned') }}
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
run: |
nimble install_pinned
- name: Run tests
run: |
nim --version
nimble --version
nimble install_pinned
nimble test

View File

@@ -9,10 +9,6 @@ on:
pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
Coverage:
runs-on: ubuntu-20.04
@@ -22,31 +18,15 @@ jobs:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: linux
cpu: amd64
shell: bash
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v3
with:
path: nimbledeps
key: nimbledeps-${{ hashFiles('.pinned') }}
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
run: |
nimble install_pinned
- name: Run
run: |
sudo apt-get update
sudo apt-get install -y lcov build-essential git curl
mkdir coverage
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
env MAKE="make -j${NPROC}" bash build_nim.sh Nim csources dist/nimble NimBinaries
export PATH="$PATH:$PWD/Nim/bin"
nimble install_pinned
export NIMFLAGS="--lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage"
nimble testnative
nimble testpubsub
@@ -58,8 +38,7 @@ jobs:
lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
genhtml coverage/coverage.f.info --output-directory coverage/output
bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
#- uses: actions/upload-artifact@master
# with:
# name: coverage
# path: coverage
- uses: actions/upload-artifact@master
with:
name: coverage
path: coverage

View File

@@ -5,13 +5,7 @@ on:
workflow_dispatch:
jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1
build:
needs: delete-cache
timeout-minutes: 120
strategy:
fail-fast: false
@@ -52,14 +46,107 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: ${{ matrix.target.os }}
shell: ${{ matrix.shell }}
nim_branch: ${{ matrix.branch }}
cpu: ${{ matrix.target.cpu }}
submodules: true
- name: Install build dependencies (Linux i386)
if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
run: |
sudo dpkg --add-architecture i386
sudo apt-get update -qq
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
--no-install-recommends -yq gcc-multilib g++-multilib \
libssl-dev:i386
mkdir -p external/bin
cat << EOF > external/bin/gcc
#!/bin/bash
exec $(which gcc) -m32 "\$@"
EOF
cat << EOF > external/bin/g++
#!/bin/bash
exec $(which g++) -m32 "\$@"
EOF
chmod 755 external/bin/gcc external/bin/g++
echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH
- name: MSYS2 (Windows i386)
if: runner.os == 'Windows' && matrix.target.cpu == 'i386'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
msystem: MINGW32
install: >-
base-devel
git
mingw-w64-i686-toolchain
- name: MSYS2 (Windows amd64)
if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
install: >-
base-devel
git
mingw-w64-x86_64-toolchain
- name: Restore Nim DLLs dependencies (Windows) from cache
if: runner.os == 'Windows'
id: windows-dlls-cache
uses: actions/cache@v2
with:
path: external/dlls
key: 'dlls'
- name: Install DLL dependencies (Windows)
if: >
steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
runner.os == 'Windows'
run: |
mkdir external
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
7z x external/windeps.zip -oexternal/dlls
- name: Path to cached dependencies (Windows)
if: >
runner.os == 'Windows'
run: |
echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
- name: Derive environment variables
run: |
if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
PLATFORM=x64
else
PLATFORM=x86
fi
echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
ncpu=
MAKE_CMD="make"
case '${{ runner.os }}' in
'Linux')
ncpu=$(nproc)
;;
'macOS')
ncpu=$(sysctl -n hw.ncpu)
;;
'Windows')
ncpu=$NUMBER_OF_PROCESSORS
MAKE_CMD="mingw32-make"
;;
esac
[[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
echo "ncpu=$ncpu" >> $GITHUB_ENV
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
- name: Build Nim and Nimble
run: |
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ matrix.branch }} \
QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
bash build_nim.sh nim csources dist/nimble NimBinaries
echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH
- name: Setup Go
uses: actions/setup-go@v2

.pinned
View File

@@ -1,16 +1,16 @@
bearssl;https://github.com/status-im/nim-bearssl@#a647994910904b0103a05db3a5ec1ecfc4d91a88
bearssl;https://github.com/status-im/nim-bearssl@#f4c4233de453cb7eac0ce3f3ffad6496295f83ab
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#75d030ff71264513fb9701c75a326cd36fcb4692
chronos;https://github.com/status-im/nim-chronos@#6525f4ce1d1a7eba146e5f1a53f6f105077ae686
dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823
faststreams;https://github.com/status-im/nim-faststreams@#b42daf41d8eb4fbce40add6836bed838f8d85b6f
httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f
json_serialization;https://github.com/status-im/nim-json-serialization@#a7d815ed92f200f490c95d3cfd722089cc923ce6
metrics;https://github.com/status-im/nim-metrics@#21e99a2e9d9f80e68bef65c80ef781613005fccb
faststreams;https://github.com/status-im/nim-faststreams@#6112432b3a81d9db116cd5d64c39648881cfff29
httputils;https://github.com/status-im/nim-http-utils@#e88e231dfcef4585fe3b2fbd9b664dbd28a88040
json_serialization;https://github.com/status-im/nim-json-serialization@#e5b18fb710c3d0167ec79f3b892f5a7a1bc6d1a4
metrics;https://github.com/status-im/nim-metrics@#0a6477268e850d7bc98347b3875301524871765f
nimcrypto;https://github.com/cheatfate/nimcrypto@#24e006df85927f64916e60511620583b11403178
secp256k1;https://github.com/status-im/nim-secp256k1@#fd173fdff863ce2e211cf64c9a03bc7539fe40b0
serialization;https://github.com/status-im/nim-serialization@#d77417cba6896c26287a68e6a95762e45a1b87e5
stew;https://github.com/status-im/nim-stew@#7184d2424dc3945657884646a72715d494917aad
secp256k1;https://github.com/status-im/nim-secp256k1@#c7f1a37d9b0f17292649bfed8bf6cef83cf4221f
serialization;https://github.com/status-im/nim-serialization@#60a5bd8ac0461dfadd3069fd9c01a7734f205995
stew;https://github.com/status-im/nim-stew@#23da07c9b59c0ba3d4efa7e4e6e2c4121ae5a156
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
unittest2;https://github.com/status-im/nim-unittest2@#da8398c45cafd5bd7772da1fc96e3924a18d3823
websock;https://github.com/status-im/nim-websock@#691f069b209d372b1240d5ae1f57fb7bbafeaba7
websock;https://github.com/status-im/nim-websock@#acbe30e9ca1e51dcbbfe4c552ee6f16c7eede538
zlib;https://github.com/status-im/nim-zlib@#6a6670afba6b97b29b920340e2641978c05ab4d8

View File

@@ -1,5 +1,6 @@
# nim-libp2p examples
# nim-libp2p documentation
In this folder, you'll find the sources of the [nim-libp2p website](https://status-im.github.io/nim-libp2p/docs/)
Welcome to the nim-libp2p documentation!
We recommend following the tutorials on the website, but feel free to grok the sources here!
Here, you'll find [tutorials](tutorial_1_connect.md) to help you get started, as well as
the [full reference](https://status-im.github.io/nim-libp2p/master/libp2p.html).

View File

@@ -1,6 +0,0 @@
# nim-libp2p documentation
Welcome to the nim-libp2p documentation!
Here, you'll find [tutorials](tutorial_1_connect.md) to help you get started, as well as
the [full reference](https://status-im.github.io/nim-libp2p/master/libp2p.html).

View File

@@ -32,7 +32,7 @@ proc new(T: typedesc[TestProto]): T =
# We must close the connections ourselves when we're done with it
await conn.close()
return T.new(codecs = @[TestCodec], handler = handle)
return T(codecs: @[TestCodec], handler: handle)
## This is a constructor for our `TestProto` that specifies our `codecs` and a `handler`, which will be called for each incoming peer asking for this protocol.
## In our handle, we simply read a message from the connection and `echo` it.
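For orientation, a minimal usage sketch of such a protocol (not part of the diff itself), assuming the switch API (mount, dial, writeLp) used by the test suite later in this compare:

# Illustrative sketch only; API names assumed from the tests further down.
let testProto = TestProto.new()           # now returns T(codecs: @[TestCodec], handler: handle)
switch1.mount(testProto)                  # register the handler on the listening switch
let conn = await switch2.dial(
  switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
await conn.writeLp("Hello!")              # handle reads this message and echoes it
await conn.close()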

View File

@@ -107,7 +107,7 @@ type
metricGetter: MetricCallback
proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
var res: MetricProto
let res = MetricProto(metricGetter: cb)
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
let
metrics = await res.metricGetter()
@@ -115,8 +115,8 @@ proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
await conn.writeLp(asProtobuf.buffer)
await conn.close()
res = MetricProto.new(@["/metric-getter/1.0.0"], handle)
res.metricGetter = cb
res.codecs = @["/metric-getter/1.0.0"]
res.handler = handle
return res
proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =

View File

@@ -36,7 +36,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
await conn.close()
return T.new(codecs = @[DumbCodec], handler = handle)
return T(codecs: @[DumbCodec], handler: handle)
## ## Bootnodes
## The first time a p2p program is run, it needs to know how to join

View File

@@ -157,7 +157,7 @@ proc new(T: typedesc[GameProto], g: Game): T =
# The handler of a protocol must wait for the stream to
# be finished before returning
await conn.join()
return T.new(codecs = @["/tron/1.0.0"], handler = handle)
return T(codecs: @["/tron/1.0.0"], handler: handle)
proc networking(g: Game) {.async.} =
# Create our switch, similar to the GossipSub example and

View File

@@ -139,13 +139,9 @@ task install_pinned, "Reads the lockfile":
# Remove the automatically installed deps
# (inefficient you say?)
let nimblePkgs =
if system.dirExists("nimbledeps/pkgs"): "nimbledeps/pkgs"
else: "nimbledeps/pkgs2"
for dependency in listDirs(nimblePkgs):
let filename = dependency.extractFilename
if toInstall.anyIt(filename.startsWith(it[0]) and
filename.endsWith(it[1].split('#')[^1])) == false:
let allowedDirectories = toInstall.mapIt(it[0] & "-" & it[1].split('@')[1])
for dependency in listDirs("nimbledeps/pkgs"):
if dependency.extractFilename notin allowedDirectories:
rmDir(dependency)
task unpin, "Restore global package use":

View File

@@ -26,7 +26,7 @@ import
switch, peerid, peerinfo, stream/connection, multiaddress,
crypto/crypto, transports/[transport, tcptransport],
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, secure/plaintext, rendezvous],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat, relay/relay, relay/client, relay/rtransport],
connmanager, upgrademngrs/muxedupgrade,
nameresolving/nameresolver,
@@ -40,7 +40,6 @@ type
SecureProtocol* {.pure.} = enum
Noise,
PlainText,
Secio {.deprecated.}
SwitchBuilder* = ref object
@@ -135,11 +134,6 @@ proc withNoise*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.secureManagers.add(SecureProtocol.Noise)
b
proc withPlainText*(b: SwitchBuilder): SwitchBuilder {.public.} =
warn "Using plain text encryption!"
b.secureManagers.add(SecureProtocol.PlainText)
b
proc withTransport*(b: SwitchBuilder, prov: TransportProvider): SwitchBuilder {.public.} =
## Use a custom transport
runnableExamples:
@@ -215,13 +209,8 @@ proc build*(b: SwitchBuilder): Switch
let
seckey = b.privKey.get(otherwise = pkRes.expect("Expected default Private Key"))
if b.secureManagers.len == 0:
b.secureManagers &= SecureProtocol.Noise
var
secureManagerInstances: seq[Secure]
if SecureProtocol.PlainText in b.secureManagers:
secureManagerInstances.add(PlainText.new(seckey))
if SecureProtocol.Noise in b.secureManagers:
secureManagerInstances.add(Noise.new(b.rng, seckey).Secure)
@@ -245,6 +234,9 @@ proc build*(b: SwitchBuilder): Switch
transports.add(tProvider(muxedUpgrade))
transports
if b.secureManagers.len == 0:
b.secureManagers &= SecureProtocol.Noise
if isNil(b.rng):
b.rng = newRng()
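To make the moved default concrete, a hedged sketch (builder helpers such as withRng, withAddress, withTcpTransport and withMplex are assumed from the existing SwitchBuilder API): a builder that never adds a secure manager still ends up with Noise.

# Illustrative sketch only, not part of the diff.
let switch = SwitchBuilder.new()
  .withRng(newRng())
  .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
  .withTcpTransport()
  .withMplex()
  .build()                                # no withNoise() call: Noise is added by default in build()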

View File

@@ -36,8 +36,7 @@ method connect*(
method connect*(
self: Dial,
address: MultiAddress,
allowUnknownPeerId = false): Future[PeerId] {.async, base.} =
addrs: seq[MultiAddress]): Future[PeerId] {.async, base.} =
## Connects to a peer and retrieve its PeerId
doAssert(false, "Not implemented!")

View File

@@ -219,23 +219,11 @@ method connect*(
method connect*(
self: Dialer,
address: MultiAddress,
allowUnknownPeerId = false): Future[PeerId] {.async.} =
addrs: seq[MultiAddress],
): Future[PeerId] {.async.} =
## Connects to a peer and retrieve its PeerId
let fullAddress = parseFullAddress(address)
if fullAddress.isOk:
return (await self.internalConnect(
Opt.some(fullAddress.get()[0]),
@[fullAddress.get()[1]],
false)).peerId
else:
if allowUnknownPeerId == false:
raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
return (await self.internalConnect(
Opt.none(PeerId),
@[address],
false)).peerId
return (await self.internalConnect(Opt.none(PeerId), addrs, false)).peerId
proc negotiateStream(
self: Dialer,

View File

@@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}
import std/[strutils, sequtils, tables]
import std/[strutils, sequtils]
import chronos, chronicles, stew/byteutils
import stream/connection,
protocols/protocol
@@ -21,7 +21,7 @@ logScope:
topics = "libp2p multistream"
const
MsgSize* = 1024
MsgSize* = 64*1024
Codec* = "/multistream/1.0.0"
MSCodec* = "\x13" & Codec & "\n"
@@ -33,20 +33,17 @@ type
MultiStreamError* = object of LPError
HandlerHolder* = ref object
HandlerHolder* = object
protos*: seq[string]
protocol*: LPProtocol
match*: Matcher
openedStreams: CountTable[PeerId]
MultistreamSelect* = ref object of RootObj
handlers*: seq[HandlerHolder]
codec*: string
proc new*(T: typedesc[MultistreamSelect]): T =
T(
codec: MSCodec,
)
T(codec: MSCodec)
template validateSuffix(str: string): untyped =
if str.endsWith("\n"):
@@ -172,22 +169,9 @@ proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.asy
for h in m.handlers:
if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
trace "found handler", conn, protocol = ms
var protocolHolder = h
let maxIncomingStreams = protocolHolder.protocol.maxIncomingStreams
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >= maxIncomingStreams:
debug "Max streams for protocol reached, blocking new stream",
conn, protocol = ms, maxIncomingStreams
return
protocolHolder.openedStreams.inc(conn.peerId)
try:
await conn.writeLp(ms & "\n")
conn.protocol = ms
await protocolHolder.protocol.handler(conn, ms)
finally:
protocolHolder.openedStreams.inc(conn.peerId, -1)
if protocolHolder.openedStreams[conn.peerId] == 0:
protocolHolder.openedStreams.del(conn.peerId)
await conn.writeLp(ms & "\n")
conn.protocol = ms
await h.protocol.handler(conn, ms)
return
debug "no handlers", conn, protocol = ms
await conn.write(Na)

View File

@@ -184,11 +184,6 @@ func init*(t: typedesc[PeerId], seckey: PrivateKey): Result[PeerId, cstring] =
## Create new peer id from private key ``seckey``.
PeerId.init(? seckey.getPublicKey().orError(cstring("invalid private key")))
proc random*(t: typedesc[PeerId], rng = newRng()): Result[PeerId, cstring] =
## Create new peer id with random public key.
let randomKey = PrivateKey.random(Secp256k1, rng[])[]
PeerId.init(randomKey).orError(cstring("failed to generate random key"))
func match*(pid: PeerId, pubkey: PublicKey): bool =
## Returns ``true`` if ``pid`` matches public key ``pubkey``.
let p = PeerId.init(pubkey)

View File

@@ -16,7 +16,8 @@ runnableExamples:
# Create a custom book type
type MoodBook = ref object of PeerBook[string]
var somePeerId = PeerId.random().get()
var somePeerId: PeerId
discard somePeerId.init("")
peerStore[MoodBook][somePeerId] = "Happy"
doAssert peerStore[MoodBook][somePeerId] == "Happy"
@@ -152,9 +153,6 @@ proc updatePeerInfo*(
if info.addrs.len > 0:
peerStore[AddressBook][info.peerId] = info.addrs
if info.pubkey.isSome:
peerStore[KeyBook][info.peerId] = info.pubkey.get()
if info.agentVersion.isSome:
peerStore[AgentBook][info.peerId] = info.agentVersion.get().string

View File

@@ -12,14 +12,9 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}
import chronos, stew/results
import chronos
import ../stream/connection
export results
const
DefaultMaxIncomingStreams* = 10
type
LPProtoHandler* = proc (
conn: Connection,
@@ -31,17 +26,11 @@ type
codecs*: seq[string]
handler*: LPProtoHandler ## this handler gets invoked by the protocol negotiator
started*: bool
maxIncomingStreams: Opt[int]
method init*(p: LPProtocol) {.base, gcsafe.} = discard
method start*(p: LPProtocol) {.async, base.} = p.started = true
method stop*(p: LPProtocol) {.async, base.} = p.started = false
proc maxIncomingStreams*(p: LPProtocol): int =
p.maxIncomingStreams.get(DefaultMaxIncomingStreams)
proc `maxIncomingStreams=`*(p: LPProtocol, val: int) =
p.maxIncomingStreams = Opt.some(val)
func codec*(p: LPProtocol): string =
assert(p.codecs.len > 0, "Codecs sequence was empty!")
@@ -51,16 +40,3 @@ func `codec=`*(p: LPProtocol, codec: string) =
# always insert as first codec
# if we use this abstraction
p.codecs.insert(codec, 0)
proc new*(
T: type LPProtocol,
codecs: seq[string],
handler: LPProtoHandler, # default(Opt[int]) or Opt.none(int) don't work on 1.2
maxIncomingStreams: Opt[int] | int = Opt[int]()): T =
T(
codecs: codecs,
handler: handler,
maxIncomingStreams:
when maxIncomingStreams is int: Opt.some(maxIncomingStreams)
else: maxIncomingStreams
)
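With LPProtocol.new and the maxIncomingStreams option removed, protocols on this branch are built by plain object construction, as the tutorial changes above already show; a minimal hedged sketch:

# Illustrative sketch; the handler body is hypothetical.
proc echoHandler(conn: Connection, proto: string) {.async, gcsafe.} =
  await conn.writeLp(await conn.readLp(1024))
  await conn.close()

let echoProto = LPProtocol(codecs: @["/echo/1.0.0"], handler: echoHandler)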

View File

@@ -304,15 +304,14 @@ proc decodeMessages*(pb: ProtoBuffer): ProtoResult[seq[Message]] {.inline.} =
if ? pb.getRepeatedField(2, msgpbs):
trace "decodeMessages: read messages", count = len(msgpbs)
for item in msgpbs:
# size is constrained at the network level
msgs.add(? decodeMessage(initProtoBuffer(item, maxSize = uint.high)))
msgs.add(? decodeMessage(initProtoBuffer(item)))
else:
trace "decodeMessages: no messages found"
ok(msgs)
proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
trace "encodeRpcMsg: encoding message", msg = msg.shortLog()
var pb = initProtoBuffer(maxSize = uint.high)
var pb = initProtoBuffer()
for item in msg.subscriptions:
pb.write(1, item)
for item in msg.messages:
@@ -325,7 +324,7 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
proc decodeRpcMsg*(msg: seq[byte]): ProtoResult[RPCMsg] {.inline.} =
trace "decodeRpcMsg: decoding message", msg = msg.shortLog()
var pb = initProtoBuffer(msg, maxSize = uint.high)
var pb = initProtoBuffer(msg)
var rpcMsg = ok(RPCMsg())
assign(rpcMsg.get().messages, ? pb.decodeMessages())
assign(rpcMsg.get().subscriptions, ? pb.decodeSubscriptions())

View File

@@ -15,57 +15,20 @@ else:
import chronos
import secure, ../../stream/connection
const PlainTextCodec* = "/plaintext/2.0.0"
const PlainTextCodec* = "/plaintext/1.0.0"
type
PlainText* = ref object of Secure
localPublicKey: PublicKey
PlainTextError* = object of LPError
method init(p: PlainText) {.gcsafe.} =
proc handle(conn: Connection, proto: string)
{.async, gcsafe.} = discard
## plain text doesn't do anything
PlainTextConnection* = ref object of SecureConn
method readMessage*(sconn: PlainTextConnection): Future[seq[byte]] {.async.} =
var buffer: array[32768, byte]
let length = await sconn.stream.readOnce(addr buffer[0], buffer.len)
return @(buffer[0 ..< length])
method write*(sconn: PlainTextConnection, message: seq[byte]): Future[void] =
sconn.stream.write(message)
method handshake*(p: PlainText, conn: Connection, initiator: bool, peerId: Opt[PeerId]): Future[SecureConn] {.async.} =
var exchange = initProtoBuffer()
exchange.write(2, p.localPublicKey)
await conn.writeLp(exchange.buffer)
let
remoteData = await conn.readLp(1024)
remotePb = initProtoBuffer(remoteData)
var remotePk: PublicKey
remotePb.getRequiredField(2, remotePk).tryGet()
let remotePeerId = PeerId.init(remotePk).valueOr:
raise newException(PlainTextError, "Invalid remote peer id: " & $error)
if peerId.isSome:
if peerId.get() != remotePeerId:
raise newException(PlainTextError, "Plain text handshake, peer id don't match! " & $remotePeerId & " != " & $peerId)
var res = PlainTextConnection.new(conn, conn.peerId, conn.observedAddr)
return res
method init*(p: PlainText) {.gcsafe.} =
procCall Secure(p).init()
p.codec = PlainTextCodec
p.handler = handle
proc new*(
T: typedesc[PlainText],
privateKey: PrivateKey
): T =
let pk = privateKey.getPublicKey()
.expect("Expected valid Private Key")
let plainText = T(localPublicKey: pk)
proc new*(T: typedesc[PlainText]): T =
let plainText = T()
plainText.init()
plainText

View File

@@ -0,0 +1,32 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import ../switch
import chronos
import std/tables
type
HPService* = ref object of Service
newPeerHandler: PeerEventHandler
proc askPeer(s: Switch, peerId: PeerId): Future[void] {.async.} =
echo "Asking peer " & $(peerId)
proc h(switch: Switch) =
for p in switch.peerStore[AddressBook].book.keys:
discard askPeer(switch, p)
method setup*(self: HPService, switch: Switch) {.async.} =
self.newPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] =
return askPeer(switch, peerId)
switch.connManager.addPeerEventHandler(self.newPeerHandler, PeerEventKind.Joined)
method run*(self: HPService, switch: Switch) {.async, gcsafe, public.} =
h(switch)
method stop*(self: HPService, switch: Switch) {.async, gcsafe, public.} =
if not isNil(self.newPeerHandler):
switch.connManager.removePeerEventHandler(self.newPeerHandler, PeerEventKind.Joined)
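A hedged usage sketch, consistent with the addService call added to the switch tests later in this compare: the service is attached before start, and the switch drives setup, run and stop itself.

# Illustrative sketch only; mirrors switch1.addService(HPService.new()) in the tests below.
let switch = newStandardSwitch()
switch.addService(HPService.new())   # setup() registers the Joined peer-event handler
await switch.start()                 # start() calls service.setup() and service.run()
# ...
await switch.stop()                  # stop() calls service.stop(), removing the handler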

View File

@@ -74,6 +74,15 @@ type
peerStore*: PeerStore
nameResolver*: NameResolver
started: bool
services*: seq[Service]
Service* = ref object of RootObj
method setup*(self: Service, switch: Switch) {.base, async, gcsafe, public.} = discard
method run*(self: Service, switch: Switch) {.base, async, gcsafe, public.} = discard
method stop*(self: Service, switch: Switch) {.base, async, gcsafe, public.} = discard
proc addConnEventHandler*(s: Switch,
handler: ConnEventHandler,
@@ -108,6 +117,9 @@ method addTransport*(s: Switch, t: Transport) =
s.transports &= t
s.dialer.addTransport(t)
method addService*(switch: Switch, service: Service) =
switch.services.add(service)
proc isConnected*(s: Switch, peerId: PeerId): bool {.public.} =
## returns true if the peer has one or more
## associated connections
@@ -130,15 +142,10 @@ method connect*(
method connect*(
s: Switch,
address: MultiAddress,
allowUnknownPeerId = false): Future[PeerId] =
addrs: seq[MultiAddress]): Future[PeerId] =
## Connects to a peer and retrieve its PeerId
##
## If the P2P part is missing from the MA and `allowUnknownPeerId` is set
## to true, this will discover the PeerId while connecting. This exposes
## you to MiTM attacks, so it shouldn't be used without care!
s.dialer.connect(address, allowUnknownPeerId)
s.dialer.connect(addrs)
method dial*(
s: Switch,
@@ -294,6 +301,9 @@ proc stop*(s: Switch) {.async, public.} =
if not a.finished:
a.cancel()
for service in s.services:
await service.stop(s)
await s.ms.stop()
trace "Switch stopped"
@@ -301,10 +311,6 @@ proc stop*(s: Switch) {.async, public.} =
proc start*(s: Switch) {.async, gcsafe, public.} =
## Start listening on every transport
if s.started:
warn "Switch has already been started"
return
trace "starting switch for peer", peerInfo = s.peerInfo
var startFuts: seq[Future[void]]
for t in s.transports:
@@ -335,6 +341,10 @@ proc start*(s: Switch) {.async, gcsafe, public.} =
await s.ms.start()
for service in s.services:
await service.setup(s)
await service.run(s)
s.started = true
debug "Started libp2p node", peer = s.peerInfo
@@ -346,7 +356,8 @@ proc newSwitch*(peerInfo: PeerInfo,
connManager: ConnManager,
ms: MultistreamSelect,
nameResolver: NameResolver = nil,
peerStore = PeerStore.new()): Switch
peerStore = PeerStore.new(),
services = newSeq[Service]()): Switch
{.raises: [Defect, LPError], public.} =
if secureManagers.len == 0:
raise newException(LPError, "Provide at least one secure manager")
@@ -358,8 +369,10 @@ proc newSwitch*(peerInfo: PeerInfo,
connManager: connManager,
peerStore: peerStore,
dialer: Dialer.new(peerInfo.peerId, connManager, transports, ms, nameResolver),
nameResolver: nameResolver)
nameResolver: nameResolver,
services: services)
switch.connManager.peerStore = peerStore
switch.mount(identity)
return switch
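To illustrate the new extension point, a hedged sketch of a user-defined service built on the Service base added above (the overridden bodies are hypothetical):

# Illustrative sketch; method signatures follow the Service base methods in the diff above.
type PeerLogService = ref object of Service

method setup*(self: PeerLogService, switch: Switch) {.async.} =
  echo "attached to ", switch.peerInfo.peerId

method run*(self: PeerLogService, switch: Switch) {.async.} =
  echo "known peers: ", switch.peerStore[AddressBook].book.len

method stop*(self: PeerLogService, switch: Switch) {.async.} =
  echo "switch stopping"

# Attach via newSwitch(..., services = @[Service(PeerLogService())])
# or via switch.addService(PeerLogService()); start() runs setup() and run().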

View File

@@ -35,18 +35,14 @@ logScope:
export transport, websock, results
const
WsTransportTrackerName* = "libp2p.wstransport"
DefaultHeadersTimeout = 3.seconds
type
WsStream = ref object of Connection
session: WSSession
method initStream*(s: WsStream) =
if s.objName.len == 0:
s.objName = "WsStream"
procCall Connection(s).initStream()
proc new*(T: type WsStream,
session: WSSession,
dir: Direction,
@@ -133,7 +129,7 @@ method start*(
factories = self.factories,
rng = self.rng)
for i, ma in addrs:
let isWss =
if WSS.match(ma):
@@ -260,7 +256,7 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
try:
let
wstransp = await self.wsserver.handleRequest(req).wait(self.handshakeTimeout)
wstransp = await self.wsserver.handleRequest(req)
isSecure = self.httpservers[index].secure
return await self.connHandler(wstransp, isSecure, Direction.In)
@@ -277,8 +273,6 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
debug "AsyncStream Error", exc = exc.msg
except TransportTooManyError as exc:
debug "Too many files opened", exc = exc.msg
except AsyncTimeoutError as exc:
debug "Timed out", exc = exc.msg
except TransportUseClosedError as exc:
debug "Server was closed", exc = exc.msg
raise newTransportClosedError(exc)

View File

@@ -58,7 +58,10 @@ type
SomeVarint* = PBSomeVarint | LPSomeVarint
SomeUVarint* = PBSomeUVarint | LPSomeUVarint
template toUleb[T: uint64|uint32|uint16|uint8|uint](x: T): T = x
template toUleb(x: uint64): uint64 = x
template toUleb(x: uint32): uint32 = x
template toUleb(x: uint16): uint16 = x
template toUleb(x: uint8): uint8 = x
func toUleb(x: zint64): uint64 =
let v = cast[uint64](x)

View File

@@ -20,8 +20,8 @@ markdown_extensions:
- pymdownx.superfences
theme:
logo: https://libp2p.io/img/logo_small.png
favicon: https://libp2p.io/img/logo_small.png
logo: https://docs.libp2p.io/images/logo_small.png
favicon: https://docs.libp2p.io/images/logo_small.png
name: material
features:
- navigation.instant
@@ -41,7 +41,7 @@ theme:
nav:
- Tutorials:
- 'Introduction': index.md
- 'Introduction': README.md
- 'Simple connection': tutorial_1_connect.md
- 'Create a custom protocol': tutorial_2_customproto.md
- 'Protobuf': tutorial_3_protobuf.md

View File

@@ -2,68 +2,53 @@
"version": 1,
"packages": {
"unittest2": {
"version": "0.0.5",
"vcsRevision": "da8398c45cafd5bd7772da1fc96e3924a18d3823",
"url": "https://github.com/status-im/nim-unittest2",
"version": "0.0.4",
"vcsRevision": "f180f596c88dfd266f746ed6f8dbebce39c824db",
"url": "https://github.com/status-im/nim-unittest2.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "b3f8493a4948989ef3e645a38b23aad77e851e26"
}
},
"testutils": {
"version": "0.5.0",
"vcsRevision": "dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34",
"url": "https://github.com/status-im/nim-testutils",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "756d0757c4dd06a068f9d38c7f238576ba5ee897"
"sha1": "fa309c41eaf6ef57895b9e603f2620a2f6e11780"
}
},
"stew": {
"version": "0.1.0",
"vcsRevision": "7184d2424dc3945657884646a72715d494917aad",
"url": "https://github.com/status-im/nim-stew",
"vcsRevision": "6ad35b876fb6ebe0dfee0f697af173acc47906ee",
"url": "https://github.com/status-im/nim-stew.git",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"dependencies": [],
"checksums": {
"sha1": "f3125ed2fd126dfd3edbaea14275abd9fa57d703"
"sha1": "46d58c4feb457f3241e3347778334e325dce5268"
}
},
"bearssl": {
"version": "0.2.0",
"vcsRevision": "a647994910904b0103a05db3a5ec1ecfc4d91a88",
"version": "0.1.5",
"vcsRevision": "ba80e2a0d7ae8aab666cee013e38ff8d33a3e5e7",
"url": "https://github.com/status-im/nim-bearssl",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "d634751df2716ea9975912a2d5d0a090bb6bcfa9"
"sha1": "383abd5becc77bf8e365b780a29d20529e1d9c4c"
}
},
"httputils": {
"version": "0.3.0",
"vcsRevision": "a85bd52ae0a956983ca6b3267c72961d2ec0245f",
"url": "https://github.com/status-im/nim-http-utils",
"vcsRevision": "689da19e9e9cfff4ced85e2b25c6b2b5598ed079",
"url": "https://github.com/status-im/nim-http-utils.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"unittest2"
"stew"
],
"checksums": {
"sha1": "92933b21bcd29335f68e377e2b2193fa331e28b3"
"sha1": "4ad3ad68d13c50184180ab4b2eacc0bd7ed2ed44"
}
},
"chronos": {
"version": "3.0.11",
"vcsRevision": "75d030ff71264513fb9701c75a326cd36fcb4692",
"url": "https://github.com/status-im/nim-chronos",
"vcsRevision": "17fed89c99beac5a92d3668d0d3e9b0e4ac13936",
"url": "https://github.com/status-im/nim-chronos.git",
"downloadMethod": "git",
"dependencies": [
"stew",
@@ -72,27 +57,52 @@
"unittest2"
],
"checksums": {
"sha1": "57a674ba3c1a57a694fa7810d93ceb68f338a861"
"sha1": "f6fffc87571e5f76af2a77c4ebcc0e00909ced4e"
}
},
"metrics": {
"version": "0.0.1",
"vcsRevision": "71e0f0e354e1f4c59e3dc92153989c8b723c3440",
"url": "https://github.com/status-im/nim-metrics",
"downloadMethod": "git",
"dependencies": [
"chronos"
],
"checksums": {
"sha1": "86da251fe532ef2163da30343688ab1c148c0340"
}
},
"testutils": {
"version": "0.4.2",
"vcsRevision": "aa6e5216f4b4ab5aa971cdcdd70e1ec1203cedf2",
"url": "https://github.com/status-im/nim-testutils",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "94427e0cce0e0c5841edcd3a6530b4e6b857a3cb"
}
},
"faststreams": {
"version": "0.3.0",
"vcsRevision": "b42daf41d8eb4fbce40add6836bed838f8d85b6f",
"url": "https://github.com/status-im/nim-faststreams",
"vcsRevision": "1b561a9e71b6bdad1c1cdff753418906037e9d09",
"url": "https://github.com/status-im/nim-faststreams.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"testutils",
"chronos",
"unittest2"
],
"checksums": {
"sha1": "62f7ac8fb200a8ecb9e6c63f5553a7dad66ae613"
"sha1": "97edf9797924af48566a0af8267203dc21d80c77"
}
},
"serialization": {
"version": "0.1.0",
"vcsRevision": "d77417cba6896c26287a68e6a95762e45a1b87e5",
"url": "https://github.com/status-im/nim-serialization",
"vcsRevision": "fcd0eadadde0ee000a63df8ab21dc4e9f015a790",
"url": "https://github.com/status-im/nim-serialization.git",
"downloadMethod": "git",
"dependencies": [
"faststreams",
@@ -100,72 +110,70 @@
"stew"
],
"checksums": {
"sha1": "e17244c6654de22254acb9bcf71d8ddbeca8b2aa"
}
},
"metrics": {
"version": "0.0.1",
"vcsRevision": "21e99a2e9d9f80e68bef65c80ef781613005fccb",
"url": "https://github.com/status-im/nim-metrics",
"downloadMethod": "git",
"dependencies": [
"chronos"
],
"checksums": {
"sha1": "ab1c994bbcd6b04f2500f05d8ea4e463f33dd310"
}
},
"nimcrypto": {
"version": "0.5.4",
"vcsRevision": "24e006df85927f64916e60511620583b11403178",
"url": "https://github.com/cheatfate/nimcrypto",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "a4db2105de265930f1578bb7957f49fa39b10d9b"
"sha1": "fef59519892cac70cccd81b612085caaa5e3e6cf"
}
},
"json_serialization": {
"version": "0.1.0",
"vcsRevision": "a7d815ed92f200f490c95d3cfd722089cc923ce6",
"url": "https://github.com/status-im/nim-json-serialization",
"vcsRevision": "c5f0e2465e8375dfc7aa0f56ccef67cb680bc6b0",
"url": "https://github.com/status-im/nim-json-serialization.git",
"downloadMethod": "git",
"dependencies": [
"serialization",
"stew"
],
"checksums": {
"sha1": "50fc34a992ef3df68a7bee88af096bb8ed42572f"
"sha1": "d89d79d0679a3a41b350e3ad4be56c0308cc5ec6"
}
},
"chronicles": {
"version": "0.10.3",
"vcsRevision": "32ac8679680ea699f7dbc046e8e0131cac97d41a",
"url": "https://github.com/status-im/nim-chronicles",
"version": "0.10.2",
"vcsRevision": "1682096306ddba8185dcfac360a8c3f952d721e4",
"url": "https://github.com/status-im/nim-chronicles.git",
"downloadMethod": "git",
"dependencies": [
"testutils",
"json_serialization"
],
"checksums": {
"sha1": "79f09526d4d9b9196dd2f6a75310d71a890c4f88"
"sha1": "9a5bebb76b0f7d587a31e621d260119279e91c76"
}
},
"asynctest": {
"version": "0.3.1",
"vcsRevision": "5347c59b4b057443a014722aa40800cd8bb95c69",
"url": "https://github.com/status-im/asynctest.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "53e0b610d13700296755a4ebe789882cae47a3b9"
}
},
"nimcrypto": {
"version": "0.5.4",
"vcsRevision": "a5742a9a214ac33f91615f3862c7b099aec43b00",
"url": "https://github.com/cheatfate/nimcrypto",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "f76c87707cd4e96355b8bb6ef27e7f8b0aac1e08"
}
},
"zlib": {
"version": "0.1.0",
"vcsRevision": "6a6670afba6b97b29b920340e2641978c05ab4d8",
"vcsRevision": "74cdeb54b21bededb5a515d36f608bc1850555a2",
"url": "https://github.com/status-im/nim-zlib",
"downloadMethod": "git",
"dependencies": [
"stew"
],
"checksums": {
"sha1": "2621e46369be2a6846713e8c3d681a5bba3e0325"
"sha1": "01d330dc4c1924e56b1559ee73bc760e526f635c"
}
},
"websock": {
"version": "0.1.0",
"vcsRevision": "691f069b209d372b1240d5ae1f57fb7bbafeaba7",
"vcsRevision": "73edde4417f7b45003113b7a34212c3ccd95b9fd",
"url": "https://github.com/status-im/nim-websock",
"downloadMethod": "git",
"dependencies": [
@@ -173,35 +181,36 @@
"httputils",
"chronicles",
"stew",
"asynctest",
"nimcrypto",
"bearssl",
"zlib"
],
"checksums": {
"sha1": "c71edfce064e7c0cadde0e687c6edc0caaf9ec07"
"sha1": "ec2b137543f280298ca48de9ed4461a033ba88d3"
}
},
"dnsclient": {
"version": "0.3.2",
"vcsRevision": "fcd7443634b950eaea574e5eaa00a628ae029823",
"url": "https://github.com/ba0f3/dnsclient.nim",
"version": "0.1.2",
"vcsRevision": "fbb76f8af8a33ab818184a7d4406d9fee20993be",
"url": "https://github.com/ba0f3/dnsclient.nim.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "146aa4a8d512a3a786c5bf54311b79900166d9d7"
"sha1": "663239a914c814204b30dda6e0902cc0fbd0b8ee"
}
},
"secp256k1": {
"version": "0.5.2",
"vcsRevision": "fd173fdff863ce2e211cf64c9a03bc7539fe40b0",
"url": "https://github.com/status-im/nim-secp256k1",
"vcsRevision": "5340cf188168d6afcafc8023770d880f067c0b2f",
"url": "https://github.com/status-im/nim-secp256k1.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"nimcrypto"
],
"checksums": {
"sha1": "657c79f6f2b1b6da92a9cda81ffc9f95d26443cb"
"sha1": "ae9cbea4487be94a06653ffee075a7f1bd1e231e"
}
}
}

View File

@@ -362,35 +362,3 @@ suite "FloodSub":
)
await allFuturesThrowing(nodesFut)
asyncTest "FloodSub message size validation 2":
var messageReceived = 0
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
inc(messageReceived)
let
bigNode1 = generateNodes(1, maxMessageSize = 20000000)
bigNode2 = generateNodes(1, maxMessageSize = 20000000)
# start switches
nodesFut = await allFinished(
bigNode1[0].switch.start(),
bigNode2[0].switch.start(),
)
await subscribeNodes(bigNode1 & bigNode2)
bigNode2[0].subscribe("foo", handler)
await waitSub(bigNode1[0], bigNode2[0], "foo")
let bigMessage = newSeq[byte](19000000)
check (await bigNode1[0].publish("foo", bigMessage)) > 0
checkExpiring: messageReceived == 1
await allFuturesThrowing(
bigNode1[0].switch.stop(),
bigNode2[0].switch.stop()
)
await allFuturesThrowing(nodesFut)

View File

@@ -152,8 +152,8 @@ suite "Identify":
identifyPush2 {.threadvar.}: IdentifyPush
conn {.threadvar.}: Connection
asyncSetup:
switch1 = newStandardSwitch(sendSignedPeerRecord=true)
switch2 = newStandardSwitch(sendSignedPeerRecord=true)
switch1 = newStandardSwitch()
switch2 = newStandardSwitch()
proc updateStore1(peerId: PeerId, info: IdentifyInfo) {.async.} =
switch1.peerStore.updatePeerInfo(info)
@@ -177,21 +177,13 @@ suite "Identify":
check:
switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs
switch2.peerStore[AddressBook][switch1.peerInfo.peerId] == switch1.peerInfo.addrs
switch1.peerStore[KeyBook][switch2.peerInfo.peerId] == switch2.peerInfo.publicKey
switch2.peerStore[KeyBook][switch1.peerInfo.peerId] == switch1.peerInfo.publicKey
switch1.peerStore[AgentBook][switch2.peerInfo.peerId] == switch2.peerInfo.agentVersion
switch2.peerStore[AgentBook][switch1.peerInfo.peerId] == switch1.peerInfo.agentVersion
switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs
switch2.peerStore[AddressBook][switch1.peerInfo.peerId] == switch1.peerInfo.addrs
switch1.peerStore[ProtoVersionBook][switch2.peerInfo.peerId] == switch2.peerInfo.protoVersion
switch2.peerStore[ProtoVersionBook][switch1.peerInfo.peerId] == switch1.peerInfo.protoVersion
switch1.peerStore[ProtoBook][switch2.peerInfo.peerId] == switch2.peerInfo.protocols
switch2.peerStore[ProtoBook][switch1.peerInfo.peerId] == switch1.peerInfo.protocols
switch1.peerStore[SPRBook][switch2.peerInfo.peerId] == switch2.peerInfo.signedPeerRecord.envelope
switch2.peerStore[SPRBook][switch1.peerInfo.peerId] == switch1.peerInfo.signedPeerRecord.envelope
#switch1.peerStore.signedPeerRecordBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.signedPeerRecord.get()
#switch2.peerStore.signedPeerRecordBook.get(switch1.peerInfo.peerId) == switch1.peerInfo.signedPeerRecord.get()
# no longer sent by default
proc closeAll() {.async.} =
await conn.close()

View File

@@ -278,79 +278,6 @@ suite "Multistream select":
await handlerWait.wait(30.seconds)
asyncTest "e2e - streams limit":
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let blocker = newFuture[void]()
# Start 5 streams which are blocked by `blocker`
# Try to start a new one, which should fail
# Unblock the 5 streams, check that we can open a new one
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
await blocker
await conn.writeLp("Hello!")
await conn.close()
var protocol: LPProtocol = LPProtocol.new(
@["/test/proto/1.0.0"],
testHandler,
maxIncomingStreams = 5
)
protocol.handler = testHandler
let msListen = MultistreamSelect.new()
msListen.addHandler("/test/proto/1.0.0", protocol)
let transport1 = TcpTransport.new(upgrade = Upgrade())
await transport1.start(ma)
proc acceptedOne(c: Connection) {.async.} =
await msListen.handle(c)
await c.close()
proc acceptHandler() {.async, gcsafe.} =
while true:
let conn = await transport1.accept()
asyncSpawn acceptedOne(conn)
var handlerWait = acceptHandler()
let msDial = MultistreamSelect.new()
let transport2 = TcpTransport.new(upgrade = Upgrade())
proc connector {.async.} =
let conn = await transport2.dial(transport1.addrs[0])
check: (await msDial.select(conn, "/test/proto/1.0.0")) == true
check: string.fromBytes(await conn.readLp(1024)) == "Hello!"
await conn.close()
# Fill up the 5 allowed streams
var dialers: seq[Future[void]]
for _ in 0..<5:
dialers.add(connector())
# This one will fail during negotiation
expect(CatchableError):
try: waitFor(connector().wait(1.seconds))
except AsyncTimeoutError as exc:
check false
raise exc
# check that the dialers aren't finished
check: (await dialers[0].withTimeout(10.milliseconds)) == false
# unblock the dialers
blocker.complete()
await allFutures(dialers)
# now must work
waitFor(connector())
await transport2.stop()
await transport1.stop()
await handlerWait.cancelAndWait()
asyncTest "e2e - ls":
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

View File

@@ -32,7 +32,6 @@ import testtcptransport,
testconnmngr,
testswitch,
testnoise,
testplaintext,
testpeerinfo,
testpeerstore,
testping,

View File

@@ -12,7 +12,7 @@
import unittest2
import nimcrypto/utils, stew/base58
import ../libp2p/crypto/crypto, ../libp2p/peerid
import bearssl/hash
when defined(nimHasUsed): {.used.}
const
@@ -236,15 +236,3 @@ suite "Peer testing suite":
ekey2 == pubkey
ekey3 == pubkey
ekey4 == pubkey
test "Test PeerId.random() proc":
# generate a random peer with a deterministic seed
var rng = (ref HmacDrbgContext)()
hmacDrbgInit(rng[], addr sha256Vtable, nil, 0)
var randomPeer1 = PeerId.random(rng)
check:
$randomPeer1.get() == "16Uiu2HAmCxpSTFDNdWiu1MLScu7inPhcbbGfPvuvRPD1e51gw1Xr"
# generate a random peer with a new random seed
var randomPeer2 = PeerId.random()
check:
randomPeer2.isErr() != true

View File

@@ -1,111 +0,0 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import tables
import chronos, stew/byteutils
import chronicles
import ../libp2p/[switch,
errors,
multistream,
stream/bufferstream,
protocols/identify,
stream/connection,
transports/transport,
transports/tcptransport,
multiaddress,
peerinfo,
crypto/crypto,
protocols/protocol,
muxers/muxer,
muxers/mplex/mplex,
protocols/secure/plaintext,
protocols/secure/secure,
upgrademngrs/muxedupgrade,
connmanager]
import ./helpers
suite "Plain text":
teardown:
checkTrackers()
asyncTest "e2e: handle write & read":
let
server = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
serverPrivKey = PrivateKey.random(ECDSA, rng[]).get()
serverInfo = PeerInfo.new(serverPrivKey, server)
serverPlainText = PlainText.new(serverPrivKey)
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
await transport1.start(server)
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let sconn = await serverPlainText.secure(conn, false, Opt.none(PeerId))
try:
await sconn.writeLp("Hello 1!")
await sconn.writeLp("Hello 2!")
finally:
await sconn.close()
await conn.close()
let
acceptFut = acceptHandler()
transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
clientPrivKey = PrivateKey.random(ECDSA, rng[]).get()
clientInfo = PeerInfo.new(clientPrivKey, transport1.addrs)
clientPlainText = PlainText.new(clientPrivKey)
conn = await transport2.dial(transport1.addrs[0])
let sconn = await clientPlainText.secure(conn, true, Opt.some(serverInfo.peerId))
discard await sconn.readLp(100)
var msg = await sconn.readLp(100)
await sconn.close()
await conn.close()
await acceptFut
await transport1.stop()
await transport2.stop()
check string.fromBytes(msg) == "Hello 2!"
asyncTest "e2e: wrong peerid":
let
server = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
serverPrivKey = PrivateKey.random(ECDSA, rng[]).get()
serverInfo = PeerInfo.new(serverPrivKey, server)
serverPlainText = PlainText.new(serverPrivKey)
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
await transport1.start(server)
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
try:
discard await serverPlainText.secure(conn, false, Opt.none(PeerId))
finally:
await conn.close()
let
acceptFut = acceptHandler()
transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
clientPrivKey = PrivateKey.random(ECDSA, rng[]).get()
clientInfo = PeerInfo.new(clientPrivKey, transport1.addrs)
clientPlainText = PlainText.new(clientPrivKey)
conn = await transport2.dial(transport1.addrs[0])
expect(CatchableError):
discard await clientPlainText.secure(conn, true, Opt.some(clientInfo.peerId))
await conn.close()
await acceptFut
await transport1.stop()
await transport2.stop()

View File

@@ -10,7 +10,6 @@ import ../libp2p/[errors,
builders,
stream/bufferstream,
stream/connection,
multicodec,
multiaddress,
peerinfo,
crypto/crypto,
@@ -27,6 +26,8 @@ import ../libp2p/[errors,
transports/wstransport]
import ./helpers
import ../libp2p/services/hpservice
const
TestCodec = "/test/proto/1.0.0"
@@ -77,46 +78,6 @@ suite "Switch":
check not switch1.isConnected(switch2.peerInfo.peerId)
check not switch2.isConnected(switch1.peerInfo.peerId)
asyncTest "e2e plaintext encryption":
let done = newFuture[void]()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
await conn.writeLp("Hello!")
finally:
await conn.close()
done.complete()
let testProto = new TestProto
testProto.codec = TestCodec
testProto.handler = handle
let switch1 = newStandardSwitch(secureManagers=[PlainText])
switch1.mount(testProto)
let switch2 = newStandardSwitch(secureManagers=[PlainText])
await switch1.start()
await switch2.start()
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
check switch1.isConnected(switch2.peerInfo.peerId)
check switch2.isConnected(switch1.peerInfo.peerId)
await conn.writeLp("Hello!")
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
await conn.close()
await allFuturesThrowing(
done.wait(5.seconds),
switch1.stop(),
switch2.stop())
check not switch1.isConnected(switch2.peerInfo.peerId)
check not switch2.isConnected(switch1.peerInfo.peerId)
asyncTest "e2e use switch dial proto string with custom matcher":
let done = newFuture[void]()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
@@ -254,40 +215,12 @@ suite "Switch":
"dnsaddr=" & $switch1.peerInfo.addrs[0] & "/p2p/" & $switch1.peerInfo.peerId,
]
check: (await switch2.connect(MultiAddress.init("/dnsaddr/test.io/").tryGet(), true)) == switch1.peerInfo.peerId
check: (await switch2.connect(@[MultiAddress.init("/dnsaddr/test.io/").tryGet()])) == switch1.peerInfo.peerId
await switch2.disconnect(switch1.peerInfo.peerId)
# via direct ip
check not switch2.isConnected(switch1.peerInfo.peerId)
check: (await switch2.connect(switch1.peerInfo.addrs[0], true)) == switch1.peerInfo.peerId
await switch2.disconnect(switch1.peerInfo.peerId)
await allFuturesThrowing(
switch1.stop(),
switch2.stop()
)
asyncTest "e2e connect to peer with known PeerId":
let switch1 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
let switch2 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
await switch1.start()
await switch2.start()
# via direct ip
check not switch2.isConnected(switch1.peerInfo.peerId)
# without specifying allow unknown, will fail
expect(DialFailedError):
discard await switch2.connect(switch1.peerInfo.addrs[0])
# with invalid PeerId, will fail
let fakeMa = concat(switch1.peerInfo.addrs[0], MultiAddress.init(multiCodec("p2p"), PeerId.random.tryGet().data).tryGet()).tryGet()
expect(CatchableError):
discard (await switch2.connect(fakeMa))
# real thing works
check (await switch2.connect(switch1.peerInfo.fullAddrs.tryGet()[0])) == switch1.peerInfo.peerId
check: (await switch2.connect(switch1.peerInfo.addrs)) == switch1.peerInfo.peerId
await switch2.disconnect(switch1.peerInfo.peerId)
@@ -358,6 +291,8 @@ suite "Switch":
switch2.addConnEventHandler(hook, ConnEventKind.Connected)
switch2.addConnEventHandler(hook, ConnEventKind.Disconnected)
switch1.addService(HPService.new())
await switch1.start()
await switch2.start()
@@ -1085,13 +1020,3 @@ suite "Switch":
expect LPError:
await switch.start()
# test is that this doesn't leak
asyncTest "starting two times does not crash":
let switch = newStandardSwitch(
addrs = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
)
await switch.start()
await switch.start()
await allFuturesThrowing(switch.stop())

View File

@@ -13,15 +13,8 @@ createDir("nimbledeps")
discard execCmd("nimble install -dy")
var allDeps: Table[string, string]
let nimblePkgs =
if dirExists("nimbledeps/pkgs"): "nimbledeps/pkgs"
else: "nimbledeps/pkgs2"
for (_, dependency) in walkDir(nimblePkgs):
let
jsonContent = parseJson(readFile(dependency & "/nimblemeta.json"))
fileContent =
if "metaData" in jsonContent: jsonContent["metaData"]
else: jsonContent
for (_, dependency) in walkDir("nimbledeps/pkgs"):
let fileContent = parseJson(readFile(dependency & "/nimblemeta.json"))
let url = fileContent.getOrDefault("url").getStr("")
var version = fileContent.getOrDefault("vcsRevision").getStr("")
var packageName = dependency.split('/')[^1].split('-')[0]