chore(formatting): format the whole codebase using nph 0.5.1 (#1118)

This commit is contained in:
diegomrsantos
2024-06-11 17:18:06 +02:00
committed by GitHub
parent d0af3fbe85
commit dc83a1e9b6
198 changed files with 12682 additions and 11013 deletions

View File

@@ -90,3 +90,25 @@ jobs:
nim --version
nimble --version
nimble test
lint:
name: "Lint"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 2 # In PR, has extra merge commit: ^1 = PR, ^2 = base
- name: Check nph formatting
# Pin nph to a specific version to avoid sudden style differences.
# Updating nph version should be accompanied with running the new
# version on the fluffy directory.
run: |
VERSION="v0.5.1"
ARCHIVE="nph-linux_x64.tar.gz"
curl -L "https://github.com/arnetheduck/nph/releases/download/${VERSION}/${ARCHIVE}" -o ${ARCHIVE}
tar -xzf ${ARCHIVE}
shopt -s extglob # Enable extended globbing
./nph examples libp2p tests tools *.@(nim|nims|nimble)
git diff --exit-code

View File

@@ -135,7 +135,7 @@ The libp2p implementation in Nim is a work in progress. We welcome contributors
- **Perform code reviews**. Feel free to let us know if you find anything that can a) speed up project development, b) ensure better quality, and c) reduce possible future bugs.
- **Add tests**. Help make nim-libp2p more robust by adding more tests to the [tests folder](tests/).
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
- **Code format**. Please format code using [nph](https://github.com/arnetheduck/nph).
- **Code format**. Please format code using [nph](https://github.com/arnetheduck/nph) v0.5.1. This will ensure a consistent codebase and make PRs easier to review. A CI rule has been added to ensure that future commits are all formatted using the same nph version.
The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
### Contributors

View File

@@ -7,14 +7,17 @@ if dirExists("nimbledeps/pkgs2"):
switch("warning", "CaseTransition:off")
switch("warning", "ObservableStores:off")
switch("warning", "LockLevel:off")
--styleCheck:usages
--styleCheck:
usages
switch("warningAsError", "UseBase:on")
--styleCheck:error
--styleCheck:
error
# Avoid some rare stack corruption while using exceptions with a SEH-enabled
# toolchain: https://github.com/status-im/nimbus-eth2/issues/3121
if defined(windows) and not defined(vcc):
--define:nimRawSetjmp
--define:
nimRawSetjmp
# begin Nimble config (version 1)
when fileExists("nimble.paths"):

View File

@@ -7,19 +7,19 @@
##
## More information [here](https://docs.libp2p.io/concepts/circuit-relay/).
import chronos, stew/byteutils
import libp2p,
libp2p/protocols/connectivity/relay/[relay, client]
import libp2p, libp2p/protocols/connectivity/relay/[relay, client]
# Helper to create a circuit relay node
proc createCircuitRelaySwitch(r: Relay): Switch =
SwitchBuilder.new()
.withRng(newRng())
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withTcpTransport()
.withMplex()
.withNoise()
.withCircuitRelay(r)
.build()
SwitchBuilder
.new()
.withRng(newRng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withTcpTransport()
.withMplex()
.withNoise()
.withCircuitRelay(r)
.build()
proc main() {.async.} =
# Create a custom protocol
@@ -56,8 +56,11 @@ proc main() {.async.} =
let
# Create a relay address to swDst using swRel as the relay
addrs = MultiAddress.init($swRel.peerInfo.addrs[0] & "/p2p/" &
$swRel.peerInfo.peerId & "/p2p-circuit").get()
addrs = MultiAddress
.init(
$swRel.peerInfo.addrs[0] & "/p2p/" & $swRel.peerInfo.peerId & "/p2p-circuit"
)
.get()
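For reference, the constructed address has this shape (placeholder values), telling the dialer to reach swDst through the relay swRel:

# /ip4/<relay-ip>/tcp/<relay-port>/p2p/<relay-peer-id>/p2p-circuit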
# Connect Dst to the relay
await swDst.connect(swRel.peerInfo.peerId, swRel.peerInfo.addrs)
@@ -66,7 +69,7 @@ proc main() {.async.} =
let rsvp = await clDst.reserve(swRel.peerInfo.peerId, swRel.peerInfo.addrs)
# Src dials Dst using the relay
let conn = await swSrc.dial(swDst.peerInfo.peerId, @[ addrs ], customProtoCodec)
let conn = await swSrc.dial(swDst.peerInfo.peerId, @[addrs], customProtoCodec)
await conn.writeLp("test1")
var msg = string.fromBytes(await conn.readLp(1024))

View File

@@ -1,15 +1,12 @@
when not(compileOption("threads")):
when not (compileOption("threads")):
{.fatal: "Please, compile this program with the --threads:on option!".}
import
strformat, strutils,
stew/byteutils,
chronos,
libp2p
import strformat, strutils, stew/byteutils, chronos, libp2p
const DefaultAddr = "/ip4/127.0.0.1/tcp/0"
const Help = """
const Help =
"""
Commands: /[?|help|connect|disconnect|exit]
help: Prints this help
connect: dials a remote peer
@@ -17,12 +14,11 @@ const Help = """
exit: closes the chat
"""
type
Chat = ref object
switch: Switch # a single entry point for dialing and listening to a peer
stdinReader: StreamTransport # transport streams between read & write file descriptor
conn: Connection # connection to the other peer
connected: bool # if the node is connected to another peer
type Chat = ref object
switch: Switch # a single entry point for dialing and listening to a peer
stdinReader: StreamTransport # transport streams between read & write file descriptor
conn: Connection # connection to the other peer
connected: bool # if the node is connected to another peer
##
# Stdout helpers, to write the prompt
@@ -41,8 +37,7 @@ proc writeStdout(c: Chat, str: string) =
##
const ChatCodec = "/nim-libp2p/chat/1.0.0"
type
ChatProto = ref object of LPProtocol
type ChatProto = ref object of LPProtocol
proc new(T: typedesc[ChatProto], c: Chat): T =
let chatproto = T()
@@ -77,9 +72,9 @@ proc handlePeer(c: Chat, conn: Connection) {.async.} =
strData = await conn.readLp(1024)
str = string.fromBytes(strData)
c.writeStdout $conn.peerId & ": " & $str
except LPStreamEOFError:
defer: c.writeStdout $conn.peerId & " disconnected"
defer:
c.writeStdout $conn.peerId & " disconnected"
await c.conn.close()
c.connected = false
@@ -88,10 +83,7 @@ proc dialPeer(c: Chat, address: string) {.async.} =
let
multiAddr = MultiAddress.init(address).tryGet()
# split the peerId part /p2p/...
peerIdBytes = multiAddr[multiCodec("p2p")]
.tryGet()
.protoAddress()
.tryGet()
peerIdBytes = multiAddr[multiCodec("p2p")].tryGet().protoAddress().tryGet()
remotePeer = PeerId.init(peerIdBytes).tryGet()
# split the wire address
ip4Addr = multiAddr[multiCodec("ip4")].tryGet()
@@ -124,7 +116,6 @@ proc readLoop(c: Chat) {.async.} =
let address = await c.stdinReader.readLine()
if address.len > 0:
await c.dialPeer(address)
elif line.startsWith("/exit"):
if c.connected and c.conn.closed.not:
await c.conn.close()
@@ -171,16 +162,18 @@ proc main() {.async.} =
var switch = SwitchBuilder
.new()
.withRng(rng) # Give the application RNG
.withRng(rng)
# Give the application RNG
.withAddress(localAddress)
.withTcpTransport() # Use TCP as transport
.withMplex() # Use Mplex as muxer
.withNoise() # Use Noise as secure manager
.withTcpTransport()
# Use TCP as transport
.withMplex()
# Use Mplex as muxer
.withNoise()
# Use Noise as secure manager
.build()
let chat = Chat(
switch: switch,
stdinReader: stdinReader)
let chat = Chat(switch: switch, stdinReader: stdinReader)
switch.mount(ChatProto.new(chat))

View File

@@ -2,8 +2,7 @@ import chronos, nimcrypto, strutils
import ../../libp2p/daemon/daemonapi
import ../hexdump
const
PubSubTopic = "test-net"
const PubSubTopic = "test-net"
proc dumpSubscribedPeers(api: DaemonAPI) {.async.} =
var peers = await api.pubsubListPeers(PubSubTopic)
@@ -37,12 +36,12 @@ proc main() {.async.} =
asyncSpawn monitor(api)
proc pubsubLogger(api: DaemonAPI,
ticket: PubsubTicket,
message: PubSubMessage): Future[bool] {.async.} =
proc pubsubLogger(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async.} =
let msglen = len(message.data)
echo "= Recieved pubsub message with length ", msglen,
" bytes from peer ", message.peer.pretty()
echo "= Recieved pubsub message with length ",
msglen, " bytes from peer ", message.peer.pretty()
echo dumpHex(message.data)
await api.dumpSubscribedPeers()
result = true

View File

@@ -2,18 +2,16 @@ import chronos, nimcrypto, strutils
import ../../libp2p/daemon/daemonapi
## nim c -r --threads:on chat.nim
when not(compileOption("threads")):
when not (compileOption("threads")):
{.fatal: "Please, compile this program with the --threads:on option!".}
const
ServerProtocols = @["/test-chat-stream"]
const ServerProtocols = @["/test-chat-stream"]
type
CustomData = ref object
api: DaemonAPI
remotes: seq[StreamTransport]
consoleFd: AsyncFD
serveFut: Future[void]
type CustomData = ref object
api: DaemonAPI
remotes: seq[StreamTransport]
consoleFd: AsyncFD
serveFut: Future[void]
proc threadMain(wfd: AsyncFD) {.thread.} =
## This procedure performs reading from `stdin` and sends data over
@@ -82,7 +80,7 @@ proc serveThread(udata: CustomData) {.async.} =
relay = true
break
if relay:
echo peer.pretty(), " * ", " [", addresses.join(", "), "]"
echo peer.pretty(), " * ", " [", addresses.join(", "), "]"
else:
echo peer.pretty(), " [", addresses.join(", "), "]"
elif line.startsWith("/exit"):

View File

@@ -1,26 +1,25 @@
import chronos, nimcrypto, strutils, os
import ../../libp2p/daemon/daemonapi
const
PubSubTopic = "test-net"
const PubSubTopic = "test-net"
proc main(bn: string) {.async.} =
echo "= Starting P2P node"
var bootnodes = bn.split(",")
var api = await newDaemonApi({DHTFull, PSGossipSub, WaitBootstrap},
bootstrapNodes = bootnodes,
peersRequired = 1)
var api = await newDaemonApi(
{DHTFull, PSGossipSub, WaitBootstrap}, bootstrapNodes = bootnodes, peersRequired = 1
)
var id = await api.identity()
echo "= P2P node ", id.peer.pretty(), " started:"
for item in id.addresses:
echo item
proc pubsubLogger(api: DaemonAPI,
ticket: PubsubTicket,
message: PubSubMessage): Future[bool] {.async.} =
proc pubsubLogger(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async.} =
let msglen = len(message.data)
echo "= Recieved pubsub message with length ", msglen,
" bytes from peer ", message.peer.pretty(), ": "
echo "= Recieved pubsub message with length ",
msglen, " bytes from peer ", message.peer.pretty(), ": "
var strdata = cast[string](message.data)
echo strdata
result = true

View File

@@ -1,5 +1,5 @@
import chronos # an efficient library for async
import stew/byteutils # various utils
import chronos # an efficient library for async
import stew/byteutils # various utils
import libp2p
##
@@ -7,11 +7,9 @@ import libp2p
##
const TestCodec = "/test/proto/1.0.0" # custom protocol string identifier
type
TestProto = ref object of LPProtocol # declare a custom protocol
type TestProto = ref object of LPProtocol # declare a custom protocol
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async.} =
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
@@ -28,11 +26,16 @@ proc new(T: typedesc[TestProto]): T =
proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
var switch = SwitchBuilder
.new()
.withRng(rng) # Give the application RNG
.withAddress(ma) # Our local address(es)
.withTcpTransport() # Use TCP as transport
.withMplex() # Use Mplex as muxer
.withNoise() # Use Noise as secure manager
.withRng(rng)
# Give the application RNG
.withAddress(ma)
# Our local address(es)
.withTcpTransport()
# Use TCP as transport
.withMplex()
# Use Mplex as muxer
.withNoise()
# Use Noise as secure manager
.build()
result = switch
@@ -73,7 +76,8 @@ proc main() {.async.} =
# use the second node to dial the first node
# using the first node peerid and address
# and specify our custom protocol codec
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
let conn =
await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
# conn is now a fully set up connection; we talk directly to the node1 custom protocol handler
await conn.writeLp("Hello p2p!") # writeLp sends a length-prefixed buffer over the wire
@@ -84,6 +88,7 @@ proc main() {.async.} =
# We must close the connection ourselves when we're done with it
await conn.close()
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
await allFutures(switch1.stop(), switch2.stop())
# close connections and shutdown all transports
waitFor(main())

View File

@@ -35,26 +35,24 @@ proc dumpHex*(pbytes: pointer, nbytes: int, items = 1, ascii = true): string =
var asciiText = ""
while i < nbytes:
if i %% 16 == 0:
result = result & toHex(cast[BiggestInt](slider),
sizeof(BiggestInt) * 2) & ": "
result = result & toHex(cast[BiggestInt](slider), sizeof(BiggestInt) * 2) & ": "
var k = 0
while k < items:
var ch = cast[ptr char](cast[uint](slider) + k.uint)[]
if ord(ch) > 31 and ord(ch) < 127: asciiText &= ch else: asciiText &= "."
if ord(ch) > 31 and ord(ch) < 127:
asciiText &= ch
else:
asciiText &= "."
inc(k)
case items:
case items
of 1:
result = result & toHex(cast[BiggestInt](cast[ptr uint8](slider)[]),
hexSize)
result = result & toHex(cast[BiggestInt](cast[ptr uint8](slider)[]), hexSize)
of 2:
result = result & toHex(cast[BiggestInt](cast[ptr uint16](slider)[]),
hexSize)
result = result & toHex(cast[BiggestInt](cast[ptr uint16](slider)[]), hexSize)
of 4:
result = result & toHex(cast[BiggestInt](cast[ptr uint32](slider)[]),
hexSize)
result = result & toHex(cast[BiggestInt](cast[ptr uint32](slider)[]), hexSize)
of 8:
result = result & toHex(cast[BiggestInt](cast[ptr uint64](slider)[]),
hexSize)
result = result & toHex(cast[BiggestInt](cast[ptr uint64](slider)[]), hexSize)
else:
raise newException(ValueError, "Wrong items size!")
result = result & " "

View File

@@ -38,11 +38,16 @@ import libp2p/protocols/ping
proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
var switch = SwitchBuilder
.new()
.withRng(rng) # Give the application RNG
.withAddress(ma) # Our local address(es)
.withTcpTransport() # Use TCP as transport
.withMplex() # Use Mplex as muxer
.withNoise() # Use Noise as secure manager
.withRng(rng)
# Give the application RNG
.withAddress(ma)
# Our local address(es)
.withTcpTransport()
# Use TCP as transport
.withMplex()
# Use Mplex as muxer
.withNoise()
# Use Noise as secure manager
.build()
return switch
@@ -57,7 +62,7 @@ proc main() {.async.} =
let
rng = newRng()
localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
pingProtocol = Ping.new(rng=rng)
pingProtocol = Ping.new(rng = rng)
## We created some variables that we'll need for the rest of the application: the global `rng` instance, our `localAddress`, and an instance of the `Ping` protocol.
## The address is in the [MultiAddress](https://github.com/multiformats/multiaddr) format. The port `0` means "take any port available".
##
@@ -78,7 +83,8 @@ proc main() {.async.} =
## We can find out which port was attributed, and the resulting local addresses, by using `switch1.peerInfo.addrs`.
##
## We'll **dial** the first switch from the second one, by specifying its **Peer ID**, its **MultiAddress** and the **`Ping` protocol codec**:
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, PingCodec)
let conn =
await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, PingCodec)
## We now have a `Ping` connection set up between the second and the first switch; we can use it to actually ping the node:
# ping the other node and echo the ping duration
echo "ping: ", await pingProtocol.ping(conn)
@@ -86,7 +92,8 @@ proc main() {.async.} =
# We must close the connection ourselves when we're done with it
await conn.close()
## And that's it! Just a little bit of cleanup: shutting down the switches, waiting for them to stop, and we'll call our `main` procedure:
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
await allFutures(switch1.stop(), switch2.stop())
# close connections and shutdown all transports
waitFor(main())

View File

@@ -48,22 +48,24 @@ proc main() {.async.} =
let
rng = newRng()
testProto = TestProto.new()
switch1 = newStandardSwitch(rng=rng)
switch2 = newStandardSwitch(rng=rng)
switch1 = newStandardSwitch(rng = rng)
switch2 = newStandardSwitch(rng = rng)
switch1.mount(testProto)
await switch1.start()
await switch2.start()
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
let conn =
await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
await testProto.hello(conn)
# We must close the connection ourselves when we're done with it
await conn.close()
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
await allFutures(switch1.stop(), switch2.stop())
# close connections and shutdown all transports
## This is very similar to the first tutorial's `main`; the only noteworthy difference is that we use `newStandardSwitch`, which is similar to the `createSwitch` of the first tutorial but is bundled directly in libp2p
##

View File

@@ -57,8 +57,8 @@ proc decode(_: type Metric, buf: seq[byte]): Result[Metric, ProtoError] =
#
# We are just checking the error, and ignoring whether the value
# is present or not (default values are valid).
discard ? pb.getField(1, res.name)
discard ? pb.getField(2, res.value)
discard ?pb.getField(1, res.name)
discard ?pb.getField(2, res.value)
ok(res)
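For context, a minimal sketch of what presence checking would look like when it does matter (assuming, as in minprotobuf, that getField returns a Result whose value is a presence flag):

let present = ?pb.getField(1, res.name) # `?` still propagates wire-format errors
if not present:
  discard # field absent: res.name keeps its default value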
proc encode(m: MetricList): ProtoBuffer =
@@ -72,10 +72,10 @@ proc decode(_: type MetricList, buf: seq[byte]): Result[MetricList, ProtoError]
res: MetricList
metrics: seq[seq[byte]]
let pb = initProtoBuffer(buf)
discard ? pb.getRepeatedField(1, metrics)
discard ?pb.getRepeatedField(1, metrics)
for metric in metrics:
res.metrics &= ? Metric.decode(metric)
res.metrics &= ?Metric.decode(metric)
ok(res)
## ## Results instead of exceptions
@@ -102,7 +102,7 @@ proc decode(_: type MetricList, buf: seq[byte]): Result[MetricList, ProtoError]
## ## Creating the protocol
## We'll next create a protocol, like in the last tutorial, to request these metrics from our host
type
MetricCallback = proc: Future[MetricList] {.raises: [], gcsafe.}
MetricCallback = proc(): Future[MetricList] {.raises: [], gcsafe.}
MetricProto = ref object of LPProtocol
metricGetter: MetricCallback
@@ -128,19 +128,19 @@ proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
## We can now create our main procedure:
proc main() {.async.} =
let rng = newRng()
proc randomMetricGenerator: Future[MetricList] {.async.} =
proc randomMetricGenerator(): Future[MetricList] {.async.} =
let metricCount = rng[].generate(uint32) mod 16
for i in 0 ..< metricCount + 1:
result.metrics.add(Metric(
name: "metric_" & $i,
value: float(rng[].generate(uint16)) / 1000.0
))
result.metrics.add(
Metric(name: "metric_" & $i, value: float(rng[].generate(uint16)) / 1000.0)
)
return result
let
metricProto1 = MetricProto.new(randomMetricGenerator)
metricProto2 = MetricProto.new(randomMetricGenerator)
switch1 = newStandardSwitch(rng=rng)
switch2 = newStandardSwitch(rng=rng)
switch1 = newStandardSwitch(rng = rng)
switch2 = newStandardSwitch(rng = rng)
switch1.mount(metricProto1)
@@ -148,14 +148,17 @@ proc main() {.async.} =
await switch2.start()
let
conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, metricProto2.codecs)
conn = await switch2.dial(
switch1.peerInfo.peerId, switch1.peerInfo.addrs, metricProto2.codecs
)
metrics = await metricProto2.fetch(conn)
await conn.close()
for metric in metrics.metrics:
echo metric.name, " = ", metric.value
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
await allFutures(switch1.stop(), switch2.stop())
# close connections and shutdown all transports
waitFor(main())

View File

@@ -40,8 +40,8 @@ proc encode(m: Metric): ProtoBuffer =
proc decode(_: type Metric, buf: seq[byte]): Result[Metric, ProtoError] =
var res: Metric
let pb = initProtoBuffer(buf)
discard ? pb.getField(1, res.name)
discard ? pb.getField(2, res.value)
discard ?pb.getField(1, res.name)
discard ?pb.getField(2, res.value)
ok(res)
proc encode(m: MetricList): ProtoBuffer =
@@ -56,11 +56,11 @@ proc decode(_: type MetricList, buf: seq[byte]): Result[MetricList, ProtoError]
res: MetricList
metrics: seq[seq[byte]]
let pb = initProtoBuffer(buf)
discard ? pb.getRepeatedField(1, metrics)
discard ?pb.getRepeatedField(1, metrics)
for metric in metrics:
res.metrics &= ? Metric.decode(metric)
? pb.getRequiredField(2, res.hostname)
res.metrics &= ?Metric.decode(metric)
?pb.getRequiredField(2, res.hostname)
ok(res)
## This is exactly like the previous structure, except that we added
@@ -73,11 +73,14 @@ type Node = tuple[switch: Switch, gossip: GossipSub, hostname: string]
proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
# This procedure will handle one of the node of the network
node.gossip.addValidator(["metrics"],
node.gossip.addValidator(
["metrics"],
proc(topic: string, message: Message): Future[ValidationResult] {.async.} =
let decoded = MetricList.decode(message.data)
if decoded.isErr: return ValidationResult.Reject
if decoded.isErr:
return ValidationResult.Reject
return ValidationResult.Accept
,
)
# This "validator" will attach to the `metrics` topic and make sure
# that every message in this topic is valid. This allows us to stop
@@ -87,23 +90,24 @@ proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
# `John` will be responsible for logging the metrics; the rest of the nodes
# will just forward them through the network
if node.hostname == "John":
node.gossip.subscribe("metrics",
proc (topic: string, data: seq[byte]) {.async.} =
node.gossip.subscribe(
"metrics",
proc(topic: string, data: seq[byte]) {.async.} =
echo MetricList.decode(data).tryGet()
,
)
else:
node.gossip.subscribe("metrics", nil)
# Create random metrics 10 times and broadcast them
for _ in 0..<10:
for _ in 0 ..< 10:
await sleepAsync(500.milliseconds)
var metricList = MetricList(hostname: node.hostname)
let metricCount = rng[].generate(uint32) mod 4
for i in 0 ..< metricCount + 1:
metricList.metrics.add(Metric(
name: "metric_" & $i,
value: float(rng[].generate(uint16)) / 1000.0
))
metricList.metrics.add(
Metric(name: "metric_" & $i, value: float(rng[].generate(uint16)) / 1000.0)
)
discard await node.gossip.publish("metrics", encode(metricList).buffer)
await node.switch.stop()
@@ -111,13 +115,13 @@ proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
## For our main procedure, we'll create a few nodes, and connect them together.
## Note that they are not all interconnected, but GossipSub will take care of
## broadcasting to the full network nonetheless.
proc main {.async.} =
proc main() {.async.} =
let rng = newRng()
var nodes: seq[Node]
for hostname in ["John", "Walter", "David", "Thuy", "Amy"]:
let
switch = newStandardSwitch(rng=rng)
switch = newStandardSwitch(rng = rng)
gossip = GossipSub.init(switch = switch, triggerSelf = true)
switch.mount(gossip)
await switch.start()
@@ -127,11 +131,12 @@ proc main {.async.} =
for index, node in nodes:
# Connect to a few neighbors
for otherNodeIdx in index - 1 .. index + 2:
if otherNodeIdx notin 0 ..< nodes.len or otherNodeIdx == index: continue
if otherNodeIdx notin 0 ..< nodes.len or otherNodeIdx == index:
continue
let otherNode = nodes[otherNodeIdx]
await node.switch.connect(
otherNode.switch.peerInfo.peerId,
otherNode.switch.peerInfo.addrs)
otherNode.switch.peerInfo.peerId, otherNode.switch.peerInfo.addrs
)
var allFuts: seq[Future[void]]
for node in nodes:

View File

@@ -20,14 +20,15 @@ import libp2p/discovery/discoverymngr
##
## Note that other discovery methods such as [Kademlia](https://github.com/libp2p/specs/blob/master/kad-dht/README.md) or [discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md) exist.
proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
SwitchBuilder.new()
.withRng(newRng())
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withTcpTransport()
.withYamux()
.withNoise()
.withRendezVous(rdv)
.build()
SwitchBuilder
.new()
.withRng(newRng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withTcpTransport()
.withYamux()
.withNoise()
.withRendezVous(rdv)
.build()
# Create a really simple protocol to log one message received then close the stream
const DumbCodec = "/dumb/proto/1.0.0"
@@ -36,6 +37,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
await conn.close()
return T.new(codecs = @[DumbCodec], handler = handle)
## ## Bootnodes
@@ -58,7 +60,7 @@ proc main() {.async.} =
switches: seq[Switch] = @[]
discManagers: seq[DiscoveryManager] = @[]
for i in 0..5:
for i in 0 .. 5:
let rdv = RendezVous.new()
# Create a remote future to await at the end of the program
let switch = createSwitch(rdv)
@@ -93,7 +95,7 @@ proc main() {.async.} =
# Use the discovery manager to find peers on the OddClub topic to greet them
let queryOddClub = dm.request(RdvNamespace("OddClub"))
for _ in 0..2:
for _ in 0 .. 2:
let
# getPeer gives you a PeerAttribute containing information about the peer.
res = await queryOddClub.getPeer()
@@ -109,7 +111,7 @@ proc main() {.async.} =
# Maybe it was because he wanted to join the EvenGang
let queryEvenGang = dm.request(RdvNamespace("EvenGang"))
for _ in 0..2:
for _ in 0 .. 2:
let
res = await queryEvenGang.getPeer()
conn = await newcomer.dial(res[PeerId], res.getAll(MultiAddress), DumbCodec)

View File

@@ -51,7 +51,7 @@ proc new(_: type[Game]): Game =
tickTime: -3.0, # 3 seconds of "warm-up" time
localPlayer: Player(x: 4, y: 16, currentDir: 3, nextDir: 3, color: 8),
remotePlayer: Player(x: 27, y: 16, currentDir: 1, nextDir: 1, color: 12),
peerFound: newFuture[Connection]()
peerFound: newFuture[Connection](),
)
for pos in 0 .. result.gameMap.high:
if pos mod mapSize in [0, mapSize - 1] or pos div mapSize in [0, mapSize - 1]:
@@ -81,7 +81,8 @@ proc update(g: Game, dt: float32) =
# This is a hacky way to make this happen
waitFor(sleepAsync(1.milliseconds))
# Don't do anything if we are still waiting for an opponent
if not(g.peerFound.finished()) or isNil(g.tickFinished): return
if not (g.peerFound.finished()) or isNil(g.tickFinished):
return
g.tickTime += dt
# Update the wanted direction, making sure we can't go backward
@@ -98,7 +99,8 @@ proc tick(g: Game, p: Player) =
# Move player and check if he lost
p.x += directions[p.currentDir][1]
p.y += directions[p.currentDir][2]
if g.gameMap[p.y * mapSize + p.x] != 0: p.lost = true
if g.gameMap[p.y * mapSize + p.x] != 0:
p.lost = true
g.gameMap[p.y * mapSize + p.x] = p.color
proc mainLoop(g: Game, peer: Connection) {.async.} =
@@ -123,16 +125,23 @@ proc draw(g: Game) =
for pos, color in g.gameMap:
setColor(color)
boxFill(pos mod 32 * 4, pos div 32 * 4, 4, 4)
let text = if not(g.peerFound.finished()): "Matchmaking.."
elif g.tickTime < -1.5: "Welcome to Etron"
elif g.tickTime < 0.0: "- " & $(int(abs(g.tickTime) / 0.5) + 1) & " -"
elif g.remotePlayer.lost and g.localPlayer.lost: "DEUCE"
elif g.localPlayer.lost: "YOU LOSE"
elif g.remotePlayer.lost: "YOU WON"
else: ""
let text =
if not (g.peerFound.finished()):
"Matchmaking.."
elif g.tickTime < -1.5:
"Welcome to Etron"
elif g.tickTime < 0.0:
"- " & $(int(abs(g.tickTime) / 0.5) + 1) & " -"
elif g.remotePlayer.lost and g.localPlayer.lost:
"DEUCE"
elif g.localPlayer.lost:
"YOU LOOSE"
elif g.remotePlayer.lost:
"YOU WON"
else:
""
printc(text, screenWidth div 2, screenHeight div 2)
## ## Matchmaking
## To find an opponent, we will broadcast our address on a
## GossipSub topic, and wait for someone to connect to us.
@@ -144,7 +153,8 @@ proc draw(g: Game) =
## and launch the game.
proc new(T: typedesc[GameProto], g: Game): T =
proc handle(conn: Connection, proto: string) {.async.} =
defer: await conn.closeWithEof()
defer:
await conn.closeWithEof()
if g.peerFound.finished or g.hasCandidate:
await conn.close()
return
@@ -157,6 +167,7 @@ proc new(T: typedesc[GameProto], g: Game): T =
# The handler of a protocol must wait for the stream to
# be finished before returning
await conn.join()
return T.new(codecs = @["/tron/1.0.0"], handler = handle)
proc networking(g: Game) {.async.} =
@@ -164,9 +175,10 @@ proc networking(g: Game) {.async.} =
# the Discovery examples combined
let
rdv = RendezVous.new()
switch = SwitchBuilder.new()
switch = SwitchBuilder
.new()
.withRng(newRng())
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withTcpTransport()
.withYamux()
.withNoise()
@@ -174,9 +186,7 @@ proc networking(g: Game) {.async.} =
.build()
dm = DiscoveryManager()
gameProto = GameProto.new(g)
gossip = GossipSub.init(
switch = switch,
triggerSelf = false)
gossip = GossipSub.init(switch = switch, triggerSelf = false)
dm.add(RendezVousInterface.new(rdv))
switch.mount(gossip)
@@ -184,10 +194,11 @@ proc networking(g: Game) {.async.} =
gossip.subscribe(
"/tron/matchmaking",
proc (topic: string, data: seq[byte]) {.async.} =
proc(topic: string, data: seq[byte]) {.async.} =
# If we are still looking for an opponent,
# try to match anyone broadcasting its address
if g.peerFound.finished or g.hasCandidate: return
if g.peerFound.finished or g.hasCandidate:
return
g.hasCandidate = true
try:
@@ -204,10 +215,12 @@ proc networking(g: Game) {.async.} =
swap(g.localPlayer, g.remotePlayer)
except CatchableError as exc:
discard
,
)
await switch.start()
defer: await switch.stop()
defer:
await switch.stop()
# As explained in the last tutorial, we need a bootnode to be able
# to find peers. We could use any libp2p running rendezvous (or any
@@ -243,7 +256,8 @@ proc networking(g: Game) {.async.} =
# We now wait for someone to connect to us (or for us to connect to someone)
let peerConn = await g.peerFound
defer: await peerConn.closeWithEof()
defer:
await peerConn.closeWithEof()
await g.mainLoop(peerConn)
@@ -252,7 +266,17 @@ let
netFut = networking(game)
nico.init("Status", "Tron")
nico.createWindow("Tron", mapSize * 4, mapSize * 4, 4, false)
nico.run(proc = discard, proc(dt: float32) = game.update(dt), proc = game.draw())
nico.run(
proc() =
discard
,
proc(dt: float32) =
game.update(dt)
,
proc() =
game.draw()
,
)
waitFor(netFut.cancelAndWait())
## And that's it! If you want to run this code locally, the simplest way is to use the

View File

@@ -21,7 +21,8 @@ when defined(nimdoc):
## that can help you get started.
# Import stuff for doc
import libp2p/[
import
libp2p/[
protobuf/minprotobuf,
switch,
stream/lpstream,
@@ -33,37 +34,39 @@ when defined(nimdoc):
peerid,
peerinfo,
peerstore,
multiaddress]
multiaddress,
]
proc dummyPrivateProc*() =
## A private proc example
discard
else:
import
libp2p/[protobuf/minprotobuf,
muxers/muxer,
muxers/mplex/mplex,
stream/lpstream,
stream/bufferstream,
stream/connection,
transports/transport,
transports/tcptransport,
protocols/secure/noise,
cid,
multihash,
multicodec,
errors,
switch,
peerid,
peerinfo,
multiaddress,
builders,
crypto/crypto,
protocols/pubsub]
libp2p/[
protobuf/minprotobuf,
muxers/muxer,
muxers/mplex/mplex,
stream/lpstream,
stream/bufferstream,
stream/connection,
transports/transport,
transports/tcptransport,
protocols/secure/noise,
cid,
multihash,
multicodec,
errors,
switch,
peerid,
peerinfo,
multiaddress,
builders,
crypto/crypto,
protocols/pubsub,
]
export
minprotobuf, switch, peerid, peerinfo,
connection, multiaddress, crypto, lpstream,
bufferstream, muxer, mplex, transport,
tcptransport, noise, errors, cid, multihash,
minprotobuf, switch, peerid, peerinfo, connection, multiaddress, crypto, lpstream,
bufferstream, muxer, mplex, transport, tcptransport, noise, errors, cid, multihash,
multicodec, builders, pubsub

View File

@@ -1,23 +1,16 @@
mode = ScriptMode.Verbose
packageName = "libp2p"
version = "1.3.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
packageName = "libp2p"
version = "1.3.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
requires "nim >= 1.6.0",
"nimcrypto >= 0.4.1",
"dnsclient >= 0.3.0 & < 0.4.0",
"bearssl >= 0.1.4",
"chronicles >= 0.10.2",
"chronos >= 4.0.0",
"metrics",
"secp256k1",
"stew#head",
"websock",
"unittest2"
"nimcrypto >= 0.4.1", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.1.4",
"chronicles >= 0.10.2", "chronos >= 4.0.0", "metrics", "secp256k1", "stew#head",
"websock", "unittest2"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -27,13 +20,13 @@ let verbose = getEnv("V", "") notin ["", "0"]
let cfg =
" --styleCheck:usages --styleCheck:error" &
(if verbose: "" else: " --verbosity:0 --hints:off") &
" --skipParentCfg --skipUserCfg -f" &
" --threads:on --opt:speed"
" --skipParentCfg --skipUserCfg -f" & " --threads:on --opt:speed"
import hashes, strutils
proc runTest(filename: string, verify: bool = true, sign: bool = true,
moreoptions: string = "") =
proc runTest(
filename: string, verify: bool = true, sign: bool = true, moreoptions: string = ""
) =
var excstr = nimc & " " & lang & " -d:debug " & cfg & " " & flags
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
@@ -52,7 +45,8 @@ proc buildSample(filename: string, run = false, extraFlags = "") =
rmFile "examples/" & filename.toExe
proc tutorialToMd(filename: string) =
let markdown = gorge "cat " & filename & " | " & nimc & " " & lang & " -r --verbosity:0 --hints:off tools/markdown_builder.nim "
let markdown = gorge "cat " & filename & " | " & nimc & " " & lang &
" -r --verbosity:0 --hints:off tools/markdown_builder.nim "
writeFile(filename.replace(".nim", ".md"), markdown)
task testnative, "Runs libp2p native tests":
@@ -65,24 +59,37 @@ task testinterop, "Runs interop tests":
runTest("testinterop")
task testpubsub, "Runs pubsub tests":
runTest("pubsub/testgossipinternal", sign = false, verify = false, moreoptions = "-d:pubsub_internal_testing")
runTest(
"pubsub/testgossipinternal",
sign = false,
verify = false,
moreoptions = "-d:pubsub_internal_testing",
)
runTest("pubsub/testpubsub")
runTest("pubsub/testpubsub", sign = false, verify = false)
runTest("pubsub/testpubsub", sign = false, verify = false, moreoptions = "-d:libp2p_pubsub_anonymize=true")
runTest(
"pubsub/testpubsub",
sign = false,
verify = false,
moreoptions = "-d:libp2p_pubsub_anonymize=true",
)
task testpubsub_slim, "Runs pubsub tests":
runTest("pubsub/testgossipinternal", sign = false, verify = false, moreoptions = "-d:pubsub_internal_testing")
runTest(
"pubsub/testgossipinternal",
sign = false,
verify = false,
moreoptions = "-d:pubsub_internal_testing",
)
runTest("pubsub/testpubsub")
task testfilter, "Run PKI filter test":
runTest("testpkifilter",
moreoptions = "-d:libp2p_pki_schemes=\"secp256k1\"")
runTest("testpkifilter",
moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519\"")
runTest("testpkifilter",
moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519;ecnist\"")
runTest("testpkifilter",
moreoptions = "-d:libp2p_pki_schemes=")
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1\"")
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519\"")
runTest(
"testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519;ecnist\""
)
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=")
task test, "Runs the test suite":
exec "nimble testnative"
@@ -117,7 +124,8 @@ task examples_build, "Build the samples":
buildSample("tutorial_3_protobuf", true)
buildSample("tutorial_4_gossipsub", true)
buildSample("tutorial_5_discovery", true)
exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
exec "nimble install -y nimpng@#HEAD"
# this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
exec "nimble install -y nico"
buildSample("tutorial_6_game", false, "--styleCheck:off")
@@ -135,7 +143,9 @@ task pin, "Create a lockfile":
import sequtils
import os
task install_pinned, "Reads the lockfile":
let toInstall = readFile(PinFile).splitWhitespace().mapIt((it.split(";", 1)[0], it.split(";", 1)[1]))
let toInstall = readFile(PinFile).splitWhitespace().mapIt(
(it.split(";", 1)[0], it.split(";", 1)[1])
)
# [('packageName', 'packageFullUri')]
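To illustrate the shape being parsed, a hypothetical lockfile entry (made-up package name and URI):

let entry = "nimcrypto;https://github.com/cheatfate/nimcrypto@#0123abc"
assert entry.split(";", 1) == @["nimcrypto", "https://github.com/cheatfate/nimcrypto@#0123abc"]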
rmDir("nimbledeps")
@@ -145,8 +155,7 @@ task install_pinned, "Reads the lockfile":
# Remove the automatically installed deps
# (inefficient you say?)
let nimblePkgs =
if system.dirExists("nimbledeps/pkgs"): "nimbledeps/pkgs"
else: "nimbledeps/pkgs2"
if system.dirExists("nimbledeps/pkgs"): "nimbledeps/pkgs" else: "nimbledeps/pkgs2"
for dependency in listDirs(nimblePkgs):
let
fileName = dependency.extractFilename
@@ -154,14 +163,12 @@ task install_pinned, "Reads the lockfile":
packageName = fileName.split('-')[0]
if toInstall.anyIt(
it[0] == packageName and
(
it[1].split('#')[^1] in fileContent or # nimble for nim 2.X
fileName.endsWith(it[1].split('#')[^1]) # nimble for nim 1.X
)
) == false or
fileName.split('-')[^1].len < 20: # safeguard for nimble for nim 1.X
rmDir(dependency)
it[0] == packageName and (
it[1].split('#')[^1] in fileContent or # nimble for nim 2.X
fileName.endsWith(it[1].split('#')[^1]) # nimble for nim 1.X
)
) == false or fileName.split('-')[^1].len < 20: # safeguard for nimble for nim 1.X
rmDir(dependency)
task unpin, "Restore global package use":
rmDir("nimbledeps")

View File

@@ -9,30 +9,33 @@
## This module contains a Switch Building helper.
runnableExamples:
let switch =
SwitchBuilder.new()
.withRng(rng)
.withAddresses(multiaddress)
# etc
.build()
let switch = SwitchBuilder.new().withRng(rng).withAddresses(multiaddress)
# etc
.build()
{.push raises: [].}
import options, tables, chronos, chronicles, sequtils
import
options, tables, chronos, chronicles, sequtils
import
switch, peerid, peerinfo, stream/connection, multiaddress,
crypto/crypto, transports/[transport, tcptransport],
switch,
peerid,
peerinfo,
stream/connection,
multiaddress,
crypto/crypto,
transports/[transport, tcptransport],
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
connmanager, upgrademngrs/muxedupgrade, observedaddrmanager,
connmanager,
upgrademngrs/muxedupgrade,
observedaddrmanager,
nameresolving/nameresolver,
errors, utility
errors,
utility
import services/wildcardresolverservice
export
switch, peerid, peerinfo, connection, multiaddress, crypto, errors
export switch, peerid, peerinfo, connection, multiaddress, crypto, errors
type
TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [].}
@@ -66,9 +69,8 @@ type
proc new*(T: type[SwitchBuilder]): T {.public.} =
## Creates a SwitchBuilder
let address = MultiAddress
.init("/ip4/127.0.0.1/tcp/0")
.expect("Should initialize to default")
let address =
MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("Should initialize to default")
SwitchBuilder(
privKey: none(PrivateKey),
@@ -80,23 +82,30 @@ proc new*(T: type[SwitchBuilder]): T {.public.} =
maxConnsPerPeer: MaxConnectionsPerPeer,
protoVersion: ProtoVersion,
agentVersion: AgentVersion,
enableWildcardResolver: true)
enableWildcardResolver: true,
)
proc withPrivateKey*(b: SwitchBuilder, privateKey: PrivateKey): SwitchBuilder {.public.} =
proc withPrivateKey*(
b: SwitchBuilder, privateKey: PrivateKey
): SwitchBuilder {.public.} =
## Set the private key of the switch. Will be used to
## generate a PeerId
b.privKey = some(privateKey)
b
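A minimal usage sketch (mirroring the PrivateKey.random call used in build below; error handling elided):

let rng = newRng()
let key = PrivateKey.random(rng[]).expect("random key")
let switch = SwitchBuilder.new().withRng(rng).withPrivateKey(key).build()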
proc withAddresses*(b: SwitchBuilder, addresses: seq[MultiAddress], enableWildcardResolver: bool = true): SwitchBuilder {.public.} =
proc withAddresses*(
b: SwitchBuilder, addresses: seq[MultiAddress], enableWildcardResolver: bool = true
): SwitchBuilder {.public.} =
## | Set the listening addresses of the switch
## | Calling it multiple times will override the value
b.addresses = addresses
b.enableWildcardResolver = enableWildcardResolver
b
proc withAddress*(b: SwitchBuilder, address: MultiAddress, enableWildcardResolver: bool = true): SwitchBuilder {.public.} =
proc withAddress*(
b: SwitchBuilder, address: MultiAddress, enableWildcardResolver: bool = true
): SwitchBuilder {.public.} =
## | Set the listening address of the switch
## | Calling it multiple time will override the value
b.withAddresses(@[address], enableWildcardResolver)
@@ -106,27 +115,23 @@ proc withSignedPeerRecord*(b: SwitchBuilder, sendIt = true): SwitchBuilder {.pub
b
proc withMplex*(
b: SwitchBuilder,
inTimeout = 5.minutes,
outTimeout = 5.minutes,
maxChannCount = 200): SwitchBuilder {.public.} =
b: SwitchBuilder, inTimeout = 5.minutes, outTimeout = 5.minutes, maxChannCount = 200
): SwitchBuilder {.public.} =
## | Uses `Mplex <https://docs.libp2p.io/concepts/stream-multiplexing/#mplex>`_ as a multiplexer
## | `Timeout` is the duration after which an inactive connection will be closed
proc newMuxer(conn: Connection): Muxer =
Mplex.new(
conn,
inTimeout,
outTimeout,
maxChannCount)
Mplex.new(conn, inTimeout, outTimeout, maxChannCount)
assert b.muxers.countIt(it.codec == MplexCodec) == 0, "Mplex build multiple times"
b.muxers.add(MuxerProvider.new(newMuxer, MplexCodec))
b
proc withYamux*(b: SwitchBuilder,
proc withYamux*(
b: SwitchBuilder,
windowSize: int = YamuxDefaultWindowSize,
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes): SwitchBuilder =
outTimeout: Duration = 5.minutes,
): SwitchBuilder =
proc newMuxer(conn: Connection): Muxer =
Yamux.new(conn, windowSize, inTimeout = inTimeout, outTimeout = outTimeout)
@@ -138,24 +143,36 @@ proc withNoise*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.secureManagers.add(SecureProtocol.Noise)
b
proc withTransport*(b: SwitchBuilder, prov: TransportProvider): SwitchBuilder {.public.} =
proc withTransport*(
b: SwitchBuilder, prov: TransportProvider
): SwitchBuilder {.public.} =
## Use a custom transport
runnableExamples:
let switch =
SwitchBuilder.new()
.withTransport(proc(upgr: Upgrade): Transport = TcpTransport.new(flags, upgr))
let switch = SwitchBuilder
.new()
.withTransport(
proc(upgr: Upgrade): Transport =
TcpTransport.new(flags, upgr)
)
.build()
b.transports.add(prov)
b
proc withTcpTransport*(b: SwitchBuilder, flags: set[ServerFlags] = {}): SwitchBuilder {.public.} =
b.withTransport(proc(upgr: Upgrade): Transport = TcpTransport.new(flags, upgr))
proc withTcpTransport*(
b: SwitchBuilder, flags: set[ServerFlags] = {}
): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade): Transport =
TcpTransport.new(flags, upgr)
)
proc withRng*(b: SwitchBuilder, rng: ref HmacDrbgContext): SwitchBuilder {.public.} =
b.rng = rng
b
proc withMaxConnections*(b: SwitchBuilder, maxConnections: int): SwitchBuilder {.public.} =
proc withMaxConnections*(
b: SwitchBuilder, maxConnections: int
): SwitchBuilder {.public.} =
## Maximum concurrent connections of the switch. You should either use this, or
## `withMaxIn <#withMaxIn,SwitchBuilder,int>`_ & `withMaxOut<#withMaxOut,SwitchBuilder,int>`_
b.maxConnections = maxConnections
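A sketch of the two alternative configurations (limits are arbitrary):

discard SwitchBuilder.new().withMaxConnections(100) # one global cap
discard SwitchBuilder.new().withMaxIn(50).withMaxOut(50) # per-direction caps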
@@ -171,7 +188,9 @@ proc withMaxOut*(b: SwitchBuilder, maxOut: int): SwitchBuilder {.public.} =
b.maxOut = maxOut
b
proc withMaxConnsPerPeer*(b: SwitchBuilder, maxConnsPerPeer: int): SwitchBuilder {.public.} =
proc withMaxConnsPerPeer*(
b: SwitchBuilder, maxConnsPerPeer: int
): SwitchBuilder {.public.} =
b.maxConnsPerPeer = maxConnsPerPeer
b
@@ -179,15 +198,21 @@ proc withPeerStore*(b: SwitchBuilder, capacity: int): SwitchBuilder {.public.} =
b.peerStoreCapacity = Opt.some(capacity)
b
proc withProtoVersion*(b: SwitchBuilder, protoVersion: string): SwitchBuilder {.public.} =
proc withProtoVersion*(
b: SwitchBuilder, protoVersion: string
): SwitchBuilder {.public.} =
b.protoVersion = protoVersion
b
proc withAgentVersion*(b: SwitchBuilder, agentVersion: string): SwitchBuilder {.public.} =
proc withAgentVersion*(
b: SwitchBuilder, agentVersion: string
): SwitchBuilder {.public.} =
b.agentVersion = agentVersion
b
proc withNameResolver*(b: SwitchBuilder, nameResolver: NameResolver): SwitchBuilder {.public.} =
proc withNameResolver*(
b: SwitchBuilder, nameResolver: NameResolver
): SwitchBuilder {.public.} =
b.nameResolver = nameResolver
b
@@ -199,7 +224,9 @@ proc withCircuitRelay*(b: SwitchBuilder, r: Relay = Relay.new()): SwitchBuilder
b.circuitRelay = r
b
proc withRendezVous*(b: SwitchBuilder, rdv: RendezVous = RendezVous.new()): SwitchBuilder =
proc withRendezVous*(
b: SwitchBuilder, rdv: RendezVous = RendezVous.new()
): SwitchBuilder =
b.rdv = rdv
b
@@ -207,31 +234,26 @@ proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
b.services = services
b
proc withObservedAddrManager*(b: SwitchBuilder, observedAddrManager: ObservedAddrManager): SwitchBuilder =
proc withObservedAddrManager*(
b: SwitchBuilder, observedAddrManager: ObservedAddrManager
): SwitchBuilder =
b.observedAddrManager = observedAddrManager
b
proc build*(b: SwitchBuilder): Switch
{.raises: [LPError], public.} =
proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
if b.rng == nil: # newRng could fail
raise newException(Defect, "Cannot initialize RNG")
let pkRes = PrivateKey.random(b.rng[])
let
seckey = b.privKey.get(otherwise = pkRes.expect("Expected default Private Key"))
let seckey = b.privKey.get(otherwise = pkRes.expect("Expected default Private Key"))
var
secureManagerInstances: seq[Secure]
var secureManagerInstances: seq[Secure]
if SecureProtocol.Noise in b.secureManagers:
secureManagerInstances.add(Noise.new(b.rng, seckey).Secure)
let
peerInfo = PeerInfo.new(
seckey,
b.addresses,
protoVersion = b.protoVersion,
agentVersion = b.agentVersion)
let peerInfo = PeerInfo.new(
seckey, b.addresses, protoVersion = b.protoVersion, agentVersion = b.agentVersion
)
let identify =
if b.observedAddrManager != nil:
@@ -240,16 +262,16 @@ proc build*(b: SwitchBuilder): Switch
Identify.new(peerInfo, b.sendSignedPeerRecord)
let
connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
connManager =
ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
ms = MultistreamSelect.new()
muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)
let
transports = block:
var transports: seq[Transport]
for tProvider in b.transports:
transports.add(tProvider(muxedUpgrade))
transports
let transports = block:
var transports: seq[Transport]
for tProvider in b.transports:
transports.add(tProvider(muxedUpgrade))
transports
if b.secureManagers.len == 0:
b.secureManagers &= SecureProtocol.Noise
@@ -274,7 +296,8 @@ proc build*(b: SwitchBuilder): Switch
ms = ms,
nameResolver = b.nameResolver,
peerStore = peerStore,
services = b.services)
services = b.services,
)
switch.mount(identify)
@@ -298,9 +321,7 @@ proc newStandardSwitch*(
privKey = none(PrivateKey),
addrs: MultiAddress | seq[MultiAddress] =
MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("valid address"),
secureManagers: openArray[SecureProtocol] = [
SecureProtocol.Noise,
],
secureManagers: openArray[SecureProtocol] = [SecureProtocol.Noise],
transportFlags: set[ServerFlags] = {},
rng = newRng(),
inTimeout: Duration = 5.minutes,
@@ -311,10 +332,14 @@ proc newStandardSwitch*(
maxConnsPerPeer = MaxConnectionsPerPeer,
nameResolver: NameResolver = nil,
sendSignedPeerRecord = false,
peerStoreCapacity = 1000
peerStoreCapacity = 1000,
): Switch {.raises: [LPError], public.} =
## Helper for common switch configurations.
let addrs = when addrs is MultiAddress: @[addrs] else: addrs
let addrs =
when addrs is MultiAddress:
@[addrs]
else:
addrs
var b = SwitchBuilder
.new()
.withAddresses(addrs)
@@ -324,7 +349,7 @@ proc newStandardSwitch*(
.withMaxIn(maxIn)
.withMaxOut(maxOut)
.withMaxConnsPerPeer(maxConnsPerPeer)
.withPeerStore(capacity=peerStoreCapacity)
.withPeerStore(capacity = peerStoreCapacity)
.withMplex(inTimeout, outTimeout)
.withTcpTransport(transportFlags)
.withNameResolver(nameResolver)

View File

@@ -19,10 +19,16 @@ export results
type
CidError* {.pure.} = enum
Error, Incorrect, Unsupported, Overrun
Error
Incorrect
Unsupported
Overrun
CidVersion* = enum
CIDvIncorrect, CIDv0, CIDv1, CIDvReserved
CIDvIncorrect
CIDv0
CIDv1
CIDvReserved
Cid* = object
cidver*: CidVersion
@@ -30,54 +36,51 @@ type
hpos*: int
data*: VBuffer
const
ContentIdsList = [
multiCodec("raw"),
multiCodec("dag-pb"),
multiCodec("dag-cbor"),
multiCodec("dag-json"),
multiCodec("git-raw"),
multiCodec("eth-block"),
multiCodec("eth-block-list"),
multiCodec("eth-tx-trie"),
multiCodec("eth-tx"),
multiCodec("eth-tx-receipt-trie"),
multiCodec("eth-tx-receipt"),
multiCodec("eth-state-trie"),
multiCodec("eth-account-snapshot"),
multiCodec("eth-storage-trie"),
multiCodec("bitcoin-block"),
multiCodec("bitcoin-tx"),
multiCodec("zcash-block"),
multiCodec("zcash-tx"),
multiCodec("stellar-block"),
multiCodec("stellar-tx"),
multiCodec("decred-block"),
multiCodec("decred-tx"),
multiCodec("dash-block"),
multiCodec("dash-tx"),
multiCodec("torrent-info"),
multiCodec("torrent-file"),
multiCodec("ed25519-pub")
]
const ContentIdsList = [
multiCodec("raw"),
multiCodec("dag-pb"),
multiCodec("dag-cbor"),
multiCodec("dag-json"),
multiCodec("git-raw"),
multiCodec("eth-block"),
multiCodec("eth-block-list"),
multiCodec("eth-tx-trie"),
multiCodec("eth-tx"),
multiCodec("eth-tx-receipt-trie"),
multiCodec("eth-tx-receipt"),
multiCodec("eth-state-trie"),
multiCodec("eth-account-snapshot"),
multiCodec("eth-storage-trie"),
multiCodec("bitcoin-block"),
multiCodec("bitcoin-tx"),
multiCodec("zcash-block"),
multiCodec("zcash-tx"),
multiCodec("stellar-block"),
multiCodec("stellar-tx"),
multiCodec("decred-block"),
multiCodec("decred-tx"),
multiCodec("dash-block"),
multiCodec("dash-tx"),
multiCodec("torrent-info"),
multiCodec("torrent-file"),
multiCodec("ed25519-pub"),
]
proc initCidCodeTable(): Table[int, MultiCodec] {.compileTime.} =
for item in ContentIdsList:
result[int(item)] = item
const
CodeContentIds = initCidCodeTable()
const CodeContentIds = initCidCodeTable()
template orError*(exp: untyped, err: untyped): untyped =
(exp.mapErr do (_: auto) -> auto: err)
exp.mapErr do(_: auto) -> auto:
err
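orError collapses whatever error type exp carries into the fixed err; a minimal usage sketch (mirroring the mhash call further down in this file, with a hypothetical buf):

let mh = MultiHash.init(buf).orError(CidError.Incorrect) # Result[MultiHash, CidError]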
proc decode(data: openArray[byte]): Result[Cid, CidError] =
if len(data) == 34 and data[0] == 0x12'u8 and data[1] == 0x20'u8:
ok(Cid(
cidver: CIDv0,
mcodec: multiCodec("dag-pb"),
hpos: 0,
data: initVBuffer(data)))
ok(
Cid(cidver: CIDv0, mcodec: multiCodec("dag-pb"), hpos: 0, data: initVBuffer(data))
)
else:
var version, codec: uint64
var res, offset: int
@@ -98,21 +101,18 @@ proc decode(data: openArray[byte]): Result[Cid, CidError] =
err(CidError.Incorrect)
else:
offset += res
var mcodec = CodeContentIds.getOrDefault(cast[int](codec),
InvalidMultiCodec)
var mcodec =
CodeContentIds.getOrDefault(cast[int](codec), InvalidMultiCodec)
if mcodec == InvalidMultiCodec:
err(CidError.Incorrect)
else:
if not MultiHash.validate(vb.buffer.toOpenArray(vb.offset,
vb.buffer.high)):
if not MultiHash.validate(
vb.buffer.toOpenArray(vb.offset, vb.buffer.high)
):
err(CidError.Incorrect)
else:
vb.finish()
ok(Cid(
cidver: CIDv1,
mcodec: mcodec,
hpos: offset,
data: vb))
ok(Cid(cidver: CIDv1, mcodec: mcodec, hpos: offset, data: vb))
proc decode(data: openArray[char]): Result[Cid, CidError] =
var buffer: seq[byte]
@@ -172,7 +172,9 @@ proc mhash*(cid: Cid): Result[MultiHash, CidError] =
if cid.cidver notin {CIDv0, CIDv1}:
err(CidError.Incorrect)
else:
MultiHash.init(cid.data.buffer.toOpenArray(cid.hpos, cid.data.high)).orError(CidError.Incorrect)
MultiHash.init(cid.data.buffer.toOpenArray(cid.hpos, cid.data.high)).orError(
CidError.Incorrect
)
proc contentType*(cid: Cid): Result[MultiCodec, CidError] =
## Returns content type part of CID
@@ -185,12 +187,15 @@ proc version*(cid: Cid): CidVersion =
## Returns CID version
result = cid.cidver
proc init*[T: char|byte](ctype: typedesc[Cid], data: openArray[T]): Result[Cid, CidError] =
proc init*[T: char | byte](
ctype: typedesc[Cid], data: openArray[T]
): Result[Cid, CidError] =
## Create new content identifier using array of bytes or string ``data``.
decode(data)
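A minimal usage sketch (the base58 string is a well-known example CIDv0, not taken from this repo):

let cid = Cid.init("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
if cid.isOk:
  assert cid.get().version() == CIDv0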
proc init*(ctype: typedesc[Cid], version: CidVersion, content: MultiCodec,
hash: MultiHash): Result[Cid, CidError] =
proc init*(
ctype: typedesc[Cid], version: CidVersion, content: MultiCodec, hash: MultiHash
): Result[Cid, CidError] =
## Create new content identifier using content type ``content`` and
## MultiHash ``hash`` using version ``version``.
##
@@ -213,8 +218,7 @@ proc init*(ctype: typedesc[Cid], version: CidVersion, content: MultiCodec,
res.data.finish()
return ok(res)
elif version == CIDv1:
let mcodec = CodeContentIds.getOrDefault(cast[int](content),
InvalidMultiCodec)
let mcodec = CodeContentIds.getOrDefault(cast[int](content), InvalidMultiCodec)
if mcodec == InvalidMultiCodec:
return err(CidError.Incorrect)
res.mcodec = mcodec
@@ -233,11 +237,9 @@ proc `==`*(a: Cid, b: Cid): bool =
## are equal, ``false`` otherwise.
if a.mcodec == b.mcodec:
var ah, bh: MultiHash
if MultiHash.decode(
a.data.buffer.toOpenArray(a.hpos, a.data.high), ah).isErr:
if MultiHash.decode(a.data.buffer.toOpenArray(a.hpos, a.data.high), ah).isErr:
return false
if MultiHash.decode(
b.data.buffer.toOpenArray(b.hpos, b.data.high), bh).isErr:
if MultiHash.decode(b.data.buffer.toOpenArray(b.hpos, b.data.high), bh).isErr:
return false
result = (ah == bh)

View File

@@ -11,12 +11,7 @@
import std/[tables, sequtils, sets]
import pkg/[chronos, chronicles, metrics]
import peerinfo,
peerstore,
stream/connection,
muxers/muxer,
utils/semaphore,
errors
import peerinfo, peerstore, stream/connection, muxers/muxer, utils/semaphore, errors
logScope:
topics = "libp2p connmanager"
@@ -32,12 +27,13 @@ type
AlreadyExpectingConnectionError* = object of LPError
ConnEventKind* {.pure.} = enum
Connected, # A connection was made and securely upgraded - there may be
# more than one concurrent connection thus more than one upgrade
# event per peer.
Disconnected # Peer disconnected - this event is fired once per upgrade
# when the associated connection is terminated.
Connected
# A connection was made and securely upgraded - there may be
# more than one concurrent connection thus more than one upgrade
# event per peer.
Disconnected
# Peer disconnected - this event is fired once per upgrade
# when the associated connection is terminated.
ConnEvent* = object
case kind*: ConnEventKind
@@ -47,19 +43,18 @@ type
discard
ConnEventHandler* =
proc(peerId: PeerId, event: ConnEvent): Future[void]
{.gcsafe, raises: [].}
proc(peerId: PeerId, event: ConnEvent): Future[void] {.gcsafe, raises: [].}
PeerEventKind* {.pure.} = enum
Left,
Left
Joined
PeerEvent* = object
case kind*: PeerEventKind
of PeerEventKind.Joined:
initiator*: bool
else:
discard
of PeerEventKind.Joined:
initiator*: bool
else:
discard
PeerEventHandler* =
proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [].}
@@ -81,11 +76,13 @@ type
proc newTooManyConnectionsError(): ref TooManyConnectionsError {.inline.} =
result = newException(TooManyConnectionsError, "Too many connections")
proc new*(C: type ConnManager,
maxConnsPerPeer = MaxConnectionsPerPeer,
maxConnections = MaxConnections,
maxIn = -1,
maxOut = -1): ConnManager =
proc new*(
C: type ConnManager,
maxConnsPerPeer = MaxConnectionsPerPeer,
maxConnections = MaxConnections,
maxIn = -1,
maxOut = -1,
): ConnManager =
var inSema, outSema: AsyncSemaphore
if maxIn > 0 or maxOut > 0:
inSema = newAsyncSemaphore(maxIn)
@@ -96,9 +93,7 @@ proc new*(C: type ConnManager,
else:
raiseAssert "Invalid connection counts!"
C(maxConnsPerPeer: maxConnsPerPeer,
inSema: inSema,
outSema: outSema)
C(maxConnsPerPeer: maxConnsPerPeer, inSema: inSema, outSema: outSema)
proc connCount*(c: ConnManager, peerId: PeerId): int =
c.muxed.getOrDefault(peerId).len
@@ -113,22 +108,21 @@ proc connectedPeers*(c: ConnManager, dir: Direction): seq[PeerId] =
proc getConnections*(c: ConnManager): Table[PeerId, seq[Muxer]] =
return c.muxed
proc addConnEventHandler*(c: ConnManager,
handler: ConnEventHandler,
kind: ConnEventKind) =
proc addConnEventHandler*(
c: ConnManager, handler: ConnEventHandler, kind: ConnEventKind
) =
## Add connection event handler - handlers must not raise exceptions!
##
if isNil(handler): return
if isNil(handler):
return
c.connEvents[kind].incl(handler)
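A minimal usage sketch (hypothetical connMngr; the handler must not raise):

proc onConn(peerId: PeerId, event: ConnEvent) {.async.} =
  echo "connected: ", peerId

connMngr.addConnEventHandler(onConn, ConnEventKind.Connected)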
proc removeConnEventHandler*(c: ConnManager,
handler: ConnEventHandler,
kind: ConnEventKind) =
c.connEvents[kind].excl(handler)
proc removeConnEventHandler*(
c: ConnManager, handler: ConnEventHandler, kind: ConnEventKind
) =
c.connEvents[kind].excl(handler)
proc triggerConnEvent*(c: ConnManager,
peerId: PeerId,
event: ConnEvent) {.async.} =
proc triggerConnEvent*(c: ConnManager, peerId: PeerId, event: ConnEvent) {.async.} =
try:
trace "About to trigger connection events", peer = peerId
if c.connEvents[event.kind].len() > 0:
@@ -141,27 +135,24 @@ proc triggerConnEvent*(c: ConnManager,
except CancelledError as exc:
raise exc
except CatchableError as exc:
warn "Exception in triggerConnEvents",
msg = exc.msg, peer = peerId, event = $event
warn "Exception in triggerConnEvents", msg = exc.msg, peer = peerId, event = $event
proc addPeerEventHandler*(c: ConnManager,
handler: PeerEventHandler,
kind: PeerEventKind) =
proc addPeerEventHandler*(
c: ConnManager, handler: PeerEventHandler, kind: PeerEventKind
) =
## Add peer event handler - handlers must not raise exceptions!
##
if isNil(handler): return
if isNil(handler):
return
c.peerEvents[kind].incl(handler)
proc removePeerEventHandler*(c: ConnManager,
handler: PeerEventHandler,
kind: PeerEventKind) =
proc removePeerEventHandler*(
c: ConnManager, handler: PeerEventHandler, kind: PeerEventKind
) =
c.peerEvents[kind].excl(handler)
proc triggerPeerEvents*(c: ConnManager,
peerId: PeerId,
event: PeerEvent) {.async.} =
proc triggerPeerEvents*(c: ConnManager, peerId: PeerId, event: PeerEvent) {.async.} =
trace "About to trigger peer events", peer = peerId
if c.peerEvents[event.kind].len == 0:
return
@@ -179,11 +170,16 @@ proc triggerPeerEvents*(c: ConnManager,
except CatchableError as exc: # handlers should not raise!
warn "Exception in triggerPeerEvents", exc = exc.msg, peer = peerId
proc expectConnection*(c: ConnManager, p: PeerId, dir: Direction): Future[Muxer] {.async.} =
proc expectConnection*(
c: ConnManager, p: PeerId, dir: Direction
): Future[Muxer] {.async.} =
## Wait for a peer to connect to us. This will bypass the `MaxConnectionsPerPeer` limit.
let key = (p, dir)
if key in c.expectedConnectionsOverLimit:
raise newException(AlreadyExpectingConnectionError, "Already expecting an incoming connection from that peer")
raise newException(
AlreadyExpectingConnectionError,
"Already expecting an incoming connection from that peer",
)
let future = newFuture[Muxer]()
c.expectedConnectionsOverLimit[key] = future
@@ -211,7 +207,7 @@ proc closeMuxer(muxer: Muxer) {.async.} =
trace "Cleaning up muxer", m = muxer
await muxer.close()
if not(isNil(muxer.handler)):
if not (isNil(muxer.handler)):
try:
await muxer.handler # TODO noraises?
except CatchableError as exc:
@@ -231,16 +227,14 @@ proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
libp2p_peers.set(c.muxed.len.int64)
await c.triggerPeerEvents(peerId, PeerEvent(kind: PeerEventKind.Left))
if not(c.peerStore.isNil):
if not (c.peerStore.isNil):
c.peerStore.cleanup(peerId)
await c.triggerConnEvent(
peerId, ConnEvent(kind: ConnEventKind.Disconnected))
await c.triggerConnEvent(peerId, ConnEvent(kind: ConnEventKind.Disconnected))
except CatchableError as exc:
# This is a top-level procedure which will run as a separate task, so it
# does not need to propagate CancelledError and should handle other errors
warn "Unexpected exception peer cleanup handler",
mux, msg = exc.msg
warn "Unexpected exception peer cleanup handler", mux, msg = exc.msg
proc onClose(c: ConnManager, mux: Muxer) {.async.} =
## connection close event handler
@@ -251,19 +245,14 @@ proc onClose(c: ConnManager, mux: Muxer) {.async.} =
await mux.connection.join()
trace "Connection closed, cleaning up", mux
except CatchableError as exc:
debug "Unexpected exception in connection manager's cleanup",
errMsg = exc.msg, mux
debug "Unexpected exception in connection manager's cleanup", errMsg = exc.msg, mux
finally:
await c.muxCleanup(mux)
proc selectMuxer*(c: ConnManager,
peerId: PeerId,
dir: Direction): Muxer =
proc selectMuxer*(c: ConnManager, peerId: PeerId, dir: Direction): Muxer =
## Select a connection for the provided peer and direction
##
let conns = toSeq(
c.muxed.getOrDefault(peerId))
.filterIt( it.connection.dir == dir )
let conns = toSeq(c.muxed.getOrDefault(peerId)).filterIt(it.connection.dir == dir)
if conns.len > 0:
return conns[0]
@@ -280,9 +269,7 @@ proc selectMuxer*(c: ConnManager, peerId: PeerId): Muxer =
trace "connection not found", peerId
return mux
proc storeMuxer*(c: ConnManager,
muxer: Muxer)
{.raises: [CatchableError].} =
proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [CatchableError].} =
## store the connection and muxer
##
@@ -304,10 +291,9 @@ proc storeMuxer*(c: ConnManager,
let key = (peerId, dir)
let expectedConn = c.expectedConnectionsOverLimit.getOrDefault(key)
if expectedConn != nil and not expectedConn.finished:
expectedConn.complete(muxer)
expectedConn.complete(muxer)
else:
debug "Too many connections for peer",
conns = c.muxed.getOrDefault(peerId).len
debug "Too many connections for peer", conns = c.muxed.getOrDefault(peerId).len
raise newTooManyConnectionsError()
@@ -322,36 +308,39 @@ proc storeMuxer*(c: ConnManager,
libp2p_peers.set(c.muxed.len.int64)
asyncSpawn c.triggerConnEvent(
peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: dir == Direction.In))
peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: dir == Direction.In)
)
if newPeer:
asyncSpawn c.triggerPeerEvents(
peerId, PeerEvent(kind: PeerEventKind.Joined, initiator: dir == Direction.Out))
peerId, PeerEvent(kind: PeerEventKind.Joined, initiator: dir == Direction.Out)
)
asyncSpawn c.onClose(muxer)
trace "Stored muxer",
muxer, direction = $muxer.connection.dir, peers = c.muxed.len
trace "Stored muxer", muxer, direction = $muxer.connection.dir, peers = c.muxed.len
proc getIncomingSlot*(c: ConnManager): Future[ConnectionSlot] {.async.} =
await c.inSema.acquire()
return ConnectionSlot(connManager: c, direction: In)
proc getOutgoingSlot*(c: ConnManager, forceDial = false): ConnectionSlot {.raises: [TooManyConnectionsError].} =
proc getOutgoingSlot*(
c: ConnManager, forceDial = false
): ConnectionSlot {.raises: [TooManyConnectionsError].} =
if forceDial:
c.outSema.forceAcquire()
elif not c.outSema.tryAcquire():
trace "Too many outgoing connections!", count = c.outSema.count,
max = c.outSema.size
trace "Too many outgoing connections!",
count = c.outSema.count, max = c.outSema.size
raise newTooManyConnectionsError()
return ConnectionSlot(connManager: c, direction: Out)
proc slotsAvailable*(c: ConnManager, dir: Direction): int =
case dir:
of Direction.In:
return c.inSema.count
of Direction.Out:
return c.outSema.count
case dir
of Direction.In:
return c.inSema.count
of Direction.Out:
return c.outSema.count
proc release*(cs: ConnectionSlot) =
if cs.direction == In:
@@ -380,30 +369,27 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
return
cs.trackConnection(mux.connection)
proc getStream*(c: ConnManager,
muxer: Muxer): Future[Connection] {.async.} =
proc getStream*(c: ConnManager, muxer: Muxer): Future[Connection] {.async.} =
## get a muxed stream for the passed muxer
##
if not(isNil(muxer)):
if not (isNil(muxer)):
return await muxer.newStream()
proc getStream*(c: ConnManager,
peerId: PeerId): Future[Connection] {.async.} =
proc getStream*(c: ConnManager, peerId: PeerId): Future[Connection] {.async.} =
## get a muxed stream for the passed peer from any connection
##
return await c.getStream(c.selectMuxer(peerId))
proc getStream*(c: ConnManager,
peerId: PeerId,
dir: Direction): Future[Connection] {.async.} =
proc getStream*(
c: ConnManager, peerId: PeerId, dir: Direction
): Future[Connection] {.async.} =
## get a muxed stream for the passed peer from a connection with `dir`
##
return await c.getStream(c.selectMuxer(peerId, dir))
proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
## drop connections and clean up resources for peer
##
@@ -435,4 +421,3 @@ proc close*(c: ConnManager) {.async.} =
await closeMuxer(mux)
trace "Closed ConnManager"

View File

@@ -48,17 +48,19 @@ proc intoChaChaPolyTag*(s: openArray[byte]): ChaChaPolyTag =
# this is reconciled at runtime
# we do this in the global scope / module init
proc encrypt*(_: type[ChaChaPoly],
key: ChaChaPolyKey,
nonce: ChaChaPolyNonce,
tag: var ChaChaPolyTag,
data: var openArray[byte],
aad: openArray[byte]) =
let
ad = if aad.len > 0:
unsafeAddr aad[0]
else:
nil
proc encrypt*(
_: type[ChaChaPoly],
key: ChaChaPolyKey,
nonce: ChaChaPolyNonce,
tag: var ChaChaPolyTag,
data: var openArray[byte],
aad: openArray[byte],
) =
let ad =
if aad.len > 0:
unsafeAddr aad[0]
else:
nil
poly1305CtmulRun(
unsafeAddr key[0],
@@ -69,20 +71,23 @@ proc encrypt*(_: type[ChaChaPoly],
uint(aad.len),
baseAddr(tag),
# cast is required to workaround https://github.com/nim-lang/Nim/issues/13905
cast[Chacha20Run](chacha20CtRun),
#[encrypt]# 1.cint)
cast[Chacha20Run](chacha20CtRun), #[encrypt]#
1.cint,
)
proc decrypt*(_: type[ChaChaPoly],
key: ChaChaPolyKey,
nonce: ChaChaPolyNonce,
tag: var ChaChaPolyTag,
data: var openArray[byte],
aad: openArray[byte]) =
let
ad = if aad.len > 0:
unsafeAddr aad[0]
else:
nil
proc decrypt*(
_: type[ChaChaPoly],
key: ChaChaPolyKey,
nonce: ChaChaPolyNonce,
tag: var ChaChaPolyTag,
data: var openArray[byte],
aad: openArray[byte],
) =
let ad =
if aad.len > 0:
unsafeAddr aad[0]
else:
nil
poly1305CtmulRun(
unsafeAddr key[0],
@@ -93,5 +98,6 @@ proc decrypt*(_: type[ChaChaPoly],
uint(aad.len),
baseAddr(tag),
# cast is required to workaround https://github.com/nim-lang/Nim/issues/13905
cast[Chacha20Run](chacha20CtRun),
#[decrypt]# 0.cint)
cast[Chacha20Run](chacha20CtRun), #[decrypt]#
0.cint,
)
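
For orientation, a usage sketch of the reformatted in-place AEAD calls. Not part of the commit; it assumes ChaChaPolyKey/ChaChaPolyNonce/ChaChaPolyTag are the fixed-size byte arrays implied above, and the module path is approximate.

import libp2p/crypto/chacha20poly1305

var
  key: ChaChaPolyKey # zeroed key/nonce, for illustration only
  nonce: ChaChaPolyNonce
  tag: ChaChaPolyTag
  data = @[1'u8, 2, 3, 4]
let aad = @[42'u8]

ChaChaPoly.encrypt(key, nonce, tag, data, aad) # data -> ciphertext, tag filled
ChaChaPoly.decrypt(key, nonce, tag, data, aad) # data -> plaintext again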

View File

@@ -14,12 +14,11 @@ from strutils import split, strip, cmpIgnoreCase
const libp2p_pki_schemes* {.strdefine.} = "rsa,ed25519,secp256k1,ecnist"
type
PKScheme* = enum
RSA = 0,
Ed25519,
Secp256k1,
ECDSA
type PKScheme* = enum
RSA = 0
Ed25519
Secp256k1
ECDSA
proc initSupportedSchemes(list: static string): set[PKScheme] =
var res: set[PKScheme]
@@ -85,7 +84,7 @@ export rijndael, twofish, sha2, hash, hmac, ncrutils, rand
type
DigestSheme* = enum
Sha256,
Sha256
Sha512
PublicKey* = object
@@ -148,15 +147,16 @@ type
data*: seq[byte]
CryptoError* = enum
KeyError,
SigError,
HashError,
KeyError
SigError
HashError
SchemeError
CryptoResult*[T] = Result[T, CryptoError]
template orError*(exp: untyped, err: untyped): untyped =
(exp.mapErr do (_: auto) -> auto: err)
exp.mapErr do(_: auto) -> auto:
err
proc newRng*(): ref HmacDrbgContext =
# You should only create one instance of the RNG per application / library
@@ -172,11 +172,9 @@ proc newRng*(): ref HmacDrbgContext =
return nil
rng
proc shuffle*[T](
rng: ref HmacDrbgContext,
x: var openArray[T]) =
if x.len == 0: return
proc shuffle*[T](rng: ref HmacDrbgContext, x: var openArray[T]) =
if x.len == 0:
return
var randValues = newSeqUninitialized[byte](len(x) * 2)
hmacDrbgGenerate(rng[], randValues)
@@ -187,9 +185,12 @@ proc shuffle*[T](
y = rand mod i
swap(x[i], x[y])
proc random*(T: typedesc[PrivateKey], scheme: PKScheme,
rng: var HmacDrbgContext,
bits = RsaDefaultKeySize): CryptoResult[PrivateKey] =
proc random*(
T: typedesc[PrivateKey],
scheme: PKScheme,
rng: var HmacDrbgContext,
bits = RsaDefaultKeySize,
): CryptoResult[PrivateKey] =
## Generate random private key for scheme ``scheme``.
##
## ``bits`` is number of bits for RSA key, ``bits`` value must be in
@@ -197,7 +198,7 @@ proc random*(T: typedesc[PrivateKey], scheme: PKScheme,
case scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
let rsakey = ? RsaPrivateKey.random(rng, bits).orError(KeyError)
let rsakey = ?RsaPrivateKey.random(rng, bits).orError(KeyError)
ok(PrivateKey(scheme: scheme, rsakey: rsakey))
else:
err(SchemeError)
@@ -209,7 +210,7 @@ proc random*(T: typedesc[PrivateKey], scheme: PKScheme,
err(SchemeError)
of PKScheme.ECDSA:
when supported(PKScheme.ECDSA):
let eckey = ? ecnist.EcPrivateKey.random(Secp256r1, rng).orError(KeyError)
let eckey = ?ecnist.EcPrivateKey.random(Secp256r1, rng).orError(KeyError)
ok(PrivateKey(scheme: scheme, eckey: eckey))
else:
err(SchemeError)
@@ -220,8 +221,9 @@ proc random*(T: typedesc[PrivateKey], scheme: PKScheme,
else:
err(SchemeError)
proc random*(T: typedesc[PrivateKey], rng: var HmacDrbgContext,
bits = RsaDefaultKeySize): CryptoResult[PrivateKey] =
proc random*(
T: typedesc[PrivateKey], rng: var HmacDrbgContext, bits = RsaDefaultKeySize
): CryptoResult[PrivateKey] =
## Generate random private key using default public-key cryptography scheme.
##
## Default public-key cryptography schemes are tried in the following order:
@@ -235,17 +237,20 @@ proc random*(T: typedesc[PrivateKey], rng: var HmacDrbgContext,
let skkey = SkPrivateKey.random(rng)
ok(PrivateKey(scheme: PKScheme.Secp256k1, skkey: skkey))
elif supported(PKScheme.RSA):
let rsakey = ? RsaPrivateKey.random(rng, bits).orError(KeyError)
let rsakey = ?RsaPrivateKey.random(rng, bits).orError(KeyError)
ok(PrivateKey(scheme: PKScheme.RSA, rsakey: rsakey))
elif supported(PKScheme.ECDSA):
let eckey = ? ecnist.EcPrivateKey.random(Secp256r1, rng).orError(KeyError)
let eckey = ?ecnist.EcPrivateKey.random(Secp256r1, rng).orError(KeyError)
ok(PrivateKey(scheme: PKScheme.ECDSA, eckey: eckey))
else:
err(SchemeError)
proc random*(T: typedesc[KeyPair], scheme: PKScheme,
rng: var HmacDrbgContext,
bits = RsaDefaultKeySize): CryptoResult[KeyPair] =
proc random*(
T: typedesc[KeyPair],
scheme: PKScheme,
rng: var HmacDrbgContext,
bits = RsaDefaultKeySize,
): CryptoResult[KeyPair] =
## Generate random key pair for scheme ``scheme``.
##
## ``bits`` is number of bits for RSA key, ``bits`` value must be in
@@ -253,39 +258,52 @@ proc random*(T: typedesc[KeyPair], scheme: PKScheme,
case scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
let pair = ? RsaKeyPair.random(rng, bits).orError(KeyError)
ok(KeyPair(
seckey: PrivateKey(scheme: scheme, rsakey: pair.seckey),
pubkey: PublicKey(scheme: scheme, rsakey: pair.pubkey)))
let pair = ?RsaKeyPair.random(rng, bits).orError(KeyError)
ok(
KeyPair(
seckey: PrivateKey(scheme: scheme, rsakey: pair.seckey),
pubkey: PublicKey(scheme: scheme, rsakey: pair.pubkey),
)
)
else:
err(SchemeError)
of PKScheme.Ed25519:
when supported(PKScheme.Ed25519):
let pair = EdKeyPair.random(rng)
ok(KeyPair(
seckey: PrivateKey(scheme: scheme, edkey: pair.seckey),
pubkey: PublicKey(scheme: scheme, edkey: pair.pubkey)))
ok(
KeyPair(
seckey: PrivateKey(scheme: scheme, edkey: pair.seckey),
pubkey: PublicKey(scheme: scheme, edkey: pair.pubkey),
)
)
else:
err(SchemeError)
of PKScheme.ECDSA:
when supported(PKScheme.ECDSA):
let pair = ? EcKeyPair.random(Secp256r1, rng).orError(KeyError)
ok(KeyPair(
seckey: PrivateKey(scheme: scheme, eckey: pair.seckey),
pubkey: PublicKey(scheme: scheme, eckey: pair.pubkey)))
let pair = ?EcKeyPair.random(Secp256r1, rng).orError(KeyError)
ok(
KeyPair(
seckey: PrivateKey(scheme: scheme, eckey: pair.seckey),
pubkey: PublicKey(scheme: scheme, eckey: pair.pubkey),
)
)
else:
err(SchemeError)
of PKScheme.Secp256k1:
when supported(PKScheme.Secp256k1):
let pair = SkKeyPair.random(rng)
ok(KeyPair(
seckey: PrivateKey(scheme: scheme, skkey: pair.seckey),
pubkey: PublicKey(scheme: scheme, skkey: pair.pubkey)))
ok(
KeyPair(
seckey: PrivateKey(scheme: scheme, skkey: pair.seckey),
pubkey: PublicKey(scheme: scheme, skkey: pair.pubkey),
)
)
else:
err(SchemeError)
proc random*(T: typedesc[KeyPair], rng: var HmacDrbgContext,
bits = RsaDefaultKeySize): CryptoResult[KeyPair] =
proc random*(
T: typedesc[KeyPair], rng: var HmacDrbgContext, bits = RsaDefaultKeySize
): CryptoResult[KeyPair] =
## Generate a random key pair using the default public-key cryptography
## scheme.
##
@@ -295,24 +313,36 @@ proc random*(T: typedesc[KeyPair], rng: var HmacDrbgContext,
## The first available (supported) scheme will be used.
when supported(PKScheme.Ed25519):
let pair = EdKeyPair.random(rng)
ok(KeyPair(
seckey: PrivateKey(scheme: PKScheme.Ed25519, edkey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.Ed25519, edkey: pair.pubkey)))
ok(
KeyPair(
seckey: PrivateKey(scheme: PKScheme.Ed25519, edkey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.Ed25519, edkey: pair.pubkey),
)
)
elif supported(PKScheme.Secp256k1):
let pair = SkKeyPair.random(rng)
ok(KeyPair(
seckey: PrivateKey(scheme: PKScheme.Secp256k1, skkey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.Secp256k1, skkey: pair.pubkey)))
ok(
KeyPair(
seckey: PrivateKey(scheme: PKScheme.Secp256k1, skkey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.Secp256k1, skkey: pair.pubkey),
)
)
elif supported(PKScheme.RSA):
let pair = ? RsaKeyPair.random(rng, bits).orError(KeyError)
ok(KeyPair(
seckey: PrivateKey(scheme: PKScheme.RSA, rsakey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.RSA, rsakey: pair.pubkey)))
let pair = ?RsaKeyPair.random(rng, bits).orError(KeyError)
ok(
KeyPair(
seckey: PrivateKey(scheme: PKScheme.RSA, rsakey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.RSA, rsakey: pair.pubkey),
)
)
elif supported(PKScheme.ECDSA):
let pair = ? EcKeyPair.random(Secp256r1, rng).orError(KeyError)
ok(KeyPair(
seckey: PrivateKey(scheme: PKScheme.ECDSA, eckey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.ECDSA, eckey: pair.pubkey)))
let pair = ?EcKeyPair.random(Secp256r1, rng).orError(KeyError)
ok(
KeyPair(
seckey: PrivateKey(scheme: PKScheme.ECDSA, eckey: pair.seckey),
pubkey: PublicKey(scheme: PKScheme.ECDSA, eckey: pair.pubkey),
)
)
else:
err(SchemeError)
@@ -333,7 +363,7 @@ proc getPublicKey*(key: PrivateKey): CryptoResult[PublicKey] =
err(SchemeError)
of PKScheme.ECDSA:
when supported(PKScheme.ECDSA):
let eckey = ? key.eckey.getPublicKey().orError(KeyError)
let eckey = ?key.eckey.getPublicKey().orError(KeyError)
ok(PublicKey(scheme: ECDSA, eckey: eckey))
else:
err(SchemeError)
@@ -344,8 +374,9 @@ proc getPublicKey*(key: PrivateKey): CryptoResult[PublicKey] =
else:
err(SchemeError)
proc toRawBytes*(key: PrivateKey | PublicKey,
data: var openArray[byte]): CryptoResult[int] =
proc toRawBytes*(
key: PrivateKey | PublicKey, data: var openArray[byte]
): CryptoResult[int] =
## Serialize private key ``key`` (using scheme's own serialization) and store
## it to ``data``.
##
@@ -404,7 +435,7 @@ proc toBytes*(key: PrivateKey, data: var openArray[byte]): CryptoResult[int] =
## Returns number of bytes (octets) needed to store private key ``key``.
var msg = initProtoBuffer()
msg.write(1, uint64(key.scheme))
msg.write(2, ? key.getRawBytes())
msg.write(2, ?key.getRawBytes())
msg.finish()
var blen = len(msg.buffer)
if len(data) >= blen:
@@ -418,7 +449,7 @@ proc toBytes*(key: PublicKey, data: var openArray[byte]): CryptoResult[int] =
## Returns number of bytes (octets) needed to store public key ``key``.
var msg = initProtoBuffer()
msg.write(1, uint64(key.scheme))
msg.write(2, ? key.getRawBytes())
msg.write(2, ?key.getRawBytes())
msg.finish()
var blen = len(msg.buffer)
if len(data) >= blen and blen > 0:
@@ -438,7 +469,7 @@ proc getBytes*(key: PrivateKey): CryptoResult[seq[byte]] =
## serialization).
var msg = initProtoBuffer()
msg.write(1, uint64(key.scheme))
msg.write(2, ? key.getRawBytes())
msg.write(2, ?key.getRawBytes())
msg.finish()
ok(msg.buffer)
@@ -447,7 +478,7 @@ proc getBytes*(key: PublicKey): CryptoResult[seq[byte]] =
## serialization).
var msg = initProtoBuffer()
msg.write(1, uint64(key.scheme))
msg.write(2, ? key.getRawBytes())
msg.write(2, ?key.getRawBytes())
msg.finish()
ok(msg.buffer)
@@ -455,8 +486,7 @@ proc getBytes*(sig: Signature): seq[byte] =
## Return signature ``sig`` in binary form.
result = sig.data
template initImpl[T: PrivateKey|PublicKey](
key: var T, data: openArray[byte]): bool =
template initImpl[T: PrivateKey | PublicKey](key: var T, data: openArray[byte]): bool =
## Initialize private key ``key`` from libp2p's protobuf serialized raw
## binary form.
##
@@ -469,7 +499,7 @@ template initImpl[T: PrivateKey|PublicKey](
var pb = initProtoBuffer(@data)
let r1 = pb.getField(1, id)
let r2 = pb.getField(2, buffer)
if not(r1.get(false) and r2.get(false)):
if not (r1.get(false) and r2.get(false)):
false
else:
if cast[int8](id) notin SupportedSchemesInt or len(buffer) <= 0:
@@ -480,7 +510,7 @@ template initImpl[T: PrivateKey|PublicKey](
var nkey = PrivateKey(scheme: scheme)
else:
var nkey = PublicKey(scheme: scheme)
case scheme:
case scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
if init(nkey.rsakey, buffer).isOk:
@@ -518,12 +548,13 @@ template initImpl[T: PrivateKey|PublicKey](
else:
false
{.push warning[ProveField]:off.} # https://github.com/nim-lang/Nim/issues/22060
{.push warning[ProveField]: off.} # https://github.com/nim-lang/Nim/issues/22060
proc init*(key: var PrivateKey, data: openArray[byte]): bool =
initImpl(key, data)
proc init*(key: var PublicKey, data: openArray[byte]): bool =
initImpl(key, data)
{.pop.}
proc init*(sig: var Signature, data: openArray[byte]): bool =
@@ -534,7 +565,7 @@ proc init*(sig: var Signature, data: openArray[byte]): bool =
sig.data = @data
result = true
proc init*[T: PrivateKey|PublicKey](key: var T, data: string): bool =
proc init*[T: PrivateKey | PublicKey](key: var T, data: string): bool =
## Initialize private/public key ``key`` from libp2p's protobuf serialized
## hexadecimal string representation.
##
@@ -548,8 +579,7 @@ proc init*(sig: var Signature, data: string): bool =
## Returns ``true`` on success.
sig.init(ncrutils.fromHex(data))
proc init*(t: typedesc[PrivateKey],
data: openArray[byte]): CryptoResult[PrivateKey] =
proc init*(t: typedesc[PrivateKey], data: openArray[byte]): CryptoResult[PrivateKey] =
## Create new private key from libp2p's protobuf serialized binary form.
var res: t
if not res.init(data):
@@ -557,8 +587,7 @@ proc init*(t: typedesc[PrivateKey],
else:
ok(res)
proc init*(t: typedesc[PublicKey],
data: openArray[byte]): CryptoResult[PublicKey] =
proc init*(t: typedesc[PublicKey], data: openArray[byte]): CryptoResult[PublicKey] =
## Create new public key from libp2p's protobuf serialized binary form.
var res: t
if not res.init(data):
@@ -566,8 +595,7 @@ proc init*(t: typedesc[PublicKey],
else:
ok(res)
proc init*(t: typedesc[Signature],
data: openArray[byte]): CryptoResult[Signature] =
proc init*(t: typedesc[Signature], data: openArray[byte]): CryptoResult[Signature] =
## Create new signature from libp2p's protobuf serialized binary form.
var res: t
if not res.init(data):
@@ -583,24 +611,28 @@ proc init*(t: typedesc[PrivateKey], data: string): CryptoResult[PrivateKey] =
when supported(PKScheme.RSA):
proc init*(t: typedesc[PrivateKey], key: rsa.RsaPrivateKey): PrivateKey =
PrivateKey(scheme: RSA, rsakey: key)
proc init*(t: typedesc[PublicKey], key: rsa.RsaPublicKey): PublicKey =
PublicKey(scheme: RSA, rsakey: key)
when supported(PKScheme.Ed25519):
proc init*(t: typedesc[PrivateKey], key: EdPrivateKey): PrivateKey =
PrivateKey(scheme: Ed25519, edkey: key)
proc init*(t: typedesc[PublicKey], key: EdPublicKey): PublicKey =
PublicKey(scheme: Ed25519, edkey: key)
when supported(PKScheme.Secp256k1):
proc init*(t: typedesc[PrivateKey], key: SkPrivateKey): PrivateKey =
PrivateKey(scheme: Secp256k1, skkey: key)
proc init*(t: typedesc[PublicKey], key: SkPublicKey): PublicKey =
PublicKey(scheme: Secp256k1, skkey: key)
when supported(PKScheme.ECDSA):
proc init*(t: typedesc[PrivateKey], key: ecnist.EcPrivateKey): PrivateKey =
PrivateKey(scheme: ECDSA, eckey: key)
proc init*(t: typedesc[PublicKey], key: ecnist.EcPublicKey): PublicKey =
PublicKey(scheme: ECDSA, eckey: key)
@@ -669,9 +701,9 @@ proc `==`*(key1, key2: PrivateKey): bool =
else:
false
proc `$`*(key: PrivateKey|PublicKey): string =
proc `$`*(key: PrivateKey | PublicKey): string =
## Get string representation of private/public key ``key``.
case key.scheme:
case key.scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
$(key.rsakey)
@@ -693,9 +725,9 @@ proc `$`*(key: PrivateKey|PublicKey): string =
else:
"unsupported secp256k1 key"
func shortLog*(key: PrivateKey|PublicKey): string =
func shortLog*(key: PrivateKey | PublicKey): string =
## Get short string representation of private/public key ``key``.
case key.scheme:
case key.scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
($key.rsakey).shortLog
@@ -721,16 +753,15 @@ proc `$`*(sig: Signature): string =
## Get string representation of signature ``sig``.
result = ncrutils.toHex(sig.data)
proc sign*(key: PrivateKey,
data: openArray[byte]): CryptoResult[Signature] {.gcsafe.} =
proc sign*(key: PrivateKey, data: openArray[byte]): CryptoResult[Signature] {.gcsafe.} =
## Sign message ``data`` using private key ``key`` and return generated
## signature in raw binary form.
var res: Signature
case key.scheme:
case key.scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
let sig = ? key.rsakey.sign(data).orError(SigError)
res.data = ? sig.getBytes().orError(SigError)
let sig = ?key.rsakey.sign(data).orError(SigError)
res.data = ?sig.getBytes().orError(SigError)
ok(res)
else:
err(SchemeError)
@@ -743,8 +774,8 @@ proc sign*(key: PrivateKey,
err(SchemeError)
of PKScheme.ECDSA:
when supported(PKScheme.ECDSA):
let sig = ? key.eckey.sign(data).orError(SigError)
res.data = ? sig.getBytes().orError(SigError)
let sig = ?key.eckey.sign(data).orError(SigError)
res.data = ?sig.getBytes().orError(SigError)
ok(res)
else:
err(SchemeError)
@@ -759,7 +790,7 @@ proc sign*(key: PrivateKey,
proc verify*(sig: Signature, message: openArray[byte], key: PublicKey): bool =
## Verify signature ``sig`` using message ``message`` and public key ``key``.
## Return ``true`` if message signature is valid.
case key.scheme:
case key.scheme
of PKScheme.RSA:
when supported(PKScheme.RSA):
var signature: RsaSignature
@@ -797,12 +828,12 @@ proc verify*(sig: Signature, message: openArray[byte], key: PublicKey): bool =
else:
false
template makeSecret(buffer, hmactype, secret, seed: untyped) {.dirty.}=
template makeSecret(buffer, hmactype, secret, seed: untyped) {.dirty.} =
var ctx: hmactype
var j = 0
# We need to strip leading zeros, because Go bigint serialization does it.
var offset = 0
for i in 0..<len(secret):
for i in 0 ..< len(secret):
if secret[i] != 0x00'u8:
break
inc(offset)
@@ -823,8 +854,9 @@ template makeSecret(buffer, hmactype, secret, seed: untyped) {.dirty.}=
ctx.update(a.data)
a = ctx.finish()
proc stretchKeys*(cipherType: string, hashType: string,
sharedSecret: seq[byte]): Secret =
proc stretchKeys*(
cipherType: string, hashType: string, sharedSecret: seq[byte]
): Secret =
## Expand shared secret to cryptographic keys.
if cipherType == "AES-128":
result.ivsize = aes128.sizeBlock
@@ -850,37 +882,57 @@ template goffset*(secret, id, o: untyped): untyped =
id * (len(secret.data) shr 1) + o
template ivOpenArray*(secret: Secret, id: int): untyped =
toOpenArray(secret.data, goffset(secret, id, 0),
goffset(secret, id, secret.ivsize - 1))
toOpenArray(
secret.data, goffset(secret, id, 0), goffset(secret, id, secret.ivsize - 1)
)
template keyOpenArray*(secret: Secret, id: int): untyped =
toOpenArray(secret.data, goffset(secret, id, secret.ivsize),
goffset(secret, id, secret.ivsize + secret.keysize - 1))
toOpenArray(
secret.data,
goffset(secret, id, secret.ivsize),
goffset(secret, id, secret.ivsize + secret.keysize - 1),
)
template macOpenArray*(secret: Secret, id: int): untyped =
toOpenArray(secret.data, goffset(secret, id, secret.ivsize + secret.keysize),
goffset(secret, id, secret.ivsize + secret.keysize + secret.macsize - 1))
toOpenArray(
secret.data,
goffset(secret, id, secret.ivsize + secret.keysize),
goffset(secret, id, secret.ivsize + secret.keysize + secret.macsize - 1),
)
proc iv*(secret: Secret, id: int): seq[byte] {.inline.} =
## Get array of bytes with initial vector.
result = newSeq[byte](secret.ivsize)
var offset = if id == 0: 0 else: (len(secret.data) div 2)
var offset =
if id == 0:
0
else:
(len(secret.data) div 2)
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.ivsize)
proc key*(secret: Secret, id: int): seq[byte] {.inline.} =
result = newSeq[byte](secret.keysize)
var offset = if id == 0: 0 else: (len(secret.data) div 2)
var offset =
if id == 0:
0
else:
(len(secret.data) div 2)
offset += secret.ivsize
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.keysize)
proc mac*(secret: Secret, id: int): seq[byte] {.inline.} =
result = newSeq[byte](secret.macsize)
var offset = if id == 0: 0 else: (len(secret.data) div 2)
var offset =
if id == 0:
0
else:
(len(secret.data) div 2)
offset += secret.ivsize + secret.keysize
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.macsize)
proc getOrder*(remotePubkey, localNonce: openArray[byte],
localPubkey, remoteNonce: openArray[byte]): CryptoResult[int] =
proc getOrder*(
remotePubkey, localNonce: openArray[byte], localPubkey, remoteNonce: openArray[byte]
): CryptoResult[int] =
## Compare values and calculate `order` parameter.
var ctx: sha256
ctx.init()
@@ -891,9 +943,9 @@ proc getOrder*(remotePubkey, localNonce: openArray[byte],
ctx.update(localPubkey)
ctx.update(remoteNonce)
var digest2 = ctx.finish()
var mh1 = ? MultiHash.init(multiCodec("sha2-256"), digest1).orError(HashError)
var mh2 = ? MultiHash.init(multiCodec("sha2-256"), digest2).orError(HashError)
var res = 0;
var mh1 = ?MultiHash.init(multiCodec("sha2-256"), digest1).orError(HashError)
var mh2 = ?MultiHash.init(multiCodec("sha2-256"), digest2).orError(HashError)
var res = 0
for i in 0 ..< len(mh1.data.buffer):
res = int(mh1.data.buffer[i]) - int(mh2.data.buffer[i])
if res != 0:
@@ -926,40 +978,43 @@ proc selectBest*(order: int, p1, p2: string): string =
## Serialization/Deserialization helpers
proc write*(vb: var VBuffer, pubkey: PublicKey) {.
inline, raises: [ResultError[CryptoError]].} =
proc write*(
vb: var VBuffer, pubkey: PublicKey
) {.inline, raises: [ResultError[CryptoError]].} =
## Write PublicKey value ``pubkey`` to buffer ``vb``.
vb.writeSeq(pubkey.getBytes().tryGet())
proc write*(vb: var VBuffer, seckey: PrivateKey) {.
inline, raises: [ResultError[CryptoError]].} =
proc write*(
vb: var VBuffer, seckey: PrivateKey
) {.inline, raises: [ResultError[CryptoError]].} =
## Write PrivateKey value ``seckey`` to buffer ``vb``.
vb.writeSeq(seckey.getBytes().tryGet())
proc write*(vb: var VBuffer, sig: PrivateKey) {.
inline, raises: [ResultError[CryptoError]].} =
proc write*(
vb: var VBuffer, sig: PrivateKey
) {.inline, raises: [ResultError[CryptoError]].} =
## Write Signature value ``sig`` to buffer ``vb``.
vb.writeSeq(sig.getBytes().tryGet())
proc write*[T: PublicKey|PrivateKey](pb: var ProtoBuffer, field: int,
key: T) {.
inline, raises: [ResultError[CryptoError]].} =
proc write*[T: PublicKey | PrivateKey](
pb: var ProtoBuffer, field: int, key: T
) {.inline, raises: [ResultError[CryptoError]].} =
write(pb, field, key.getBytes().tryGet())
proc write*(pb: var ProtoBuffer, field: int, sig: Signature) {.
inline, raises: [].} =
proc write*(pb: var ProtoBuffer, field: int, sig: Signature) {.inline, raises: [].} =
write(pb, field, sig.getBytes())
proc getField*[T: PublicKey|PrivateKey](pb: ProtoBuffer, field: int,
value: var T): ProtoResult[bool] =
proc getField*[T: PublicKey | PrivateKey](
pb: ProtoBuffer, field: int, value: var T
): ProtoResult[bool] =
## Deserialize public/private key from protobuf's message ``pb`` using field
## index ``field``.
##
## On success deserialized key will be stored in ``value``.
var buffer: seq[byte]
var key: T
let res = ? pb.getField(field, buffer)
if not(res):
let res = ?pb.getField(field, buffer)
if not (res):
ok(false)
else:
if key.init(buffer):
@@ -968,16 +1023,15 @@ proc getField*[T: PublicKey|PrivateKey](pb: ProtoBuffer, field: int,
else:
err(ProtoError.IncorrectBlob)
proc getField*(pb: ProtoBuffer, field: int,
value: var Signature): ProtoResult[bool] =
proc getField*(pb: ProtoBuffer, field: int, value: var Signature): ProtoResult[bool] =
## Deserialize signature from protobuf's message ``pb`` using field index
## ``field``.
##
## On success deserialized signature will be stored in ``value``.
var buffer: seq[byte]
var sig: Signature
let res = ? pb.getField(field, buffer)
if not(res):
let res = ?pb.getField(field, buffer)
if not (res):
ok(false)
else:
if sig.init(buffer):
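
Tying the reflowed declarations together, a small sketch of the typical generate/sign/verify flow. Not part of the commit; the imports and the tryGet error handling are illustrative assumptions.

import stew/[byteutils, results]
import libp2p/crypto/crypto

let
  rng = newRng() # one RNG instance per application
  pair = KeyPair.random(PKScheme.Ed25519, rng[]).tryGet()
  msg = "hello libp2p".toBytes()
  sig = pair.seckey.sign(msg).tryGet()
assert sig.verify(msg, pair.pubkey)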

View File

@@ -22,8 +22,7 @@ import stew/results
from stew/assign2 import assign
export results
const
Curve25519KeySize* = 32
const Curve25519KeySize* = 32
type
Curve25519* = object
@@ -35,12 +34,12 @@ proc intoCurve25519Key*(s: openArray[byte]): Curve25519Key =
assert s.len == Curve25519KeySize
assign(result, s)
proc getBytes*(key: Curve25519Key): seq[byte] = @key
proc getBytes*(key: Curve25519Key): seq[byte] =
@key
proc byteswap(buf: var Curve25519Key) {.inline.} =
for i in 0..<16:
let
x = buf[i]
for i in 0 ..< 16:
let x = buf[i]
buf[i] = buf[31 - i]
buf[31 - i] = x
@@ -48,31 +47,25 @@ proc mul*(_: type[Curve25519], point: var Curve25519Key, multiplier: Curve25519K
let defaultBrEc = ecGetDefault()
# multiplier needs to be big-endian
var
multiplierBs = multiplier
var multiplierBs = multiplier
multiplierBs.byteswap()
let
res = defaultBrEc.mul(
addr point[0],
Curve25519KeySize,
addr multiplierBs[0],
Curve25519KeySize,
EC_curve25519)
let res = defaultBrEc.mul(
addr point[0],
Curve25519KeySize,
addr multiplierBs[0],
Curve25519KeySize,
EC_curve25519,
)
assert res == 1
proc mulgen(_: type[Curve25519], dst: var Curve25519Key, point: Curve25519Key) =
let defaultBrEc = ecGetDefault()
var
rpoint = point
var rpoint = point
rpoint.byteswap()
let
size = defaultBrEc.mulgen(
addr dst[0],
addr rpoint[0],
Curve25519KeySize,
EC_curve25519)
let size =
defaultBrEc.mulgen(addr dst[0], addr rpoint[0], Curve25519KeySize, EC_curve25519)
assert size == Curve25519KeySize
@@ -82,8 +75,7 @@ proc public*(private: Curve25519Key): Curve25519Key =
proc random*(_: type[Curve25519Key], rng: var HmacDrbgContext): Curve25519Key =
var res: Curve25519Key
let defaultBrEc = ecGetDefault()
let len = ecKeygen(
addr rng.vtable, defaultBrEc, nil, addr res[0], EC_curve25519)
let len = ecKeygen(addr rng.vtable, defaultBrEc, nil, addr res[0], EC_curve25519)
# Per bearssl documentation, the keygen only fails if the curve is
# unrecognised.
doAssert len == Curve25519KeySize, "Could not generate curve"
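
A shared-secret sketch built only from the procs visible above (not part of the commit); the final assertion is the standard commutativity property of X25519, and the imports are approximate.

import libp2p/crypto/[crypto, curve25519]

let rng = newRng()
let
  aliceKey = Curve25519Key.random(rng[])
  bobKey = Curve25519Key.random(rng[])
var
  secretA = public(bobKey) # Alice multiplies Bob's public point
  secretB = public(aliceKey) # Bob multiplies Alice's public point
Curve25519.mul(secretA, aliceKey)
Curve25519.mul(secretB, bobKey)
assert secretA == secretB # both sides derive the same shared secret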

View File

@@ -58,23 +58,22 @@ type
buffer*: seq[byte]
EcCurveKind* = enum
Secp256r1 = EC_secp256r1,
Secp384r1 = EC_secp384r1,
Secp256r1 = EC_secp256r1
Secp384r1 = EC_secp384r1
Secp521r1 = EC_secp521r1
EcPKI* = EcPrivateKey | EcPublicKey | EcSignature
EcError* = enum
EcRngError,
EcKeyGenError,
EcPublicKeyError,
EcKeyIncorrectError,
EcRngError
EcKeyGenError
EcPublicKeyError
EcKeyIncorrectError
EcSignatureError
EcResult*[T] = Result[T, EcError]
const
EcSupportedCurvesCint* = @[cint(Secp256r1), cint(Secp384r1), cint(Secp521r1)]
const EcSupportedCurvesCint* = @[cint(Secp256r1), cint(Secp384r1), cint(Secp521r1)]
proc `-`(x: uint32): uint32 {.inline.} =
result = (0xFFFF_FFFF'u32 - x) + 1'u32
@@ -88,7 +87,7 @@ proc CMP(x, y: uint32): int32 {.inline.} =
proc EQ0(x: int32): uint32 {.inline.} =
var q = cast[uint32](x)
result = not(q or -q) shr 31
result = not (q or -q) shr 31
proc NEQ(x, y: uint32): uint32 {.inline.} =
var q = cast[uint32](x xor y)
@@ -113,7 +112,7 @@ proc checkScalar(scalar: openArray[byte], curve: cint): uint32 =
for u in scalar:
z = z or u
if len(scalar) == int(orderlen):
for i in 0..<len(scalar):
for i in 0 ..< len(scalar):
c = c or (-(cast[int32](EQ0(c))) and CMP(scalar[i], order[i]))
else:
c = -1
@@ -126,8 +125,7 @@ proc checkPublic(key: openArray[byte], curve: cint): uint32 =
var impl = ecGetDefault()
var orderlen: uint = 0
discard impl.order(curve, orderlen)
result = impl.mul(unsafeAddr ckey[0], uint(len(ckey)),
addr x[0], uint(len(x)), curve)
result = impl.mul(unsafeAddr ckey[0], uint(len(ckey)), addr x[0], uint(len(x)), curve)
proc getOffset(pubkey: EcPublicKey): int {.inline.} =
let o = cast[uint](pubkey.key.q) - cast[uint](unsafeAddr pubkey.buffer[0])
@@ -145,21 +143,15 @@ proc getOffset(seckey: EcPrivateKey): int {.inline.} =
template getPublicKeyLength*(curve: EcCurveKind): int =
case curve
of Secp256r1:
PubKey256Length
of Secp384r1:
PubKey384Length
of Secp521r1:
PubKey521Length
of Secp256r1: PubKey256Length
of Secp384r1: PubKey384Length
of Secp521r1: PubKey521Length
template getPrivateKeyLength*(curve: EcCurveKind): int =
case curve
of Secp256r1:
SecKey256Length
of Secp384r1:
SecKey384Length
of Secp521r1:
SecKey521Length
of Secp256r1: SecKey256Length
of Secp384r1: SecKey384Length
of Secp521r1: SecKey521Length
proc copy*[T: EcPKI](dst: var T, src: T): bool =
## Copy EC `private key`, `public key` or `signature` ``src`` to ``dst``.
@@ -201,7 +193,7 @@ proc copy*[T: EcPKI](src: T): T {.inline.} =
if not copy(result, src):
raise newException(EcKeyIncorrectError, "Incorrect key or signature")
proc clear*[T: EcPKI|EcKeyPair](pki: var T) =
proc clear*[T: EcPKI | EcKeyPair](pki: var T) =
## Wipe and clear EC `private key`, `public key` or `signature` object.
doAssert(not isNil(pki))
when T is EcPrivateKey:
@@ -232,8 +224,8 @@ proc clear*[T: EcPKI|EcKeyPair](pki: var T) =
pki.pubkey.key.curve = 0
proc random*(
T: typedesc[EcPrivateKey], kind: EcCurveKind,
rng: var HmacDrbgContext): EcResult[EcPrivateKey] =
T: typedesc[EcPrivateKey], kind: EcCurveKind, rng: var HmacDrbgContext
): EcResult[EcPrivateKey] =
## Generate new random EC private key using BearSSL's HMAC-SHA256-DRBG
## algorithm.
##
@@ -241,9 +233,9 @@ proc random*(
## secp521r1).
var ecimp = ecGetDefault()
var res = new EcPrivateKey
if ecKeygen(addr rng.vtable, ecimp,
addr res.key, addr res.buffer[0],
safeConvert[cint](kind)) == 0:
if ecKeygen(
addr rng.vtable, ecimp, addr res.key, addr res.buffer[0], safeConvert[cint](kind)
) == 0:
err(EcKeyGenError)
else:
ok(res)
@@ -257,8 +249,7 @@ proc getPublicKey*(seckey: EcPrivateKey): EcResult[EcPublicKey] =
if seckey.key.curve in EcSupportedCurvesCint:
var res = new EcPublicKey
assert res.buffer.len > getPublicKeyLength(cast[EcCurveKind](seckey.key.curve))
if ecComputePub(ecimp, addr res.key,
addr res.buffer[0], unsafeAddr seckey.key) == 0:
if ecComputePub(ecimp, addr res.key, addr res.buffer[0], unsafeAddr seckey.key) == 0:
err(EcKeyIncorrectError)
else:
ok(res)
@@ -266,23 +257,23 @@ proc getPublicKey*(seckey: EcPrivateKey): EcResult[EcPublicKey] =
err(EcKeyIncorrectError)
proc random*(
T: typedesc[EcKeyPair], kind: EcCurveKind,
rng: var HmacDrbgContext): EcResult[T] =
T: typedesc[EcKeyPair], kind: EcCurveKind, rng: var HmacDrbgContext
): EcResult[T] =
## Generate new random EC private and public keypair using BearSSL's
## HMAC-SHA256-DRBG algorithm.
##
## ``kind`` elliptic curve kind of your choice (secp256r1, secp384r1 or
## secp521r1).
let
seckey = ? EcPrivateKey.random(kind, rng)
pubkey = ? seckey.getPublicKey()
seckey = ?EcPrivateKey.random(kind, rng)
pubkey = ?seckey.getPublicKey()
key = EcKeyPair(seckey: seckey, pubkey: pubkey)
ok(key)
proc `$`*(seckey: EcPrivateKey): string =
## Return string representation of EC private key.
if isNil(seckey) or seckey.key.curve == 0 or seckey.key.xlen == 0 or
len(seckey.buffer) == 0:
len(seckey.buffer) == 0:
result = "Empty or uninitialized ECNIST key"
else:
if seckey.key.curve notin EcSupportedCurvesCint:
@@ -298,7 +289,7 @@ proc `$`*(seckey: EcPrivateKey): string =
proc `$`*(pubkey: EcPublicKey): string =
## Return string representation of EC public key.
if isNil(pubkey) or pubkey.key.curve == 0 or pubkey.key.qlen == 0 or
len(pubkey.buffer) == 0:
len(pubkey.buffer) == 0:
result = "Empty or uninitialized ECNIST key"
else:
if pubkey.key.curve notin EcSupportedCurvesCint:
@@ -371,7 +362,7 @@ proc toBytes*(seckey: EcPrivateKey, data: var openArray[byte]): EcResult[int] =
return err(EcKeyIncorrectError)
if seckey.key.curve in EcSupportedCurvesCint:
var offset, length: int
var pubkey = ? seckey.getPublicKey()
var pubkey = ?seckey.getPublicKey()
var b = Asn1Buffer.init()
var p = Asn1Composite.init(Asn1Tag.Sequence)
var c0 = Asn1Composite.init(0)
@@ -387,16 +378,14 @@ proc toBytes*(seckey: EcPrivateKey, data: var openArray[byte]): EcResult[int] =
if offset < 0:
return err(EcKeyIncorrectError)
length = int(pubkey.key.qlen)
c1.write(Asn1Tag.BitString,
pubkey.buffer.toOpenArray(offset, offset + length - 1))
c1.write(Asn1Tag.BitString, pubkey.buffer.toOpenArray(offset, offset + length - 1))
c1.finish()
offset = seckey.getOffset()
if offset < 0:
return err(EcKeyIncorrectError)
length = int(seckey.key.xlen)
p.write(1'u64)
p.write(Asn1Tag.OctetString,
seckey.buffer.toOpenArray(offset, offset + length - 1))
p.write(Asn1Tag.OctetString, seckey.buffer.toOpenArray(offset, offset + length - 1))
p.write(c0)
p.write(c1)
p.finish()
@@ -410,7 +399,6 @@ proc toBytes*(seckey: EcPrivateKey, data: var openArray[byte]): EcResult[int] =
else:
err(EcKeyIncorrectError)
proc toBytes*(pubkey: EcPublicKey, data: var openArray[byte]): EcResult[int] =
## Serialize EC public key ``pubkey`` to ASN.1 DER binary form and store it
## to ``data``.
@@ -436,8 +424,7 @@ proc toBytes*(pubkey: EcPublicKey, data: var openArray[byte]): EcResult[int] =
if offset < 0:
return err(EcKeyIncorrectError)
let length = int(pubkey.key.qlen)
p.write(Asn1Tag.BitString,
pubkey.buffer.toOpenArray(offset, offset + length - 1))
p.write(Asn1Tag.BitString, pubkey.buffer.toOpenArray(offset, offset + length - 1))
p.finish()
b.write(p)
b.finish()
@@ -467,9 +454,9 @@ proc getBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
return err(EcKeyIncorrectError)
if seckey.key.curve in EcSupportedCurvesCint:
var res = newSeq[byte]()
let length = ? seckey.toBytes(res)
let length = ?seckey.toBytes(res)
res.setLen(length)
discard ? seckey.toBytes(res)
discard ?seckey.toBytes(res)
ok(res)
else:
err(EcKeyIncorrectError)
@@ -480,9 +467,9 @@ proc getBytes*(pubkey: EcPublicKey): EcResult[seq[byte]] =
return err(EcKeyIncorrectError)
if pubkey.key.curve in EcSupportedCurvesCint:
var res = newSeq[byte]()
let length = ? pubkey.toBytes(res)
let length = ?pubkey.toBytes(res)
res.setLen(length)
discard ? pubkey.toBytes(res)
discard ?pubkey.toBytes(res)
ok(res)
else:
err(EcKeyIncorrectError)
@@ -492,9 +479,9 @@ proc getBytes*(sig: EcSignature): EcResult[seq[byte]] =
if isNil(sig):
return err(EcSignatureError)
var res = newSeq[byte]()
let length = ? sig.toBytes(res)
let length = ?sig.toBytes(res)
res.setLen(length)
discard ? sig.toBytes(res)
discard ?sig.toBytes(res)
ok(res)
proc getRawBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
@@ -503,9 +490,9 @@ proc getRawBytes*(seckey: EcPrivateKey): EcResult[seq[byte]] =
return err(EcKeyIncorrectError)
if seckey.key.curve in EcSupportedCurvesCint:
var res = newSeq[byte]()
let length = ? seckey.toRawBytes(res)
let length = ?seckey.toRawBytes(res)
res.setLen(length)
discard ? seckey.toRawBytes(res)
discard ?seckey.toRawBytes(res)
ok(res)
else:
err(EcKeyIncorrectError)
@@ -516,9 +503,9 @@ proc getRawBytes*(pubkey: EcPublicKey): EcResult[seq[byte]] =
return err(EcKeyIncorrectError)
if pubkey.key.curve in EcSupportedCurvesCint:
var res = newSeq[byte]()
let length = ? pubkey.toRawBytes(res)
let length = ?pubkey.toRawBytes(res)
res.setLen(length)
discard ? pubkey.toRawBytes(res)
discard ?pubkey.toRawBytes(res)
return ok(res)
else:
return err(EcKeyIncorrectError)
@@ -528,9 +515,9 @@ proc getRawBytes*(sig: EcSignature): EcResult[seq[byte]] =
if isNil(sig):
return err(EcSignatureError)
var res = newSeq[byte]()
let length = ? sig.toBytes(res)
let length = ?sig.toBytes(res)
res.setLen(length)
discard ? sig.toBytes(res)
discard ?sig.toBytes(res)
ok(res)
proc `==`*(pubkey1, pubkey2: EcPublicKey): bool =
@@ -550,8 +537,10 @@ proc `==`*(pubkey1, pubkey2: EcPublicKey): bool =
let op2 = pubkey2.getOffset()
if op1 == -1 or op2 == -1:
return false
return CT.isEqual(pubkey1.buffer.toOpenArray(op1, pubkey1.key.qlen - 1),
pubkey2.buffer.toOpenArray(op2, pubkey2.key.qlen - 1))
return CT.isEqual(
pubkey1.buffer.toOpenArray(op1, pubkey1.key.qlen - 1),
pubkey2.buffer.toOpenArray(op2, pubkey2.key.qlen - 1),
)
proc `==`*(seckey1, seckey2: EcPrivateKey): bool =
## Returns ``true`` if both keys ``seckey1`` and ``seckey2`` are equal.
@@ -570,8 +559,10 @@ proc `==`*(seckey1, seckey2: EcPrivateKey): bool =
let op2 = seckey2.getOffset()
if op1 == -1 or op2 == -1:
return false
return CT.isEqual(seckey1.buffer.toOpenArray(op1, seckey1.key.xlen - 1),
seckey2.buffer.toOpenArray(op2, seckey2.key.xlen - 1))
return CT.isEqual(
seckey1.buffer.toOpenArray(op1, seckey1.key.xlen - 1),
seckey2.buffer.toOpenArray(op2, seckey2.key.xlen - 1),
)
proc `==`*(a, b: EcSignature): bool =
## Return ``true`` if both signatures ``sig1`` and ``sig2`` are equal.
@@ -605,26 +596,26 @@ proc init*(key: var EcPrivateKey, data: openArray[byte]): Result[void, Asn1Error
var ab = Asn1Buffer.init(data)
field = ? ab.read()
field = ?ab.read()
if field.kind != Asn1Tag.Sequence:
return err(Asn1Error.Incorrect)
var ib = field.getBuffer()
field = ? ib.read()
field = ?ib.read()
if field.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
if field.vint != 1'u64:
return err(Asn1Error.Incorrect)
raw = ? ib.read()
raw = ?ib.read()
if raw.kind != Asn1Tag.OctetString:
return err(Asn1Error.Incorrect)
oid = ? ib.read()
oid = ?ib.read()
if oid.kind != Asn1Tag.Oid:
return err(Asn1Error.Incorrect)
@@ -658,19 +649,19 @@ proc init*(pubkey: var EcPublicKey, data: openArray[byte]): Result[void, Asn1Err
var ab = Asn1Buffer.init(data)
field = ? ab.read()
field = ?ab.read()
if field.kind != Asn1Tag.Sequence:
return err(Asn1Error.Incorrect)
var ib = field.getBuffer()
field = ? ib.read()
field = ?ib.read()
if field.kind != Asn1Tag.Sequence:
return err(Asn1Error.Incorrect)
var ob = field.getBuffer()
oid = ? ob.read()
oid = ?ob.read()
if oid.kind != Asn1Tag.Oid:
return err(Asn1Error.Incorrect)
@@ -678,7 +669,7 @@ proc init*(pubkey: var EcPublicKey, data: openArray[byte]): Result[void, Asn1Err
if oid != Asn1OidEcPublicKey:
return err(Asn1Error.Incorrect)
oid = ? ob.read()
oid = ?ob.read()
if oid.kind != Asn1Tag.Oid:
return err(Asn1Error.Incorrect)
@@ -692,7 +683,7 @@ proc init*(pubkey: var EcPublicKey, data: openArray[byte]): Result[void, Asn1Err
else:
return err(Asn1Error.Incorrect)
raw = ? ib.read()
raw = ?ib.read()
if raw.kind != Asn1Tag.BitString:
return err(Asn1Error.Incorrect)
@@ -718,16 +709,14 @@ proc init*(sig: var EcSignature, data: openArray[byte]): Result[void, Asn1Error]
else:
err(Asn1Error.Incorrect)
proc init*[T: EcPKI](sospk: var T,
data: string): Result[void, Asn1Error] {.inline.} =
proc init*[T: EcPKI](sospk: var T, data: string): Result[void, Asn1Error] {.inline.} =
## Initialize EC `private key`, `public key` or `signature` ``sospk`` from
## ASN.1 DER hexadecimal string representation ``data``.
##
## Procedure returns ``Asn1Status``.
sospk.init(ncrutils.fromHex(data))
proc init*(t: typedesc[EcPrivateKey],
data: openArray[byte]): EcResult[EcPrivateKey] =
proc init*(t: typedesc[EcPrivateKey], data: openArray[byte]): EcResult[EcPrivateKey] =
## Initialize EC private key from ASN.1 DER binary representation ``data`` and
## return constructed object.
var key: EcPrivateKey
@@ -737,8 +726,7 @@ proc init*(t: typedesc[EcPrivateKey],
else:
ok(key)
proc init*(t: typedesc[EcPublicKey],
data: openArray[byte]): EcResult[EcPublicKey] =
proc init*(t: typedesc[EcPublicKey], data: openArray[byte]): EcResult[EcPublicKey] =
## Initialize EC public key from ASN.1 DER binary representation ``data`` and
## return constructed object.
var key: EcPublicKey
@@ -748,8 +736,7 @@ proc init*(t: typedesc[EcPublicKey],
else:
ok(key)
proc init*(t: typedesc[EcSignature],
data: openArray[byte]): EcResult[EcSignature] =
proc init*(t: typedesc[EcSignature], data: openArray[byte]): EcResult[EcSignature] =
## Initialize EC signature from raw binary representation ``data`` and
## return constructed object.
var sig: EcSignature
@@ -832,8 +819,7 @@ proc initRaw*(sig: var EcSignature, data: openArray[byte]): bool =
##
## Procedure returns ``true`` on success, ``false`` otherwise.
let length = len(data)
if (length == Sig256Length) or (length == Sig384Length) or
(length == Sig521Length):
if (length == Sig256Length) or (length == Sig384Length) or (length == Sig521Length):
result = true
if result:
sig = new EcSignature
@@ -846,8 +832,9 @@ proc initRaw*[T: EcPKI](sospk: var T, data: string): bool {.inline.} =
## Procedure returns ``true`` on success, ``false`` otherwise.
result = sospk.initRaw(ncrutils.fromHex(data))
proc initRaw*(t: typedesc[EcPrivateKey],
data: openArray[byte]): EcResult[EcPrivateKey] =
proc initRaw*(
t: typedesc[EcPrivateKey], data: openArray[byte]
): EcResult[EcPrivateKey] =
## Initialize EC private key from raw binary representation ``data`` and
## return constructed object.
var res: EcPrivateKey
@@ -856,8 +843,7 @@ proc initRaw*(t: typedesc[EcPrivateKey],
else:
ok(res)
proc initRaw*(t: typedesc[EcPublicKey],
data: openArray[byte]): EcResult[EcPublicKey] =
proc initRaw*(t: typedesc[EcPublicKey], data: openArray[byte]): EcResult[EcPublicKey] =
## Initialize EC public key from raw binary representation ``data`` and
## return constructed object.
var res: EcPublicKey
@@ -866,8 +852,7 @@ proc initRaw*(t: typedesc[EcPublicKey],
else:
ok(res)
proc initRaw*(t: typedesc[EcSignature],
data: openArray[byte]): EcResult[EcSignature] =
proc initRaw*(t: typedesc[EcSignature], data: openArray[byte]): EcResult[EcSignature] =
## Initialize EC signature from raw binary representation ``data`` and
## return constructed object.
var res: EcSignature
@@ -894,16 +879,19 @@ proc scalarMul*(pub: EcPublicKey, sec: EcPrivateKey): EcPublicKey =
let poffset = key.getOffset()
let soffset = sec.getOffset()
if poffset >= 0 and soffset >= 0:
let res = impl.mul(addr key.buffer[poffset],
key.key.qlen,
unsafeAddr sec.buffer[soffset],
sec.key.xlen,
key.key.curve)
let res = impl.mul(
addr key.buffer[poffset],
key.key.qlen,
unsafeAddr sec.buffer[soffset],
sec.key.xlen,
key.key.curve,
)
if res != 0:
result = key
proc toSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey,
data: var openArray[byte]): int =
proc toSecret*(
pubkey: EcPublicKey, seckey: EcPrivateKey, data: var openArray[byte]
): int =
## Calculate ECDHE shared secret using Go's elliptic/curve approach, from the
## remote public key ``pubkey`` and the local private key ``seckey``, and store
## the shared secret to ``data``.
@@ -939,8 +927,9 @@ proc getSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey): seq[byte] =
result = newSeq[byte](res)
copyMem(addr result[0], addr data[0], res)
proc sign*[T: byte|char](seckey: EcPrivateKey,
message: openArray[T]): EcResult[EcSignature] {.gcsafe.} =
proc sign*[T: byte | char](
seckey: EcPrivateKey, message: openArray[T]
): EcResult[EcSignature] {.gcsafe.} =
## Get ECDSA signature of data ``message`` using private key ``seckey``.
if isNil(seckey):
return err(EcKeyIncorrectError)
@@ -957,8 +946,8 @@ proc sign*[T: byte|char](seckey: EcPrivateKey,
else:
kv.update(addr hc.vtable, nil, 0)
kv.out(addr hc.vtable, addr hash[0])
let res = ecdsaI31SignAsn1(impl, kv, addr hash[0], addr seckey.key,
addr sig.buffer[0])
let res =
ecdsaI31SignAsn1(impl, kv, addr hash[0], addr seckey.key, addr sig.buffer[0])
# Clear context with initial value
kv.init(addr hc.vtable)
if res != 0:
@@ -969,8 +958,9 @@ proc sign*[T: byte|char](seckey: EcPrivateKey,
else:
err(EcKeyIncorrectError)
proc verify*[T: byte|char](sig: EcSignature, message: openArray[T],
pubkey: EcPublicKey): bool {.inline.} =
proc verify*[T: byte | char](
sig: EcSignature, message: openArray[T], pubkey: EcPublicKey
): bool {.inline.} =
## Verify ECDSA signature ``sig`` using public key ``pubkey`` and data
## ``message``.
##
@@ -988,30 +978,32 @@ proc verify*[T: byte|char](sig: EcSignature, message: openArray[T],
else:
kv.update(addr hc.vtable, nil, 0)
kv.out(addr hc.vtable, addr hash[0])
let res = ecdsaI31VrfyAsn1(impl, addr hash[0], uint(len(hash)),
unsafeAddr pubkey.key,
addr sig.buffer[0], uint(len(sig.buffer)))
let res = ecdsaI31VrfyAsn1(
impl,
addr hash[0],
uint(len(hash)),
unsafeAddr pubkey.key,
addr sig.buffer[0],
uint(len(sig.buffer)),
)
# Clear context with initial value
kv.init(addr hc.vtable)
result = (res == 1)
type ECDHEScheme* = EcCurveKind
proc ephemeral*(
scheme: ECDHEScheme,
rng: var HmacDrbgContext): EcResult[EcKeyPair] =
proc ephemeral*(scheme: ECDHEScheme, rng: var HmacDrbgContext): EcResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE.
var keypair: EcKeyPair
if scheme == Secp256r1:
keypair = ? EcKeyPair.random(Secp256r1, rng)
keypair = ?EcKeyPair.random(Secp256r1, rng)
elif scheme == Secp384r1:
keypair = ? EcKeyPair.random(Secp384r1, rng)
keypair = ?EcKeyPair.random(Secp384r1, rng)
elif scheme == Secp521r1:
keypair = ? EcKeyPair.random(Secp521r1, rng)
keypair = ?EcKeyPair.random(Secp521r1, rng)
ok(keypair)
proc ephemeral*(
scheme: string, rng: var HmacDrbgContext): EcResult[EcKeyPair] =
proc ephemeral*(scheme: string, rng: var HmacDrbgContext): EcResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE using string encoding.
##
## Currently supported encoding strings are P-256, P-384, P-521, if encoding
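
A sign/verify round trip over the reformatted ECDSA helpers (not part of the commit; imports are approximate).

import stew/results
import libp2p/crypto/[crypto, ecnist]

let rng = newRng()
let
  pair = EcKeyPair.random(Secp256r1, rng[]).tryGet()
  sig = pair.seckey.sign("sign me").tryGet()
assert sig.verify("sign me", pair.pubkey)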

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -16,18 +16,41 @@ import bearssl/[kdf, hash]
type HkdfResult*[len: static int] = array[len, byte]
proc hkdf*[T: sha256; len: static int](_: type[T]; salt, ikm, info: openArray[byte]; outputs: var openArray[HkdfResult[len]]) =
var
ctx: HkdfContext
proc hkdf*[T: sha256, len: static int](
_: type[T],
salt, ikm, info: openArray[byte],
outputs: var openArray[HkdfResult[len]],
) =
var ctx: HkdfContext
hkdfInit(
ctx, addr sha256Vtable,
if salt.len > 0: unsafeAddr salt[0] else: nil, csize_t(salt.len))
ctx,
addr sha256Vtable,
if salt.len > 0:
unsafeAddr salt[0]
else:
nil
,
csize_t(salt.len),
)
hkdfInject(
ctx, if ikm.len > 0: unsafeAddr ikm[0] else: nil, csize_t(ikm.len))
ctx,
if ikm.len > 0:
unsafeAddr ikm[0]
else:
nil
,
csize_t(ikm.len),
)
hkdfFlip(ctx)
for i in 0..outputs.high:
for i in 0 .. outputs.high:
discard hkdfProduce(
ctx,
if info.len > 0: unsafeAddr info[0]
else: nil, csize_t(info.len),
addr outputs[i][0], csize_t(outputs[i].len))
if info.len > 0:
unsafeAddr info[0]
else:
nil
,
csize_t(info.len),
addr outputs[i][0],
csize_t(outputs[i].len),
)
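
A derivation sketch for the rewrapped hkdf (not part of the commit); sha256 is assumed to be the nimcrypto digest type the wrapper is instantiated with, and all inputs are illustrative.

import stew/byteutils
import nimcrypto/sha2
import libp2p/crypto/hkdf

var outputs: array[2, HkdfResult[32]] # two 32-byte derived keys
let
  salt = "salt".toBytes()
  ikm = "input keying material".toBytes()
  info = "context".toBytes()
sha256.hkdf(salt, ikm, info, outputs)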

View File

@@ -19,35 +19,34 @@ import ../utility
type
Asn1Error* {.pure.} = enum
Overflow,
Incomplete,
Indefinite,
Incorrect,
NoSupport,
Overflow
Incomplete
Indefinite
Incorrect
NoSupport
Overrun
Asn1Result*[T] = Result[T, Asn1Error]
Asn1Class* {.pure.} = enum
Universal = 0x00,
Universal = 0x00
Application = 0x01
ContextSpecific = 0x02
Private = 0x03
Asn1Tag* {.pure.} = enum
## ASN.1 tag types enum
NoSupport,
Boolean,
Integer,
BitString,
OctetString,
Null,
Oid,
Sequence,
NoSupport
Boolean
Integer
BitString
OctetString
Null
Oid
Sequence
Context
Asn1Buffer* = object of RootObj
## ASN.1's message representation object
Asn1Buffer* = object of RootObj ## ASN.1's message representation object
buffer*: seq[byte]
offset*: int
length*: int
@@ -73,37 +72,23 @@ type
idx*: int
const
Asn1OidSecp256r1* = [
0x2A'u8, 0x86'u8, 0x48'u8, 0xCE'u8, 0x3D'u8, 0x03'u8, 0x01'u8, 0x07'u8
]
Asn1OidSecp256r1* =
[0x2A'u8, 0x86'u8, 0x48'u8, 0xCE'u8, 0x3D'u8, 0x03'u8, 0x01'u8, 0x07'u8]
## Encoded OID for `secp256r1` curve (1.2.840.10045.3.1.7)
Asn1OidSecp384r1* = [
0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x22'u8
]
Asn1OidSecp384r1* = [0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x22'u8]
## Encoded OID for `secp384r1` curve (1.3.132.0.34)
Asn1OidSecp521r1* = [
0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x23'u8
]
Asn1OidSecp521r1* = [0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x23'u8]
## Encoded OID for `secp521r1` curve (1.3.132.0.35)
Asn1OidSecp256k1* = [
0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x0A'u8
]
Asn1OidSecp256k1* = [0x2B'u8, 0x81'u8, 0x04'u8, 0x00'u8, 0x0A'u8]
## Encoded OID for `secp256k1` curve (1.3.132.0.10)
Asn1OidEcPublicKey* = [
0x2A'u8, 0x86'u8, 0x48'u8, 0xCE'u8, 0x3D'u8, 0x02'u8, 0x01'u8
]
Asn1OidEcPublicKey* = [0x2A'u8, 0x86'u8, 0x48'u8, 0xCE'u8, 0x3D'u8, 0x02'u8, 0x01'u8]
## Encoded OID for Elliptic Curve Public Key (1.2.840.10045.2.1)
Asn1OidRsaEncryption* = [
0x2A'u8, 0x86'u8, 0x48'u8, 0x86'u8, 0xF7'u8, 0x0D'u8, 0x01'u8,
0x01'u8, 0x01'u8
]
Asn1OidRsaEncryption* =
[0x2A'u8, 0x86'u8, 0x48'u8, 0x86'u8, 0xF7'u8, 0x0D'u8, 0x01'u8, 0x01'u8, 0x01'u8]
## Encoded OID for RSA Encryption (1.2.840.113549.1.1.1)
Asn1True* = [0x01'u8, 0x01'u8, 0xFF'u8]
## Encoded boolean ``TRUE``.
Asn1False* = [0x01'u8, 0x01'u8, 0x00'u8]
## Encoded boolean ``FALSE``.
Asn1Null* = [0x05'u8, 0x00'u8]
## Encoded ``NULL`` value.
Asn1True* = [0x01'u8, 0x01'u8, 0xFF'u8] ## Encoded boolean ``TRUE``.
Asn1False* = [0x01'u8, 0x01'u8, 0x00'u8] ## Encoded boolean ``FALSE``.
Asn1Null* = [0x05'u8, 0x00'u8] ## Encoded ``NULL`` value.
template toOpenArray*(ab: Asn1Buffer): untyped =
toOpenArray(ab.buffer, ab.offset, ab.buffer.high)
@@ -120,7 +105,7 @@ template isEmpty*(ab: Asn1Buffer): bool =
template isEnough*(ab: Asn1Buffer, length: int64): bool =
len(ab.buffer) >= ab.offset + length
proc len*[T: Asn1Buffer|Asn1Composite](abc: T): int {.inline.} =
proc len*[T: Asn1Buffer | Asn1Composite](abc: T): int {.inline.} =
len(abc.buffer) - abc.offset
proc len*(field: Asn1Field): int {.inline.} =
@@ -129,31 +114,22 @@ proc len*(field: Asn1Field): int {.inline.} =
template getPtr*(field: untyped): pointer =
cast[pointer](unsafeAddr field.buffer[field.offset])
proc extend*[T: Asn1Buffer|Asn1Composite](abc: var T, length: int) {.inline.} =
proc extend*[T: Asn1Buffer | Asn1Composite](abc: var T, length: int) {.inline.} =
## Extend buffer or composite's internal buffer by ``length`` octets.
abc.buffer.setLen(len(abc.buffer) + length)
proc code*(tag: Asn1Tag): byte {.inline.} =
## Converts Nim ``tag`` enum to ASN.1 tag code.
case tag:
of Asn1Tag.NoSupport:
0x00'u8
of Asn1Tag.Boolean:
0x01'u8
of Asn1Tag.Integer:
0x02'u8
of Asn1Tag.BitString:
0x03'u8
of Asn1Tag.OctetString:
0x04'u8
of Asn1Tag.Null:
0x05'u8
of Asn1Tag.Oid:
0x06'u8
of Asn1Tag.Sequence:
0x30'u8
of Asn1Tag.Context:
0xA0'u8
case tag
of Asn1Tag.NoSupport: 0x00'u8
of Asn1Tag.Boolean: 0x01'u8
of Asn1Tag.Integer: 0x02'u8
of Asn1Tag.BitString: 0x03'u8
of Asn1Tag.OctetString: 0x04'u8
of Asn1Tag.Null: 0x05'u8
of Asn1Tag.Oid: 0x06'u8
of Asn1Tag.Sequence: 0x30'u8
of Asn1Tag.Context: 0xA0'u8
proc asn1EncodeLength*(dest: var openArray[byte], length: uint64): int =
## Encode ASN.1 DER length part of TLV triple and return number of bytes
@@ -182,8 +158,7 @@ proc asn1EncodeLength*(dest: var openArray[byte], length: uint64): int =
# than 9, so it is safe to convert it to `int`.
int(res)
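For context (an editor's illustration, not part of this commit): the encoders in this module share a probe-then-write pattern — when ``dest`` is too small, the call only returns the required size without writing anything. A minimal sketch with ``asn1EncodeLength``:

  var dest: seq[byte]
  let needed = asn1EncodeLength(dest.toOpenArray(0, -1), 1024'u64) # probe: 3
  dest.setLen(needed)
  discard asn1EncodeLength(dest.toOpenArray(0, dest.high), 1024'u64)
  # dest == @[0x82'u8, 0x04'u8, 0x00'u8]: long form, two length octets, 1024

The ``write*`` helpers further down rely on the same pattern via ``extend``.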
proc asn1EncodeInteger*(dest: var openArray[byte],
value: openArray[byte]): int =
proc asn1EncodeInteger*(dest: var openArray[byte], value: openArray[byte]): int =
## Encode big-endian binary representation of integer as ASN.1 DER `INTEGER`
## and return number of bytes (octets) used.
##
@@ -193,17 +168,16 @@ proc asn1EncodeInteger*(dest: var openArray[byte],
var buffer: array[16, byte]
var lenlen = 0
let offset =
block:
var o = 0
for i in 0 ..< len(value):
if value[o] != 0x00:
break
inc(o)
if o < len(value):
o
else:
o - 1
let offset = block:
var o = 0
for i in 0 ..< len(value):
if value[o] != 0x00:
break
inc(o)
if o < len(value):
o
else:
o - 1
let destlen =
if len(value) > 0:
@@ -225,12 +199,10 @@ proc asn1EncodeInteger*(dest: var openArray[byte],
if value[offset] >= 0x80'u8:
dest[1 + lenlen] = 0x00'u8
shift = 2
copyMem(addr dest[shift + lenlen], unsafeAddr value[offset],
len(value) - offset)
copyMem(addr dest[shift + lenlen], unsafeAddr value[offset], len(value) - offset)
destlen
proc asn1EncodeInteger*[T: SomeUnsignedInt](dest: var openArray[byte],
value: T): int =
proc asn1EncodeInteger*[T: SomeUnsignedInt](dest: var openArray[byte], value: T): int =
## Encode Nim's unsigned integer as ASN.1 DER `INTEGER` and return number of
## bytes (octets) used.
##
@@ -265,8 +237,7 @@ proc asn1EncodeNull*(dest: var openArray[byte]): int =
dest[1] = 0x00'u8
res
proc asn1EncodeOctetString*(dest: var openArray[byte],
value: openArray[byte]): int =
proc asn1EncodeOctetString*(dest: var openArray[byte], value: openArray[byte]): int =
## Encode array of bytes as ASN.1 DER `OCTET STRING` and return number of
## bytes (octets) used.
##
@@ -283,8 +254,9 @@ proc asn1EncodeOctetString*(dest: var openArray[byte],
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
res
proc asn1EncodeBitString*(dest: var openArray[byte],
value: openArray[byte], bits = 0): int =
proc asn1EncodeBitString*(
dest: var openArray[byte], value: openArray[byte], bits = 0
): int =
## Encode array of bytes as ASN.1 DER `BIT STRING` and return number of bytes
## (octets) used.
##
@@ -305,7 +277,7 @@ proc asn1EncodeBitString*(dest: var openArray[byte],
let bytelen = (bitlen + 7) shr 3
# Number of unused bits
let unused = (8 - (bitlen and 7)) and 7
let mask = not((1'u8 shl unused) - 1'u8)
let mask = not ((1'u8 shl unused) - 1'u8)
var lenlen = asn1EncodeLength(buffer, uint64(bytelen + 1))
let res = 1 + lenlen + 1 + len(value)
if len(dest) >= res:
@@ -319,8 +291,7 @@ proc asn1EncodeBitString*(dest: var openArray[byte],
dest[2 + lenlen + bytelen - 1] = lastbyte and mask
res
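To illustrate the unused-bits arithmetic above (a worked example, not from the commit), take ``bits = 10``:

  # bytelen = (10 + 7) shr 3           = 2
  # unused  = (8 - (10 and 7)) and 7   = 6
  # mask    = not((1'u8 shl 6) - 1'u8) = 0xC0
  # The encoder emits the unused count (6) before the payload and masks the
  # last byte with 0xC0, keeping only its two most significant bits.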
proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte],
value: T): int =
proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte], value: T): int =
var v = value
if value <= cast[T](0x7F):
if len(dest) >= 1:
@@ -361,8 +332,7 @@ proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
res
proc asn1EncodeSequence*(dest: var openArray[byte],
value: openArray[byte]): int =
proc asn1EncodeSequence*(dest: var openArray[byte], value: openArray[byte]): int =
## Encode ``value`` as ASN.1 DER `SEQUENCE` and return number of bytes
## (octets) used.
##
@@ -378,8 +348,7 @@ proc asn1EncodeSequence*(dest: var openArray[byte],
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
res
proc asn1EncodeComposite*(dest: var openArray[byte],
value: Asn1Composite): int =
proc asn1EncodeComposite*(dest: var openArray[byte], value: Asn1Composite): int =
## Encode composite value and return number of bytes (octets) used.
##
## If length of ``dest`` is less than the number of required bytes to encode
@@ -391,12 +360,12 @@ proc asn1EncodeComposite*(dest: var openArray[byte],
if len(dest) >= res:
dest[0] = value.tag.code()
copyMem(addr dest[1], addr buffer[0], lenlen)
copyMem(addr dest[1 + lenlen], unsafeAddr value.buffer[0],
len(value.buffer))
copyMem(addr dest[1 + lenlen], unsafeAddr value.buffer[0], len(value.buffer))
res
proc asn1EncodeContextTag*(dest: var openArray[byte], value: openArray[byte],
tag: int): int =
proc asn1EncodeContextTag*(
dest: var openArray[byte], value: openArray[byte], tag: int
): int =
## Encode ASN.1 DER `CONTEXT SPECIFIC TAG` ``tag`` for value ``value`` and
## return number of bytes (octets) used.
##
@@ -432,7 +401,7 @@ proc getLength(ab: var Asn1Buffer): Asn1Result[int] =
return err(Asn1Error.Overflow)
if ab.isEnough(octets):
var lengthU: uint64 = 0
for i in 0..<octets:
for i in 0 ..< octets:
lengthU = (lengthU shl 8) or safeConvert[uint64](ab.buffer[ab.offset + i + 1])
if lengthU > uint64(int64.high):
return err(Asn1Error.Overflow)
@@ -471,7 +440,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
inclass = false
while true:
offset = ab.offset
aclass = ? ab.getTag(tag)
aclass = ?ab.getTag(tag)
case aclass
of Asn1Class.ContextSpecific:
@@ -480,9 +449,9 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
else:
inclass = true
ttag = tag
tlength = ? ab.getLength()
tlength = ?ab.getLength()
of Asn1Class.Universal:
length = ? ab.getLength()
length = ?ab.getLength()
if inclass:
if length >= tlength:
@@ -499,22 +468,26 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
let b = ab.buffer[ab.offset]
if b != 0xFF'u8 and b != 0x00'u8:
return err(Asn1Error.Incorrect)
return err(Asn1Error.Incorrect)
field = Asn1Field(kind: Asn1Tag.Boolean, klass: aclass,
index: ttag, offset: ab.offset,
length: 1, buffer: ab.buffer)
field = Asn1Field(
kind: Asn1Tag.Boolean,
klass: aclass,
index: ttag,
offset: ab.offset,
length: 1,
buffer: ab.buffer,
)
field.vbool = (b == 0xFF'u8)
ab.offset += 1
return ok(field)
of Asn1Tag.Integer.code():
# INTEGER
if length == 0:
return err(Asn1Error.Incorrect)
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)
return err(Asn1Error.Incomplete)
# Count number of leading zeroes
var zc = 0
@@ -526,9 +499,14 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
if zc == 0:
# Negative or Positive integer
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: ab.offset,
length: length, buffer: ab.buffer)
field = Asn1Field(
kind: Asn1Tag.Integer,
klass: aclass,
index: ttag,
offset: ab.offset,
length: length,
buffer: ab.buffer,
)
if (ab.buffer[ab.offset] and 0x80'u8) == 0x80'u8:
# Negative integer
if length <= 8:
@@ -538,54 +516,68 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
field.vint = (field.vint shl 8) or 0xFF'u64
else:
let offset = ab.offset + i - (8 - length)
field.vint = (field.vint shl 8) or safeConvert[uint64](ab.buffer[offset])
field.vint =
(field.vint shl 8) or safeConvert[uint64](ab.buffer[offset])
else:
# Positive integer
if length <= 8:
for i in 0 ..< length:
field.vint = (field.vint shl 8) or
safeConvert[uint64](ab.buffer[ab.offset + i])
field.vint =
(field.vint shl 8) or safeConvert[uint64](ab.buffer[ab.offset + i])
ab.offset += length
return ok(field)
else:
if length == 1:
# Zero value integer
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: ab.offset,
length: length, vint: 0'u64,
buffer: ab.buffer)
field = Asn1Field(
kind: Asn1Tag.Integer,
klass: aclass,
index: ttag,
offset: ab.offset,
length: length,
vint: 0'u64,
buffer: ab.buffer,
)
ab.offset += length
return ok(field)
else:
# Positive integer with leading zero
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: ab.offset + 1,
length: length - 1, buffer: ab.buffer)
field = Asn1Field(
kind: Asn1Tag.Integer,
klass: aclass,
index: ttag,
offset: ab.offset + 1,
length: length - 1,
buffer: ab.buffer,
)
if length <= 9:
for i in 1 ..< length:
field.vint = (field.vint shl 8) or
safeConvert[uint64](ab.buffer[ab.offset + i])
field.vint =
(field.vint shl 8) or safeConvert[uint64](ab.buffer[ab.offset + i])
ab.offset += length
return ok(field)
of Asn1Tag.BitString.code():
# BIT STRING
if length == 0:
# BIT STRING should include the `unused` bits field, so length should be
# at least 1.
return err(Asn1Error.Incorrect)
elif length == 1:
if ab.buffer[ab.offset] != 0x00'u8:
return err(Asn1Error.Incorrect)
else:
# Zero-length BIT STRING.
field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
index: ttag, offset: ab.offset + 1,
length: 0, ubits: 0, buffer: ab.buffer)
field = Asn1Field(
kind: Asn1Tag.BitString,
klass: aclass,
index: ttag,
offset: ab.offset + 1,
length: 0,
ubits: 0,
buffer: ab.buffer,
)
ab.offset += length
return ok(field)
else:
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)
@@ -600,56 +592,77 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
## All unused bits should be set to `0`.
return err(Asn1Error.Incorrect)
field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
index: ttag, offset: ab.offset + 1,
length: length - 1, ubits: safeConvert[int](unused),
buffer: ab.buffer)
field = Asn1Field(
kind: Asn1Tag.BitString,
klass: aclass,
index: ttag,
offset: ab.offset + 1,
length: length - 1,
ubits: safeConvert[int](unused),
buffer: ab.buffer,
)
ab.offset += length
return ok(field)
of Asn1Tag.OctetString.code():
# OCTET STRING
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)
field = Asn1Field(kind: Asn1Tag.OctetString, klass: aclass,
index: ttag, offset: ab.offset,
length: length, buffer: ab.buffer)
field = Asn1Field(
kind: Asn1Tag.OctetString,
klass: aclass,
index: ttag,
offset: ab.offset,
length: length,
buffer: ab.buffer,
)
ab.offset += length
return ok(field)
of Asn1Tag.Null.code():
# NULL
if length != 0:
return err(Asn1Error.Incorrect)
field = Asn1Field(kind: Asn1Tag.Null, klass: aclass, index: ttag,
offset: ab.offset, length: 0, buffer: ab.buffer)
field = Asn1Field(
kind: Asn1Tag.Null,
klass: aclass,
index: ttag,
offset: ab.offset,
length: 0,
buffer: ab.buffer,
)
ab.offset += length
return ok(field)
of Asn1Tag.Oid.code():
# OID
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)
field = Asn1Field(kind: Asn1Tag.Oid, klass: aclass,
index: ttag, offset: ab.offset,
length: length, buffer: ab.buffer)
field = Asn1Field(
kind: Asn1Tag.Oid,
klass: aclass,
index: ttag,
offset: ab.offset,
length: length,
buffer: ab.buffer,
)
ab.offset += length
return ok(field)
of Asn1Tag.Sequence.code():
# SEQUENCE
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)
field = Asn1Field(kind: Asn1Tag.Sequence, klass: aclass,
index: ttag, offset: ab.offset,
length: length, buffer: ab.buffer)
field = Asn1Field(
kind: Asn1Tag.Sequence,
klass: aclass,
index: ttag,
offset: ab.offset,
length: length,
buffer: ab.buffer,
)
ab.offset += length
return ok(field)
else:
return err(Asn1Error.NoSupport)
@@ -672,9 +685,9 @@ proc `==`*(field: Asn1Field, data: openArray[byte]): bool =
if length > 0:
if field.length == len(data):
CT.isEqual(
field.buffer.toOpenArray(field.offset,
field.offset + field.length - 1),
data.toOpenArray(0, field.length - 1))
field.buffer.toOpenArray(field.offset, field.offset + field.length - 1),
data.toOpenArray(0, field.length - 1),
)
else:
false
else:
@@ -752,13 +765,14 @@ proc `$`*(field: Asn1Field): string =
res.add(ncrutils.toHex(field.toOpenArray()))
res
proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag) =
proc write*[T: Asn1Buffer | Asn1Composite](abc: var T, tag: Asn1Tag) =
## Write empty value to buffer or composite with ``tag``.
##
## This procedure must be used to write `NULL`, `0` or empty `BIT STRING`,
## `OCTET STRING` types.
doAssert(tag in {Asn1Tag.Null, Asn1Tag.Integer, Asn1Tag.BitString,
Asn1Tag.OctetString})
doAssert(
tag in {Asn1Tag.Null, Asn1Tag.Integer, Asn1Tag.BitString, Asn1Tag.OctetString}
)
var length: int
if tag == Asn1Tag.Null:
length = asn1EncodeNull(abc.toOpenArray())
@@ -780,22 +794,23 @@ proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag) =
discard asn1EncodeOctetString(abc.toOpenArray(), tmp.toOpenArray(0, -1))
abc.offset += length
proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, value: uint64) =
proc write*[T: Asn1Buffer | Asn1Composite](abc: var T, value: uint64) =
## Write uint64 ``value`` to buffer or composite as ASN.1 `INTEGER`.
let length = asn1EncodeInteger(abc.toOpenArray(), value)
abc.extend(length)
discard asn1EncodeInteger(abc.toOpenArray(), value)
abc.offset += length
proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, value: bool) =
proc write*[T: Asn1Buffer | Asn1Composite](abc: var T, value: bool) =
## Write bool ``value`` to buffer or composite as ASN.1 `BOOLEAN`.
let length = asn1EncodeBoolean(abc.toOpenArray(), value)
abc.extend(length)
discard asn1EncodeBoolean(abc.toOpenArray(), value)
abc.offset += length
proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag,
value: openArray[byte], bits = 0) =
proc write*[T: Asn1Buffer | Asn1Composite](
abc: var T, tag: Asn1Tag, value: openArray[byte], bits = 0
) =
## Write array ``value`` using ``tag``.
##
## This procedure is used to write ASN.1 `INTEGER`, `OCTET STRING`,
@@ -803,8 +818,9 @@ proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag,
##
## For `BIT STRING` you can use ``bits`` argument to specify number of used
## bits.
doAssert(tag in {Asn1Tag.Integer, Asn1Tag.OctetString, Asn1Tag.BitString,
Asn1Tag.Oid})
doAssert(
tag in {Asn1Tag.Integer, Asn1Tag.OctetString, Asn1Tag.BitString, Asn1Tag.Oid}
)
var length: int
if tag == Asn1Tag.Integer:
length = asn1EncodeInteger(abc.toOpenArray(), value)
@@ -824,7 +840,7 @@ proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag,
discard asn1EncodeOid(abc.toOpenArray(), value)
abc.offset += length
proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, value: Asn1Composite) =
proc write*[T: Asn1Buffer | Asn1Composite](abc: var T, value: Asn1Composite) =
doAssert(len(value) > 0, "Composite value not finished")
var length: int
if value.tag == Asn1Tag.Sequence:
@@ -841,6 +857,6 @@ proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, value: Asn1Composite) =
discard asn1EncodeContextTag(abc.toOpenArray(), value.buffer, value.idx)
abc.offset += length
proc finish*[T: Asn1Buffer|Asn1Composite](abc: var T) {.inline.} =
proc finish*[T: Asn1Buffer | Asn1Composite](abc: var T) {.inline.} =
## Finishes buffer or composite and prepares it for writing.
abc.offset = 0
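As a usage sketch for the ``write``/``finish`` API above (mirroring the ``RsaPrivateKey.toBytes`` flow later in this diff; ``Asn1Buffer.init``/``Asn1Composite.init`` are assumed as used there):

  var b = Asn1Buffer.init()
  var p = Asn1Composite.init(Asn1Tag.Sequence)
  p.write(0'u64)        # INTEGER 0
  p.write(Asn1Tag.Null) # NULL
  p.finish()
  b.write(p)
  b.finish()
  # b.buffer now holds the DER encoding of SEQUENCE { INTEGER 0, NULL }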


@@ -30,32 +30,17 @@ const
MinKeySize* = 2048
## Minimal allowed RSA key size in bits.
## https://github.com/libp2p/go-libp2p-core/blob/master/crypto/rsa_common.go#L13
DefaultKeySize* = 3072
## Default RSA key size in bits.
DefaultKeySize* = 3072 ## Default RSA key size in bits.
RsaOidSha1* = [
byte 0x05, 0x2B, 0x0E, 0x03, 0x02, 0x1A
]
RsaOidSha1* = [byte 0x05, 0x2B, 0x0E, 0x03, 0x02, 0x1A]
## RSA PKCS#1.5 SHA-1 hash object identifier.
RsaOidSha224* = [
byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04,
0x02, 0x04
]
RsaOidSha224* = [byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04]
## RSA PKCS#1.5 SHA-224 hash object identifier.
RsaOidSha256* = [
byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04,
0x02, 0x01
]
RsaOidSha256* = [byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01]
## RSA PKCS#1.5 SHA-256 hash object identifier.
RsaOidSha384* = [
byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04,
0x02, 0x02
]
RsaOidSha384* = [byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02]
## RSA PKCS#1.5 SHA-384 hash object identifier.
RsaOidSha512* = [
byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04,
0x02, 0x03
]
RsaOidSha512* = [byte 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03]
## RSA PKCS#1.5 SHA-512 hash object identifier.
type
@@ -79,9 +64,9 @@ type
RsaKP* = RsaPrivateKey | RsaKeyPair
RsaError* = enum
RsaGenError,
RsaKeyIncorrectError,
RsaSignatureError,
RsaGenError
RsaKeyIncorrectError
RsaSignatureError
RsaLowSecurityError
RsaResult*[T] = Result[T, RsaError]
@@ -109,15 +94,18 @@ template getArray*(bs, os, ls: untyped): untyped =
template trimZeroes(b: seq[byte], pt, ptlen: untyped) =
var length = ptlen
for i in 0..<length:
for i in 0 ..< length:
if pt[] != byte(0x00):
break
pt = cast[ptr byte](cast[uint](pt) + 1)
ptlen -= 1
proc random*[T: RsaKP](t: typedesc[T], rng: var HmacDrbgContext,
bits = DefaultKeySize,
pubexp = DefaultPublicExponent): RsaResult[T] =
proc random*[T: RsaKP](
t: typedesc[T],
rng: var HmacDrbgContext,
bits = DefaultKeySize,
pubexp = DefaultPublicExponent,
): RsaResult[T] =
## Generate new random RSA private key using BearSSL's HMAC-SHA256-DRBG
## algorithm.
##
@@ -139,10 +127,15 @@ proc random*[T: RsaKP](t: typedesc[T], rng: var HmacDrbgContext,
var keygen = rsaKeygenGetDefault()
if keygen(addr rng.vtable,
addr res.seck, addr res.buffer[sko],
addr res.pubk, addr res.buffer[pko],
cuint(bits), pubexp) == 0:
if keygen(
addr rng.vtable,
addr res.seck,
addr res.buffer[sko],
addr res.pubk,
addr res.buffer[pko],
cuint(bits),
pubexp,
) == 0:
return err(RsaGenError)
let
@@ -170,9 +163,10 @@ proc copy*[T: RsaPKI](key: T): T =
doAssert(not isNil(key))
when T is RsaPrivateKey:
if len(key.buffer) > 0:
let length = key.seck.plen.uint + key.seck.qlen.uint + key.seck.dplen.uint +
key.seck.dqlen.uint + key.seck.iqlen.uint + key.pubk.nlen.uint +
key.pubk.elen.uint + key.pexplen.uint
let length =
key.seck.plen.uint + key.seck.qlen.uint + key.seck.dplen.uint +
key.seck.dqlen.uint + key.seck.iqlen.uint + key.pubk.nlen.uint +
key.pubk.elen.uint + key.pexplen.uint
result = new RsaPrivateKey
result.buffer = newSeq[byte](length)
let po: uint = 0
@@ -235,8 +229,7 @@ proc getPublicKey*(key: RsaPrivateKey): RsaPublicKey =
result.key.n = addr result.buffer[0]
result.key.e = addr result.buffer[key.pubk.nlen]
copyMem(addr result.buffer[0], cast[pointer](key.pubk.n), key.pubk.nlen)
copyMem(addr result.buffer[key.pubk.nlen], cast[pointer](key.pubk.e),
key.pubk.elen)
copyMem(addr result.buffer[key.pubk.nlen], cast[pointer](key.pubk.e), key.pubk.elen)
result.key.nlen = key.pubk.nlen
result.key.elen = key.pubk.elen
@@ -248,7 +241,7 @@ proc pubkey*(pair: RsaKeyPair): RsaPublicKey {.inline.} =
## Get RSA public key from pair ``pair``.
result = cast[RsaPrivateKey](pair).getPublicKey()
proc clear*[T: RsaPKI|RsaKeyPair](pki: var T) =
proc clear*[T: RsaPKI | RsaKeyPair](pki: var T) =
## Wipe and clear RSA private key, public key or signature object.
doAssert(not isNil(pki))
when T is RsaPrivateKey:
@@ -292,21 +285,14 @@ proc toBytes*(key: RsaPrivateKey, data: var openArray[byte]): RsaResult[int] =
var b = Asn1Buffer.init()
var p = Asn1Composite.init(Asn1Tag.Sequence)
p.write(0'u64)
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pubk.n,
key.pubk.nlen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pubk.e,
key.pubk.elen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pubk.n, key.pubk.nlen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pubk.e, key.pubk.elen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.pexp, key.pexplen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.p,
key.seck.plen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.q,
key.seck.qlen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.dp,
key.seck.dplen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.dq,
key.seck.dqlen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.iq,
key.seck.iqlen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.p, key.seck.plen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.q, key.seck.qlen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.dp, key.seck.dplen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.dq, key.seck.dqlen))
p.write(Asn1Tag.Integer, getArray(key.buffer, key.seck.iq, key.seck.iqlen))
p.finish()
b.write(p)
b.finish()
@@ -371,7 +357,7 @@ proc getBytes*(key: RsaPrivateKey): RsaResult[seq[byte]] =
if isNil(key):
return err(RsaKeyIncorrectError)
var res = newSeq[byte](4096)
let length = ? key.toBytes(res)
let length = ?key.toBytes(res)
if length > 0:
res.setLen(length)
ok(res)
@@ -384,7 +370,7 @@ proc getBytes*(key: RsaPublicKey): RsaResult[seq[byte]] =
if isNil(key):
return err(RsaKeyIncorrectError)
var res = newSeq[byte](4096)
let length = ? key.toBytes(res)
let length = ?key.toBytes(res)
if length > 0:
res.setLen(length)
ok(res)
@@ -396,7 +382,7 @@ proc getBytes*(sig: RsaSignature): RsaResult[seq[byte]] =
if isNil(sig):
return err(RsaSignatureError)
var res = newSeq[byte](4096)
let length = ? sig.toBytes(res)
let length = ?sig.toBytes(res)
if length > 0:
res.setLen(length)
ok(res)
@@ -408,20 +394,19 @@ proc init*(key: var RsaPrivateKey, data: openArray[byte]): Result[void, Asn1Erro
## ``data``.
##
## Procedure returns ``Asn1Status``.
var
field, rawn, rawpube, rawprie, rawp, rawq, rawdp, rawdq, rawiq: Asn1Field
var field, rawn, rawpube, rawprie, rawp, rawq, rawdp, rawdq, rawiq: Asn1Field
# Asn1Field is not trivial so avoid too much Result
var ab = Asn1Buffer.init(data)
field = ? ab.read()
field = ?ab.read()
if field.kind != Asn1Tag.Sequence:
return err(Asn1Error.Incorrect)
var ib = field.getBuffer()
field = ? ib.read()
field = ?ib.read()
if field.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
@@ -429,48 +414,48 @@ proc init*(key: var RsaPrivateKey, data: openArray[byte]): Result[void, Asn1Erro
if field.vint != 0'u64:
return err(Asn1Error.Incorrect)
rawn = ? ib.read()
rawn = ?ib.read()
if rawn.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawpube = ? ib.read()
rawpube = ?ib.read()
if rawpube.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawprie = ? ib.read()
rawprie = ?ib.read()
if rawprie.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawp = ? ib.read()
rawp = ?ib.read()
if rawp.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawq = ? ib.read()
rawq = ?ib.read()
if rawq.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawdp = ? ib.read()
rawdp = ?ib.read()
if rawdp.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawdq = ? ib.read()
rawdq = ?ib.read()
if rawdq.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawiq = ? ib.read()
rawiq = ?ib.read()
if rawiq.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
if len(rawn) >= (MinKeySize shr 3) and len(rawp) > 0 and len(rawq) > 0 and
len(rawdp) > 0 and len(rawdq) > 0 and len(rawiq) > 0:
len(rawdp) > 0 and len(rawdq) > 0 and len(rawiq) > 0:
key = new RsaPrivateKey
key.buffer = @data
key.pubk.n = addr key.buffer[rawn.offset]
@@ -502,52 +487,52 @@ proc init*(key: var RsaPublicKey, data: openArray[byte]): Result[void, Asn1Error
var field, rawn, rawe: Asn1Field
var ab = Asn1Buffer.init(data)
field = ? ab.read()
field = ?ab.read()
if field.kind != Asn1Tag.Sequence:
return err(Asn1Error.Incorrect)
var ib = field.getBuffer()
field = ? ib.read()
field = ?ib.read()
if field.kind != Asn1Tag.Sequence:
return err(Asn1Error.Incorrect)
var ob = field.getBuffer()
field = ? ob.read()
field = ?ob.read()
if field.kind != Asn1Tag.Oid:
return err(Asn1Error.Incorrect)
elif field != Asn1OidRsaEncryption:
return err(Asn1Error.Incorrect)
field = ? ob.read()
field = ?ob.read()
if field.kind != Asn1Tag.Null:
return err(Asn1Error.Incorrect)
field = ? ib.read()
field = ?ib.read()
if field.kind != Asn1Tag.BitString:
return err(Asn1Error.Incorrect)
var vb = field.getBuffer()
field = ? vb.read()
field = ?vb.read()
if field.kind != Asn1Tag.Sequence:
return err(Asn1Error.Incorrect)
var sb = field.getBuffer()
rawn = ? sb.read()
rawn = ?sb.read()
if rawn.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
rawe = ? sb.read()
rawe = ?sb.read()
if rawe.kind != Asn1Tag.Integer:
return err(Asn1Error.Incorrect)
@@ -575,16 +560,16 @@ proc init*(sig: var RsaSignature, data: openArray[byte]): Result[void, Asn1Error
else:
err(Asn1Error.Incorrect)
proc init*[T: RsaPKI](sospk: var T,
data: string): Result[void, Asn1Error] {.inline.} =
proc init*[T: RsaPKI](sospk: var T, data: string): Result[void, Asn1Error] {.inline.} =
## Initialize RSA `private key`, `public key` or `signature` ``sospk`` from
## hexadecimal string representation ``data``.
##
## Procedure returns ``Result[void, Asn1Status]``.
sospk.init(ncrutils.fromHex(data))
proc init*(t: typedesc[RsaPrivateKey],
data: openArray[byte]): RsaResult[RsaPrivateKey] =
proc init*(
t: typedesc[RsaPrivateKey], data: openArray[byte]
): RsaResult[RsaPrivateKey] =
## Initialize RSA private key from ASN.1 DER binary representation ``data``
## and return constructed object.
var res: RsaPrivateKey
@@ -593,8 +578,7 @@ proc init*(t: typedesc[RsaPrivateKey],
else:
ok(res)
proc init*(t: typedesc[RsaPublicKey],
data: openArray[byte]): RsaResult[RsaPublicKey] =
proc init*(t: typedesc[RsaPublicKey], data: openArray[byte]): RsaResult[RsaPublicKey] =
## Initialize RSA public key from ASN.1 DER binary representation ``data``
## and return constructed object.
var res: RsaPublicKey
@@ -603,8 +587,7 @@ proc init*(t: typedesc[RsaPublicKey],
else:
ok(res)
proc init*(t: typedesc[RsaSignature],
data: openArray[byte]): RsaResult[RsaSignature] =
proc init*(t: typedesc[RsaSignature], data: openArray[byte]): RsaResult[RsaSignature] =
## Initialize RSA signature from raw binary representation ``data`` and
## return constructed object.
var res: RsaSignature
@@ -631,14 +614,11 @@ proc `$`*(key: RsaPrivateKey): string =
result.add("\nq = ")
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.q, key.seck.qlen)))
result.add("\ndp = ")
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.dp,
key.seck.dplen)))
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.dp, key.seck.dplen)))
result.add("\ndq = ")
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.dq,
key.seck.dqlen)))
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.dq, key.seck.dqlen)))
result.add("\niq = ")
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.iq,
key.seck.iqlen)))
result.add(ncrutils.toHex(getArray(key.buffer, key.seck.iq, key.seck.iqlen)))
result.add("\npre = ")
result.add(ncrutils.toHex(getArray(key.buffer, key.pexp, key.pexplen)))
result.add("\nm = ")
@@ -684,22 +664,37 @@ proc `==`*(a, b: RsaPrivateKey): bool =
else:
if a.seck.nBitlen == b.seck.nBitlen:
if a.seck.nBitlen > 0'u:
let r1 = CT.isEqual(getArray(a.buffer, a.seck.p, a.seck.plen),
getArray(b.buffer, b.seck.p, b.seck.plen))
let r2 = CT.isEqual(getArray(a.buffer, a.seck.q, a.seck.qlen),
getArray(b.buffer, b.seck.q, b.seck.qlen))
let r3 = CT.isEqual(getArray(a.buffer, a.seck.dp, a.seck.dplen),
getArray(b.buffer, b.seck.dp, b.seck.dplen))
let r4 = CT.isEqual(getArray(a.buffer, a.seck.dq, a.seck.dqlen),
getArray(b.buffer, b.seck.dq, b.seck.dqlen))
let r5 = CT.isEqual(getArray(a.buffer, a.seck.iq, a.seck.iqlen),
getArray(b.buffer, b.seck.iq, b.seck.iqlen))
let r6 = CT.isEqual(getArray(a.buffer, a.pexp, a.pexplen),
getArray(b.buffer, b.pexp, b.pexplen))
let r7 = CT.isEqual(getArray(a.buffer, a.pubk.n, a.pubk.nlen),
getArray(b.buffer, b.pubk.n, b.pubk.nlen))
let r8 = CT.isEqual(getArray(a.buffer, a.pubk.e, a.pubk.elen),
getArray(b.buffer, b.pubk.e, b.pubk.elen))
let r1 = CT.isEqual(
getArray(a.buffer, a.seck.p, a.seck.plen),
getArray(b.buffer, b.seck.p, b.seck.plen),
)
let r2 = CT.isEqual(
getArray(a.buffer, a.seck.q, a.seck.qlen),
getArray(b.buffer, b.seck.q, b.seck.qlen),
)
let r3 = CT.isEqual(
getArray(a.buffer, a.seck.dp, a.seck.dplen),
getArray(b.buffer, b.seck.dp, b.seck.dplen),
)
let r4 = CT.isEqual(
getArray(a.buffer, a.seck.dq, a.seck.dqlen),
getArray(b.buffer, b.seck.dq, b.seck.dqlen),
)
let r5 = CT.isEqual(
getArray(a.buffer, a.seck.iq, a.seck.iqlen),
getArray(b.buffer, b.seck.iq, b.seck.iqlen),
)
let r6 = CT.isEqual(
getArray(a.buffer, a.pexp, a.pexplen), getArray(b.buffer, b.pexp, b.pexplen)
)
let r7 = CT.isEqual(
getArray(a.buffer, a.pubk.n, a.pubk.nlen),
getArray(b.buffer, b.pubk.n, b.pubk.nlen),
)
let r8 = CT.isEqual(
getArray(a.buffer, a.pubk.e, a.pubk.elen),
getArray(b.buffer, b.pubk.e, b.pubk.elen),
)
r1 and r2 and r3 and r4 and r5 and r6 and r7 and r8
else:
true
@@ -737,14 +732,17 @@ proc `==`*(a, b: RsaPublicKey): bool =
elif isNil(b) and (not isNil(a)):
false
else:
let r1 = CT.isEqual(getArray(a.buffer, a.key.n, a.key.nlen),
getArray(b.buffer, b.key.n, b.key.nlen))
let r2 = CT.isEqual(getArray(a.buffer, a.key.e, a.key.elen),
getArray(b.buffer, b.key.e, b.key.elen))
let r1 = CT.isEqual(
getArray(a.buffer, a.key.n, a.key.nlen), getArray(b.buffer, b.key.n, b.key.nlen)
)
let r2 = CT.isEqual(
getArray(a.buffer, a.key.e, a.key.elen), getArray(b.buffer, b.key.e, b.key.elen)
)
(r1 and r2)
proc sign*[T: byte|char](key: RsaPrivateKey,
message: openArray[T]): RsaResult[RsaSignature] {.gcsafe.} =
proc sign*[T: byte | char](
key: RsaPrivateKey, message: openArray[T]
): RsaResult[RsaSignature] {.gcsafe.} =
## Get RSA PKCS1.5 signature of data ``message`` using SHA256 and private
## key ``key``.
if isNil(key):
@@ -763,16 +761,16 @@ proc sign*[T: byte|char](key: RsaPrivateKey,
kv.update(addr hc.vtable, nil, 0)
kv.out(addr hc.vtable, addr hash[0])
var oid = RsaOidSha256
let implRes = impl(addr oid[0],
addr hash[0], uint(len(hash)),
addr key.seck, addr res.buffer[0])
let implRes =
impl(addr oid[0], addr hash[0], uint(len(hash)), addr key.seck, addr res.buffer[0])
if implRes == 0:
err(RsaSignatureError)
else:
ok(res)
proc verify*[T: byte|char](sig: RsaSignature, message: openArray[T],
pubkey: RsaPublicKey): bool {.inline.} =
proc verify*[T: byte | char](
sig: RsaSignature, message: openArray[T], pubkey: RsaPublicKey
): bool {.inline.} =
## Verify RSA signature ``sig`` using public key ``pubkey`` and data
## ``message``.
##
@@ -792,8 +790,13 @@ proc verify*[T: byte|char](sig: RsaSignature, message: openArray[T],
kv.update(addr hc.vtable, nil, 0)
kv.out(addr hc.vtable, addr hash[0])
var oid = RsaOidSha256
let res = impl(addr sig.buffer[0], uint(len(sig.buffer)),
addr oid[0],
uint(len(check)), addr pubkey.key, addr check[0])
let res = impl(
addr sig.buffer[0],
uint(len(sig.buffer)),
addr oid[0],
uint(len(check)),
addr pubkey.key,
addr check[0],
)
if res == 1:
result = equalMem(addr check[0], addr hash[0], len(hash))
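A hedged end-to-end sketch of the sign/verify procs above (assumptions: ``rng`` is a ``ref HmacDrbgContext``, e.g. from ``newRng()``, and ``expect`` from stew/results is used for error handling):

  let key = RsaPrivateKey.random(rng[], bits = MinKeySize).expect("keygen")
  let sig = key.sign("nim-libp2p").expect("sign")  # PKCS#1.5 over SHA-256
  doAssert sig.verify("nim-libp2p", key.getPublicKey())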


@@ -10,20 +10,15 @@
{.push raises: [].}
import bearssl/rand
import
secp256k1,
stew/[byteutils, results],
nimcrypto/[hash, sha2]
import secp256k1, stew/[byteutils, results], nimcrypto/[hash, sha2]
export sha2, results, rand
const
SkRawPrivateKeySize* = 256 div 8
## Size of private key in octets (bytes)
SkRawPrivateKeySize* = 256 div 8 ## Size of private key in octets (bytes)
SkRawSignatureSize* = SkRawPrivateKeySize * 2 + 1
## Size of signature in octets (bytes)
SkRawPublicKeySize* = SkRawPrivateKeySize + 1
## Size of public key in octets (bytes)
SkRawPublicKeySize* = SkRawPrivateKeySize + 1 ## Size of public key in octets (bytes)
# This is extremely confusing, but it's to avoid confusion between the Eth standard and the Secp standard
type
@@ -56,31 +51,31 @@ template pubkey*(v: SkKeyPair): SkPublicKey =
proc init*(key: var SkPrivateKey, data: openArray[byte]): SkResult[void] =
## Initialize Secp256k1 `private key` ``key`` from raw binary
## representation ``data``.
key = SkPrivateKey(? secp256k1.SkSecretKey.fromRaw(data))
key = SkPrivateKey(?secp256k1.SkSecretKey.fromRaw(data))
ok()
proc init*(key: var SkPrivateKey, data: string): SkResult[void] =
## Initialize Secp256k1 `private key` ``key`` from hexadecimal string
## representation ``data``.
key = SkPrivateKey(? secp256k1.SkSecretKey.fromHex(data))
key = SkPrivateKey(?secp256k1.SkSecretKey.fromHex(data))
ok()
proc init*(key: var SkPublicKey, data: openArray[byte]): SkResult[void] =
## Initialize Secp256k1 `public key` ``key`` from raw binary
## representation ``data``.
key = SkPublicKey(? secp256k1.SkPublicKey.fromRaw(data))
key = SkPublicKey(?secp256k1.SkPublicKey.fromRaw(data))
ok()
proc init*(key: var SkPublicKey, data: string): SkResult[void] =
## Initialize Secp256k1 `public key` ``key`` from hexadecimal string
## representation ``data``.
key = SkPublicKey(? secp256k1.SkPublicKey.fromHex(data))
key = SkPublicKey(?secp256k1.SkPublicKey.fromHex(data))
ok()
proc init*(sig: var SkSignature, data: openArray[byte]): SkResult[void] =
## Initialize Secp256k1 `signature` ``sig`` from raw binary
## representation ``data``.
sig = SkSignature(? secp256k1.SkSignature.fromDer(data))
sig = SkSignature(?secp256k1.SkSignature.fromDer(data))
ok()
proc init*(sig: var SkSignature, data: string): SkResult[void] =
@@ -151,7 +146,7 @@ proc toBytes*(key: SkPrivateKey, data: var openArray[byte]): SkResult[int] =
## Procedure returns number of bytes (octets) needed to store
## Secp256k1 private key.
if len(data) >= SkRawPrivateKeySize:
data[0..<SkRawPrivateKeySize] = SkSecretKey(key).toRaw()
data[0 ..< SkRawPrivateKeySize] = SkSecretKey(key).toRaw()
ok(SkRawPrivateKeySize)
else:
err("secp: Not enough bytes")
@@ -163,7 +158,7 @@ proc toBytes*(key: SkPublicKey, data: var openArray[byte]): SkResult[int] =
## Procedure returns number of bytes (octets) needed to store
## Secp256k1 public key.
if len(data) >= SkRawPublicKeySize:
data[0..<SkRawPublicKeySize] = secp256k1.SkPublicKey(key).toRawCompressed()
data[0 ..< SkRawPublicKeySize] = secp256k1.SkPublicKey(key).toRawCompressed()
ok(SkRawPublicKeySize)
else:
err("secp: Not enough bytes")
@@ -190,22 +185,28 @@ proc getBytes*(sig: SkSignature): seq[byte] {.inline.} =
let length = toBytes(sig, result)
result.setLen(length)
proc sign*[T: byte|char](key: SkPrivateKey, msg: openArray[T]): SkSignature =
proc sign*[T: byte | char](key: SkPrivateKey, msg: openArray[T]): SkSignature =
## Sign message `msg` using private key `key` and return signature object.
let h = sha256.digest(msg)
SkSignature(sign(SkSecretKey(key), SkMessage(h.data)))
proc verify*[T: byte|char](sig: SkSignature, msg: openArray[T],
key: SkPublicKey): bool =
proc verify*[T: byte | char](
sig: SkSignature, msg: openArray[T], key: SkPublicKey
): bool =
let h = sha256.digest(msg)
verify(secp256k1.SkSignature(sig), SkMessage(h.data), secp256k1.SkPublicKey(key))
func clear*(key: var SkPrivateKey) = clear(secp256k1.SkSecretKey(key))
func clear*(key: var SkPrivateKey) =
clear(secp256k1.SkSecretKey(key))
func `$`*(key: SkPrivateKey): string = $secp256k1.SkSecretKey(key)
func `$`*(key: SkPublicKey): string = $secp256k1.SkPublicKey(key)
func `$`*(key: SkSignature): string = $secp256k1.SkSignature(key)
func `$`*(key: SkKeyPair): string = $secp256k1.SkKeyPair(key)
func `$`*(key: SkPrivateKey): string =
$secp256k1.SkSecretKey(key)
func `$`*(key: SkPublicKey): string =
$secp256k1.SkPublicKey(key)
func `$`*(key: SkSignature): string =
$secp256k1.SkSignature(key)
func `$`*(key: SkKeyPair): string =
$secp256k1.SkKeyPair(key)
func `==`*(a, b: SkPrivateKey): bool =
secp256k1.SkSecretKey(a) == secp256k1.SkSecretKey(b)


@@ -16,8 +16,7 @@ import ../varint, ../multiaddress, ../multicodec, ../cid, ../peerid
import ../wire, ../multihash, ../protobuf/minprotobuf, ../errors
import ../crypto/crypto, ../utility
export
peerid, multiaddress, multicodec, multihash, cid, crypto, wire, errors
export peerid, multiaddress, multicodec, multihash, cid, crypto, wire, errors
when not defined(windows):
import posix
@@ -32,83 +31,93 @@ const
type
IpfsLogLevel* {.pure.} = enum
Critical, Error, Warning, Notice, Info, Debug, Trace
Critical
Error
Warning
Notice
Info
Debug
Trace
RequestType* {.pure.} = enum
IDENTIFY = 0,
CONNECT = 1,
STREAM_OPEN = 2,
STREAM_HANDLER = 3,
DHT = 4,
LIST_PEERS = 5,
CONNMANAGER = 6,
IDENTIFY = 0
CONNECT = 1
STREAM_OPEN = 2
STREAM_HANDLER = 3
DHT = 4
LIST_PEERS = 5
CONNMANAGER = 6
DISCONNECT = 7
PUBSUB = 8
DHTRequestType* {.pure.} = enum
FIND_PEER = 0,
FIND_PEERS_CONNECTED_TO_PEER = 1,
FIND_PROVIDERS = 2,
GET_CLOSEST_PEERS = 3,
GET_PUBLIC_KEY = 4,
GET_VALUE = 5,
SEARCH_VALUE = 6,
PUT_VALUE = 7,
FIND_PEER = 0
FIND_PEERS_CONNECTED_TO_PEER = 1
FIND_PROVIDERS = 2
GET_CLOSEST_PEERS = 3
GET_PUBLIC_KEY = 4
GET_VALUE = 5
SEARCH_VALUE = 6
PUT_VALUE = 7
PROVIDE = 8
ConnManagerRequestType* {.pure.} = enum
TAG_PEER = 0,
UNTAG_PEER = 1,
TAG_PEER = 0
UNTAG_PEER = 1
TRIM = 2
PSRequestType* {.pure.} = enum
GET_TOPICS = 0,
LIST_PEERS = 1,
PUBLISH = 2,
GET_TOPICS = 0
LIST_PEERS = 1
PUBLISH = 2
SUBSCRIBE = 3
ResponseKind* = enum
Malformed,
Error,
Malformed
Error
Success
ResponseType* {.pure.} = enum
ERROR = 2,
STREAMINFO = 3,
IDENTITY = 4,
DHT = 5,
ERROR = 2
STREAMINFO = 3
IDENTITY = 4
DHT = 5
PEERINFO = 6
PUBSUB = 7
DHTResponseType* {.pure.} = enum
BEGIN = 0,
VALUE = 1,
BEGIN = 0
VALUE = 1
END = 2
MultiProtocol* = string
DHTValue* = seq[byte]
P2PStreamFlags* {.pure.} = enum
None, Closed, Inbound, Outbound
None
Closed
Inbound
Outbound
P2PDaemonFlags* = enum
DHTClient, ## Start daemon in DHT client mode
DHTFull, ## Start daemon with full DHT support
Bootstrap, ## Start daemon with bootstrap
WaitBootstrap, ## Start daemon with bootstrap and wait until daemon
## establish connection to at least 2 peers
PSFloodSub, ## Enable `FloodSub` protocol in daemon
PSGossipSub, ## Enable `GossipSub` protocol in daemon
PSNoSign, ## Disable pubsub message signing (default true)
PSStrictSign, ## Force strict checking pubsub message signature
NATPortMap, ## Force daemon to use NAT-PMP.
AutoNAT, ## Force daemon to use AutoNAT.
AutoRelay, ## Enables autorelay mode.
RelayActive, ## Enables active mode for relay.
RelayDiscovery,## Enables passive discovery for relay.
RelayHop, ## Enables hop for relay.
NoInlinePeerId,## Disable inlining of peer ID (not yet in #master).
NoProcessCtrl ## Process was not spawned.
DHTClient ## Start daemon in DHT client mode
DHTFull ## Start daemon with full DHT support
Bootstrap ## Start daemon with bootstrap
WaitBootstrap
## Start daemon with bootstrap and wait until the daemon
## establishes connections to at least 2 peers
PSFloodSub ## Enable `FloodSub` protocol in daemon
PSGossipSub ## Enable `GossipSub` protocol in daemon
PSNoSign ## Disable pubsub message signing (default true)
PSStrictSign ## Force strict checking pubsub message signature
NATPortMap ## Force daemon to use NAT-PMP.
AutoNAT ## Force daemon to use AutoNAT.
AutoRelay ## Enables autorelay mode.
RelayActive ## Enables active mode for relay.
RelayDiscovery ## Enables passive discovery for relay.
RelayHop ## Enables hop for relay.
NoInlinePeerId ## Disable inlining of peer ID (not yet in #master).
NoProcessCtrl ## Process was not spawned.
P2PStream* = ref object
flags*: set[P2PStreamFlags]
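For illustration (an editor's sketch, not part of the commit), the P2PDaemonFlags above combine into a set when starting the daemon; this must run in an async context:

  let api = await newDaemonApi({DHTFull, PSGossipSub, PSStrictSign})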
@@ -121,8 +130,7 @@ type
server*: StreamServer
address*: MultiAddress
DaemonAPI* = ref object
# pool*: TransportPool
DaemonAPI* = ref object # pool*: TransportPool
flags*: set[P2PDaemonFlags]
address*: MultiAddress
pattern*: string
@@ -149,11 +157,12 @@ type
signature*: Signature
key*: PublicKey
P2PStreamCallback* = proc(api: DaemonAPI,
stream: P2PStream): Future[void] {.gcsafe, raises: [CatchableError].}
P2PPubSubCallback* = proc(api: DaemonAPI,
ticket: PubsubTicket,
message: PubSubMessage): Future[bool] {.gcsafe, raises: [CatchableError].}
P2PStreamCallback* = proc(api: DaemonAPI, stream: P2PStream): Future[void] {.
gcsafe, raises: [CatchableError]
.}
P2PPubSubCallback* = proc(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.gcsafe, raises: [CatchableError].}
DaemonError* = object of LPError
DaemonRemoteError* = object of DaemonError
@@ -161,7 +170,8 @@ type
var daemonsCount {.threadvar.}: int
chronicles.formatIt(PeerInfo): shortLog(it)
chronicles.formatIt(PeerInfo):
shortLog(it)
proc requestIdentity(): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
@@ -170,9 +180,9 @@ proc requestIdentity(): ProtoBuffer =
result.write(1, safeConvert[uint](RequestType.IDENTIFY))
result.finish()
proc requestConnect(peerid: PeerId,
addresses: openArray[MultiAddress],
timeout = 0): ProtoBuffer =
proc requestConnect(
peerid: PeerId, addresses: openArray[MultiAddress], timeout = 0
): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
## Processing function `doConnect(req *pb.Request)`.
result = initProtoBuffer({WithVarintLength})
@@ -196,9 +206,9 @@ proc requestDisconnect(peerid: PeerId): ProtoBuffer =
result.write(7, msg)
result.finish()
proc requestStreamOpen(peerid: PeerId,
protocols: openArray[string],
timeout = 0): ProtoBuffer =
proc requestStreamOpen(
peerid: PeerId, protocols: openArray[string], timeout = 0
): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
## Processing function `doStreamOpen(req *pb.Request)`.
result = initProtoBuffer({WithVarintLength})
@@ -212,8 +222,9 @@ proc requestStreamOpen(peerid: PeerId,
result.write(3, msg)
result.finish()
proc requestStreamHandler(address: MultiAddress,
protocols: openArray[MultiProtocol]): ProtoBuffer =
proc requestStreamHandler(
address: MultiAddress, protocols: openArray[MultiProtocol]
): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
## Processing function `doStreamHandler(req *pb.Request)`.
result = initProtoBuffer({WithVarintLength})
@@ -247,8 +258,7 @@ proc requestDHTFindPeer(peer: PeerId, timeout = 0): ProtoBuffer =
result.write(5, msg)
result.finish()
proc requestDHTFindPeersConnectedToPeer(peer: PeerId,
timeout = 0): ProtoBuffer =
proc requestDHTFindPeersConnectedToPeer(peer: PeerId, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTFindPeersConnectedToPeer(req *pb.DHTRequest)`.
let msgid = safeConvert[uint](DHTRequestType.FIND_PEERS_CONNECTED_TO_PEER)
@@ -263,8 +273,7 @@ proc requestDHTFindPeersConnectedToPeer(peer: PeerId,
result.write(5, msg)
result.finish()
proc requestDHTFindProviders(cid: Cid,
count: uint32, timeout = 0): ProtoBuffer =
proc requestDHTFindProviders(cid: Cid, count: uint32, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTFindProviders(req *pb.DHTRequest)`.
let msgid = safeConvert[uint](DHTRequestType.FIND_PROVIDERS)
@@ -340,8 +349,7 @@ proc requestDHTSearchValue(key: string, timeout = 0): ProtoBuffer =
result.write(5, msg)
result.finish()
proc requestDHTPutValue(key: string, value: openArray[byte],
timeout = 0): ProtoBuffer =
proc requestDHTPutValue(key: string, value: openArray[byte], timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTPutValue(req *pb.DHTRequest)`.
let msgid = safeConvert[uint](DHTRequestType.PUT_VALUE)
@@ -484,7 +492,7 @@ proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
res: VarintResult[void]
var buffer = newSeq[byte](10)
try:
for i in 0..<len(buffer):
for i in 0 ..< len(buffer):
await conn.readExactly(addr buffer[i], 1)
res = PB.getUVarint(buffer.toOpenArray(0, i), length, size)
if res.isOk():
@@ -500,8 +508,7 @@ proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
result = buffer
proc newConnection*(api: DaemonAPI): Future[StreamTransport]
{.raises: [LPError].} =
proc newConnection*(api: DaemonAPI): Future[StreamTransport] {.raises: [LPError].} =
result = connect(api.address)
proc closeConnection*(api: DaemonAPI, transp: StreamTransport): Future[void] =
@@ -516,16 +523,18 @@ proc socketExists(address: MultiAddress): Future[bool] {.async.} =
result = false
when defined(windows):
proc getCurrentProcessId(): uint32 {.stdcall, dynlib: "kernel32",
importc: "GetCurrentProcessId".}
proc getCurrentProcessId(): uint32 {.
stdcall, dynlib: "kernel32", importc: "GetCurrentProcessId"
.}
proc getProcessId(): int =
result = cast[int](getCurrentProcessId())
else:
proc getProcessId(): int =
result = int(posix.getpid())
proc getSocket(pattern: string,
count: ptr int): Future[MultiAddress] {.async.} =
proc getSocket(pattern: string, count: ptr int): Future[MultiAddress] {.async.} =
var sockname = ""
var pid = $getProcessId()
sockname = pattern % [pid, $(count[])]
@@ -561,21 +570,23 @@ proc copyEnv(): StringTableRef =
for key, val in envPairs():
result[key] = val
proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},
bootstrapNodes: seq[string] = @[],
id: string = "",
hostAddresses: seq[MultiAddress] = @[],
announcedAddresses: seq[MultiAddress] = @[],
daemon = DefaultDaemonFile,
sockpath = "",
patternSock = "",
patternHandler = "",
poolSize = 10,
gossipsubHeartbeatInterval = 0,
gossipsubHeartbeatDelay = 0,
peersRequired = 2,
logFile = "",
logLevel = IpfsLogLevel.Debug): Future[DaemonAPI] {.async.} =
proc newDaemonApi*(
flags: set[P2PDaemonFlags] = {},
bootstrapNodes: seq[string] = @[],
id: string = "",
hostAddresses: seq[MultiAddress] = @[],
announcedAddresses: seq[MultiAddress] = @[],
daemon = DefaultDaemonFile,
sockpath = "",
patternSock = "",
patternHandler = "",
poolSize = 10,
gossipsubHeartbeatInterval = 0,
gossipsubHeartbeatDelay = 0,
peersRequired = 2,
logFile = "",
logLevel = IpfsLogLevel.Debug,
): Future[DaemonAPI] {.async.} =
## Initialize connection to `go-libp2p-daemon` control socket.
##
## ``flags`` - set of P2PDaemonFlags.
@@ -629,23 +640,15 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},
var env: StringTableRef
when defined(windows):
var patternForSocket = if len(patternSock) > 0:
patternSock
else:
DefaultIpSocketPattern
var patternForChild = if len(patternHandler) > 0:
patternHandler
else:
DefaultIpChildPattern
var patternForSocket =
if len(patternSock) > 0: patternSock else: DefaultIpSocketPattern
var patternForChild =
if len(patternHandler) > 0: patternHandler else: DefaultIpChildPattern
else:
var patternForSocket = if len(patternSock) > 0:
patternSock
else:
DefaultUnixSocketPattern
var patternForChild = if len(patternHandler) > 0:
patternHandler
else:
DefaultUnixChildPattern
var patternForSocket =
if len(patternSock) > 0: patternSock else: DefaultUnixSocketPattern
var patternForChild =
if len(patternHandler) > 0: patternHandler else: DefaultUnixChildPattern
api.flags = flags
api.servers = newSeq[P2PServer]()
@@ -734,13 +737,15 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},
if len(hostAddresses) > 0:
var opt = "-hostAddrs="
for i, address in hostAddresses:
if i > 0: opt.add ","
if i > 0:
opt.add ","
opt.add $address
args.add(opt)
if len(announcedAddresses) > 0:
var opt = "-announceAddrs="
for i, address in announcedAddresses:
if i > 0: opt.add ","
if i > 0:
opt.add ","
opt.add $address
args.add(opt)
args.add("-noise=true")
@@ -755,14 +760,12 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},
# Starting daemon process
# echo "Starting ", cmd, " ", args.join(" ")
api.process =
exceptionToAssert:
startProcess(cmd, "", args, env, {poParentStreams})
api.process = exceptionToAssert:
startProcess(cmd, "", args, env, {poParentStreams})
# Wait until the daemon has bound the control socket.
while true:
if not api.process.running():
raise newException(DaemonLocalError,
"Daemon executable could not be started!")
raise newException(DaemonLocalError, "Daemon executable could not be started!")
let res = await socketExists(api.address)
if res:
break
@@ -822,8 +825,9 @@ template withMessage(m, body: untyped): untyped =
else:
body
proc transactMessage(transp: StreamTransport,
pb: ProtoBuffer): Future[ProtoBuffer] {.async.} =
proc transactMessage(
transp: StreamTransport, pb: ProtoBuffer
): Future[ProtoBuffer] {.async.} =
let length = pb.getLen()
let res = await transp.write(pb.getPtr(), length)
if res != length:
@@ -833,8 +837,7 @@ proc transactMessage(transp: StreamTransport,
raise newException(DaemonLocalError, "Incorrect or empty message received!")
result = initProtoBuffer(message)
proc getPeerInfo(pb: ProtoBuffer): PeerInfo
{.raises: [DaemonLocalError].} =
proc getPeerInfo(pb: ProtoBuffer): PeerInfo {.raises: [DaemonLocalError].} =
## Get PeerInfo object from ``pb``.
result.addresses = newSeq[MultiAddress]()
if pb.getRequiredField(1, result.peer).isErr():
@@ -847,7 +850,7 @@ proc identity*(api: DaemonAPI): Future[PeerInfo] {.async.} =
var transp = await api.newConnection()
try:
var pb = await transactMessage(transp, requestIdentity())
pb.withMessage() do:
pb.withMessage:
var res: seq[byte]
if pb.getRequiredField(ResponseType.IDENTITY.int, res).isOk():
var resPb = initProtoBuffer(res)
@@ -855,15 +858,14 @@ proc identity*(api: DaemonAPI): Future[PeerInfo] {.async.} =
finally:
await api.closeConnection(transp)
proc connect*(api: DaemonAPI, peer: PeerId,
addresses: seq[MultiAddress],
timeout = 0) {.async.} =
proc connect*(
api: DaemonAPI, peer: PeerId, addresses: seq[MultiAddress], timeout = 0
) {.async.} =
## Connect to remote peer with id ``peer`` and addresses ``addresses``.
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestConnect(peer, addresses,
timeout))
pb.withMessage() do:
var pb = await transp.transactMessage(requestConnect(peer, addresses, timeout))
pb.withMessage:
discard
except CatchableError:
await api.closeConnection(transp)
@@ -873,22 +875,21 @@ proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestDisconnect(peer))
pb.withMessage() do:
pb.withMessage:
discard
finally:
await api.closeConnection(transp)
proc openStream*(api: DaemonAPI, peer: PeerId,
protocols: seq[string],
timeout = 0): Future[P2PStream] {.async.} =
proc openStream*(
api: DaemonAPI, peer: PeerId, protocols: seq[string], timeout = 0
): Future[P2PStream] {.async.} =
## Open new stream to peer ``peer`` using one of the protocols in
## ``protocols``. Returns ``P2PStream`` for the stream.
var transp = await api.newConnection()
var stream = new P2PStream
try:
var pb = await transp.transactMessage(requestStreamOpen(peer, protocols,
timeout))
pb.withMessage() do:
var pb = await transp.transactMessage(requestStreamOpen(peer, protocols, timeout))
pb.withMessage:
var res: seq[byte]
if pb.getRequiredField(ResponseType.STREAMINFO.int, res).isOk():
let resPb = initProtoBuffer(res)
@@ -924,8 +925,9 @@ proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
if not isNil(handler):
asyncSpawn handler(api, stream)
proc addHandler*(api: DaemonAPI, protocols: seq[string],
handler: P2PStreamCallback) {.async, raises: [LPError].} =
proc addHandler*(
api: DaemonAPI, protocols: seq[string], handler: P2PStreamCallback
) {.async, raises: [LPError].} =
## Add stream handler ``handler`` for set of protocols ``protocols``.
var transp = await api.newConnection()
let maddress = await getSocket(api.pattern, addr api.ucounter)
@@ -934,9 +936,8 @@ proc addHandler*(api: DaemonAPI, protocols: seq[string],
for item in protocols:
api.handlers[item] = handler
server.start()
var pb = await transp.transactMessage(requestStreamHandler(maddress,
protocols))
pb.withMessage() do:
var pb = await transp.transactMessage(requestStreamHandler(maddress, protocols))
pb.withMessage:
api.servers.add(P2PServer(server: server, address: maddress))
except CatchableError as exc:
for item in protocols:
@@ -953,7 +954,7 @@ proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.} =
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestListPeers())
pb.withMessage() do:
pb.withMessage:
result = newSeq[PeerInfo]()
var ress: seq[seq[byte]]
if pb.getRequiredRepeatedField(ResponseType.PEERINFO.int, ress).isOk():
@@ -963,13 +964,12 @@ proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.} =
finally:
await api.closeConnection(transp)
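Putting the control procs together (a sketch; assumes the go-libp2p-daemon executable is available, teardown omitted):

  proc demo() {.async.} =
    let api = await newDaemonApi()
    let me = await api.identity()        # PeerInfo of the daemon itself
    echo "daemon peer: ", me.peer
    for peer in await api.listPeers():   # currently connected peers
      echo "connected: ", peer.peer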
proc cmTagPeer*(api: DaemonAPI, peer: PeerId, tag: string,
weight: int) {.async.} =
proc cmTagPeer*(api: DaemonAPI, peer: PeerId, tag: string, weight: int) {.async.} =
## Tag peer with id ``peer`` using ``tag`` and ``weight``.
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestCMTagPeer(peer, tag, weight))
withMessage(pb) do:
withMessage(pb):
discard
finally:
await api.closeConnection(transp)
@@ -979,7 +979,7 @@ proc cmUntagPeer*(api: DaemonAPI, peer: PeerId, tag: string) {.async.} =
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestCMUntagPeer(peer, tag))
withMessage(pb) do:
withMessage(pb):
discard
finally:
await api.closeConnection(transp)
@@ -989,37 +989,34 @@ proc cmTrimPeers*(api: DaemonAPI) {.async.} =
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestCMTrim())
withMessage(pb) do:
withMessage(pb):
discard
finally:
await api.closeConnection(transp)
proc dhtGetSinglePeerInfo(pb: ProtoBuffer): PeerInfo
{.raises: [DaemonLocalError].} =
proc dhtGetSinglePeerInfo(pb: ProtoBuffer): PeerInfo {.raises: [DaemonLocalError].} =
var res: seq[byte]
if pb.getRequiredField(2, res).isOk():
result = initProtoBuffer(res).getPeerInfo()
else:
raise newException(DaemonLocalError, "Missing required field `peer`!")
proc dhtGetSingleValue(pb: ProtoBuffer): seq[byte]
{.raises: [DaemonLocalError].} =
proc dhtGetSingleValue(pb: ProtoBuffer): seq[byte] {.raises: [DaemonLocalError].} =
result = newSeq[byte]()
if pb.getRequiredField(3, result).isErr():
raise newException(DaemonLocalError, "Missing field `value`!")
proc dhtGetSinglePublicKey(pb: ProtoBuffer): PublicKey
{.raises: [DaemonLocalError].} =
proc dhtGetSinglePublicKey(pb: ProtoBuffer): PublicKey {.raises: [DaemonLocalError].} =
if pb.getRequiredField(3, result).isErr():
raise newException(DaemonLocalError, "Missing field `value`!")
proc dhtGetSinglePeerId(pb: ProtoBuffer): PeerId
{.raises: [DaemonLocalError].} =
proc dhtGetSinglePeerId(pb: ProtoBuffer): PeerId {.raises: [DaemonLocalError].} =
if pb.getRequiredField(3, result).isErr():
raise newException(DaemonLocalError, "Missing field `value`!")
proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
{.inline, raises: [DaemonLocalError].} =
proc enterDhtMessage(
pb: ProtoBuffer, rt: DHTResponseType
): ProtoBuffer {.inline, raises: [DaemonLocalError].} =
var dhtResponse: seq[byte]
if pb.getRequiredField(ResponseType.DHT.int, dhtResponse).isOk():
var pbDhtResponse = initProtoBuffer(dhtResponse)
@@ -1037,16 +1034,18 @@ proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
else:
raise newException(DaemonLocalError, "Wrong message type!")
proc enterPsMessage(pb: ProtoBuffer): ProtoBuffer
{.inline, raises: [DaemonLocalError].} =
proc enterPsMessage(
pb: ProtoBuffer
): ProtoBuffer {.inline, raises: [DaemonLocalError].} =
var res: seq[byte]
if pb.getRequiredField(ResponseType.PUBSUB.int, res).isErr():
raise newException(DaemonLocalError, "Wrong message type!")
initProtoBuffer(res)
proc getDhtMessageType(pb: ProtoBuffer): DHTResponseType
{.inline, raises: [DaemonLocalError].} =
proc getDhtMessageType(
pb: ProtoBuffer
): DHTResponseType {.inline, raises: [DaemonLocalError].} =
var dtype: uint
if pb.getRequiredField(1, dtype).isErr():
raise newException(DaemonLocalError, "Missing required DHT field `type`!")
@@ -1057,8 +1056,9 @@ proc getDhtMessageType(pb: ProtoBuffer): DHTResponseType
else:
raise newException(DaemonLocalError, "Wrong DHT answer type!")
proc dhtFindPeer*(api: DaemonAPI, peer: PeerId,
timeout = 0): Future[PeerInfo] {.async.} =
proc dhtFindPeer*(
api: DaemonAPI, peer: PeerId, timeout = 0
): Future[PeerInfo] {.async.} =
## Find peer with id ``peer`` and return peer information ``PeerInfo``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1066,13 +1066,14 @@ proc dhtFindPeer*(api: DaemonAPI, peer: PeerId,
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestDHTFindPeer(peer, timeout))
withMessage(pb) do:
withMessage(pb):
result = pb.enterDhtMessage(DHTResponseType.VALUE).dhtGetSinglePeerInfo()
finally:
await api.closeConnection(transp)
proc dhtGetPublicKey*(api: DaemonAPI, peer: PeerId,
timeout = 0): Future[PublicKey] {.async.} =
proc dhtGetPublicKey*(
api: DaemonAPI, peer: PeerId, timeout = 0
): Future[PublicKey] {.async.} =
## Get peer's public key from peer with id ``peer``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1080,13 +1081,14 @@ proc dhtGetPublicKey*(api: DaemonAPI, peer: PeerId,
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestDHTGetPublicKey(peer, timeout))
withMessage(pb) do:
withMessage(pb):
result = pb.enterDhtMessage(DHTResponseType.VALUE).dhtGetSinglePublicKey()
finally:
await api.closeConnection(transp)
proc dhtGetValue*(api: DaemonAPI, key: string,
timeout = 0): Future[seq[byte]] {.async.} =
proc dhtGetValue*(
api: DaemonAPI, key: string, timeout = 0
): Future[seq[byte]] {.async.} =
## Get value associated with ``key``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1094,22 +1096,22 @@ proc dhtGetValue*(api: DaemonAPI, key: string,
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestDHTGetValue(key, timeout))
withMessage(pb) do:
withMessage(pb):
result = pb.enterDhtMessage(DHTResponseType.VALUE).dhtGetSingleValue()
finally:
await api.closeConnection(transp)
proc dhtPutValue*(api: DaemonAPI, key: string, value: seq[byte],
timeout = 0) {.async.} =
proc dhtPutValue*(
api: DaemonAPI, key: string, value: seq[byte], timeout = 0
) {.async.} =
## Associate ``value`` with ``key``.
##
## You can specify a timeout for the DHT request with the ``timeout`` value. A ``0`` value
## means no timeout.
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestDHTPutValue(key, value,
timeout))
withMessage(pb) do:
var pb = await transp.transactMessage(requestDHTPutValue(key, value, timeout))
withMessage(pb):
discard
finally:
await api.closeConnection(transp)
@@ -1122,13 +1124,14 @@ proc dhtProvide*(api: DaemonAPI, cid: Cid, timeout = 0) {.async.} =
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestDHTProvide(cid, timeout))
withMessage(pb) do:
withMessage(pb):
discard
finally:
await api.closeConnection(transp)
proc dhtFindPeersConnectedToPeer*(api: DaemonAPI, peer: PeerId,
timeout = 0): Future[seq[PeerInfo]] {.async.} =
proc dhtFindPeersConnectedToPeer*(
api: DaemonAPI, peer: PeerId, timeout = 0
): Future[seq[PeerInfo]] {.async.} =
## Find peers which are connected to peer with id ``peer``.
##
## You can specify a timeout for the DHT request with the ``timeout`` value. A ``0`` value
@@ -1138,7 +1141,7 @@ proc dhtFindPeersConnectedToPeer*(api: DaemonAPI, peer: PeerId,
try:
let spb = requestDHTFindPeersConnectedToPeer(peer, timeout)
var pb = await transp.transactMessage(spb)
withMessage(pb) do:
withMessage(pb):
discard pb.enterDhtMessage(DHTResponseType.BEGIN)
while true:
var message = await transp.recvMessage()
@@ -1152,8 +1155,9 @@ proc dhtFindPeersConnectedToPeer*(api: DaemonAPI, peer: PeerId,
finally:
await api.closeConnection(transp)
proc dhtGetClosestPeers*(api: DaemonAPI, key: string,
timeout = 0): Future[seq[PeerId]] {.async.} =
proc dhtGetClosestPeers*(
api: DaemonAPI, key: string, timeout = 0
): Future[seq[PeerId]] {.async.} =
## Get closest peers for ``key``.
##
## You can specify a timeout for the DHT request with the ``timeout`` value. A ``0`` value
@@ -1163,7 +1167,7 @@ proc dhtGetClosestPeers*(api: DaemonAPI, key: string,
try:
let spb = requestDHTGetClosestPeers(key, timeout)
var pb = await transp.transactMessage(spb)
withMessage(pb) do:
withMessage(pb):
discard pb.enterDhtMessage(DHTResponseType.BEGIN)
while true:
var message = await transp.recvMessage()
@@ -1177,8 +1181,9 @@ proc dhtGetClosestPeers*(api: DaemonAPI, key: string,
finally:
await api.closeConnection(transp)
proc dhtFindProviders*(api: DaemonAPI, cid: Cid, count: uint32,
timeout = 0): Future[seq[PeerInfo]] {.async.} =
proc dhtFindProviders*(
api: DaemonAPI, cid: Cid, count: uint32, timeout = 0
): Future[seq[PeerInfo]] {.async.} =
## Get ``count`` providers for content with id ``cid``.
##
## You can specify a timeout for the DHT request with the ``timeout`` value. A ``0`` value
@@ -1188,7 +1193,7 @@ proc dhtFindProviders*(api: DaemonAPI, cid: Cid, count: uint32,
try:
let spb = requestDHTFindProviders(cid, count, timeout)
var pb = await transp.transactMessage(spb)
withMessage(pb) do:
withMessage(pb):
discard pb.enterDhtMessage(DHTResponseType.BEGIN)
while true:
var message = await transp.recvMessage()
@@ -1202,8 +1207,9 @@ proc dhtFindProviders*(api: DaemonAPI, cid: Cid, count: uint32,
finally:
await api.closeConnection(transp)
proc dhtSearchValue*(api: DaemonAPI, key: string,
timeout = 0): Future[seq[seq[byte]]] {.async.} =
proc dhtSearchValue*(
api: DaemonAPI, key: string, timeout = 0
): Future[seq[seq[byte]]] {.async.} =
## Search for value with ``key``, return list of values found.
##
## You can specify a timeout for the DHT request with the ``timeout`` value. A ``0`` value
@@ -1212,7 +1218,7 @@ proc dhtSearchValue*(api: DaemonAPI, key: string,
var list = newSeq[seq[byte]]()
try:
var pb = await transp.transactMessage(requestDHTSearchValue(key, timeout))
withMessage(pb) do:
withMessage(pb):
discard pb.enterDhtMessage(DHTResponseType.BEGIN)
while true:
var message = await transp.recvMessage()
@@ -1231,7 +1237,7 @@ proc pubsubGetTopics*(api: DaemonAPI): Future[seq[string]] {.async.} =
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestPSGetTopics())
withMessage(pb) do:
withMessage(pb):
let innerPb = pb.enterPsMessage()
var topics = newSeq[string]()
discard innerPb.getRepeatedField(1, topics)
@@ -1239,14 +1245,13 @@ proc pubsubGetTopics*(api: DaemonAPI): Future[seq[string]] {.async.} =
finally:
await api.closeConnection(transp)
proc pubsubListPeers*(api: DaemonAPI,
topic: string): Future[seq[PeerId]] {.async.} =
proc pubsubListPeers*(api: DaemonAPI, topic: string): Future[seq[PeerId]] {.async.} =
## Get list of peers we are connected to and which are also subscribed to topic
## ``topic``.
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestPSListPeers(topic))
withMessage(pb) do:
withMessage(pb):
var peer: PeerId
let innerPb = pb.enterPsMessage()
var peers = newSeq[seq[byte]]()
@@ -1255,13 +1260,12 @@ proc pubsubListPeers*(api: DaemonAPI,
finally:
await api.closeConnection(transp)
proc pubsubPublish*(api: DaemonAPI, topic: string,
value: seq[byte]) {.async.} =
proc pubsubPublish*(api: DaemonAPI, topic: string, value: seq[byte]) {.async.} =
## Publish message ``value`` to topic ``topic``.
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestPSPublish(topic, value))
withMessage(pb) do:
withMessage(pb):
discard
finally:
await api.closeConnection(transp)
@@ -1290,13 +1294,14 @@ proc pubsubLoop(api: DaemonAPI, ticket: PubsubTicket) {.async.} =
await ticket.transp.join()
break
proc pubsubSubscribe*(api: DaemonAPI, topic: string,
handler: P2PPubSubCallback): Future[PubsubTicket] {.async.} =
proc pubsubSubscribe*(
api: DaemonAPI, topic: string, handler: P2PPubSubCallback
): Future[PubsubTicket] {.async.} =
## Subscribe to topic ``topic``.
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestPSSubscribe(topic))
pb.withMessage() do:
pb.withMessage:
var ticket = new PubsubTicket
ticket.topic = topic
ticket.handler = handler
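Putting the pubsub calls above together, a hedged sketch (the exact `P2PPubSubCallback` signature, including the `PubSubMessage` parameter and the `bool` result that keeps the subscription alive, is an assumption not visible in this hunk):

proc onMessage(api: DaemonAPI, ticket: PubsubTicket,
               message: PubSubMessage): Future[bool] {.async.} =
  echo "message on topic ", ticket.topic
  return true # returning true keeps the subscription alive (assumed semantics)

proc pubsubExample(api: DaemonAPI) {.async.} =
  let ticket = await api.pubsubSubscribe("demo-topic", onMessage)
  await api.pubsubPublish("demo-topic", @[byte 1, 2, 3])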
@@ -1314,7 +1319,7 @@ proc shortLog*(pinfo: PeerInfo): string =
result.add($pinfo.peer.shortLog())
result.add("' Addresses: [")
let length = len(pinfo.addresses)
for i in 0..<length:
for i in 0 ..< length:
result.add("'")
result.add($pinfo.addresses[i])
result.add("'")


@@ -12,23 +12,24 @@
## This module implements a pool of StreamTransports.
import chronos
const
DefaultPoolSize* = 8
## Default pool size
const DefaultPoolSize* = 8 ## Default pool size
type
ConnectionFlags = enum
None, Busy
None
Busy
PoolItem = object
transp*: StreamTransport
flags*: set[ConnectionFlags]
PoolState = enum
Connecting, Connected, Closing, Closed
Connecting
Connected
Closing
Closed
TransportPool* = ref object
## Transports pool object
TransportPool* = ref object ## Transports pool object
transports: seq[PoolItem]
busyCount: int
state: PoolState
@@ -45,13 +46,16 @@ proc waitAll[T](futs: seq[Future[T]]): Future[void] =
dec(counter)
if counter == 0:
retFuture.complete()
for fut in futs:
fut.addCallback(cb)
return retFuture
proc newPool*(address: TransportAddress, poolsize: int = DefaultPoolSize,
bufferSize = DefaultStreamBufferSize,
): Future[TransportPool] {.async.} =
proc newPool*(
address: TransportAddress,
poolsize: int = DefaultPoolSize,
bufferSize = DefaultStreamBufferSize,
): Future[TransportPool] {.async.} =
## Establish a pool of connections to address ``address`` with size
## ``poolsize``.
var pool = new TransportPool
@@ -59,12 +63,12 @@ proc newPool*(address: TransportAddress, poolsize: int = DefaultPoolSize,
pool.transports = newSeq[PoolItem](poolsize)
var conns = newSeq[Future[StreamTransport]](poolsize)
pool.state = Connecting
for i in 0..<poolsize:
for i in 0 ..< poolsize:
conns[i] = connect(address, bufferSize)
# Waiting for all connections to be established.
await waitAll(conns)
# Checking connections and preparing pool.
for i in 0..<poolsize:
for i in 0 ..< poolsize:
if conns[i].failed:
raise conns[i].error
else:
@@ -134,7 +138,7 @@ proc close*(pool: TransportPool) {.async.} =
await pool.join()
# Closing all transports
var pending = newSeq[Future[void]](len(pool.transports))
for i in 0..<len(pool.transports):
for i in 0 ..< len(pool.transports):
let transp = pool.transports[i].transp
transp.close()
pending[i] = transp.join()
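A short lifecycle sketch for the pool API in this file (only `newPool`, `join` and `close` appear in this diff; the address is illustrative):

proc poolExample() {.async.} =
  # Opens DefaultPoolSize (8) connections to the address up front.
  let pool = await newPool(initTAddress("127.0.0.1:5001"))
  # ... acquire and use transports elsewhere ...
  await pool.close() # waits for the pool to quiesce, then closes all transports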


@@ -27,17 +27,24 @@
## 7. Message: required bytes
import os
import nimcrypto/utils, stew/endians2
import protobuf/minprotobuf, stream/connection, protocols/secure/secure,
multiaddress, peerid, varint, muxers/mplex/coder
import
protobuf/minprotobuf,
stream/connection,
protocols/secure/secure,
multiaddress,
peerid,
varint,
muxers/mplex/coder
from times import getTime, toUnix, fromUnix, nanosecond, format, Time,
NanosecondRange, initTime
from times import
getTime, toUnix, fromUnix, nanosecond, format, Time, NanosecondRange, initTime
from strutils import toHex, repeat
export peerid, multiaddress
type
FlowDirection* = enum
Outgoing, Incoming
Outgoing
Incoming
ProtoMessage* = object
timestamp*: uint64
@@ -48,11 +55,10 @@ type
local*: Opt[MultiAddress]
remote*: Opt[MultiAddress]
const
libp2p_dump_dir* {.strdefine.} = "nim-libp2p"
## default directory where all the dumps will be stored; if the path is
## relative it will be created in the home directory. You can override this
## path using ``-d:libp2p_dump_dir=<otherpath>``.
const libp2p_dump_dir* {.strdefine.} = "nim-libp2p"
## default directory where all the dumps will be stored; if the path is
## relative it will be created in the home directory. You can override this
## path using ``-d:libp2p_dump_dir=<otherpath>``.
proc getTimestamp(): uint64 =
## This procedure is present because `stdlib.times` is missing it.
@@ -65,8 +71,7 @@ proc getTimedate(value: uint64): string =
let time = initTime(int64(value div 1_000_000_000), value mod 1_000_000_000)
time.format("yyyy-MM-dd HH:mm:ss'.'fffzzz")
proc dumpMessage*(conn: SecureConn, direction: FlowDirection,
data: openArray[byte]) =
proc dumpMessage*(conn: SecureConn, direction: FlowDirection, data: openArray[byte]) =
## Store unencrypted message ``data`` to the dump file; all the metadata will be
## extracted from the ``conn`` instance.
var pb = initProtoBuffer(options = {WithVarintLength})
@@ -87,7 +92,7 @@ proc dumpMessage*(conn: SecureConn, direction: FlowDirection,
# This is a debugging procedure, so it should not generate any exceptions,
# and we are going to return at every possible OS error.
if not(dirExists(dirName)):
if not (dirExists(dirName)):
try:
createDir(dirName)
except CatchableError:
@@ -153,13 +158,11 @@ iterator messages*(data: seq[byte]): Opt[ProtoMessage] =
while offset < len(data):
value = 0
size = 0
let res = PB.getUVarint(data.toOpenArray(offset, len(data) - 1),
size, value)
let res = PB.getUVarint(data.toOpenArray(offset, len(data) - 1), size, value)
if res.isOk():
if (value > 0'u64) and (value < uint64(len(data) - offset)):
offset += size
yield decodeDumpMessage(data.toOpenArray(offset,
offset + int(value) - 1))
yield decodeDumpMessage(data.toOpenArray(offset, offset + int(value) - 1))
# value is previously checked to be less than len(data), which is `int`.
offset += int(value)
else:
@@ -179,10 +182,15 @@ proc dumpHex*(pbytes: openArray[byte], groupBy = 1, ascii = true): string =
for k in 0 ..< groupBy:
let ch = pbytes[offset + k]
ascii.add(if ord(ch) > 31 and ord(ch) < 127: char(ch) else: '.')
ascii.add(
if ord(ch) > 31 and ord(ch) < 127:
char(ch)
else:
'.'
)
let item =
case groupBy:
case groupBy
of 1:
toHex(pbytes[offset])
of 2:
@@ -204,8 +212,7 @@ proc dumpHex*(pbytes: openArray[byte], groupBy = 1, ascii = true): string =
res.add("\p")
if (offset mod 16) != 0:
let spacesCount = ((16 - (offset mod 16)) div groupBy) *
(groupBy * 2 + 1) + 1
let spacesCount = ((16 - (offset mod 16)) div groupBy) * (groupBy * 2 + 1) + 1
res = res & repeat(' ', spacesCount)
res = res & ascii
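As a quick illustration of `dumpHex` above (a sketch; the defaults are `groupBy = 1`, `ascii = true`):

let sample = @[byte 0x6e, 0x69, 0x6d, 0x2d, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70]
echo dumpHex(sample)                             # one byte per group, ASCII column on
echo dumpHex(sample, groupBy = 2, ascii = false) # two-byte groups, hex only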
@@ -233,25 +240,30 @@ proc toString*(msg: ProtoMessage, dump = true): string =
var res = getTimedate(msg.timestamp)
let direction =
case msg.direction
of Incoming:
" << "
of Outgoing:
" >> "
let address =
block:
let local = block:
msg.local.withValue(loc): "[" & $loc & "]"
else: "[LOCAL]"
let remote = block:
msg.remote.withValue(rem): "[" & $rem & "]"
else: "[REMOTE]"
local & direction & remote
of Incoming: " << "
of Outgoing: " >> "
let address = block:
let local = block:
msg.local.withValue(loc):
"[" & $loc & "]"
else:
"[LOCAL]"
let remote = block:
msg.remote.withValue(rem):
"[" & $rem & "]"
else:
"[REMOTE]"
local & direction & remote
let seqid = block:
msg.seqID.withValue(seqid): "seqID = " & $seqid & " "
else: ""
msg.seqID.withValue(seqid):
"seqID = " & $seqid & " "
else:
""
let mtype = block:
msg.mtype.withValue(typ): "type = " & $typ & " "
else: ""
msg.mtype.withValue(typ):
"type = " & $typ & " "
else:
""
res.add(" ")
res.add(address)
res.add(" ")


@@ -11,22 +11,20 @@
import chronos
import stew/results
import peerid,
stream/connection,
transports/transport
import peerid, stream/connection, transports/transport
export results
type
Dial* = ref object of RootObj
type Dial* = ref object of RootObj
method connect*(
self: Dial,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
dir = Direction.Out) {.async, base.} =
self: Dial,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
dir = Direction.Out,
) {.async, base.} =
## connect remote peer without negotiating
## a protocol
##
@@ -34,18 +32,15 @@ method connect*(
doAssert(false, "Not implemented!")
method connect*(
self: Dial,
address: MultiAddress,
allowUnknownPeerId = false): Future[PeerId] {.async, base.} =
self: Dial, address: MultiAddress, allowUnknownPeerId = false
): Future[PeerId] {.async, base.} =
## Connects to a peer and retrieves its PeerId
doAssert(false, "Not implemented!")
method dial*(
self: Dial,
peerId: PeerId,
protos: seq[string],
): Future[Connection] {.async, base.} =
self: Dial, peerId: PeerId, protos: seq[string]
): Future[Connection] {.async, base.} =
## create a protocol stream over an
## existing connection
##
@@ -53,24 +48,22 @@ method dial*(
doAssert(false, "Not implemented!")
method dial*(
self: Dial,
peerId: PeerId,
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false): Future[Connection] {.async, base.} =
self: Dial,
peerId: PeerId,
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false,
): Future[Connection] {.async, base.} =
## create a protocol stream and establish
## a connection if one doesn't exist already
##
doAssert(false, "Not implemented!")
method addTransport*(
self: Dial,
transport: Transport) {.base.} =
method addTransport*(self: Dial, transport: Transport) {.base.} =
doAssert(false, "Not implemented!")
method tryDial*(
self: Dial,
peerId: PeerId,
addrs: seq[MultiAddress]): Future[Opt[MultiAddress]] {.async, base.} =
self: Dial, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.async, base.} =
doAssert(false, "Not implemented!")
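These `Dial` methods are abstract stubs; the concrete implementation is `Dialer` in the next file. A hedged call-site sketch against the interface (assuming a `Switch`, which exposes `Dial`):

proc dialExample(switch: Switch, peerId: PeerId,
                 addrs: seq[MultiAddress]) {.async.} =
  # Establishes (or reuses) a connection and negotiates one of the protocols.
  let conn = await switch.dial(peerId, addrs, @["/demo/proto/1.0.0"])
  await conn.close()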


@@ -10,23 +10,22 @@
import std/tables
import stew/results
import pkg/[chronos,
chronicles,
metrics]
import pkg/[chronos, chronicles, metrics]
import dial,
peerid,
peerinfo,
peerstore,
multicodec,
muxers/muxer,
multistream,
connmanager,
stream/connection,
transports/transport,
nameresolving/nameresolver,
upgrademngrs/upgrade,
errors
import
dial,
peerid,
peerinfo,
peerstore,
multicodec,
muxers/muxer,
multistream,
connmanager,
stream/connection,
transports/transport,
nameresolving/nameresolver,
upgrademngrs/upgrade,
errors
export dial, errors, results
@@ -49,15 +48,14 @@ type
nameResolver: NameResolver
proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
hostname: string,
address: MultiAddress,
dir = Direction.Out):
Future[Muxer] {.async.} =
self: Dialer,
peerId: Opt[PeerId],
hostname: string,
address: MultiAddress,
dir = Direction.Out,
): Future[Muxer] {.async.} =
for transport in self.transports: # for each transport
if transport.handles(address): # check if it can dial it
if transport.handles(address): # check if it can dial it
trace "Dialing address", address, peerId = peerId.get(default(PeerId)), hostname
let dialed =
try:
@@ -88,7 +86,8 @@ proc dialAndUpgrade(
# If we failed to establish the connection through one transport,
# we won't succeeded through another - no use in trying again
await dialed.close()
debug "Connection upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
debug "Connection upgrade failed",
err = exc.msg, peerId = peerId.get(default(PeerId))
if dialed.dir == Direction.Out:
libp2p_failed_upgrades_outgoing.inc()
else:
@@ -103,13 +102,12 @@ proc dialAndUpgrade(
return nil
proc expandDnsAddr(
self: Dialer,
peerId: Opt[PeerId],
address: MultiAddress): Future[seq[(MultiAddress, Opt[PeerId])]] {.async.} =
if not DNSADDR.matchPartial(address): return @[(address, peerId)]
self: Dialer, peerId: Opt[PeerId], address: MultiAddress
): Future[seq[(MultiAddress, Opt[PeerId])]] {.async.} =
if not DNSADDR.matchPartial(address):
return @[(address, peerId)]
if isNil(self.nameResolver):
info "Can't resolve DNSADDR without NameResolver", ma=address
info "Can't resolve DNSADDR without NameResolver", ma = address
return @[]
let
@@ -126,17 +124,13 @@ proc expandDnsAddr(
let
peerIdBytes = lastPart.protoArgument().tryGet()
addrPeerId = PeerId.init(peerIdBytes).tryGet()
result.add((resolvedAddress[0..^2].tryGet(), Opt.some(addrPeerId)))
result.add((resolvedAddress[0 ..^ 2].tryGet(), Opt.some(addrPeerId)))
else:
result.add((resolvedAddress, peerId))
proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress],
dir = Direction.Out):
Future[Muxer] {.async.} =
self: Dialer, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
): Future[Muxer] {.async.} =
debug "Dialing peer", peerId = peerId.get(default(PeerId)), addrs
for rawAddress in addrs:
@@ -148,8 +142,10 @@ proc dialAndUpgrade(
let
hostname = expandedAddress.getHostname()
resolvedAddresses =
if isNil(self.nameResolver): @[expandedAddress]
else: await self.nameResolver.resolveMAddress(expandedAddress)
if isNil(self.nameResolver):
@[expandedAddress]
else:
await self.nameResolver.resolveMAddress(expandedAddress)
for resolvedAddress in resolvedAddresses:
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
@@ -165,13 +161,13 @@ proc tryReusingConnection(self: Dialer, peerId: PeerId): Opt[Muxer] =
return Opt.some(muxer)
proc internalConnect(
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress],
forceDial: bool,
reuseConnection = true,
dir = Direction.Out):
Future[Muxer] {.async.} =
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress],
forceDial: bool,
reuseConnection = true,
dir = Direction.Out,
): Future[Muxer] {.async.} =
if Opt.some(self.localPeerId) == peerId:
raise newException(CatchableError, "can't dial self!")
@@ -200,7 +196,7 @@ proc internalConnect(
self.connManager.storeMuxer(muxed)
await self.peerStore.identify(muxed)
except CatchableError as exc:
trace "Failed to finish outgoung upgrade", err=exc.msg
trace "Failed to finish outgoung upgrade", err = exc.msg
await muxed.close()
raise exc
@@ -210,12 +206,13 @@ proc internalConnect(
lock.release()
method connect*(
self: Dialer,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
dir = Direction.Out) {.async.} =
self: Dialer,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
dir = Direction.Out,
) {.async.} =
## connect remote peer without negotiating
## a protocol
##
@@ -223,32 +220,30 @@ method connect*(
if self.connManager.connCount(peerId) > 0 and reuseConnection:
return
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, dir)
discard
await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, dir)
method connect*(
self: Dialer,
address: MultiAddress,
allowUnknownPeerId = false): Future[PeerId] {.async.} =
self: Dialer, address: MultiAddress, allowUnknownPeerId = false
): Future[PeerId] {.async.} =
## Connects to a peer and retrieves its PeerId
parseFullAddress(address).toOpt().withValue(fullAddress):
return (await self.internalConnect(
Opt.some(fullAddress[0]),
@[fullAddress[1]],
false)).connection.peerId
return (
await self.internalConnect(Opt.some(fullAddress[0]), @[fullAddress[1]], false)
).connection.peerId
if allowUnknownPeerId == false:
raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
raise newException(
DialFailedError, "Address without PeerID and unknown peer id disabled!"
)
return (await self.internalConnect(
Opt.none(PeerId),
@[address],
false)).connection.peerId
return
(await self.internalConnect(Opt.none(PeerId), @[address], false)).connection.peerId
proc negotiateStream(
self: Dialer,
conn: Connection,
protos: seq[string]): Future[Connection] {.async.} =
self: Dialer, conn: Connection, protos: seq[string]
): Future[Connection] {.async.} =
trace "Negotiating stream", conn, protos
let selected = await MultistreamSelect.select(conn, protos)
if not protos.contains(selected):
@@ -258,9 +253,8 @@ proc negotiateStream(
return conn
method tryDial*(
self: Dialer,
peerId: PeerId,
addrs: seq[MultiAddress]): Future[Opt[MultiAddress]] {.async.} =
self: Dialer, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.async.} =
## Create a protocol stream in order to check
## if a connection is possible.
## Doesn't use the Connection Manager to save it.
@@ -279,9 +273,8 @@ method tryDial*(
raise newException(DialFailedError, exc.msg)
method dial*(
self: Dialer,
peerId: PeerId,
protos: seq[string]): Future[Connection] {.async.} =
self: Dialer, peerId: PeerId, protos: seq[string]
): Future[Connection] {.async.} =
## create a protocol stream over an
## existing connection
##
@@ -294,11 +287,12 @@ method dial*(
return await self.negotiateStream(stream, protos)
method dial*(
self: Dialer,
peerId: PeerId,
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false): Future[Connection] {.async.} =
self: Dialer,
peerId: PeerId,
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false,
): Future[Connection] {.async.} =
## create a protocol stream and establish
## a connection if one doesn't exist already
##
@@ -308,10 +302,10 @@ method dial*(
stream: Connection
proc cleanup() {.async.} =
if not(isNil(stream)):
if not (isNil(stream)):
await stream.closeWithEOF()
if not(isNil(conn)):
if not (isNil(conn)):
await conn.close()
try:
@@ -321,8 +315,7 @@ method dial*(
stream = await self.connManager.getStream(conn)
if isNil(stream):
raise newException(DialFailedError,
"Couldn't get muxed stream")
raise newException(DialFailedError, "Couldn't get muxed stream")
return await self.negotiateStream(stream, protos)
except CancelledError as exc:
@@ -338,15 +331,17 @@ method addTransport*(self: Dialer, t: Transport) =
self.transports &= t
proc new*(
T: type Dialer,
localPeerId: PeerId,
connManager: ConnManager,
peerStore: PeerStore,
transports: seq[Transport],
nameResolver: NameResolver = nil): Dialer =
T(localPeerId: localPeerId,
T: type Dialer,
localPeerId: PeerId,
connManager: ConnManager,
peerStore: PeerStore,
transports: seq[Transport],
nameResolver: NameResolver = nil,
): Dialer =
T(
localPeerId: localPeerId,
connManager: connManager,
transports: transports,
peerStore: peerStore,
nameResolver: nameResolver)
nameResolver: nameResolver,
)


@@ -33,12 +33,13 @@ proc ofType*[T](f: BaseAttr, _: type[T]): bool =
proc to*[T](f: BaseAttr, _: type[T]): T =
Attribute[T](f).value
proc add*[T](pa: var PeerAttributes,
value: T) =
pa.attributes.add(Attribute[T](
proc add*[T](pa: var PeerAttributes, value: T) =
pa.attributes.add(
Attribute[T](
value: value,
comparator: proc(f: BaseAttr, c: BaseAttr): bool =
f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T)
,
)
)
@@ -58,7 +59,8 @@ proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =
Opt.none(T)
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [KeyError].} =
pa{T}.valueOr: raise newException(KeyError, "Attribute not found")
pa{T}.valueOr:
raise newException(KeyError, "Attribute not found")
proc match*(pa, candidate: PeerAttributes): bool =
for f in pa.attributes:
@@ -101,7 +103,7 @@ type
proc add*(dm: DiscoveryManager, di: DiscoveryInterface) =
dm.interfaces &= di
di.onPeerFound = proc (pa: PeerAttributes) =
di.onPeerFound = proc(pa: PeerAttributes) =
for query in dm.queries:
if query.attr.match(pa):
try:
@@ -139,8 +141,10 @@ template forEach*(query: DiscoveryQuery, code: untyped) =
proc forEachInternal(q: DiscoveryQuery) {.async.} =
while true:
let peer {.inject.} =
try: await q.getPeer()
except DiscoveryFinished: return
try:
await q.getPeer()
except DiscoveryFinished:
return
code
asyncSpawn forEachInternal(query)
@@ -148,13 +152,15 @@ template forEach*(query: DiscoveryQuery, code: untyped) =
proc stop*(query: DiscoveryQuery) =
query.finished = true
for r in query.futs:
if not r.finished(): r.cancel()
if not r.finished():
r.cancel()
proc stop*(dm: DiscoveryManager) =
for q in dm.queries:
q.stop()
for i in dm.interfaces:
if isNil(i.advertiseLoop): continue
if isNil(i.advertiseLoop):
continue
i.advertiseLoop.cancel()
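A hedged sketch of how the pieces above fit together (`dm.request`, the usual way to obtain a `DiscoveryQuery`, is assumed here since it does not appear in this hunk):

proc discoveryExample(dm: DiscoveryManager, target: PeerId) =
  var attrs: PeerAttributes
  attrs.add(target)             # any value type works with the generic add[T]
  let query = dm.request(attrs) # assumed API, not shown above
  query.forEach:
    echo "discovered: ", peer[PeerId] # `peer` is injected by the template
  # later: query.stop() and, on shutdown, dm.stop()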
proc getPeer*(query: DiscoveryQuery): Future[PeerAttributes] {.async.} =


@@ -10,9 +10,7 @@
{.push raises: [].}
import chronos
import ./discoverymngr,
../protocols/rendezvous,
../peerid
import ./discoverymngr, ../protocols/rendezvous, ../peerid
type
RendezVousInterface* = ref object of DiscoveryInterface
@@ -70,9 +68,11 @@ method advertise*(self: RendezVousInterface) {.async.} =
await sleepAsync(self.timeToAdvertise) or self.advertisementUpdated.wait()
proc new*(T: typedesc[RendezVousInterface],
rdv: RendezVous,
ttr: Duration = 1.minutes,
tta: Duration = 1.minutes,
ttl: Duration = MinimumDuration): RendezVousInterface =
proc new*(
T: typedesc[RendezVousInterface],
rdv: RendezVous,
ttr: Duration = 1.minutes,
tta: Duration = 1.minutes,
ttl: Duration = MinimumDuration,
): RendezVousInterface =
T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta, ttl: ttl)
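For instance, wiring the interface into a `DiscoveryManager` might look like this (a sketch; `RendezVous.new(switch)` is assumed, as the constructor is not part of this hunk):

proc setupRdvDiscovery(dm: DiscoveryManager, switch: Switch) =
  let rdv = RendezVous.new(switch) # assumed constructor
  dm.add(RendezVousInterface.new(rdv, ttr = 30.seconds, tta = 2.minutes))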


@@ -24,23 +24,25 @@ macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
let nexclude = exclude.len
case nexclude
of 0:
quote do:
quote:
for res in `futs`:
if res.failed:
let exc = res.readError()
# We still don't abort but warn
debug "A future has failed, enable trace logging for details", error = exc.name
trace "Exception message", msg= exc.msg, stack = getStackTrace()
debug "A future has failed, enable trace logging for details",
error = exc.name
trace "Exception message", msg = exc.msg, stack = getStackTrace()
else:
quote do:
quote:
for res in `futs`:
block check:
if res.failed:
let exc = res.readError()
for i in 0..<`nexclude`:
for i in 0 ..< `nexclude`:
if exc of `exclude`[i]:
trace "A future has failed", error=exc.name, msg=exc.msg
trace "A future has failed", error = exc.name, msg = exc.msg
break check
# We still don't abort but warn
debug "A future has failed, enable trace logging for details", error=exc.name
trace "Exception details", msg=exc.msg
debug "A future has failed, enable trace logging for details",
error = exc.name
trace "Exception details", msg = exc.msg


@@ -15,8 +15,16 @@
import pkg/chronos, chronicles
import std/[nativesockets, net, hashes]
import tables, strutils, sets
import multicodec, multihash, multibase, transcoder, vbuffer, peerid,
protobuf/minprotobuf, errors, utility
import
multicodec,
multihash,
multibase,
transcoder,
vbuffer,
peerid,
protobuf/minprotobuf,
errors,
utility
import stew/[base58, base32, endians2, results]
export results, minprotobuf, vbuffer, utility
@@ -25,7 +33,11 @@ logScope:
type
MAKind* = enum
None, Fixed, Length, Path, Marker
None
Fixed
Length
Path
Marker
MAProtocol* = object
mcodec*: MultiCodec
@@ -37,7 +49,9 @@ type
data: VBuffer
MaPatternOp* = enum
Eq, Or, And
Eq
Or
And
MaPattern* = object
operator*: MaPatternOp
@@ -128,17 +142,11 @@ template pathStringToBuffer(s: string, vb: var VBuffer): bool =
template pathBufferToString(vb: var VBuffer, s: var string): bool =
s = ""
if (vb.readSeq(s) > 0) and (len(s) > 0):
true
else:
false
if (vb.readSeq(s) > 0) and (len(s) > 0): true else: false
template pathBufferToStringNoSlash(vb: var VBuffer, s: var string): bool =
s = ""
if (vb.readSeq(s) > 0) and (len(s) > 0) and (s.find('/') == -1):
true
else:
false
if (vb.readSeq(s) > 0) and (len(s) > 0) and (s.find('/') == -1): true else: false
template pathValidateBuffer(vb: var VBuffer): bool =
var s = ""
@@ -177,8 +185,7 @@ proc portBtS(vb: var VBuffer, s: var string): bool =
## Port number bufferToString() implementation.
var port: array[2, byte]
if vb.readArray(port) == 2:
let nport =
(safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
let nport = (safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
s = $nport
result = true
@@ -238,8 +245,7 @@ proc onionBtS(vb: var VBuffer, s: var string): bool =
## ONION address bufferToString() implementation.
var buf: array[12, byte]
if vb.readArray(buf) == 12:
let nport =
(safeConvert[uint16](buf[10]) shl 8) or safeConvert[uint16](buf[11])
let nport = (safeConvert[uint16](buf[10]) shl 8) or safeConvert[uint16](buf[11])
s = Base32Lower.encode(buf.toOpenArray(0, 9))
s.add(":")
s.add($nport)
@@ -273,8 +279,7 @@ proc onion3BtS(vb: var VBuffer, s: var string): bool =
## ONION address bufferToString() implementation.
var buf: array[37, byte]
if vb.readArray(buf) == 37:
var nport =
(safeConvert[uint16](buf[35]) shl 8) or safeConvert[uint16](buf[36])
var nport = (safeConvert[uint16](buf[35]) shl 8) or safeConvert[uint16](buf[36])
s = Base32Lower.encode(buf.toOpenArray(0, 34))
s.add(":")
s.add($nport)
@@ -326,155 +331,65 @@ proc mapAnd*(args: varargs[MaPattern]): MaPattern =
result.args = @args
const
TranscoderIP4* = Transcoder(
stringToBuffer: ip4StB,
bufferToString: ip4BtS,
validateBuffer: ip4VB
)
TranscoderIP6* = Transcoder(
stringToBuffer: ip6StB,
bufferToString: ip6BtS,
validateBuffer: ip6VB
)
TranscoderIP4* =
Transcoder(stringToBuffer: ip4StB, bufferToString: ip4BtS, validateBuffer: ip4VB)
TranscoderIP6* =
Transcoder(stringToBuffer: ip6StB, bufferToString: ip6BtS, validateBuffer: ip6VB)
TranscoderIP6Zone* = Transcoder(
stringToBuffer: ip6zoneStB,
bufferToString: ip6zoneBtS,
validateBuffer: ip6zoneVB
)
TranscoderUnix* = Transcoder(
stringToBuffer: unixStB,
bufferToString: unixBtS,
validateBuffer: unixVB
)
TranscoderP2P* = Transcoder(
stringToBuffer: p2pStB,
bufferToString: p2pBtS,
validateBuffer: p2pVB
)
TranscoderPort* = Transcoder(
stringToBuffer: portStB,
bufferToString: portBtS,
validateBuffer: portVB
stringToBuffer: ip6zoneStB, bufferToString: ip6zoneBtS, validateBuffer: ip6zoneVB
)
TranscoderUnix* =
Transcoder(stringToBuffer: unixStB, bufferToString: unixBtS, validateBuffer: unixVB)
TranscoderP2P* =
Transcoder(stringToBuffer: p2pStB, bufferToString: p2pBtS, validateBuffer: p2pVB)
TranscoderPort* =
Transcoder(stringToBuffer: portStB, bufferToString: portBtS, validateBuffer: portVB)
TranscoderOnion* = Transcoder(
stringToBuffer: onionStB,
bufferToString: onionBtS,
validateBuffer: onionVB
stringToBuffer: onionStB, bufferToString: onionBtS, validateBuffer: onionVB
)
TranscoderOnion3* = Transcoder(
stringToBuffer: onion3StB,
bufferToString: onion3BtS,
validateBuffer: onion3VB
)
TranscoderDNS* = Transcoder(
stringToBuffer: dnsStB,
bufferToString: dnsBtS,
validateBuffer: dnsVB
stringToBuffer: onion3StB, bufferToString: onion3BtS, validateBuffer: onion3VB
)
TranscoderDNS* =
Transcoder(stringToBuffer: dnsStB, bufferToString: dnsBtS, validateBuffer: dnsVB)
ProtocolsList = [
MAProtocol(mcodec: multiCodec("ip4"), kind: Fixed, size: 4, coder: TranscoderIP4),
MAProtocol(mcodec: multiCodec("tcp"), kind: Fixed, size: 2, coder: TranscoderPort),
MAProtocol(mcodec: multiCodec("udp"), kind: Fixed, size: 2, coder: TranscoderPort),
MAProtocol(mcodec: multiCodec("ip6"), kind: Fixed, size: 16, coder: TranscoderIP6),
MAProtocol(mcodec: multiCodec("dccp"), kind: Fixed, size: 2, coder: TranscoderPort),
MAProtocol(mcodec: multiCodec("sctp"), kind: Fixed, size: 2, coder: TranscoderPort),
MAProtocol(mcodec: multiCodec("udt"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("utp"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("http"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("https"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("quic"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("quic-v1"), kind: Marker, size: 0),
MAProtocol(
mcodec: multiCodec("ip4"), kind: Fixed, size: 4,
coder: TranscoderIP4
mcodec: multiCodec("ip6zone"), kind: Length, size: 0, coder: TranscoderIP6Zone
),
MAProtocol(
mcodec: multiCodec("tcp"), kind: Fixed, size: 2,
coder: TranscoderPort
mcodec: multiCodec("onion"), kind: Fixed, size: 10, coder: TranscoderOnion
),
MAProtocol(
mcodec: multiCodec("udp"), kind: Fixed, size: 2,
coder: TranscoderPort
mcodec: multiCodec("onion3"), kind: Fixed, size: 37, coder: TranscoderOnion3
),
MAProtocol(mcodec: multiCodec("ws"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("wss"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("tls"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("ipfs"), kind: Length, size: 0, coder: TranscoderP2P),
MAProtocol(mcodec: multiCodec("p2p"), kind: Length, size: 0, coder: TranscoderP2P),
MAProtocol(mcodec: multiCodec("unix"), kind: Path, size: 0, coder: TranscoderUnix),
MAProtocol(mcodec: multiCodec("dns"), kind: Length, size: 0, coder: TranscoderDNS),
MAProtocol(mcodec: multiCodec("dns4"), kind: Length, size: 0, coder: TranscoderDNS),
MAProtocol(mcodec: multiCodec("dns6"), kind: Length, size: 0, coder: TranscoderDNS),
MAProtocol(
mcodec: multiCodec("ip6"), kind: Fixed, size: 16,
coder: TranscoderIP6
mcodec: multiCodec("dnsaddr"), kind: Length, size: 0, coder: TranscoderDNS
),
MAProtocol(
mcodec: multiCodec("dccp"), kind: Fixed, size: 2,
coder: TranscoderPort
),
MAProtocol(
mcodec: multiCodec("sctp"), kind: Fixed, size: 2,
coder: TranscoderPort
),
MAProtocol(
mcodec: multiCodec("udt"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("utp"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("http"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("https"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("quic"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("quic-v1"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("ip6zone"), kind: Length, size: 0,
coder: TranscoderIP6Zone
),
MAProtocol(
mcodec: multiCodec("onion"), kind: Fixed, size: 10,
coder: TranscoderOnion
),
MAProtocol(
mcodec: multiCodec("onion3"), kind: Fixed, size: 37,
coder: TranscoderOnion3
),
MAProtocol(
mcodec: multiCodec("ws"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("wss"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("tls"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("ipfs"), kind: Length, size: 0,
coder: TranscoderP2P
),
MAProtocol(
mcodec: multiCodec("p2p"), kind: Length, size: 0,
coder: TranscoderP2P
),
MAProtocol(
mcodec: multiCodec("unix"), kind: Path, size: 0,
coder: TranscoderUnix
),
MAProtocol(
mcodec: multiCodec("dns"), kind: Length, size: 0,
coder: TranscoderDNS
),
MAProtocol(
mcodec: multiCodec("dns4"), kind: Length, size: 0,
coder: TranscoderDNS
),
MAProtocol(
mcodec: multiCodec("dns6"), kind: Length, size: 0,
coder: TranscoderDNS
),
MAProtocol(
mcodec: multiCodec("dnsaddr"), kind: Length, size: 0,
coder: TranscoderDNS
),
MAProtocol(
mcodec: multiCodec("p2p-circuit"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("p2p-websocket-star"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("p2p-webrtc-star"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("p2p-webrtc-direct"), kind: Marker, size: 0
)
MAProtocol(mcodec: multiCodec("p2p-circuit"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("p2p-websocket-star"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("p2p-webrtc-star"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("p2p-webrtc-direct"), kind: Marker, size: 0),
]
DNSANY* = mapEq("dns")
@@ -517,31 +432,24 @@ const
IPFS* = mapAnd(Reliable, P2PPattern)
HTTP* = mapOr(
mapAnd(TCP, mapEq("http")),
mapAnd(IP, mapEq("http")),
mapAnd(DNS, mapEq("http"))
mapAnd(TCP, mapEq("http")), mapAnd(IP, mapEq("http")), mapAnd(DNS, mapEq("http"))
)
HTTPS* = mapOr(
mapAnd(TCP, mapEq("https")),
mapAnd(IP, mapEq("https")),
mapAnd(DNS, mapEq("https"))
mapAnd(TCP, mapEq("https")), mapAnd(IP, mapEq("https")), mapAnd(DNS, mapEq("https"))
)
WebRTCDirect* = mapOr(
mapAnd(HTTP, mapEq("p2p-webrtc-direct")),
mapAnd(HTTPS, mapEq("p2p-webrtc-direct"))
mapAnd(HTTP, mapEq("p2p-webrtc-direct")), mapAnd(HTTPS, mapEq("p2p-webrtc-direct"))
)
CircuitRelay* = mapEq("p2p-circuit")
proc initMultiAddressCodeTable(): Table[MultiCodec,
MAProtocol] {.compileTime.} =
proc initMultiAddressCodeTable(): Table[MultiCodec, MAProtocol] {.compileTime.} =
for item in ProtocolsList:
result[item.mcodec] = item
const
CodeAddresses = initMultiAddressCodeTable()
const CodeAddresses = initMultiAddressCodeTable()
proc trimRight(s: string, ch: char): string =
## Consume trailing characters ``ch`` from string ``s`` and return result.
@@ -551,7 +459,7 @@ proc trimRight(s: string, ch: char): string =
inc(m)
else:
break
result = s[0..(s.high - m)]
result = s[0 .. (s.high - m)]
proc protoCode*(ma: MultiAddress): MaResult[MultiCodec] =
## Returns MultiAddress ``ma`` protocol code.
@@ -579,8 +487,7 @@ proc protoName*(ma: MultiAddress): MaResult[string] =
else:
ok($(proto.mcodec))
proc protoArgument*(ma: MultiAddress,
value: var openArray[byte]): MaResult[int] =
proc protoArgument*(ma: MultiAddress, value: var openArray[byte]): MaResult[int] =
## Returns MultiAddress ``ma`` protocol argument value.
##
## If the current MultiAddress does not have an argument value, then the result will be
@@ -599,7 +506,7 @@ proc protoArgument*(ma: MultiAddress,
if proto.kind == Fixed:
res = proto.size
if len(value) >= res and
vb.data.readArray(value.toOpenArray(0, proto.size - 1)) != proto.size:
vb.data.readArray(value.toOpenArray(0, proto.size - 1)) != proto.size:
err("multiaddress: Decoding protocol error")
else:
ok(res)
@@ -620,7 +527,7 @@ proc protoAddress*(ma: MultiAddress): MaResult[seq[byte]] =
## If the current MultiAddress does not have an argument value, then the result array
## will be empty.
var buffer = newSeq[byte](len(ma.data.buffer))
let res = ? protoArgument(ma, buffer)
let res = ?protoArgument(ma, buffer)
buffer.setLen(res)
ok(buffer)
@@ -639,7 +546,8 @@ proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
var res: MultiAddress
res.data = initVBuffer()
if index < 0: return err("multiaddress: negative index given to getPart")
if index < 0:
return err("multiaddress: negative index given to getPart")
while offset <= index:
if vb.data.readVarint(header) == -1:
@@ -648,7 +556,6 @@ proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
let proto = CodeAddresses.getOrDefault(MultiCodec(header))
if proto.kind == None:
return err("multiaddress: Unsupported protocol '" & $header & "'")
elif proto.kind == Fixed:
data.setLen(proto.size)
if vb.data.readArray(data) != proto.size:
@@ -673,26 +580,29 @@ proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
inc(offset)
ok(res)
proc getParts[U, V](ma: MultiAddress,
slice: HSlice[U, V]): MaResult[MultiAddress] =
proc getParts[U, V](ma: MultiAddress, slice: HSlice[U, V]): MaResult[MultiAddress] =
when slice.a is BackwardsIndex or slice.b is BackwardsIndex:
let maLength = ? len(ma)
let maLength = ?len(ma)
template normalizeIndex(index): int =
when index is BackwardsIndex: maLength - int(index)
else: int(index)
when index is BackwardsIndex:
maLength - int(index)
else:
int(index)
let
indexStart = normalizeIndex(slice.a)
indexEnd = normalizeIndex(slice.b)
var res: MultiAddress
for i in indexStart..indexEnd:
? res.append(? ma[i])
for i in indexStart .. indexEnd:
?res.append(?ma[i])
ok(res)
proc `[]`*(ma: MultiAddress,
i: int | BackwardsIndex): MaResult[MultiAddress] {.inline.} =
proc `[]`*(
ma: MultiAddress, i: int | BackwardsIndex
): MaResult[MultiAddress] {.inline.} =
## Returns part with index ``i`` of MultiAddress ``ma``.
when i is BackwardsIndex:
let maLength = ? len(ma)
let maLength = ?len(ma)
ma.getPart(maLength - int(i))
else:
ma.getPart(i)
@@ -716,9 +626,7 @@ iterator items*(ma: MultiAddress): MaResult[MultiAddress] =
let proto = CodeAddresses.getOrDefault(MultiCodec(header))
if proto.kind == None:
yield err(MaResult[MultiAddress], "Unsupported protocol '" &
$header & "'")
yield err(MaResult[MultiAddress], "Unsupported protocol '" & $header & "'")
elif proto.kind == Fixed:
data.setLen(proto.size)
if vb.data.readArray(data) != proto.size:
@@ -740,7 +648,8 @@ iterator items*(ma: MultiAddress): MaResult[MultiAddress] =
proc len*(ma: MultiAddress): MaResult[int] =
var counter: int
for part in ma:
if part.isErr: return err(part.error)
if part.isErr:
return err(part.error)
counter.inc()
ok(counter)
@@ -753,8 +662,7 @@ proc contains*(ma: MultiAddress, codec: MultiCodec): MaResult[bool] {.inline.} =
return ok(true)
ok(false)
proc `[]`*(ma: MultiAddress,
codec: MultiCodec): MaResult[MultiAddress] {.inline.} =
proc `[]`*(ma: MultiAddress, codec: MultiCodec): MaResult[MultiAddress] {.inline.} =
## Returns partial MultiAddress with MultiCodec ``codec`` and present in
## MultiAddress ``ma``.
for item in ma.items:
@@ -779,13 +687,12 @@ proc toString*(value: MultiAddress): MaResult[string] =
return err("multiaddress: Unsupported protocol '" & $header & "'")
if proto.kind in {Fixed, Length, Path}:
if isNil(proto.coder.bufferToString):
return err("multiaddress: Missing protocol '" & $(proto.mcodec) &
"' coder")
return err("multiaddress: Missing protocol '" & $(proto.mcodec) & "' coder")
if not proto.coder.bufferToString(vb.data, part):
return err("multiaddress: Decoding protocol error")
parts.add($(proto.mcodec))
if len(part) > 0 and (proto.kind == Path) and (part[0] == '/'):
parts.add(part[1..^1])
parts.add(part[1 ..^ 1])
else:
parts.add(part)
elif proto.kind == Marker:
@@ -797,8 +704,10 @@ proc toString*(value: MultiAddress): MaResult[string] =
proc `$`*(value: MultiAddress): string =
## Return string representation of MultiAddress ``value``.
let s = value.toString()
if s.isErr: s.error
else: s[]
if s.isErr:
s.error
else:
s[]
proc protocols*(value: MultiAddress): MaResult[seq[MultiCodec]] =
## Returns list of protocol codecs inside of MultiAddress ``value``.
@@ -815,8 +724,9 @@ proc write*(vb: var VBuffer, ma: MultiAddress) {.inline.} =
## Write MultiAddress value ``ma`` to buffer ``vb``.
vb.writeArray(ma.data.buffer)
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
ma: MultiAddress): string {.inline.} =
proc encode*(
mbtype: typedesc[MultiBase], encoding: string, ma: MultiAddress
): string {.inline.} =
## Get MultiBase encoded representation of ``ma`` using encoding
## ``encoding``.
result = MultiBase.encode(encoding, ma.data.buffer)
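For example (a sketch; ``base58btc`` is one of the encodings understood by the `MultiBase` module):

let ma = MultiAddress.init("/ip4/127.0.0.1/tcp/4001").tryGet()
echo MultiBase.encode("base58btc", ma) # multibase-prefixed text form of the bytes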
@@ -843,8 +753,8 @@ proc validate*(ma: MultiAddress): bool =
result = true
proc init*(
mtype: typedesc[MultiAddress], protocol: MultiCodec,
value: openArray[byte] = []): MaResult[MultiAddress] =
mtype: typedesc[MultiAddress], protocol: MultiCodec, value: openArray[byte] = []
): MaResult[MultiAddress] =
## Initialize MultiAddress object from protocol id ``protocol`` and array
## of bytes ``value``.
let proto = CodeAddresses.getOrDefault(protocol)
@@ -874,19 +784,21 @@ proc init*(
of None:
raiseAssert "None checked above"
proc init*(mtype: typedesc[MultiAddress], protocol: MultiCodec,
value: PeerId): MaResult[MultiAddress] {.inline.} =
proc init*(
mtype: typedesc[MultiAddress], protocol: MultiCodec, value: PeerId
): MaResult[MultiAddress] {.inline.} =
## Initialize MultiAddress object from protocol id ``protocol`` and peer id
## ``value``.
init(mtype, protocol, value.data)
proc init*(mtype: typedesc[MultiAddress], protocol: MultiCodec,
value: int): MaResult[MultiAddress] =
proc init*(
mtype: typedesc[MultiAddress], protocol: MultiCodec, value: int
): MaResult[MultiAddress] =
## Initialize MultiAddress object from protocol id ``protocol`` and integer
## ``value``. This procedure can be used to instantiate ``tcp``, ``udp``,
## ``dccp`` and ``sctp`` MultiAddresses.
var allowed = [multiCodec("tcp"), multiCodec("udp"), multiCodec("dccp"),
multiCodec("sctp")]
var allowed =
[multiCodec("tcp"), multiCodec("udp"), multiCodec("dccp"), multiCodec("sctp")]
if protocol notin allowed:
err("multiaddress: Incorrect protocol for integer value")
else:
@@ -906,8 +818,7 @@ proc getProtocol(name: string): MAProtocol {.inline.} =
if mc != InvalidMultiCodec:
result = CodeAddresses.getOrDefault(mc)
proc init*(mtype: typedesc[MultiAddress],
value: string): MaResult[MultiAddress] =
proc init*(mtype: typedesc[MultiAddress], value: string): MaResult[MultiAddress] =
## Initialize MultiAddress object from string representation ``value``.
if len(value) == 0 or value == "/":
return err("multiaddress: Address must not be empty!")
@@ -926,8 +837,7 @@ proc init*(mtype: typedesc[MultiAddress],
else:
if proto.kind in {Fixed, Length, Path}:
if isNil(proto.coder.stringToBuffer):
return err("multiaddress: Missing protocol '" &
part & "' transcoder")
return err("multiaddress: Missing protocol '" & part & "' transcoder")
if offset + 1 >= len(parts):
return err("multiaddress: Missing protocol '" & part & "' argument")
@@ -936,16 +846,15 @@ proc init*(mtype: typedesc[MultiAddress],
res.data.write(proto.mcodec)
let res = proto.coder.stringToBuffer(parts[offset + 1], res.data)
if not res:
return err("multiaddress: Error encoding `" & part & "/" &
parts[offset + 1] & "`")
return err(
"multiaddress: Error encoding `" & part & "/" & parts[offset + 1] & "`"
)
offset += 2
elif proto.kind == Path:
var path = "/" & (parts[(offset + 1)..^1].join("/"))
var path = "/" & (parts[(offset + 1) ..^ 1].join("/"))
res.data.write(proto.mcodec)
if not proto.coder.stringToBuffer(path, res.data):
return err("multiaddress: Error encoding `" & part & "/" &
path & "`")
return err("multiaddress: Error encoding `" & part & "/" & path & "`")
break
elif proto.kind == Marker:
@@ -954,8 +863,9 @@ proc init*(mtype: typedesc[MultiAddress],
res.data.finish()
ok(res)
proc init*(mtype: typedesc[MultiAddress],
data: openArray[byte]): MaResult[MultiAddress] =
proc init*(
mtype: typedesc[MultiAddress], data: openArray[byte]
): MaResult[MultiAddress] =
## Initialize MultiAddress with array of bytes ``data``.
if len(data) == 0:
err("multiaddress: Address must not be empty!")
@@ -973,38 +883,55 @@ proc init*(mtype: typedesc[MultiAddress]): MultiAddress =
## Initialize empty MultiAddress.
result.data = initVBuffer()
proc init*(mtype: typedesc[MultiAddress], address: IpAddress,
protocol: IpTransportProtocol, port: Port): MultiAddress =
proc init*(
mtype: typedesc[MultiAddress],
address: IpAddress,
protocol: IpTransportProtocol,
port: Port,
): MultiAddress =
var res: MultiAddress
res.data = initVBuffer()
let
networkProto = case address.family
of IpAddressFamily.IPv4: getProtocol("ip4")
of IpAddressFamily.IPv6: getProtocol("ip6")
networkProto =
case address.family
of IpAddressFamily.IPv4:
getProtocol("ip4")
of IpAddressFamily.IPv6:
getProtocol("ip6")
transportProto = case protocol
of tcpProtocol: getProtocol("tcp")
of udpProtocol: getProtocol("udp")
transportProto =
case protocol
of tcpProtocol:
getProtocol("tcp")
of udpProtocol:
getProtocol("udp")
res.data.write(networkProto.mcodec)
case address.family
of IpAddressFamily.IPv4: res.data.writeArray(address.address_v4)
of IpAddressFamily.IPv6: res.data.writeArray(address.address_v6)
of IpAddressFamily.IPv4:
res.data.writeArray(address.address_v4)
of IpAddressFamily.IPv6:
res.data.writeArray(address.address_v6)
res.data.write(transportProto.mcodec)
res.data.writeArray(toBytesBE(uint16(port)))
res.data.finish()
res
proc init*(mtype: typedesc[MultiAddress], address: TransportAddress,
protocol = IPPROTO_TCP): MaResult[MultiAddress] =
proc init*(
mtype: typedesc[MultiAddress], address: TransportAddress, protocol = IPPROTO_TCP
): MaResult[MultiAddress] =
## Initialize MultiAddress using chronos.TransportAddress (IPv4/IPv6/Unix)
## and protocol information (UDP/TCP).
var res: MultiAddress
res.data = initVBuffer()
let protoProto = case protocol
of IPPROTO_TCP: getProtocol("tcp")
of IPPROTO_UDP: getProtocol("udp")
else: default(MAProtocol)
let protoProto =
case protocol
of IPPROTO_TCP:
getProtocol("tcp")
of IPPROTO_UDP:
getProtocol("udp")
else:
default(MAProtocol)
if protoProto.size == 0:
return err("multiaddress: protocol should be either TCP or UDP")
if address.family == AddressFamily.IPv4:
@@ -1043,8 +970,7 @@ proc append*(m1: var MultiAddress, m2: MultiAddress): MaResult[void] =
else:
ok()
proc `&`*(m1, m2: MultiAddress): MultiAddress {.
raises: [LPError].} =
proc `&`*(m1, m2: MultiAddress): MultiAddress {.raises: [LPError].} =
## Concatenates two addresses ``m1`` and ``m2``, and returns result.
##
## This procedure performs validation of concatenated result and can raise
@@ -1053,8 +979,7 @@ proc `&`*(m1, m2: MultiAddress): MultiAddress {.
concat(m1, m2).tryGet()
proc `&=`*(m1: var MultiAddress, m2: MultiAddress) {.
raises: [LPError].} =
proc `&=`*(m1: var MultiAddress, m2: MultiAddress) {.raises: [LPError].} =
## Concatenates two addresses ``m1`` and ``m2``.
##
## This procedure performs validation of concatenated result and can raise
@@ -1076,13 +1001,12 @@ proc matchPart(pat: MaPattern, protos: seq[MultiCodec]): MaPatResult =
let res = a.matchPart(pcs)
if res.flag:
#Greedy Or
if result.flag == false or
result.rem.len > res.rem.len:
if result.flag == false or result.rem.len > res.rem.len:
result = res
elif pat.operator == And:
if len(pcs) < len(pat.args):
return MaPatResult(flag: false, rem: empty)
for i in 0..<len(pat.args):
for i in 0 ..< len(pat.args):
let res = pat.args[i].matchPart(pcs)
if not res.flag:
return MaPatResult(flag: false, rem: res.rem)
@@ -1092,20 +1016,22 @@ proc matchPart(pat: MaPattern, protos: seq[MultiCodec]): MaPatResult =
if len(pcs) == 0:
return MaPatResult(flag: false, rem: empty)
if pcs[0] == pat.value:
return MaPatResult(flag: true, rem: pcs[1..^1])
return MaPatResult(flag: true, rem: pcs[1 ..^ 1])
result = MaPatResult(flag: false, rem: empty)
proc match*(pat: MaPattern, address: MultiAddress): bool =
## Match full ``address`` using pattern ``pat`` and return ``true`` if
## ``address`` satisfies pattern.
let protos = address.protocols().valueOr: return false
let protos = address.protocols().valueOr:
return false
let res = matchPart(pat, protos)
res.flag and (len(res.rem) == 0)
proc matchPartial*(pat: MaPattern, address: MultiAddress): bool =
## Match prefix part of ``address`` using pattern ``pat`` and return
## ``true`` if ``address`` starts with pattern.
let protos = address.protocols().valueOr: return false
let protos = address.protocols().valueOr:
return false
let res = matchPart(pat, protos)
res.flag
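To make the matching semantics concrete, a small sketch using patterns defined earlier in this file:

let ma = MultiAddress.init("/ip4/127.0.0.1/tcp/80/http").tryGet()
doAssert HTTP.match(ma)         # the full address satisfies the HTTP pattern
doAssert HTTP.matchPartial(ma)  # and, trivially, starts with it
doAssert not CircuitRelay.match(ma)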
@@ -1127,21 +1053,21 @@ proc bytes*(value: MultiAddress): seq[byte] =
proc write*(pb: var ProtoBuffer, field: int, value: MultiAddress) {.inline.} =
write(pb, field, value.data.buffer)
proc getField*(pb: ProtoBuffer, field: int,
value: var MultiAddress): ProtoResult[bool] {.
inline.} =
proc getField*(
pb: ProtoBuffer, field: int, value: var MultiAddress
): ProtoResult[bool] {.inline.} =
var buffer: seq[byte]
let res = ? pb.getField(field, buffer)
if not(res):
let res = ?pb.getField(field, buffer)
if not (res):
ok(false)
else:
value = MultiAddress.init(buffer).valueOr:
return err(ProtoError.IncorrectBlob)
ok(true)
proc getRepeatedField*(pb: ProtoBuffer, field: int,
value: var seq[MultiAddress]): ProtoResult[bool] {.
inline.} =
proc getRepeatedField*(
pb: ProtoBuffer, field: int, value: var seq[MultiAddress]
): ProtoResult[bool] {.inline.} =
## Read repeated field from protobuf message. ``field`` is field number.
## If the message is malformed, an error is returned. If field is not present
## in message, then ``ok(false)`` is returned and value is empty. If field is
@@ -1151,8 +1077,8 @@ proc getRepeatedField*(pb: ProtoBuffer, field: int,
## returned and value contains the parsed values.
var items: seq[seq[byte]]
value.setLen(0)
let res = ? pb.getRepeatedField(field, items)
if not(res):
let res = ?pb.getRepeatedField(field, items)
if not (res):
ok(false)
else:
for item in items:


@@ -20,7 +20,12 @@ import stew/[base32, base58, base64, results]
type
MultiBaseStatus* {.pure.} = enum
Error, Success, Overrun, Incorrect, BadCodec, NotSupported
Error
Success
Overrun
Incorrect
BadCodec
NotSupported
MultiBase* = object
@@ -29,17 +34,18 @@ type
MBCodec = object
code: char
name: string
encr: proc(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
decr: proc(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
encr: proc(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
decr: proc(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
encl: MBCodeSize
decl: MBCodeSize
proc idd(inbytes: openArray[char], outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc idd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
let length = len(inbytes)
if length > len(outbytes):
outlen = length
@@ -49,9 +55,9 @@ proc idd(inbytes: openArray[char], outbytes: var openArray[byte],
outlen = length
result = MultiBaseStatus.Success
proc ide(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc ide(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
let length = len(inbytes)
if length > len(outbytes):
outlen = length
@@ -61,31 +67,37 @@ proc ide(inbytes: openArray[byte],
outlen = length
result = MultiBaseStatus.Success
proc idel(length: int): int = length
proc iddl(length: int): int = length
proc idel(length: int): int =
length
proc b16d(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc iddl(length: int): int =
length
proc b16d(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
discard
proc b16e(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b16e(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
discard
proc b16ud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b16ud(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
discard
proc b16ue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b16ue(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
discard
proc b16el(length: int): int = length shl 1
proc b16dl(length: int): int = (length + 1) div 2
proc b16el(length: int): int =
length shl 1
proc b16dl(length: int): int =
(length + 1) div 2
proc b32ce(r: Base32Status): MultiBaseStatus {.inline.} =
result = MultiBaseStatus.Error
@@ -114,218 +126,253 @@ proc b64ce(r: Base64Status): MultiBaseStatus {.inline.} =
elif r == Base64Status.Success:
result = MultiBaseStatus.Success
proc b32hd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b32hd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
result = b32ce(HexBase32Lower.decode(inbytes, outbytes, outlen))
proc b32he(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b32he(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
result = b32ce(HexBase32Lower.encode(inbytes, outbytes, outlen))
proc b32hud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b32hud(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
result = b32ce(HexBase32Upper.decode(inbytes, outbytes, outlen))
proc b32hue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b32hue(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
result = b32ce(HexBase32Upper.encode(inbytes, outbytes, outlen))
proc b32hpd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b32hpd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
result = b32ce(HexBase32LowerPad.decode(inbytes, outbytes, outlen))
proc b32hpe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b32hpe(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
result = b32ce(HexBase32LowerPad.encode(inbytes, outbytes, outlen))
proc b32hpud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b32hpud(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
result = b32ce(HexBase32UpperPad.decode(inbytes, outbytes, outlen))
proc b32hpue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b32hpue(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
result = b32ce(HexBase32UpperPad.encode(inbytes, outbytes, outlen))
proc b32d(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b32d(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
result = b32ce(Base32Lower.decode(inbytes, outbytes, outlen))
proc b32e(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b32e(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
result = b32ce(Base32Lower.encode(inbytes, outbytes, outlen))
proc b32ud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b32ud(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
result = b32ce(Base32Upper.decode(inbytes, outbytes, outlen))
proc b32ue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b32ue(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
result = b32ce(Base32Upper.encode(inbytes, outbytes, outlen))
proc b32pd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b32pd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
result = b32ce(Base32LowerPad.decode(inbytes, outbytes, outlen))
proc b32pe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b32pe(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
result = b32ce(Base32LowerPad.encode(inbytes, outbytes, outlen))
proc b32pud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b32pud(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
result = b32ce(Base32UpperPad.decode(inbytes, outbytes, outlen))
proc b32pue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b32pue(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
result = b32ce(Base32UpperPad.encode(inbytes, outbytes, outlen))
proc b32el(length: int): int = Base32Lower.encodedLength(length)
proc b32dl(length: int): int = Base32Lower.decodedLength(length)
proc b32pel(length: int): int = Base32LowerPad.encodedLength(length)
proc b32pdl(length: int): int = Base32LowerPad.decodedLength(length)
proc b32el(length: int): int =
Base32Lower.encodedLength(length)
proc b58fd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b32dl(length: int): int =
Base32Lower.decodedLength(length)
proc b32pel(length: int): int =
Base32LowerPad.encodedLength(length)
proc b32pdl(length: int): int =
Base32LowerPad.decodedLength(length)
proc b58fd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
result = b58ce(FLCBase58.decode(inbytes, outbytes, outlen))
proc b58fe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b58fe(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
result = b58ce(FLCBase58.encode(inbytes, outbytes, outlen))
proc b58bd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b58bd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
result = b58ce(BTCBase58.decode(inbytes, outbytes, outlen))
proc b58be(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b58be(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
result = b58ce(BTCBase58.encode(inbytes, outbytes, outlen))
proc b58el(length: int): int = Base58.encodedLength(length)
proc b58dl(length: int): int = Base58.decodedLength(length)
proc b58el(length: int): int =
Base58.encodedLength(length)
proc b64el(length: int): int = Base64.encodedLength(length)
proc b64dl(length: int): int = Base64.decodedLength(length)
proc b64pel(length: int): int = Base64Pad.encodedLength(length)
proc b64pdl(length: int): int = Base64Pad.decodedLength(length)
proc b58dl(length: int): int =
Base58.decodedLength(length)
proc b64e(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b64el(length: int): int =
Base64.encodedLength(length)
proc b64dl(length: int): int =
Base64.decodedLength(length)
proc b64pel(length: int): int =
Base64Pad.encodedLength(length)
proc b64pdl(length: int): int =
Base64Pad.decodedLength(length)
proc b64e(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
result = b64ce(Base64.encode(inbytes, outbytes, outlen))
proc b64d(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b64d(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
result = b64ce(Base64.decode(inbytes, outbytes, outlen))
proc b64pe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b64pe(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
result = b64ce(Base64Pad.encode(inbytes, outbytes, outlen))
proc b64pd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b64pd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
result = b64ce(Base64Pad.decode(inbytes, outbytes, outlen))
proc b64ue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b64ue(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
result = b64ce(Base64Url.encode(inbytes, outbytes, outlen))
proc b64ud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b64ud(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
result = b64ce(Base64Url.decode(inbytes, outbytes, outlen))
proc b64upe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc b64upe(
inbytes: openArray[byte], outbytes: var openArray[char], outlen: var int
): MultiBaseStatus =
result = b64ce(Base64UrlPad.encode(inbytes, outbytes, outlen))
proc b64upd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
proc b64upd(
inbytes: openArray[char], outbytes: var openArray[byte], outlen: var int
): MultiBaseStatus =
result = b64ce(Base64UrlPad.decode(inbytes, outbytes, outlen))
const
MultiBaseCodecs = [
MBCodec(name: "identity", code: chr(0x00),
decr: idd, encr: ide, decl: iddl, encl: idel
),
MBCodec(name: "base1", code: '1'),
MBCodec(name: "base2", code: '0'),
MBCodec(name: "base8", code: '7'),
MBCodec(name: "base10", code: '9'),
MBCodec(name: "base16", code: 'f',
decr: b16d, encr: b16e, decl: b16dl, encl: b16el
),
MBCodec(name: "base16upper", code: 'F',
decr: b16ud, encr: b16ue, decl: b16dl, encl: b16el
),
MBCodec(name: "base32hex", code: 'v',
decr: b32hd, encr: b32he, decl: b32dl, encl: b32el
),
MBCodec(name: "base32hexupper", code: 'V',
decr: b32hud, encr: b32hue, decl: b32dl, encl: b32el
),
MBCodec(name: "base32hexpad", code: 't',
decr: b32hpd, encr: b32hpe, decl: b32pdl, encl: b32pel
),
MBCodec(name: "base32hexpadupper", code: 'T',
decr: b32hpud, encr: b32hpue, decl: b32pdl, encl: b32pel
),
MBCodec(name: "base32", code: 'b',
decr: b32d, encr: b32e, decl: b32dl, encl: b32el
),
MBCodec(name: "base32upper", code: 'B',
decr: b32ud, encr: b32ue, decl: b32dl, encl: b32el
),
MBCodec(name: "base32pad", code: 'c',
decr: b32pd, encr: b32pe, decl: b32pdl, encl: b32pel
),
MBCodec(name: "base32padupper", code: 'C',
decr: b32pud, encr: b32pue, decl: b32pdl, encl: b32pel
),
MBCodec(name: "base32z", code: 'h'),
MBCodec(name: "base58flickr", code: 'Z',
decr: b58fd, encr: b58fe, decl: b58dl, encl: b58el
),
MBCodec(name: "base58btc", code: 'z',
decr: b58bd, encr: b58be, decl: b58dl, encl: b58el
),
MBCodec(name: "base64", code: 'm',
decr: b64d, encr: b64e, decl: b64dl, encl: b64el
),
MBCodec(name: "base64pad", code: 'M',
decr: b64pd, encr: b64pe, decl: b64pdl, encl: b64pel
),
MBCodec(name: "base64url", code: 'u',
decr: b64ud, encr: b64ue, decl: b64dl, encl: b64el
),
MBCodec(name: "base64urlpad", code: 'U',
decr: b64upd, encr: b64upe, decl: b64pdl, encl: b64pel
)
]
const MultiBaseCodecs = [
MBCodec(
name: "identity", code: chr(0x00), decr: idd, encr: ide, decl: iddl, encl: idel
),
MBCodec(name: "base1", code: '1'),
MBCodec(name: "base2", code: '0'),
MBCodec(name: "base8", code: '7'),
MBCodec(name: "base10", code: '9'),
MBCodec(name: "base16", code: 'f', decr: b16d, encr: b16e, decl: b16dl, encl: b16el),
MBCodec(
name: "base16upper", code: 'F', decr: b16ud, encr: b16ue, decl: b16dl, encl: b16el
),
MBCodec(
name: "base32hex", code: 'v', decr: b32hd, encr: b32he, decl: b32dl, encl: b32el
),
MBCodec(
name: "base32hexupper",
code: 'V',
decr: b32hud,
encr: b32hue,
decl: b32dl,
encl: b32el,
),
MBCodec(
name: "base32hexpad",
code: 't',
decr: b32hpd,
encr: b32hpe,
decl: b32pdl,
encl: b32pel,
),
MBCodec(
name: "base32hexpadupper",
code: 'T',
decr: b32hpud,
encr: b32hpue,
decl: b32pdl,
encl: b32pel,
),
MBCodec(name: "base32", code: 'b', decr: b32d, encr: b32e, decl: b32dl, encl: b32el),
MBCodec(
name: "base32upper", code: 'B', decr: b32ud, encr: b32ue, decl: b32dl, encl: b32el
),
MBCodec(
name: "base32pad", code: 'c', decr: b32pd, encr: b32pe, decl: b32pdl, encl: b32pel
),
MBCodec(
name: "base32padupper",
code: 'C',
decr: b32pud,
encr: b32pue,
decl: b32pdl,
encl: b32pel,
),
MBCodec(name: "base32z", code: 'h'),
MBCodec(
name: "base58flickr", code: 'Z', decr: b58fd, encr: b58fe, decl: b58dl, encl: b58el
),
MBCodec(
name: "base58btc", code: 'z', decr: b58bd, encr: b58be, decl: b58dl, encl: b58el
),
MBCodec(name: "base64", code: 'm', decr: b64d, encr: b64e, decl: b64dl, encl: b64el),
MBCodec(
name: "base64pad", code: 'M', decr: b64pd, encr: b64pe, decl: b64pdl, encl: b64pel
),
MBCodec(
name: "base64url", code: 'u', decr: b64ud, encr: b64ue, decl: b64dl, encl: b64el
),
MBCodec(
name: "base64urlpad",
code: 'U',
decr: b64upd,
encr: b64upe,
decl: b64pdl,
encl: b64pel,
),
]
proc initMultiBaseCodeTable(): Table[char, MBCodec] {.compileTime.} =
for item in MultiBaseCodecs:
@@ -339,8 +386,7 @@ const
CodeMultiBases = initMultiBaseCodeTable()
NameMultiBases = initMultiBaseNameTable()
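
These two compile-time tables let the public procs resolve a codec either by its one-character prefix or by its name. An illustrative sketch of the mapping (the constants are private to the module, so the lookups below only demonstrate the table contents):

doAssert CodeMultiBases.getOrDefault('z').name == "base58btc"
doAssert NameMultiBases.getOrDefault("base16").code == 'f'
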
proc encodedLength*(mbtype: typedesc[MultiBase], encoding: string,
length: int): int =
proc encodedLength*(mbtype: typedesc[MultiBase], encoding: string, length: int): int =
## Return estimated size of buffer to store MultiBase encoded value with
## encoding ``encoding`` of length ``length``.
##
@@ -355,8 +401,7 @@ proc encodedLength*(mbtype: typedesc[MultiBase], encoding: string,
else:
result = mb.encl(length) + 1
proc decodedLength*(mbtype: typedesc[MultiBase], encoding: char,
length: int): int =
proc decodedLength*(mbtype: typedesc[MultiBase], encoding: char, length: int): int =
## Return estimated size of buffer to store MultiBase decoded value with
## encoding character ``encoding`` of length ``length``.
let mb = CodeMultiBases.getOrDefault(encoding)
@@ -368,9 +413,13 @@ proc decodedLength*(mbtype: typedesc[MultiBase], encoding: char,
else:
result = mb.decl(length - 1)
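
Taken together, `encodedLength` and `decodedLength` size the caller-supplied buffers, with one extra slot for the one-character multibase prefix. A hedged usage sketch (the returned sizes are upper-bound estimates, not exact lengths):

let msg = [byte 0x01, 0x02, 0x03]
var enc = newString(MultiBase.encodedLength("base32", msg.len))   # prefix + expansion
var dec = newSeq[byte](MultiBase.decodedLength('b', enc.len))     # room to decode back
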
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
inbytes: openArray[byte], outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
proc encode*(
mbtype: typedesc[MultiBase],
encoding: string,
inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int,
): MultiBaseStatus =
## Encode array ``inbytes`` using MultiBase encoding scheme ``encoding`` and
## store encoded value to ``outbytes``.
##
@@ -392,8 +441,7 @@ proc encode*(mbtype: typedesc[MultiBase], encoding: string,
if isNil(mb.encr) or isNil(mb.encl):
return MultiBaseStatus.NotSupported
if len(outbytes) > 1:
result = mb.encr(inbytes, outbytes.toOpenArray(1, outbytes.high),
outlen)
result = mb.encr(inbytes, outbytes.toOpenArray(1, outbytes.high), outlen)
if result == MultiBaseStatus.Overrun:
outlen += 1
elif result == MultiBaseStatus.Success:
@@ -408,8 +456,12 @@ proc encode*(mbtype: typedesc[MultiBase], encoding: string,
result = MultiBaseStatus.Overrun
outlen = mb.encl(len(inbytes)) + 1
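
The `Overrun` branch above enables a two-phase calling pattern: probe with a too-small buffer, read the required size back from `outlen`, then retry. A minimal sketch:

let data = [byte 0xDE, 0xAD, 0xBE, 0xEF]
var outlen = 0
var buf = newString(0)
if MultiBase.encode("base64", data, buf, outlen) == MultiBaseStatus.Overrun:
  buf.setLen(outlen)    # outlen now reports the required size, prefix included
  doAssert MultiBase.encode("base64", data, buf, outlen) == MultiBaseStatus.Success
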
proc decode*(mbtype: typedesc[MultiBase], inbytes: openArray[char],
outbytes: var openArray[byte], outlen: var int): MultiBaseStatus =
proc decode*(
mbtype: typedesc[MultiBase],
inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int,
): MultiBaseStatus =
## Decode array ``inbytes`` using MultiBase encoding and store decoded value
## to ``outbytes``.
##
@@ -438,8 +490,9 @@ proc decode*(mbtype: typedesc[MultiBase], inbytes: openArray[char],
else:
result = mb.decr(inbytes.toOpenArray(1, length - 1), outbytes, outlen)
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
inbytes: openArray[byte]): Result[string, string] =
proc encode*(
mbtype: typedesc[MultiBase], encoding: string, inbytes: openArray[byte]
): Result[string, string] =
## Encode array ``inbytes`` using MultiBase encoding scheme ``encoding`` and
## return encoded string.
let length = len(inbytes)
@@ -462,7 +515,9 @@ proc encode*(mbtype: typedesc[MultiBase], encoding: string,
buffer[0] = mb.code
ok(buffer)
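
The Result-returning overloads wrap the buffer management above; paired with the `decode` overload that follows, a roundtrip looks like this (a sketch, assuming stew's Result `get` helper):

let encoded = MultiBase.encode("base58btc", [byte 1, 2, 3]).get()
doAssert encoded[0] == 'z'    # base58btc prefix character
doAssert MultiBase.decode(encoded).get() == @[byte 1, 2, 3]
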
proc decode*(mbtype: typedesc[MultiBase], inbytes: openArray[char]): Result[seq[byte], string] =
proc decode*(
mbtype: typedesc[MultiBase], inbytes: openArray[char]
): Result[seq[byte], string] =
## Decode MultiBase encoded array ``inbytes`` and return decoded sequence of
## bytes.
let length = len(inbytes)
@@ -479,8 +534,7 @@ proc decode*(mbtype: typedesc[MultiBase], inbytes: openArray[char]): Result[seq[
else:
var buffer = newSeq[byte](mb.decl(length - 1))
var outlen = 0
let res = mb.decr(inbytes.toOpenArray(1, length - 1),
buffer, outlen)
let res = mb.decr(inbytes.toOpenArray(1, length - 1), buffer, outlen)
if res != MultiBaseStatus.Success:
err("multibase: Decoding error [" & $res & "]")
else:


@@ -49,132 +49,325 @@ const MultiCodecList = [
("keccak-384", 0x1C),
("keccak-512", 0x1D),
("murmur3", 0x22),
("blake2b-8", 0xB201), ("blake2b-16", 0xB202), ("blake2b-24", 0xB203),
("blake2b-32", 0xB204), ("blake2b-40", 0xB205), ("blake2b-48", 0xB206),
("blake2b-56", 0xB207), ("blake2b-64", 0xB208), ("blake2b-72", 0xB209),
("blake2b-80", 0xB20A), ("blake2b-88", 0xB20B), ("blake2b-96", 0xB20C),
("blake2b-104", 0xB20D), ("blake2b-112", 0xB20E), ("blake2b-120", 0xB20F),
("blake2b-128", 0xB210), ("blake2b-136", 0xB211), ("blake2b-144", 0xB212),
("blake2b-152", 0xB213), ("blake2b-160", 0xB214), ("blake2b-168", 0xB215),
("blake2b-176", 0xB216), ("blake2b-184", 0xB217), ("blake2b-192", 0xB218),
("blake2b-200", 0xB219), ("blake2b-208", 0xB21A), ("blake2b-216", 0xB21B),
("blake2b-224", 0xB21C), ("blake2b-232", 0xB21D), ("blake2b-240", 0xB21E),
("blake2b-248", 0xB21F), ("blake2b-256", 0xB220), ("blake2b-264", 0xB221),
("blake2b-272", 0xB222), ("blake2b-280", 0xB223), ("blake2b-288", 0xB224),
("blake2b-296", 0xB225), ("blake2b-304", 0xB226), ("blake2b-312", 0xB227),
("blake2b-320", 0xB228), ("blake2b-328", 0xB229), ("blake2b-336", 0xB22A),
("blake2b-344", 0xB22B), ("blake2b-352", 0xB22C), ("blake2b-360", 0xB22D),
("blake2b-368", 0xB22E), ("blake2b-376", 0xB22F), ("blake2b-384", 0xB230),
("blake2b-392", 0xB231), ("blake2b-400", 0xB232), ("blake2b-408", 0xB233),
("blake2b-416", 0xB234), ("blake2b-424", 0xB235), ("blake2b-432", 0xB236),
("blake2b-440", 0xB237), ("blake2b-448", 0xB238), ("blake2b-456", 0xB239),
("blake2b-464", 0xB23A), ("blake2b-472", 0xB23B), ("blake2b-480", 0xB23C),
("blake2b-488", 0xB23D), ("blake2b-496", 0xB23E), ("blake2b-504", 0xB23F),
("blake2b-512", 0xB240), ("blake2s-8", 0xB241), ("blake2s-16", 0xB242),
("blake2s-24", 0xB243), ("blake2s-32", 0xB244), ("blake2s-40", 0xB245),
("blake2s-48", 0xB246), ("blake2s-56", 0xB247), ("blake2s-64", 0xB248),
("blake2s-72", 0xB249), ("blake2s-80", 0xB24A), ("blake2s-88", 0xB24B),
("blake2s-96", 0xB24C), ("blake2s-104", 0xB24D), ("blake2s-112", 0xB24E),
("blake2s-120", 0xB24F), ("blake2s-128", 0xB250), ("blake2s-136", 0xB251),
("blake2s-144", 0xB252), ("blake2s-152", 0xB253), ("blake2s-160", 0xB254),
("blake2s-168", 0xB255), ("blake2s-176", 0xB256), ("blake2s-184", 0xB257),
("blake2s-192", 0xB258), ("blake2s-200", 0xB259), ("blake2s-208", 0xB25A),
("blake2s-216", 0xB25B), ("blake2s-224", 0xB25C), ("blake2s-232", 0xB25D),
("blake2s-240", 0xB25E), ("blake2s-248", 0xB25F), ("blake2s-256", 0xB260),
("skein256-8", 0xB301), ("skein256-16", 0xB302), ("skein256-24", 0xB303),
("skein256-32", 0xB304), ("skein256-40", 0xB305), ("skein256-48", 0xB306),
("skein256-56", 0xB307), ("skein256-64", 0xB308), ("skein256-72", 0xB309),
("skein256-80", 0xB30A), ("skein256-88", 0xB30B), ("skein256-96", 0xB30C),
("skein256-104", 0xB30D), ("skein256-112", 0xB30E), ("skein256-120", 0xB30F),
("skein256-128", 0xB310), ("skein256-136", 0xB311), ("skein256-144", 0xB312),
("skein256-152", 0xB313), ("skein256-160", 0xB314), ("skein256-168", 0xB315),
("skein256-176", 0xB316), ("skein256-184", 0xB317), ("skein256-192", 0xB318),
("skein256-200", 0xB319), ("skein256-208", 0xB31A), ("skein256-216", 0xB31B),
("skein256-224", 0xB31C), ("skein256-232", 0xB31D), ("skein256-240", 0xB31E),
("skein256-248", 0xB31F), ("skein256-256", 0xB320),
("skein512-8", 0xB321), ("skein512-16", 0xB322), ("skein512-24", 0xB323),
("skein512-32", 0xB324), ("skein512-40", 0xB325), ("skein512-48", 0xB326),
("skein512-56", 0xB327), ("skein512-64", 0xB328), ("skein512-72", 0xB329),
("skein512-80", 0xB32A), ("skein512-88", 0xB32B), ("skein512-96", 0xB32C),
("skein512-104", 0xB32D), ("skein512-112", 0xB32E), ("skein512-120", 0xB32F),
("skein512-128", 0xB330), ("skein512-136", 0xB331), ("skein512-144", 0xB332),
("skein512-152", 0xB333), ("skein512-160", 0xB334), ("skein512-168", 0xB335),
("skein512-176", 0xB336), ("skein512-184", 0xB337), ("skein512-192", 0xB338),
("skein512-200", 0xB339), ("skein512-208", 0xB33A), ("skein512-216", 0xB33B),
("skein512-224", 0xB33C), ("skein512-232", 0xB33D), ("skein512-240", 0xB33E),
("skein512-248", 0xB33F), ("skein512-256", 0xB340), ("skein512-264", 0xB341),
("skein512-272", 0xB342), ("skein512-280", 0xB343), ("skein512-288", 0xB344),
("skein512-296", 0xB345), ("skein512-304", 0xB346), ("skein512-312", 0xB347),
("skein512-320", 0xB348), ("skein512-328", 0xB349), ("skein512-336", 0xB34A),
("skein512-344", 0xB34B), ("skein512-352", 0xB34C), ("skein512-360", 0xB34D),
("skein512-368", 0xB34E), ("skein512-376", 0xB34F), ("skein512-384", 0xB350),
("skein512-392", 0xB351), ("skein512-400", 0xB352), ("skein512-408", 0xB353),
("skein512-416", 0xB354), ("skein512-424", 0xB355), ("skein512-432", 0xB356),
("skein512-440", 0xB357), ("skein512-448", 0xB358), ("skein512-456", 0xB359),
("skein512-464", 0xB35A), ("skein512-472", 0xB35B), ("skein512-480", 0xB35C),
("skein512-488", 0xB35D), ("skein512-496", 0xB35E), ("skein512-504", 0xB35F),
("skein512-512", 0xB360), ("skein1024-8", 0xB361), ("skein1024-16", 0xB362),
("skein1024-24", 0xB363), ("skein1024-32", 0xB364), ("skein1024-40", 0xB365),
("skein1024-48", 0xB366), ("skein1024-56", 0xB367), ("skein1024-64", 0xB368),
("skein1024-72", 0xB369), ("skein1024-80", 0xB36A), ("skein1024-88", 0xB36B),
("skein1024-96", 0xB36C), ("skein1024-104", 0xB36D),
("skein1024-112", 0xB36E), ("skein1024-120", 0xB36F),
("skein1024-128", 0xB370), ("skein1024-136", 0xB371),
("skein1024-144", 0xB372), ("skein1024-152", 0xB373),
("skein1024-160", 0xB374), ("skein1024-168", 0xB375),
("skein1024-176", 0xB376), ("skein1024-184", 0xB377),
("skein1024-192", 0xB378), ("skein1024-200", 0xB379),
("skein1024-208", 0xB37A), ("skein1024-216", 0xB37B),
("skein1024-224", 0xB37C), ("skein1024-232", 0xB37D),
("skein1024-240", 0xB37E), ("skein1024-248", 0xB37F),
("skein1024-256", 0xB380), ("skein1024-264", 0xB381),
("skein1024-272", 0xB382), ("skein1024-280", 0xB383),
("skein1024-288", 0xB384), ("skein1024-296", 0xB385),
("skein1024-304", 0xB386), ("skein1024-312", 0xB387),
("skein1024-320", 0xB388), ("skein1024-328", 0xB389),
("skein1024-336", 0xB38A), ("skein1024-344", 0xB38B),
("skein1024-352", 0xB38C), ("skein1024-360", 0xB38D),
("skein1024-368", 0xB38E), ("skein1024-376", 0xB38F),
("skein1024-384", 0xB390), ("skein1024-392", 0xB391),
("skein1024-400", 0xB392), ("skein1024-408", 0xB393),
("skein1024-416", 0xB394), ("skein1024-424", 0xB395),
("skein1024-432", 0xB396), ("skein1024-440", 0xB397),
("skein1024-448", 0xB398), ("skein1024-456", 0xB399),
("skein1024-464", 0xB39A), ("skein1024-472", 0xB39B),
("skein1024-480", 0xB39C), ("skein1024-488", 0xB39D),
("skein1024-496", 0xB39E), ("skein1024-504", 0xB39F),
("skein1024-512", 0xB3A0), ("skein1024-520", 0xB3A1),
("skein1024-528", 0xB3A2), ("skein1024-536", 0xB3A3),
("skein1024-544", 0xB3A4), ("skein1024-552", 0xB3A5),
("skein1024-560", 0xB3A6), ("skein1024-568", 0xB3A7),
("skein1024-576", 0xB3A8), ("skein1024-584", 0xB3A9),
("skein1024-592", 0xB3AA), ("skein1024-600", 0xB3AB),
("skein1024-608", 0xB3AC), ("skein1024-616", 0xB3AD),
("skein1024-624", 0xB3AE), ("skein1024-632", 0xB3AF),
("skein1024-640", 0xB3B0), ("skein1024-648", 0xB3B1),
("skein1024-656", 0xB3B2), ("skein1024-664", 0xB3B3),
("skein1024-672", 0xB3B4), ("skein1024-680", 0xB3B5),
("skein1024-688", 0xB3B6), ("skein1024-696", 0xB3B7),
("skein1024-704", 0xB3B8), ("skein1024-712", 0xB3B9),
("skein1024-720", 0xB3BA), ("skein1024-728", 0xB3BB),
("skein1024-736", 0xB3BC), ("skein1024-744", 0xB3BD),
("skein1024-752", 0xB3BE), ("skein1024-760", 0xB3BF),
("skein1024-768", 0xB3C0), ("skein1024-776", 0xB3C1),
("skein1024-784", 0xB3C2), ("skein1024-792", 0xB3C3),
("skein1024-800", 0xB3C4), ("skein1024-808", 0xB3C5),
("skein1024-816", 0xB3C6), ("skein1024-824", 0xB3C7),
("skein1024-832", 0xB3C8), ("skein1024-840", 0xB3C9),
("skein1024-848", 0xB3CA), ("skein1024-856", 0xB3CB),
("skein1024-864", 0xB3CC), ("skein1024-872", 0xB3CD),
("skein1024-880", 0xB3CE), ("skein1024-888", 0xB3CF),
("skein1024-896", 0xB3D0), ("skein1024-904", 0xB3D1),
("skein1024-912", 0xB3D2), ("skein1024-920", 0xB3D3),
("skein1024-928", 0xB3D4), ("skein1024-936", 0xB3D5),
("skein1024-944", 0xB3D6), ("skein1024-952", 0xB3D7),
("skein1024-960", 0xB3D8), ("skein1024-968", 0xB3D9),
("skein1024-976", 0xB3DA), ("skein1024-984", 0xB3DB),
("skein1024-992", 0xB3DC), ("skein1024-1000", 0xB3DD),
("skein1024-1008", 0xB3DE), ("skein1024-1016", 0xB3DF),
("blake2b-8", 0xB201),
("blake2b-16", 0xB202),
("blake2b-24", 0xB203),
("blake2b-32", 0xB204),
("blake2b-40", 0xB205),
("blake2b-48", 0xB206),
("blake2b-56", 0xB207),
("blake2b-64", 0xB208),
("blake2b-72", 0xB209),
("blake2b-80", 0xB20A),
("blake2b-88", 0xB20B),
("blake2b-96", 0xB20C),
("blake2b-104", 0xB20D),
("blake2b-112", 0xB20E),
("blake2b-120", 0xB20F),
("blake2b-128", 0xB210),
("blake2b-136", 0xB211),
("blake2b-144", 0xB212),
("blake2b-152", 0xB213),
("blake2b-160", 0xB214),
("blake2b-168", 0xB215),
("blake2b-176", 0xB216),
("blake2b-184", 0xB217),
("blake2b-192", 0xB218),
("blake2b-200", 0xB219),
("blake2b-208", 0xB21A),
("blake2b-216", 0xB21B),
("blake2b-224", 0xB21C),
("blake2b-232", 0xB21D),
("blake2b-240", 0xB21E),
("blake2b-248", 0xB21F),
("blake2b-256", 0xB220),
("blake2b-264", 0xB221),
("blake2b-272", 0xB222),
("blake2b-280", 0xB223),
("blake2b-288", 0xB224),
("blake2b-296", 0xB225),
("blake2b-304", 0xB226),
("blake2b-312", 0xB227),
("blake2b-320", 0xB228),
("blake2b-328", 0xB229),
("blake2b-336", 0xB22A),
("blake2b-344", 0xB22B),
("blake2b-352", 0xB22C),
("blake2b-360", 0xB22D),
("blake2b-368", 0xB22E),
("blake2b-376", 0xB22F),
("blake2b-384", 0xB230),
("blake2b-392", 0xB231),
("blake2b-400", 0xB232),
("blake2b-408", 0xB233),
("blake2b-416", 0xB234),
("blake2b-424", 0xB235),
("blake2b-432", 0xB236),
("blake2b-440", 0xB237),
("blake2b-448", 0xB238),
("blake2b-456", 0xB239),
("blake2b-464", 0xB23A),
("blake2b-472", 0xB23B),
("blake2b-480", 0xB23C),
("blake2b-488", 0xB23D),
("blake2b-496", 0xB23E),
("blake2b-504", 0xB23F),
("blake2b-512", 0xB240),
("blake2s-8", 0xB241),
("blake2s-16", 0xB242),
("blake2s-24", 0xB243),
("blake2s-32", 0xB244),
("blake2s-40", 0xB245),
("blake2s-48", 0xB246),
("blake2s-56", 0xB247),
("blake2s-64", 0xB248),
("blake2s-72", 0xB249),
("blake2s-80", 0xB24A),
("blake2s-88", 0xB24B),
("blake2s-96", 0xB24C),
("blake2s-104", 0xB24D),
("blake2s-112", 0xB24E),
("blake2s-120", 0xB24F),
("blake2s-128", 0xB250),
("blake2s-136", 0xB251),
("blake2s-144", 0xB252),
("blake2s-152", 0xB253),
("blake2s-160", 0xB254),
("blake2s-168", 0xB255),
("blake2s-176", 0xB256),
("blake2s-184", 0xB257),
("blake2s-192", 0xB258),
("blake2s-200", 0xB259),
("blake2s-208", 0xB25A),
("blake2s-216", 0xB25B),
("blake2s-224", 0xB25C),
("blake2s-232", 0xB25D),
("blake2s-240", 0xB25E),
("blake2s-248", 0xB25F),
("blake2s-256", 0xB260),
("skein256-8", 0xB301),
("skein256-16", 0xB302),
("skein256-24", 0xB303),
("skein256-32", 0xB304),
("skein256-40", 0xB305),
("skein256-48", 0xB306),
("skein256-56", 0xB307),
("skein256-64", 0xB308),
("skein256-72", 0xB309),
("skein256-80", 0xB30A),
("skein256-88", 0xB30B),
("skein256-96", 0xB30C),
("skein256-104", 0xB30D),
("skein256-112", 0xB30E),
("skein256-120", 0xB30F),
("skein256-128", 0xB310),
("skein256-136", 0xB311),
("skein256-144", 0xB312),
("skein256-152", 0xB313),
("skein256-160", 0xB314),
("skein256-168", 0xB315),
("skein256-176", 0xB316),
("skein256-184", 0xB317),
("skein256-192", 0xB318),
("skein256-200", 0xB319),
("skein256-208", 0xB31A),
("skein256-216", 0xB31B),
("skein256-224", 0xB31C),
("skein256-232", 0xB31D),
("skein256-240", 0xB31E),
("skein256-248", 0xB31F),
("skein256-256", 0xB320),
("skein512-8", 0xB321),
("skein512-16", 0xB322),
("skein512-24", 0xB323),
("skein512-32", 0xB324),
("skein512-40", 0xB325),
("skein512-48", 0xB326),
("skein512-56", 0xB327),
("skein512-64", 0xB328),
("skein512-72", 0xB329),
("skein512-80", 0xB32A),
("skein512-88", 0xB32B),
("skein512-96", 0xB32C),
("skein512-104", 0xB32D),
("skein512-112", 0xB32E),
("skein512-120", 0xB32F),
("skein512-128", 0xB330),
("skein512-136", 0xB331),
("skein512-144", 0xB332),
("skein512-152", 0xB333),
("skein512-160", 0xB334),
("skein512-168", 0xB335),
("skein512-176", 0xB336),
("skein512-184", 0xB337),
("skein512-192", 0xB338),
("skein512-200", 0xB339),
("skein512-208", 0xB33A),
("skein512-216", 0xB33B),
("skein512-224", 0xB33C),
("skein512-232", 0xB33D),
("skein512-240", 0xB33E),
("skein512-248", 0xB33F),
("skein512-256", 0xB340),
("skein512-264", 0xB341),
("skein512-272", 0xB342),
("skein512-280", 0xB343),
("skein512-288", 0xB344),
("skein512-296", 0xB345),
("skein512-304", 0xB346),
("skein512-312", 0xB347),
("skein512-320", 0xB348),
("skein512-328", 0xB349),
("skein512-336", 0xB34A),
("skein512-344", 0xB34B),
("skein512-352", 0xB34C),
("skein512-360", 0xB34D),
("skein512-368", 0xB34E),
("skein512-376", 0xB34F),
("skein512-384", 0xB350),
("skein512-392", 0xB351),
("skein512-400", 0xB352),
("skein512-408", 0xB353),
("skein512-416", 0xB354),
("skein512-424", 0xB355),
("skein512-432", 0xB356),
("skein512-440", 0xB357),
("skein512-448", 0xB358),
("skein512-456", 0xB359),
("skein512-464", 0xB35A),
("skein512-472", 0xB35B),
("skein512-480", 0xB35C),
("skein512-488", 0xB35D),
("skein512-496", 0xB35E),
("skein512-504", 0xB35F),
("skein512-512", 0xB360),
("skein1024-8", 0xB361),
("skein1024-16", 0xB362),
("skein1024-24", 0xB363),
("skein1024-32", 0xB364),
("skein1024-40", 0xB365),
("skein1024-48", 0xB366),
("skein1024-56", 0xB367),
("skein1024-64", 0xB368),
("skein1024-72", 0xB369),
("skein1024-80", 0xB36A),
("skein1024-88", 0xB36B),
("skein1024-96", 0xB36C),
("skein1024-104", 0xB36D),
("skein1024-112", 0xB36E),
("skein1024-120", 0xB36F),
("skein1024-128", 0xB370),
("skein1024-136", 0xB371),
("skein1024-144", 0xB372),
("skein1024-152", 0xB373),
("skein1024-160", 0xB374),
("skein1024-168", 0xB375),
("skein1024-176", 0xB376),
("skein1024-184", 0xB377),
("skein1024-192", 0xB378),
("skein1024-200", 0xB379),
("skein1024-208", 0xB37A),
("skein1024-216", 0xB37B),
("skein1024-224", 0xB37C),
("skein1024-232", 0xB37D),
("skein1024-240", 0xB37E),
("skein1024-248", 0xB37F),
("skein1024-256", 0xB380),
("skein1024-264", 0xB381),
("skein1024-272", 0xB382),
("skein1024-280", 0xB383),
("skein1024-288", 0xB384),
("skein1024-296", 0xB385),
("skein1024-304", 0xB386),
("skein1024-312", 0xB387),
("skein1024-320", 0xB388),
("skein1024-328", 0xB389),
("skein1024-336", 0xB38A),
("skein1024-344", 0xB38B),
("skein1024-352", 0xB38C),
("skein1024-360", 0xB38D),
("skein1024-368", 0xB38E),
("skein1024-376", 0xB38F),
("skein1024-384", 0xB390),
("skein1024-392", 0xB391),
("skein1024-400", 0xB392),
("skein1024-408", 0xB393),
("skein1024-416", 0xB394),
("skein1024-424", 0xB395),
("skein1024-432", 0xB396),
("skein1024-440", 0xB397),
("skein1024-448", 0xB398),
("skein1024-456", 0xB399),
("skein1024-464", 0xB39A),
("skein1024-472", 0xB39B),
("skein1024-480", 0xB39C),
("skein1024-488", 0xB39D),
("skein1024-496", 0xB39E),
("skein1024-504", 0xB39F),
("skein1024-512", 0xB3A0),
("skein1024-520", 0xB3A1),
("skein1024-528", 0xB3A2),
("skein1024-536", 0xB3A3),
("skein1024-544", 0xB3A4),
("skein1024-552", 0xB3A5),
("skein1024-560", 0xB3A6),
("skein1024-568", 0xB3A7),
("skein1024-576", 0xB3A8),
("skein1024-584", 0xB3A9),
("skein1024-592", 0xB3AA),
("skein1024-600", 0xB3AB),
("skein1024-608", 0xB3AC),
("skein1024-616", 0xB3AD),
("skein1024-624", 0xB3AE),
("skein1024-632", 0xB3AF),
("skein1024-640", 0xB3B0),
("skein1024-648", 0xB3B1),
("skein1024-656", 0xB3B2),
("skein1024-664", 0xB3B3),
("skein1024-672", 0xB3B4),
("skein1024-680", 0xB3B5),
("skein1024-688", 0xB3B6),
("skein1024-696", 0xB3B7),
("skein1024-704", 0xB3B8),
("skein1024-712", 0xB3B9),
("skein1024-720", 0xB3BA),
("skein1024-728", 0xB3BB),
("skein1024-736", 0xB3BC),
("skein1024-744", 0xB3BD),
("skein1024-752", 0xB3BE),
("skein1024-760", 0xB3BF),
("skein1024-768", 0xB3C0),
("skein1024-776", 0xB3C1),
("skein1024-784", 0xB3C2),
("skein1024-792", 0xB3C3),
("skein1024-800", 0xB3C4),
("skein1024-808", 0xB3C5),
("skein1024-816", 0xB3C6),
("skein1024-824", 0xB3C7),
("skein1024-832", 0xB3C8),
("skein1024-840", 0xB3C9),
("skein1024-848", 0xB3CA),
("skein1024-856", 0xB3CB),
("skein1024-864", 0xB3CC),
("skein1024-872", 0xB3CD),
("skein1024-880", 0xB3CE),
("skein1024-888", 0xB3CF),
("skein1024-896", 0xB3D0),
("skein1024-904", 0xB3D1),
("skein1024-912", 0xB3D2),
("skein1024-920", 0xB3D3),
("skein1024-928", 0xB3D4),
("skein1024-936", 0xB3D5),
("skein1024-944", 0xB3D6),
("skein1024-952", 0xB3D7),
("skein1024-960", 0xB3D8),
("skein1024-968", 0xB3D9),
("skein1024-976", 0xB3DA),
("skein1024-984", 0xB3DB),
("skein1024-992", 0xB3DC),
("skein1024-1000", 0xB3DD),
("skein1024-1008", 0xB3DE),
("skein1024-1016", 0xB3DF),
("skein1024-1024", 0xB3E0),
# multiaddrs
("ip4", 0x04),
@@ -233,7 +426,7 @@ const MultiCodecList = [
("dash-tx", 0xF1),
("torrent-info", 0x7B),
("torrent-file", 0x7C),
("ed25519-pub", 0xED)
("ed25519-pub", 0xED),
]
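
Each entry pairs a canonical multicodec name with its registered varint code, and the `multiCodec` compile-time helper used throughout these files resolves names against this list. Two spot checks against the codes above:

doAssert int(multiCodec("keccak-512")) == 0x1D
doAssert int(multiCodec("ed25519-pub")) == 0xED
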
type
@@ -241,8 +434,7 @@ type
MultiCodecError* = enum
MultiCodecNotSupported
const
InvalidMultiCodec* = MultiCodec(-1)
const InvalidMultiCodec* = MultiCodec(-1)
proc initMultiCodecNameTable(): Table[string, int] {.compileTime.} =
for item in MultiCodecList:


@@ -41,8 +41,9 @@ const
ErrParseError = "Parse error fromHex"
type
MHashCoderProc* = proc(data: openArray[byte],
output: var openArray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [].}
MHashCoderProc* = proc(data: openArray[byte], output: var openArray[byte]) {.
nimcall, gcsafe, noSideEffect, raises: []
.}
MHash* = object
mcodec*: MultiCodec
size*: int
@@ -58,107 +59,152 @@ type
proc identhash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var length = if len(data) > len(output): len(output)
else: len(data)
var length =
if len(data) > len(output):
len(output)
else:
len(data)
copyMem(addr output[0], unsafeAddr data[0], length)
proc sha1hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha1.digest(data)
var length = if sha1.sizeDigest > len(output): len(output)
else: sha1.sizeDigest
var length =
if sha1.sizeDigest > len(output):
len(output)
else:
sha1.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc dblsha2_256hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest1 = sha256.digest(data)
var digest2 = sha256.digest(digest1.data)
var length = if sha256.sizeDigest > len(output): len(output)
else: sha256.sizeDigest
var length =
if sha256.sizeDigest > len(output):
len(output)
else:
sha256.sizeDigest
copyMem(addr output[0], addr digest2.data[0], length)
proc blake2Bhash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = blake2_512.digest(data)
var length = if blake2_512.sizeDigest > len(output): len(output)
else: blake2_512.sizeDigest
var length =
if blake2_512.sizeDigest > len(output):
len(output)
else:
blake2_512.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc blake2Shash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = blake2_256.digest(data)
var length = if blake2_256.sizeDigest > len(output): len(output)
else: blake2_256.sizeDigest
var length =
if blake2_256.sizeDigest > len(output):
len(output)
else:
blake2_256.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha2_256hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha256.digest(data)
var length = if sha256.sizeDigest > len(output): len(output)
else: sha256.sizeDigest
var length =
if sha256.sizeDigest > len(output):
len(output)
else:
sha256.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha2_512hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha512.digest(data)
var length = if sha512.sizeDigest > len(output): len(output)
else: sha512.sizeDigest
var length =
if sha512.sizeDigest > len(output):
len(output)
else:
sha512.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha3_224hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha3_224.digest(data)
var length = if sha3_224.sizeDigest > len(output): len(output)
else: sha3_224.sizeDigest
var length =
if sha3_224.sizeDigest > len(output):
len(output)
else:
sha3_224.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha3_256hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha3_256.digest(data)
var length = if sha3_256.sizeDigest > len(output): len(output)
else: sha3_256.sizeDigest
var length =
if sha3_256.sizeDigest > len(output):
len(output)
else:
sha3_256.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha3_384hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha3_384.digest(data)
var length = if sha3_384.sizeDigest > len(output): len(output)
else: sha3_384.sizeDigest
var length =
if sha3_384.sizeDigest > len(output):
len(output)
else:
sha3_384.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha3_512hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha3_512.digest(data)
var length = if sha3_512.sizeDigest > len(output): len(output)
else: sha3_512.sizeDigest
var length =
if sha3_512.sizeDigest > len(output):
len(output)
else:
sha3_512.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc keccak_224hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = keccak224.digest(data)
var length = if keccak224.sizeDigest > len(output): len(output)
else: keccak224.sizeDigest
var length =
if keccak224.sizeDigest > len(output):
len(output)
else:
keccak224.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc keccak_256hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = keccak256.digest(data)
var length = if keccak256.sizeDigest > len(output): len(output)
else: keccak256.sizeDigest
var length =
if keccak256.sizeDigest > len(output):
len(output)
else:
keccak256.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc keccak_384hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = keccak384.digest(data)
var length = if keccak384.sizeDigest > len(output): len(output)
else: keccak384.sizeDigest
var length =
if keccak384.sizeDigest > len(output):
len(output)
else:
keccak384.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc keccak_512hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = keccak512.digest(data)
var length = if keccak512.sizeDigest > len(output): len(output)
else: keccak512.sizeDigest
var length =
if keccak512.sizeDigest > len(output):
len(output)
else:
keccak512.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc shake_128hash(data: openArray[byte], output: var openArray[byte]) =
@@ -179,151 +225,135 @@ proc shake_256hash(data: openArray[byte], output: var openArray[byte]) =
discard sctx.output(addr output[0], uint(len(output)))
sctx.clear()
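
Every coder above clamps the copy to whichever is smaller, the native digest size or the caller's buffer, so truncated multihashes (blake2b-8 and friends below) simply keep the leading bytes. The repeated pattern, distilled into one hypothetical helper rather than code from this diff:

proc clampedCopy(digest: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    let length = min(len(digest), len(output))   # never overrun either side
    copyMem(addr output[0], unsafeAddr digest[0], length)
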
const
HashesList = [
MHash(mcodec: multiCodec("identity"), size: 0,
coder: identhash),
MHash(mcodec: multiCodec("sha1"), size: sha1.sizeDigest,
coder: sha1hash),
MHash(mcodec: multiCodec("dbl-sha2-256"), size: sha256.sizeDigest,
coder: dblsha2_256hash
),
MHash(mcodec: multiCodec("sha2-256"), size: sha256.sizeDigest,
coder: sha2_256hash
),
MHash(mcodec: multiCodec("sha2-512"), size: sha512.sizeDigest,
coder: sha2_512hash
),
MHash(mcodec: multiCodec("sha3-224"), size: sha3_224.sizeDigest,
coder: sha3_224hash
),
MHash(mcodec: multiCodec("sha3-256"), size: sha3_256.sizeDigest,
coder: sha3_256hash
),
MHash(mcodec: multiCodec("sha3-384"), size: sha3_384.sizeDigest,
coder: sha3_384hash
),
MHash(mcodec: multiCodec("sha3-512"), size: sha3_512.sizeDigest,
coder: sha3_512hash
),
MHash(mcodec: multiCodec("shake-128"), size: 32, coder: shake_128hash),
MHash(mcodec: multiCodec("shake-256"), size: 64, coder: shake_256hash),
MHash(mcodec: multiCodec("keccak-224"), size: keccak224.sizeDigest,
coder: keccak_224hash
),
MHash(mcodec: multiCodec("keccak-256"), size: keccak256.sizeDigest,
coder: keccak_256hash
),
MHash(mcodec: multiCodec("keccak-384"), size: keccak384.sizeDigest,
coder: keccak_384hash
),
MHash(mcodec: multiCodec("keccak-512"), size: keccak512.sizeDigest,
coder: keccak_512hash
),
MHash(mcodec: multiCodec("blake2b-8"), size: 1, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-16"), size: 2, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-24"), size: 3, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-32"), size: 4, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-40"), size: 5, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-48"), size: 6, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-56"), size: 7, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-64"), size: 8, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-72"), size: 9, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-80"), size: 10, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-88"), size: 11, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-96"), size: 12, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-104"), size: 13, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-112"), size: 14, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-120"), size: 15, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-128"), size: 16, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-136"), size: 17, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-144"), size: 18, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-152"), size: 19, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-160"), size: 20, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-168"), size: 21, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-176"), size: 22, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-184"), size: 23, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-192"), size: 24, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-200"), size: 25, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-208"), size: 26, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-216"), size: 27, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-224"), size: 28, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-232"), size: 29, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-240"), size: 30, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-248"), size: 31, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-256"), size: 32, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-264"), size: 33, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-272"), size: 34, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-280"), size: 35, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-288"), size: 36, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-296"), size: 37, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-304"), size: 38, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-312"), size: 39, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-320"), size: 40, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-328"), size: 41, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-336"), size: 42, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-344"), size: 43, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-352"), size: 44, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-360"), size: 45, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-368"), size: 46, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-376"), size: 47, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-384"), size: 48, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-392"), size: 49, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-400"), size: 50, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-408"), size: 51, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-416"), size: 52, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-424"), size: 53, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-432"), size: 54, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-440"), size: 55, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-448"), size: 56, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-456"), size: 57, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-464"), size: 58, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-472"), size: 59, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-480"), size: 60, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-488"), size: 61, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-496"), size: 62, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-504"), size: 63, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-512"), size: 64, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2s-8"), size: 1, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-16"), size: 2, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-24"), size: 3, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-32"), size: 4, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-40"), size: 5, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-48"), size: 6, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-56"), size: 7, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-64"), size: 8, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-72"), size: 9, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-80"), size: 10, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-88"), size: 11, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-96"), size: 12, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-104"), size: 13, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-112"), size: 14, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-120"), size: 15, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-128"), size: 16, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-136"), size: 17, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-144"), size: 18, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-152"), size: 19, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-160"), size: 20, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-168"), size: 21, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-176"), size: 22, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-184"), size: 23, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-192"), size: 24, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-200"), size: 25, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-208"), size: 26, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-216"), size: 27, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-224"), size: 28, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-232"), size: 29, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-240"), size: 30, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-248"), size: 31, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-256"), size: 32, coder: blake2Shash)
]
const HashesList = [
MHash(mcodec: multiCodec("identity"), size: 0, coder: identhash),
MHash(mcodec: multiCodec("sha1"), size: sha1.sizeDigest, coder: sha1hash),
MHash(
mcodec: multiCodec("dbl-sha2-256"), size: sha256.sizeDigest, coder: dblsha2_256hash
),
MHash(mcodec: multiCodec("sha2-256"), size: sha256.sizeDigest, coder: sha2_256hash),
MHash(mcodec: multiCodec("sha2-512"), size: sha512.sizeDigest, coder: sha2_512hash),
MHash(mcodec: multiCodec("sha3-224"), size: sha3_224.sizeDigest, coder: sha3_224hash),
MHash(mcodec: multiCodec("sha3-256"), size: sha3_256.sizeDigest, coder: sha3_256hash),
MHash(mcodec: multiCodec("sha3-384"), size: sha3_384.sizeDigest, coder: sha3_384hash),
MHash(mcodec: multiCodec("sha3-512"), size: sha3_512.sizeDigest, coder: sha3_512hash),
MHash(mcodec: multiCodec("shake-128"), size: 32, coder: shake_128hash),
MHash(mcodec: multiCodec("shake-256"), size: 64, coder: shake_256hash),
MHash(
mcodec: multiCodec("keccak-224"), size: keccak224.sizeDigest, coder: keccak_224hash
),
MHash(
mcodec: multiCodec("keccak-256"), size: keccak256.sizeDigest, coder: keccak_256hash
),
MHash(
mcodec: multiCodec("keccak-384"), size: keccak384.sizeDigest, coder: keccak_384hash
),
MHash(
mcodec: multiCodec("keccak-512"), size: keccak512.sizeDigest, coder: keccak_512hash
),
MHash(mcodec: multiCodec("blake2b-8"), size: 1, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-16"), size: 2, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-24"), size: 3, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-32"), size: 4, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-40"), size: 5, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-48"), size: 6, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-56"), size: 7, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-64"), size: 8, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-72"), size: 9, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-80"), size: 10, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-88"), size: 11, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-96"), size: 12, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-104"), size: 13, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-112"), size: 14, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-120"), size: 15, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-128"), size: 16, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-136"), size: 17, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-144"), size: 18, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-152"), size: 19, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-160"), size: 20, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-168"), size: 21, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-176"), size: 22, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-184"), size: 23, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-192"), size: 24, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-200"), size: 25, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-208"), size: 26, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-216"), size: 27, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-224"), size: 28, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-232"), size: 29, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-240"), size: 30, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-248"), size: 31, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-256"), size: 32, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-264"), size: 33, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-272"), size: 34, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-280"), size: 35, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-288"), size: 36, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-296"), size: 37, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-304"), size: 38, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-312"), size: 39, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-320"), size: 40, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-328"), size: 41, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-336"), size: 42, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-344"), size: 43, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-352"), size: 44, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-360"), size: 45, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-368"), size: 46, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-376"), size: 47, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-384"), size: 48, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-392"), size: 49, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-400"), size: 50, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-408"), size: 51, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-416"), size: 52, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-424"), size: 53, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-432"), size: 54, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-440"), size: 55, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-448"), size: 56, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-456"), size: 57, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-464"), size: 58, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-472"), size: 59, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-480"), size: 60, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-488"), size: 61, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-496"), size: 62, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-504"), size: 63, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2b-512"), size: 64, coder: blake2Bhash),
MHash(mcodec: multiCodec("blake2s-8"), size: 1, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-16"), size: 2, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-24"), size: 3, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-32"), size: 4, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-40"), size: 5, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-48"), size: 6, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-56"), size: 7, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-64"), size: 8, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-72"), size: 9, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-80"), size: 10, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-88"), size: 11, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-96"), size: 12, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-104"), size: 13, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-112"), size: 14, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-120"), size: 15, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-128"), size: 16, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-136"), size: 17, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-144"), size: 18, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-152"), size: 19, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-160"), size: 20, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-168"), size: 21, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-176"), size: 22, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-184"), size: 23, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-192"), size: 24, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-200"), size: 25, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-208"), size: 26, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-216"), size: 27, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-224"), size: 28, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-232"), size: 29, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-240"), size: 30, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-248"), size: 31, coder: blake2Shash),
MHash(mcodec: multiCodec("blake2s-256"), size: 32, coder: blake2Shash),
]
proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
for item in HashesList:
result[item.mcodec] = item
const
CodeHashes = initMultiHashCodeTable()
const CodeHashes = initMultiHashCodeTable()
proc digestImplWithHash(hash: MHash, data: openArray[byte]): MultiHash =
var buffer: array[MaxHashSize, byte]
@@ -353,8 +383,9 @@ proc digestImplWithoutHash(hash: MHash, data: openArray[byte]): MultiHash =
result.data.writeArray(data)
result.data.finish()
proc digest*(mhtype: typedesc[MultiHash], hashname: string,
data: openArray[byte]): MhResult[MultiHash] {.inline.} =
proc digest*(
mhtype: typedesc[MultiHash], hashname: string, data: openArray[byte]
): MhResult[MultiHash] {.inline.} =
## Perform digest calculation using hash algorithm with name ``hashname`` on
## data array ``data``.
let mc = MultiCodec.codec(hashname)
@@ -367,8 +398,9 @@ proc digest*(mhtype: typedesc[MultiHash], hashname: string,
else:
ok(digestImplWithHash(hash, data))
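
A hedged usage sketch of this name-based overload (again assuming stew's Result `get` helper):

let mh = MultiHash.digest("sha2-256", [byte 1, 2, 3]).get()
doAssert mh.mcodec == multiCodec("sha2-256")
doAssert mh.size == 32    # sha2-256 digest length
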
proc digest*(mhtype: typedesc[MultiHash], hashcode: int,
data: openArray[byte]): MhResult[MultiHash] {.inline.} =
proc digest*(
mhtype: typedesc[MultiHash], hashcode: int, data: openArray[byte]
): MhResult[MultiHash] {.inline.} =
## Perform digest calculation using hash algorithm with code ``hashcode`` on
## data array ``data``.
let hash = CodeHashes.getOrDefault(hashcode)
@@ -377,8 +409,9 @@ proc digest*(mhtype: typedesc[MultiHash], hashcode: int,
else:
ok(digestImplWithHash(hash, data))
proc init*[T](mhtype: typedesc[MultiHash], hashname: string,
mdigest: MDigest[T]): MhResult[MultiHash] {.inline.} =
proc init*[T](
mhtype: typedesc[MultiHash], hashname: string, mdigest: MDigest[T]
): MhResult[MultiHash] {.inline.} =
## Create MultiHash from nimcrypto's `MDigest` object and hash algorithm name
## ``hashname``.
let mc = MultiCodec.codec(hashname)
@@ -393,8 +426,9 @@ proc init*[T](mhtype: typedesc[MultiHash], hashname: string,
else:
ok(digestImplWithoutHash(hash, mdigest.data))
proc init*[T](mhtype: typedesc[MultiHash], hashcode: MultiCodec,
mdigest: MDigest[T]): MhResult[MultiHash] {.inline.} =
proc init*[T](
mhtype: typedesc[MultiHash], hashcode: MultiCodec, mdigest: MDigest[T]
): MhResult[MultiHash] {.inline.} =
## Create MultiHash from nimcrypto's `MDigest` and hash algorithm code
## ``hashcode``.
let hash = CodeHashes.getOrDefault(hashcode)
@@ -405,8 +439,9 @@ proc init*[T](mhtype: typedesc[MultiHash], hashcode: MultiCodec,
else:
ok(digestImplWithoutHash(hash, mdigest.data))
proc init*(mhtype: typedesc[MultiHash], hashname: string,
bdigest: openArray[byte]): MhResult[MultiHash] {.inline.} =
proc init*(
mhtype: typedesc[MultiHash], hashname: string, bdigest: openArray[byte]
): MhResult[MultiHash] {.inline.} =
## Create MultiHash from array of bytes ``bdigest`` and hash algorithm name
## ``hashname``.
let mc = MultiCodec.codec(hashname)
@@ -421,8 +456,9 @@ proc init*(mhtype: typedesc[MultiHash], hashname: string,
else:
ok(digestImplWithoutHash(hash, bdigest))
proc init*(mhtype: typedesc[MultiHash], hashcode: MultiCodec,
bdigest: openArray[byte]): MhResult[MultiHash] {.inline.} =
proc init*(
mhtype: typedesc[MultiHash], hashcode: MultiCodec, bdigest: openArray[byte]
): MhResult[MultiHash] {.inline.} =
## Create MultiHash from array of bytes ``bdigest`` and hash algorithm code
## ``hashcode``.
let hash = CodeHashes.getOrDefault(hashcode)
@@ -433,8 +469,9 @@ proc init*(mhtype: typedesc[MultiHash], hashcode: MultiCodec,
else:
ok(digestImplWithoutHash(hash, bdigest))
proc decode*(mhtype: typedesc[MultiHash], data: openArray[byte],
mhash: var MultiHash): MhResult[int] =
proc decode*(
mhtype: typedesc[MultiHash], data: openArray[byte], mhash: var MultiHash
): MhResult[int] =
## Decode MultiHash value from array of bytes ``data``.
##
## On success decoded MultiHash will be stored into ``mhash`` and number of
@@ -473,9 +510,10 @@ proc decode*(mhtype: typedesc[MultiHash], data: openArray[byte],
if not vb.isEnough(int(size)):
return err(ErrDecodeError)
mhash = ? MultiHash.init(MultiCodec(code),
vb.buffer.toOpenArray(vb.offset,
vb.offset + int(size) - 1))
mhash =
?MultiHash.init(
MultiCodec(code), vb.buffer.toOpenArray(vb.offset, vb.offset + int(size) - 1)
)
ok(vb.offset + int(size))
proc validate*(mhtype: typedesc[MultiHash], data: openArray[byte]): bool =
@@ -508,24 +546,24 @@ proc validate*(mhtype: typedesc[MultiHash], data: openArray[byte]): bool =
return false
result = true
proc init*(mhtype: typedesc[MultiHash],
data: openArray[byte]): MhResult[MultiHash] {.inline.} =
proc init*(
mhtype: typedesc[MultiHash], data: openArray[byte]
): MhResult[MultiHash] {.inline.} =
## Create MultiHash from byte array ``data``.
var hash: MultiHash
discard ? MultiHash.decode(data, hash)
discard ?MultiHash.decode(data, hash)
ok(hash)
proc init*(mhtype: typedesc[MultiHash], data: string): MhResult[MultiHash] {.inline.} =
## Create MultiHash from hexadecimal string representation ``data``.
var hash: MultiHash
try:
discard ? MultiHash.decode(fromHex(data), hash)
discard ?MultiHash.decode(fromHex(data), hash)
ok(hash)
except ValueError:
err(ErrParseError)
proc init58*(mhtype: typedesc[MultiHash],
data: string): MultiHash {.inline.} =
proc init58*(mhtype: typedesc[MultiHash], data: string): MultiHash {.inline.} =
## Create MultiHash from BASE58 encoded string representation ``data``.
if MultiHash.decode(Base58.decode(data), result) == -1:
raise newException(MultihashError, "Incorrect MultiHash binary format")
@@ -538,7 +576,7 @@ proc cmp(a: openArray[byte], b: openArray[byte]): bool {.inline.} =
while n > 0:
dec(n)
diff = int(a[n]) - int(b[n])
res = (res and -not(diff)) or diff
res = (res and - not (diff)) or diff
result = (res == 0)
proc `==`*[T](mh: MultiHash, mdigest: MDigest[T]): bool =
@@ -548,8 +586,10 @@ proc `==`*[T](mh: MultiHash, mdigest: MDigest[T]): bool =
return false
if len(mdigest.data) != mh.size:
return false
result = cmp(mh.data.buffer.toOpenArray(mh.dpos, mh.dpos + mh.size - 1),
mdigest.data.toOpenArray(0, mdigest.data.high))
result = cmp(
mh.data.buffer.toOpenArray(mh.dpos, mh.dpos + mh.size - 1),
mdigest.data.toOpenArray(0, mdigest.data.high),
)
proc `==`*[T](mdigest: MDigest[T], mh: MultiHash): bool {.inline.} =
## Compares MultiHash with nimcrypto's MDigest[T], returns ``true`` if
@@ -565,8 +605,10 @@ proc `==`*(a: MultiHash, b: MultiHash): bool =
return false
if a.size != b.size:
return false
result = cmp(a.data.buffer.toOpenArray(a.dpos, a.dpos + a.size - 1),
b.data.buffer.toOpenArray(b.dpos, b.dpos + b.size - 1))
result = cmp(
a.data.buffer.toOpenArray(a.dpos, a.dpos + a.size - 1),
b.data.buffer.toOpenArray(b.dpos, b.dpos + b.size - 1),
)
proc hex*(value: MultiHash): string =
## Return hexadecimal string representation of MultiHash ``value``.
@@ -578,16 +620,16 @@ proc base58*(value: MultiHash): string =
proc `$`*(mh: MultiHash): string =
## Return string representation of MultiHash ``value``.
let digest = toHex(mh.data.buffer.toOpenArray(mh.dpos,
mh.dpos + mh.size - 1))
let digest = toHex(mh.data.buffer.toOpenArray(mh.dpos, mh.dpos + mh.size - 1))
result = $(mh.mcodec) & "/" & digest
proc write*(vb: var VBuffer, mh: MultiHash) {.inline.} =
## Write MultiHash value ``mh`` to buffer ``vb``.
vb.writeArray(mh.data.buffer)
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
mh: MultiHash): string {.inline.} =
proc encode*(
mbtype: typedesc[MultiBase], encoding: string, mh: MultiHash
): string {.inline.} =
## Get MultiBase encoded representation of ``mh`` using encoding
## ``encoding``.
result = MultiBase.encode(encoding, mh.data.buffer)
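
# --- editor's illustrative sketch, not part of this commit ------------------
# Round-trip of the MultiHash API reformatted above. The module path and the
# availability of the "sha2-256" codec are assumptions based on nim-libp2p's
# usual layout.
import libp2p/multihash, stew/byteutils

let mh = MultiHash.digest("sha2-256", "hello".toBytes()).tryGet()
echo $mh                                 # "sha2-256/<hex digest>"
let roundTrip = MultiHash.init(mh.data.buffer).tryGet()
assert roundTrip == mh                   # same codec, size and digest bytes
# -----------------------------------------------------------------------------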

View File

@@ -11,8 +11,7 @@
import std/[strutils, sequtils, tables]
import chronos, chronicles, stew/byteutils
import stream/connection,
protocols/protocol
import stream/connection, protocols/protocol
logScope:
topics = "libp2p multistream"
@@ -25,7 +24,7 @@ const
Ls = "ls\n"
type
Matcher* = proc (proto: string): bool {.gcsafe, raises: [].}
Matcher* = proc(proto: string): bool {.gcsafe, raises: [].}
MultiStreamError* = object of LPError
@@ -40,23 +39,17 @@ type
codec*: string
proc new*(T: typedesc[MultistreamSelect]): T =
T(
codec: Codec,
)
T(codec: Codec)
template validateSuffix(str: string): untyped =
if str.endsWith("\n"):
str.removeSuffix("\n")
else:
raise (ref MultiStreamError)(msg:
"MultistreamSelect failed, malformed message")
raise (ref MultiStreamError)(msg: "MultistreamSelect failed, malformed message")
proc select*(
_: MultistreamSelect | type MultistreamSelect,
conn: Connection,
proto: seq[string]
): Future[string] {.async: (raises: [
CancelledError, LPStreamError, MultiStreamError]).} =
_: MultistreamSelect | type MultistreamSelect, conn: Connection, proto: seq[string]
): Future[string] {.async: (raises: [CancelledError, LPStreamError, MultiStreamError]).} =
trace "initiating handshake", conn, codec = Codec
## select a remote protocol
await conn.writeLp(Codec & "\n") # write handshake
@@ -85,7 +78,7 @@ proc select*(
return proto[0]
elif proto.len > 1:
# Try to negotiate alternatives
let protos = proto[1..<proto.len()]
let protos = proto[1 ..< proto.len()]
trace "selecting one of several protos", conn, protos = protos
for p in protos:
trace "selecting proto", conn, proto = p
@@ -102,28 +95,25 @@ proc select*(
return ""
proc select*(
_: MultistreamSelect | type MultistreamSelect,
conn: Connection,
proto: string
): Future[bool] {.async: (raises: [
CancelledError, LPStreamError, MultiStreamError]).} =
_: MultistreamSelect | type MultistreamSelect, conn: Connection, proto: string
): Future[bool] {.async: (raises: [CancelledError, LPStreamError, MultiStreamError]).} =
if proto.len > 0:
(await MultistreamSelect.select(conn, @[proto])) == proto
else:
(await MultistreamSelect.select(conn, @[])) == Codec
proc select*(
m: MultistreamSelect,
conn: Connection
): Future[bool] {.async: (raises: [
CancelledError, LPStreamError, MultiStreamError], raw: true).} =
m: MultistreamSelect, conn: Connection
): Future[bool] {.
async: (raises: [CancelledError, LPStreamError, MultiStreamError], raw: true)
.} =
m.select(conn, "")
proc list*(
m: MultistreamSelect,
conn: Connection
): Future[seq[string]] {.async: (raises: [
CancelledError, LPStreamError, MultiStreamError]).} =
m: MultistreamSelect, conn: Connection
): Future[seq[string]] {.
async: (raises: [CancelledError, LPStreamError, MultiStreamError])
.} =
## list the protocols supported by the remote peer on this connection
if not await m.select(conn):
return
@@ -143,9 +133,8 @@ proc handle*(
conn: Connection,
protos: seq[string],
matchers = newSeq[Matcher](),
active: bool = false
): Future[string] {.async: (raises: [
CancelledError, LPStreamError, MultiStreamError]).} =
active: bool = false,
): Future[string] {.async: (raises: [CancelledError, LPStreamError, MultiStreamError]).} =
trace "Starting multistream negotiation", conn, handshaked = active
var handshaked = active
while not conn.atEof:
@@ -153,16 +142,17 @@ proc handle*(
validateSuffix(ms)
if not handshaked and ms != Codec:
debug "expected handshake message", conn, instead=ms
raise (ref MultiStreamError)(msg:
"MultistreamSelect handling failed, invalid first message")
debug "expected handshake message", conn, instead = ms
raise (ref MultiStreamError)(
msg: "MultistreamSelect handling failed, invalid first message"
)
trace "handle: got request", conn, ms
if ms.len() <= 0:
trace "handle: invalid proto", conn
await conn.writeLp(Na)
case ms:
case ms
of "ls":
trace "handle: listing protos", conn
#TODO this doesn't seem to follow spec, each protocol
@@ -174,8 +164,7 @@ proc handle*(
await conn.writeLp(Codec & "\n")
handshaked = true
else:
trace "handle: sending `na` for duplicate handshake while handshaked",
conn
trace "handle: sending `na` for duplicate handshake while handshaked", conn
await conn.writeLp(Na)
elif ms in protos or matchers.anyIt(it(ms)):
trace "found handler", conn, protocol = ms
@@ -187,9 +176,8 @@ proc handle*(
await conn.writeLp(Na)
proc handle*(
m: MultistreamSelect,
conn: Connection,
active: bool = false) {.async: (raises: [CancelledError]).} =
m: MultistreamSelect, conn: Connection, active: bool = false
) {.async: (raises: [CancelledError]).} =
trace "Starting multistream handler", conn, handshaked = active
var
protos: seq[string]
@@ -208,8 +196,7 @@ proc handle*(
var protocolHolder = h
let maxIncomingStreams = protocolHolder.protocol.maxIncomingStreams
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >=
maxIncomingStreams:
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >= maxIncomingStreams:
debug "Max streams for protocol reached, blocking new stream",
conn, protocol = ms, maxIncomingStreams
return
@@ -231,38 +218,35 @@ proc handle*(
trace "Stopped multistream handler", conn
proc addHandler*(m: MultistreamSelect,
codecs: seq[string],
protocol: LPProtocol,
matcher: Matcher = nil) =
proc addHandler*(
m: MultistreamSelect,
codecs: seq[string],
protocol: LPProtocol,
matcher: Matcher = nil,
) =
trace "registering protocols", protos = codecs
m.handlers.add(HandlerHolder(protos: codecs,
protocol: protocol,
match: matcher))
m.handlers.add(HandlerHolder(protos: codecs, protocol: protocol, match: matcher))
proc addHandler*(m: MultistreamSelect,
codec: string,
protocol: LPProtocol,
matcher: Matcher = nil) =
proc addHandler*(
m: MultistreamSelect, codec: string, protocol: LPProtocol, matcher: Matcher = nil
) =
addHandler(m, @[codec], protocol, matcher)
proc addHandler*[E](
m: MultistreamSelect,
codec: string,
handler: LPProtoHandler |
proc (
conn: Connection,
proto: string): InternalRaisesFuture[void, E],
matcher: Matcher = nil) =
handler:
LPProtoHandler |
proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
matcher: Matcher = nil,
) =
## helper to allow registering pure handlers
trace "registering proto handler", proto = codec
let protocol = new LPProtocol
protocol.codec = codec
protocol.handler = handler
m.handlers.add(HandlerHolder(protos: @[codec],
protocol: protocol,
match: matcher))
m.handlers.add(HandlerHolder(protos: @[codec], protocol: protocol, match: matcher))
proc start*(m: MultistreamSelect) {.async: (raises: [CancelledError]).} =
# Nim 1.6.18: Using `mapIt` results in a seq of `.Raising([])`
@@ -283,7 +267,8 @@ proc start*(m: MultistreamSelect) {.async: (raises: [CancelledError]).} =
elif fut.completed:
pending.add m.handlers[i].protocol.stop()
else:
static: doAssert typeof(fut).E is (CancelledError,)
static:
doAssert typeof(fut).E is (CancelledError,)
await noCancel allFutures(pending)
raise exc
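
# --- editor's illustrative sketch, not part of this commit ------------------
# Registering a handler through the addHandler overloads reformatted above.
# The "/echo/1.0.0" codec and the handler body are made up for illustration.
import chronos
import libp2p/multistream, libp2p/stream/connection

let ms = MultistreamSelect.new()

proc echoHandler(conn: Connection, proto: string) {.async.} =
  # toy protocol: echo one length-prefixed message back to the sender
  await conn.writeLp(await conn.readLp(1024))

ms.addHandler("/echo/1.0.0", echoHandler)
# an accepting transport would then run `await ms.handle(conn)` per stream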

View File

@@ -10,29 +10,22 @@
{.push raises: [].}
import pkg/[chronos, chronicles, stew/byteutils]
import ../../stream/connection,
../../utility,
../../varint,
../../vbuffer,
../muxer
import ../../stream/connection, ../../utility, ../../varint, ../../vbuffer, ../muxer
logScope:
topics = "libp2p mplexcoder"
type
MessageType* {.pure.} = enum
New,
MsgIn,
MsgOut,
CloseIn,
CloseOut,
ResetIn,
New
MsgIn
MsgOut
CloseIn
CloseOut
ResetIn
ResetOut
Msg* = tuple
id: uint64
msgType: MessageType
data: seq[byte]
Msg* = tuple[id: uint64, msgType: MessageType, data: seq[byte]]
InvalidMplexMsgType* = object of MuxerError
@@ -44,8 +37,7 @@ proc newInvalidMplexMsgType*(): ref InvalidMplexMsgType =
proc readMsg*(
conn: Connection
): Future[Msg] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
): Future[Msg] {.async: (raises: [CancelledError, LPStreamError, MuxerError]).} =
let header = await conn.readVarint()
trace "read header varint", varint = header, conn
@@ -59,12 +51,8 @@ proc readMsg*(
return (header shr 3, MessageType(msgType), data)
proc writeMsg*(
conn: Connection,
id: uint64,
msgType: MessageType,
data: seq[byte] = @[]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
conn: Connection, id: uint64, msgType: MessageType, data: seq[byte] = @[]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
var
left = data.len
offset = 0
@@ -72,8 +60,11 @@ proc writeMsg*(
# Split message into length-prefixed chunks
while left > 0 or data.len == 0:
let
chunkSize = if left > MaxMsgSize: MaxMsgSize - 64 else: left
let chunkSize =
if left > MaxMsgSize:
MaxMsgSize - 64
else:
left
buf.writePBVarint(id shl 3 or ord(msgType).uint64)
buf.writeSeq(data.toOpenArray(offset, offset + chunkSize - 1))
@@ -91,10 +82,6 @@ proc writeMsg*(
conn.write(buf.buffer)
proc writeMsg*(
conn: Connection,
id: uint64,
msgType: MessageType,
data: string
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
conn: Connection, id: uint64, msgType: MessageType, data: string
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
conn.writeMsg(id, msgType, data.toBytes())
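
# --- editor's illustrative sketch, not part of this commit ------------------
# The mplex frame header packs channel id and message type into one varint,
# `id shl 3 or ord(msgType)`: the low three bits carry the type, the rest the
# id. A quick sanity check of that packing:
import libp2p/muxers/mplex/coder

let header = 5'u64 shl 3 or ord(MessageType.MsgOut).uint64
assert header shr 3 == 5'u64                        # channel id recovered
assert MessageType(int(header and 0x7'u64)) == MessageType.MsgOut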

View File

@@ -11,10 +11,8 @@
import std/[oids, strformat]
import pkg/[chronos, chronicles, metrics]
import ./coder,
../muxer,
../../stream/[bufferstream, connection, streamseq],
../../peerinfo
import
./coder, ../muxer, ../../stream/[bufferstream, connection, streamseq], ../../peerinfo
export connection
@@ -22,7 +20,8 @@ logScope:
topics = "libp2p mplexchannel"
when defined(libp2p_mplex_metrics):
declareHistogram libp2p_mplex_qlen, "message queue length",
declareHistogram libp2p_mplex_qlen,
"message queue length",
buckets = [0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0]
declareCounter libp2p_mplex_qlenclose, "closed because of max queuelen"
declareHistogram libp2p_mplex_qtime, "message queuing time"
@@ -43,36 +42,39 @@ when defined(libp2p_network_protocols_metrics):
## EOF marker
const
MaxWrites = 1024 ##\
MaxWrites = 1024
##\
## Maximum number of in-flight writes - after this, we disconnect the peer
LPChannelTrackerName* = "LPChannel"
type
LPChannel* = ref object of BufferStream
id*: uint64 # channel id
name*: string # name of the channel (for debugging)
conn*: Connection # wrapped connection used for writing
initiator*: bool # initiated remotely or locally flag
isOpen*: bool # has channel been opened
closedLocal*: bool # has channel been closed locally
remoteReset*: bool # has channel been remotely reset
localReset*: bool # has channel been reset locally
msgCode*: MessageType # cached in/out message code
closeCode*: MessageType # cached in/out close code
resetCode*: MessageType # cached in/out reset code
writes*: int # In-flight writes
type LPChannel* = ref object of BufferStream
id*: uint64 # channel id
name*: string # name of the channel (for debugging)
conn*: Connection # wrapped connection used for writing
initiator*: bool # initiated remotely or locally flag
isOpen*: bool # has channel been opened
closedLocal*: bool # has channel been closed locally
remoteReset*: bool # has channel been remotely reset
localReset*: bool # has channel been reset locally
msgCode*: MessageType # cached in/out message code
closeCode*: MessageType # cached in/out close code
resetCode*: MessageType # cached in/out reset code
writes*: int # In-flight writes
func shortLog*(s: LPChannel): auto =
try:
if s == nil: "LPChannel(nil)"
if s == nil:
"LPChannel(nil)"
elif s.name != $s.oid and s.name.len > 0:
&"{shortLog(s.conn.peerId)}:{s.oid}:{s.name}"
else: &"{shortLog(s.conn.peerId)}:{s.oid}"
else:
&"{shortLog(s.conn.peerId)}:{s.oid}"
except ValueError as exc:
raiseAssert(exc.msg)
chronicles.formatIt(LPChannel): shortLog(it)
chronicles.formatIt(LPChannel):
shortLog(it)
proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
trace "Opening channel", s, conn = s.conn
@@ -160,9 +162,7 @@ method initStream*(s: LPChannel) =
procCall BufferStream(s).initStream()
method readOnce*(
s: LPChannel,
pbytes: pointer,
nbytes: int
s: LPChannel, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
## Mplex relies on reading being done regularly from every channel, or all
## channels are blocked - in particular, this means that reading from one
@@ -180,7 +180,7 @@ method readOnce*(
let bytes = await procCall BufferStream(s).readOnce(pbytes, nbytes)
when defined(libp2p_network_protocols_metrics):
if s.protocol.len > 0:
libp2p_protocols_bytes.inc(bytes.int64, labelValues=[s.protocol, "in"])
libp2p_protocols_bytes.inc(bytes.int64, labelValues = [s.protocol, "in"])
trace "readOnce", s, bytes
if bytes == 0:
@@ -196,8 +196,7 @@ method readOnce*(
raise newLPStreamConnDownError(exc)
proc prepareWrite(
s: LPChannel,
msg: seq[byte]
s: LPChannel, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
# prepareWrite is the slow path of writing a message - see conditions in
# write
@@ -215,7 +214,7 @@ proc prepareWrite(
debug "Closing connection, too many in-flight writes on channel",
s, conn = s.conn, writes = s.writes
when defined(libp2p_mplex_metrics):
libp2p_mplex_qlenclose.inc()
libp2p_mplex_qlenclose.inc()
await s.reset()
await s.conn.close()
return
@@ -228,7 +227,7 @@ proc prepareWrite(
proc completeWrite(
s: LPChannel,
fut: Future[void].Raising([CancelledError, LPStreamError]),
msgLen: int
msgLen: int,
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
try:
s.writes += 1
@@ -244,8 +243,7 @@ proc completeWrite(
if s.protocol.len > 0:
# This crashes on Nim 2.0.2 with `--mm:orc` during `nimble test`
# https://github.com/status-im/nim-metrics/issues/79
libp2p_protocols_bytes.inc(
msgLen.int64, labelValues = [s.protocol, "out"])
libp2p_protocols_bytes.inc(msgLen.int64, labelValues = [s.protocol, "out"])
s.activity = true
except CancelledError as exc:
@@ -266,15 +264,12 @@ proc completeWrite(
s.writes -= 1
method write*(
s: LPChannel,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
s: LPChannel, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
## Write to mplex channel - there may be up to MaxWrites concurrent writes
## pending, after which the peer is disconnected
let
closed = s.closedLocal or s.conn.closed
let closed = s.closedLocal or s.conn.closed
let fut =
if (not closed) and msg.len > 0 and s.writes < MaxWrites and s.isOpen:
@@ -287,7 +282,8 @@ method write*(
s.completeWrite(fut, msg.len)
method getWrapped*(s: LPChannel): Connection = s.conn
method getWrapped*(s: LPChannel): Connection =
s.conn
proc init*(
L: type LPChannel,
@@ -295,7 +291,8 @@ proc init*(
conn: Connection,
initiator: bool,
name: string = "",
timeout: Duration = DefaultChanTimeout): LPChannel =
timeout: Duration = DefaultChanTimeout,
): LPChannel =
let chann = L(
id: id,
name: name,
@@ -306,12 +303,17 @@ proc init*(
msgCode: if initiator: MessageType.MsgOut else: MessageType.MsgIn,
closeCode: if initiator: MessageType.CloseOut else: MessageType.CloseIn,
resetCode: if initiator: MessageType.ResetOut else: MessageType.ResetIn,
dir: if initiator: Direction.Out else: Direction.In)
dir: if initiator: Direction.Out else: Direction.In,
)
chann.initStream()
when chronicles.enabledLogLevel == LogLevel.TRACE:
chann.name = if chann.name.len > 0: chann.name else: $chann.oid
chann.name =
if chann.name.len > 0:
chann.name
else:
$chann.oid
trace "Created new lpchannel", s = chann, id, initiator

View File

@@ -11,13 +11,14 @@
import tables, sequtils, oids
import chronos, chronicles, stew/byteutils, metrics
import ../muxer,
../../stream/connection,
../../stream/bufferstream,
../../utility,
../../peerinfo,
./coder,
./lpchannel
import
../muxer,
../../stream/connection,
../../stream/bufferstream,
../../utility,
../../peerinfo,
./coder,
./lpchannel
export muxer
@@ -26,12 +27,10 @@ logScope:
const MplexCodec* = "/mplex/6.7.0"
const
MaxChannelCount = 200
const MaxChannelCount = 200
when defined(libp2p_expensive_metrics):
declareGauge(libp2p_mplex_channels,
"mplex channels", labels = ["initiator", "peer"])
declareGauge(libp2p_mplex_channels, "mplex channels", labels = ["initiator", "peer"])
type
InvalidChannelIdError* = object of MuxerError
@@ -48,7 +47,8 @@ type
func shortLog*(m: Mplex): auto =
shortLog(m.connection)
chronicles.formatIt(Mplex): shortLog(it)
chronicles.formatIt(Mplex):
shortLog(it)
proc newTooManyChannels(): ref TooManyChannels =
newException(TooManyChannels, "max allowed channel count exceeded")
@@ -67,7 +67,8 @@ proc cleanupChann(m: Mplex, chann: LPChannel) {.async: (raises: []), inline.} =
when defined(libp2p_expensive_metrics):
libp2p_mplex_channels.set(
m.channels[chann.initiator].len.int64,
labelValues = [$chann.initiator, $m.connection.peerId])
labelValues = [$chann.initiator, $m.connection.peerId],
)
except CancelledError as exc:
warn "Error cleaning up mplex channel", m, chann, msg = exc.msg
@@ -76,22 +77,21 @@ proc newStreamInternal*(
initiator: bool = true,
chanId: uint64 = 0,
name: string = "",
timeout: Duration): LPChannel {.gcsafe, raises: [InvalidChannelIdError].} =
timeout: Duration,
): LPChannel {.gcsafe, raises: [InvalidChannelIdError].} =
## create new channel/stream
##
let id =
if initiator: m.currentId.inc(); m.currentId
else: chanId
if initiator:
m.currentId.inc()
m.currentId
else:
chanId
if id in m.channels[initiator]:
raise newInvalidChannelIdError()
result = LPChannel.init(
id,
m.connection,
initiator,
name,
timeout = timeout)
result = LPChannel.init(id, m.connection, initiator, name, timeout = timeout)
result.peerId = m.connection.peerId
result.observedAddr = m.connection.observedAddr
@@ -108,8 +108,8 @@ proc newStreamInternal*(
when defined(libp2p_expensive_metrics):
libp2p_mplex_channels.set(
m.channels[initiator].len.int64,
labelValues = [$initiator, $m.connection.peerId])
m.channels[initiator].len.int64, labelValues = [$initiator, $m.connection.peerId]
)
proc handleStream(m: Mplex, chann: LPChannel) {.async: (raises: []).} =
## call the muxer stream handler for this channel
@@ -146,7 +146,7 @@ method handle*(m: Mplex) {.async: (raises: []).} =
else:
if m.channels[false].len > m.maxChannCount - 1:
warn "too many channels created by remote peer",
allowedMax = MaxChannelCount, m
allowedMax = MaxChannelCount, m
raise newTooManyChannels()
let name = string.fromBytes(data)
@@ -154,7 +154,7 @@ method handle*(m: Mplex) {.async: (raises: []).} =
trace "Processing channel message", m, channel, data = data.shortLog
case msgType:
case msgType
of MessageType.New:
trace "created channel", m, channel
@@ -162,11 +162,10 @@ method handle*(m: Mplex) {.async: (raises: []).} =
# Launch handler task
# All the errors are handled inside `handleStream()` procedure.
asyncSpawn m.handleStream(channel)
of MessageType.MsgIn, MessageType.MsgOut:
if data.len > MaxMsgSize:
warn "attempting to send a packet larger than allowed",
allowed = MaxMsgSize, channel
allowed = MaxMsgSize, channel
raise newLPStreamLimitError()
trace "pushing data to channel", m, channel, len = data.len
@@ -175,10 +174,9 @@ method handle*(m: Mplex) {.async: (raises: []).} =
trace "pushed data to channel", m, channel, len = data.len
except LPStreamClosedError as exc:
# Channel is being closed, but `cleanupChann` was not yet triggered.
trace "pushing data to channel failed", m, channel, len = data.len,
msg = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.
trace "pushing data to channel failed",
m, channel, len = data.len, msg = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.
of MessageType.CloseIn, MessageType.CloseOut:
await channel.pushEof()
of MessageType.ResetIn, MessageType.ResetOut:
@@ -201,19 +199,19 @@ proc new*(
conn: Connection,
inTimeout: Duration = DefaultChanTimeout,
outTimeout: Duration = DefaultChanTimeout,
maxChannCount: int = MaxChannelCount): Mplex =
M(connection: conn,
maxChannCount: int = MaxChannelCount,
): Mplex =
M(
connection: conn,
inChannTimeout: inTimeout,
outChannTimeout: outTimeout,
oid: genOid(),
maxChannCount: maxChannCount)
maxChannCount: maxChannCount,
)
method newStream*(
m: Mplex,
name: string = "",
lazy: bool = false
): Future[Connection] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
m: Mplex, name: string = "", lazy: bool = false
): Future[Connection] {.async: (raises: [CancelledError, LPStreamError, MuxerError]).} =
let channel = m.newStreamInternal(timeout = m.inChannTimeout)
if not lazy:
@@ -250,5 +248,7 @@ method close*(m: Mplex) {.async: (raises: []).} =
trace "Closed mplex", m
method getStreams*(m: Mplex): seq[Connection] =
for c in m.channels[false].values: result.add(c)
for c in m.channels[true].values: result.add(c)
for c in m.channels[false].values:
result.add(c)
for c in m.channels[true].values:
result.add(c)
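
# --- editor's illustrative sketch, not part of this commit ------------------
# Driving the Mplex API reformatted above; `secured` stands in for an
# already-negotiated Connection and is an assumption of this sketch.
import chronos
import libp2p/muxers/mplex/mplex, libp2p/stream/connection

proc demo(secured: Connection) {.async.} =
  let m = Mplex.new(secured, maxChannCount = 64)
  asyncSpawn m.handle()                  # run the frame read loop
  let stream = await m.newStream("demo")
  await stream.writeLp("ping")
  await stream.close()
  await m.close()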

View File

@@ -10,14 +10,12 @@
{.push raises: [].}
import chronos, chronicles
import ../stream/connection,
../errors
import ../stream/connection, ../errors
logScope:
topics = "libp2p muxer"
const
DefaultChanTimeout* = 5.minutes
const DefaultChanTimeout* = 5.minutes
type
MuxerError* = object of LPError
@@ -32,8 +30,7 @@ type
connection*: Connection
# user provider proc that returns a constructed Muxer
MuxerConstructor* =
proc(conn: Connection): Muxer {.gcsafe, closure, raises: [].}
MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [].}
# this wraps a creator proc that knows how to make muxers
MuxerProvider* = object
@@ -41,30 +38,32 @@ type
codec*: string
func shortLog*(m: Muxer): auto =
if m == nil: "nil"
else: shortLog(m.connection)
if m == nil:
"nil"
else:
shortLog(m.connection)
chronicles.formatIt(Muxer): shortLog(it)
chronicles.formatIt(Muxer):
shortLog(it)
# muxer interface
method newStream*(
m: Muxer,
name: string = "",
lazy: bool = false
): Future[Connection] {.base, async: (raises: [
CancelledError, LPStreamError, MuxerError], raw: true).} =
m: Muxer, name: string = "", lazy: bool = false
): Future[Connection] {.
base, async: (raises: [CancelledError, LPStreamError, MuxerError], raw: true)
.} =
raiseAssert("Not implemented!")
method close*(m: Muxer) {.base, async: (raises: []).} =
if m.connection != nil:
await m.connection.close()
method handle*(m: Muxer): Future[void] {.base, async: (raises: []).} = discard
method handle*(m: Muxer): Future[void] {.base, async: (raises: []).} =
discard
proc new*(
T: typedesc[MuxerProvider],
creator: MuxerConstructor,
codec: string): T {.gcsafe.} =
T: typedesc[MuxerProvider], creator: MuxerConstructor, codec: string
): T {.gcsafe.} =
let muxerProvider = T(newMuxer: creator, codec: codec)
muxerProvider
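
# --- editor's illustrative sketch, not part of this commit ------------------
# MuxerProvider pairs a constructor closure with its protocol codec; the
# codec string mirrors the MplexCodec constant from the mplex hunk above.
import libp2p/muxers/muxer, libp2p/muxers/mplex/mplex
import libp2p/stream/connection

let provider = MuxerProvider.new(
  proc(conn: Connection): Muxer {.gcsafe, raises: [].} =
    Mplex.new(conn),
  "/mplex/6.7.0",
)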

View File

@@ -11,8 +11,7 @@
import sequtils, std/[tables]
import chronos, chronicles, metrics, stew/[endians2, byteutils, objects]
import ../muxer,
../../stream/connection
import ../muxer, ../../stream/connection
export muxer
@@ -27,14 +26,13 @@ const
MaxChannelCount = 200
when defined(libp2p_yamux_metrics):
declareGauge libp2p_yamux_channels,
"yamux channels", labels = ["initiator", "peer"]
declareGauge libp2p_yamux_channels, "yamux channels", labels = ["initiator", "peer"]
declareHistogram libp2p_yamux_send_queue,
"message send queue length (in byte)", buckets = [
0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
"message send queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
declareHistogram libp2p_yamux_recv_queue,
"message recv queue length (in byte)", buckets = [
0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
"message recv queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
type
YamuxError* = object of MuxerError
@@ -52,9 +50,9 @@ type
Rst
GoAwayStatus = enum
NormalTermination = 0x0,
ProtocolError = 0x1,
InternalError = 0x2,
NormalTermination = 0x0
ProtocolError = 0x1
InternalError = 0x2
YamuxHeader = object
version: uint8
@@ -65,88 +63,84 @@ type
proc readHeader(
conn: LPStream
): Future[YamuxHeader] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
): Future[YamuxHeader] {.async: (raises: [CancelledError, LPStreamError, MuxerError]).} =
var buffer: array[12, byte]
await conn.readExactly(addr buffer[0], 12)
result.version = buffer[0]
let flags = fromBytesBE(uint16, buffer[2..3])
if not result.msgType.checkedEnumAssign(buffer[1]) or flags notin 0'u16..15'u16:
raise newException(YamuxError, "Wrong header")
let flags = fromBytesBE(uint16, buffer[2 .. 3])
if not result.msgType.checkedEnumAssign(buffer[1]) or flags notin 0'u16 .. 15'u16:
raise newException(YamuxError, "Wrong header")
result.flags = cast[set[MsgFlags]](flags)
result.streamId = fromBytesBE(uint32, buffer[4..7])
result.length = fromBytesBE(uint32, buffer[8..11])
result.streamId = fromBytesBE(uint32, buffer[4 .. 7])
result.length = fromBytesBE(uint32, buffer[8 .. 11])
return result
proc `$`(header: YamuxHeader): string =
"{" & $header.msgType & ", " &
"{" & header.flags.foldl(if a != "": a & ", " & $b else: $b, "") & "}, " &
"streamId: " & $header.streamId & ", " &
"length: " & $header.length & "}"
"{" & $header.msgType & ", " & "{" &
header.flags.foldl(
if a != "":
a & ", " & $b
else:
$b
,
"",
) & "}, " & "streamId: " & $header.streamId & ", " & "length: " & $header.length &
"}"
proc encode(header: YamuxHeader): array[12, byte] =
result[0] = header.version
result[1] = uint8(header.msgType)
result[2..3] = toBytesBE(uint16(cast[uint8](header.flags))) # workaround https://github.com/nim-lang/Nim/issues/21789
result[4..7] = toBytesBE(header.streamId)
result[8..11] = toBytesBE(header.length)
result[2 .. 3] = toBytesBE(uint16(cast[uint8](header.flags)))
# workaround https://github.com/nim-lang/Nim/issues/21789
result[4 .. 7] = toBytesBE(header.streamId)
result[8 .. 11] = toBytesBE(header.length)
proc write(
conn: LPStream,
header: YamuxHeader
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
conn: LPStream, header: YamuxHeader
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
trace "write directly on stream", h = $header
var buffer = header.encode()
conn.write(@buffer)
proc ping(T: type[YamuxHeader], flag: MsgFlags, pingData: uint32): T =
T(
version: YamuxVersion,
msgType: MsgType.Ping,
flags: {flag},
length: pingData
)
T(version: YamuxVersion, msgType: MsgType.Ping, flags: {flag}, length: pingData)
proc goAway(T: type[YamuxHeader], status: GoAwayStatus): T =
T(
version: YamuxVersion,
msgType: MsgType.GoAway,
length: uint32(status)
)
T(version: YamuxVersion, msgType: MsgType.GoAway, length: uint32(status))
proc data(
T: type[YamuxHeader],
streamId: uint32,
length: uint32 = 0,
flags: set[MsgFlags] = {}): T =
flags: set[MsgFlags] = {},
): T =
T(
version: YamuxVersion,
msgType: MsgType.Data,
length: length,
flags: flags,
streamId: streamId
streamId: streamId,
)
proc windowUpdate(
T: type[YamuxHeader],
streamId: uint32,
delta: uint32,
flags: set[MsgFlags] = {}): T =
T: type[YamuxHeader], streamId: uint32, delta: uint32, flags: set[MsgFlags] = {}
): T =
T(
version: YamuxVersion,
msgType: MsgType.WindowUpdate,
length: delta,
flags: flags,
streamId: streamId
streamId: streamId,
)
type
ToSend = tuple
data: seq[byte]
sent: int
fut: Future[void].Raising([CancelledError, LPStreamError])
ToSend =
tuple[
data: seq[byte],
sent: int,
fut: Future[void].Raising([CancelledError, LPStreamError]),
]
YamuxChannel* = ref object of Connection
id: uint32
recvWindow: int
@@ -176,7 +170,16 @@ proc `$`(channel: YamuxChannel): string =
if channel.isReset:
s.add("Reset")
if s.len > 0:
result &= " {" & s.foldl(if a != "": a & ", " & b else: b, "") & "}"
result &=
" {" &
s.foldl(
if a != "":
a & ", " & b
else:
b
,
"",
) & "}"
proc lengthSendQueue(channel: YamuxChannel): int =
## Returns the length of what remains to be sent
@@ -189,11 +192,13 @@ proc lengthSendQueueWithLimit(channel: YamuxChannel): int =
# For leniency, limit big message sizes to a third of maxSendQueueSize.
# This value is arbitrary and not in the spec; it permits storing up to
# 3 big messages if the peer is stalling.
channel.sendQueue.foldl(a + min(b.data.len - b.sent, channel.maxSendQueueSize div 3), 0)
channel.sendQueue.foldl(
a + min(b.data.len - b.sent, channel.maxSendQueueSize div 3), 0
)
proc actuallyClose(channel: YamuxChannel) {.async: (raises: []).} =
if channel.closedLocally and channel.sendQueue.len == 0 and
channel.closedRemotely.completed():
channel.closedRemotely.completed():
await procCall Connection(channel).closeImpl()
proc remoteClosed(channel: YamuxChannel) {.async: (raises: []).} =
@@ -207,12 +212,13 @@ method closeImpl*(channel: YamuxChannel) {.async: (raises: []).} =
channel.closedLocally = true
if not channel.isReset and channel.sendQueue.len == 0:
try: await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
except CancelledError, LPStreamError: discard
try:
await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
except CancelledError, LPStreamError:
discard
await channel.actuallyClose()
proc reset(
channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).} =
proc reset(channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).} =
# If we reset locally, we want to flush up to a maximum of recvWindow
# bytes. This is because the peer we're connected to can send us data before
# it receives the reset.
@@ -228,8 +234,10 @@ proc reset(
channel.sendWindow = 0
if not channel.closedLocally:
if isLocal and not channel.isSending:
try: await channel.conn.write(YamuxHeader.data(channel.id, 0, {Rst}))
except CancelledError, LPStreamError: discard
try:
await channel.conn.write(YamuxHeader.data(channel.id, 0, {Rst}))
except CancelledError, LPStreamError:
discard
await channel.close()
if not channel.closedRemotely.completed():
await channel.remoteClosed()
@@ -251,16 +259,11 @@ proc updateRecvWindow(
let delta = channel.maxRecvWindow - inWindow
channel.recvWindow.inc(delta)
await channel.conn.write(YamuxHeader.windowUpdate(
channel.id,
delta.uint32
))
await channel.conn.write(YamuxHeader.windowUpdate(channel.id, delta.uint32))
trace "increasing the recvWindow", delta
method readOnce*(
channel: YamuxChannel,
pbytes: pointer,
nbytes: int
channel: YamuxChannel, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
## Read from a yamux channel
@@ -276,19 +279,21 @@ method readOnce*(
raise newLPStreamRemoteClosedError()
if channel.recvQueue.len == 0:
channel.receivedData.clear()
try: # https://github.com/status-im/nim-chronos/issues/516
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(channel.closedRemotely, channel.receivedData.wait())
except ValueError: raiseAssert("Futures list is not empty")
except ValueError:
raiseAssert("Futures list is not empty")
if channel.closedRemotely.completed() and channel.recvQueue.len == 0:
channel.isEof = true
return 0 # we return 0 to indicate that the channel is closed for reading from now on
return
0 # we return 0 to indicate that the channel is closed for reading from now on
let toRead = min(channel.recvQueue.len, nbytes)
var p = cast[ptr UncheckedArray[byte]](pbytes)
toOpenArray(p, 0, nbytes - 1)[0..<toRead] =
toOpenArray(p, 0, nbytes - 1)[0 ..< toRead] =
channel.recvQueue.toOpenArray(0, toRead - 1)
channel.recvQueue = channel.recvQueue[toRead..^1]
channel.recvQueue = channel.recvQueue[toRead ..^ 1]
# We made some room in the recv buffer let the peer know
await channel.updateRecvWindow()
@@ -296,8 +301,8 @@ method readOnce*(
return toRead
proc gotDataFromRemote(
channel: YamuxChannel,
b: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
channel: YamuxChannel, b: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
channel.recvWindow -= b.len
channel.recvQueue = channel.recvQueue.concat(b)
channel.receivedData.fire()
@@ -314,7 +319,8 @@ proc trySend(
if channel.isSending:
return
channel.isSending = true
defer: channel.isSending = false
defer:
channel.isSending = false
while channel.sendQueue.len != 0:
channel.sendQueue.keepItIf(not (it.fut.cancelled() and it.sent == 0))
@@ -339,15 +345,17 @@ proc trySend(
trace "last buffer we'll sent on this channel", toSend, bytesAvailable
header.flags.incl({Fin})
sendBuffer[0..<12] = header.encode()
sendBuffer[0 ..< 12] = header.encode()
var futures: seq[Future[void].Raising([CancelledError, LPStreamError])]
while inBuffer < toSend:
# concatenate the different messages we try to send into one buffer
let (data, sent, fut) = channel.sendQueue[0]
let bufferToSend = min(data.len - sent, toSend - inBuffer)
sendBuffer.toOpenArray(12, 12 + toSend - 1)[inBuffer..<(inBuffer+bufferToSend)] =
channel.sendQueue[0].data.toOpenArray(sent, sent + bufferToSend - 1)
sendBuffer.toOpenArray(12, 12 + toSend - 1)[
inBuffer ..< (inBuffer + bufferToSend)
] = channel.sendQueue[0].data.toOpenArray(sent, sent + bufferToSend - 1)
channel.sendQueue[0].sent.inc(bufferToSend)
if channel.sendQueue[0].sent >= data.len:
# if every byte of the message is in the buffer, add the write future to the
@@ -378,10 +386,8 @@ proc trySend(
channel.activity = true
method write*(
channel: YamuxChannel,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
channel: YamuxChannel, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
## Write to yamux channel
##
result = newFuture[void]("Yamux Send")
@@ -399,37 +405,39 @@ method write*(
libp2p_yamux_send_queue.observe(channel.lengthSendQueue().int64)
asyncSpawn channel.trySend()
proc open(
channel: YamuxChannel
) {.async: (raises: [CancelledError, LPStreamError]).} =
proc open(channel: YamuxChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
## Open a yamux channel by sending a window update with Syn or Ack flag
##
if channel.opened:
trace "Try to open channel twice"
return
channel.opened = true
await channel.conn.write(YamuxHeader.windowUpdate(
channel.id,
uint32(max(channel.maxRecvWindow - YamuxDefaultWindowSize, 0)),
{if channel.isSrc: Syn else: Ack}))
await channel.conn.write(
YamuxHeader.windowUpdate(
channel.id,
uint32(max(channel.maxRecvWindow - YamuxDefaultWindowSize, 0)),
{if channel.isSrc: Syn else: Ack},
)
)
method getWrapped*(channel: YamuxChannel): Connection = channel.conn
method getWrapped*(channel: YamuxChannel): Connection =
channel.conn
type
Yamux* = ref object of Muxer
channels: Table[uint32, YamuxChannel]
flushed: Table[uint32, int]
currentId: uint32
isClosed: bool
maxChannCount: int
windowSize: int
maxSendQueueSize: int
inTimeout: Duration
outTimeout: Duration
type Yamux* = ref object of Muxer
channels: Table[uint32, YamuxChannel]
flushed: Table[uint32, int]
currentId: uint32
isClosed: bool
maxChannCount: int
windowSize: int
maxSendQueueSize: int
inTimeout: Duration
outTimeout: Duration
proc lenBySrc(m: Yamux, isSrc: bool): int =
for v in m.channels.values():
if v.isSrc == isSrc: result += 1
if v.isSrc == isSrc:
result += 1
proc cleanupChannel(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
try:
@@ -439,13 +447,14 @@ proc cleanupChannel(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
m.channels.del(channel.id)
when defined(libp2p_yamux_metrics):
libp2p_yamux_channels.set(
m.lenBySrc(channel.isSrc).int64, [$channel.isSrc, $channel.peerId])
m.lenBySrc(channel.isSrc).int64, [$channel.isSrc, $channel.peerId]
)
if channel.isReset and channel.recvWindow > 0:
m.flushed[channel.id] = channel.recvWindow
proc createStream(
m: Yamux, id: uint32, isSrc: bool,
recvWindow: int, maxSendQueueSize: int): YamuxChannel =
m: Yamux, id: uint32, isSrc: bool, recvWindow: int, maxSendQueueSize: int
): YamuxChannel =
# During initialization, recvWindow can be larger than maxRecvWindow.
# This is because the peer we're connected to will always assume
# that the initial recvWindow is 256k.
@@ -453,16 +462,18 @@ proc createStream(
# recvWindow is less than maxRecvWindow
proc newClosedRemotelyFut(): Future[void] {.async: (raises: [], raw: true).} =
newFuture[void]()
var stream = YamuxChannel(
id: id,
maxRecvWindow: recvWindow,
recvWindow: if recvWindow > YamuxDefaultWindowSize: recvWindow else: YamuxDefaultWindowSize,
recvWindow:
if recvWindow > YamuxDefaultWindowSize: recvWindow else: YamuxDefaultWindowSize,
sendWindow: YamuxDefaultWindowSize,
maxSendQueueSize: maxSendQueueSize,
isSrc: isSrc,
conn: m.connection,
receivedData: newAsyncEvent(),
closedRemotely: newClosedRemotelyFut()
closedRemotely: newClosedRemotelyFut(),
)
stream.objName = "YamuxStream"
if isSrc:
@@ -471,10 +482,9 @@ proc createStream(
else:
stream.dir = Direction.In
stream.timeout = m.inTimeout
stream.timeoutHandler =
proc(): Future[void] {.async: (raises: [], raw: true).} =
trace "Idle timeout expired, resetting YamuxChannel"
stream.reset(isLocal = true)
stream.timeoutHandler = proc(): Future[void] {.async: (raises: [], raw: true).} =
trace "Idle timeout expired, resetting YamuxChannel"
stream.reset(isLocal = true)
stream.initStream()
stream.peerId = m.connection.peerId
stream.observedAddr = m.connection.observedAddr
@@ -483,7 +493,7 @@ proc createStream(
stream.shortAgent = m.connection.shortAgent
m.channels[id] = stream
asyncSpawn m.cleanupChannel(stream)
trace "created channel", id, pid=m.connection.peerId
trace "created channel", id, pid = m.connection.peerId
when defined(libp2p_yamux_metrics):
libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $stream.peerId])
return stream
@@ -498,9 +508,12 @@ method close*(m: Yamux) {.async: (raises: []).} =
let channels = toSeq(m.channels.values())
for channel in channels:
await channel.reset(isLocal = true)
try: await m.connection.write(YamuxHeader.goAway(NormalTermination))
except CancelledError as exc: trace "cancelled sending goAway", msg = exc.msg
except LPStreamError as exc: trace "failed to send goAway", msg = exc.msg
try:
await m.connection.write(YamuxHeader.goAway(NormalTermination))
except CancelledError as exc:
trace "cancelled sending goAway", msg = exc.msg
except LPStreamError as exc:
trace "failed to send goAway", msg = exc.msg
await m.connection.close()
trace "Closed yamux"
@@ -512,34 +525,40 @@ proc handleStream(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
doAssert(channel.isClosed, "connection not closed by handler!")
method handle*(m: Yamux) {.async: (raises: []).} =
trace "Starting yamux handler", pid=m.connection.peerId
trace "Starting yamux handler", pid = m.connection.peerId
try:
while not m.connection.atEof:
trace "waiting for header"
let header = await m.connection.readHeader()
trace "got message", h = $header
case header.msgType:
case header.msgType
of Ping:
if MsgFlags.Syn in header.flags:
await m.connection.write(YamuxHeader.ping(MsgFlags.Ack, header.length))
of GoAway:
var status: GoAwayStatus
if status.checkedEnumAssign(header.length): trace "Received go away", status
else: trace "Received unexpected error go away"
if status.checkedEnumAssign(header.length):
trace "Received go away", status
else:
trace "Received unexpected error go away"
break
of Data, WindowUpdate:
if MsgFlags.Syn in header.flags:
if header.streamId in m.channels:
debug "Trying to create an existing channel, skipping", id=header.streamId
debug "Trying to create an existing channel, skipping", id = header.streamId
else:
if header.streamId in m.flushed:
m.flushed.del(header.streamId)
if header.streamId mod 2 == m.currentId mod 2:
debug "Peer used our reserved stream id, skipping", id=header.streamId, currentId=m.currentId, peerId=m.connection.peerId
debug "Peer used our reserved stream id, skipping",
id = header.streamId,
currentId = m.currentId,
peerId = m.connection.peerId
raise newException(YamuxError, "Peer used our reserved stream id")
let newStream = m.createStream(header.streamId, false, m.windowSize, m.maxSendQueueSize)
let newStream =
m.createStream(header.streamId, false, m.windowSize, m.maxSendQueueSize)
if m.channels.len >= m.maxChannCount:
await newStream.reset()
continue
@@ -551,23 +570,23 @@ method handle*(m: Yamux) {.async: (raises: []).} =
if header.msgType == Data:
flushed[].dec(int(header.length))
if flushed[] < 0:
raise newException(YamuxError,
"Peer exhausted the recvWindow after reset")
raise
newException(YamuxError, "Peer exhausted the recvWindow after reset")
if header.length > 0:
var buffer = newSeqUninitialized[byte](header.length)
await m.connection.readExactly(
addr buffer[0], int(header.length))
await m.connection.readExactly(addr buffer[0], int(header.length))
do:
raise newException(YamuxError,
"Unknown stream ID: " & $header.streamId)
raise newException(YamuxError, "Unknown stream ID: " & $header.streamId)
continue
let channel =
try:
m.channels[header.streamId]
except KeyError:
raise newException(YamuxError,
"Stream was cleaned up before handling data: " & $header.streamId)
raise newException(
YamuxError,
"Stream was cleaned up before handling data: " & $header.streamId,
)
if header.msgType == WindowUpdate:
channel.sendWindow += int(header.length)
@@ -580,7 +599,7 @@ method handle*(m: Yamux) {.async: (raises: []).} =
if header.length > 0:
var buffer = newSeqUninitialized[byte](header.length)
await m.connection.readExactly(addr buffer[0], int(header.length))
trace "Msg Rcv", msg=shortLog(buffer)
trace "Msg Rcv", msg = shortLog(buffer)
await channel.gotDataFromRemote(buffer)
if MsgFlags.Fin in header.flags:
@@ -596,7 +615,7 @@ method handle*(m: Yamux) {.async: (raises: []).} =
except LPStreamError as exc:
debug "Unexpected stream exception in yamux read loop", msg = exc.msg
except YamuxError as exc:
trace "Closing yamux connection", error=exc.msg
trace "Closing yamux connection", error = exc.msg
try:
await m.connection.write(YamuxHeader.goAway(ProtocolError))
except CancelledError, LPStreamError:
@@ -612,14 +631,12 @@ method handle*(m: Yamux) {.async: (raises: []).} =
trace "Stopped yamux handler"
method getStreams*(m: Yamux): seq[Connection] =
for c in m.channels.values: result.add(c)
for c in m.channels.values:
result.add(c)
method newStream*(
m: Yamux,
name: string = "",
lazy: bool = false
): Future[Connection] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
m: Yamux, name: string = "", lazy: bool = false
): Future[Connection] {.async: (raises: [CancelledError, LPStreamError, MuxerError]).} =
if m.channels.len > m.maxChannCount - 1:
raise newException(TooManyChannels, "max allowed channel count exceeded")
let stream = m.createStream(m.currentId, true, m.windowSize, m.maxSendQueueSize)
@@ -629,12 +646,14 @@ method newStream*(
return stream
proc new*(
T: type[Yamux], conn: Connection,
T: type[Yamux],
conn: Connection,
maxChannCount: int = MaxChannelCount,
windowSize: int = YamuxDefaultWindowSize,
maxSendQueueSize: int = MaxSendQueueSize,
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes): T =
outTimeout: Duration = 5.minutes,
): T =
T(
connection: conn,
currentId: if conn.dir == Out: 1 else: 2,
@@ -642,5 +661,5 @@ proc new*(
windowSize: windowSize,
maxSendQueueSize: maxSendQueueSize,
inTimeout: inTimeout,
outTimeout: outTimeout
outTimeout: outTimeout,
)
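
# --- editor's illustrative sketch, not part of this commit ------------------
# The 12-byte yamux header laid out by `encode` above: version, type, flags,
# then big-endian stream id and length. YamuxHeader and its helpers are
# module-private, so this check is assumed to live inside yamux.nim itself.
import stew/endians2

let hdr = YamuxHeader.windowUpdate(streamId = 3, delta = 256)
let wire = hdr.encode()
assert wire.len == 12
assert fromBytesBE(uint32, wire[4 .. 7]) == 3'u32      # stream id
assert fromBytesBE(uint32, wire[8 .. 11]) == 256'u32   # window delta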

View File

@@ -11,19 +11,19 @@
import
std/[streams, strutils, sets, sequtils],
chronos, chronicles, stew/byteutils,
chronos,
chronicles,
stew/byteutils,
dnsclientpkg/[protocol, types],
../utility
import
nameresolver
import nameresolver
logScope:
topics = "libp2p dnsresolver"
type
DnsResolver* = ref object of NameResolver
nameServers*: seq[TransportAddress]
type DnsResolver* = ref object of NameResolver
nameServers*: seq[TransportAddress]
proc questionToBuf(address: string, kind: QKind): seq[byte] =
try:
@@ -45,10 +45,8 @@ proc questionToBuf(address: string, kind: QKind): seq[byte] =
return newSeq[byte](0)
proc getDnsResponse(
dnsServer: TransportAddress,
address: string,
kind: QKind): Future[Response] {.async.} =
dnsServer: TransportAddress, address: string, kind: QKind
): Future[Response] {.async.} =
var sendBuf = questionToBuf(address, kind)
if sendBuf.len == 0:
@@ -56,9 +54,10 @@ proc getDnsResponse(
let receivedDataFuture = newFuture[void]()
proc datagramDataReceived(transp: DatagramTransport,
raddr: TransportAddress): Future[void] {.async, closure.} =
receivedDataFuture.complete()
proc datagramDataReceived(
transp: DatagramTransport, raddr: TransportAddress
): Future[void] {.async, closure.} =
receivedDataFuture.complete()
let sock =
if dnsServer.family == AddressFamily.IPv6:
@@ -78,16 +77,14 @@ proc getDnsResponse(
# parseResponse can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
return exceptionToAssert: parseResponse(string.fromBytes(rawResponse))
return exceptionToAssert:
parseResponse(string.fromBytes(rawResponse))
finally:
await sock.closeWait()
method resolveIp*(
self: DnsResolver,
address: string,
port: Port,
domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async.} =
self: DnsResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.async.} =
trace "Resolving IP using DNS", address, servers = self.nameServers.mapIt($it), domain
for _ in 0 ..< self.nameServers.len:
let server = self.nameServers[0]
@@ -113,16 +110,14 @@ method resolveIp*(
# toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
resolvedAddresses.incl(
exceptionToAssert(answer.toString())
)
resolvedAddresses.incl(exceptionToAssert(answer.toString()))
except CancelledError as e:
raise e
except ValueError as e:
info "Invalid DNS query", address, error=e.msg
info "Invalid DNS query", address, error = e.msg
return @[]
except CatchableError as e:
info "Failed to query DNS", address, error=e.msg
info "Failed to query DNS", address, error = e.msg
resolveFailed = true
break
@@ -137,10 +132,7 @@ method resolveIp*(
debug "Failed to resolve address, returning empty set"
return @[]
method resolveTxt*(
self: DnsResolver,
address: string): Future[seq[string]] {.async.} =
method resolveTxt*(self: DnsResolver, address: string): Future[seq[string]] {.async.} =
trace "Resolving TXT using DNS", address, servers = self.nameServers.mapIt($it)
for _ in 0 ..< self.nameServers.len:
let server = self.nameServers[0]
@@ -150,12 +142,13 @@ method resolveTxt*(
# it can't actually raise though
let response = await getDnsResponse(server, address, TXT)
return exceptionToAssert:
trace "Got TXT response", server = $server, answer=response.answers.mapIt(it.toString())
trace "Got TXT response",
server = $server, answer = response.answers.mapIt(it.toString())
response.answers.mapIt(it.toString())
except CancelledError as e:
raise e
except CatchableError as e:
info "Failed to query DNS", address, error=e.msg
info "Failed to query DNS", address, error = e.msg
self.nameServers.add(self.nameServers[0])
self.nameServers.delete(0)
continue
@@ -163,7 +156,5 @@ method resolveTxt*(
debug "Failed to resolve TXT, returning empty set"
return @[]
proc new*(
T: typedesc[DnsResolver],
nameServers: seq[TransportAddress]): T =
proc new*(T: typedesc[DnsResolver], nameServers: seq[TransportAddress]): T =
T(nameServers: nameServers)
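
# --- editor's illustrative sketch, not part of this commit ------------------
# A DnsResolver pointed at a public server; the 1.1.1.1 address is a made-up
# choice for illustration, any reachable DNS server will do.
import chronos
import libp2p/nameresolving/dnsresolver

proc demo() {.async.} =
  let resolver = DnsResolver.new(@[initTAddress("1.1.1.1:53")])
  let addrs = await resolver.resolveIp("example.com", Port(443))
  echo addrs             # empty if every configured server failed

waitFor demo()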

View File

@@ -9,9 +9,7 @@
{.push raises: [].}
import
std/tables,
chronos, chronicles
import std/tables, chronos, chronicles
import nameresolver
@@ -26,10 +24,8 @@ type MockResolver* = ref object of NameResolver
ipResponses*: Table[(string, bool), seq[string]]
method resolveIp*(
self: MockResolver,
address: string,
port: Port,
domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async.} =
self: MockResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.async.} =
if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
for resp in self.ipResponses.getOrDefault((address, false)):
result.add(initTAddress(resp, port))
@@ -38,9 +34,8 @@ method resolveIp*(
for resp in self.ipResponses.getOrDefault((address, true)):
result.add(initTAddress(resp, port))
method resolveTxt*(
self: MockResolver,
address: string): Future[seq[string]] {.async.} =
method resolveTxt*(self: MockResolver, address: string): Future[seq[string]] {.async.} =
return self.txtResponses.getOrDefault(address)
proc new*(T: typedesc[MockResolver]): T = T()
proc new*(T: typedesc[MockResolver]): T =
T()
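
# --- editor's illustrative sketch, not part of this commit ------------------
# Seeding the mock for a test; the (address, isIPv6) tuple key is visible in
# the ipResponses declaration above, the sample values are made up.
import libp2p/nameresolving/mockresolver

let resolver = MockResolver.new()
resolver.ipResponses[("localhost", false)] = @["127.0.0.1"]   # A answers
resolver.ipResponses[("localhost", true)] = @["::1"]          # AAAA answers
resolver.txtResponses["_dnsaddr.example.com"] =
  @["dnsaddr=/ip4/127.0.0.1/tcp/4001"]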

View File

@@ -10,31 +10,25 @@
{.push raises: [].}
import std/[sugar, sets, sequtils, strutils]
import
chronos,
chronicles,
stew/endians2
import chronos, chronicles, stew/endians2
import ".."/[multiaddress, multicodec]
logScope:
topics = "libp2p nameresolver"
type
NameResolver* = ref object of RootObj
type NameResolver* = ref object of RootObj
method resolveTxt*(
self: NameResolver,
address: string): Future[seq[string]] {.async, base.} =
self: NameResolver, address: string
): Future[seq[string]] {.async, base.} =
## Get TXT record
##
doAssert(false, "Not implemented!")
method resolveIp*(
self: NameResolver,
address: string,
port: Port,
domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async, base.} =
self: NameResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.async, base.} =
## Resolve the specified address
##
@@ -42,17 +36,17 @@ method resolveIp*(
proc getHostname*(ma: MultiAddress): string =
let
firstPart = ma[0].valueOr: return ""
firstPart = ma[0].valueOr:
return ""
fpSplitted = ($firstPart).split('/', 2)
if fpSplitted.len > 2: fpSplitted[2]
else: ""
if fpSplitted.len > 2:
fpSplitted[2]
else:
""
proc resolveOneAddress(
self: NameResolver,
ma: MultiAddress,
domain: Domain = Domain.AF_UNSPEC,
prefix = ""): Future[seq[MultiAddress]]
{.async.} =
self: NameResolver, ma: MultiAddress, domain: Domain = Domain.AF_UNSPEC, prefix = ""
): Future[seq[MultiAddress]] {.async.} =
# Resolve a single address
var pbuf: array[2, byte]
@@ -68,15 +62,14 @@ proc resolveOneAddress(
for address in resolvedAddresses:
var createdAddress = MultiAddress.init(address).tryGet()[0].tryGet()
for part in ma:
if DNS.match(part.tryGet()): continue
if DNS.match(part.tryGet()):
continue
createdAddress &= part.tryGet()
createdAddress
proc resolveDnsAddr*(
self: NameResolver,
ma: MultiAddress,
depth: int = 0): Future[seq[MultiAddress]] {.async.} =
self: NameResolver, ma: MultiAddress, depth: int = 0
): Future[seq[MultiAddress]] {.async.} =
if not DNSADDR.matchPartial(ma):
return @[ma]
@@ -93,10 +86,12 @@ proc resolveDnsAddr*(
var result: seq[MultiAddress]
for entry in txt:
if not entry.startsWith("dnsaddr="): continue
let entryValue = MultiAddress.init(entry[8..^1]).tryGet()
if not entry.startsWith("dnsaddr="):
continue
let entryValue = MultiAddress.init(entry[8 ..^ 1]).tryGet()
if entryValue.contains(multiCodec("p2p")).tryGet() and ma.contains(multiCodec("p2p")).tryGet():
if entryValue.contains(multiCodec("p2p")).tryGet() and
ma.contains(multiCodec("p2p")).tryGet():
if entryValue[multiCodec("p2p")] != ma[multiCodec("p2p")]:
continue
@@ -109,17 +104,17 @@ proc resolveDnsAddr*(
return @[]
return result
proc resolveMAddress*(
self: NameResolver,
address: MultiAddress): Future[seq[MultiAddress]] {.async.} =
self: NameResolver, address: MultiAddress
): Future[seq[MultiAddress]] {.async.} =
var res = initOrderedSet[MultiAddress]()
if not DNS.matchPartial(address):
res.incl(address)
else:
let code = address[0].tryGet().protoCode().tryGet()
let seq = case code:
let seq =
case code
of multiCodec("dns"):
await self.resolveOneAddress(address)
of multiCodec("dns4"):

View File

@@ -20,31 +20,39 @@ type
maxSize: int
minCount: int
proc addObservation*(self:ObservedAddrManager, observedAddr: MultiAddress): bool =
proc addObservation*(self: ObservedAddrManager, observedAddr: MultiAddress): bool =
## Adds a new observed MultiAddress. If the number of observations exceeds maxSize, the oldest one is removed.
if self.observedIPsAndPorts.len >= self.maxSize:
self.observedIPsAndPorts.del(0)
self.observedIPsAndPorts.del(0)
self.observedIPsAndPorts.add(observedAddr)
return true
proc getProtocol(self: ObservedAddrManager, observations: seq[MultiAddress], multiCodec: MultiCodec): Opt[MultiAddress] =
proc getProtocol(
self: ObservedAddrManager, observations: seq[MultiAddress], multiCodec: MultiCodec
): Opt[MultiAddress] =
var countTable = toCountTable(observations)
countTable.sort()
var orderedPairs = toSeq(countTable.pairs)
for (ma, count) in orderedPairs:
let protoCode = (ma[0].flatMap(protoCode)).valueOr: continue
let protoCode = (ma[0].flatMap(protoCode)).valueOr:
continue
if protoCode == multiCodec and count >= self.minCount:
return Opt.some(ma)
return Opt.none(MultiAddress)
proc getMostObservedProtocol(self: ObservedAddrManager, multiCodec: MultiCodec): Opt[MultiAddress] =
proc getMostObservedProtocol(
self: ObservedAddrManager, multiCodec: MultiCodec
): Opt[MultiAddress] =
## Returns the most observed IP address, or none if the number of observations is less than minCount.
let observedIPs = collect:
for observedIp in self.observedIPsAndPorts:
observedIp[0].valueOr: continue
observedIp[0].valueOr:
continue
return self.getProtocol(observedIPs, multiCodec)
proc getMostObservedProtoAndPort(self: ObservedAddrManager, multiCodec: MultiCodec): Opt[MultiAddress] =
proc getMostObservedProtoAndPort(
self: ObservedAddrManager, multiCodec: MultiCodec
): Opt[MultiAddress] =
## Returns the most observed IP/Port address, or none if the number of observations is less than minCount.
return self.getProtocol(self.observedIPsAndPorts, multiCodec)
@@ -58,29 +66,27 @@ proc getMostObservedProtosAndPorts*(self: ObservedAddrManager): seq[MultiAddress
res.add(ip6)
return res
proc guessDialableAddr*(
self: ObservedAddrManager,
ma: MultiAddress): MultiAddress =
proc guessDialableAddr*(self: ObservedAddrManager, ma: MultiAddress): MultiAddress =
## Replaces the first proto value of a listen address with the corresponding (matching the proto code) most observed value.
## If the most observed value is not available, the original MultiAddress is returned.
let
maFirst = ma[0].valueOr: return ma
maRest = ma[1..^1].valueOr: return ma
maFirstProto = maFirst.protoCode().valueOr: return ma
maFirst = ma[0].valueOr:
return ma
maRest = ma[1 ..^ 1].valueOr:
return ma
maFirstProto = maFirst.protoCode().valueOr:
return ma
let observedIP = self.getMostObservedProtocol(maFirstProto).valueOr: return ma
return concat(observedIP, maRest).valueOr: ma
let observedIP = self.getMostObservedProtocol(maFirstProto).valueOr:
return ma
return concat(observedIP, maRest).valueOr:
ma
proc `$`*(self: ObservedAddrManager): string =
## Returns a string representation of the ObservedAddrManager.
return "IPs and Ports: " & $self.observedIPsAndPorts
proc new*(
T: typedesc[ObservedAddrManager],
maxSize = 10,
minCount = 3): T =
proc new*(T: typedesc[ObservedAddrManager], maxSize = 10, minCount = 3): T =
## Creates a new ObservedAddrManager.
return T(
observedIPsAndPorts: newSeq[MultiAddress](),
maxSize: maxSize,
minCount: minCount)
return
T(observedIPsAndPorts: newSeq[MultiAddress](), maxSize: maxSize, minCount: minCount)
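
A rough usage sketch of the manager reformatted above (the module path and the documentation-range addresses are assumptions): an address only surfaces once minCount identical observations accumulate.

import libp2p/[multiaddress, observedaddrmanager]

let oam = ObservedAddrManager.new(maxSize = 10, minCount = 3)
let observed = MultiAddress.init("/ip4/203.0.113.7/tcp/4001").tryGet()
for _ in 0 ..< 3:
  discard oam.addObservation(observed) # e.g. three peers reported the same address
echo oam.getMostObservedProtosAndPorts() # now includes the observed address
# Rewrite a private listen address using the observed public part:
echo oam.guessDialableAddr(MultiAddress.init("/ip4/192.168.1.2/tcp/4001").tryGet())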


@@ -18,17 +18,18 @@ import
chronicles,
nimcrypto/utils,
utility,
./crypto/crypto, ./multicodec, ./multihash, ./vbuffer,
./crypto/crypto,
./multicodec,
./multihash,
./vbuffer,
./protobuf/minprotobuf
export results, utility
const
maxInlineKeyLength* = 42
const maxInlineKeyLength* = 42
type
PeerId* = object
data*: seq[byte]
type PeerId* = object
data*: seq[byte]
func `$`*(pid: PeerId): string =
## Return base58 encoded ``pid`` representation.
@@ -45,7 +46,8 @@ func shortLog*(pid: PeerId): string =
spid
chronicles.formatIt(PeerId): shortLog(it)
chronicles.formatIt(PeerId):
shortLog(it)
func toBytes*(pid: PeerId, data: var openArray[byte]): int =
## Store PeerId ``pid`` to array of bytes ``data``.
@@ -78,7 +80,8 @@ func cmp*(a, b: PeerId): int =
var m = min(len(a.data), len(b.data))
while i < m:
result = ord(a.data[i]) - ord(b.data[i])
if result != 0: return
if result != 0:
return
inc(i)
result = len(a.data) - len(b.data)
@@ -165,18 +168,18 @@ func init*(t: typedesc[PeerId], data: string): Result[PeerId, cstring] =
func init*(t: typedesc[PeerId], pubkey: PublicKey): Result[PeerId, cstring] =
## Create new peer id from public key ``pubkey``.
var pubraw = ? pubkey.getBytes().orError(
cstring("peerid: failed to get bytes from given key"))
var pubraw =
?pubkey.getBytes().orError(cstring("peerid: failed to get bytes from given key"))
var mh: MultiHash
if len(pubraw) <= maxInlineKeyLength:
mh = ? MultiHash.digest("identity", pubraw)
mh = ?MultiHash.digest("identity", pubraw)
else:
mh = ? MultiHash.digest("sha2-256", pubraw)
mh = ?MultiHash.digest("sha2-256", pubraw)
ok(PeerId(data: mh.data.buffer))
func init*(t: typedesc[PeerId], seckey: PrivateKey): Result[PeerId, cstring] =
## Create new peer id from private key ``seckey``.
PeerId.init(? seckey.getPublicKey().orError(cstring("invalid private key")))
PeerId.init(?seckey.getPublicKey().orError(cstring("invalid private key")))
proc random*(t: typedesc[PeerId], rng = newRng()): Result[PeerId, cstring] =
## Create new peer id with random public key.
@@ -201,12 +204,13 @@ func write*(pb: var ProtoBuffer, field: int, pid: PeerId) =
## Write PeerId value ``peerid`` to object ``pb`` using ProtoBuf's encoding.
write(pb, field, pid.data)
func getField*(pb: ProtoBuffer, field: int,
pid: var PeerId): ProtoResult[bool] {.inline.} =
func getField*(
pb: ProtoBuffer, field: int, pid: var PeerId
): ProtoResult[bool] {.inline.} =
## Read ``PeerId`` from ProtoBuf's message and validate it
var buffer: seq[byte]
let res = ? pb.getField(field, buffer)
if not(res):
let res = ?pb.getField(field, buffer)
if not (res):
ok(false)
else:
var peerId: PeerId
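
A short sketch of the constructors touched above; the key scheme and the round trip are illustrative:

import libp2p/[crypto/crypto, peerid]

let rng = newRng()
# Derive a peer id from a fresh Ed25519 private key:
let key = PrivateKey.random(Ed25519, rng[]).tryGet()
let pid = PeerId.init(key).tryGet()
echo pid # base58-encoded identifier
# Round-trip through the textual form:
assert PeerId.init($pid).tryGet() == pid
# Or create a throwaway identity directly:
echo PeerId.random(rng).tryGet()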


@@ -21,10 +21,9 @@ export peerid, multiaddress, crypto, routing_record, errors, results
type
PeerInfoError* = object of LPError
AddressMapper* =
proc(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]]
{.gcsafe, raises: [].}
## A proc that is expected to resolve the listen addresses into dialable addresses
AddressMapper* = proc(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.
gcsafe, raises: []
.} ## A proc that is expected to resolve the listen addresses into dialable addresses
PeerInfo* {.public.} = ref object
peerId*: PeerId
@@ -50,7 +49,8 @@ func shortLog*(p: PeerInfo): auto =
protoVersion: p.protoVersion,
agentVersion: p.agentVersion,
)
chronicles.formatIt(PeerInfo): shortLog(it)
chronicles.formatIt(PeerInfo):
shortLog(it)
proc update*(p: PeerInfo) {.async.} =
# p.addrs.len == 0 overrides addrs only if it is the first time update is being executed or if the field is empty.
@@ -62,9 +62,8 @@ proc update*(p: PeerInfo) {.async.} =
p.addrs = await mapper(p.addrs)
p.signedPeerRecord = SignedPeerRecord.init(
p.privateKey,
PeerRecord.init(p.peerId, p.addrs)
).valueOr():
p.privateKey, PeerRecord.init(p.peerId, p.addrs)
).valueOr:
info "Can't update the signed peer record"
return
@@ -72,38 +71,35 @@ proc addrs*(p: PeerInfo): seq[MultiAddress] =
p.addrs
proc fullAddrs*(p: PeerInfo): MaResult[seq[MultiAddress]] =
let peerIdPart = ? MultiAddress.init(multiCodec("p2p"), p.peerId.data)
let peerIdPart = ?MultiAddress.init(multiCodec("p2p"), p.peerId.data)
var res: seq[MultiAddress]
for address in p.addrs:
res.add(? concat(address, peerIdPart))
res.add(?concat(address, peerIdPart))
ok(res)
proc parseFullAddress*(ma: MultiAddress): MaResult[(PeerId, MultiAddress)] =
let p2pPart = ? ma[^1]
if ? p2pPart.protoCode != multiCodec("p2p"):
let p2pPart = ?ma[^1]
if ?p2pPart.protoCode != multiCodec("p2p"):
return err("Missing p2p part from multiaddress!")
let res = (
? PeerId.init(? p2pPart.protoArgument()).orErr("invalid peerid"),
? ma[0 .. ^2]
)
let res =
(?PeerId.init(?p2pPart.protoArgument()).orErr("invalid peerid"), ?ma[0 .. ^2])
ok(res)
proc parseFullAddress*(ma: string | seq[byte]): MaResult[(PeerId, MultiAddress)] =
parseFullAddress(? MultiAddress.init(ma))
parseFullAddress(?MultiAddress.init(ma))
proc new*(
p: typedesc[PeerInfo],
key: PrivateKey,
listenAddrs: openArray[MultiAddress] = [],
protocols: openArray[string] = [],
protoVersion: string = "",
agentVersion: string = "",
addressMappers = newSeq[AddressMapper](),
): PeerInfo
{.raises: [LPError].} =
let pubkey = try:
p: typedesc[PeerInfo],
key: PrivateKey,
listenAddrs: openArray[MultiAddress] = [],
protocols: openArray[string] = [],
protoVersion: string = "",
agentVersion: string = "",
addressMappers = newSeq[AddressMapper](),
): PeerInfo {.raises: [LPError].} =
let pubkey =
try:
key.getPublicKey().tryGet()
except CatchableError:
raise newException(PeerInfoError, "invalid private key")
@@ -118,7 +114,7 @@ proc new*(
agentVersion: agentVersion,
listenAddrs: @listenAddrs,
protocols: @protocols,
addressMappers: addressMappers
addressMappers: addressMappers,
)
return peerInfo
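
To illustrate the constructor above (the key and listen address are invented for the example):

import libp2p/[crypto/crypto, multiaddress, peerinfo]

let rng = newRng()
let key = PrivateKey.random(Ed25519, rng[]).tryGet()
let info = PeerInfo.new(
  key, listenAddrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/4001").tryGet()]
)
# fullAddrs appends the /p2p/<peer id> suffix to every listen address:
for fa in info.fullAddrs().tryGet():
  echo fa # e.g. /ip4/127.0.0.1/tcp/4001/p2p/12D3Koo...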


@@ -29,7 +29,8 @@ import
./crypto/crypto,
./protocols/identify,
./protocols/protocol,
./peerid, ./peerinfo,
./peerid,
./peerinfo,
./routing_record,
./multiaddress,
./stream/connection,
@@ -41,7 +42,6 @@ type
#################
# Handler types #
#################
PeerBookChangeHandler* = proc(peerId: PeerId) {.gcsafe, raises: [].}
#########
@@ -69,31 +69,26 @@ type
####################
# Peer store types #
####################
PeerStore* {.public.} = ref object
books: Table[string, BasePeerBook]
identify: Identify
capacity*: int
toClean*: seq[PeerId]
proc new*(T: type PeerStore, identify: Identify, capacity = 1000): PeerStore {.public.} =
T(
identify: identify,
capacity: capacity
)
proc new*(
T: type PeerStore, identify: Identify, capacity = 1000
): PeerStore {.public.} =
T(identify: identify, capacity: capacity)
#########################
# Generic Peer Book API #
#########################
proc `[]`*[T](peerBook: PeerBook[T],
peerId: PeerId): T {.public.} =
proc `[]`*[T](peerBook: PeerBook[T], peerId: PeerId): T {.public.} =
## Get all known metadata of a provided peer, or default(T) if missing
peerBook.book.getOrDefault(peerId)
proc `[]=`*[T](peerBook: PeerBook[T],
peerId: PeerId,
entry: T) {.public.} =
proc `[]=`*[T](peerBook: PeerBook[T], peerId: PeerId, entry: T) {.public.} =
## Set metadata for a given peerId.
peerBook.book[peerId] = entry
@@ -102,8 +97,7 @@ proc `[]=`*[T](peerBook: PeerBook[T],
for handler in peerBook.changeHandlers:
handler(peerId)
proc del*[T](peerBook: PeerBook[T],
peerId: PeerId): bool {.public.} =
proc del*[T](peerBook: PeerBook[T], peerId: PeerId): bool {.public.} =
## Delete the provided peer from the book. Returns whether the peer was in the book
if peerId notin peerBook.book:
@@ -122,7 +116,8 @@ proc addHandler*[T](peerBook: PeerBook[T], handler: PeerBookChangeHandler) {.pub
## Adds a callback that will be called every time the book changes
peerBook.changeHandlers.add(handler)
proc len*[T](peerBook: PeerBook[T]): int {.public.} = peerBook.book.len
proc len*[T](peerBook: PeerBook[T]): int {.public.} =
peerBook.book.len
##################
# Peer Store API #
@@ -145,16 +140,12 @@ proc `[]`*[T](p: PeerStore, typ: type[T]): T {.public.} =
p.books[name] = result
return result
proc del*(peerStore: PeerStore,
peerId: PeerId) {.public.} =
proc del*(peerStore: PeerStore, peerId: PeerId) {.public.} =
## Delete the provided peer from every book.
for _, book in peerStore.books:
book.deletor(peerId)
proc updatePeerInfo*(
peerStore: PeerStore,
info: IdentifyInfo) =
proc updatePeerInfo*(peerStore: PeerStore, info: IdentifyInfo) =
if info.addrs.len > 0:
peerStore[AddressBook][info.peerId] = info.addrs
@@ -177,10 +168,7 @@ proc updatePeerInfo*(
if cleanupPos >= 0:
peerStore.toClean.delete(cleanupPos)
proc cleanup*(
peerStore: PeerStore,
peerId: PeerId) =
proc cleanup*(peerStore: PeerStore, peerId: PeerId) =
if peerStore.capacity == 0:
peerStore.del(peerId)
return
@@ -193,10 +181,7 @@ proc cleanup*(
peerStore.del(peerStore.toClean[0])
peerStore.toClean.delete(0)
proc identify*(
peerStore: PeerStore,
muxer: Muxer) {.async.} =
proc identify*(peerStore: PeerStore, muxer: Muxer) {.async.} =
# new stream for identify
var stream = await muxer.newStream()
if stream == nil:
@@ -209,7 +194,8 @@ proc identify*(
when defined(libp2p_agents_metrics):
var
knownAgent = "unknown"
shortAgent = info.agentVersion.get("").split("/")[0].safeToLowerAscii().get("")
shortAgent =
info.agentVersion.get("").split("/")[0].safeToLowerAscii().get("")
if KnownLibP2PAgentsSeq.contains(shortAgent):
knownAgent = shortAgent
muxer.connection.setShortAgent(knownAgent)
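
For orientation, the generic book API above is used roughly as follows. The store normally comes from an existing switch (switch.peerStore); ProtoBook is assumed to be one of the stock typed books alongside the AddressBook used earlier in this hunk:

import libp2p/[peerid, peerstore]

proc dump(store: PeerStore, pid: PeerId) =
  # Typed books are created on demand and keyed by PeerId;
  # unknown peers yield default(T) rather than an error.
  echo "addresses: ", store[AddressBook][pid]
  echo "protocols: ", store[ProtoBook][pid]
  store.del(pid) # drop the peer from every book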


@@ -21,14 +21,20 @@ const MaxMessageSize = 1'u shl 22
type
ProtoFieldKind* = enum
## Protobuf's field types enum
Varint, Fixed64, Length, StartGroup, EndGroup, Fixed32
Varint
Fixed64
Length
StartGroup
EndGroup
Fixed32
ProtoFlags* = enum
## Protobuf's encoding types
WithVarintLength, WithUint32BeLength, WithUint32LeLength
WithVarintLength
WithUint32BeLength
WithUint32LeLength
ProtoBuffer* = object
## Protobuf's message representation object
ProtoBuffer* = object ## Protobuf's message representation object
options: set[ProtoFlags]
buffer*: seq[byte]
offset*: int
@@ -39,8 +45,7 @@ type
wire*: ProtoFieldKind
index*: uint64
ProtoField* = object
## Protobuf's message field representation object
ProtoField* = object ## Protobuf's message field representation object
index*: int
case kind*: ProtoFieldKind
of Varint:
@@ -55,30 +60,33 @@ type
discard
ProtoError* {.pure.} = enum
VarintDecode,
MessageIncomplete,
BufferOverflow,
MessageTooBig,
BadWireType,
IncorrectBlob,
VarintDecode
MessageIncomplete
BufferOverflow
MessageTooBig
BadWireType
IncorrectBlob
RequiredFieldMissing
ProtoResult*[T] = Result[T, ProtoError]
ProtoScalar* = uint | uint32 | uint64 | zint | zint32 | zint64 |
hint | hint32 | hint64 | float32 | float64
ProtoScalar* =
uint | uint32 | uint64 | zint | zint32 | zint64 | hint | hint32 | hint64 | float32 |
float64
const
SupportedWireTypes* = @[
const SupportedWireTypes* =
@[
uint64(ProtoFieldKind.Varint),
uint64(ProtoFieldKind.Fixed64),
uint64(ProtoFieldKind.Length),
uint64(ProtoFieldKind.Fixed32)
uint64(ProtoFieldKind.Fixed32),
]
template checkFieldNumber*(i: int) =
doAssert((i > 0 and i < (1 shl 29)) and not(i >= 19000 and i <= 19999),
"Incorrect or reserved field number")
doAssert(
(i > 0 and i < (1 shl 29)) and not (i >= 19000 and i <= 19999),
"Incorrect or reserved field number",
)
template getProtoHeader*(index: int, wire: ProtoFieldKind): uint64 =
## Get protobuf's field header integer for ``index`` and ``wire``.
@@ -114,29 +122,34 @@ proc vsizeof*(field: ProtoField): int {.inline.} =
vsizeof(getProtoHeader(field)) + sizeof(field.vfloat32)
of ProtoFieldKind.Length:
vsizeof(getProtoHeader(field)) + vsizeof(uint64(len(field.vbuffer))) +
len(field.vbuffer)
len(field.vbuffer)
else:
0
proc initProtoBuffer*(data: seq[byte], offset = 0,
options: set[ProtoFlags] = {},
maxSize = MaxMessageSize): ProtoBuffer =
proc initProtoBuffer*(
data: seq[byte], offset = 0, options: set[ProtoFlags] = {}, maxSize = MaxMessageSize
): ProtoBuffer =
## Initialize ProtoBuffer with shallow copy of ``data``.
result.buffer = data
result.offset = offset
result.options = options
result.maxSize = maxSize
proc initProtoBuffer*(data: openArray[byte], offset = 0,
options: set[ProtoFlags] = {},
maxSize = MaxMessageSize): ProtoBuffer =
proc initProtoBuffer*(
data: openArray[byte],
offset = 0,
options: set[ProtoFlags] = {},
maxSize = MaxMessageSize,
): ProtoBuffer =
## Initialize ProtoBuffer with copy of ``data``.
result.buffer = @data
result.offset = offset
result.options = options
result.maxSize = maxSize
proc initProtoBuffer*(options: set[ProtoFlags] = {}, maxSize = MaxMessageSize): ProtoBuffer =
proc initProtoBuffer*(
options: set[ProtoFlags] = {}, maxSize = MaxMessageSize
): ProtoBuffer =
## Initialize ProtoBuffer with a new empty sequence.
result.buffer = newSeq[byte]()
result.options = options
@@ -152,37 +165,31 @@ proc initProtoBuffer*(options: set[ProtoFlags] = {}, maxSize = MaxMessageSize):
result.buffer.setLen(4)
result.offset = 4
proc write*[T: ProtoScalar](pb: var ProtoBuffer,
field: int, value: T) =
proc write*[T: ProtoScalar](pb: var ProtoBuffer, field: int, value: T) =
checkFieldNumber(field)
var length = 0
when (T is uint64) or (T is uint32) or (T is uint) or
(T is zint64) or (T is zint32) or (T is zint) or
(T is hint64) or (T is hint32) or (T is hint):
let flength = vsizeof(getProtoHeader(field, ProtoFieldKind.Varint)) +
vsizeof(value)
when (T is uint64) or (T is uint32) or (T is uint) or (T is zint64) or (T is zint32) or
(T is zint) or (T is hint64) or (T is hint32) or (T is hint):
let flength = vsizeof(getProtoHeader(field, ProtoFieldKind.Varint)) + vsizeof(value)
let header = ProtoFieldKind.Varint
elif T is float32:
let flength = vsizeof(getProtoHeader(field, ProtoFieldKind.Fixed32)) +
sizeof(T)
let flength = vsizeof(getProtoHeader(field, ProtoFieldKind.Fixed32)) + sizeof(T)
let header = ProtoFieldKind.Fixed32
elif T is float64:
let flength = vsizeof(getProtoHeader(field, ProtoFieldKind.Fixed64)) +
sizeof(T)
let flength = vsizeof(getProtoHeader(field, ProtoFieldKind.Fixed64)) + sizeof(T)
let header = ProtoFieldKind.Fixed64
pb.buffer.setLen(len(pb.buffer) + flength)
let hres = PB.putUVarint(pb.toOpenArray(), length,
getProtoHeader(field, header))
let hres = PB.putUVarint(pb.toOpenArray(), length, getProtoHeader(field, header))
doAssert(hres.isOk())
pb.offset += length
when (T is uint64) or (T is uint32) or (T is uint):
let vres = PB.putUVarint(pb.toOpenArray(), length, value)
doAssert(vres.isOk())
pb.offset += length
elif (T is zint64) or (T is zint32) or (T is zint) or
(T is hint64) or (T is hint32) or (T is hint):
elif (T is zint64) or (T is zint32) or (T is zint) or (T is hint64) or (T is hint32) or
(T is hint):
let vres = putSVarint(pb.toOpenArray(), length, value)
doAssert(vres.isOk())
pb.offset += length
@@ -197,14 +204,14 @@ proc write*[T: ProtoScalar](pb: var ProtoBuffer,
pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u64.toBytesLE()
pb.offset += sizeof(T)
proc writePacked*[T: ProtoScalar](pb: var ProtoBuffer, field: int,
value: openArray[T]) =
proc writePacked*[T: ProtoScalar](
pb: var ProtoBuffer, field: int, value: openArray[T]
) =
checkFieldNumber(field)
var length = 0
let dlength =
when (T is uint64) or (T is uint32) or (T is uint) or
(T is zint64) or (T is zint32) or (T is zint) or
(T is hint64) or (T is hint32) or (T is hint):
when (T is uint64) or (T is uint32) or (T is uint) or (T is zint64) or (T is zint32) or
(T is zint) or (T is hint64) or (T is hint32) or (T is hint):
var res = 0
for item in value:
res += vsizeof(item)
@@ -228,8 +235,8 @@ proc writePacked*[T: ProtoScalar](pb: var ProtoBuffer, field: int,
let vres = PB.putUVarint(pb.toOpenArray(), length, item)
doAssert(vres.isOk())
pb.offset += length
elif (T is zint64) or (T is zint32) or (T is zint) or
(T is hint64) or (T is hint32) or (T is hint):
elif (T is zint64) or (T is zint32) or (T is zint) or (T is hint64) or (T is hint32) or
(T is hint):
length = 0
let vres = PB.putSVarint(pb.toOpenArray(), length, item)
doAssert(vres.isOk())
@@ -245,19 +252,19 @@ proc writePacked*[T: ProtoScalar](pb: var ProtoBuffer, field: int,
pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u64.toBytesLE()
pb.offset += sizeof(T)
proc write*[T: byte|char](pb: var ProtoBuffer, field: int,
value: openArray[T]) =
proc write*[T: byte | char](pb: var ProtoBuffer, field: int, value: openArray[T]) =
checkFieldNumber(field)
var length = 0
let flength = vsizeof(getProtoHeader(field, ProtoFieldKind.Length)) +
vsizeof(uint64(len(value))) + len(value)
let flength =
vsizeof(getProtoHeader(field, ProtoFieldKind.Length)) + vsizeof(uint64(len(value))) +
len(value)
pb.buffer.setLen(len(pb.buffer) + flength)
let hres = PB.putUVarint(pb.toOpenArray(), length,
getProtoHeader(field, ProtoFieldKind.Length))
let hres = PB.putUVarint(
pb.toOpenArray(), length, getProtoHeader(field, ProtoFieldKind.Length)
)
doAssert(hres.isOk())
pb.offset += length
let lres = PB.putUVarint(pb.toOpenArray(), length,
uint64(len(value)))
let lres = PB.putUVarint(pb.toOpenArray(), length, uint64(len(value)))
doAssert(lres.isOk())
pb.offset += length
if len(value) > 0:
@@ -294,8 +301,7 @@ proc finish*(pb: var ProtoBuffer) =
doAssert(len(pb.buffer) > 0)
pb.offset = 0
proc getHeader(data: var ProtoBuffer,
header: var ProtoHeader): ProtoResult[void] =
proc getHeader(data: var ProtoBuffer, header: var ProtoHeader): ProtoResult[void] =
var length = 0
var hdr = 0'u64
if PB.getUVarint(data.toOpenArray(), length, hdr).isOk():
@@ -350,9 +356,9 @@ proc skipValue(data: var ProtoBuffer, header: ProtoHeader): ProtoResult[void] =
of ProtoFieldKind.StartGroup, ProtoFieldKind.EndGroup:
err(ProtoError.BadWireType)
proc getValue[T: ProtoScalar](data: var ProtoBuffer,
header: ProtoHeader,
outval: var T): ProtoResult[void] =
proc getValue[T: ProtoScalar](
data: var ProtoBuffer, header: ProtoHeader, outval: var T
): ProtoResult[void] =
when (T is uint64) or (T is uint32) or (T is uint):
doAssert(header.wire == ProtoFieldKind.Varint)
var length = 0
@@ -363,8 +369,8 @@ proc getValue[T: ProtoScalar](data: var ProtoBuffer,
ok()
else:
err(ProtoError.VarintDecode)
elif (T is zint64) or (T is zint32) or (T is zint) or
(T is hint64) or (T is hint32) or (T is hint):
elif (T is zint64) or (T is zint32) or (T is zint) or (T is hint64) or (T is hint32) or
(T is hint):
doAssert(header.wire == ProtoFieldKind.Varint)
var length = 0
var value = T(0)
@@ -391,9 +397,12 @@ proc getValue[T: ProtoScalar](data: var ProtoBuffer,
else:
err(ProtoError.MessageIncomplete)
proc getValue[T:byte|char](data: var ProtoBuffer, header: ProtoHeader,
outBytes: var openArray[T],
outLength: var int): ProtoResult[void] =
proc getValue[T: byte | char](
data: var ProtoBuffer,
header: ProtoHeader,
outBytes: var openArray[T],
outLength: var int,
): ProtoResult[void] =
doAssert(header.wire == ProtoFieldKind.Length)
var length = 0
var bsize = 0'u64
@@ -420,8 +429,9 @@ proc getValue[T:byte|char](data: var ProtoBuffer, header: ProtoHeader,
else:
err(ProtoError.VarintDecode)
proc getValue[T:seq[byte]|string](data: var ProtoBuffer, header: ProtoHeader,
outBytes: var T): ProtoResult[void] =
proc getValue[T: seq[byte] | string](
data: var ProtoBuffer, header: ProtoHeader, outBytes: var T
): ProtoResult[void] =
doAssert(header.wire == ProtoFieldKind.Length)
var length = 0
var bsize = 0'u64
@@ -443,20 +453,20 @@ proc getValue[T:seq[byte]|string](data: var ProtoBuffer, header: ProtoHeader,
else:
err(ProtoError.VarintDecode)
proc getField*[T: ProtoScalar](data: ProtoBuffer, field: int,
output: var T): ProtoResult[bool] =
proc getField*[T: ProtoScalar](
data: ProtoBuffer, field: int, output: var T
): ProtoResult[bool] =
checkFieldNumber(field)
var current: T
var res = false
var pb = data
while not(pb.isEmpty()):
while not (pb.isEmpty()):
var header: ProtoHeader
? pb.getHeader(header)
?pb.getHeader(header)
let wireCheck =
when (T is uint64) or (T is uint32) or (T is uint) or
(T is zint64) or (T is zint32) or (T is zint) or
(T is hint64) or (T is hint32) or (T is hint):
when (T is uint64) or (T is uint32) or (T is uint) or (T is zint64) or
(T is zint32) or (T is zint) or (T is hint64) or (T is hint32) or (T is hint):
header.wire == ProtoFieldKind.Varint
elif T is float32:
header.wire == ProtoFieldKind.Fixed32
@@ -474,9 +484,9 @@ proc getField*[T: ProtoScalar](data: ProtoBuffer, field: int,
else:
# We are ignoring wire types different from what we expect, because that
# is how `protoc` behaves.
? pb.skipValue(header)
?pb.skipValue(header)
else:
? pb.skipValue(header)
?pb.skipValue(header)
if res:
output = current
@@ -484,16 +494,16 @@ proc getField*[T: ProtoScalar](data: ProtoBuffer, field: int,
else:
ok(false)
proc getField*[T: byte|char](data: ProtoBuffer, field: int,
output: var openArray[T],
outlen: var int): ProtoResult[bool] =
proc getField*[T: byte | char](
data: ProtoBuffer, field: int, output: var openArray[T], outlen: var int
): ProtoResult[bool] =
checkFieldNumber(field)
var pb = data
var res = false
outlen = 0
while not(pb.isEmpty()):
while not (pb.isEmpty()):
var header: ProtoHeader
let hres = pb.getHeader(header)
if hres.isErr():
@@ -536,13 +546,14 @@ proc getField*[T: byte|char](data: ProtoBuffer, field: int,
else:
ok(false)
proc getField*[T: seq[byte]|string](data: ProtoBuffer, field: int,
output: var T): ProtoResult[bool] =
proc getField*[T: seq[byte] | string](
data: ProtoBuffer, field: int, output: var T
): ProtoResult[bool] =
checkFieldNumber(field)
var res = false
var pb = data
while not(pb.isEmpty()):
while not (pb.isEmpty()):
var header: ProtoHeader
let hres = pb.getHeader(header)
if hres.isErr():
@@ -573,29 +584,32 @@ proc getField*[T: seq[byte]|string](data: ProtoBuffer, field: int,
else:
ok(false)
proc getField*(pb: ProtoBuffer, field: int,
output: var ProtoBuffer): ProtoResult[bool] {.inline.} =
proc getField*(
pb: ProtoBuffer, field: int, output: var ProtoBuffer
): ProtoResult[bool] {.inline.} =
var buffer: seq[byte]
if ? pb.getField(field, buffer):
if ?pb.getField(field, buffer):
output = initProtoBuffer(buffer)
ok(true)
else:
ok(false)
proc getRequiredField*[T](pb: ProtoBuffer, field: int,
output: var T): ProtoResult[void] {.inline.} =
if ? pb.getField(field, output):
proc getRequiredField*[T](
pb: ProtoBuffer, field: int, output: var T
): ProtoResult[void] {.inline.} =
if ?pb.getField(field, output):
ok()
else:
err(RequiredFieldMissing)
proc getRepeatedField*[T: seq[byte]|string](data: ProtoBuffer, field: int,
output: var seq[T]): ProtoResult[bool] =
proc getRepeatedField*[T: seq[byte] | string](
data: ProtoBuffer, field: int, output: var seq[T]
): ProtoResult[bool] =
checkFieldNumber(field)
var pb = data
output.setLen(0)
while not(pb.isEmpty()):
while not (pb.isEmpty()):
var header: ProtoHeader
let hres = pb.getHeader(header)
if hres.isErr():
@@ -626,13 +640,14 @@ proc getRepeatedField*[T: seq[byte]|string](data: ProtoBuffer, field: int,
else:
ok(false)
proc getRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,
output: var seq[T]): ProtoResult[bool] =
proc getRepeatedField*[T: ProtoScalar](
data: ProtoBuffer, field: int, output: var seq[T]
): ProtoResult[bool] =
checkFieldNumber(field)
var pb = data
output.setLen(0)
while not(pb.isEmpty()):
while not (pb.isEmpty()):
var header: ProtoHeader
let hres = pb.getHeader(header)
if hres.isErr():
@@ -640,8 +655,8 @@ proc getRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,
return err(hres.error)
if header.index == uint64(field):
if header.wire in {ProtoFieldKind.Varint, ProtoFieldKind.Fixed32,
ProtoFieldKind.Fixed64}:
if header.wire in
{ProtoFieldKind.Varint, ProtoFieldKind.Fixed32, ProtoFieldKind.Fixed64}:
var item: T
let vres = getValue(pb, header, item)
if vres.isOk():
@@ -665,20 +680,22 @@ proc getRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,
else:
ok(false)
proc getRequiredRepeatedField*[T](pb: ProtoBuffer, field: int,
output: var seq[T]): ProtoResult[void] {.inline.} =
if ? pb.getRepeatedField(field, output):
proc getRequiredRepeatedField*[T](
pb: ProtoBuffer, field: int, output: var seq[T]
): ProtoResult[void] {.inline.} =
if ?pb.getRepeatedField(field, output):
ok()
else:
err(RequiredFieldMissing)
proc getPackedRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,
output: var seq[T]): ProtoResult[bool] =
proc getPackedRepeatedField*[T: ProtoScalar](
data: ProtoBuffer, field: int, output: var seq[T]
): ProtoResult[bool] =
checkFieldNumber(field)
var pb = data
output.setLen(0)
while not(pb.isEmpty()):
while not (pb.isEmpty()):
var header: ProtoHeader
let hres = pb.getHeader(header)
if hres.isErr():
@@ -692,15 +709,15 @@ proc getPackedRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,
if ares.isOk():
var pbarr = initProtoBuffer(arritem)
let itemHeader =
when (T is uint64) or (T is uint32) or (T is uint) or
(T is zint64) or (T is zint32) or (T is zint) or
(T is hint64) or (T is hint32) or (T is hint):
when (T is uint64) or (T is uint32) or (T is uint) or (T is zint64) or
(T is zint32) or (T is zint) or (T is hint64) or (T is hint32) or
(T is hint):
ProtoHeader(wire: ProtoFieldKind.Varint)
elif T is float32:
ProtoHeader(wire: ProtoFieldKind.Fixed32)
elif T is float64:
ProtoHeader(wire: ProtoFieldKind.Fixed64)
while not(pbarr.isEmpty()):
while not (pbarr.isEmpty()):
var item: T
let vres = getValue(pbarr, itemHeader, item)
if vres.isOk():
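
The writer/reader procs reformatted above compose into a simple round trip; a minimal sketch with arbitrary field numbers and values:

import libp2p/protobuf/minprotobuf

var pb = initProtoBuffer()
pb.write(1, 42'u64)  # field 1: varint
pb.write(2, "hello") # field 2: length-delimited
pb.finish()          # rewind the offset so the buffer can be read back

var num: uint64
var txt: string
let rpb = initProtoBuffer(pb.buffer)
assert rpb.getField(1, num).tryGet() and rpb.getField(2, txt).tryGet()
assert num == 42'u64 and txt == "hello"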


@@ -11,28 +11,29 @@
import stew/results
import chronos, chronicles
import ../../../switch,
../../../multiaddress,
../../../peerid
import ../../../switch, ../../../multiaddress, ../../../peerid
import core
logScope:
topics = "libp2p autonat"
type
AutonatClient* = ref object of RootObj
type AutonatClient* = ref object of RootObj
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
let pb = AutonatDial(peerInfo: Opt.some(AutonatPeerInfo(
id: Opt.some(pid),
addrs: addrs
))).encode()
let pb = AutonatDial(
peerInfo: Opt.some(AutonatPeerInfo(id: Opt.some(pid), addrs: addrs))
).encode()
await conn.writeLp(pb.buffer)
method dialMe*(self: AutonatClient, switch: Switch, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()):
Future[MultiAddress] {.base, async.} =
proc getResponseOrRaise(autonatMsg: Opt[AutonatMsg]): AutonatDialResponse {.raises: [AutonatError].} =
method dialMe*(
self: AutonatClient,
switch: Switch,
pid: PeerId,
addrs: seq[MultiAddress] = newSeq[MultiAddress](),
): Future[MultiAddress] {.base, async.} =
proc getResponseOrRaise(
autonatMsg: Opt[AutonatMsg]
): AutonatDialResponse {.raises: [AutonatError].} =
autonatMsg.withValue(msg):
if msg.msgType == DialResponse:
msg.response.withValue(res):
@@ -47,24 +48,32 @@ method dialMe*(self: AutonatClient, switch: Switch, pid: PeerId, addrs: seq[Mult
else:
await switch.dial(pid, addrs, AutonatCodec)
except CatchableError as err:
raise newException(AutonatError, "Unexpected error when dialling: " & err.msg, err)
raise
newException(AutonatError, "Unexpected error when dialling: " & err.msg, err)
# To bypass maxConnectionsPerPeer
let incomingConnection = switch.connManager.expectConnection(pid, In)
if incomingConnection.failed() and incomingConnection.error of AlreadyExpectingConnectionError:
if incomingConnection.failed() and
incomingConnection.error of AlreadyExpectingConnectionError:
raise newException(AutonatError, incomingConnection.error.msg)
defer:
await conn.close()
incomingConnection.cancel() # Safer to always try to cancel because we aren't sure if the peer dialled us or not
incomingConnection.cancel()
# Safer to always try to cancel because we aren't sure if the peer dialled us or not
if incomingConnection.completed():
await (await incomingConnection).connection.close()
trace "sending Dial", addrs = switch.peerInfo.addrs
await conn.sendDial(switch.peerInfo.peerId, switch.peerInfo.addrs)
let response = getResponseOrRaise(AutonatMsg.decode(await conn.readLp(1024)))
return case response.status:
return
case response.status
of ResponseStatus.Ok:
response.ma.tryGet()
of ResponseStatus.DialError:
raise newException(AutonatUnreachableError, "Peer could not dial us back: " & response.text.get(""))
raise newException(
AutonatUnreachableError, "Peer could not dial us back: " & response.text.get("")
)
else:
raise newException(AutonatError, "Bad status " & $response.status & " " & response.text.get(""))
raise newException(
AutonatError, "Bad status " & $response.status & " " & response.text.get("")
)
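
In use, dialMe asks an already-connected peer to dial us back and returns the address it reached us on. A sketch, assuming a running switch and a peer that speaks the autonat protocol (module paths follow the usual nim-libp2p layout):

import chronos
import libp2p
import libp2p/protocols/connectivity/autonat/[client, core]

proc checkReachability(switch: Switch, peer: PeerId) {.async.} =
  let autonatClient = AutonatClient.new()
  try:
    let ma = await autonatClient.dialMe(switch, peer)
    echo "publicly reachable at ", ma
  except AutonatUnreachableError as err:
    echo "peer could not dial us back: ", err.msg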


@@ -11,9 +11,7 @@
import stew/[results, objects]
import chronos, chronicles
import ../../../multiaddress,
../../../peerid,
../../../errors
import ../../../multiaddress, ../../../peerid, ../../../errors
logScope:
topics = "libp2p autonat"
@@ -55,7 +53,9 @@ type
response*: Opt[AutonatDialResponse]
NetworkReachability* {.pure.} = enum
Unknown, NotReachable, Reachable
Unknown
NotReachable
Reachable
proc encode(p: AutonatPeerInfo): ProtoBuffer =
result = initProtoBuffer()
@@ -103,37 +103,39 @@ proc decode*(_: typedesc[AutonatMsg], buf: seq[byte]): Opt[AutonatMsg] =
let pb = initProtoBuffer(buf)
if ? pb.getField(1, msgTypeOrd).toOpt() and not checkedEnumAssign(msg.msgType, msgTypeOrd):
if ?pb.getField(1, msgTypeOrd).toOpt() and
not checkedEnumAssign(msg.msgType, msgTypeOrd):
return Opt.none(AutonatMsg)
if ? pb.getField(2, pbDial).toOpt():
if ?pb.getField(2, pbDial).toOpt():
var
pbPeerInfo: ProtoBuffer
dial: AutonatDial
let r4 = ? pbDial.getField(1, pbPeerInfo).toOpt()
let r4 = ?pbDial.getField(1, pbPeerInfo).toOpt()
var peerInfo: AutonatPeerInfo
if r4:
var pid: PeerId
let
r5 = ? pbPeerInfo.getField(1, pid).toOpt()
r6 = ? pbPeerInfo.getRepeatedField(2, peerInfo.addrs).toOpt()
if r5: peerInfo.id = Opt.some(pid)
r5 = ?pbPeerInfo.getField(1, pid).toOpt()
r6 = ?pbPeerInfo.getRepeatedField(2, peerInfo.addrs).toOpt()
if r5:
peerInfo.id = Opt.some(pid)
dial.peerInfo = Opt.some(peerInfo)
msg.dial = Opt.some(dial)
if ? pb.getField(3, pbResponse).toOpt():
if ?pb.getField(3, pbResponse).toOpt():
var
statusOrd: uint
text: string
ma: MultiAddress
response: AutonatDialResponse
if ? pbResponse.getField(1, statusOrd).optValue():
if ?pbResponse.getField(1, statusOrd).optValue():
if not checkedEnumAssign(response.status, statusOrd):
return Opt.none(AutonatMsg)
if ? pbResponse.getField(2, text).optValue():
if ?pbResponse.getField(2, text).optValue():
response.text = Opt.some(text)
if ? pbResponse.getField(3, ma).optValue():
if ?pbResponse.getField(3, ma).optValue():
response.ma = Opt.some(ma)
msg.response = Opt.some(response)
return Opt.some(msg)


@@ -12,13 +12,14 @@
import std/[sets, sequtils]
import stew/results
import chronos, chronicles
import ../../protocol,
../../../switch,
../../../multiaddress,
../../../multicodec,
../../../peerid,
../../../utils/[semaphore, future],
../../../errors
import
../../protocol,
../../../switch,
../../../multiaddress,
../../../multicodec,
../../../peerid,
../../../utils/[semaphore, future],
../../../errors
import core
export core
@@ -26,33 +27,36 @@ export core
logScope:
topics = "libp2p autonat"
type
Autonat* = ref object of LPProtocol
sem: AsyncSemaphore
switch*: Switch
dialTimeout: Duration
type Autonat* = ref object of LPProtocol
sem: AsyncSemaphore
switch*: Switch
dialTimeout: Duration
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
let pb = AutonatDial(peerInfo: Opt.some(AutonatPeerInfo(
id: Opt.some(pid),
addrs: addrs
))).encode()
let pb = AutonatDial(
peerInfo: Opt.some(AutonatPeerInfo(id: Opt.some(pid), addrs: addrs))
).encode()
await conn.writeLp(pb.buffer)
proc sendResponseError(conn: Connection, status: ResponseStatus, text: string = "") {.async.} =
proc sendResponseError(
conn: Connection, status: ResponseStatus, text: string = ""
) {.async.} =
let pb = AutonatDialResponse(
status: status,
text: if text == "": Opt.none(string) else: Opt.some(text),
ma: Opt.none(MultiAddress)
).encode()
status: status,
text:
if text == "":
Opt.none(string)
else:
Opt.some(text)
,
ma: Opt.none(MultiAddress),
).encode()
await conn.writeLp(pb.buffer)
proc sendResponseOk(conn: Connection, ma: MultiAddress) {.async.} =
let pb = AutonatDialResponse(
status: ResponseStatus.Ok,
text: Opt.some("Ok"),
ma: Opt.some(ma)
).encode()
status: ResponseStatus.Ok, text: Opt.some("Ok"), ma: Opt.some(ma)
).encode()
await conn.writeLp(pb.buffer)
proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.async.} =
@@ -60,12 +64,15 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
var futs: seq[Future[Opt[MultiAddress]]]
try:
# This is to bypass the per peer max connections limit
let outgoingConnection = autonat.switch.connManager.expectConnection(conn.peerId, Out)
if outgoingConnection.failed() and outgoingConnection.error of AlreadyExpectingConnectionError:
let outgoingConnection =
autonat.switch.connManager.expectConnection(conn.peerId, Out)
if outgoingConnection.failed() and
outgoingConnection.error of AlreadyExpectingConnectionError:
await conn.sendResponseError(DialRefused, outgoingConnection.error.msg)
return
# Safer to always try to cancel because we aren't sure if the connection was established
defer: outgoingConnection.cancel()
defer:
outgoingConnection.cancel()
# tryDial is to bypass the global max connections limit
futs = addrs.mapIt(autonat.switch.dialer.tryDial(conn.peerId, @[it]))
let fut = await anyCompleted(futs).wait(autonat.dialTimeout)
@@ -106,7 +113,8 @@ proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[voi
var isRelayed = observedAddr.contains(multiCodec("p2p-circuit")).valueOr:
return conn.sendResponseError(DialRefused, "Invalid observed address")
if isRelayed:
return conn.sendResponseError(DialRefused, "Refused to dial a relayed observed address")
return
conn.sendResponseError(DialRefused, "Refused to dial a relayed observed address")
let hostIp = observedAddr[0].valueOr:
return conn.sendResponseError(InternalError, "Wrong observed address")
if not IP.match(hostIp):
@@ -115,16 +123,20 @@ proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[voi
addrs.incl(observedAddr)
trace "addrs received", addrs = peerInfo.addrs
for ma in peerInfo.addrs:
isRelayed = ma.contains(multiCodec("p2p-circuit")).valueOr: continue
let maFirst = ma[0].valueOr: continue
if not DNS_OR_IP.match(maFirst): continue
isRelayed = ma.contains(multiCodec("p2p-circuit")).valueOr:
continue
let maFirst = ma[0].valueOr:
continue
if not DNS_OR_IP.match(maFirst):
continue
try:
addrs.incl(
if maFirst == hostIp:
ma
else:
let maEnd = ma[1..^1].valueOr: continue
let maEnd = ma[1 ..^ 1].valueOr:
continue
hostIp & maEnd
)
except LPError as exc:
@@ -138,8 +150,11 @@ proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[voi
trace "trying to dial", addrs = addrsSeq
return autonat.tryDial(conn, addrsSeq)
proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc new*(
T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds
): T =
let autonat =
T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:


@@ -23,7 +23,11 @@ export core.NetworkReachability
logScope:
topics = "libp2p autonatservice"
declarePublicGauge(libp2p_autonat_reachability_confidence, "autonat reachability confidence", labels = ["reachability"])
declarePublicGauge(
libp2p_autonat_reachability_confidence,
"autonat reachability confidence",
labels = ["reachability"],
)
type
AutonatService* = ref object of Service
@@ -44,19 +48,22 @@ type
dialTimeout: Duration
enableAddressMapper: bool
StatusAndConfidenceHandler* = proc (networkReachability: NetworkReachability, confidence: Opt[float]): Future[void] {.gcsafe, raises: [].}
StatusAndConfidenceHandler* = proc(
networkReachability: NetworkReachability, confidence: Opt[float]
): Future[void] {.gcsafe, raises: [].}
proc new*(
T: typedesc[AutonatService],
autonatClient: AutonatClient,
rng: ref HmacDrbgContext,
scheduleInterval: Opt[Duration] = Opt.none(Duration),
askNewConnectedPeers = true,
numPeersToAsk: int = 5,
maxQueueSize: int = 10,
minConfidence: float = 0.3,
dialTimeout = 30.seconds,
enableAddressMapper = true): T =
T: typedesc[AutonatService],
autonatClient: AutonatClient,
rng: ref HmacDrbgContext,
scheduleInterval: Opt[Duration] = Opt.none(Duration),
askNewConnectedPeers = true,
numPeersToAsk: int = 5,
maxQueueSize: int = 10,
minConfidence: float = 0.3,
dialTimeout = 30.seconds,
enableAddressMapper = true,
): T =
return T(
scheduleInterval: scheduleInterval,
networkReachability: Unknown,
@@ -69,7 +76,8 @@ proc new*(
maxQueueSize: maxQueueSize,
minConfidence: minConfidence,
dialTimeout: dialTimeout,
enableAddressMapper: enableAddressMapper)
enableAddressMapper: enableAddressMapper,
)
proc callHandler(self: AutonatService) {.async.} =
if not isNil(self.statusAndConfidenceHandler):
@@ -82,8 +90,9 @@ proc hasEnoughIncomingSlots(switch: Switch): bool =
proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =
return switch.connManager.selectMuxer(peerId, In) != nil
proc handleAnswer(self: AutonatService, ans: NetworkReachability): Future[bool] {.async.} =
proc handleAnswer(
self: AutonatService, ans: NetworkReachability
): Future[bool] {.async.} =
if ans == Unknown:
return
@@ -99,17 +108,26 @@ proc handleAnswer(self: AutonatService, ans: NetworkReachability): Future[bool]
const reachabilityPriority = [Reachable, NotReachable]
for reachability in reachabilityPriority:
let confidence = self.answers.countIt(it == reachability) / self.maxQueueSize
libp2p_autonat_reachability_confidence.set(value = confidence, labelValues = [$reachability])
libp2p_autonat_reachability_confidence.set(
value = confidence, labelValues = [$reachability]
)
if self.confidence.isNone and confidence >= self.minConfidence:
self.networkReachability = reachability
self.confidence = Opt.some(confidence)
debug "Current status", currentStats = $self.networkReachability, confidence = $self.confidence, answers = self.answers
debug "Current status",
currentStats = $self.networkReachability,
confidence = $self.confidence,
answers = self.answers
# Return whether anything has changed
return self.networkReachability != oldNetworkReachability or self.confidence != oldConfidence
return
self.networkReachability != oldNetworkReachability or
self.confidence != oldConfidence
proc askPeer(self: AutonatService, switch: Switch, peerId: PeerId): Future[NetworkReachability] {.async.} =
proc askPeer(
self: AutonatService, switch: Switch, peerId: PeerId
): Future[NetworkReachability] {.async.} =
logScope:
peerId = $peerId
@@ -117,7 +135,8 @@ proc askPeer(self: AutonatService, switch: Switch, peerId: PeerId): Future[Netwo
return Unknown
if not hasEnoughIncomingSlots(switch):
debug "No incoming slots available, not asking peer", incomingSlotsAvailable=switch.connManager.slotsAvailable(In)
debug "No incoming slots available, not asking peer",
incomingSlotsAvailable = switch.connManager.slotsAvailable(In)
return Unknown
trace "Asking peer for reachability"
@@ -150,7 +169,8 @@ proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
if answersFromPeers >= self.numPeersToAsk:
break
if not hasEnoughIncomingSlots(switch):
debug "No incoming slots available, not asking peers", incomingSlotsAvailable=switch.connManager.slotsAvailable(In)
debug "No incoming slots available, not asking peers",
incomingSlotsAvailable = switch.connManager.slotsAvailable(In)
break
if (await askPeer(self, switch, peer)) != Unknown:
answersFromPeers.inc()
@@ -160,10 +180,8 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy
await service.run(switch)
proc addressMapper(
self: AutonatService,
peerStore: PeerStore,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
self: AutonatService, peerStore: PeerStore, listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
if self.networkReachability != NetworkReachability.Reachable:
return listenAddrs
@@ -171,24 +189,32 @@ proc addressMapper(
for listenAddr in listenAddrs:
var processedMA = listenAddr
try:
if not listenAddr.isPublicMA() and self.networkReachability == NetworkReachability.Reachable:
processedMA = peerStore.guessDialableAddr(listenAddr) # handle manual port forwarding
if not listenAddr.isPublicMA() and
self.networkReachability == NetworkReachability.Reachable:
processedMA = peerStore.guessDialableAddr(listenAddr)
# handle manual port forwarding
except CatchableError as exc:
debug "Error while handling address mapper", msg = exc.msg
addrs.add(processedMA)
return addrs
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
self.addressMapper = proc(
listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, switch.peerStore, listenAddrs)
info "Setting up AutonatService"
let hasBeenSetup = await procCall Service(self).setup(switch)
if hasBeenSetup:
if self.askNewConnectedPeers:
self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} =
self.newConnectedPeerHandler = proc(
peerId: PeerId, event: PeerEvent
): Future[void] {.async.} =
discard askPeer(self, switch, peerId)
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
switch.connManager.addPeerEventHandler(
self.newConnectedPeerHandler, PeerEventKind.Joined
)
self.scheduleInterval.withValue(interval):
self.scheduleHandle = schedule(self, switch, interval)
if self.enableAddressMapper:
@@ -207,11 +233,15 @@ method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public
self.scheduleHandle.cancel()
self.scheduleHandle = nil
if not isNil(self.newConnectedPeerHandler):
switch.connManager.removePeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
switch.connManager.removePeerEventHandler(
self.newConnectedPeerHandler, PeerEventKind.Joined
)
if self.enableAddressMapper:
switch.peerInfo.addressMappers.keepItIf(it != self.addressMapper)
await switch.peerInfo.update()
return hasBeenStopped
proc statusAndConfidenceHandler*(self: AutonatService, statusAndConfidenceHandler: StatusAndConfidenceHandler) =
proc statusAndConfidenceHandler*(
self: AutonatService, statusAndConfidenceHandler: StatusAndConfidenceHandler
) =
self.statusAndConfidenceHandler = statusAndConfidenceHandler
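
Wiring the service together, sketched with arbitrary settings; the withServices builder call in the trailing comment is an assumption about the surrounding API, not shown in this diff:

import chronos
import stew/results
import libp2p
import libp2p/protocols/connectivity/autonat/[client, service]

let rng = newRng()
let autonatService = AutonatService.new(AutonatClient.new(), rng, numPeersToAsk = 5)

proc onStatus(
    networkReachability: NetworkReachability, confidence: Opt[float]
): Future[void] {.async.} =
  echo "reachability: ", networkReachability, ", confidence: ", confidence

autonatService.statusAndConfidenceHandler(onStatus)
# The service would then be handed to the switch, e.g.:
# SwitchBuilder.new() ... .withServices(@[Service(autonatService)]) ... .build()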


@@ -15,25 +15,26 @@ import stew/results
import chronos, chronicles
import core
import ../../protocol,
../../../stream/connection,
../../../switch,
../../../utils/future
import
../../protocol, ../../../stream/connection, ../../../switch, ../../../utils/future
export DcutrError
type
DcutrClient* = ref object
connectTimeout: Duration
maxDialableAddrs: int
type DcutrClient* = ref object
connectTimeout: Duration
maxDialableAddrs: int
logScope:
topics = "libp2p dcutrclient"
proc new*(T: typedesc[DcutrClient], connectTimeout = 15.seconds, maxDialableAddrs = 8): T =
proc new*(
T: typedesc[DcutrClient], connectTimeout = 15.seconds, maxDialableAddrs = 8
): T =
return T(connectTimeout: connectTimeout, maxDialableAddrs: maxDialableAddrs)
proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc startSync*(
self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: seq[MultiAddress]
) {.async.} =
logScope:
peerId = switch.peerInfo.peerId
@@ -43,7 +44,8 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
try:
var ourDialableAddrs = getHolePunchableAddrs(addrs)
if ourDialableAddrs.len == 0:
debug "Dcutr initiator has no supported dialable addresses. Aborting Dcutr.", addrs
debug "Dcutr initiator has no supported dialable addresses. Aborting Dcutr.",
addrs
return
stream = await switch.dial(remotePeerId, DcutrCodec)
@@ -54,7 +56,8 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
peerDialableAddrs = getHolePunchableAddrs(connectAnswer.addrs)
if peerDialableAddrs.len == 0:
debug "Dcutr receiver has no supported dialable addresses to connect to. Aborting Dcutr.", addrs=connectAnswer.addrs
debug "Dcutr receiver has no supported dialable addresses to connect to. Aborting Dcutr.",
addrs = connectAnswer.addrs
return
let rttEnd = Moment.now()
@@ -65,25 +68,47 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
await sleepAsync(halfRtt)
if peerDialableAddrs.len > self.maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<self.maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.In))
peerDialableAddrs = peerDialableAddrs[0 ..< self.maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(
switch.connect(
stream.peerId,
@[it],
forceDial = true,
reuseConnection = false,
dir = Direction.In,
)
)
try:
discard await anyCompleted(futs).wait(self.connectTimeout)
debug "Dcutr initiator has directly connected to the remote peer."
finally:
for fut in futs: fut.cancel()
for fut in futs:
fut.cancel()
except CancelledError as err:
raise err
except AllFuturesFailedError as err:
debug "Dcutr initiator could not connect to the remote peer, all connect attempts failed", peerDialableAddrs, msg = err.msg
raise newException(DcutrError, "Dcutr initiator could not connect to the remote peer, all connect attempts failed", err)
debug "Dcutr initiator could not connect to the remote peer, all connect attempts failed",
peerDialableAddrs, msg = err.msg
raise newException(
DcutrError,
"Dcutr initiator could not connect to the remote peer, all connect attempts failed",
err,
)
except AsyncTimeoutError as err:
debug "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg
raise newException(DcutrError, "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", err)
debug "Dcutr initiator could not connect to the remote peer, all connect attempts timed out",
peerDialableAddrs, msg = err.msg
raise newException(
DcutrError,
"Dcutr initiator could not connect to the remote peer, all connect attempts timed out",
err,
)
except CatchableError as err:
debug "Unexpected error when Dcutr initiator tried to connect to the remote peer", err = err.msg
raise newException(DcutrError, "Unexpected error when Dcutr initiator tried to connect to the remote peer", err)
debug "Unexpected error when Dcutr initiator tried to connect to the remote peer",
err = err.msg
raise newException(
DcutrError,
"Unexpected error when Dcutr initiator tried to connect to the remote peer", err,
)
finally:
if stream != nil:
await stream.close()
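
From the initiator's side the exchange above reduces to a single call; a sketch assuming an existing relayed connection to the remote peer:

import chronos
import libp2p
import libp2p/protocols/connectivity/dcutr/client

proc holePunch(switch: Switch, remote: PeerId) {.async.} =
  let dcutrClient = DcutrClient.new(connectTimeout = 15.seconds, maxDialableAddrs = 8)
  try:
    # Sends Connect over the relayed stream, measures the RTT, then dials directly.
    await dcutrClient.startSync(switch, remote, switch.peerInfo.addrs)
  except DcutrError as err:
    echo "hole punch failed: ", err.msg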


@@ -14,14 +14,11 @@ import std/sequtils
import chronos
import stew/objects
import ../../../multiaddress,
../../../errors,
../../../stream/connection
import ../../../multiaddress, ../../../errors, ../../../stream/connection
export multiaddress
const
DcutrCodec* = "/libp2p/dcutr"
const DcutrCodec* = "/libp2p/dcutr"
type
MsgType* = enum
@@ -56,10 +53,12 @@ proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
await conn.writeLp(pb.buffer)
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] {.raises: [LPError]} =
proc getHolePunchableAddrs*(
addrs: seq[MultiAddress]
): seq[MultiAddress] {.raises: [LPError].} =
var result = newSeq[MultiAddress]()
for a in addrs:
# This is necessary to also accept addrs like /ip4/198.51.100/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N
if [TCP, mapAnd(TCP_DNS, P2PPattern), mapAnd(TCP_IP, P2PPattern)].anyIt(it.match(a)):
result.add(a[0..1].tryGet())
# This is necessary to also accept addrs like /ip4/198.51.100/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N
if [TCP, mapAnd(TCP_DNS, P2PPattern), mapAnd(TCP_IP, P2PPattern)].anyIt(it.match(a)):
result.add(a[0 .. 1].tryGet())
return result
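
A behaviour sketch for the filter above (documentation-range addresses, purely illustrative): only TCP addresses over IP or DNS survive, trimmed to their first two components.

import libp2p/multiaddress
import libp2p/protocols/connectivity/dcutr/core

let addrs = @[
  MultiAddress.init("/ip4/203.0.113.7/tcp/4001").tryGet(), # kept
  MultiAddress.init("/ip4/203.0.113.7/udp/4001").tryGet(), # dropped: not TCP
]
echo getHolePunchableAddrs(addrs) # only the /ip4/.../tcp/4001 entry remains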


@@ -14,10 +14,8 @@ import stew/[results, objects]
import chronos, chronicles
import core
import ../../protocol,
../../../stream/connection,
../../../switch,
../../../utils/future
import
../../protocol, ../../../stream/connection, ../../../switch, ../../../utils/future
export chronicles
@@ -26,21 +24,28 @@ type Dcutr* = ref object of LPProtocol
logScope:
topics = "libp2p dcutr"
proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDialableAddrs = 8): T =
proc new*(
T: typedesc[Dcutr],
switch: Switch,
connectTimeout = 15.seconds,
maxDialableAddrs = 8,
): T =
proc handleStream(stream: Connection, proto: string) {.async.} =
var peerDialableAddrs: seq[MultiAddress]
try:
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
debug "Dcutr receiver received a Connect message.", connectMsg
var ourAddrs = switch.peerStore.getMostObservedProtosAndPorts() # likely empty when the peer is reachable
var ourAddrs = switch.peerStore.getMostObservedProtosAndPorts()
# likely empty when the peer is reachable
if ourAddrs.len == 0:
# this list should be the same as the peer's public addrs when it is reachable
ourAddrs = switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it))
ourAddrs =
switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it))
var ourDialableAddrs = getHolePunchableAddrs(ourAddrs)
if ourDialableAddrs.len == 0:
debug "Dcutr receiver has no supported dialable addresses. Aborting Dcutr.", ourAddrs
debug "Dcutr receiver has no supported dialable addresses. Aborting Dcutr.",
ourAddrs
return
await stream.send(MsgType.Connect, ourAddrs)
@@ -50,17 +55,27 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
peerDialableAddrs = getHolePunchableAddrs(connectMsg.addrs)
if peerDialableAddrs.len == 0:
debug "Dcutr initiator has no supported dialable addresses to connect to. Aborting Dcutr.", addrs=connectMsg.addrs
debug "Dcutr initiator has no supported dialable addresses to connect to. Aborting Dcutr.",
addrs = connectMsg.addrs
return
if peerDialableAddrs.len > maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.Out))
peerDialableAddrs = peerDialableAddrs[0 ..< maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(
switch.connect(
stream.peerId,
@[it],
forceDial = true,
reuseConnection = false,
dir = Direction.Out,
)
)
try:
discard await anyCompleted(futs).wait(connectTimeout)
debug "Dcutr receiver has directly connected to the remote peer."
finally:
for fut in futs: fut.cancel()
for fut in futs:
fut.cancel()
except CancelledError as err:
raise err
except AllFuturesFailedError as err:


@@ -11,14 +11,15 @@
import times
import chronos, chronicles
import ./relay,
./messages,
./rconn,
./utils,
../../../peerinfo,
../../../switch,
../../../multiaddress,
../../../stream/connection
import
./relay,
./messages,
./rconn,
./utils,
../../../peerinfo,
../../../switch,
../../../multiaddress,
../../../stream/connection
logScope:
topics = "libp2p relay relay-client"
@@ -30,26 +31,28 @@ type
ReservationError* = object of RelayClientError
RelayV1DialError* = object of RelayClientError
RelayV2DialError* = object of RelayClientError
RelayClientAddConn* = proc(conn: Connection,
duration: uint32,
data: uint64): Future[void] {.gcsafe, raises: [].}
RelayClientAddConn* = proc(
conn: Connection, duration: uint32, data: uint64
): Future[void] {.gcsafe, raises: [].}
RelayClient* = ref object of Relay
onNewConnection*: RelayClientAddConn
canHop: bool
Rsvp* = object
expire*: uint64 # required, Unix expiration time (UTC)
expire*: uint64 # required, Unix expiration time (UTC)
addrs*: seq[MultiAddress] # relay address for reserving peer
voucher*: Opt[Voucher] # optional, reservation voucher
limitDuration*: uint32 # seconds
limitData*: uint64 # bytes
voucher*: Opt[Voucher] # optional, reservation voucher
limitDuration*: uint32 # seconds
limitData*: uint64 # bytes
proc sendStopError(conn: Connection, code: StatusV2) {.async.} =
trace "send stop status", status = $code & " (" & $ord(code) & ")"
let msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
await conn.writeLp(encode(msg).buffer)
proc handleRelayedConnect(cl: RelayClient, conn: Connection, msg: StopMessage) {.async.} =
proc handleRelayedConnect(
cl: RelayClient, conn: Connection, msg: StopMessage
) {.async.} =
let
# TODO: check the go version to see in which way this could fail
# it's unclear in the spec
@@ -58,9 +61,7 @@ proc handleRelayedConnect(cl: RelayClient, conn: Connection, msg: StopMessage) {
return
limitDuration = msg.limit.duration
limitData = msg.limit.data
msg = StopMessage(
msgType: StopMessageType.Status,
status: Opt.some(Ok))
msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(Ok))
pb = encode(msg)
trace "incoming relay connection", src
@@ -72,24 +73,28 @@ proc handleRelayedConnect(cl: RelayClient, conn: Connection, msg: StopMessage) {
await conn.writeLp(pb.buffer)
# This sounds redundant, but the callback could, in theory, be set to nil during
# conn.writeLp, so it's safer to double-check
if cl.onNewConnection != nil: await cl.onNewConnection(conn, limitDuration, limitData)
else: await conn.close()
if cl.onNewConnection != nil:
await cl.onNewConnection(conn, limitDuration, limitData)
else:
await conn.close()
proc reserve*(cl: RelayClient,
peerId: PeerId,
addrs: seq[MultiAddress] = @[]): Future[Rsvp] {.async.} =
proc reserve*(
cl: RelayClient, peerId: PeerId, addrs: seq[MultiAddress] = @[]
): Future[Rsvp] {.async.} =
let conn = await cl.switch.dial(peerId, addrs, RelayV2HopCodec)
defer: await conn.close()
defer:
await conn.close()
let
pb = encode(HopMessage(msgType: HopMessageType.Reserve))
msg = try:
await conn.writeLp(pb.buffer)
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).tryGet()
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "error writing or reading reservation message", exc=exc.msg
raise newException(ReservationError, exc.msg)
msg =
try:
await conn.writeLp(pb.buffer)
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).tryGet()
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "error writing or reading reservation message", exc = exc.msg
raise newException(ReservationError, exc.msg)
if msg.msgType != HopMessageType.Status:
raise newException(ReservationError, "Unexpected relay response type")
@@ -99,7 +104,7 @@ proc reserve*(cl: RelayClient,
let reservation = msg.reservation.valueOr:
raise newException(ReservationError, "Missing reservation information")
if reservation.expire > int64.high().uint64 or
now().utc > reservation.expire.int64.fromUnix.utc:
now().utc > reservation.expire.int64.fromUnix.utc:
raise newException(ReservationError, "Bad expiration date")
result.expire = reservation.expire
result.addrs = reservation.addrs
@@ -115,43 +120,49 @@ proc reserve*(cl: RelayClient,
result.limitData = msg.limit.data
proc dialPeerV1*(
cl: RelayClient,
conn: Connection,
dstPeerId: PeerId,
dstAddrs: seq[MultiAddress]): Future[Connection] {.async.} =
cl: RelayClient, conn: Connection, dstPeerId: PeerId, dstAddrs: seq[MultiAddress]
): Future[Connection] {.async.} =
var
msg = RelayMessage(
msgType: Opt.some(RelayType.Hop),
srcPeer: Opt.some(RelayPeer(peerId: cl.switch.peerInfo.peerId, addrs: cl.switch.peerInfo.addrs)),
dstPeer: Opt.some(RelayPeer(peerId: dstPeerId, addrs: dstAddrs)))
srcPeer: Opt.some(
RelayPeer(peerId: cl.switch.peerInfo.peerId, addrs: cl.switch.peerInfo.addrs)
),
dstPeer: Opt.some(RelayPeer(peerId: dstPeerId, addrs: dstAddrs)),
)
pb = encode(msg)
trace "Dial peer", msgSend=msg
trace "Dial peer", msgSend = msg
try:
await conn.writeLp(pb.buffer)
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "error writing hop request", exc=exc.msg
trace "error writing hop request", exc = exc.msg
raise exc
let msgRcvFromRelayOpt = try:
RelayMessage.decode(await conn.readLp(RelayClientMsgSize))
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "error reading stop response", exc=exc.msg
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise exc
let msgRcvFromRelayOpt =
try:
RelayMessage.decode(await conn.readLp(RelayClientMsgSize))
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "error reading stop response", exc = exc.msg
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise exc
try:
let msgRcvFromRelay = msgRcvFromRelayOpt.valueOr:
raise newException(RelayV1DialError, "Hop can't open destination stream")
if msgRcvFromRelay.msgType.tryGet() != RelayType.Status:
raise newException(RelayV1DialError, "Hop can't open destination stream: wrong message type")
raise newException(
RelayV1DialError, "Hop can't open destination stream: wrong message type"
)
if msgRcvFromRelay.status.tryGet() != StatusV1.Success:
raise newException(RelayV1DialError, "Hop can't open destination stream: status failed")
raise newException(
RelayV1DialError, "Hop can't open destination stream: status failed"
)
except RelayV1DialError as exc:
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise exc
@@ -164,21 +175,23 @@ proc dialPeerV2*(
cl: RelayClient,
conn: RelayConnection,
dstPeerId: PeerId,
dstAddrs: seq[MultiAddress]): Future[Connection] {.async.} =
dstAddrs: seq[MultiAddress],
): Future[Connection] {.async.} =
let
p = Peer(peerId: dstPeerId, addrs: dstAddrs)
pb = encode(HopMessage(msgType: HopMessageType.Connect, peer: Opt.some(p)))
trace "Dial peer", p
let msgRcvFromRelay = try:
await conn.writeLp(pb.buffer)
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).tryGet()
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "error reading stop response", exc=exc.msg
raise newException(RelayV2DialError, exc.msg)
let msgRcvFromRelay =
try:
await conn.writeLp(pb.buffer)
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).tryGet()
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "error reading stop response", exc = exc.msg
raise newException(RelayV2DialError, exc.msg)
if msgRcvFromRelay.msgType != HopMessageType.Status:
raise newException(RelayV2DialError, "Unexpected stop response")
@@ -198,7 +211,7 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
if msg.msgType == StopMessageType.Connect:
await cl.handleRelayedConnect(conn, msg)
else:
trace "Unexpected client / relayv2 handshake", msgType=msg.msgType
trace "Unexpected client / relayv2 handshake", msgType = msg.msgType
await sendStopError(conn, MalformedMessage)
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
@@ -223,8 +236,10 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.}
await sendStatus(conn, StatusV1.Success)
# This sounds redundant, but the callback could, in theory, be set to nil during
# sendStatus(Success), so it's safer to double check
if cl.onNewConnection != nil: await cl.onNewConnection(conn, 0, 0)
else: await conn.close()
if cl.onNewConnection != nil:
await cl.onNewConnection(conn, 0, 0)
else:
await conn.close()
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
@@ -236,42 +251,54 @@ proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
trace "Message type not set"
await sendStatus(conn, StatusV1.MalformedMessage)
return
case typ:
of RelayType.Hop:
if cl.canHop: await cl.handleHop(conn, msg)
else: await sendStatus(conn, StatusV1.HopCantSpeakRelay)
of RelayType.Stop: await cl.handleStop(conn, msg)
of RelayType.CanHop:
if cl.canHop: await sendStatus(conn, StatusV1.Success)
else: await sendStatus(conn, StatusV1.HopCantSpeakRelay)
case typ
of RelayType.Hop:
if cl.canHop:
await cl.handleHop(conn, msg)
else:
trace "Unexpected relay handshake", msgType=msg.msgType
await sendStatus(conn, StatusV1.MalformedMessage)
await sendStatus(conn, StatusV1.HopCantSpeakRelay)
of RelayType.Stop:
await cl.handleStop(conn, msg)
of RelayType.CanHop:
if cl.canHop:
await sendStatus(conn, StatusV1.Success)
else:
await sendStatus(conn, StatusV1.HopCantSpeakRelay)
else:
trace "Unexpected relay handshake", msgType = msg.msgType
await sendStatus(conn, StatusV1.MalformedMessage)
proc new*(T: typedesc[RelayClient], canHop: bool = false,
reservationTTL: times.Duration = DefaultReservationTTL,
limitDuration: uint32 = DefaultLimitDuration,
limitData: uint64 = DefaultLimitData,
heartbeatSleepTime: uint32 = DefaultHeartbeatSleepTime,
maxCircuit: int = MaxCircuit,
maxCircuitPerPeer: int = MaxCircuitPerPeer,
msgSize: int = RelayClientMsgSize,
circuitRelayV1: bool = false): T =
let cl = T(canHop: canHop,
reservationTTL: reservationTTL,
limit: Limit(duration: limitDuration, data: limitData),
heartbeatSleepTime: heartbeatSleepTime,
maxCircuit: maxCircuit,
maxCircuitPerPeer: maxCircuitPerPeer,
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)
proc new*(
T: typedesc[RelayClient],
canHop: bool = false,
reservationTTL: times.Duration = DefaultReservationTTL,
limitDuration: uint32 = DefaultLimitDuration,
limitData: uint64 = DefaultLimitData,
heartbeatSleepTime: uint32 = DefaultHeartbeatSleepTime,
maxCircuit: int = MaxCircuit,
maxCircuitPerPeer: int = MaxCircuitPerPeer,
msgSize: int = RelayClientMsgSize,
circuitRelayV1: bool = false,
): T =
let cl = T(
canHop: canHop,
reservationTTL: reservationTTL,
limit: Limit(duration: limitDuration, data: limitData),
heartbeatSleepTime: heartbeatSleepTime,
maxCircuit: maxCircuit,
maxCircuitPerPeer: maxCircuitPerPeer,
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1,
)
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV1Codec: await cl.handleStreamV1(conn)
of RelayV2StopCodec: await cl.handleStopStreamV2(conn)
of RelayV2HopCodec: await cl.handleHopStreamV2(conn)
case proto
of RelayV1Codec:
await cl.handleStreamV1(conn)
of RelayV2StopCodec:
await cl.handleStopStreamV2(conn)
of RelayV2HopCodec:
await cl.handleHopStreamV2(conn)
except CancelledError as exc:
raise exc
except CatchableError as exc:
@@ -281,8 +308,9 @@ proc new*(T: typedesc[RelayClient], canHop: bool = false,
await conn.close()
cl.handler = handleStream
cl.codecs = if cl.canHop:
@[RelayV1Codec, RelayV2HopCodec, RelayV2StopCodec]
else:
@[RelayV1Codec, RelayV2StopCodec]
cl.codecs =
if cl.canHop:
@[RelayV1Codec, RelayV2HopCodec, RelayV2StopCodec]
else:
@[RelayV1Codec, RelayV2StopCodec]
cl
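For illustration, a minimal sketch of driving the reservation API above (assumptions: `cl` is a started RelayClient whose switch can reach the relay; this sketch is not part of the patch):

proc reserveExample(
    cl: RelayClient, relayPeerId: PeerId, relayAddrs: seq[MultiAddress]
) {.async.} =
  try:
    # reserve returns the Rsvp decoded from the relay's status response
    let rsvp = await cl.reserve(relayPeerId, relayAddrs)
    echo "reservation expires (unix): ", rsvp.expire
    echo "relayed addrs: ", rsvp.addrs
  except ReservationError as exc:
    echo "reservation failed: ", exc.msg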

View File

@@ -11,8 +11,7 @@
import macros
import stew/[objects, results]
import ../../../peerinfo,
../../../signed_envelope
import ../../../peerinfo, ../../../signed_envelope
# Circuit Relay V1 Message
@@ -87,22 +86,22 @@ proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Opt[RelayMessage] =
let pb = initProtoBuffer(buf)
if ? pb.getField(1, msgTypeOrd).toOpt():
if ?pb.getField(1, msgTypeOrd).toOpt():
if msgTypeOrd.int notin RelayType:
return Opt.none(RelayMessage)
rMsg.msgType = Opt.some(RelayType(msgTypeOrd))
if ? pb.getField(2, pbSrc).toOpt():
discard ? pbSrc.getField(1, src.peerId).toOpt()
discard ? pbSrc.getRepeatedField(2, src.addrs).toOpt()
if ?pb.getField(2, pbSrc).toOpt():
discard ?pbSrc.getField(1, src.peerId).toOpt()
discard ?pbSrc.getRepeatedField(2, src.addrs).toOpt()
rMsg.srcPeer = Opt.some(src)
if ? pb.getField(3, pbDst).toOpt():
discard ? pbDst.getField(1, dst.peerId).toOpt()
discard ? pbDst.getRepeatedField(2, dst.addrs).toOpt()
if ?pb.getField(3, pbDst).toOpt():
discard ?pbDst.getField(1, dst.peerId).toOpt()
discard ?pbDst.getRepeatedField(2, dst.addrs).toOpt()
rMsg.dstPeer = Opt.some(dst)
if ? pb.getField(4, statusOrd).toOpt():
if ?pb.getField(4, statusOrd).toOpt():
var status: StatusV1
if not checkedEnumAssign(status, statusOrd):
return Opt.none(RelayMessage)
@@ -111,19 +110,18 @@ proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Opt[RelayMessage] =
# Voucher
type
Voucher* = object
relayPeerId*: PeerId # peer ID of the relay
reservingPeerId*: PeerId # peer ID of the reserving peer
expiration*: uint64 # UNIX UTC expiration time for the reservation
type Voucher* = object
relayPeerId*: PeerId # peer ID of the relay
reservingPeerId*: PeerId # peer ID of the reserving peer
expiration*: uint64 # UNIX UTC expiration time for the reservation
proc decode*(_: typedesc[Voucher], buf: seq[byte]): Result[Voucher, ProtoError] =
let pb = initProtoBuffer(buf)
var v = Voucher()
? pb.getRequiredField(1, v.relayPeerId)
? pb.getRequiredField(2, v.reservingPeerId)
? pb.getRequiredField(3, v.expiration)
?pb.getRequiredField(1, v.relayPeerId)
?pb.getRequiredField(2, v.reservingPeerId)
?pb.getRequiredField(3, v.expiration)
ok(v)
@@ -137,20 +135,23 @@ proc encode*(v: Voucher): seq[byte] =
pb.finish()
pb.buffer
proc init*(T: typedesc[Voucher],
relayPeerId: PeerId,
reservingPeerId: PeerId,
expiration: uint64): T =
proc init*(
T: typedesc[Voucher],
relayPeerId: PeerId,
reservingPeerId: PeerId,
expiration: uint64,
): T =
T(
relayPeerId: relayPeerId,
reservingPeerId: reservingPeerId,
expiration: expiration
relayPeerId: relayPeerId, reservingPeerId: reservingPeerId, expiration: expiration
)
type SignedVoucher* = SignedPayload[Voucher]
proc payloadDomain*(_: typedesc[Voucher]): string = "libp2p-relay-rsvp"
proc payloadType*(_: typedesc[Voucher]): seq[byte] = @[ (byte)0x03, (byte)0x02 ]
proc payloadDomain*(_: typedesc[Voucher]): string =
"libp2p-relay-rsvp"
proc payloadType*(_: typedesc[Voucher]): seq[byte] =
@[(byte) 0x03, (byte) 0x02]
proc checkValid*(spr: SignedVoucher): Result[void, EnvelopeError] =
if not spr.data.relayPeerId.match(spr.envelope.publicKey):
@@ -164,13 +165,15 @@ type
Peer* = object
peerId*: PeerId
addrs*: seq[MultiAddress]
Reservation* = object
expire*: uint64 # required, Unix expiration time (UTC)
addrs*: seq[MultiAddress] # relay address for reserving peer
svoucher*: Opt[seq[byte]] # optional, reservation voucher
expire*: uint64 # required, Unix expiration time (UTC)
addrs*: seq[MultiAddress] # relay address for reserving peer
svoucher*: Opt[seq[byte]] # optional, reservation voucher
Limit* = object
duration*: uint32 # seconds
data*: uint64 # bytes
duration*: uint32 # seconds
data*: uint64 # bytes
StatusV2* = enum
Ok = 100
@@ -181,10 +184,12 @@ type
NoReservation = 204
MalformedMessage = 400
UnexpectedMessage = 401
HopMessageType* {.pure.} = enum
Reserve = 0
Connect = 1
Status = 2
HopMessage* = object
msgType*: HopMessageType
peer*: Opt[Peer]
@@ -214,8 +219,10 @@ proc encode*(msg: HopMessage): ProtoBuffer =
pb.write(3, rpb.buffer)
if msg.limit.duration > 0 or msg.limit.data > 0:
var lpb = initProtoBuffer()
if msg.limit.duration > 0: lpb.write(1, msg.limit.duration)
if msg.limit.data > 0: lpb.write(2, msg.limit.data)
if msg.limit.duration > 0:
lpb.write(1, msg.limit.duration)
if msg.limit.data > 0:
lpb.write(2, msg.limit.data)
lpb.finish()
pb.write(4, lpb.buffer)
msg.status.withValue(status):
@@ -229,35 +236,35 @@ proc decode*(_: typedesc[HopMessage], buf: seq[byte]): Opt[HopMessage] =
let pb = initProtoBuffer(buf)
var msgTypeOrd: uint32
? pb.getRequiredField(1, msgTypeOrd).toOpt()
?pb.getRequiredField(1, msgTypeOrd).toOpt()
if not checkedEnumAssign(msg.msgType, msgTypeOrd):
return Opt.none(HopMessage)
var pbPeer: ProtoBuffer
if ? pb.getField(2, pbPeer).toOpt():
if ?pb.getField(2, pbPeer).toOpt():
var peer: Peer
? pbPeer.getRequiredField(1, peer.peerId).toOpt()
discard ? pbPeer.getRepeatedField(2, peer.addrs).toOpt()
?pbPeer.getRequiredField(1, peer.peerId).toOpt()
discard ?pbPeer.getRepeatedField(2, peer.addrs).toOpt()
msg.peer = Opt.some(peer)
var pbReservation: ProtoBuffer
if ? pb.getField(3, pbReservation).toOpt():
if ?pb.getField(3, pbReservation).toOpt():
var
svoucher: seq[byte]
reservation: Reservation
if ? pbReservation.getField(3, svoucher).toOpt():
if ?pbReservation.getField(3, svoucher).toOpt():
reservation.svoucher = Opt.some(svoucher)
? pbReservation.getRequiredField(1, reservation.expire).toOpt()
discard ? pbReservation.getRepeatedField(2, reservation.addrs).toOpt()
?pbReservation.getRequiredField(1, reservation.expire).toOpt()
discard ?pbReservation.getRepeatedField(2, reservation.addrs).toOpt()
msg.reservation = Opt.some(reservation)
var pbLimit: ProtoBuffer
if ? pb.getField(4, pbLimit).toOpt():
discard ? pbLimit.getField(1, msg.limit.duration).toOpt()
discard ? pbLimit.getField(2, msg.limit.data).toOpt()
if ?pb.getField(4, pbLimit).toOpt():
discard ?pbLimit.getField(1, msg.limit.duration).toOpt()
discard ?pbLimit.getField(2, msg.limit.data).toOpt()
var statusOrd: uint32
if ? pb.getField(5, statusOrd).toOpt():
if ?pb.getField(5, statusOrd).toOpt():
var status: StatusV2
if not checkedEnumAssign(status, statusOrd):
return Opt.none(HopMessage)
@@ -270,13 +277,13 @@ type
StopMessageType* {.pure.} = enum
Connect = 0
Status = 1
StopMessage* = object
msgType*: StopMessageType
peer*: Opt[Peer]
limit*: Limit
status*: Opt[StatusV2]
proc encode*(msg: StopMessage): ProtoBuffer =
var pb = initProtoBuffer()
@@ -290,8 +297,10 @@ proc encode*(msg: StopMessage): ProtoBuffer =
pb.write(2, ppb.buffer)
if msg.limit.duration > 0 or msg.limit.data > 0:
var lpb = initProtoBuffer()
if msg.limit.duration > 0: lpb.write(1, msg.limit.duration)
if msg.limit.data > 0: lpb.write(2, msg.limit.data)
if msg.limit.duration > 0:
lpb.write(1, msg.limit.duration)
if msg.limit.data > 0:
lpb.write(2, msg.limit.data)
lpb.finish()
pb.write(3, lpb.buffer)
msg.status.withValue(status):
@@ -306,26 +315,25 @@ proc decode*(_: typedesc[StopMessage], buf: seq[byte]): Opt[StopMessage] =
let pb = initProtoBuffer(buf)
var msgTypeOrd: uint32
? pb.getRequiredField(1, msgTypeOrd).toOpt()
?pb.getRequiredField(1, msgTypeOrd).toOpt()
if msgTypeOrd.int notin StopMessageType:
return Opt.none(StopMessage)
msg.msgType = StopMessageType(msgTypeOrd)
var pbPeer: ProtoBuffer
if ? pb.getField(2, pbPeer).toOpt():
if ?pb.getField(2, pbPeer).toOpt():
var peer: Peer
? pbPeer.getRequiredField(1, peer.peerId).toOpt()
discard ? pbPeer.getRepeatedField(2, peer.addrs).toOpt()
?pbPeer.getRequiredField(1, peer.peerId).toOpt()
discard ?pbPeer.getRepeatedField(2, peer.addrs).toOpt()
msg.peer = Opt.some(peer)
var pbLimit: ProtoBuffer
if ? pb.getField(3, pbLimit).toOpt():
discard ? pbLimit.getField(1, msg.limit.duration).toOpt()
discard ? pbLimit.getField(2, msg.limit.data).toOpt()
if ?pb.getField(3, pbLimit).toOpt():
discard ?pbLimit.getField(1, msg.limit.duration).toOpt()
discard ?pbLimit.getField(2, msg.limit.data).toOpt()
var statusOrd: uint32
if ? pb.getField(4, statusOrd).toOpt():
if ?pb.getField(4, statusOrd).toOpt():
var status: StatusV2
if not checkedEnumAssign(status, statusOrd):
return Opt.none(StopMessage)
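A quick round-trip through the codecs above, as a hedged sketch (plain asserts, not from the patch):

let
  original = HopMessage(msgType: HopMessageType.Reserve)
  decoded = HopMessage.decode(encode(original).buffer)
assert decoded.isSome()
assert decoded.get().msgType == HopMessageType.Reserve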

View File

@@ -13,24 +13,20 @@ import chronos
import ../../../stream/connection
type
RelayConnection* = ref object of Connection
conn*: Connection
limitDuration*: uint32
limitData*: uint64
dataSent*: uint64
type RelayConnection* = ref object of Connection
conn*: Connection
limitDuration*: uint32
limitData*: uint64
dataSent*: uint64
method readOnce*(
self: RelayConnection,
pbytes: pointer,
nbytes: int
self: RelayConnection, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
self.activity = true
self.conn.readOnce(pbytes, nbytes)
method write*(
self: RelayConnection,
msg: seq[byte]
self: RelayConnection, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
self.dataSent.inc(msg.len)
if self.limitData != 0 and self.dataSent > self.limitData:
@@ -43,13 +39,15 @@ method closeImpl*(self: RelayConnection): Future[void] {.async: (raises: []).} =
await self.conn.close()
await procCall Connection(self).closeImpl()
method getWrapped*(self: RelayConnection): Connection = self.conn
method getWrapped*(self: RelayConnection): Connection =
self.conn
proc new*(
T: typedesc[RelayConnection],
conn: Connection,
limitDuration: uint32,
limitData: uint64): T =
limitData: uint64,
): T =
let rc = T(conn: conn, limitDuration: limitDuration, limitData: limitData)
rc.dir = conn.dir
rc.initStream()
@@ -59,5 +57,6 @@ proc new*(
await noCancel conn.join().wait(limitDuration.seconds())
except AsyncTimeoutError:
await conn.close()
asyncSpawn checkDurationConnection()
return rc
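A sketch of wrapping an established `conn` with the limits negotiated at reservation time (duration in seconds, data in bytes; the values here are arbitrary):

let rc = RelayConnection.new(conn, limitDuration = 120, limitData = 1_048_576)
# writes past limitData close the connection; limitDuration arms the
# checkDurationConnection timer shown above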

View File

@@ -13,18 +13,19 @@ import sequtils, tables
import chronos, chronicles
import ./messages,
./rconn,
./utils,
../../../peerinfo,
../../../switch,
../../../multiaddress,
../../../multicodec,
../../../stream/connection,
../../../protocols/protocol,
../../../errors,
../../../utils/heartbeat,
../../../signed_envelope
import
./messages,
./rconn,
./utils,
../../../peerinfo,
../../../switch,
../../../multiaddress,
../../../multicodec,
../../../stream/connection,
../../../protocols/protocol,
../../../errors,
../../../utils/heartbeat,
../../../signed_envelope
# TODO:
# * Eventually replace std/times by chronos/timer. Currently chronos/timer
@@ -54,47 +55,53 @@ type
# Relay Side
type
Relay* = ref object of LPProtocol
switch*: Switch
peerCount: CountTable[PeerId]
type Relay* = ref object of LPProtocol
switch*: Switch
peerCount: CountTable[PeerId]
# number of reservations (relayv2) + number of connections (relayv1)
maxCircuit*: int
# number of reservations (relayv2) + number of connections (relayv1)
maxCircuit*: int
maxCircuitPerPeer*: int
msgSize*: int
# RelayV1
isCircuitRelayV1*: bool
streamCount: int
# RelayV2
rsvp: Table[PeerId, DateTime]
reservationLoop: Future[void]
reservationTTL*: times.Duration
heartbeatSleepTime*: uint32
limit*: Limit
maxCircuitPerPeer*: int
msgSize*: int
# RelayV1
isCircuitRelayV1*: bool
streamCount: int
# RelayV2
rsvp: Table[PeerId, DateTime]
reservationLoop: Future[void]
reservationTTL*: times.Duration
heartbeatSleepTime*: uint32
limit*: Limit
# Relay V2
proc createReserveResponse(
r: Relay,
pid: PeerId,
expire: DateTime): Result[HopMessage, CryptoError] =
r: Relay, pid: PeerId, expire: DateTime
): Result[HopMessage, CryptoError] =
let
expireUnix = expire.toTime.toUnix.uint64
v = Voucher(relayPeerId: r.switch.peerInfo.peerId,
reservingPeerId: pid,
expiration: expireUnix)
sv = ? SignedVoucher.init(r.switch.peerInfo.privateKey, v)
ma = ? MultiAddress.init("/p2p/" & $r.switch.peerInfo.peerId).orErr(CryptoError.KeyError)
rsrv = Reservation(expire: expireUnix,
addrs: r.switch.peerInfo.addrs.mapIt(
? it.concat(ma).orErr(CryptoError.KeyError)),
svoucher: Opt.some(? sv.encode))
msg = HopMessage(msgType: HopMessageType.Status,
reservation: Opt.some(rsrv),
limit: r.limit,
status: Opt.some(Ok))
v = Voucher(
relayPeerId: r.switch.peerInfo.peerId,
reservingPeerId: pid,
expiration: expireUnix,
)
sv = ?SignedVoucher.init(r.switch.peerInfo.privateKey, v)
ma =
?MultiAddress.init("/p2p/" & $r.switch.peerInfo.peerId).orErr(
CryptoError.KeyError
)
rsrv = Reservation(
expire: expireUnix,
addrs: r.switch.peerInfo.addrs.mapIt(?it.concat(ma).orErr(CryptoError.KeyError)),
svoucher: Opt.some(?sv.encode),
)
msg = HopMessage(
msgType: HopMessageType.Status,
reservation: Opt.some(rsrv),
limit: r.limit,
status: Opt.some(Ok),
)
return ok(msg)
proc isRelayed*(conn: Connection): bool =
@@ -126,9 +133,7 @@ proc handleReserve(r: Relay, conn: Connection) {.async.} =
r.rsvp[pid] = expire
await conn.writeLp(encode(msg).buffer)
proc handleConnect(r: Relay,
connSrc: Connection,
msg: HopMessage) {.async.} =
proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
if connSrc.isRelayed():
trace "connection attempt over relay connection"
await sendHopStatus(connSrc, PermissionDenied)
@@ -150,38 +155,42 @@ proc handleConnect(r: Relay,
r.peerCount.inc(src, -1)
r.peerCount.inc(dst, -1)
if r.peerCount[src] > r.maxCircuitPerPeer or
r.peerCount[dst] > r.maxCircuitPerPeer:
trace "too many connections", src = r.peerCount[src],
dst = r.peerCount[dst],
max = r.maxCircuitPerPeer
if r.peerCount[src] > r.maxCircuitPerPeer or r.peerCount[dst] > r.maxCircuitPerPeer:
trace "too many connections",
src = r.peerCount[src], dst = r.peerCount[dst], max = r.maxCircuitPerPeer
await sendHopStatus(connSrc, ResourceLimitExceeded)
return
let connDst = try:
await r.switch.dial(dst, RelayV2StopCodec)
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "error opening relay stream", dst, exc=exc.msg
await sendHopStatus(connSrc, ConnectionFailed)
return
let connDst =
try:
await r.switch.dial(dst, RelayV2StopCodec)
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "error opening relay stream", dst, exc = exc.msg
await sendHopStatus(connSrc, ConnectionFailed)
return
defer:
await connDst.close()
proc sendStopMsg() {.async.} =
let stopMsg = StopMessage(msgType: StopMessageType.Connect,
peer: Opt.some(Peer(peerId: src, addrs: @[])),
limit: r.limit)
let stopMsg = StopMessage(
msgType: StopMessageType.Connect,
peer: Opt.some(Peer(peerId: src, addrs: @[])),
limit: r.limit,
)
await connDst.writeLp(encode(stopMsg).buffer)
let msg = StopMessage.decode(await connDst.readLp(r.msgSize)).valueOr:
raise newException(SendStopError, "Malformed message")
if msg.msgType != StopMessageType.Status:
raise newException(SendStopError, "Unexpected stop response, not a status message")
raise
newException(SendStopError, "Unexpected stop response, not a status message")
if msg.status.get(UnexpectedMessage) != Ok:
raise newException(SendStopError, "Relay stop failure")
await connSrc.writeLp(encode(HopMessage(msgType: HopMessageType.Status,
status: Opt.some(Ok))).buffer)
await connSrc.writeLp(
encode(HopMessage(msgType: HopMessageType.Status, status: Opt.some(Ok))).buffer
)
try:
await sendStopMsg()
except CancelledError as exc:
@@ -205,20 +214,24 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
await sendHopStatus(conn, MalformedMessage)
return
trace "relayv2 handle stream", msg = msg
case msg.msgType:
of HopMessageType.Reserve: await r.handleReserve(conn)
of HopMessageType.Connect: await r.handleConnect(conn, msg)
case msg.msgType
of HopMessageType.Reserve:
await r.handleReserve(conn)
of HopMessageType.Connect:
await r.handleConnect(conn, msg)
else:
trace "Unexpected relayv2 handshake", msgType=msg.msgType
trace "Unexpected relayv2 handshake", msgType = msg.msgType
await sendHopStatus(conn, MalformedMessage)
# Relay V1
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
r.streamCount.inc()
defer: r.streamCount.dec()
defer:
r.streamCount.dec()
if r.streamCount + r.rsvp.len() >= r.maxCircuit:
trace "refusing connection; too many active circuit", streamCount = r.streamCount, rsvp = r.rsvp.len()
trace "refusing connection; too many active circuit",
streamCount = r.streamCount, rsvp = r.rsvp.len()
await sendStatus(connSrc, StatusV1.HopCantSpeakRelay)
return
@@ -236,13 +249,14 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
trace "relay not connected to dst", dst
return err(StatusV1.HopNoConnToDst)
ok(msg)
let check = checkMsg()
if check.isErr:
await sendStatus(connSrc, check.error())
return
if r.peerCount[src.peerId] >= r.maxCircuitPerPeer or
r.peerCount[dst.peerId] >= r.maxCircuitPerPeer:
r.peerCount[dst.peerId] >= r.maxCircuitPerPeer:
trace "refusing connection; too many connection from src or to dst", src, dst
await sendStatus(connSrc, StatusV1.HopCantSpeakRelay)
return
@@ -252,31 +266,32 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
r.peerCount.inc(src.peerId, -1)
r.peerCount.inc(dst.peerId, -1)
let connDst = try:
await r.switch.dial(dst.peerId, RelayV1Codec)
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "error opening relay stream", dst, exc=exc.msg
await sendStatus(connSrc, StatusV1.HopCantDialDst)
return
let connDst =
try:
await r.switch.dial(dst.peerId, RelayV1Codec)
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "error opening relay stream", dst, exc = exc.msg
await sendStatus(connSrc, StatusV1.HopCantDialDst)
return
defer:
await connDst.close()
let msgToSend = RelayMessage(
msgType: Opt.some(RelayType.Stop),
srcPeer: Opt.some(src),
dstPeer: Opt.some(dst))
msgType: Opt.some(RelayType.Stop), srcPeer: Opt.some(src), dstPeer: Opt.some(dst)
)
let msgRcvFromDstOpt = try:
await connDst.writeLp(encode(msgToSend).buffer)
RelayMessage.decode(await connDst.readLp(r.msgSize))
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "error writing stop handshake or reading stop response", exc=exc.msg
await sendStatus(connSrc, StatusV1.HopCantOpenDstStream)
return
let msgRcvFromDstOpt =
try:
await connDst.writeLp(encode(msgToSend).buffer)
RelayMessage.decode(await connDst.readLp(r.msgSize))
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "error writing stop handshake or reading stop response", exc = exc.msg
await sendStatus(connSrc, StatusV1.HopCantOpenDstStream)
return
let msgRcvFromDst = msgRcvFromDstOpt.valueOr:
trace "error reading stop response", msg = msgRcvFromDstOpt
@@ -284,7 +299,7 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
return
if msgRcvFromDst.msgType.get(RelayType.Stop) != RelayType.Status or
msgRcvFromDst.status.get(StatusV1.StopRelayRefused) != StatusV1.Success:
msgRcvFromDst.status.get(StatusV1.StopRelayRefused) != StatusV1.Success:
trace "unexcepted relay stop response", msgRcvFromDst
await sendStatus(connSrc, StatusV1.HopCantOpenDstStream)
return
@@ -303,44 +318,54 @@ proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
trace "Message type not set"
await sendStatus(conn, StatusV1.MalformedMessage)
return
case typ:
of RelayType.Hop: await r.handleHop(conn, msg)
of RelayType.Stop: await sendStatus(conn, StatusV1.StopRelayRefused)
of RelayType.CanHop: await sendStatus(conn, StatusV1.Success)
else:
trace "Unexpected relay handshake", msgType=msg.msgType
await sendStatus(conn, StatusV1.MalformedMessage)
case typ
of RelayType.Hop:
await r.handleHop(conn, msg)
of RelayType.Stop:
await sendStatus(conn, StatusV1.StopRelayRefused)
of RelayType.CanHop:
await sendStatus(conn, StatusV1.Success)
else:
trace "Unexpected relay handshake", msgType = msg.msgType
await sendStatus(conn, StatusV1.MalformedMessage)
proc setup*(r: Relay, switch: Switch) =
r.switch = switch
r.switch.addPeerEventHandler(
proc (peerId: PeerId, event: PeerEvent) {.async.} =
r.rsvp.del(peerId),
Left)
proc(peerId: PeerId, event: PeerEvent) {.async.} =
r.rsvp.del(peerId)
,
Left,
)
proc new*(T: typedesc[Relay],
reservationTTL: times.Duration = DefaultReservationTTL,
limitDuration: uint32 = DefaultLimitDuration,
limitData: uint64 = DefaultLimitData,
heartbeatSleepTime: uint32 = DefaultHeartbeatSleepTime,
maxCircuit: int = MaxCircuit,
maxCircuitPerPeer: int = MaxCircuitPerPeer,
msgSize: int = RelayMsgSize,
circuitRelayV1: bool = false): T =
let r = T(reservationTTL: reservationTTL,
limit: Limit(duration: limitDuration, data: limitData),
heartbeatSleepTime: heartbeatSleepTime,
maxCircuit: maxCircuit,
maxCircuitPerPeer: maxCircuitPerPeer,
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)
proc new*(
T: typedesc[Relay],
reservationTTL: times.Duration = DefaultReservationTTL,
limitDuration: uint32 = DefaultLimitDuration,
limitData: uint64 = DefaultLimitData,
heartbeatSleepTime: uint32 = DefaultHeartbeatSleepTime,
maxCircuit: int = MaxCircuit,
maxCircuitPerPeer: int = MaxCircuitPerPeer,
msgSize: int = RelayMsgSize,
circuitRelayV1: bool = false,
): T =
let r = T(
reservationTTL: reservationTTL,
limit: Limit(duration: limitDuration, data: limitData),
heartbeatSleepTime: heartbeatSleepTime,
maxCircuit: maxCircuit,
maxCircuitPerPeer: maxCircuitPerPeer,
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1,
)
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV2HopCodec: await r.handleHopStreamV2(conn)
of RelayV1Codec: await r.handleStreamV1(conn)
case proto
of RelayV2HopCodec:
await r.handleHopStreamV2(conn)
of RelayV1Codec:
await r.handleStreamV1(conn)
except CancelledError as exc:
raise exc
except CatchableError as exc:
@@ -349,8 +374,11 @@ proc new*(T: typedesc[Relay],
trace "exiting relayv2 handler", conn
await conn.close()
r.codecs = if r.isCircuitRelayV1: @[RelayV1Codec]
else: @[RelayV2HopCodec, RelayV1Codec]
r.codecs =
if r.isCircuitRelayV1:
@[RelayV1Codec]
else:
@[RelayV2HopCodec, RelayV1Codec]
r.handler = handleStream
r
@@ -361,9 +389,7 @@ proc deletesReservation(r: Relay) {.async.} =
if n > r.rsvp[k]:
r.rsvp.del(k)
method start*(
r: Relay
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
method start*(r: Relay): Future[void] {.async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if not r.reservationLoop.isNil:
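For context, a hedged sketch of constructing the relay side with tighter limits through the constructor above (the values are arbitrary):

let relay = Relay.new(
  maxCircuit = 64, # total reservations + v1 connections
  maxCircuitPerPeer = 4,
  limitDuration = 60, # seconds per relayed connection
  limitData = 1_048_576, # bytes per relayed connection
)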

View File

@@ -13,21 +13,21 @@ import sequtils, strutils
import chronos, chronicles
import ./client,
./rconn,
./utils,
../../../switch,
../../../stream/connection,
../../../transports/transport
import
./client,
./rconn,
./utils,
../../../switch,
../../../stream/connection,
../../../transports/transport
logScope:
topics = "libp2p relay relay-transport"
type
RelayTransport* = ref object of Transport
client*: RelayClient
queue: AsyncQueue[Connection]
selfRunning: bool
type RelayTransport* = ref object of Transport
client*: RelayClient
queue: AsyncQueue[Connection]
selfRunning: bool
method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
if self.selfRunning:
@@ -35,11 +35,10 @@ method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
return
self.client.onNewConnection = proc(
conn: Connection,
duration: uint32 = 0,
data: uint64 = 0) {.async.} =
await self.queue.addLast(RelayConnection.new(conn, duration, data))
await conn.join()
conn: Connection, duration: uint32 = 0, data: uint64 = 0
) {.async.} =
await self.queue.addLast(RelayConnection.new(conn, duration, data))
await conn.join()
self.selfRunning = true
await procCall Transport(self).start(ma)
trace "Starting Relay transport"
@@ -57,7 +56,7 @@ method accept*(self: RelayTransport): Future[Connection] {.async.} =
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
let
sma = toSeq(ma.items())
relayAddrs = sma[0..sma.len-4].mapIt(it.tryGet()).foldl(a & b)
relayAddrs = sma[0 .. sma.len - 4].mapIt(it.tryGet()).foldl(a & b)
var
relayPeerId: PeerId
dstPeerId: PeerId
@@ -68,13 +67,12 @@ proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.}
trace "Dial", relayPeerId, dstPeerId
let conn = await self.client.switch.dial(
relayPeerId,
@[ relayAddrs ],
@[ RelayV2HopCodec, RelayV1Codec ])
relayPeerId, @[relayAddrs], @[RelayV2HopCodec, RelayV1Codec]
)
conn.dir = Direction.Out
var rc: RelayConnection
try:
case conn.protocol:
case conn.protocol
of RelayV1Codec:
return await self.client.dialPeerV1(conn, dstPeerId, @[])
of RelayV2HopCodec:
@@ -83,14 +81,16 @@ proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.}
except CancelledError as exc:
raise exc
except CatchableError as exc:
if not rc.isNil: await rc.close()
if not rc.isNil:
await rc.close()
raise exc
method dial*(
self: RelayTransport,
hostname: string,
ma: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
self: RelayTransport,
hostname: string,
ma: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId),
): Future[Connection] {.async.} =
peerId.withValue(pid):
let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
result = await self.dial(address)
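A hypothetical helper, not from the patch, showing the address shape `dial` expects: everything before `/p2p-circuit` addresses the relay, the trailing `/p2p/...` names the destination:

proc dialThroughRelay(
    t: RelayTransport, relayMa: MultiAddress, dstPeerId: PeerId
): Future[Connection] {.async.} =
  # e.g. /ip4/../tcp/../p2p/<relay>/p2p-circuit/p2p/<dst>
  let ma = MultiAddress.init($relayMa & "/p2p-circuit/p2p/" & $dstPeerId).tryGet()
  return await t.dial(ma)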

View File

@@ -10,8 +10,7 @@
{.push raises: [].}
import chronos, chronicles
import ./messages,
../../../stream/connection
import ./messages, ../../../stream/connection
logScope:
topics = "libp2p relay relay-utils"
@@ -22,19 +21,16 @@ const
RelayV2StopCodec* = "/libp2p/circuit/relay/0.2.0/stop"
proc sendStatus*(
conn: Connection,
code: StatusV1
conn: Connection, code: StatusV1
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
let
msg = RelayMessage(
msgType: Opt.some(RelayType.Status), status: Opt.some(code))
msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
conn.writeLp(pb.buffer)
proc sendHopStatus*(
conn: Connection,
code: StatusV2
conn: Connection, code: StatusV2
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
trace "send hop relay/v2 status", status = $code & "(" & $ord(code) & ")"
let
@@ -43,8 +39,7 @@ proc sendHopStatus*(
conn.writeLp(pb.buffer)
proc sendStopStatus*(
conn: Connection,
code: StatusV2
conn: Connection, code: StatusV2
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
trace "send stop relay/v2 status", status = $code & " (" & $ord(code) & ")"
let
@@ -53,8 +48,8 @@ proc sendStopStatus*(
conn.writeLp(pb.buffer)
proc bridge*(
connSrc: Connection,
connDst: Connection) {.async: (raises: [CancelledError]).} =
connSrc: Connection, connDst: Connection
) {.async: (raises: [CancelledError]).} =
const bufferSize = 4096
var
bufSrcToDst: array[bufferSize, byte]
@@ -67,9 +62,10 @@ proc bridge*(
try:
while not connSrc.closed() and not connDst.closed():
try: # https://github.com/status-im/nim-chronos/issues/516
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(futSrc, futDst)
except ValueError: raiseAssert("Futures list is not empty")
except ValueError:
raiseAssert("Futures list is not empty")
if futSrc.finished():
bufRead = await futSrc
if bufRead > 0:
@@ -91,7 +87,7 @@ proc bridge*(
trace "relay src closed connection", src = connSrc.peerId
if connDst.closed() or connDst.atEof():
trace "relay dst closed connection", dst = connDst.peerId
trace "relay error", exc=exc.msg
trace "relay error", exc = exc.msg
trace "end relaying", bytesSentFromSrcToDst, bytesSentFromDstToSrc
await futSrc.cancelAndWait()
await futDst.cancelAndWait()

View File

@@ -15,17 +15,18 @@
import std/[sequtils, options, strutils, sugar]
import stew/results
import chronos, chronicles
import ../protobuf/minprotobuf,
../peerinfo,
../stream/connection,
../peerid,
../crypto/crypto,
../multiaddress,
../multicodec,
../protocols/protocol,
../utility,
../errors,
../observedaddrmanager
import
../protobuf/minprotobuf,
../peerinfo,
../stream/connection,
../peerid,
../crypto/crypto,
../multiaddress,
../multicodec,
../protocols/protocol,
../utility,
../errors,
../observedaddrmanager
export observedaddrmanager
@@ -59,11 +60,9 @@ type
sendSignedPeerRecord*: bool
observedAddrManager*: ObservedAddrManager
IdentifyPushHandler* = proc (
peer: PeerId,
newInfo: IdentifyInfo):
Future[void]
{.gcsafe, raises: [], public.}
IdentifyPushHandler* = proc(peer: PeerId, newInfo: IdentifyInfo): Future[void] {.
gcsafe, raises: [], public
.}
IdentifyPush* = ref object of LPProtocol
identifyHandler: IdentifyPushHandler
@@ -78,11 +77,11 @@ chronicles.expandIt(IdentifyInfo):
signedPeerRecord =
# The SPR contains the same data as the identify message
# would be cumbersome to log
if it.signedPeerRecord.isSome(): "Some"
else: "None"
if it.signedPeerRecord.isSome(): "Some" else: "None"
proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool): ProtoBuffer
{.raises: [].} =
proc encodeMsg(
peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool
): ProtoBuffer {.raises: [].} =
result = initProtoBuffer()
let pkey = peerInfo.publicKey
@@ -96,10 +95,8 @@ proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: boo
result.write(4, observed.data.buffer)
let protoVersion = ProtoVersion
result.write(5, protoVersion)
let agentVersion = if peerInfo.agentVersion.len <= 0:
AgentVersion
else:
peerInfo.agentVersion
let agentVersion =
if peerInfo.agentVersion.len <= 0: AgentVersion else: peerInfo.agentVersion
result.write(6, agentVersion)
## Optionally populate signedPeerRecord field.
@@ -120,28 +117,28 @@ proc decodeMsg*(buf: seq[byte]): Opt[IdentifyInfo] =
signedPeerRecord: SignedPeerRecord
var pb = initProtoBuffer(buf)
if ? pb.getField(1, pubkey).toOpt():
if ?pb.getField(1, pubkey).toOpt():
iinfo.pubkey = some(pubkey)
if ? pb.getField(8, signedPeerRecord).toOpt() and
pubkey == signedPeerRecord.envelope.publicKey:
if ?pb.getField(8, signedPeerRecord).toOpt() and
pubkey == signedPeerRecord.envelope.publicKey:
iinfo.signedPeerRecord = some(signedPeerRecord.envelope)
discard ? pb.getRepeatedField(2, iinfo.addrs).toOpt()
discard ? pb.getRepeatedField(3, iinfo.protos).toOpt()
if ? pb.getField(4, oaddr).toOpt():
discard ?pb.getRepeatedField(2, iinfo.addrs).toOpt()
discard ?pb.getRepeatedField(3, iinfo.protos).toOpt()
if ?pb.getField(4, oaddr).toOpt():
iinfo.observedAddr = some(oaddr)
if ? pb.getField(5, protoVersion).toOpt():
if ?pb.getField(5, protoVersion).toOpt():
iinfo.protoVersion = some(protoVersion)
if ? pb.getField(6, agentVersion).toOpt():
if ?pb.getField(6, agentVersion).toOpt():
iinfo.agentVersion = some(agentVersion)
Opt.some(iinfo)
proc new*(
T: typedesc[Identify],
peerInfo: PeerInfo,
sendSignedPeerRecord = false,
observedAddrManager = ObservedAddrManager.new(),
): T =
T: typedesc[Identify],
peerInfo: PeerInfo,
sendSignedPeerRecord = false,
observedAddrManager = ObservedAddrManager.new(),
): T =
let identify = T(
peerInfo: peerInfo,
sendSignedPeerRecord: sendSignedPeerRecord,
@@ -167,20 +164,23 @@ method init*(p: Identify) =
p.handler = handle
p.codec = IdentifyCodec
proc identify*(self: Identify,
conn: Connection,
remotePeerId: PeerId): Future[IdentifyInfo] {.async.} =
proc identify*(
self: Identify, conn: Connection, remotePeerId: PeerId
): Future[IdentifyInfo] {.async.} =
trace "initiating identify", conn
var message = await conn.readLp(64*1024)
var message = await conn.readLp(64 * 1024)
if len(message) == 0:
trace "identify: Empty message received!", conn
raise newException(IdentityInvalidMsgError, "Empty message received!")
var info = decodeMsg(message).valueOr: raise newException(IdentityInvalidMsgError, "Incorrect message received!")
var info = decodeMsg(message).valueOr:
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
debug "identify: decoded message", conn, info
let
pubkey = info.pubkey.valueOr: raise newException(IdentityInvalidMsgError, "No pubkey in identify")
peer = PeerId.init(pubkey).valueOr: raise newException(IdentityInvalidMsgError, $error)
pubkey = info.pubkey.valueOr:
raise newException(IdentityInvalidMsgError, "No pubkey in identify")
peer = PeerId.init(pubkey).valueOr:
raise newException(IdentityInvalidMsgError, $error)
if peer != remotePeerId:
trace "Peer ids don't match", remote = peer, local = remotePeerId
@@ -190,7 +190,8 @@ proc identify*(self: Identify,
info.observedAddr.withValue(observed):
# Currently, we use the ObservedAddrManager only to find our dialable external NAT address. Therefore, addresses
# like "...\p2p-circuit\p2p\..." and "\p2p\..." are not useful to us.
if observed.contains(multiCodec("p2p-circuit")).get(false) or P2PPattern.matchPartial(observed):
if observed.contains(multiCodec("p2p-circuit")).get(false) or
P2PPattern.matchPartial(observed):
trace "Not adding address to ObservedAddrManager.", observed
elif not self.observedAddrManager.addObservation(observed):
trace "Observed address is not valid.", observedAddr = observed
@@ -207,7 +208,7 @@ proc init*(p: IdentifyPush) =
proc handle(conn: Connection, proto: string) {.async.} =
trace "handling identify push", conn
try:
var message = await conn.readLp(64*1024)
var message = await conn.readLp(64 * 1024)
var identInfo = decodeMsg(message).valueOr:
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
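For illustration, a hedged sketch of running identify over a dialed stream (assumes a started `switch` and a known remote PeerId; not part of the patch):

proc identifyExample(switch: Switch, remote: PeerId) {.async.} =
  let
    identifyProto = Identify.new(switch.peerInfo)
    conn = await switch.dial(remote, IdentifyCodec)
  try:
    let info = await identifyProto.identify(conn, remote)
    echo "remote agent: ", info.agentVersion.get("unknown")
  finally:
    await conn.close()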

View File

@@ -18,9 +18,12 @@ logScope:
type PerfClient* = ref object of RootObj
proc perf*(_: typedesc[PerfClient], conn: Connection,
sizeToWrite: uint64 = 0, sizeToRead: uint64 = 0):
Future[Duration] {.async, public.} =
proc perf*(
_: typedesc[PerfClient],
conn: Connection,
sizeToWrite: uint64 = 0,
sizeToRead: uint64 = 0,
): Future[Duration] {.async, public.} =
var
size = sizeToWrite
buf: array[PerfSize, byte]
@@ -30,7 +33,7 @@ proc perf*(_: typedesc[PerfClient], conn: Connection,
await conn.write(toSeq(toBytesBE(sizeToRead)))
while size > 0:
let toWrite = min(size, PerfSize)
await conn.write(buf[0..<toWrite])
await conn.write(buf[0 ..< toWrite])
size -= toWrite
await conn.close()
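A short usage sketch of the client side above: write a fixed amount, read nothing, and time it (assumes `conn` was negotiated on the perf codec):

let elapsed = await PerfClient.perf(conn, sizeToWrite = 1_048_576, sizeToRead = 0)
echo "uploaded 1 MiB in ", elapsed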

View File

@@ -13,10 +13,7 @@
import chronos, chronicles
import stew/endians2
import ./core,
../protocol,
../../stream/connection,
../../utility
import ./core, ../protocol, ../../stream/connection, ../../utility
export chronicles, connection
@@ -47,7 +44,7 @@ proc new*(T: typedesc[Perf]): T {.public.} =
var buf: array[PerfSize, byte]
while size > 0:
let toWrite = min(size, PerfSize)
await conn.write(buf[0..<toWrite])
await conn.write(buf[0 ..< toWrite])
size -= toWrite
except CancelledError as exc:
raise exc

View File

@@ -13,15 +13,16 @@
import chronos, chronicles
import bearssl/rand
import ../protobuf/minprotobuf,
../peerinfo,
../stream/connection,
../peerid,
../crypto/crypto,
../multiaddress,
../protocols/protocol,
../utility,
../errors
import
../protobuf/minprotobuf,
../peerinfo,
../stream/connection,
../peerid,
../crypto/crypto,
../multiaddress,
../protocols/protocol,
../utility,
../errors
export chronicles, rand, connection
@@ -36,16 +37,15 @@ type
PingError* = object of LPError
WrongPingAckError* = object of PingError
PingHandler* {.public.} = proc (
peer: PeerId):
Future[void]
{.gcsafe, raises: [].}
PingHandler* {.public.} = proc(peer: PeerId): Future[void] {.gcsafe, raises: [].}
Ping* = ref object of LPProtocol
pingHandler*: PingHandler
rng: ref HmacDrbgContext
proc new*(T: typedesc[Ping], handler: PingHandler = nil, rng: ref HmacDrbgContext = newRng()): T {.public.} =
proc new*(
T: typedesc[Ping], handler: PingHandler = nil, rng: ref HmacDrbgContext = newRng()
): T {.public.} =
let ping = Ping(pinghandler: handler, rng: rng)
ping.init()
ping
@@ -68,10 +68,7 @@ method init*(p: Ping) =
p.handler = handle
p.codec = PingCodec
proc ping*(
p: Ping,
conn: Connection,
): Future[Duration] {.async, public.} =
proc ping*(p: Ping, conn: Connection): Future[Duration] {.async, public.} =
## Sends ping to `conn`, returns the delay
trace "initiating ping", conn
@@ -93,7 +90,7 @@ proc ping*(
trace "got ping response", conn, responseDur
for i in 0..<randomBuf.len:
for i in 0 ..< randomBuf.len:
if randomBuf[i] != resultBuf[i]:
raise newException(WrongPingAckError, "Incorrect ping data from peer!")
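And a hedged usage sketch of the ping proc above (assumes a started `switch` with connectivity to `remote`):

proc pingExample(switch: Switch, remote: PeerId) {.async.} =
  let
    pingProto = Ping.new(rng = newRng())
    conn = await switch.dial(remote, PingCodec)
  try:
    let rtt = await pingProto.ping(conn)
    echo "round trip took ", rtt
  finally:
    await conn.close()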

View File

@@ -14,24 +14,21 @@ import ../stream/connection
export results
const
DefaultMaxIncomingStreams* = 10
const DefaultMaxIncomingStreams* = 10
type
LPProtoHandler* = proc (
conn: Connection,
proto: string): Future[void] {.async.}
LPProtoHandler* = proc(conn: Connection, proto: string): Future[void] {.async.}
LPProtocol* = ref object of RootObj
codecs*: seq[string]
handlerImpl: LPProtoHandler ## invoked by the protocol negotiator
handlerImpl: LPProtoHandler ## invoked by the protocol negotiator
started*: bool
maxIncomingStreams: Opt[int]
method init*(p: LPProtocol) {.base, gcsafe.} = discard
method init*(p: LPProtocol) {.base, gcsafe.} =
discard
method start*(
p: LPProtocol) {.async: (raises: [CancelledError], raw: true), base.} =
method start*(p: LPProtocol) {.async: (raises: [CancelledError], raw: true), base.} =
let fut = newFuture[void]()
fut.complete()
p.started = true
@@ -61,8 +58,7 @@ func `codec=`*(p: LPProtocol, codec: string) =
template `handler`*(p: LPProtocol): LPProtoHandler =
p.handlerImpl
template `handler`*(
p: LPProtocol, conn: Connection, proto: string): Future[void] =
template `handler`*(p: LPProtocol, conn: Connection, proto: string): Future[void] =
p.handlerImpl(conn, proto)
func `handler=`*(p: LPProtocol, handler: LPProtoHandler) =
@@ -76,33 +72,37 @@ func `handler=`*(p: LPProtocol, handler: LPProtoHandler) =
# https://github.com/nim-lang/Nim/issues/23432
func `handler=`*[E](
p: LPProtocol,
handler: proc (
conn: Connection,
proto: string): InternalRaisesFuture[void, E]) =
handler: proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
) =
proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
await handler(conn, proto)
p.handlerImpl = wrap
proc new*(
T: type LPProtocol,
codecs: seq[string],
handler: LPProtoHandler,
maxIncomingStreams: Opt[int] | int = Opt.none(int)): T =
maxIncomingStreams: Opt[int] | int = Opt.none(int),
): T =
T(
codecs: codecs,
handlerImpl: handler,
maxIncomingStreams:
when maxIncomingStreams is int: Opt.some(maxIncomingStreams)
else: maxIncomingStreams
when maxIncomingStreams is int:
Opt.some(maxIncomingStreams)
else:
maxIncomingStreams
,
)
proc new*[E](
T: type LPProtocol,
codecs: seq[string],
handler: proc (
conn: Connection,
proto: string): InternalRaisesFuture[void, E],
maxIncomingStreams: Opt[int] | int = Opt.none(int)): T =
handler: proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
maxIncomingStreams: Opt[int] | int = Opt.none(int),
): T =
proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
await handler(conn, proto)
T.new(codecs, wrap, maxIncomingStreams)
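For context, a minimal custom protocol built on the constructor above (the codec name and handler are illustrative):

proc echoHandler(conn: Connection, proto: string) {.async.} =
  let msg = await conn.readLp(1024)
  await conn.writeLp(msg)
  await conn.close()

let echoProto = LPProtocol.new(@["/echo/1.0.0"], echoHandler, maxIncomingStreams = 5)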

View File

@@ -3,6 +3,7 @@
import ../../utility
type
ValidationResult* {.pure, public.} = enum
Accept, Reject, Ignore
type ValidationResult* {.pure, public.} = enum
Accept
Reject
Ignore
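A sketch of a validator producing these values, assuming the usual pubsub handler shape `(topic, message) -> Future[ValidationResult]`:

proc myValidator(
    topic: string, msg: Message
): Future[ValidationResult] {.async.} =
  # accept non-empty payloads, reject the rest
  return if msg.data.len > 0: ValidationResult.Accept else: ValidationResult.Reject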

View File

@@ -11,17 +11,18 @@
import std/[sets, hashes, tables]
import chronos, chronicles, metrics
import ./pubsub,
./pubsubpeer,
./timedcache,
./peertable,
./rpc/[message, messages, protobuf],
nimcrypto/[hash, sha2],
../../crypto/crypto,
../../stream/connection,
../../peerid,
../../peerinfo,
../../utility
import
./pubsub,
./pubsubpeer,
./timedcache,
./peertable,
./rpc/[message, messages, protobuf],
nimcrypto/[hash, sha2],
../../crypto/crypto,
../../stream/connection,
../../peerid,
../../peerinfo,
../../utility
## Simple flood-based publishing.
@@ -30,17 +31,16 @@ logScope:
const FloodSubCodec* = "/floodsub/1.0.0"
type
FloodSub* {.public.} = ref object of PubSub
floodsub*: PeerTable # topic to remote peer map
seen*: TimedCache[SaltedId]
# Early filter for messages recently observed on the network
# We use a salted id because the messages in this cache have not yet
# been validated meaning that an attacker has greater control over the
# hash key and therefore could poison the table
seenSalt*: sha256
# The salt in this case is a partially updated SHA256 context pre-seeded
# with some random data
type FloodSub* {.public.} = ref object of PubSub
floodsub*: PeerTable # topic to remote peer map
seen*: TimedCache[SaltedId]
# Early filter for messages recently observed on the network
# We use a salted id because the messages in this cache have not yet
# been validated meaning that an attacker has greater control over the
# hash key and therefore could poison the table
seenSalt*: sha256
# The salt in this case is a partially updated SHA256 context pre-seeded
# with some random data
proc salt*(f: FloodSub, msgId: MessageId): SaltedId =
var tmp = f.seenSalt
@@ -57,10 +57,7 @@ proc addSeen*(f: FloodSub, saltedId: SaltedId): bool =
proc firstSeen*(f: FloodSub, saltedId: SaltedId): Moment =
f.seen.addedAt(saltedId)
proc handleSubscribe(f: FloodSub,
peer: PubSubPeer,
topic: string,
subscribe: bool) =
proc handleSubscribe(f: FloodSub, peer: PubSubPeer, topic: string, subscribe: bool) =
logScope:
peer
topic
@@ -73,7 +70,8 @@ proc handleSubscribe(f: FloodSub,
trace "ignoring unknown peer"
return
if subscribe and not(isNil(f.subscriptionValidator)) and not(f.subscriptionValidator(topic)):
if subscribe and not (isNil(f.subscriptionValidator)) and
not (f.subscriptionValidator(topic)):
# this is a violation, so warn should be in order
warn "ignoring invalid topic subscription", topic, peer
return
@@ -103,9 +101,7 @@ method unsubscribePeer*(f: FloodSub, peer: PeerId) =
procCall PubSub(f).unsubscribePeer(peer)
method rpcHandler*(f: FloodSub,
peer: PubSubPeer,
data: seq[byte]) {.async.} =
method rpcHandler*(f: FloodSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
var rpcMsg = decodeRpcMsg(data).valueOr:
debug "failed to decode msg from peer", peer, err = error
raise newException(CatchableError, "Peer msg couldn't be decoded")
@@ -114,11 +110,13 @@ method rpcHandler*(f: FloodSub,
# trigger hooks
peer.recvObservers(rpcMsg)
for i in 0..<min(f.topicsHigh, rpcMsg.subscriptions.len):
template sub: untyped = rpcMsg.subscriptions[i]
for i in 0 ..< min(f.topicsHigh, rpcMsg.subscriptions.len):
template sub(): untyped =
rpcMsg.subscriptions[i]
f.handleSubscribe(peer, sub.topic, sub.subscribe)
for msg in rpcMsg.messages: # for every message
for msg in rpcMsg.messages: # for every message
let msgIdResult = f.msgIdProvider(msg)
if msgIdResult.isErr:
debug "Dropping message due to failed message id generation",
@@ -194,9 +192,7 @@ method init*(f: FloodSub) =
f.handler = handler
f.codec = FloodSubCodec
method publish*(f: FloodSub,
topic: string,
data: seq[byte]): Future[int] {.async.} =
method publish*(f: FloodSub, topic: string, data: seq[byte]): Future[int] {.async.} =
# base returns always 0
discard await procCall PubSub(f).publish(topic, data)
@@ -220,12 +216,10 @@ method publish*(f: FloodSub,
inc f.msgSeqno
Message.init(some(f.peerInfo), data, topic, some(f.msgSeqno), f.sign)
msgId = f.msgIdProvider(msg).valueOr:
trace "Error generating message id, skipping publish",
error = error
trace "Error generating message id, skipping publish", error = error
return 0
trace "Created new message",
msg = shortLog(msg), peers = peers.len, topic, msgId
trace "Created new message", msg = shortLog(msg), peers = peers.len, topic, msgId
if f.addSeen(f.salt(msgId)):
# custom msgid providers might cause this
@@ -242,8 +236,7 @@ method publish*(f: FloodSub,
return peers.len
method initPubSub*(f: FloodSub)
{.raises: [InitializationError].} =
method initPubSub*(f: FloodSub) {.raises: [InitializationError].} =
procCall PubSub(f).initPubSub()
f.seen = TimedCache[SaltedId].init(2.minutes)
f.seenSalt.init()
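A hedged publish sketch against the method above; the return value is how many peers the message was sent to (toBytes comes from stew/byteutils):

let peersSent = await floodsub.publish("example-topic", "hello".toBytes())
if peersSent == 0:
  echo "no subscribed peers yet"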

View File

@@ -14,19 +14,20 @@
import std/[sets, sequtils]
import chronos, chronicles, metrics
import chronos/ratelimit
import ./pubsub,
./floodsub,
./pubsubpeer,
./peertable,
./mcache,
./timedcache,
./rpc/[messages, message, protobuf],
../protocol,
../../stream/connection,
../../peerinfo,
../../peerid,
../../utility,
../../switch
import
./pubsub,
./floodsub,
./pubsubpeer,
./peertable,
./mcache,
./timedcache,
./rpc/[messages, message, protobuf],
../protocol,
../../stream/connection,
../../peerinfo,
../../peerid,
../../utility,
../../switch
import stew/results
export results
@@ -39,95 +40,112 @@ logScope:
topics = "libp2p gossipsub"
declareCounter(libp2p_gossipsub_failed_publish, "number of failed publish")
declareCounter(libp2p_gossipsub_invalid_topic_subscription, "number of invalid topic subscriptions that happened")
declareCounter(libp2p_gossipsub_duplicate_during_validation, "number of duplicates received during message validation")
declareCounter(libp2p_gossipsub_idontwant_saved_messages, "number of duplicates avoided by idontwant")
declareCounter(libp2p_gossipsub_saved_bytes, "bytes saved by gossipsub optimizations", labels=["kind"])
declareCounter(
libp2p_gossipsub_invalid_topic_subscription,
"number of invalid topic subscriptions that happened",
)
declareCounter(
libp2p_gossipsub_duplicate_during_validation,
"number of duplicates received during message validation",
)
declareCounter(
libp2p_gossipsub_idontwant_saved_messages, "number of duplicates avoided by idontwant"
)
declareCounter(
libp2p_gossipsub_saved_bytes,
"bytes saved by gossipsub optimizations",
labels = ["kind"],
)
declareCounter(libp2p_gossipsub_duplicate, "number of duplicates received")
declareCounter(libp2p_gossipsub_received, "number of messages received (deduplicated)")
when defined(libp2p_expensive_metrics):
declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])
declareCounter(
libp2p_pubsub_received_messages,
"number of messages received",
labels = ["id", "topic"],
)
proc init*(
_: type[GossipSubParams],
pruneBackoff = 1.minutes,
unsubscribeBackoff = 5.seconds,
floodPublish = true,
gossipFactor: float64 = 0.25,
d = GossipSubD,
dLow = GossipSubDlo,
dHigh = GossipSubDhi,
dScore = GossipSubDlo,
dOut = GossipSubDlo - 1, # DLow - 1
dLazy = GossipSubD, # Like D,
heartbeatInterval = GossipSubHeartbeatInterval,
historyLength = GossipSubHistoryLength,
historyGossip = GossipSubHistoryGossip,
fanoutTTL = GossipSubFanoutTTL,
seenTTL = 2.minutes,
gossipThreshold = -100.0,
publishThreshold = -1000.0,
graylistThreshold = -10000.0,
opportunisticGraftThreshold = 0.0,
decayInterval = 1.seconds,
decayToZero = 0.01,
retainScore = 2.minutes,
appSpecificWeight = 0.0,
ipColocationFactorWeight = 0.0,
ipColocationFactorThreshold = 1.0,
behaviourPenaltyWeight = -1.0,
behaviourPenaltyDecay = 0.999,
directPeers = initTable[PeerId, seq[MultiAddress]](),
disconnectBadPeers = false,
enablePX = false,
bandwidthEstimatebps = 100_000_000, # 100 Mbps or 12.5 MBps
overheadRateLimit = Opt.none(tuple[bytes: int, interval: Duration]),
disconnectPeerAboveRateLimit = false,
maxNumElementsInNonPriorityQueue = DefaultMaxNumElementsInNonPriorityQueue): GossipSubParams =
_: type[GossipSubParams],
pruneBackoff = 1.minutes,
unsubscribeBackoff = 5.seconds,
floodPublish = true,
gossipFactor: float64 = 0.25,
d = GossipSubD,
dLow = GossipSubDlo,
dHigh = GossipSubDhi,
dScore = GossipSubDlo,
dOut = GossipSubDlo - 1, # DLow - 1
dLazy = GossipSubD, # Like D,
heartbeatInterval = GossipSubHeartbeatInterval,
historyLength = GossipSubHistoryLength,
historyGossip = GossipSubHistoryGossip,
fanoutTTL = GossipSubFanoutTTL,
seenTTL = 2.minutes,
gossipThreshold = -100.0,
publishThreshold = -1000.0,
graylistThreshold = -10000.0,
opportunisticGraftThreshold = 0.0,
decayInterval = 1.seconds,
decayToZero = 0.01,
retainScore = 2.minutes,
appSpecificWeight = 0.0,
ipColocationFactorWeight = 0.0,
ipColocationFactorThreshold = 1.0,
behaviourPenaltyWeight = -1.0,
behaviourPenaltyDecay = 0.999,
directPeers = initTable[PeerId, seq[MultiAddress]](),
disconnectBadPeers = false,
enablePX = false,
bandwidthEstimatebps = 100_000_000, # 100 Mbps or 12.5 MBps
overheadRateLimit = Opt.none(tuple[bytes: int, interval: Duration]),
disconnectPeerAboveRateLimit = false,
maxNumElementsInNonPriorityQueue = DefaultMaxNumElementsInNonPriorityQueue,
): GossipSubParams =
GossipSubParams(
explicit: true,
pruneBackoff: pruneBackoff,
unsubscribeBackoff: unsubscribeBackoff,
floodPublish: floodPublish,
gossipFactor: gossipFactor,
d: d,
dLow: dLow,
dHigh: dHigh,
dScore: dScore,
dOut: dOut,
dLazy: dLazy,
heartbeatInterval: heartbeatInterval,
historyLength: historyLength,
historyGossip: historyGossip,
fanoutTTL: fanoutTTL,
seenTTL: seenTTL,
gossipThreshold: gossipThreshold,
publishThreshold: publishThreshold,
graylistThreshold: graylistThreshold,
opportunisticGraftThreshold: opportunisticGraftThreshold,
decayInterval: decayInterval,
decayToZero: decayToZero,
retainScore: retainScore,
appSpecificWeight: appSpecificWeight,
ipColocationFactorWeight: ipColocationFactorWeight,
ipColocationFactorThreshold: ipColocationFactorThreshold,
behaviourPenaltyWeight: behaviourPenaltyWeight,
behaviourPenaltyDecay: behaviourPenaltyDecay,
directPeers: directPeers,
disconnectBadPeers: disconnectBadPeers,
enablePX: enablePX,
bandwidthEstimatebps: bandwidthEstimatebps,
overheadRateLimit: overheadRateLimit,
disconnectPeerAboveRateLimit: disconnectPeerAboveRateLimit,
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue
)
explicit: true,
pruneBackoff: pruneBackoff,
unsubscribeBackoff: unsubscribeBackoff,
floodPublish: floodPublish,
gossipFactor: gossipFactor,
d: d,
dLow: dLow,
dHigh: dHigh,
dScore: dScore,
dOut: dOut,
dLazy: dLazy,
heartbeatInterval: heartbeatInterval,
historyLength: historyLength,
historyGossip: historyGossip,
fanoutTTL: fanoutTTL,
seenTTL: seenTTL,
gossipThreshold: gossipThreshold,
publishThreshold: publishThreshold,
graylistThreshold: graylistThreshold,
opportunisticGraftThreshold: opportunisticGraftThreshold,
decayInterval: decayInterval,
decayToZero: decayToZero,
retainScore: retainScore,
appSpecificWeight: appSpecificWeight,
ipColocationFactorWeight: ipColocationFactorWeight,
ipColocationFactorThreshold: ipColocationFactorThreshold,
behaviourPenaltyWeight: behaviourPenaltyWeight,
behaviourPenaltyDecay: behaviourPenaltyDecay,
directPeers: directPeers,
disconnectBadPeers: disconnectBadPeers,
enablePX: enablePX,
bandwidthEstimatebps: bandwidthEstimatebps,
overheadRateLimit: overheadRateLimit,
disconnectPeerAboveRateLimit: disconnectPeerAboveRateLimit,
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue,
)
proc validateParameters*(parameters: GossipSubParams): Result[void, cstring] =
if (parameters.dOut >= parameters.dLow) or
(parameters.dOut > (parameters.d div 2)):
err("gossipsub: dOut parameter error, Number of outbound connections to keep in the mesh. Must be less than D_lo and at most D/2")
if (parameters.dOut >= parameters.dLow) or (parameters.dOut > (parameters.d div 2)):
err(
"gossipsub: dOut parameter error, Number of outbound connections to keep in the mesh. Must be less than D_lo and at most D/2"
)
elif parameters.gossipThreshold >= 0:
err("gossipsub: gossipThreshold parameter error, Must be < 0")
elif parameters.unsubscribeBackoff.seconds <= 0:
@@ -163,17 +181,29 @@ proc validateParameters*(parameters: TopicParams): Result[void, cstring] =
elif parameters.timeInMeshCap <= 0.0:
err("gossipsub: timeInMeshCap parameter error, Should be a positive value")
elif parameters.firstMessageDeliveriesWeight <= 0.0:
err("gossipsub: firstMessageDeliveriesWeight parameter error, Should be a positive value")
err(
"gossipsub: firstMessageDeliveriesWeight parameter error, Should be a positive value"
)
elif parameters.meshMessageDeliveriesWeight >= 0.0:
err("gossipsub: meshMessageDeliveriesWeight parameter error, Should be a negative value")
err(
"gossipsub: meshMessageDeliveriesWeight parameter error, Should be a negative value"
)
elif parameters.meshMessageDeliveriesThreshold <= 0.0:
err("gossipsub: meshMessageDeliveriesThreshold parameter error, Should be a positive value")
err(
"gossipsub: meshMessageDeliveriesThreshold parameter error, Should be a positive value"
)
elif parameters.meshMessageDeliveriesCap < parameters.meshMessageDeliveriesThreshold:
err("gossipsub: meshMessageDeliveriesCap parameter error, Should be >= meshMessageDeliveriesThreshold")
err(
"gossipsub: meshMessageDeliveriesCap parameter error, Should be >= meshMessageDeliveriesThreshold"
)
elif parameters.meshFailurePenaltyWeight >= 0.0:
err("gossipsub: meshFailurePenaltyWeight parameter error, Should be a negative value")
err(
"gossipsub: meshFailurePenaltyWeight parameter error, Should be a negative value"
)
elif parameters.invalidMessageDeliveriesWeight >= 0.0:
err("gossipsub: invalidMessageDeliveriesWeight parameter error, Should be a negative value")
err(
"gossipsub: invalidMessageDeliveriesWeight parameter error, Should be a negative value"
)
else:
ok()
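# Illustrative usage sketch (not part of the diff; assumes the gossipsub
# module above is imported): construct parameters with a few overrides,
# then validate them before wiring up the router.
let params = GossipSubParams.init(dOut = 2, dLow = 4, d = 6)
let check = params.validateParameters()
doAssert check.isOk, $check.error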
@@ -197,7 +227,7 @@ method init*(g: GossipSub) =
g.codecs &= GossipSubCodec_10
method onNewPeer*(g: GossipSub, peer: PubSubPeer) =
g.withPeerStats(peer.peerId) do (stats: var PeerStats):
g.withPeerStats(peer.peerId) do(stats: var PeerStats):
# Make sure stats and peer information match, even when reloading peer stats
# from a previous connection
peer.score = stats.score
@@ -210,7 +240,9 @@ method onNewPeer*(g: GossipSub, peer: PubSubPeer) =
peer.iHaveBudget = IHavePeerBudget
peer.pingBudget = PingsPeerBudget
method onPubSubPeerEvent*(p: GossipSub, peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
method onPubSubPeerEvent*(
p: GossipSub, peer: PubSubPeer, event: PubSubPeerEvent
) {.gcsafe.} =
case event.kind
of PubSubPeerEventKind.StreamOpened:
discard
@@ -266,10 +298,7 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =
procCall FloodSub(g).unsubscribePeer(peer)
proc handleSubscribe(g: GossipSub,
peer: PubSubPeer,
topic: string,
subscribe: bool) =
proc handleSubscribe(g: GossipSub, peer: PubSubPeer, topic: string, subscribe: bool) =
logScope:
peer
topic
@@ -283,7 +312,7 @@ proc handleSubscribe(g: GossipSub,
trace "ignoring unknown peer"
return
if not(isNil(g.subscriptionValidator)) and not(g.subscriptionValidator(topic)):
if not (isNil(g.subscriptionValidator)) and not (g.subscriptionValidator(topic)):
# this is a violation, so warn should be in order
trace "ignoring invalid topic subscription", topic, peer
libp2p_gossipsub_invalid_topic_subscription.inc()
@@ -328,7 +357,6 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
isIWantNotEmpty = respControl.iwant.len > 0
if isPruneNotEmpty or isIWantNotEmpty:
if isIWantNotEmpty:
libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)
@@ -340,9 +368,7 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])
trace "sending control message", msg = shortLog(respControl), peer
g.send(
peer,
RPCMsg(control: some(respControl)), isHighPriority = true)
g.send(peer, RPCMsg(control: some(respControl)), isHighPriority = true)
if messages.len > 0:
for smsg in messages:
@@ -354,21 +380,22 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
# iwant replies have lower priority
trace "sending iwant reply messages", peer
g.send(
peer,
RPCMsg(messages: messages), isHighPriority = false)
g.send(peer, RPCMsg(messages: messages), isHighPriority = false)
proc validateAndRelay(g: GossipSub,
msg: Message,
msgId: MessageId, saltedId: SaltedId,
peer: PubSubPeer) {.async.} =
proc validateAndRelay(
g: GossipSub, msg: Message, msgId: MessageId, saltedId: SaltedId, peer: PubSubPeer
) {.async.} =
try:
template topic: string = msg.topic
template topic(): string =
msg.topic
proc addToSendPeers(toSendPeers: var HashSet[PubSubPeer]) =
g.floodsub.withValue(topic, peers): toSendPeers.incl(peers[])
g.mesh.withValue(topic, peers): toSendPeers.incl(peers[])
g.subscribedDirectPeers.withValue(topic, peers): toSendPeers.incl(peers[])
g.floodsub.withValue(topic, peers):
toSendPeers.incl(peers[])
g.mesh.withValue(topic, peers):
toSendPeers.incl(peers[])
g.subscribedDirectPeers.withValue(topic, peers):
toSendPeers.incl(peers[])
toSendPeers.excl(peer)
if msg.data.len > max(512, msgId.len * 10):
@@ -383,16 +410,23 @@ proc validateAndRelay(g: GossipSub,
# small).
var toSendPeers = HashSet[PubSubPeer]()
addToSendPeers(toSendPeers)
g.broadcast(toSendPeers, RPCMsg(control: some(ControlMessage(
idontwant: @[ControlIWant(messageIDs: @[msgId])]
))), isHighPriority = true)
g.broadcast(
toSendPeers,
RPCMsg(
control:
some(ControlMessage(idontwant: @[ControlIWant(messageIDs: @[msgId])]))
),
isHighPriority = true,
)
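# Worked example of the IDontWant size gate above (illustrative, not part
# of the diff): with a typical 20-byte message id the cutoff stays at
# max(512, 20 * 10) = 512 bytes; larger ids raise it proportionally.
doAssert max(512, 20 * 10) == 512
doAssert max(512, 64 * 10) == 640 # a 64-byte id lifts the cutoff to 640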
let validation = await g.validate(msg)
var seenPeers: HashSet[PubSubPeer]
discard g.validationSeen.pop(saltedId, seenPeers)
libp2p_gossipsub_duplicate_during_validation.inc(seenPeers.len.int64)
libp2p_gossipsub_saved_bytes.inc((msg.data.len * seenPeers.len).int64, labelValues = ["validation_duplicate"])
libp2p_gossipsub_saved_bytes.inc(
(msg.data.len * seenPeers.len).int64, labelValues = ["validation_duplicate"]
)
case validation
of ValidationResult.Reject:
@@ -428,9 +462,12 @@ proc validateAndRelay(g: GossipSub,
if saltedId in heDontWant:
peersWhoSentIdontwant.incl(peer)
libp2p_gossipsub_idontwant_saved_messages.inc
libp2p_gossipsub_saved_bytes.inc(msg.data.len.int64, labelValues = ["idontwant"])
libp2p_gossipsub_saved_bytes.inc(
msg.data.len.int64, labelValues = ["idontwant"]
)
break
toSendPeers.excl(peersWhoSentIdontwant) # avoids "len(s) == length: the length of the HashSet changed while iterating over it [AssertionDefect]"
toSendPeers.excl(peersWhoSentIdontwant)
# avoids "len(s) == length: the length of the HashSet changed while iterating over it [AssertionDefect]"
# In theory, if topics are the same in all messages, we could batch - we'd
# also have to be careful to only include validated messages
@@ -438,13 +475,17 @@ proc validateAndRelay(g: GossipSub,
trace "forwarded message to peers", peers = toSendPeers.len, msgId, peer
if g.knownTopics.contains(topic):
libp2p_pubsub_messages_rebroadcasted.inc(toSendPeers.len.int64, labelValues = [topic])
libp2p_pubsub_messages_rebroadcasted.inc(
toSendPeers.len.int64, labelValues = [topic]
)
else:
libp2p_pubsub_messages_rebroadcasted.inc(toSendPeers.len.int64, labelValues = ["generic"])
libp2p_pubsub_messages_rebroadcasted.inc(
toSendPeers.len.int64, labelValues = ["generic"]
)
await handleData(g, topic, msg.data)
except CatchableError as exc:
info "validateAndRelay failed", msg=exc.msg
info "validateAndRelay failed", msg = exc.msg
proc dataAndTopicsIdSize(msgs: seq[Message]): int =
msgs.mapIt(it.data.len + it.topic.len).foldl(a + b, 0)
@@ -459,7 +500,7 @@ proc messageOverhead(g: GossipSub, msg: RPCMsg, msgSize: int): int =
dataAndTopicsIdSize(msg.messages)
controlSize = msg.control.withValue(control):
byteSize(control.ihave) + byteSize(control.iwant)
do: # no control message
do:
0
msgSize - payloadSize - controlSize
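# Self-contained sketch of the overhead computation above (numbers are
# assumed for illustration): whatever part of the wire size is neither
# message payload nor ihave/iwant control data is charged against the
# peer's overhead budget.
let
  wireSize = 1_200 # bytes of the encoded RPC
  payload = 1_000 # data + topic bytes across all messages
  control = 150 # byteSize(ihave) + byteSize(iwant)
doAssert wireSize - payload - control == 50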
@@ -467,15 +508,17 @@ proc messageOverhead(g: GossipSub, msg: RPCMsg, msgSize: int): int =
proc rateLimit*(g: GossipSub, peer: PubSubPeer, overhead: int) {.async.} =
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(overhead):
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
debug "Peer sent too much useless application data and it's above rate limit.", peer, overhead
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()])
# let's just measure at the beginning for test purposes.
debug "Peer sent too much useless application data and it's above rate limit.",
peer, overhead
if g.parameters.disconnectPeerAboveRateLimit:
await g.disconnectPeer(peer)
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
raise newException(
PeerRateLimitError, "Peer disconnected because it's above rate limit."
)
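# Simplified, self-contained stand-in for the token bucket consulted by
# `rateLimit` above (illustrative only, not the bucket type used by the
# codebase): the budget refills once per interval and `tryConsume`
# rejects anything beyond the remaining budget.
import std/[monotimes, times]

type SimpleBucket = object
  budget, capacity: int
  interval: Duration
  lastRefill: MonoTime

proc tryConsume(b: var SimpleBucket, tokens: int): bool =
  let now = getMonoTime()
  if now - b.lastRefill >= b.interval:
    b.budget = b.capacity # refill once per interval
    b.lastRefill = now
  if tokens <= b.budget:
    b.budget -= tokens
    true
  else:
    false

var bucket = SimpleBucket(
  budget: 100, capacity: 100,
  interval: initDuration(seconds = 1), lastRefill: getMonoTime()
)
doAssert bucket.tryConsume(60)
doAssert not bucket.tryConsume(60) # over budget within the same interval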
method rpcHandler*(g: GossipSub,
peer: PubSubPeer,
data: seq[byte]) {.async.} =
method rpcHandler*(g: GossipSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
let msgSize = data.len
var rpcMsg = decodeRpcMsg(data).valueOr:
debug "failed to decode msg from peer", peer, err = error
@@ -497,25 +540,29 @@ method rpcHandler*(g: GossipSub,
# trigger hooks - these may modify the message
peer.recvObservers(rpcMsg)
if rpcMsg.ping.len in 1..<64 and peer.pingBudget > 0:
if rpcMsg.ping.len in 1 ..< 64 and peer.pingBudget > 0:
g.send(peer, RPCMsg(pong: rpcMsg.ping), isHighPriority = true)
peer.pingBudget.dec
for i in 0..<min(g.topicsHigh, rpcMsg.subscriptions.len):
template sub: untyped = rpcMsg.subscriptions[i]
for i in 0 ..< min(g.topicsHigh, rpcMsg.subscriptions.len):
template sub(): untyped =
rpcMsg.subscriptions[i]
g.handleSubscribe(peer, sub.topic, sub.subscribe)
# the above call applied limits to subs number
# in gossipsub we want to apply scoring as well
if rpcMsg.subscriptions.len > g.topicsHigh:
debug "received an rpc message with an oversized amount of subscriptions", peer,
size = rpcMsg.subscriptions.len,
limit = g.topicsHigh
debug "received an rpc message with an oversized amount of subscriptions",
peer, size = rpcMsg.subscriptions.len, limit = g.topicsHigh
peer.behaviourPenalty += 0.1
for i in 0..<rpcMsg.messages.len(): # for every message
template msg: untyped = rpcMsg.messages[i]
template topic: string = msg.topic
for i in 0 ..< rpcMsg.messages.len(): # for every message
template msg(): untyped =
rpcMsg.messages[i]
template topic(): string =
msg.topic
let msgIdResult = g.msgIdProvider(msg)
@@ -551,7 +598,8 @@ method rpcHandler*(g: GossipSub,
# avoid processing messages we are not interested in
if topic notin g.topics:
debug "Dropping message of topic without subscription", msgId = shortLog(msgId), peer
debug "Dropping message of topic without subscription",
msgId = shortLog(msgId), peer
continue
if (msg.signature.len > 0 or g.verifySignature) and not msg.verify():
@@ -581,7 +629,7 @@ method rpcHandler*(g: GossipSub,
g.handleControl(peer, rpcMsg.control.unsafeGet())
# Now, check subscription to update the meshes if required
for i in 0..<min(g.topicsHigh, rpcMsg.subscriptions.len):
for i in 0 ..< min(g.topicsHigh, rpcMsg.subscriptions.len):
let topic = rpcMsg.subscriptions[i].topic
if topic in g.topics and g.mesh.peers(topic) < g.parameters.dLow:
# rebalance but don't update metrics here, we do that only in the heartbeat
@@ -604,11 +652,20 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
# Remove peers from the mesh since we're no longer both interested
# in the topic
let msg = RPCMsg(control: some(ControlMessage(
prune: @[ControlPrune(
topicID: topic,
peers: g.peerExchangeList(topic),
backoff: g.parameters.unsubscribeBackoff.seconds.uint64)])))
let msg = RPCMsg(
control: some(
ControlMessage(
prune:
@[
ControlPrune(
topicID: topic,
peers: g.peerExchangeList(topic),
backoff: g.parameters.unsubscribeBackoff.seconds.uint64,
)
]
)
)
)
g.broadcast(mpeers, msg, isHighPriority = true)
for peer in mpeers:
@@ -619,9 +676,7 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
# Send unsubscribe (in reverse order to sub/graft)
procCall PubSub(g).onTopicSubscription(topic, subscribed)
method publish*(g: GossipSub,
topic: string,
data: seq[byte]): Future[int] {.async.} =
method publish*(g: GossipSub, topic: string, data: seq[byte]): Future[int] {.async.} =
logScope:
topic
@@ -650,19 +705,23 @@ method publish*(g: GossipSub,
let maxPeersToFlood =
if g.parameters.bandwidthEstimatebps > 0:
let
bandwidth = (g.parameters.bandwidthEstimatebps) div 8 div 1000 # Divisions are to convert it to Bytes per ms TODO replace with bandwidth estimate
bandwidth = (g.parameters.bandwidthEstimatebps) div 8 div 1000
# Divisions are to convert it to Bytes per ms TODO replace with bandwidth estimate
msToTransmit = max(data.len div bandwidth, 1)
max(g.parameters.heartbeatInterval.milliseconds div msToTransmit, g.parameters.dLow)
max(
g.parameters.heartbeatInterval.milliseconds div msToTransmit,
g.parameters.dLow,
)
else:
int.high() # unlimited
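# Worked example of the flood cap above (illustrative): the default
# bandwidthEstimatebps of 100_000_000 converts to 12_500 bytes/ms, so a
# 50 KB message takes ~4 ms to transmit and a 1000 ms heartbeat admits
# up to 250 flood-publish peers (floored at dLow).
let
  bps = 100_000_000
  bytesPerMs = bps div 8 div 1000
  msToSend = max(50_000 div bytesPerMs, 1)
doAssert bytesPerMs == 12_500
doAssert 1_000 div msToSend == 250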
for peer in g.gossipsub.getOrDefault(topic):
if peers.len >= maxPeersToFlood: break
if peers.len >= maxPeersToFlood:
break
if peer.score >= g.parameters.publishThreshold:
trace "publish: including flood/high score peer", peer
peers.incl(peer)
elif peers.len < g.parameters.dLow:
# not subscribed or bad mesh, send to fanout peers
# when flood-publishing, fanout won't help since all potential peers have
@@ -675,7 +734,8 @@ method publish*(g: GossipSub,
for fanPeer in fanoutPeers:
peers.incl(fanPeer)
if peers.len > g.parameters.d: break
if peers.len > g.parameters.d:
break
# Attempting to publish counts as fanout send (even if the message
# ultimately is not sent)
@@ -683,9 +743,10 @@ method publish*(g: GossipSub,
if peers.len == 0:
let topicPeers = g.gossipsub.getOrDefault(topic).toSeq()
debug "No peers for topic, skipping publish", peersOnTopic = topicPeers.len,
connectedPeers = topicPeers.filterIt(it.connected).len,
topic
debug "No peers for topic, skipping publish",
peersOnTopic = topicPeers.len,
connectedPeers = topicPeers.filterIt(it.connected).len,
topic
libp2p_gossipsub_failed_publish.inc()
return 0
@@ -697,12 +758,12 @@ method publish*(g: GossipSub,
inc g.msgSeqno
Message.init(some(g.peerInfo), data, topic, some(g.msgSeqno), g.sign)
msgId = g.msgIdProvider(msg).valueOr:
trace "Error generating message id, skipping publish",
error = error
trace "Error generating message id, skipping publish", error = error
libp2p_gossipsub_failed_publish.inc()
return 0
logScope: msgId = shortLog(msgId)
logScope:
msgId = shortLog(msgId)
trace "Created new message", msg = shortLog(msg), peers = peers.len
@@ -722,7 +783,7 @@ method publish*(g: GossipSub,
else:
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = ["generic"])
trace "Published message to peers", peers=peers.len
trace "Published message to peers", peers = peers.len
return peers.len
proc maintainDirectPeer(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.async.} =
@@ -785,8 +846,7 @@ method stop*(g: GossipSub): Future[void] {.async: (raises: [], raw: true).} =
g.heartbeatFut = nil
fut
method initPubSub*(g: GossipSub)
{.raises: [InitializationError].} =
method initPubSub*(g: GossipSub) {.raises: [InitializationError].} =
procCall FloodSub(g).initPubSub()
if not g.parameters.explicit:
@@ -802,13 +862,10 @@ method initPubSub*(g: GossipSub)
# init gossip stuff
g.mcache = MCache.init(g.parameters.historyGossip, g.parameters.historyLength)
method getOrCreatePeer*(
g: GossipSub,
peerId: PeerId,
protos: seq[string]): PubSubPeer =
method getOrCreatePeer*(g: GossipSub, peerId: PeerId, protos: seq[string]): PubSubPeer =
let peer = procCall PubSub(g).getOrCreatePeer(peerId, protos)
g.parameters.overheadRateLimit.withValue(overheadRateLimit):
peer.overheadRateLimitOpt = Opt.some(TokenBucket.new(overheadRateLimit.bytes, overheadRateLimit.interval))
peer.overheadRateLimitOpt =
Opt.some(TokenBucket.new(overheadRateLimit.bytes, overheadRateLimit.interval))
peer.maxNumElementsInNonPriorityQueue = g.parameters.maxNumElementsInNonPriorityQueue
return peer


@@ -14,24 +14,55 @@ import chronos, chronicles, metrics
import "."/[types, scoring]
import ".."/[pubsubpeer, peertable, mcache, floodsub, pubsub]
import "../rpc"/[messages]
import "../../.."/[peerid, multiaddress, utility, switch, routing_record, signed_envelope, utils/heartbeat]
import
"../../.."/[
peerid,
multiaddress,
utility,
switch,
routing_record,
signed_envelope,
utils/heartbeat,
]
logScope:
topics = "libp2p gossipsub"
declareGauge(libp2p_gossipsub_cache_window_size, "the number of messages in the cache")
declareGauge(libp2p_gossipsub_peers_per_topic_mesh, "gossipsub peers per topic in mesh", labels = ["topic"])
declareGauge(libp2p_gossipsub_peers_per_topic_fanout, "gossipsub peers per topic in fanout", labels = ["topic"])
declareGauge(libp2p_gossipsub_peers_per_topic_gossipsub, "gossipsub peers per topic in gossipsub", labels = ["topic"])
declareGauge(
libp2p_gossipsub_peers_per_topic_mesh,
"gossipsub peers per topic in mesh",
labels = ["topic"],
)
declareGauge(
libp2p_gossipsub_peers_per_topic_fanout,
"gossipsub peers per topic in fanout",
labels = ["topic"],
)
declareGauge(
libp2p_gossipsub_peers_per_topic_gossipsub,
"gossipsub peers per topic in gossipsub",
labels = ["topic"],
)
declareGauge(libp2p_gossipsub_under_dout_topics, "number of topics below dout")
declareGauge(libp2p_gossipsub_no_peers_topics, "number of topics in mesh with no peers")
declareGauge(libp2p_gossipsub_low_peers_topics, "number of topics in mesh with at least one but below dlow peers")
declareGauge(libp2p_gossipsub_healthy_peers_topics, "number of topics in mesh with at least dlow peers (but below dhigh)")
declareCounter(libp2p_gossipsub_above_dhigh_condition, "number of above dhigh pruning branches ran", labels = ["topic"])
declareGauge(
libp2p_gossipsub_low_peers_topics,
"number of topics in mesh with at least one but below dlow peers",
)
declareGauge(
libp2p_gossipsub_healthy_peers_topics,
"number of topics in mesh with at least dlow peers (but below dhigh)",
)
declareCounter(
libp2p_gossipsub_above_dhigh_condition,
"number of above dhigh pruning branches ran",
labels = ["topic"],
)
declareGauge(libp2p_gossipsub_received_iwants, "received iwants", labels = ["kind"])
proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) =
g.withPeerStats(p.peerId) do (stats: var PeerStats):
g.withPeerStats(p.peerId) do(stats: var PeerStats):
var info = stats.topicInfos.getOrDefault(topic)
info.graftTime = Moment.now()
info.meshTime = 0.seconds
@@ -40,40 +71,40 @@ proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) =
stats.topicInfos[topic] = info
trace "grafted", peer=p, topic
trace "grafted", peer = p, topic
proc pruned*(g: GossipSub,
p: PubSubPeer,
topic: string,
setBackoff: bool = true,
backoff = none(Duration)) =
proc pruned*(
g: GossipSub,
p: PubSubPeer,
topic: string,
setBackoff: bool = true,
backoff = none(Duration),
) =
if setBackoff:
let
backoffDuration = backoff.get(g.parameters.pruneBackoff)
backoffMoment = Moment.fromNow(backoffDuration)
g.backingOff
.mgetOrPut(topic, initTable[PeerId, Moment]())[p.peerId] = backoffMoment
g.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]())[p.peerId] = backoffMoment
g.peerStats.withValue(p.peerId, stats):
stats.topicInfos.withValue(topic, info):
g.topicParams.withValue(topic, topicParams):
# penalize a peer that delivered no message
let threshold = topicParams[].meshMessageDeliveriesThreshold
if info[].inMesh and
info[].meshMessageDeliveriesActive and
if info[].inMesh and info[].meshMessageDeliveriesActive and
info[].meshMessageDeliveries < threshold:
let deficit = threshold - info.meshMessageDeliveries
info[].meshFailurePenalty += deficit * deficit
info.inMesh = false
trace "pruned", peer=p, topic
trace "pruned", peer = p, topic
proc handleBackingOff*(t: var BackoffTable, topic: string) =
let now = Moment.now()
var expired = toSeq(t.getOrDefault(topic).pairs())
expired.keepIf do (pair: tuple[peer: PeerId, expire: Moment]) -> bool:
expired.keepIf do(pair: tuple[peer: PeerId, expire: Moment]) -> bool:
now >= pair.expire
for (peer, _) in expired:
t.withValue(topic, v):
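# Self-contained sketch of the backoff expiry above, with integer ticks
# standing in for chronos Moments: expired entries are collected first,
# then removed from the per-topic table.
import std/[tables, sequtils]

var backoff = {"peerA": 100, "peerB": 300}.toTable # peer -> expiry tick
let now = 200
for (peer, _) in toSeq(backoff.pairs).filterIt(now >= it[1]):
  backoff.del(peer)
doAssert "peerA" notin backoff and "peerB" in backoff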
@@ -83,12 +114,12 @@ proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] =
if not g.parameters.enablePX:
return @[]
var peers = g.gossipsub.getOrDefault(topic, initHashSet[PubSubPeer]()).toSeq()
peers.keepIf do (x: PubSubPeer) -> bool:
x.score >= 0.0
peers.keepIf do(x: PubSubPeer) -> bool:
x.score >= 0.0
# by spec, larger than Dhi, but let's put some hard caps
peers.setLen(min(peers.len, g.parameters.dHigh * 2))
let sprBook = g.switch.peerStore[SPRBook]
peers.map do (x: PubSubPeer) -> PeerInfoMsg:
peers.map do(x: PubSubPeer) -> PeerInfoMsg:
PeerInfoMsg(
peerId: x.peerId,
signedPeerRecord:
@@ -96,11 +127,12 @@ proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] =
sprBook[x.peerId].encode().get(default(seq[byte]))
else:
default(seq[byte])
)
,
)
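# Sketch of the PX candidate selection in peerExchangeList above
# (illustrative): drop negative-score peers, then hard-cap the list at
# 2 * dHigh entries.
import std/sequtils

var scores = @[1.2, -0.5, 0.0, 3.4, -2.0]
scores.keepItIf(it >= 0.0)
let dHigh = 2
scores.setLen(min(scores.len, dHigh * 2))
doAssert scores == @[1.2, 0.0, 3.4]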
proc handleGraft*(g: GossipSub,
peer: PubSubPeer,
grafts: seq[ControlGraft]): seq[ControlPrune] =
proc handleGraft*(
g: GossipSub, peer: PubSubPeer, grafts: seq[ControlGraft]
): seq[ControlPrune] =
var prunes: seq[ControlPrune]
for graft in grafts:
let topic = graft.topicID
@@ -113,14 +145,18 @@ proc handleGraft*(g: GossipSub,
warn "a direct peer attempted to graft us, peering agreements should be reciprocal",
peer, topic
# and such an attempt should be logged and rejected with a PRUNE
prunes.add(ControlPrune(
topicID: topic,
peers: @[], # omitting heavy computation here as the remote did something illegal
backoff: g.parameters.pruneBackoff.seconds.uint64))
prunes.add(
ControlPrune(
topicID: topic,
peers: @[],
# omitting heavy computation here as the remote did something illegal
backoff: g.parameters.pruneBackoff.seconds.uint64,
)
)
let backoff = Moment.fromNow(g.parameters.pruneBackoff)
g.backingOff
.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff
g.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff
peer.behaviourPenalty += 0.1
@@ -134,19 +170,22 @@ proc handleGraft*(g: GossipSub,
# Ignore BackoffSlackTime here, since this is only for outbound activity
# and subtract a second time to avoid race conditions
# (peers may wait to graft us at the exact instant they're allowed to)
if g.backingOff
.getOrDefault(topic)
.getOrDefault(peer.peerId) - (BackoffSlackTime * 2).seconds > Moment.now():
if g.backingOff.getOrDefault(topic).getOrDefault(peer.peerId) -
(BackoffSlackTime * 2).seconds > Moment.now():
debug "a backingOff peer attempted to graft us", peer, topic
# and such an attempt should be logged and rejected with a PRUNE
prunes.add(ControlPrune(
topicID: topic,
peers: @[], # omitting heavy computation here as the remote did something illegal
backoff: g.parameters.pruneBackoff.seconds.uint64))
prunes.add(
ControlPrune(
topicID: topic,
peers: @[],
# omitting heavy computation here as the remote did something illegal
backoff: g.parameters.pruneBackoff.seconds.uint64,
)
)
let backoff = Moment.fromNow(g.parameters.pruneBackoff)
g.backingOff
.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff
g.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff
peer.behaviourPenalty += 0.1
@@ -173,21 +212,27 @@ proc handleGraft*(g: GossipSub,
else:
trace "pruning grafting peer, mesh full",
peer, topic, score = peer.score, mesh = g.mesh.peers(topic)
prunes.add(ControlPrune(
topicID: topic,
peers: g.peerExchangeList(topic),
backoff: g.parameters.pruneBackoff.seconds.uint64))
prunes.add(
ControlPrune(
topicID: topic,
peers: g.peerExchangeList(topic),
backoff: g.parameters.pruneBackoff.seconds.uint64,
)
)
let backoff = Moment.fromNow(g.parameters.pruneBackoff)
g.backingOff
.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff
g.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] =
backoff
else:
trace "peer grafting topic we're not interested in", peer, topic
# gossip 1.1, we do not send a control message prune anymore
return prunes
proc getPeers(prune: ControlPrune, peer: PubSubPeer): seq[(PeerId, Option[PeerRecord])] =
proc getPeers(
prune: ControlPrune, peer: PubSubPeer
): seq[(PeerId, Option[PeerRecord])] =
var routingRecords: seq[(PeerId, Option[PeerRecord])]
for record in prune.peers:
var peerRecord = none(PeerRecord)
@@ -214,31 +259,28 @@ proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) =
if prune.backoff > 0:
let
# avoid overflows and clamp to reasonable value
backoffSeconds = clamp(
prune.backoff + BackoffSlackTime,
0'u64,
1.days.seconds.uint64
)
backoffSeconds =
clamp(prune.backoff + BackoffSlackTime, 0'u64, 1.days.seconds.uint64)
backoff = Moment.fromNow(backoffSeconds.int64.seconds)
current = g.backingOff.getOrDefault(topic).getOrDefault(peer.peerId)
if backoff > current:
g.backingOff
.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff
g.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] =
backoff
trace "pruning rpc received peer", peer, score = peer.score
g.pruned(peer, topic, setBackoff = false)
g.mesh.removePeer(topic, peer)
if peer.score > g.parameters.gossipThreshold and prune.peers.len > 0 and
g.routingRecordsHandler.len > 0:
g.routingRecordsHandler.len > 0:
let routingRecords = prune.getPeers(peer)
for handler in g.routingRecordsHandler:
handler(peer.peerId, topic, routingRecords)
proc handleIHave*(g: GossipSub,
peer: PubSubPeer,
ihaves: seq[ControlIHave]): ControlIWant =
proc handleIHave*(
g: GossipSub, peer: PubSubPeer, ihaves: seq[ControlIHave]
): ControlIWant =
var res: ControlIWant
if peer.score < g.parameters.gossipThreshold:
trace "ihave: ignoring low score peer", peer, score = peer.score
@@ -246,8 +288,7 @@ proc handleIHave*(g: GossipSub,
trace "ihave: ignoring out of budget peer", peer, score = peer.score
else:
for ihave in ihaves:
trace "peer sent ihave",
peer, topicID = ihave.topicID, msgs = ihave.messageIDs
trace "peer sent ihave", peer, topicID = ihave.topicID, msgs = ihave.messageIDs
if ihave.topicID in g.topics:
for msgId in ihave.messageIDs:
if not g.hasSeen(g.salt(msgId)):
@@ -256,23 +297,22 @@ proc handleIHave*(g: GossipSub,
elif msgId notin res.messageIDs:
res.messageIDs.add(msgId)
dec peer.iHaveBudget
trace "requested message via ihave", messageID=msgId
trace "requested message via ihave", messageID = msgId
# shuffling res.messageIDs before sending it out to increase the likelihood
# of getting an answer if the peer truncates the list due to internal size restrictions.
g.rng.shuffle(res.messageIDs)
return res
proc handleIDontWant*(g: GossipSub,
peer: PubSubPeer,
iDontWants: seq[ControlIWant]) =
proc handleIDontWant*(g: GossipSub, peer: PubSubPeer, iDontWants: seq[ControlIWant]) =
for dontWant in iDontWants:
for messageId in dontWant.messageIDs:
if peer.heDontWants[^1].len > 1000: break
if peer.heDontWants[^1].len > 1000:
break
peer.heDontWants[^1].incl(g.salt(messageId))
proc handleIWant*(g: GossipSub,
peer: PubSubPeer,
iwants: seq[ControlIWant]): seq[Message] =
proc handleIWant*(
g: GossipSub, peer: PubSubPeer, iwants: seq[ControlIWant]
): seq[Message] =
var
messages: seq[Message]
invalidRequests = 0
@@ -284,17 +324,17 @@ proc handleIWant*(g: GossipSub,
trace "peer sent iwant", peer, messageID = mid
# canAskIWant will only return true once for a specific message
if not peer.canAskIWant(mid):
libp2p_gossipsub_received_iwants.inc(1, labelValues=["notsent"])
libp2p_gossipsub_received_iwants.inc(1, labelValues = ["notsent"])
invalidRequests.inc()
if invalidRequests > 20:
libp2p_gossipsub_received_iwants.inc(1, labelValues=["skipped"])
libp2p_gossipsub_received_iwants.inc(1, labelValues = ["skipped"])
return messages
continue
let msg = g.mcache.get(mid).valueOr:
libp2p_gossipsub_received_iwants.inc(1, labelValues=["unknown"])
libp2p_gossipsub_received_iwants.inc(1, labelValues = ["unknown"])
continue
libp2p_gossipsub_received_iwants.inc(1, labelValues=["correct"])
libp2p_gossipsub_received_iwants.inc(1, labelValues = ["correct"])
messages.add(msg)
return messages
@@ -303,9 +343,15 @@ proc commitMetrics(metrics: var MeshMetrics) =
libp2p_gossipsub_no_peers_topics.set(metrics.noPeersTopics)
libp2p_gossipsub_under_dout_topics.set(metrics.underDoutTopics)
libp2p_gossipsub_healthy_peers_topics.set(metrics.healthyPeersTopics)
libp2p_gossipsub_peers_per_topic_gossipsub.set(metrics.otherPeersPerTopicGossipsub, labelValues = ["other"])
libp2p_gossipsub_peers_per_topic_fanout.set(metrics.otherPeersPerTopicFanout, labelValues = ["other"])
libp2p_gossipsub_peers_per_topic_mesh.set(metrics.otherPeersPerTopicMesh, labelValues = ["other"])
libp2p_gossipsub_peers_per_topic_gossipsub.set(
metrics.otherPeersPerTopicGossipsub, labelValues = ["other"]
)
libp2p_gossipsub_peers_per_topic_fanout.set(
metrics.otherPeersPerTopicFanout, labelValues = ["other"]
)
libp2p_gossipsub_peers_per_topic_mesh.set(
metrics.otherPeersPerTopicMesh, labelValues = ["other"]
)
proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) =
logScope:
@@ -331,14 +377,13 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
var
candidates: seq[PubSubPeer]
currentMesh = addr defaultMesh
g.mesh.withValue(topic, v): currentMesh = v
g.mesh.withValue(topic, v):
currentMesh = v
g.gossipsub.withValue(topic, peerList):
for it in peerList[]:
if
it.connected and
if it.connected and
# avoid negative score peers
it.score >= 0.0 and
it notin currentMesh[] and
it.score >= 0.0 and it notin currentMesh[] and
# don't pick direct peers
it.peerId notin g.parameters.directPeers and
# and avoid peers we are backing off
@@ -362,21 +407,19 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
g.grafted(peer, topic)
g.fanout.removePeer(topic, peer)
grafts &= peer
elif nOutPeers < g.parameters.dOut:
trace "replenishing mesh outbound quota", peers = g.mesh.peers(topic)
var
candidates: seq[PubSubPeer]
currentMesh = addr defaultMesh
g.mesh.withValue(topic, v): currentMesh = v
g.mesh.withValue(topic, v):
currentMesh = v
g.gossipsub.withValue(topic, peerList):
for it in peerList[]:
if
it.connected and
# get only outbound ones
it.outbound and
it notin currentMesh[] and
if it.connected and
# get only outbound ones
it.outbound and it notin currentMesh[] and
# avoid negative score peers
it.score >= 0.0 and
# don't pick direct peers
@@ -402,7 +445,6 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
g.fanout.removePeer(topic, peer)
grafts &= peer
# get again npeers after possible grafts
npeers = g.mesh.peers(topic)
if npeers > g.parameters.dHigh:
@@ -413,9 +455,15 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
libp2p_gossipsub_above_dhigh_condition.inc(labelValues = ["other"])
# prune peers if we've gone over Dhi
prunes = toSeq(try: g.mesh[topic] except KeyError: raiseAssert "have peers")
prunes = toSeq(
try:
g.mesh[topic]
except KeyError:
raiseAssert "have peers"
)
# avoid pruning peers we are currently grafting in this heartbeat
prunes.keepIf do (x: PubSubPeer) -> bool: x notin grafts
prunes.keepIf do(x: PubSubPeer) -> bool:
x notin grafts
# shuffle anyway, score might be not used
g.rng.shuffle(prunes)
@@ -463,7 +511,12 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
# opportunistic grafting, by spec mesh should not be empty...
if g.mesh.peers(topic) > 1:
var peers = toSeq(try: g.mesh[topic] except KeyError: raiseAssert "have peers")
var peers = toSeq(
try:
g.mesh[topic]
except KeyError:
raiseAssert "have peers"
)
# grafting so high score has priority
peers.sort(byScore, SortOrder.Descending)
let medianIdx = peers.len div 2
@@ -474,13 +527,12 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
var
avail: seq[PubSubPeer]
currentMesh = addr defaultMesh
g.mesh.withValue(topic, v): currentMesh = v
g.mesh.withValue(topic, v):
currentMesh = v
g.gossipsub.withValue(topic, peerList):
for it in peerList[]:
if
# avoid negative score peers
it.score >= median.score and
it notin currentMesh[] and
if it.score >= median.score and # avoid negative score peers
it notin currentMesh[] and
# don't pick direct peers
it.peerId notin g.parameters.directPeers and
# and avoid peers we are backing off
@@ -507,17 +559,23 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
inc metrics[].healthyPeersTopics
var meshPeers = toSeq(g.mesh.getOrDefault(topic, initHashSet[PubSubPeer]()))
meshPeers.keepIf do (x: PubSubPeer) -> bool: x.outbound
meshPeers.keepIf do(x: PubSubPeer) -> bool:
x.outbound
if meshPeers.len < g.parameters.dOut:
inc metrics[].underDoutTopics
if g.knownTopics.contains(topic):
libp2p_gossipsub_peers_per_topic_gossipsub
.set(g.gossipsub.peers(topic).int64, labelValues = [topic])
libp2p_gossipsub_peers_per_topic_fanout
.set(g.fanout.peers(topic).int64, labelValues = [topic])
libp2p_gossipsub_peers_per_topic_mesh
.set(g.mesh.peers(topic).int64, labelValues = [topic])
libp2p_gossipsub_peers_per_topic_gossipsub.set(
g.gossipsub.peers(topic).int64, labelValues = [topic]
)
libp2p_gossipsub_peers_per_topic_fanout.set(
g.fanout.peers(topic).int64, labelValues = [topic]
)
libp2p_gossipsub_peers_per_topic_mesh.set(
g.mesh.peers(topic).int64, labelValues = [topic]
)
else:
metrics[].otherPeersPerTopicGossipsub += g.gossipsub.peers(topic).int64
metrics[].otherPeersPerTopicFanout += g.fanout.peers(topic).int64
@@ -527,14 +585,24 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
# Send changes to peers after table updates to avoid stale state
if grafts.len > 0:
let graft = RPCMsg(control: some(ControlMessage(graft: @[ControlGraft(topicID: topic)])))
let graft =
RPCMsg(control: some(ControlMessage(graft: @[ControlGraft(topicID: topic)])))
g.broadcast(grafts, graft, isHighPriority = true)
if prunes.len > 0:
let prune = RPCMsg(control: some(ControlMessage(
prune: @[ControlPrune(
topicID: topic,
peers: g.peerExchangeList(topic),
backoff: g.parameters.pruneBackoff.seconds.uint64)])))
let prune = RPCMsg(
control: some(
ControlMessage(
prune:
@[
ControlPrune(
topicID: topic,
peers: g.peerExchangeList(topic),
backoff: g.parameters.pruneBackoff.seconds.uint64,
)
]
)
)
)
g.broadcast(prunes, prune, isHighPriority = true)
proc dropFanoutPeers*(g: GossipSub) =
@@ -552,14 +620,16 @@ proc dropFanoutPeers*(g: GossipSub) =
proc replenishFanout*(g: GossipSub, topic: string) =
## get fanout peers for a topic
logScope: topic
logScope:
topic
trace "about to replenish fanout"
if g.fanout.peers(topic) < g.parameters.dLow:
let currentMesh = g.mesh.getOrDefault(topic)
trace "replenishing fanout", peers = g.fanout.peers(topic)
for peer in g.gossipsub.getOrDefault(topic):
if peer in currentMesh: continue
if peer in currentMesh:
continue
if g.fanout.addPeer(topic, peer):
if g.fanout.peers(topic) == g.parameters.d:
break
@@ -574,14 +644,14 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] =
var control: Table[PubSubPeer, ControlMessage]
let topics = toHashSet(toSeq(g.mesh.keys)) + toHashSet(toSeq(g.fanout.keys))
trace "getting gossip peers (iHave)", ntopics=topics.len
trace "getting gossip peers (iHave)", ntopics = topics.len
for topic in topics:
if topic notin g.gossipsub:
trace "topic not in gossip array, skipping", topic = topic
continue
let mids = g.mcache.window(topic)
if not(mids.len > 0):
if not (mids.len > 0):
trace "no messages to emit"
continue
@@ -589,7 +659,7 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] =
cacheWindowSize += midsSeq.len
trace "got messages to emit", size=midsSeq.len
trace "got messages to emit", size = midsSeq.len
# not in spec
# similar to rust: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/behaviour.rs#L2101
@@ -605,10 +675,9 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] =
gossipPeers = mesh + fanout
var allPeers = toSeq(g.gossipsub.getOrDefault(topic))
allPeers.keepIf do (x: PubSubPeer) -> bool:
x.peerId notin g.parameters.directPeers and
x notin gossipPeers and
x.score >= g.parameters.gossipThreshold
allPeers.keepIf do(x: PubSubPeer) -> bool:
x.peerId notin g.parameters.directPeers and x notin gossipPeers and
x.score >= g.parameters.gossipThreshold
# https://github.com/libp2p/specs/blob/98c5aa9421703fc31b0833ad8860a55db15be063/pubsub/gossipsub/gossipsub-v1.1.md#adaptive-gossip-dissemination
let
@@ -629,68 +698,77 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] =
return control
proc onHeartbeat(g: GossipSub) =
# reset IWANT budget
# reset IHAVE cap
# reset IWANT budget
# reset IHAVE cap
block:
for peer in g.peers.values:
peer.sentIHaves.addFirst(default(HashSet[MessageId]))
if peer.sentIHaves.len > g.parameters.historyLength:
discard peer.sentIHaves.popLast()
peer.heDontWants.addFirst(default(HashSet[SaltedId]))
if peer.heDontWants.len > g.parameters.historyLength:
discard peer.heDontWants.popLast()
peer.iHaveBudget = IHavePeerBudget
peer.pingBudget = PingsPeerBudget
var meshMetrics = MeshMetrics()
for t in toSeq(g.topics.keys):
# remove expired backoffs
block:
for peer in g.peers.values:
peer.sentIHaves.addFirst(default(HashSet[MessageId]))
if peer.sentIHaves.len > g.parameters.historyLength:
discard peer.sentIHaves.popLast()
peer.heDontWants.addFirst(default(HashSet[SaltedId]))
if peer.heDontWants.len > g.parameters.historyLength:
discard peer.heDontWants.popLast()
peer.iHaveBudget = IHavePeerBudget
peer.pingBudget = PingsPeerBudget
handleBackingOff(g.backingOff, t)
var meshMetrics = MeshMetrics()
# prune every negative score peer
# do this before rebalance
# in order to avoid grafted -> pruned in the same cycle
let meshPeers = g.mesh.getOrDefault(t)
var prunes: seq[PubSubPeer]
for peer in meshPeers:
if peer.score < 0.0:
trace "pruning negative score peer", peer, score = peer.score
g.pruned(peer, t)
g.mesh.removePeer(t, peer)
prunes &= peer
if prunes.len > 0:
let prune = RPCMsg(
control: some(
ControlMessage(
prune:
@[
ControlPrune(
topicID: t,
peers: g.peerExchangeList(t),
backoff: g.parameters.pruneBackoff.seconds.uint64,
)
]
)
)
)
g.broadcast(prunes, prune, isHighPriority = true)
for t in toSeq(g.topics.keys):
# remove expired backoffs
block:
handleBackingOff(g.backingOff, t)
# pass by ptr in order to both signal we want to update metrics
# and also update the struct for each topic during this iteration
g.rebalanceMesh(t, addr meshMetrics)
# prune every negative score peer
# do this before rebalance
# in order to avoid grafted -> pruned in the same cycle
let meshPeers = g.mesh.getOrDefault(t)
var prunes: seq[PubSubPeer]
for peer in meshPeers:
if peer.score < 0.0:
trace "pruning negative score peer", peer, score = peer.score
g.pruned(peer, t)
g.mesh.removePeer(t, peer)
prunes &= peer
if prunes.len > 0:
let prune = RPCMsg(control: some(ControlMessage(
prune: @[ControlPrune(
topicID: t,
peers: g.peerExchangeList(t),
backoff: g.parameters.pruneBackoff.seconds.uint64)])))
g.broadcast(prunes, prune, isHighPriority = true)
commitMetrics(meshMetrics)
# pass by ptr in order to both signal we want to update metrics
# and also update the struct for each topic during this iteration
g.rebalanceMesh(t, addr meshMetrics)
g.dropFanoutPeers()
commitMetrics(meshMetrics)
# replenish known topics to the fanout
for t in toSeq(g.fanout.keys):
g.replenishFanout(t)
g.dropFanoutPeers()
let peers = g.getGossipPeers()
for peer, control in peers:
# only ihave from here
for ihave in control.ihave:
if g.knownTopics.contains(ihave.topicID):
libp2p_pubsub_broadcast_ihave.inc(labelValues = [ihave.topicID])
else:
libp2p_pubsub_broadcast_ihave.inc(labelValues = ["generic"])
g.send(peer, RPCMsg(control: some(control)), isHighPriority = true)
# replenish known topics to the fanout
for t in toSeq(g.fanout.keys):
g.replenishFanout(t)
let peers = g.getGossipPeers()
for peer, control in peers:
# only ihave from here
for ihave in control.ihave:
if g.knownTopics.contains(ihave.topicID):
libp2p_pubsub_broadcast_ihave.inc(labelValues = [ihave.topicID])
else:
libp2p_pubsub_broadcast_ihave.inc(labelValues = ["generic"])
g.send(peer, RPCMsg(control: some(control)), isHighPriority = true)
g.mcache.shift() # shift the cache
g.mcache.shift() # shift the cache
proc heartbeat*(g: GossipSub) {.async.} =
heartbeat "GossipSub", g.parameters.heartbeatInterval:


@@ -21,16 +21,56 @@ import ../pubsub
logScope:
topics = "libp2p gossipsub"
declareGauge(libp2p_gossipsub_peers_scores, "the scores of the peers in gossipsub", labels = ["agent"])
declareCounter(libp2p_gossipsub_bad_score_disconnection, "the number of peers disconnected by gossipsub", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_firstMessageDeliveries, "Detailed gossipsub scoring metric", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_meshMessageDeliveries, "Detailed gossipsub scoring metric", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_meshFailurePenalty, "Detailed gossipsub scoring metric", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_invalidMessageDeliveries, "Detailed gossipsub scoring metric", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_appScore, "Detailed gossipsub scoring metric", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_behaviourPenalty, "Detailed gossipsub scoring metric", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_colocationFactor, "Detailed gossipsub scoring metric", labels = ["agent"])
declarePublicCounter(libp2p_gossipsub_peers_rate_limit_hits, "The number of times peers were above their rate limit", labels = ["agent"])
declareGauge(
libp2p_gossipsub_peers_scores,
"the scores of the peers in gossipsub",
labels = ["agent"],
)
declareCounter(
libp2p_gossipsub_bad_score_disconnection,
"the number of peers disconnected by gossipsub",
labels = ["agent"],
)
declareGauge(
libp2p_gossipsub_peers_score_firstMessageDeliveries,
"Detailed gossipsub scoring metric",
labels = ["agent"],
)
declareGauge(
libp2p_gossipsub_peers_score_meshMessageDeliveries,
"Detailed gossipsub scoring metric",
labels = ["agent"],
)
declareGauge(
libp2p_gossipsub_peers_score_meshFailurePenalty,
"Detailed gossipsub scoring metric",
labels = ["agent"],
)
declareGauge(
libp2p_gossipsub_peers_score_invalidMessageDeliveries,
"Detailed gossipsub scoring metric",
labels = ["agent"],
)
declareGauge(
libp2p_gossipsub_peers_score_appScore,
"Detailed gossipsub scoring metric",
labels = ["agent"],
)
declareGauge(
libp2p_gossipsub_peers_score_behaviourPenalty,
"Detailed gossipsub scoring metric",
labels = ["agent"],
)
declareGauge(
libp2p_gossipsub_peers_score_colocationFactor,
"Detailed gossipsub scoring metric",
labels = ["agent"],
)
declarePublicCounter(
libp2p_gossipsub_peers_rate_limit_hits,
"The number of times peers were above their rate limit",
labels = ["agent"],
)
proc init*(_: type[TopicParams]): TopicParams =
TopicParams(
@@ -50,21 +90,24 @@ proc init*(_: type[TopicParams]): TopicParams =
meshFailurePenaltyWeight: -1.0,
meshFailurePenaltyDecay: 0.5,
invalidMessageDeliveriesWeight: -1.0,
invalidMessageDeliveriesDecay: 0.5
invalidMessageDeliveriesDecay: 0.5,
)
proc withPeerStats*(
g: GossipSub,
peerId: PeerId,
action: proc (stats: var PeerStats) {.gcsafe, raises: [].}) =
action: proc(stats: var PeerStats) {.gcsafe, raises: [].},
) =
## Add or update peer statistics for a particular peer id - the statistics
## are retained across multiple connections until they expire
g.peerStats.withValue(peerId, stats) do:
g.peerStats.withValue(peerId, stats):
action(stats[])
do:
action(g.peerStats.mgetOrPut(peerId, PeerStats(
expire: Moment.now() + g.parameters.retainScore
)))
action(
g.peerStats.mgetOrPut(
peerId, PeerStats(expire: Moment.now() + g.parameters.retainScore)
)
)
func `/`(a, b: Duration): float64 =
let
@@ -72,14 +115,15 @@ func `/`(a, b: Duration): float64 =
fb = float64(b.nanoseconds)
fa / fb
func byScore*(x,y: PubSubPeer): int = system.cmp(x.score, y.score)
func byScore*(x, y: PubSubPeer): int =
system.cmp(x.score, y.score)
proc colocationFactor(g: GossipSub, peer: PubSubPeer): float64 =
let address = peer.address.valueOr: return 0.0
let address = peer.address.valueOr:
return 0.0
g.peersInIP.mgetOrPut(address, initHashSet[PeerId]()).incl(peer.peerId)
let
ipPeers = g.peersInIP.getOrDefault(address).len().float64
let ipPeers = g.peersInIP.getOrDefault(address).len().float64
if ipPeers > g.parameters.ipColocationFactorThreshold:
trace "colocationFactor over threshold", peer, address, ipPeers
let over = ipPeers - g.parameters.ipColocationFactorThreshold
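# Worked example of the colocation factor (illustrative): with the
# threshold at 1.0 and 4 peers sharing one IP, the excess is 3.0; per
# gossipsub v1.1 the penalty grows as the square of the excess, which is
# then weighted by the (non-positive) ipColocationFactorWeight.
let over = 4.0 - 1.0
doAssert over * over == 9.0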
@@ -95,7 +139,7 @@ proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async.} =
proc disconnectIfBadScorePeer*(g: GossipSub, peer: PubSubPeer, score: float64) =
if g.parameters.disconnectBadPeers and score < g.parameters.graylistThreshold and
peer.peerId notin g.parameters.directPeers:
peer.peerId notin g.parameters.directPeers:
debug "disconnecting bad score peer", peer, score = peer.score
asyncSpawn(g.disconnectPeer(peer))
libp2p_gossipsub_bad_score_disconnection.inc(labelValues = [peer.getAgent()])
@@ -110,7 +154,7 @@ proc updateScores*(g: GossipSub) = # avoid async
for peerId, stats in g.peerStats.mpairs:
let peer = g.peers.getOrDefault(peerId)
if isNil(peer) or not(peer.connected):
if isNil(peer) or not (peer.connected):
if now > stats.expire:
evicting.add(peerId)
trace "evicted peer from memory", peer = peerId
@@ -147,12 +191,14 @@ proc updateScores*(g: GossipSub) = # avoid async
else:
info.meshMessageDeliveriesActive = false
topicScore += info.firstMessageDeliveries * topicParams.firstMessageDeliveriesWeight
topicScore +=
info.firstMessageDeliveries * topicParams.firstMessageDeliveriesWeight
trace "p2", peer, p2 = info.firstMessageDeliveries, topic, topicScore
if info.meshMessageDeliveriesActive:
if info.meshMessageDeliveries < topicParams.meshMessageDeliveriesThreshold:
let deficit = topicParams.meshMessageDeliveriesThreshold - info.meshMessageDeliveries
let deficit =
topicParams.meshMessageDeliveriesThreshold - info.meshMessageDeliveries
let p3 = deficit * deficit
trace "p3", peer, p3, topic, topicScore
topicScore += p3 * topicParams.meshMessageDeliveriesWeight
@@ -160,20 +206,34 @@ proc updateScores*(g: GossipSub) = # avoid async
topicScore += info.meshFailurePenalty * topicParams.meshFailurePenaltyWeight
trace "p3b", peer, p3b = info.meshFailurePenalty, topic, topicScore
topicScore += info.invalidMessageDeliveries * info.invalidMessageDeliveries * topicParams.invalidMessageDeliveriesWeight
trace "p4", peer, p4 = info.invalidMessageDeliveries * info.invalidMessageDeliveries, topic, topicScore
topicScore +=
info.invalidMessageDeliveries * info.invalidMessageDeliveries *
topicParams.invalidMessageDeliveriesWeight
trace "p4",
peer,
p4 = info.invalidMessageDeliveries * info.invalidMessageDeliveries,
topic,
topicScore
scoreAcc += topicScore * topicParams.topicWeight
trace "updated peer topic's scores", peer, scoreAcc, topic, info, topicScore,
topicWeight = topicParams.topicWeight
trace "updated peer topic's scores",
peer, scoreAcc, topic, info, topicScore, topicWeight = topicParams.topicWeight
# Score metrics
let agent = peer.getAgent()
libp2p_gossipsub_peers_score_firstMessageDeliveries.inc(info.firstMessageDeliveries, labelValues = [agent])
libp2p_gossipsub_peers_score_meshMessageDeliveries.inc(info.meshMessageDeliveries, labelValues = [agent])
libp2p_gossipsub_peers_score_meshFailurePenalty.inc(info.meshFailurePenalty, labelValues = [agent])
libp2p_gossipsub_peers_score_invalidMessageDeliveries.inc(info.invalidMessageDeliveries, labelValues = [agent])
libp2p_gossipsub_peers_score_firstMessageDeliveries.inc(
info.firstMessageDeliveries, labelValues = [agent]
)
libp2p_gossipsub_peers_score_meshMessageDeliveries.inc(
info.meshMessageDeliveries, labelValues = [agent]
)
libp2p_gossipsub_peers_score_meshFailurePenalty.inc(
info.meshFailurePenalty, labelValues = [agent]
)
libp2p_gossipsub_peers_score_invalidMessageDeliveries.inc(
info.invalidMessageDeliveries, labelValues = [agent]
)
# Score decay
info.firstMessageDeliveries *= topicParams.firstMessageDeliveriesDecay
@@ -197,23 +257,37 @@ proc updateScores*(g: GossipSub) = # avoid async
stats.topicInfos[topic] = info
scoreAcc += peer.appScore * g.parameters.appSpecificWeight
trace "appScore", peer, scoreAcc, appScore = peer.appScore,
appSpecificWeight = g.parameters.appSpecificWeight
trace "appScore",
peer,
scoreAcc,
appScore = peer.appScore,
appSpecificWeight = g.parameters.appSpecificWeight
# The value of the parameter is the square of the counter and is mixed with a negative weight.
scoreAcc += peer.behaviourPenalty * peer.behaviourPenalty * g.parameters.behaviourPenaltyWeight
trace "behaviourPenalty", peer, scoreAcc, behaviourPenalty = peer.behaviourPenalty,
behaviourPenaltyWeight = g.parameters.behaviourPenaltyWeight
scoreAcc +=
peer.behaviourPenalty * peer.behaviourPenalty * g.parameters.behaviourPenaltyWeight
trace "behaviourPenalty",
peer,
scoreAcc,
behaviourPenalty = peer.behaviourPenalty,
behaviourPenaltyWeight = g.parameters.behaviourPenaltyWeight
let colocationFactor = g.colocationFactor(peer)
scoreAcc += colocationFactor * g.parameters.ipColocationFactorWeight
trace "colocationFactor", peer, scoreAcc, colocationFactor,
ipColocationFactorWeight = g.parameters.ipColocationFactorWeight
trace "colocationFactor",
peer,
scoreAcc,
colocationFactor,
ipColocationFactorWeight = g.parameters.ipColocationFactorWeight
# Score metrics
let agent = peer.getAgent()
libp2p_gossipsub_peers_score_appScore.inc(peer.appScore, labelValues = [agent])
libp2p_gossipsub_peers_score_behaviourPenalty.inc(peer.behaviourPenalty, labelValues = [agent])
libp2p_gossipsub_peers_score_colocationFactor.inc(colocationFactor, labelValues = [agent])
libp2p_gossipsub_peers_score_behaviourPenalty.inc(
peer.behaviourPenalty, labelValues = [agent]
)
libp2p_gossipsub_peers_score_colocationFactor.inc(
colocationFactor, labelValues = [agent]
)
# decay behaviourPenalty
peer.behaviourPenalty *= g.parameters.behaviourPenaltyDecay
@@ -228,7 +302,8 @@ proc updateScores*(g: GossipSub) = # avoid async
stats.behaviourPenalty = peer.behaviourPenalty
stats.expire = now + g.parameters.retainScore # refresh expiration
trace "updated (accumulated) peer's score", peer, peerScore = peer.score, n_topics, is_grafted
trace "updated (accumulated) peer's score",
peer, peerScore = peer.score, n_topics, is_grafted
g.disconnectIfBadScorePeer(peer, stats.score)
libp2p_gossipsub_peers_scores.inc(peer.score, labelValues = [agent])
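# Worked example of the behaviour penalty term above (illustrative): the
# accumulated penalty enters the score squared and negatively weighted,
# then decays each scoring heartbeat (defaults: weight -1.0, decay 0.999).
var behaviourPenalty = 0.5
doAssert behaviourPenalty * behaviourPenalty * -1.0 == -0.25
behaviourPenalty *= 0.999 # decay toward zero between heartbeats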
@@ -243,15 +318,19 @@ proc scoringHeartbeat*(g: GossipSub) {.async.} =
trace "running scoring heartbeat", instance = cast[int](g)
g.updateScores()
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) {.async.} =
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) {.async.} =
let uselessAppBytesNum = msg.data.len
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(uselessAppBytesNum):
debug "Peer sent invalid message and it's above rate limit", peer, uselessAppBytesNum
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
debug "Peer sent invalid message and it's above rate limit",
peer, uselessAppBytesNum
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()])
# let's just measure at the beginning for test purposes.
if g.parameters.disconnectPeerAboveRateLimit:
await g.disconnectPeer(peer)
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
raise newException(
PeerRateLimitError, "Peer disconnected because it's above rate limit."
)
let topic = msg.topic
if topic notin g.topics:
@@ -265,11 +344,7 @@ proc addCapped*[T](stat: var T, diff, cap: T) =
stat += min(diff, cap - stat)
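# Quick check of addCapped above (restated here so the snippet is
# self-contained): increments saturate at the cap instead of overshooting.
proc addCapped[T](stat: var T, diff, cap: T) =
  stat += min(diff, cap - stat)

var deliveries = 9.5
deliveries.addCapped(1.0, 10.0)
doAssert deliveries == 10.0 # capped rather than 10.5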
proc rewardDelivered*(
g: GossipSub,
peer: PubSubPeer,
topic: string,
first: bool,
delay = ZeroDuration,
g: GossipSub, peer: PubSubPeer, topic: string, first: bool, delay = ZeroDuration
) =
if topic notin g.topics:
return
@@ -281,14 +356,16 @@ proc rewardDelivered*(
# Too old
return
g.withPeerStats(peer.peerId) do (stats: var PeerStats):
g.withPeerStats(peer.peerId) do(stats: var PeerStats):
stats.topicInfos.withValue(topic, tstats):
if first:
tstats[].firstMessageDeliveries.addCapped(
1, topicParams.firstMessageDeliveriesCap)
1, topicParams.firstMessageDeliveriesCap
)
if tstats[].inMesh:
tstats[].meshMessageDeliveries.addCapped(
1, topicParams.meshMessageDeliveriesCap)
do: # make sure we don't lose this information
1, topicParams.meshMessageDeliveriesCap
)
do:
stats.topicInfos[topic] = TopicInfo(meshMessageDeliveries: 1)


@@ -32,16 +32,14 @@ const
GossipSubHistoryLength* = 5
GossipSubHistoryGossip* = 3
# heartbeat interval
# heartbeat interval
GossipSubHeartbeatInterval* = 1.seconds
# fanout ttl
const
GossipSubFanoutTTL* = 1.minutes
const GossipSubFanoutTTL* = 1.minutes
# gossip parameters
const
GossipBackoffPeriod* = 1.minutes
const GossipBackoffPeriod* = 1.minutes
const
BackoffSlackTime* = 2 # seconds
@@ -53,8 +51,7 @@ const
IHaveMaxLength* = 5000
type
TopicInfo* = object
# gossip 1.1 related
TopicInfo* = object # gossip 1.1 related
graftTime*: Moment
meshTime*: Duration
inMesh*: bool
@@ -147,7 +144,8 @@ type
disconnectBadPeers*: bool
enablePX*: bool
bandwidthEstimatebps*: int # This is currently used only for limiting flood publishing. 0 disables flood-limiting completely
bandwidthEstimatebps*: int
# This is currently used only for limiting flood publishing. 0 disables flood-limiting completely
overheadRateLimit*: Opt[tuple[bytes: int, interval: Duration]]
disconnectPeerAboveRateLimit*: bool
@@ -159,23 +157,25 @@ type
ValidationSeenTable* = Table[SaltedId, HashSet[PubSubPeer]]
RoutingRecordsPair* = tuple[id: PeerId, record: Option[PeerRecord]]
RoutingRecordsHandler* =
proc(peer: PeerId,
RoutingRecordsHandler* = proc(
peer: PeerId,
tag: string, # For gossipsub, the topic
peers: seq[RoutingRecordsPair])
{.gcsafe, raises: [].}
peers: seq[RoutingRecordsPair],
) {.gcsafe, raises: [].}
GossipSub* = ref object of FloodSub
mesh*: PeerTable # peers that we send messages to when we are subscribed to the topic
fanout*: PeerTable # peers that we send messages to when we're not subscribed to the topic
gossipsub*: PeerTable # peers that are subscribed to a topic
subscribedDirectPeers*: PeerTable # direct peers that we keep alive
backingOff*: BackoffTable # peers to backoff from when replenishing the mesh
lastFanoutPubSub*: Table[string, Moment] # last publish time for fanout topics
mcache*: MCache # messages cache
validationSeen*: ValidationSeenTable # peers who sent us message in validation
heartbeatFut*: Future[void] # cancellation future for heartbeat interval
scoringHeartbeatFut*: Future[void] # cancellation future for scoring heartbeat interval
mesh*: PeerTable # peers that we send messages to when we are subscribed to the topic
fanout*: PeerTable
# peers that we send messages to when we're not subscribed to the topic
gossipsub*: PeerTable # peers that are subscribed to a topic
subscribedDirectPeers*: PeerTable # directpeers that we keep alive
backingOff*: BackoffTable # peers to backoff from when replenishing the mesh
lastFanoutPubSub*: Table[string, Moment] # last publish time for fanout topics
mcache*: MCache # messages cache
validationSeen*: ValidationSeenTable # peers who sent us message in validation
heartbeatFut*: Future[void] # cancellation future for heartbeat interval
scoringHeartbeatFut*: Future[void]
# cancellation future for scoring heartbeat interval
heartbeatRunning*: bool
peerStats*: Table[PeerId, PeerStats]
@@ -187,8 +187,7 @@ type
heartbeatEvents*: seq[AsyncEvent]
MeshMetrics* = object
# scratch buffers for metrics
MeshMetrics* = object # scratch buffers for metrics
otherPeersPerTopicMesh*: int64
otherPeersPerTopicFanout*: int64
otherPeersPerTopicGossipsub*: int64

@@ -28,8 +28,10 @@ type
func get*(c: MCache, msgId: MessageId): Opt[Message] =
if msgId in c.msgs:
try: Opt.some(c.msgs[msgId])
except KeyError: raiseAssert "checked"
try:
Opt.some(c.msgs[msgId])
except KeyError:
raiseAssert "checked"
else:
Opt.none(Message)
@@ -42,10 +44,9 @@ func put*(c: var MCache, msgId: MessageId, msg: Message) =
c.history[c.pos].add(CacheEntry(msgId: msgId, topic: msg.topic))
func window*(c: MCache, topic: string): HashSet[MessageId] =
let
len = min(c.windowSize, c.history.len)
let len = min(c.windowSize, c.history.len)
for i in 0..<len:
for i in 0 ..< len:
# Work backwards from `pos` in the circular buffer
for entry in c.history[(c.pos + c.history.len - i) mod c.history.len]:
if entry.topic == topic:
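# The index walk above, in isolation: starting at `pos`, step backwards
# through a circular buffer of length `size`, wrapping past zero.
let (pos, size) = (1, 5)
var order: seq[int]
for i in 0 ..< size:
  order.add((pos + size - i) mod size)
assert order == @[1, 0, 4, 3, 2] # newest history slot first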
@@ -62,7 +63,4 @@ func shift*(c: var MCache) =
reset(c.history[c.pos])
func init*(T: type MCache, window, history: Natural): T =
T(
history: newSeq[seq[CacheEntry]](history),
windowSize: window
)
T(history: newSeq[seq[CacheEntry]](history), windowSize: window)

@@ -14,8 +14,7 @@ import ./pubsubpeer, ../../peerid
export tables, sets
type
PeerTable* = Table[string, HashSet[PubSubPeer]] # topic string to peer map
type PeerTable* = Table[string, HashSet[PubSubPeer]] # topic string to peer map
proc hasPeerId*(t: PeerTable, topic: string, peerId: PeerId): bool =
if topic in t:
@@ -23,15 +22,14 @@ proc hasPeerId*(t: PeerTable, topic: string, peerId: PeerId): bool =
for peer in t[topic]:
if peer.peerId == peerId:
return true
except KeyError: raiseAssert "checked with in"
except KeyError:
raiseAssert "checked with in"
false
func addPeer*(table: var PeerTable, topic: string, peer: PubSubPeer): bool =
# returns true if the peer was added,
# false if it was already in the collection
not table.mgetOrPut(topic,
initHashSet[PubSubPeer]())
.containsOrIncl(peer)
not table.mgetOrPut(topic, initHashSet[PubSubPeer]()).containsOrIncl(peer)
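# Behaviour sketch for addPeer, with strings standing in for PubSubPeer:
# containsOrIncl returns true iff the element was already present.
import std/[tables, sets]

var peers: Table[string, HashSet[string]]
assert not peers.mgetOrPut("t", initHashSet[string]()).containsOrIncl("peerA")
assert peers.mgetOrPut("t", initHashSet[string]()).containsOrIncl("peerA")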
func removePeer*(table: var PeerTable, topic: string, peer: PubSubPeer) =
table.withValue(topic, peers):
@@ -42,18 +40,23 @@ func removePeer*(table: var PeerTable, topic: string, peer: PubSubPeer) =
func hasPeer*(table: PeerTable, topic: string, peer: PubSubPeer): bool =
try:
(topic in table) and (peer in table[topic])
except KeyError: raiseAssert "checked with in"
except KeyError:
raiseAssert "checked with in"
func peers*(table: PeerTable, topic: string): int =
if topic in table:
try: table[topic].len
except KeyError: raiseAssert "checked with in"
try:
table[topic].len
except KeyError:
raiseAssert "checked with in"
else:
0
func outboundPeers*(table: PeerTable, topic: string): int =
if topic in table:
try: table[topic].countIt(it.outbound)
except KeyError: raiseAssert "checked with in"
try:
table[topic].countIt(it.outbound)
except KeyError:
raiseAssert "checked with in"
else:
0

@@ -18,17 +18,18 @@
import std/[tables, sequtils, sets, strutils]
import chronos, chronicles, metrics
import chronos/ratelimit
import ./errors as pubsub_errors,
./pubsubpeer,
./rpc/[message, messages, protobuf],
../../switch,
../protocol,
../../crypto/crypto,
../../stream/connection,
../../peerid,
../../peerinfo,
../../errors,
../../utility
import
./errors as pubsub_errors,
./pubsubpeer,
./rpc/[message, messages, protobuf],
../../switch,
../protocol,
../../crypto/crypto,
../../stream/connection,
../../peerid,
../../peerinfo,
../../errors,
../../utility
import stew/results
export results
@@ -50,73 +51,119 @@ declareGauge(libp2p_pubsub_peers, "pubsub peer instances")
declareGauge(libp2p_pubsub_topics, "pubsub subscribed topics")
declareCounter(libp2p_pubsub_subscriptions, "pubsub subscription operations")
declareCounter(libp2p_pubsub_unsubscriptions, "pubsub unsubscription operations")
declareGauge(libp2p_pubsub_topic_handlers, "pubsub subscribed topics handlers count", labels = ["topic"])
declareGauge(
libp2p_pubsub_topic_handlers,
"pubsub subscribed topics handlers count",
labels = ["topic"],
)
declareCounter(libp2p_pubsub_validation_success, "pubsub successfully validated messages")
declareCounter(
libp2p_pubsub_validation_success, "pubsub successfully validated messages"
)
declareCounter(libp2p_pubsub_validation_failure, "pubsub failed validated messages")
declareCounter(libp2p_pubsub_validation_ignore, "pubsub ignore validated messages")
declarePublicCounter(libp2p_pubsub_messages_published, "published messages", labels = ["topic"])
declarePublicCounter(libp2p_pubsub_messages_rebroadcasted, "re-broadcasted messages", labels = ["topic"])
declarePublicCounter(
libp2p_pubsub_messages_published, "published messages", labels = ["topic"]
)
declarePublicCounter(
libp2p_pubsub_messages_rebroadcasted, "re-broadcasted messages", labels = ["topic"]
)
declarePublicCounter(libp2p_pubsub_broadcast_subscriptions, "pubsub broadcast subscriptions", labels = ["topic"])
declarePublicCounter(libp2p_pubsub_broadcast_unsubscriptions, "pubsub broadcast unsubscriptions", labels = ["topic"])
declarePublicCounter(libp2p_pubsub_broadcast_messages, "pubsub broadcast messages", labels = ["topic"])
declarePublicCounter(
libp2p_pubsub_broadcast_subscriptions,
"pubsub broadcast subscriptions",
labels = ["topic"],
)
declarePublicCounter(
libp2p_pubsub_broadcast_unsubscriptions,
"pubsub broadcast unsubscriptions",
labels = ["topic"],
)
declarePublicCounter(
libp2p_pubsub_broadcast_messages, "pubsub broadcast messages", labels = ["topic"]
)
declarePublicCounter(libp2p_pubsub_received_subscriptions, "pubsub received subscriptions", labels = ["topic"])
declarePublicCounter(libp2p_pubsub_received_unsubscriptions, "pubsub received unsubscriptions", labels = ["topic"])
declarePublicCounter(libp2p_pubsub_received_messages, "pubsub received messages", labels = ["topic"])
declarePublicCounter(
libp2p_pubsub_received_subscriptions,
"pubsub received subscriptions",
labels = ["topic"],
)
declarePublicCounter(
libp2p_pubsub_received_unsubscriptions,
"pubsub received subscriptions",
labels = ["topic"],
)
declarePublicCounter(
libp2p_pubsub_received_messages, "pubsub received messages", labels = ["topic"]
)
declarePublicCounter(libp2p_pubsub_broadcast_iwant, "pubsub broadcast iwant")
declarePublicCounter(libp2p_pubsub_broadcast_ihave, "pubsub broadcast ihave", labels = ["topic"])
declarePublicCounter(libp2p_pubsub_broadcast_graft, "pubsub broadcast graft", labels = ["topic"])
declarePublicCounter(libp2p_pubsub_broadcast_prune, "pubsub broadcast prune", labels = ["topic"])
declarePublicCounter(
libp2p_pubsub_broadcast_ihave, "pubsub broadcast ihave", labels = ["topic"]
)
declarePublicCounter(
libp2p_pubsub_broadcast_graft, "pubsub broadcast graft", labels = ["topic"]
)
declarePublicCounter(
libp2p_pubsub_broadcast_prune, "pubsub broadcast prune", labels = ["topic"]
)
declarePublicCounter(libp2p_pubsub_received_iwant, "pubsub received iwant")
declarePublicCounter(libp2p_pubsub_received_ihave, "pubsub received ihave", labels = ["topic"])
declarePublicCounter(libp2p_pubsub_received_graft, "pubsub received graft", labels = ["topic"])
declarePublicCounter(libp2p_pubsub_received_prune, "pubsub received prune", labels = ["topic"])
declarePublicCounter(
libp2p_pubsub_received_ihave, "pubsub received ihave", labels = ["topic"]
)
declarePublicCounter(
libp2p_pubsub_received_graft, "pubsub received graft", labels = ["topic"]
)
declarePublicCounter(
libp2p_pubsub_received_prune, "pubsub received prune", labels = ["topic"]
)
type
InitializationError* = object of LPError
TopicHandler* {.public.} = proc(topic: string,
data: seq[byte]): Future[void] {.gcsafe, raises: [].}
TopicHandler* {.public.} =
proc(topic: string, data: seq[byte]): Future[void] {.gcsafe, raises: [].}
ValidatorHandler* {.public.} = proc(topic: string,
message: Message): Future[ValidationResult] {.gcsafe, raises: [].}
ValidatorHandler* {.public.} = proc(
topic: string, message: Message
): Future[ValidationResult] {.gcsafe, raises: [].}
TopicPair* = tuple[topic: string, handler: TopicHandler]
MsgIdProvider* {.public.} =
proc(m: Message): Result[MessageId, ValidationResult] {.noSideEffect, raises: [], gcsafe.}
MsgIdProvider* {.public.} = proc(m: Message): Result[MessageId, ValidationResult] {.
noSideEffect, raises: [], gcsafe
.}
SubscriptionValidator* {.public.} =
proc(topic: string): bool {.raises: [], gcsafe.}
SubscriptionValidator* {.public.} = proc(topic: string): bool {.raises: [], gcsafe.}
## Every time a peer sends us a subscription (even to an unknown topic),
## we have to store it, which may be an attack vector.
## This callback can be used to reject topics we're not interested in
## (a minimal example follows this hunk).
PubSub* {.public.} = ref object of LPProtocol
switch*: Switch # the switch used to dial/connect to peers
peerInfo*: PeerInfo # this peer's info
topics*: Table[string, seq[TopicHandler]] # the topics that _we_ are interested in
peers*: Table[PeerId, PubSubPeer] #\
switch*: Switch # the switch used to dial/connect to peers
peerInfo*: PeerInfo # this peer's info
topics*: Table[string, seq[TopicHandler]] # the topics that _we_ are interested in
peers*: Table[PeerId, PubSubPeer]
#\
# Peers that we are interested in gossiping with (but not necessarily
# yet connected to)
triggerSelf*: bool ## trigger own local handler on publish
verifySignature*: bool ## enable signature verification
sign*: bool ## enable message signing
triggerSelf*: bool ## trigger own local handler on publish
verifySignature*: bool ## enable signature verification
sign*: bool ## enable message signing
validators*: Table[string, HashSet[ValidatorHandler]]
observers: ref seq[PubSubObserver] # ref as in smart_ptr
msgIdProvider*: MsgIdProvider ## Turn message into message id (not nil)
msgIdProvider*: MsgIdProvider ## Turn message into message id (not nil)
msgSeqno*: uint64
anonymize*: bool ## if we omit fromPeer and seqno from RPC messages we send
subscriptionValidator*: SubscriptionValidator # callback used to validate subscriptions
topicsHigh*: int ## the maximum number of topics a peer is allowed to subscribe to
maxMessageSize*: int ##\
anonymize*: bool ## if we omit fromPeer and seqno from RPC messages we send
subscriptionValidator*: SubscriptionValidator
# callback used to validate subscriptions
topicsHigh*: int ## the maximum number of topics a peer is allowed to subscribe to
maxMessageSize*: int
##\
## the maximum raw message size we'll globally allow
## for finer tuning, check message size on topic validator
##
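# A minimal SubscriptionValidator sketch for the callback type declared
# above (the "/myapp/" prefix is hypothetical): only topics we recognise
# get stored, which bounds the growth of the subscription table.
import std/strutils

proc myAppTopicsOnly(topic: string): bool {.raises: [], gcsafe.} =
  topic.startsWith("/myapp/")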
@@ -137,7 +184,9 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
libp2p_pubsub_peers.set(p.peers.len.int64)
proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool) {.raises: [].} =
proc send*(
p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool
) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to the specified remote peer in the PubSub network.
##
## Parameters:
@@ -152,10 +201,11 @@ proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool) {.rai
peer.send(msg, p.anonymize, isHighPriority)
proc broadcast*(
p: PubSub,
sendPeers: auto, # Iterable[PubSubPeer]
msg: RPCMsg,
isHighPriority: bool) {.raises: [].} =
p: PubSub,
sendPeers: auto, # Iterable[PubSubPeer]
msg: RPCMsg,
isHighPriority: bool,
) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to a specified group of peers in the PubSub network.
##
## Parameters:
@@ -205,8 +255,7 @@ proc broadcast*(
else:
libp2p_pubsub_broadcast_prune.inc(npeers, labelValues = ["generic"])
trace "broadcasting messages to peers",
peers = sendPeers.len, msg = shortLog(msg)
trace "broadcasting messages to peers", peers = sendPeers.len, msg = shortLog(msg)
if anyIt(sendPeers, it.hasObservers):
for peer in sendPeers:
@@ -217,10 +266,9 @@ proc broadcast*(
for peer in sendPeers:
asyncSpawn peer.sendEncoded(encoded, isHighPriority)
proc sendSubs*(p: PubSub,
peer: PubSubPeer,
topics: openArray[string],
subscribe: bool) =
proc sendSubs*(
p: PubSub, peer: PubSubPeer, topics: openArray[string], subscribe: bool
) =
## send subscriptions to remote peer
p.send(peer, RPCMsg.withSubs(topics, subscribe), isHighPriority = true)
@@ -237,8 +285,10 @@ proc sendSubs*(p: PubSub,
libp2p_pubsub_broadcast_unsubscriptions.inc(labelValues = ["generic"])
proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
for i in 0..<min(rpcMsg.subscriptions.len, p.topicsHigh):
template sub(): untyped = rpcMsg.subscriptions[i]
for i in 0 ..< min(rpcMsg.subscriptions.len, p.topicsHigh):
template sub(): untyped =
rpcMsg.subscriptions[i]
if sub.subscribe:
if p.knownTopics.contains(sub.topic):
libp2p_pubsub_received_subscriptions.inc(labelValues = [sub.topic])
@@ -250,7 +300,7 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
else:
libp2p_pubsub_received_unsubscriptions.inc(labelValues = ["generic"])
for i in 0..<rpcMsg.messages.len():
for i in 0 ..< rpcMsg.messages.len():
let topic = rpcMsg.messages[i].topic
if p.knownTopics.contains(topic):
libp2p_pubsub_received_messages.inc(labelValues = [topic])
@@ -275,15 +325,18 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
else:
libp2p_pubsub_received_prune.inc(labelValues = ["generic"])
method rpcHandler*(p: PubSub,
peer: PubSubPeer,
data: seq[byte]): Future[void] {.base, async.} =
method rpcHandler*(
p: PubSub, peer: PubSubPeer, data: seq[byte]
): Future[void] {.base, async.} =
## Handler that must be overridden by concrete implementation
raiseAssert "Unimplemented"
method onNewPeer(p: PubSub, peer: PubSubPeer) {.base, gcsafe.} = discard
method onNewPeer(p: PubSub, peer: PubSubPeer) {.base, gcsafe.} =
discard
method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubSubPeerEvent) {.base, gcsafe.} =
method onPubSubPeerEvent*(
p: PubSub, peer: PubSubPeer, event: PubSubPeerEvent
) {.base, gcsafe.} =
# Peer event is raised for the send connection in particular
case event.kind
of PubSubPeerEventKind.StreamOpened:
@@ -294,12 +347,9 @@ method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubSubPeerEvent) {
of PubSubPeerEventKind.DisconnectionRequested:
discard
method getOrCreatePeer*(
p: PubSub,
peerId: PeerId,
protos: seq[string]): PubSubPeer {.base, gcsafe.} =
p: PubSub, peerId: PeerId, protos: seq[string]
): PubSubPeer {.base, gcsafe.} =
p.peers.withValue(peerId, peer):
return peer[]
@@ -326,7 +376,7 @@ method getOrCreatePeer*(
proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
# Start work on all data handlers without copying data into a closure, as
# happens with the {.async.} transformation
p.topics.withValue(topic, handlers) do:
p.topics.withValue(topic, handlers):
var futs = newSeq[Future[void]]()
for handler in handlers[]:
@@ -343,7 +393,7 @@ proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
except CancelledError:
# propagate cancellation
for fut in futs:
if not(fut.finished):
if not (fut.finished):
fut.cancel()
# check for errors in futures
@@ -351,6 +401,7 @@ proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
if fut.failed:
let err = fut.readError()
warn "Error in topic handler", msg = err.msg
return waiter()
# Fast path - futures finished synchronously or nobody cared about data
@@ -358,9 +409,7 @@ proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
res.complete()
return res
method handleConn*(p: PubSub,
conn: Connection,
proto: string) {.base, async.} =
method handleConn*(p: PubSub, conn: Connection, proto: string) {.base, async.} =
## handle incoming connections
##
## this proc will:
@@ -402,18 +451,21 @@ proc updateTopicMetrics(p: PubSub, topic: string) =
libp2p_pubsub_topics.set(p.topics.len.int64)
if p.knownTopics.contains(topic):
p.topics.withValue(topic, handlers) do:
p.topics.withValue(topic, handlers):
libp2p_pubsub_topic_handlers.set(handlers[].len.int64, labelValues = [topic])
do:
libp2p_pubsub_topic_handlers.set(0, labelValues = [topic])
else:
var others: int64 = 0
for key, val in p.topics:
if key notin p.knownTopics: others += 1
if key notin p.knownTopics:
others += 1
libp2p_pubsub_topic_handlers.set(others, labelValues = ["other"])
method onTopicSubscription*(p: PubSub, topic: string, subscribed: bool) {.base, gcsafe.} =
method onTopicSubscription*(
p: PubSub, topic: string, subscribed: bool
) {.base, gcsafe.} =
# Called when subscribe is called the first time for a topic or unsubscribe
# removes the last handler
@@ -430,9 +482,7 @@ method onTopicSubscription*(p: PubSub, topic: string, subscribed: bool) {.base,
else:
libp2p_pubsub_unsubscriptions.inc()
proc unsubscribe*(p: PubSub,
topic: string,
handler: TopicHandler) {.public.} =
proc unsubscribe*(p: PubSub, topic: string, handler: TopicHandler) {.public.} =
## unsubscribe from a ``topic`` string
##
p.topics.withValue(topic, handlers):
@@ -461,9 +511,7 @@ proc unsubscribeAll*(p: PubSub, topic: string) {.public, gcsafe.} =
p.updateTopicMetrics(topic)
proc subscribe*(p: PubSub,
topic: string,
handler: TopicHandler) {.public.} =
proc subscribe*(p: PubSub, topic: string, handler: TopicHandler) {.public.} =
## subscribe to a topic
##
## ``topic`` - a string topic to subscribe to
@@ -477,7 +525,7 @@ proc subscribe*(p: PubSub,
warn "Trying to subscribe to a topic not passing validation!", topic
return
p.topics.withValue(topic, handlers) do:
p.topics.withValue(topic, handlers):
# Already subscribed, just adding another handler
handlers[].add(handler)
do:
@@ -489,9 +537,9 @@ proc subscribe*(p: PubSub,
p.updateTopicMetrics(topic)
method publish*(p: PubSub,
topic: string,
data: seq[byte]): Future[int] {.base, async, public.} =
method publish*(
p: PubSub, topic: string, data: seq[byte]
): Future[int] {.base, async, public.} =
## publish to a ``topic``
##
## The return value is the number of neighbours that we attempted to send the
@@ -503,16 +551,15 @@ method publish*(p: PubSub,
return 0
method initPubSub*(p: PubSub)
{.base, raises: [InitializationError].} =
method initPubSub*(p: PubSub) {.base, raises: [InitializationError].} =
## perform pubsub initialization
p.observers = new(seq[PubSubObserver])
if p.msgIdProvider == nil:
p.msgIdProvider = defaultMsgIdProvider
method addValidator*(p: PubSub,
topic: varargs[string],
hook: ValidatorHandler) {.base, public, gcsafe.} =
method addValidator*(
p: PubSub, topic: varargs[string], hook: ValidatorHandler
) {.base, public, gcsafe.} =
## Add a validator to a `topic`. Each new message received in this
## topic will be sent to `hook`. `hook` can return either `Accept`,
## `Ignore` or `Reject` (which can descore the peer)
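# Usage sketch (assumes chronos and the pubsub modules in this diff are
# imported; `gossipSub` and the 1 KiB cap are hypothetical):
proc sizeValidator(
    topic: string, message: Message
): Future[ValidationResult] {.async.} =
  return
    if message.data.len <= 1024:
      ValidationResult.Accept
    else:
      ValidationResult.Reject # Reject can descore the sender

gossipSub.addValidator("my-topic", sizeValidator)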
@@ -520,16 +567,18 @@ method addValidator*(p: PubSub,
trace "adding validator for topic", topic = t
p.validators.mgetOrPut(t, HashSet[ValidatorHandler]()).incl(hook)
method removeValidator*(p: PubSub,
topic: varargs[string],
hook: ValidatorHandler) {.base, public.} =
method removeValidator*(
p: PubSub, topic: varargs[string], hook: ValidatorHandler
) {.base, public.} =
for t in topic:
p.validators.withValue(t, validators):
validators[].excl(hook)
if validators[].len() == 0:
p.validators.del(t)
method validate*(p: PubSub, message: Message): Future[ValidationResult] {.async, base.} =
method validate*(
p: PubSub, message: Message
): Future[ValidationResult] {.async, base.} =
var pending: seq[Future[ValidationResult]]
trace "about to validate message"
let topic = message.topic
@@ -561,21 +610,22 @@ method validate*(p: PubSub, message: Message): Future[ValidationResult] {.async,
libp2p_pubsub_validation_ignore.inc()
proc init*[PubParams: object | bool](
P: typedesc[PubSub],
switch: Switch,
triggerSelf: bool = false,
anonymize: bool = false,
verifySignature: bool = true,
sign: bool = true,
msgIdProvider: MsgIdProvider = defaultMsgIdProvider,
subscriptionValidator: SubscriptionValidator = nil,
maxMessageSize: int = 1024 * 1024,
rng: ref HmacDrbgContext = newRng(),
parameters: PubParams = false): P
{.raises: [InitializationError], public.} =
P: typedesc[PubSub],
switch: Switch,
triggerSelf: bool = false,
anonymize: bool = false,
verifySignature: bool = true,
sign: bool = true,
msgIdProvider: MsgIdProvider = defaultMsgIdProvider,
subscriptionValidator: SubscriptionValidator = nil,
maxMessageSize: int = 1024 * 1024,
rng: ref HmacDrbgContext = newRng(),
parameters: PubParams = false,
): P {.raises: [InitializationError], public.} =
let pubsub =
when PubParams is bool:
P(switch: switch,
P(
switch: switch,
peerInfo: switch.peerInfo,
triggerSelf: triggerSelf,
anonymize: anonymize,
@@ -585,9 +635,11 @@ proc init*[PubParams: object | bool](
subscriptionValidator: subscriptionValidator,
maxMessageSize: maxMessageSize,
rng: rng,
topicsHigh: int.high)
topicsHigh: int.high,
)
else:
P(switch: switch,
P(
switch: switch,
peerInfo: switch.peerInfo,
triggerSelf: triggerSelf,
anonymize: anonymize,
@@ -598,7 +650,8 @@ proc init*[PubParams: object | bool](
parameters: parameters,
maxMessageSize: maxMessageSize,
rng: rng,
topicsHigh: int.high)
topicsHigh: int.high,
)
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
if event.kind == PeerEventKind.Joined:
@@ -615,9 +668,10 @@ proc init*[PubParams: object | bool](
return pubsub
proc addObserver*(p: PubSub; observer: PubSubObserver) {.public.} = p.observers[] &= observer
proc addObserver*(p: PubSub, observer: PubSubObserver) {.public.} =
p.observers[] &= observer
proc removeObserver*(p: PubSub; observer: PubSubObserver) {.public.} =
proc removeObserver*(p: PubSub, observer: PubSubObserver) {.public.} =
let idx = p.observers[].find(observer)
if idx != -1:
p.observers[].del(idx)

@@ -13,13 +13,14 @@ import std/[sequtils, strutils, tables, hashes, options, sets, deques]
import stew/results
import chronos, chronicles, nimcrypto/sha2, metrics
import chronos/ratelimit
import rpc/[messages, message, protobuf],
../../peerid,
../../peerinfo,
../../stream/connection,
../../crypto/crypto,
../../protobuf/minprotobuf,
../../utility
import
rpc/[messages, message, protobuf],
../../peerid,
../../peerinfo,
../../stream/connection,
../../crypto/crypto,
../../protobuf/minprotobuf,
../../utility
export peerid, connection, deques
@@ -27,36 +28,58 @@ logScope:
topics = "libp2p pubsubpeer"
when defined(libp2p_expensive_metrics):
declareCounter(libp2p_pubsub_sent_messages, "number of messages sent", labels = ["id", "topic"])
declareCounter(libp2p_pubsub_skipped_received_messages, "number of received skipped messages", labels = ["id"])
declareCounter(libp2p_pubsub_skipped_sent_messages, "number of sent skipped messages", labels = ["id"])
declareCounter(
libp2p_pubsub_sent_messages, "number of messages sent", labels = ["id", "topic"]
)
declareCounter(
libp2p_pubsub_skipped_received_messages,
"number of received skipped messages",
labels = ["id"],
)
declareCounter(
libp2p_pubsub_skipped_sent_messages,
"number of sent skipped messages",
labels = ["id"],
)
when defined(pubsubpeer_queue_metrics):
declareGauge(libp2p_gossipsub_priority_queue_size, "the number of messages in the priority queue", labels = ["id"])
declareGauge(libp2p_gossipsub_non_priority_queue_size, "the number of messages in the non-priority queue", labels = ["id"])
declareGauge(
libp2p_gossipsub_priority_queue_size,
"the number of messages in the priority queue",
labels = ["id"],
)
declareGauge(
libp2p_gossipsub_non_priority_queue_size,
"the number of messages in the non-priority queue",
labels = ["id"],
)
declareCounter(libp2p_pubsub_disconnects_over_non_priority_queue_limit, "number of peers disconnected due to over non-prio queue capacity")
declareCounter(
libp2p_pubsub_disconnects_over_non_priority_queue_limit,
"number of peers disconnected due to over non-prio queue capacity",
)
const
DefaultMaxNumElementsInNonPriorityQueue* = 1024
const DefaultMaxNumElementsInNonPriorityQueue* = 1024
type
PeerRateLimitError* = object of CatchableError
PubSubObserver* = ref object
onRecv*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}
onSend*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}
onRecv*: proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
onSend*: proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
PubSubPeerEventKind* {.pure.} = enum
StreamOpened
StreamClosed
DisconnectionRequested # tells gossipsub that the transport connection to the peer should be closed
DisconnectionRequested
# tells gossipsub that the transport connection to the peer should be closed
PubSubPeerEvent* = object
kind*: PubSubPeerEventKind
GetConn* = proc(): Future[Connection] {.gcsafe, raises: [].}
DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [].} # have to pass peer as it's unknown during init
DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [].}
# have to pass peer as it's unknown during init
OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [].}
RpcMessageQueue* = ref object
@@ -68,10 +91,10 @@ type
sendNonPriorityTask: Future[void]
PubSubPeer* = ref object of RootObj
getConn*: GetConn # callback to establish a new send connection
onEvent*: OnEvent # Connectivity updates for peer
codec*: string # the protocol that this peer joined from
sendConn*: Connection # cached send connection
getConn*: GetConn # callback to establish a new send connection
onEvent*: OnEvent # Connectivity updates for peer
codec*: string # the protocol that this peer joined from
sendConn*: Connection # cached send connection
connectedFut: Future[void]
address*: Option[MultiAddress]
peerId*: PeerId
@@ -92,11 +115,12 @@ type
overheadRateLimitOpt*: Opt[TokenBucket]
rpcmessagequeue: RpcMessageQueue
maxNumElementsInNonPriorityQueue*: int # The max number of elements allowed in the non-priority queue.
maxNumElementsInNonPriorityQueue*: int
# The max number of elements allowed in the non-priority queue.
disconnected: bool
RPCHandler* = proc(peer: PubSubPeer, data: seq[byte]): Future[void]
{.gcsafe, raises: [].}
RPCHandler* =
proc(peer: PubSubPeer, data: seq[byte]): Future[void] {.gcsafe, raises: [].}
when defined(libp2p_agents_metrics):
func shortAgent*(p: PubSubPeer): string =
@@ -110,10 +134,7 @@ when defined(libp2p_agents_metrics):
proc getAgent*(peer: PubSubPeer): string =
return
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
if peer.shortAgent.len > 0: peer.shortAgent else: "unknown"
else:
"unknown"
@@ -124,13 +145,15 @@ func `==`*(a, b: PubSubPeer): bool =
a.peerId == b.peerId
func shortLog*(p: PubSubPeer): string =
if p.isNil: "PubSubPeer(nil)"
else: shortLog(p.peerId)
chronicles.formatIt(PubSubPeer): shortLog(it)
if p.isNil:
"PubSubPeer(nil)"
else:
shortLog(p.peerId)
chronicles.formatIt(PubSubPeer):
shortLog(it)
proc connected*(p: PubSubPeer): bool =
not p.sendConn.isNil and not
(p.sendConn.closed or p.sendConn.atEof)
not p.sendConn.isNil and not (p.sendConn.closed or p.sendConn.atEof)
proc hasObservers*(p: PubSubPeer): bool =
p.observers != nil and anyIt(p.observers[], it != nil)
@@ -140,28 +163,24 @@ func outbound*(p: PubSubPeer): bool =
# in order to give priority to connections we make
# https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#outbound-mesh-quotas
# This behaviour is prescribed to counter sybil attacks and ensures that a coordinated inbound attack can never fully take over the mesh
if not p.sendConn.isNil and p.sendConn.transportDir == Direction.Out:
true
else:
false
if not p.sendConn.isNil and p.sendConn.transportDir == Direction.Out: true else: false
proc recvObservers*(p: PubSubPeer, msg: var RPCMsg) =
# trigger hooks
if not(isNil(p.observers)) and p.observers[].len > 0:
if not (isNil(p.observers)) and p.observers[].len > 0:
for obs in p.observers[]:
if not(isNil(obs)): # TODO: should never be nil, but...
if not (isNil(obs)): # TODO: should never be nil, but...
obs.onRecv(p, msg)
proc sendObservers(p: PubSubPeer, msg: var RPCMsg) =
# trigger hooks
if not(isNil(p.observers)) and p.observers[].len > 0:
if not (isNil(p.observers)) and p.observers[].len > 0:
for obs in p.observers[]:
if not(isNil(obs)): # TODO: should never be nil, but...
if not (isNil(obs)): # TODO: should never be nil, but...
obs.onSend(p, msg)
proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
debug "starting pubsub read loop",
conn, peer = p, closed = conn.closed
debug "starting pubsub read loop", conn, peer = p, closed = conn.closed
try:
try:
while not conn.atEof:
@@ -169,13 +188,13 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
var data = await conn.readLp(p.maxMessageSize)
trace "read data from peer",
conn, peer = p, closed = conn.closed,
data = data.shortLog
conn, peer = p, closed = conn.closed, data = data.shortLog
await p.handler(p, data)
data = newSeq[byte]() # Release memory
except PeerRateLimitError as exc:
debug "Peer rate limit exceeded, exiting read while", conn, peer = p, error = exc.msg
debug "Peer rate limit exceeded, exiting read while",
conn, peer = p, error = exc.msg
except CatchableError as exc:
debug "Exception occurred in PubSubPeer.handle",
conn, peer = p, closed = conn.closed, exc = exc.msg
@@ -189,8 +208,7 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
trace "Exception occurred in PubSubPeer.handle",
conn, peer = p, closed = conn.closed, exc = exc.msg
finally:
debug "exiting pubsub read loop",
conn, peer = p, closed = conn.closed
debug "exiting pubsub read loop", conn, peer = p, closed = conn.closed
proc closeSendConn(p: PubSubPeer, event: PubSubPeerEventKind) {.async.} =
if p.sendConn != nil:
@@ -229,7 +247,11 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
# to be completed, or onEvent to be called later
p.connectedFut.complete()
p.sendConn = newConn
p.address = if p.sendConn.observedAddr.isSome: some(p.sendConn.observedAddr.get) else: none(MultiAddress)
p.address =
if p.sendConn.observedAddr.isSome:
some(p.sendConn.observedAddr.get)
else:
none(MultiAddress)
if p.onEvent != nil:
p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.StreamOpened))
@@ -281,8 +303,8 @@ proc clearSendPriorityQueue(p: PubSubPeer) =
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_priority_queue_size.set(
value = p.rpcmessagequeue.sendPriorityQueue.len.int64,
labelValues = [$p.peerId])
value = p.rpcmessagequeue.sendPriorityQueue.len.int64, labelValues = [$p.peerId]
)
proc sendMsgContinue(conn: Connection, msgFut: Future[void]) {.async.} =
# Continuation for a pending `sendMsg` future from below
@@ -344,14 +366,17 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[v
# When queues are empty, skipping the non-priority queue for low priority
# messages reduces latency
let emptyQueues =
(p.rpcmessagequeue.sendPriorityQueue.len() +
p.rpcmessagequeue.nonPriorityQueue.len()) == 0
(
p.rpcmessagequeue.sendPriorityQueue.len() +
p.rpcmessagequeue.nonPriorityQueue.len()
) == 0
if msg.len <= 0:
debug "empty message, skipping", p, msg = shortLog(msg)
Future[void].completed()
elif msg.len > p.maxMessageSize:
info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
info "trying to send a msg too big for pubsub",
maxSize = p.maxMessageSize, msgSize = msg.len
Future[void].completed()
elif isHighPriority or emptyQueues:
let f = p.sendMsg(msg)
@@ -374,7 +399,9 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[v
libp2p_gossipsub_non_priority_queue_size.inc(labelValues = [$p.peerId])
f
iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize: bool): seq[byte] =
iterator splitRPCMsg(
peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize: bool
): seq[byte] =
## This iterator takes an `RPCMsg` and sequentially repackages its Messages into new `RPCMsg` instances.
## Each new `RPCMsg` accumulates Messages until reaching the specified `maxSize`. If a single Message
## exceeds the `maxSize` when trying to fit into an empty `RPCMsg`, the latter is skipped as too large to send.
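# The packing rule in isolation, with plain sizes standing in for encoded
# Messages (1.1 is the 10% protobuf-overhead guess used in the hunk below):
proc packBySize(sizes: seq[int], maxSize: int): seq[seq[int]] =
  var batch: seq[int]
  var batchSize = 0
  for s in sizes:
    if float(batchSize + s) * 1.1 > float(maxSize):
      if batch.len == 0:
        continue # a lone element that can never fit is skipped
      result.add(batch) # flush, then keep accumulating
      batch = @[]
      batchSize = 0
    batch.add(s)
    batchSize += s
  if batch.len > 0:
    result.add(batch)

assert packBySize(@[40, 40, 40], 100) == @[@[40, 40], @[40]]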
@@ -389,7 +416,8 @@ iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize:
let msgSize = byteSize(msg)
# Check if adding the next message will exceed maxSize
if float(currentSize + msgSize) * 1.1 > float(maxSize): # Guessing 10% protobuf overhead
if float(currentSize + msgSize) * 1.1 > float(maxSize):
# Guessing 10% protobuf overhead
if currentRPCMsg.messages.len == 0:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
continue # Skip this message
@@ -409,7 +437,9 @@ iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize:
else:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool) {.raises: [].} =
proc send*(
p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool
) {.raises: [].} =
## Asynchronously sends an `RPCMsg` to a specified `PubSubPeer` with an option for anonymization.
##
## Parameters:
@@ -424,17 +454,18 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool) {.
# or malicious data on the wire - in particular, re-encoding protects against
# some forms of valid but redundantly encoded protobufs with unknown or
# duplicated fields
let encoded = if p.hasObservers():
var mm = msg
# trigger send hooks
p.sendObservers(mm)
sendMetrics(mm)
encodeRpcMsg(mm, anonymize)
else:
# If there are no send hooks, we redundantly re-encode the message to
# protobuf for every peer - this could easily be improved!
sendMetrics(msg)
encodeRpcMsg(msg, anonymize)
let encoded =
if p.hasObservers():
var mm = msg
# trigger send hooks
p.sendObservers(mm)
sendMetrics(mm)
encodeRpcMsg(mm, anonymize)
else:
# If there are no send hooks, we redundantly re-encode the message to
# protobuf for every peer - this could easily be improved!
sendMetrics(msg)
encodeRpcMsg(msg, anonymize)
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
@@ -453,21 +484,21 @@ proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
proc sendNonPriorityTask(p: PubSubPeer) {.async.} =
while true:
# we send non-priority messages only if there are no pending priority messages
let msg = await p.rpcmessagequeue.nonPriorityQueue.popFirst()
while p.rpcmessagequeue.sendPriorityQueue.len > 0:
p.clearSendPriorityQueue()
# waiting for the last future minimizes the number of times we have to
# wait for something (each wait = performance cost) -
# clearSendPriorityQueue ensures we're not waiting for an already-finished
# future
if p.rpcmessagequeue.sendPriorityQueue.len > 0:
# we send non-priority messages only if there are no pending priority messages
let msg = await p.rpcmessagequeue.nonPriorityQueue.popFirst()
while p.rpcmessagequeue.sendPriorityQueue.len > 0:
p.clearSendPriorityQueue()
# waiting for the last future minimizes the number of times we have to
# wait for something (each wait = performance cost) -
# clearSendPriorityQueue ensures we're not waiting for an already-finished
# future
if p.rpcmessagequeue.sendPriorityQueue.len > 0:
# `race` prevents `p.rpcmessagequeue.sendPriorityQueue[^1]` from being
# cancelled when this task is cancelled
discard await race(p.rpcmessagequeue.sendPriorityQueue[^1])
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_non_priority_queue_size.dec(labelValues = [$p.peerId])
await p.sendMsg(msg)
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_non_priority_queue_size.dec(labelValues = [$p.peerId])
await p.sendMsg(msg)
proc startSendNonPriorityTask(p: PubSubPeer) =
debug "starting sendNonPriorityTask", p
@@ -489,19 +520,19 @@ proc stopSendNonPriorityTask*(p: PubSubPeer) =
proc new(T: typedesc[RpcMessageQueue]): T =
return T(
sendPriorityQueue: initDeque[Future[void]](),
nonPriorityQueue: newAsyncQueue[seq[byte]]()
nonPriorityQueue: newAsyncQueue[seq[byte]](),
)
proc new*(
T: typedesc[PubSubPeer],
peerId: PeerId,
getConn: GetConn,
onEvent: OnEvent,
codec: string,
maxMessageSize: int,
maxNumElementsInNonPriorityQueue: int = DefaultMaxNumElementsInNonPriorityQueue,
overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket)): T =
T: typedesc[PubSubPeer],
peerId: PeerId,
getConn: GetConn,
onEvent: OnEvent,
codec: string,
maxMessageSize: int,
maxNumElementsInNonPriorityQueue: int = DefaultMaxNumElementsInNonPriorityQueue,
overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket),
): T =
result = T(
getConn: getConn,
onEvent: onEvent,
@@ -511,7 +542,7 @@ proc new*(
maxMessageSize: maxMessageSize,
overheadRateLimitOpt: overheadRateLimitOpt,
rpcmessagequeue: RpcMessageQueue.new(),
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue,
)
result.sentIHaves.addFirst(default(HashSet[MessageId]))
result.heDontWants.addFirst(default(HashSet[SaltedId]))

@@ -10,13 +10,14 @@
{.push raises: [].}
import chronicles, metrics, stew/[byteutils, endians2]
import ./messages,
./protobuf,
../../../peerid,
../../../peerinfo,
../../../crypto/crypto,
../../../protobuf/minprotobuf,
../../../protocols/pubsub/errors
import
./messages,
./protobuf,
../../../peerid,
../../../peerinfo,
../../../crypto/crypto,
../../../protobuf/minprotobuf,
../../../protocols/pubsub/errors
export errors, messages
@@ -25,7 +26,9 @@ logScope:
const PubSubPrefix = toBytes("libp2p-pubsub:")
declareCounter(libp2p_pubsub_sig_verify_success, "pubsub successfully validated messages")
declareCounter(
libp2p_pubsub_sig_verify_success, "pubsub successfully validated messages"
)
declareCounter(libp2p_pubsub_sig_verify_failure, "pubsub failed validated messages")
func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
@@ -36,7 +39,7 @@ func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
err ValidationResult.Reject
proc sign*(msg: Message, privateKey: PrivateKey): CryptoResult[seq[byte]] =
ok((? privateKey.sign(PubSubPrefix & encodeMessage(msg, false))).getBytes())
ok((?privateKey.sign(PubSubPrefix & encodeMessage(msg, false))).getBytes())
proc verify*(m: Message): bool =
if m.signature.len > 0 and m.key.len > 0:
@@ -61,8 +64,8 @@ proc init*(
data: seq[byte],
topic: string,
seqno: Option[uint64],
sign: bool = true): Message
{.gcsafe, raises: [LPError].} =
sign: bool = true,
): Message {.gcsafe, raises: [LPError].} =
var msg = Message(data: data, topic: topic)
# order matters, we want to include seqno in the signature
@@ -73,10 +76,14 @@ proc init*(
msg.fromPeer = peer.peerId
if sign:
msg.signature = sign(msg, peer.privateKey).expect("Couldn't sign message!")
msg.key = peer.privateKey.getPublicKey().expect("Invalid private key!")
.getBytes().expect("Couldn't get public key bytes!")
msg.key = peer.privateKey
.getPublicKey()
.expect("Invalid private key!")
.getBytes()
.expect("Couldn't get public key bytes!")
else:
if sign: raise (ref LPError)(msg: "Cannot sign message without peer info")
if sign:
raise (ref LPError)(msg: "Cannot sign message without peer info")
msg
@@ -85,8 +92,8 @@ proc init*(
peerId: PeerId,
data: seq[byte],
topic: string,
seqno: Option[uint64]): Message
{.gcsafe, raises: [LPError].} =
seqno: Option[uint64],
): Message {.gcsafe, raises: [LPError].} =
var msg = Message(data: data, topic: topic)
msg.fromPeer = peerId

@@ -10,21 +10,27 @@
{.push raises: [].}
import options, sequtils
import ../../../[
peerid,
routing_record,
utility
]
import ../../../[peerid, routing_record, utility]
export options
proc expectedFields[T](t: typedesc[T], existingFieldNames: seq[string]) {.raises: [CatchableError].} =
proc expectedFields[T](
t: typedesc[T], existingFieldNames: seq[string]
) {.raises: [CatchableError].} =
var fieldNames: seq[string]
for name, _ in fieldPairs(T()):
fieldNames &= name
if fieldNames != existingFieldNames:
fieldNames.keepIf(proc(it: string): bool = it notin existingFieldNames)
raise newException(CatchableError, $T & " fields changed, please search for and revise all relevant procs. New fields: " & $fieldNames)
fieldNames.keepIf(
proc(it: string): bool =
it notin existingFieldNames
)
raise newException(
CatchableError,
$T &
" fields changed, please search for and revise all relevant procs. New fields: " &
$fieldNames,
)
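# The same guard in isolation: a compile-time snapshot of an object's field
# names, so adding or renaming a field fails the build at the check site.
type Point = object
  x, y: int

static:
  var names: seq[string]
  for name, _ in fieldPairs(Point()):
    names &= name
  assert names == @["x", "y"]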
type
PeerInfoMsg* = object
@@ -80,38 +86,27 @@ type
ping*: seq[byte]
pong*: seq[byte]
func withSubs*(
T: type RPCMsg, topics: openArray[string], subscribe: bool): T =
T(
subscriptions: topics.mapIt(SubOpts(subscribe: subscribe, topic: it)))
func withSubs*(T: type RPCMsg, topics: openArray[string], subscribe: bool): T =
T(subscriptions: topics.mapIt(SubOpts(subscribe: subscribe, topic: it)))
func shortLog*(s: ControlIHave): auto =
(
topic: s.topicID.shortLog,
messageIDs: mapIt(s.messageIDs, it.shortLog)
)
(topic: s.topicID.shortLog, messageIDs: mapIt(s.messageIDs, it.shortLog))
func shortLog*(s: ControlIWant): auto =
(
messageIDs: mapIt(s.messageIDs, it.shortLog)
)
(messageIDs: mapIt(s.messageIDs, it.shortLog))
func shortLog*(s: ControlGraft): auto =
(
topic: s.topicID.shortLog
)
(topic: s.topicID.shortLog)
func shortLog*(s: ControlPrune): auto =
(
topic: s.topicID.shortLog
)
(topic: s.topicID.shortLog)
func shortLog*(c: ControlMessage): auto =
(
ihave: mapIt(c.ihave, it.shortLog),
iwant: mapIt(c.iwant, it.shortLog),
graft: mapIt(c.graft, it.shortLog),
prune: mapIt(c.prune, it.shortLog)
prune: mapIt(c.prune, it.shortLog),
)
func shortLog*(msg: Message): auto =
@@ -121,25 +116,28 @@ func shortLog*(msg: Message): auto =
seqno: msg.seqno.shortLog,
topic: msg.topic,
signature: msg.signature.shortLog,
key: msg.key.shortLog
key: msg.key.shortLog,
)
func shortLog*(m: RPCMsg): auto =
(
subscriptions: m.subscriptions,
messages: mapIt(m.messages, it.shortLog),
control: m.control.get(ControlMessage()).shortLog
control: m.control.get(ControlMessage()).shortLog,
)
static: expectedFields(PeerInfoMsg, @["peerId", "signedPeerRecord"])
static:
expectedFields(PeerInfoMsg, @["peerId", "signedPeerRecord"])
proc byteSize(peerInfo: PeerInfoMsg): int =
peerInfo.peerId.len + peerInfo.signedPeerRecord.len
static: expectedFields(SubOpts, @["subscribe", "topic"])
static:
expectedFields(SubOpts, @["subscribe", "topic"])
proc byteSize(subOpts: SubOpts): int =
1 + subOpts.topic.len # 1 byte for the bool
static: expectedFields(Message, @["fromPeer", "data", "seqno", "topic", "signature", "key"])
static:
expectedFields(Message, @["fromPeer", "data", "seqno", "topic", "signature", "key"])
proc byteSize*(msg: Message): int =
msg.fromPeer.len + msg.data.len + msg.seqno.len + msg.signature.len + msg.key.len +
msg.topic.len
@@ -147,37 +145,45 @@ proc byteSize*(msg: Message): int =
proc byteSize*(msgs: seq[Message]): int =
msgs.foldl(a + b.byteSize, 0)
static: expectedFields(ControlIHave, @["topicID", "messageIDs"])
static:
expectedFields(ControlIHave, @["topicID", "messageIDs"])
proc byteSize(controlIHave: ControlIHave): int =
controlIHave.topicID.len + controlIHave.messageIDs.foldl(a + b.len, 0)
proc byteSize*(ihaves: seq[ControlIHave]): int =
ihaves.foldl(a + b.byteSize, 0)
static: expectedFields(ControlIWant, @["messageIDs"])
static:
expectedFields(ControlIWant, @["messageIDs"])
proc byteSize(controlIWant: ControlIWant): int =
controlIWant.messageIDs.foldl(a + b.len, 0)
proc byteSize*(iwants: seq[ControlIWant]): int =
iwants.foldl(a + b.byteSize, 0)
static: expectedFields(ControlGraft, @["topicID"])
static:
expectedFields(ControlGraft, @["topicID"])
proc byteSize(controlGraft: ControlGraft): int =
controlGraft.topicID.len
static: expectedFields(ControlPrune, @["topicID", "peers", "backoff"])
static:
expectedFields(ControlPrune, @["topicID", "peers", "backoff"])
proc byteSize(controlPrune: ControlPrune): int =
controlPrune.topicID.len + controlPrune.peers.foldl(a + b.byteSize, 0) + 8 # 8 bytes for uint64
controlPrune.topicID.len + controlPrune.peers.foldl(a + b.byteSize, 0) + 8
# 8 bytes for uint64
static: expectedFields(ControlMessage, @["ihave", "iwant", "graft", "prune", "idontwant"])
static:
expectedFields(ControlMessage, @["ihave", "iwant", "graft", "prune", "idontwant"])
proc byteSize(control: ControlMessage): int =
control.ihave.foldl(a + b.byteSize, 0) + control.iwant.foldl(a + b.byteSize, 0) +
control.graft.foldl(a + b.byteSize, 0) + control.prune.foldl(a + b.byteSize, 0) +
control.idontwant.foldl(a + b.byteSize, 0)
control.graft.foldl(a + b.byteSize, 0) + control.prune.foldl(a + b.byteSize, 0) +
control.idontwant.foldl(a + b.byteSize, 0)
static: expectedFields(RPCMsg, @["subscriptions", "messages", "control", "ping", "pong"])
static:
expectedFields(RPCMsg, @["subscriptions", "messages", "control", "ping", "pong"])
proc byteSize*(rpc: RPCMsg): int =
result = rpc.subscriptions.foldl(a + b.byteSize, 0) + byteSize(rpc.messages) +
rpc.ping.len + rpc.pong.len
result =
rpc.subscriptions.foldl(a + b.byteSize, 0) + byteSize(rpc.messages) + rpc.ping.len +
rpc.pong.len
rpc.control.withValue(ctrl):
result += ctrl.byteSize

@@ -12,11 +12,7 @@
import options
import stew/assign2
import chronicles
import messages,
../../../peerid,
../../../utility,
../../../protobuf/minprotobuf
import messages, ../../../peerid, ../../../utility, ../../../protobuf/minprotobuf
logScope:
topics = "libp2p pubsubprotobuf"
@@ -24,8 +20,12 @@ logScope:
when defined(libp2p_protobuf_metrics):
import metrics
declareCounter(libp2p_pubsub_rpc_bytes_read, "pubsub rpc bytes read", labels = ["kind"])
declareCounter(libp2p_pubsub_rpc_bytes_write, "pubsub rpc bytes write", labels = ["kind"])
declareCounter(
libp2p_pubsub_rpc_bytes_read, "pubsub rpc bytes read", labels = ["kind"]
)
declareCounter(
libp2p_pubsub_rpc_bytes_write, "pubsub rpc bytes write", labels = ["kind"]
)
proc write*(pb: var ProtoBuffer, field: int, graft: ControlGraft) =
var ipb = initProtoBuffer()
@@ -125,64 +125,61 @@ proc encodeMessage*(msg: Message, anonymize: bool): seq[byte] =
proc write*(pb: var ProtoBuffer, field: int, msg: Message, anonymize: bool) =
pb.write(field, encodeMessage(msg, anonymize))
proc decodeGraft*(pb: ProtoBuffer): ProtoResult[ControlGraft] {.
inline.} =
proc decodeGraft*(pb: ProtoBuffer): ProtoResult[ControlGraft] {.inline.} =
when defined(libp2p_protobuf_metrics):
libp2p_pubsub_rpc_bytes_read.inc(pb.getLen().int64, labelValues = ["graft"])
trace "decodeGraft: decoding message"
var control = ControlGraft()
if ? pb.getField(1, control.topicID):
if ?pb.getField(1, control.topicID):
trace "decodeGraft: read topicID", topicID = control.topicID
else:
trace "decodeGraft: topicID is missing"
ok(control)
proc decodePeerInfoMsg*(pb: ProtoBuffer): ProtoResult[PeerInfoMsg] {.
inline.} =
proc decodePeerInfoMsg*(pb: ProtoBuffer): ProtoResult[PeerInfoMsg] {.inline.} =
trace "decodePeerInfoMsg: decoding message"
var pi = PeerInfoMsg()
if ? pb.getField(1, pi.peerId):
if ?pb.getField(1, pi.peerId):
trace "decodePeerInfoMsg: read peerId", peerId = pi.peerId
else:
trace "decodePeerInfoMsg: peerId is missing"
if ? pb.getField(2, pi.signedPeerRecord):
trace "decodePeerInfoMsg: read signedPeerRecord", signedPeerRecord = pi.signedPeerRecord
if ?pb.getField(2, pi.signedPeerRecord):
trace "decodePeerInfoMsg: read signedPeerRecord",
signedPeerRecord = pi.signedPeerRecord
else:
trace "decodePeerInfoMsg: signedPeerRecord is missing"
ok(pi)
proc decodePrune*(pb: ProtoBuffer): ProtoResult[ControlPrune] {.
inline.} =
proc decodePrune*(pb: ProtoBuffer): ProtoResult[ControlPrune] {.inline.} =
when defined(libp2p_protobuf_metrics):
libp2p_pubsub_rpc_bytes_read.inc(pb.getLen().int64, labelValues = ["prune"])
trace "decodePrune: decoding message"
var control = ControlPrune()
if ? pb.getField(1, control.topicID):
if ?pb.getField(1, control.topicID):
trace "decodePrune: read topicID", topic = control.topicID
else:
trace "decodePrune: topicID is missing"
var bpeers: seq[seq[byte]]
if ? pb.getRepeatedField(2, bpeers):
if ?pb.getRepeatedField(2, bpeers):
for bpeer in bpeers:
control.peers &= ? decodePeerInfoMsg(initProtoBuffer(bpeer))
if ? pb.getField(3, control.backoff):
control.peers &= ?decodePeerInfoMsg(initProtoBuffer(bpeer))
if ?pb.getField(3, control.backoff):
trace "decodePrune: read backoff", backoff = control.backoff
ok(control)
proc decodeIHave*(pb: ProtoBuffer): ProtoResult[ControlIHave] {.
inline.} =
proc decodeIHave*(pb: ProtoBuffer): ProtoResult[ControlIHave] {.inline.} =
when defined(libp2p_protobuf_metrics):
libp2p_pubsub_rpc_bytes_read.inc(pb.getLen().int64, labelValues = ["ihave"])
trace "decodeIHave: decoding message"
var control = ControlIHave()
if ? pb.getField(1, control.topicID):
if ?pb.getField(1, control.topicID):
trace "decodeIHave: read topicID", topic = control.topicID
else:
trace "decodeIHave: topicID is missing"
if ? pb.getRepeatedField(2, control.messageIDs):
if ?pb.getRepeatedField(2, control.messageIDs):
trace "decodeIHave: read messageIDs", message_ids = control.messageIDs
else:
trace "decodeIHave: no messageIDs"
@@ -194,17 +191,16 @@ proc decodeIWant*(pb: ProtoBuffer): ProtoResult[ControlIWant] {.inline.} =
trace "decodeIWant: decoding message"
var control = ControlIWant()
if ? pb.getRepeatedField(1, control.messageIDs):
if ?pb.getRepeatedField(1, control.messageIDs):
trace "decodeIWant: read messageIDs", message_ids = control.messageIDs
else:
trace "decodeIWant: no messageIDs"
ok(control)
proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.
inline.} =
proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.inline.} =
trace "decodeControl: decoding message"
var buffer: seq[byte]
if ? pb.getField(3, buffer):
if ?pb.getField(3, buffer):
var control: ControlMessage
var cpb = initProtoBuffer(buffer)
var ihavepbs: seq[seq[byte]]
@@ -212,25 +208,26 @@ proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.
var graftpbs: seq[seq[byte]]
var prunepbs: seq[seq[byte]]
var idontwant: seq[seq[byte]]
if ? cpb.getRepeatedField(1, ihavepbs):
if ?cpb.getRepeatedField(1, ihavepbs):
for item in ihavepbs:
control.ihave.add(? decodeIHave(initProtoBuffer(item)))
if ? cpb.getRepeatedField(2, iwantpbs):
control.ihave.add(?decodeIHave(initProtoBuffer(item)))
if ?cpb.getRepeatedField(2, iwantpbs):
for item in iwantpbs:
control.iwant.add(? decodeIWant(initProtoBuffer(item)))
if ? cpb.getRepeatedField(3, graftpbs):
control.iwant.add(?decodeIWant(initProtoBuffer(item)))
if ?cpb.getRepeatedField(3, graftpbs):
for item in graftpbs:
control.graft.add(? decodeGraft(initProtoBuffer(item)))
if ? cpb.getRepeatedField(4, prunepbs):
control.graft.add(?decodeGraft(initProtoBuffer(item)))
if ?cpb.getRepeatedField(4, prunepbs):
for item in prunepbs:
control.prune.add(? decodePrune(initProtoBuffer(item)))
if ? cpb.getRepeatedField(5, idontwant):
control.prune.add(?decodePrune(initProtoBuffer(item)))
if ?cpb.getRepeatedField(5, idontwant):
for item in idontwant:
control.idontwant.add(? decodeIWant(initProtoBuffer(item)))
trace "decodeControl: message statistics", graft_count = len(control.graft),
prune_count = len(control.prune),
ihave_count = len(control.ihave),
iwant_count = len(control.iwant)
control.idontwant.add(?decodeIWant(initProtoBuffer(item)))
trace "decodeControl: message statistics",
graft_count = len(control.graft),
prune_count = len(control.prune),
ihave_count = len(control.ihave),
iwant_count = len(control.iwant)
ok(some(control))
else:
ok(none[ControlMessage]())
@@ -242,27 +239,26 @@ proc decodeSubscription*(pb: ProtoBuffer): ProtoResult[SubOpts] {.inline.} =
trace "decodeSubscription: decoding message"
var subflag: uint64
var sub = SubOpts()
if ? pb.getField(1, subflag):
if ?pb.getField(1, subflag):
sub.subscribe = bool(subflag)
trace "decodeSubscription: read subscribe", subscribe = subflag
else:
trace "decodeSubscription: subscribe is missing"
if ? pb.getField(2, sub.topic):
if ?pb.getField(2, sub.topic):
trace "decodeSubscription: read topic", topic = sub.topic
else:
trace "decodeSubscription: topic is missing"
ok(sub)
proc decodeSubscriptions*(pb: ProtoBuffer): ProtoResult[seq[SubOpts]] {.
inline.} =
proc decodeSubscriptions*(pb: ProtoBuffer): ProtoResult[seq[SubOpts]] {.inline.} =
trace "decodeSubscriptions: decoding message"
var subpbs: seq[seq[byte]]
var subs: seq[SubOpts]
let res = ? pb.getRepeatedField(1, subpbs)
let res = ?pb.getRepeatedField(1, subpbs)
if res:
trace "decodeSubscriptions: read subscriptions", count = len(subpbs)
for item in subpbs:
subs.add(? decodeSubscription(initProtoBuffer(item)))
subs.add(?decodeSubscription(initProtoBuffer(item)))
if len(subs) == 0:
trace "decodeSubscription: no subscriptions found"
ok(subs)
@@ -273,15 +269,15 @@ proc decodeMessage*(pb: ProtoBuffer): ProtoResult[Message] {.inline.} =
trace "decodeMessage: decoding message"
var msg: Message
if ? pb.getField(1, msg.fromPeer):
if ?pb.getField(1, msg.fromPeer):
trace "decodeMessage: read fromPeer", fromPeer = msg.fromPeer
else:
trace "decodeMessage: fromPeer is missing"
if ? pb.getField(2, msg.data):
if ?pb.getField(2, msg.data):
trace "decodeMessage: read data", data = msg.data.shortLog()
else:
trace "decodeMessage: data is missing"
if ? pb.getField(3, msg.seqno):
if ?pb.getField(3, msg.seqno):
trace "decodeMessage: read seqno", seqno = msg.seqno
else:
trace "decodeMessage: seqno is missing"
@@ -290,11 +286,11 @@ proc decodeMessage*(pb: ProtoBuffer): ProtoResult[Message] {.inline.} =
else:
trace "decodeMessage: topic is required"
return err(ProtoError.RequiredFieldMissing)
if ? pb.getField(5, msg.signature):
if ?pb.getField(5, msg.signature):
trace "decodeMessage: read signature", signature = msg.signature.shortLog()
else:
trace "decodeMessage: signature is missing"
if ? pb.getField(6, msg.key):
if ?pb.getField(6, msg.key):
trace "decodeMessage: read public key", key = msg.key.shortLog()
else:
trace "decodeMessage: public key is missing"
@@ -304,11 +300,11 @@ proc decodeMessages*(pb: ProtoBuffer): ProtoResult[seq[Message]] {.inline.} =
trace "decodeMessages: decoding message"
var msgpbs: seq[seq[byte]]
var msgs: seq[Message]
if ? pb.getRepeatedField(2, msgpbs):
if ?pb.getRepeatedField(2, msgpbs):
trace "decodeMessages: read messages", count = len(msgpbs)
for item in msgpbs:
# size is constrained at the network level
msgs.add(? decodeMessage(initProtoBuffer(item, maxSize = uint.high)))
msgs.add(?decodeMessage(initProtoBuffer(item, maxSize = uint.high)))
else:
trace "decodeMessages: no messages found"
ok(msgs)
@@ -336,9 +332,9 @@ proc decodeRpcMsg*(msg: seq[byte]): ProtoResult[RPCMsg] {.inline.} =
trace "decodeRpcMsg: decoding message", msg = msg.shortLog()
var pb = initProtoBuffer(msg, maxSize = uint.high)
var rpcMsg = RPCMsg()
assign(rpcMsg.messages, ? pb.decodeMessages())
assign(rpcMsg.subscriptions, ? pb.decodeSubscriptions())
assign(rpcMsg.control, ? pb.decodeControl())
discard ? pb.getField(60, rpcMsg.ping)
discard ? pb.getField(61, rpcMsg.pong)
assign(rpcMsg.messages, ?pb.decodeMessages())
assign(rpcMsg.subscriptions, ?pb.decodeSubscriptions())
assign(rpcMsg.control, ?pb.decodeControl())
discard ?pb.getField(60, rpcMsg.ping)
discard ?pb.getField(61, rpcMsg.pong)
ok(rpcMsg)

@@ -47,23 +47,29 @@ func expire*(t: var TimedCache, now: Moment = Moment.now()) =
t.entries.excl(t.head)
t.head.prev = nil
t.head = t.head.next
if t.head == nil: t.tail = nil
if t.head == nil:
t.tail = nil
func del*[K](t: var TimedCache[K], key: K): Opt[TimedEntry[K]] =
# Removes existing key from cache, returning the previous value if present
let tmp = TimedEntry[K](key: key)
if tmp in t.entries:
let item = try:
t.entries[tmp] # use the shared instance in the set
except KeyError:
raiseAssert "just checked"
let item =
try:
t.entries[tmp] # use the shared instance in the set
except KeyError:
raiseAssert "just checked"
t.entries.excl(item)
if t.head == item: t.head = item.next
if t.tail == item: t.tail = item.prev
if t.head == item:
t.head = item.next
if t.tail == item:
t.tail = item.prev
if item.next != nil: item.next.prev = item.prev
if item.prev != nil: item.prev.next = item.next
if item.next != nil:
item.next.prev = item.prev
if item.prev != nil:
item.prev.next = item.next
Opt.some(item)
else:
Opt.none(TimedEntry[K])
@@ -76,10 +82,11 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
let
previous = t.del(k) # Refresh existing item
addedAt = if previous.isSome():
previous[].addedAt
else:
now
addedAt =
if previous.isSome():
previous[].addedAt
else:
now
let node = TimedEntry[K](key: k, addedAt: addedAt, expiresAt: now + t.timeout)
if t.head == nil:
@@ -122,6 +129,4 @@ func addedAt*[K](t: var TimedCache[K], k: K): Moment =
default(Moment)
func init*[K](T: type TimedCache[K], timeout: Duration = Timeout): T =
T(
timeout: timeout
)
T(timeout: timeout)
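
For orientation, the TimedCache being reflowed here is the pubsub deduplication cache: `put` reports whether the key was already present (refreshing its expiry if so), and `expire` lazily drops entries older than the configured timeout. A hedged usage sketch; the import path is an assumption based on where this file sits in the tree:

import chronos
import libp2p/protocols/pubsub/timedcache # assumed path

var seen = TimedCache[string].init(timeout = 10.seconds)
let t0 = Moment.now()

doAssert not seen.put("msg-id", t0)         # first sighting
doAssert seen.put("msg-id", t0 + 1.seconds) # duplicate: true, expiry refreshed

seen.expire(t0 + 30.seconds) # entry timed out and was evicted
doAssert not seen.put("msg-id", t0 + 30.seconds)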

View File

@@ -11,17 +11,15 @@
import tables, sequtils, sugar, sets
import metrics except collect
import chronos,
chronicles,
bearssl/rand,
stew/[byteutils, objects, results]
import ./protocol,
../switch,
../routing_record,
../utils/heartbeat,
../stream/connection,
../utils/offsettedseq,
../utils/semaphore
import chronos, chronicles, bearssl/rand, stew/[byteutils, objects, results]
import
./protocol,
../switch,
../routing_record,
../utils/heartbeat,
../stream/connection,
../utils/offsettedseq,
../utils/semaphore
export chronicles
@@ -62,11 +60,11 @@ type
Unavailable = 400
Cookie = object
offset : uint64
ns : string
offset: uint64
ns: string
Register = object
ns : string
ns: string
signedPeerRecord: seq[byte]
ttl: Opt[uint64] # in seconds
@@ -166,7 +164,8 @@ proc decode(_: typedesc[Cookie], buf: seq[byte]): Opt[Cookie] =
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, c.offset)
r2 = pb.getRequiredField(2, c.ns)
if r1.isErr() or r2.isErr(): return Opt.none(Cookie)
if r1.isErr() or r2.isErr():
return Opt.none(Cookie)
Opt.some(c)
proc decode(_: typedesc[Register], buf: seq[byte]): Opt[Register] =
@@ -178,8 +177,10 @@ proc decode(_: typedesc[Register], buf: seq[byte]): Opt[Register] =
r1 = pb.getRequiredField(1, r.ns)
r2 = pb.getRequiredField(2, r.signedPeerRecord)
r3 = pb.getField(3, ttl)
if r1.isErr() or r2.isErr() or r3.isErr(): return Opt.none(Register)
if r3.get(false): r.ttl = Opt.some(ttl)
if r1.isErr() or r2.isErr() or r3.isErr():
return Opt.none(Register)
if r3.get(false):
r.ttl = Opt.some(ttl)
Opt.some(r)
proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Opt[RegisterResponse] =
@@ -194,9 +195,12 @@ proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Opt[RegisterResponse
r2 = pb.getField(2, text)
r3 = pb.getField(3, ttl)
if r1.isErr() or r2.isErr() or r3.isErr() or
not checkedEnumAssign(rr.status, statusOrd): return Opt.none(RegisterResponse)
if r2.get(false): rr.text = Opt.some(text)
if r3.get(false): rr.ttl = Opt.some(ttl)
not checkedEnumAssign(rr.status, statusOrd):
return Opt.none(RegisterResponse)
if r2.get(false):
rr.text = Opt.some(text)
if r3.get(false):
rr.ttl = Opt.some(ttl)
Opt.some(rr)
proc decode(_: typedesc[Unregister], buf: seq[byte]): Opt[Unregister] =
@@ -204,7 +208,8 @@ proc decode(_: typedesc[Unregister], buf: seq[byte]): Opt[Unregister] =
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, u.ns)
if r1.isErr(): return Opt.none(Unregister)
if r1.isErr():
return Opt.none(Unregister)
Opt.some(u)
proc decode(_: typedesc[Discover], buf: seq[byte]): Opt[Discover] =
@@ -217,9 +222,12 @@ proc decode(_: typedesc[Discover], buf: seq[byte]): Opt[Discover] =
r1 = pb.getRequiredField(1, d.ns)
r2 = pb.getField(2, limit)
r3 = pb.getField(3, cookie)
if r1.isErr() or r2.isErr() or r3.isErr: return Opt.none(Discover)
if r2.get(false): d.limit = Opt.some(limit)
if r3.get(false): d.cookie = Opt.some(cookie)
if r1.isErr() or r2.isErr() or r3.isErr:
return Opt.none(Discover)
if r2.get(false):
d.limit = Opt.some(limit)
if r3.get(false):
d.cookie = Opt.some(cookie)
Opt.some(d)
proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Opt[DiscoverResponse] =
@@ -236,14 +244,17 @@ proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Opt[DiscoverResponse
r3 = pb.getRequiredField(3, statusOrd)
r4 = pb.getField(4, text)
if r1.isErr() or r2.isErr() or r3.isErr or r4.isErr() or
not checkedEnumAssign(dr.status, statusOrd): return Opt.none(DiscoverResponse)
not checkedEnumAssign(dr.status, statusOrd):
return Opt.none(DiscoverResponse)
for reg in registrations:
var r: Register
let regOpt = Register.decode(reg).valueOr:
return
dr.registrations.add(regOpt)
if r2.get(false): dr.cookie = Opt.some(cookie)
if r4.get(false): dr.text = Opt.some(text)
if r2.get(false):
dr.cookie = Opt.some(cookie)
if r4.get(false):
dr.text = Opt.some(text)
Opt.some(dr)
proc decode(_: typedesc[Message], buf: seq[byte]): Opt[Message] =
@@ -253,32 +264,37 @@ proc decode(_: typedesc[Message], buf: seq[byte]): Opt[Message] =
pbr, pbrr, pbu, pbd, pbdr: ProtoBuffer
let pb = initProtoBuffer(buf)
? pb.getRequiredField(1, statusOrd).toOpt
if not checkedEnumAssign(msg.msgType, statusOrd): return Opt.none(Message)
?pb.getRequiredField(1, statusOrd).toOpt
if not checkedEnumAssign(msg.msgType, statusOrd):
return Opt.none(Message)
if ? pb.getField(2, pbr).optValue:
if ?pb.getField(2, pbr).optValue:
msg.register = Register.decode(pbr.buffer)
if msg.register.isNone(): return Opt.none(Message)
if msg.register.isNone():
return Opt.none(Message)
if ? pb.getField(3, pbrr).optValue:
if ?pb.getField(3, pbrr).optValue:
msg.registerResponse = RegisterResponse.decode(pbrr.buffer)
if msg.registerResponse.isNone(): return Opt.none(Message)
if msg.registerResponse.isNone():
return Opt.none(Message)
if ? pb.getField(4, pbu).optValue:
if ?pb.getField(4, pbu).optValue:
msg.unregister = Unregister.decode(pbu.buffer)
if msg.unregister.isNone(): return Opt.none(Message)
if msg.unregister.isNone():
return Opt.none(Message)
if ? pb.getField(5, pbd).optValue:
if ?pb.getField(5, pbd).optValue:
msg.discover = Discover.decode(pbd.buffer)
if msg.discover.isNone(): return Opt.none(Message)
if msg.discover.isNone():
return Opt.none(Message)
if ? pb.getField(6, pbdr).optValue:
if ?pb.getField(6, pbdr).optValue:
msg.discoverResponse = DiscoverResponse.decode(pbdr.buffer)
if msg.discoverResponse.isNone(): return Opt.none(Message)
if msg.discoverResponse.isNone():
return Opt.none(Message)
Opt.some(msg)
type
RendezVousError* = object of LPError
RegisteredData = object
@@ -306,46 +322,57 @@ type
switch: Switch
proc checkPeerRecord(spr: seq[byte], peerId: PeerId): Result[void, string] =
if spr.len == 0: return err("Empty peer record")
let signedEnv = ? SignedPeerRecord.decode(spr).mapErr(x => $x)
if spr.len == 0:
return err("Empty peer record")
let signedEnv = ?SignedPeerRecord.decode(spr).mapErr(x => $x)
if signedEnv.data.peerId != peerId:
return err("Bad Peer ID")
return ok()
proc sendRegisterResponse(conn: Connection,
ttl: uint64) {.async.} =
let msg = encode(Message(
msgType: MessageType.RegisterResponse,
registerResponse: Opt.some(RegisterResponse(status: Ok, ttl: Opt.some(ttl)))))
proc sendRegisterResponse(conn: Connection, ttl: uint64) {.async.} =
let msg = encode(
Message(
msgType: MessageType.RegisterResponse,
registerResponse: Opt.some(RegisterResponse(status: Ok, ttl: Opt.some(ttl))),
)
)
await conn.writeLp(msg.buffer)
proc sendRegisterResponseError(conn: Connection,
status: ResponseStatus,
text: string = "") {.async.} =
let msg = encode(Message(
msgType: MessageType.RegisterResponse,
registerResponse: Opt.some(RegisterResponse(status: status, text: Opt.some(text)))))
proc sendRegisterResponseError(
conn: Connection, status: ResponseStatus, text: string = ""
) {.async.} =
let msg = encode(
Message(
msgType: MessageType.RegisterResponse,
registerResponse: Opt.some(RegisterResponse(status: status, text: Opt.some(text))),
)
)
await conn.writeLp(msg.buffer)
proc sendDiscoverResponse(conn: Connection,
s: seq[Register],
cookie: Cookie) {.async.} =
let msg = encode(Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: Opt.some(DiscoverResponse(
status: Ok,
registrations: s,
cookie: Opt.some(cookie.encode().buffer)
))
))
proc sendDiscoverResponse(
conn: Connection, s: seq[Register], cookie: Cookie
) {.async.} =
let msg = encode(
Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: Opt.some(
DiscoverResponse(
status: Ok, registrations: s, cookie: Opt.some(cookie.encode().buffer)
)
),
)
)
await conn.writeLp(msg.buffer)
proc sendDiscoverResponseError(conn: Connection,
status: ResponseStatus,
text: string = "") {.async.} =
let msg = encode(Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: Opt.some(DiscoverResponse(status: status, text: Opt.some(text)))))
proc sendDiscoverResponseError(
conn: Connection, status: ResponseStatus, text: string = ""
) {.async.} =
let msg = encode(
Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: Opt.some(DiscoverResponse(status: status, text: Opt.some(text))),
)
)
await conn.writeLp(msg.buffer)
proc countRegister(rdv: RendezVous, peerId: PeerId): int =
@@ -354,37 +381,36 @@ proc countRegister(rdv: RendezVous, peerId: PeerId): int =
if data.peerId == peerId and data.expiration > n:
result.inc()
proc save(rdv: RendezVous,
ns: string,
peerId: PeerId,
r: Register,
update: bool = true) =
proc save(
rdv: RendezVous, ns: string, peerId: PeerId, r: Register, update: bool = true
) =
let nsSalted = ns & rdv.salt
discard rdv.namespaces.hasKeyOrPut(nsSalted, newSeq[int]())
try:
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].peerId == peerId:
if update == false: return
if update == false:
return
rdv.registered[index].expiration = rdv.defaultDT
rdv.registered.add(
RegisteredData(
peerId: peerId,
expiration: Moment.now() + r.ttl.get(MinimumTTL).int64.seconds,
data: r
data: r,
)
)
rdv.namespaces[nsSalted].add(rdv.registered.high)
# rdv.registerEvent.fire()
# rdv.registerEvent.fire()
except KeyError:
doAssert false, "Should have key"
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
trace "Received Register", peerId = conn.peerId, ns = r.ns
libp2p_rendezvous_register.inc()
if r.ns.len notin 1..255:
if r.ns.len notin 1 .. 255:
return conn.sendRegisterResponseError(InvalidNamespace)
let ttl = r.ttl.get(MinimumTTL)
if ttl notin MinimumTTL..MaximumTTL:
if ttl notin MinimumTTL .. MaximumTTL:
return conn.sendRegisterResponseError(InvalidTTL)
let pr = checkPeerRecord(r.signedPeerRecord, conn.peerId)
if pr.isErr():
@@ -410,21 +436,21 @@ proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
trace "Received Discover", peerId = conn.peerId, ns = d.ns
libp2p_rendezvous_discover.inc()
if d.ns.len notin 0..255:
if d.ns.len notin 0 .. 255:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
var limit = min(DiscoverLimit, d.limit.get(DiscoverLimit))
var
cookie =
if d.cookie.isSome():
try:
Cookie.decode(d.cookie.tryGet()).tryGet()
except CatchableError:
await conn.sendDiscoverResponseError(InvalidCookie)
return
else: Cookie(offset: rdv.registered.low().uint64 - 1)
var cookie =
if d.cookie.isSome():
try:
Cookie.decode(d.cookie.tryGet()).tryGet()
except CatchableError:
await conn.sendDiscoverResponseError(InvalidCookie)
return
else:
Cookie(offset: rdv.registered.low().uint64 - 1)
if cookie.ns != d.ns or
cookie.offset notin rdv.registered.low().uint64..rdv.registered.high().uint64:
cookie.offset notin rdv.registered.low().uint64 .. rdv.registered.high().uint64:
cookie = Cookie(offset: rdv.registered.low().uint64 - 1)
let
nsSalted = d.ns & rdv.salt
@@ -435,32 +461,33 @@ proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
except KeyError:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
else: toSeq(cookie.offset.int..rdv.registered.high())
else:
toSeq(cookie.offset.int .. rdv.registered.high())
if namespaces.len() == 0:
await conn.sendDiscoverResponse(@[], Cookie())
return
var offset = namespaces[^1]
let n = Moment.now()
var s = collect(newSeq()):
for index in namespaces:
var reg = rdv.registered[index]
if limit == 0:
offset = index
break
if reg.expiration < n or index.uint64 <= cookie.offset: continue
limit.dec()
reg.data.ttl = Opt.some((reg.expiration - Moment.now()).seconds.uint64)
reg.data
for index in namespaces:
var reg = rdv.registered[index]
if limit == 0:
offset = index
break
if reg.expiration < n or index.uint64 <= cookie.offset:
continue
limit.dec()
reg.data.ttl = Opt.some((reg.expiration - Moment.now()).seconds.uint64)
reg.data
rdv.rng.shuffle(s)
await conn.sendDiscoverResponse(s, Cookie(offset: offset.uint64, ns: d.ns))
proc advertisePeer(rdv: RendezVous,
peer: PeerId,
msg: seq[byte]) {.async.} =
proc advertisePeer(rdv: RendezVous, peer: PeerId, msg: seq[byte]) {.async.} =
proc advertiseWrap() {.async.} =
try:
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer: await conn.close()
defer:
await conn.close()
await conn.writeLp(msg)
let
buf = await conn.readLp(4096)
@@ -475,17 +502,18 @@ proc advertisePeer(rdv: RendezVous,
trace "exception in the advertise", error = exc.msg
finally:
rdv.sema.release()
await rdv.sema.acquire()
discard await advertiseWrap().withTimeout(5.seconds)
method advertise*(rdv: RendezVous,
ns: string,
ttl: Duration = MinimumDuration) {.async, base.} =
method advertise*(
rdv: RendezVous, ns: string, ttl: Duration = MinimumDuration
) {.async, base.} =
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
raise newException(RendezVousError, "Wrong Signed Peer Record")
if ns.len notin 1..255:
if ns.len notin 1 .. 255:
raise newException(RendezVousError, "Invalid namespace")
if ttl notin MinimumDuration..MaximumDuration:
if ttl notin MinimumDuration .. MaximumDuration:
raise newException(RendezVousError, "Invalid time to live")
let
r = Register(ns: ns, signedPeerRecord: sprBuff, ttl: Opt.some(ttl.seconds.uint64))
@@ -511,9 +539,9 @@ proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
except KeyError as exc:
@[]
proc request*(rdv: RendezVous,
ns: string,
l: int = DiscoverLimit.int): Future[seq[PeerRecord]] {.async.} =
proc request*(
rdv: RendezVous, ns: string, l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async.} =
let nsSalted = ns & rdv.salt
var
s: Table[PeerId, (PeerRecord, Register)]
@@ -522,21 +550,22 @@ proc request*(rdv: RendezVous,
if l <= 0 or l > DiscoverLimit.int:
raise newException(RendezVousError, "Invalid limit")
if ns.len notin 0..255:
if ns.len notin 0 .. 255:
raise newException(RendezVousError, "Invalid namespace")
limit = l.uint64
proc requestPeer(peer: PeerId) {.async.} =
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer: await conn.close()
defer:
await conn.close()
d.limit = Opt.some(limit)
d.cookie =
try:
Opt.some(rdv.cookiesSaved[peer][ns])
except KeyError as exc:
Opt.none(seq[byte])
await conn.writeLp(encode(Message(
msgType: MessageType.Discover,
discover: Opt.some(d))).buffer)
await conn.writeLp(
encode(Message(msgType: MessageType.Discover, discover: Opt.some(d))).buffer
)
let
buf = await conn.readLp(65536)
msgRcv = Message.decode(buf).valueOr:
@@ -552,19 +581,23 @@ proc request*(rdv: RendezVous,
trace "Cannot discover", ns, status = resp.status, text = resp.text
return
resp.cookie.withValue(cookie):
if cookie.len() < 1000 and rdv.cookiesSaved.hasKeyOrPut(peer, {ns: cookie}.toTable()):
if cookie.len() < 1000 and
rdv.cookiesSaved.hasKeyOrPut(peer, {ns: cookie}.toTable()):
rdv.cookiesSaved[peer][ns] = cookie
for r in resp.registrations:
if limit == 0: return
if limit == 0:
return
let ttl = r.ttl.get(MaximumTTL + 1)
if ttl > MaximumTTL: continue
if ttl > MaximumTTL:
continue
let
spr = SignedPeerRecord.decode(r.signedPeerRecord).valueOr: continue
spr = SignedPeerRecord.decode(r.signedPeerRecord).valueOr:
continue
pr = spr.data
if s.hasKey(pr.peerId):
let (prSaved, rSaved) = s[pr.peerId]
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get(MaximumTTL) < ttl) or
prSaved.seqNo < pr.seqNo:
prSaved.seqNo < pr.seqNo:
s[pr.peerId] = (pr, r)
else:
s[pr.peerId] = (pr, r)
@@ -575,8 +608,10 @@ proc request*(rdv: RendezVous,
# copy to avoid resizes during the loop
let peers = rdv.peers
for peer in peers:
if limit == 0: break
if RendezVousCodec notin rdv.switch.peerStore[ProtoBook][peer]: continue
if limit == 0:
break
if RendezVousCodec notin rdv.switch.peerStore[ProtoBook][peer]:
continue
try:
trace "Send Request", peerId = peer, ns
await peer.requestPeer()
@@ -597,17 +632,18 @@ proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
proc unsubscribe*(rdv: RendezVous, ns: string) {.async.} =
# TODO: find a way to improve this, maybe something similar to the advertise
if ns.len notin 1..255:
if ns.len notin 1 .. 255:
raise newException(RendezVousError, "Invalid namespace")
rdv.unsubscribeLocally(ns)
let msg = encode(Message(
msgType: MessageType.Unregister,
unregister: Opt.some(Unregister(ns: ns))))
let msg = encode(
Message(msgType: MessageType.Unregister, unregister: Opt.some(Unregister(ns: ns)))
)
proc unsubscribePeer(rdv: RendezVous, peerId: PeerId) {.async.} =
try:
let conn = await rdv.switch.dial(peerId, RendezVousCodec)
defer: await conn.close()
defer:
await conn.close()
await conn.writeLp(msg.buffer)
except CatchableError as exc:
trace "exception while unsubscribing", error = exc.msg
@@ -622,33 +658,37 @@ proc setup*(rdv: RendezVous, switch: Switch) =
rdv.peers.add(peerId)
elif event.kind == PeerEventKind.Left:
rdv.peers.keepItIf(it != peerId)
rdv.switch.addPeerEventHandler(handlePeer, Joined)
rdv.switch.addPeerEventHandler(handlePeer, Left)
proc new*(T: typedesc[RendezVous],
rng: ref HmacDrbgContext = newRng()): T =
proc new*(T: typedesc[RendezVous], rng: ref HmacDrbgContext = newRng()): T =
let rdv = T(
rng: rng,
salt: string.fromBytes(generateBytes(rng[], 8)),
registered: initOffsettedSeq[RegisteredData](1),
defaultDT: Moment.now() - 1.days,
#registerEvent: newAsyncEvent(),
sema: newAsyncSemaphore(SemaphoreDefaultSize)
sema: newAsyncSemaphore(SemaphoreDefaultSize),
)
logScope: topics = "libp2p discovery rendezvous"
logScope:
topics = "libp2p discovery rendezvous"
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let
buf = await conn.readLp(4096)
msg = Message.decode(buf).tryGet()
case msg.msgType:
of MessageType.Register: await rdv.register(conn, msg.register.tryGet())
of MessageType.RegisterResponse:
trace "Got an unexpected Register Response", response = msg.registerResponse
of MessageType.Unregister: rdv.unregister(conn, msg.unregister.tryGet())
of MessageType.Discover: await rdv.discover(conn, msg.discover.tryGet())
of MessageType.DiscoverResponse:
trace "Got an unexpected Discover Response", response = msg.discoverResponse
case msg.msgType
of MessageType.Register:
await rdv.register(conn, msg.register.tryGet())
of MessageType.RegisterResponse:
trace "Got an unexpected Register Response", response = msg.registerResponse
of MessageType.Unregister:
rdv.unregister(conn, msg.unregister.tryGet())
of MessageType.Discover:
await rdv.discover(conn, msg.discover.tryGet())
of MessageType.DiscoverResponse:
trace "Got an unexpected Discover Response", response = msg.discoverResponse
except CancelledError as exc:
raise exc
except CatchableError as exc:
@@ -660,9 +700,9 @@ proc new*(T: typedesc[RendezVous],
rdv.codec = RendezVousCodec
return rdv
proc new*(T: typedesc[RendezVous],
switch: Switch,
rng: ref HmacDrbgContext = newRng()): T =
proc new*(
T: typedesc[RendezVous], switch: Switch, rng: ref HmacDrbgContext = newRng()
): T =
let rdv = T.new(rng)
rdv.setup(switch)
return rdv
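
The rendezvous hunks above touch the whole client/server surface: wire decoding, the register/discover handlers, and the public advertise/request API. A hedged end-to-end sketch of that API as it reads in this diff (single-node for brevity; `newStandardSwitch` and the module path are assumptions, not part of this commit):

import chronos
import libp2p
import libp2p/protocols/rendezvous # assumed path

proc demo() {.async.} =
  let
    switch = newStandardSwitch()
    rdv = RendezVous.new(switch) # wires peer-event handlers via setup()
  switch.mount(rdv)
  await switch.start()

  # Namespace must be 1 .. 255 chars, TTL within the allowed bounds.
  await rdv.advertise("example/ns")

  # Discover up to 10 peers registered under the namespace.
  let records = await rdv.request("example/ns", 10)
  echo "discovered ", records.len, " peer record(s)"

  await switch.stop()

waitFor demo()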

View File

@@ -22,8 +22,7 @@ import ../../protobuf/minprotobuf
import ../../utility
import ../../errors
import secure,
../../crypto/[crypto, chacha20poly1305, curve25519, hkdf]
import secure, ../../crypto/[crypto, chacha20poly1305, curve25519, hkdf]
when defined(libp2p_dump):
import ../../debugutils
@@ -99,12 +98,15 @@ type
func shortLog*(conn: NoiseConnection): auto =
try:
if conn == nil: "NoiseConnection(nil)"
else: &"{shortLog(conn.peerId)}:{conn.oid}"
if conn == nil:
"NoiseConnection(nil)"
else:
&"{shortLog(conn.peerId)}:{conn.oid}"
except ValueError as exc:
raiseAssert(exc.msg)
chronicles.formatIt(NoiseConnection): shortLog(it)
chronicles.formatIt(NoiseConnection):
shortLog(it)
proc genKeyPair(rng: var HmacDrbgContext): KeyPair =
result.privateKey = Curve25519Key.random(rng)
@@ -116,7 +118,7 @@ proc hashProtocol(name: string): MDigest[256] =
# Otherwise sets h = HASH(protocol_name).
if name.len <= 32:
result.data[0..name.high] = name.toBytes
result.data[0 .. name.high] = name.toBytes
else:
result = sha256.digest(name)
@@ -130,13 +132,10 @@ proc hasKey(cs: CipherState): bool =
cs.k != EmptyKey
proc encrypt(
state: var CipherState,
data: var openArray[byte],
ad: openArray[byte]): ChaChaPolyTag
{.noinit, raises: [NoiseNonceMaxError].} =
state: var CipherState, data: var openArray[byte], ad: openArray[byte]
): ChaChaPolyTag {.noinit, raises: [NoiseNonceMaxError].} =
var nonce: ChaChaPolyNonce
nonce[4..<12] = toBytesLE(state.n)
nonce[4 ..< 12] = toBytesLE(state.n)
ChaChaPoly.encrypt(state.k, nonce, result, data, ad)
@@ -144,8 +143,9 @@ proc encrypt(
if state.n > NonceMax:
raise (ref NoiseNonceMaxError)(msg: "Noise max nonce value reached")
proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
{.raises: [NoiseNonceMaxError].} =
proc encryptWithAd(
state: var CipherState, ad, data: openArray[byte]
): seq[byte] {.raises: [NoiseNonceMaxError].} =
result = newSeqOfCap[byte](data.len + sizeof(ChaChaPolyTag))
result.add(data)
@@ -156,20 +156,21 @@ proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
trace "encryptWithAd",
tag = byteutils.toHex(tag), data = result.shortLog, nonce = state.n - 1
proc decryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
{.raises: [NoiseDecryptTagError, NoiseNonceMaxError].} =
proc decryptWithAd(
state: var CipherState, ad, data: openArray[byte]
): seq[byte] {.raises: [NoiseDecryptTagError, NoiseNonceMaxError].} =
var
tagIn = data.toOpenArray(data.len - ChaChaPolyTag.len, data.high).intoChaChaPolyTag
tagOut: ChaChaPolyTag
nonce: ChaChaPolyNonce
nonce[4..<12] = toBytesLE(state.n)
result = data[0..(data.high - ChaChaPolyTag.len)]
nonce[4 ..< 12] = toBytesLE(state.n)
result = data[0 .. (data.high - ChaChaPolyTag.len)]
ChaChaPoly.decrypt(state.k, nonce, tagOut, result, ad)
trace "decryptWithAd", tagIn = tagIn.shortLog, tagOut = tagOut.shortLog, nonce = state.n
trace "decryptWithAd",
tagIn = tagIn.shortLog, tagOut = tagOut.shortLog, nonce = state.n
if tagIn != tagOut:
debug "decryptWithAd failed", data = shortLog(data)
raise (ref NoiseDecryptTagError)(msg:
"decryptWithAd failed tag authentication.")
raise (ref NoiseDecryptTagError)(msg: "decryptWithAd failed tag authentication.")
inc state.n
if state.n > NonceMax:
raise (ref NoiseNonceMaxError)(msg: "Noise max nonce value reached")
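
Both cipher paths above assemble the 12-byte ChaCha20-Poly1305 nonce identically: four zero bytes, then the 64-bit message counter little-endian, with NonceMax as a hard stop before any reuse. A standalone sketch of just the nonce layout (names local to this example):

import stew/endians2

proc buildNonce(counter: uint64): array[12, byte] =
  # Bytes 0 .. 3 stay zero; bytes 4 .. 11 carry the counter, matching
  # `nonce[4 ..< 12] = toBytesLE(state.n)` in the hunks above.
  result[4 ..< 12] = toBytesLE(counter)

doAssert buildNonce(1)[4] == 1'u8       # least significant byte first
doAssert buildNonce(256'u64)[5] == 1'u8
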
@@ -204,8 +205,9 @@ proc mixKeyAndHash(ss: var SymmetricState, ikm: openArray[byte]) {.used.} =
ss.mixHash(temp_keys[1])
ss.cs = CipherState(k: temp_keys[2])
proc encryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
{.raises: [NoiseNonceMaxError].} =
proc encryptAndHash(
ss: var SymmetricState, data: openArray[byte]
): seq[byte] {.raises: [NoiseNonceMaxError].} =
# according to spec if key is empty leave plaintext
if ss.cs.hasKey:
result = ss.cs.encryptWithAd(ss.h.data, data)
@@ -213,8 +215,9 @@ proc encryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
result = @data
ss.mixHash(result)
proc decryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
{.raises: [NoiseDecryptTagError, NoiseNonceMaxError].} =
proc decryptAndHash(
ss: var SymmetricState, data: openArray[byte]
): seq[byte] {.raises: [NoiseDecryptTagError, NoiseNonceMaxError].} =
# according to spec if key is empty leave plaintext
if ss.cs.hasKey and data.len > ChaChaPolyTag.len:
result = ss.cs.decryptWithAd(ss.h.data, data)
@@ -223,15 +226,14 @@ proc decryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
ss.mixHash(data)
proc split(ss: var SymmetricState): tuple[cs1, cs2: CipherState] =
var
temp_keys: array[2, ChaChaPolyKey]
var temp_keys: array[2, ChaChaPolyKey]
sha256.hkdf(ss.ck, [], [], temp_keys)
return (CipherState(k: temp_keys[0]), CipherState(k: temp_keys[1]))
proc init(_: type[HandshakeState]): HandshakeState =
result.ss = SymmetricState.init()
template write_e: untyped =
template write_e(): untyped =
trace "noise write e"
# Sets e (which must be empty) to GENERATE_KEYPAIR().
# Appends e.public_key to the buffer. Calls MixHash(e.public_key).
@@ -239,17 +241,17 @@ template write_e: untyped =
msg.add hs.e.publicKey
hs.ss.mixHash(hs.e.publicKey)
template write_s: untyped =
template write_s(): untyped =
trace "noise write s"
# Appends EncryptAndHash(s.public_key) to the buffer.
msg.add hs.ss.encryptAndHash(hs.s.publicKey)
template dh_ee: untyped =
template dh_ee(): untyped =
trace "noise dh ee"
# Calls MixKey(DH(e, re)).
hs.ss.mixKey(dh(hs.e.privateKey, hs.re))
template dh_es: untyped =
template dh_es(): untyped =
trace "noise dh es"
# Calls MixKey(DH(e, rs)) if initiator, MixKey(DH(s, re)) if responder.
when initiator:
@@ -257,7 +259,7 @@ template dh_es: untyped =
else:
hs.ss.mixKey(dh(hs.s.privateKey, hs.re))
template dh_se: untyped =
template dh_se(): untyped =
trace "noise dh se"
# Calls MixKey(DH(s, re)) if initiator, MixKey(DH(e, rs)) if responder.
when initiator:
@@ -266,12 +268,12 @@ template dh_se: untyped =
hs.ss.mixKey(dh(hs.e.privateKey, hs.rs))
# might be used for other token/handshakes
template dh_ss: untyped {.used.} =
template dh_ss(): untyped {.used.} =
trace "noise dh ss"
# Calls MixKey(DH(s, rs)).
hs.ss.mixKey(dh(hs.s.privateKey, hs.rs))
template read_e: untyped =
template read_e(): untyped =
trace "noise read e", size = msg.len
if msg.len < Curve25519Key.len:
@@ -279,27 +281,25 @@ template read_e: untyped =
# Sets re (which must be empty) to the next DHLEN bytes from the message.
# Calls MixHash(re.public_key).
hs.re[0..Curve25519Key.high] = msg.toOpenArray(0, Curve25519Key.high)
hs.re[0 .. Curve25519Key.high] = msg.toOpenArray(0, Curve25519Key.high)
msg.consume(Curve25519Key.len)
hs.ss.mixHash(hs.re)
template read_s: untyped =
template read_s(): untyped =
trace "noise read s", size = msg.len
# Sets temp to the next DHLEN + 16 bytes of the message if HasKey() == True,
# or to the next DHLEN bytes otherwise.
# Sets rs (which must be empty) to DecryptAndHash(temp).
let
rsLen =
if hs.ss.cs.hasKey:
if msg.len < Curve25519Key.len + ChaChaPolyTag.len:
raise (ref NoiseHandshakeError)(msg: "Noise S, expected more data")
Curve25519Key.len + ChaChaPolyTag.len
else:
if msg.len < Curve25519Key.len:
raise (ref NoiseHandshakeError)(msg: "Noise S, expected more data")
Curve25519Key.len
hs.rs[0..Curve25519Key.high] =
hs.ss.decryptAndHash(msg.toOpenArray(0, rsLen - 1))
let rsLen =
if hs.ss.cs.hasKey:
if msg.len < Curve25519Key.len + ChaChaPolyTag.len:
raise (ref NoiseHandshakeError)(msg: "Noise S, expected more data")
Curve25519Key.len + ChaChaPolyTag.len
else:
if msg.len < Curve25519Key.len:
raise (ref NoiseHandshakeError)(msg: "Noise S, expected more data")
Curve25519Key.len
hs.rs[0 .. Curve25519Key.high] = hs.ss.decryptAndHash(msg.toOpenArray(0, rsLen - 1))
msg.consume(rsLen)
@@ -318,10 +318,8 @@ proc readFrame(
return buffer
proc writeFrame(
sconn: Connection,
buf: openArray[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
sconn: Connection, buf: openArray[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
doAssert buf.len <= uint16.high.int
var
lesize = buf.len.uint16
@@ -334,25 +332,19 @@ proc writeFrame(
proc receiveHSMessage(
sconn: Connection
): Future[seq[byte]] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
readFrame(sconn)
proc sendHSMessage(
sconn: Connection,
buf: openArray[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
sconn: Connection, buf: openArray[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
writeFrame(sconn, buf)
proc handshakeXXOutbound(
p: Noise, conn: Connection,
p2pSecret: seq[byte]
): Future[HandshakeResult] {.async: (raises: [
CancelledError, LPStreamError]).} =
p: Noise, conn: Connection, p2pSecret: seq[byte]
): Future[HandshakeResult] {.async: (raises: [CancelledError, LPStreamError]).} =
const initiator = true
var
hs = HandshakeState.init()
var hs = HandshakeState.init()
try:
hs.ss.mixHash(p.commonPrologue)
@@ -391,20 +383,17 @@ proc handshakeXXOutbound(
await conn.sendHSMessage(msg.data)
let (cs1, cs2) = hs.ss.split()
return HandshakeResult(
cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
return
HandshakeResult(cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
finally:
burnMem(hs)
proc handshakeXXInbound(
p: Noise, conn: Connection,
p2pSecret: seq[byte]
): Future[HandshakeResult] {.async: (raises: [
CancelledError, LPStreamError]).} =
p: Noise, conn: Connection, p2pSecret: seq[byte]
): Future[HandshakeResult] {.async: (raises: [CancelledError, LPStreamError]).} =
const initiator = false
var
hs = HandshakeState.init()
var hs = HandshakeState.init()
try:
hs.ss.mixHash(p.commonPrologue)
@@ -444,8 +433,8 @@ proc handshakeXXInbound(
let
remoteP2psecret = hs.ss.decryptAndHash(msg.data)
(cs1, cs2) = hs.ss.split()
return HandshakeResult(
cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
return
HandshakeResult(cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
finally:
burnMem(hs)
@@ -467,32 +456,26 @@ method readMessage*(
trace "Received 0-length message", sconn
proc encryptFrame(
sconn: NoiseConnection,
cipherFrame: var openArray[byte],
src: openArray[byte])
{.raises: [NoiseNonceMaxError].} =
sconn: NoiseConnection, cipherFrame: var openArray[byte], src: openArray[byte]
) {.raises: [NoiseNonceMaxError].} =
# Frame consists of length + cipher data + tag
doAssert src.len <= MaxPlainSize
doAssert cipherFrame.len == 2 + src.len + sizeof(ChaChaPolyTag)
cipherFrame[0..<2] = toBytesBE(uint16(src.len + sizeof(ChaChaPolyTag)))
cipherFrame[2..<2 + src.len()] = src
cipherFrame[0 ..< 2] = toBytesBE(uint16(src.len + sizeof(ChaChaPolyTag)))
cipherFrame[2 ..< 2 + src.len()] = src
let tag = encrypt(
sconn.writeCs, cipherFrame.toOpenArray(2, 2 + src.len() - 1), [])
let tag = encrypt(sconn.writeCs, cipherFrame.toOpenArray(2, 2 + src.len() - 1), [])
cipherFrame[2 + src.len()..<cipherFrame.len] = tag
cipherFrame[2 + src.len() ..< cipherFrame.len] = tag
method write*(
sconn: NoiseConnection,
message: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
sconn: NoiseConnection, message: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
# Fast path: `{.async.}` would introduce a copy of `message`
const FramingSize = 2 + sizeof(ChaChaPolyTag)
let
frames = (message.len + MaxPlainSize - 1) div MaxPlainSize
let frames = (message.len + MaxPlainSize - 1) div MaxPlainSize
var
cipherFrames = newSeqUninitialized[byte](message.len + frames * FramingSize)
@@ -501,15 +484,14 @@ method write*(
woffset = 0
while left > 0:
let
chunkSize = min(MaxPlainSize, left)
let chunkSize = min(MaxPlainSize, left)
try:
encryptFrame(
sconn,
cipherFrames.toOpenArray(
woffset, woffset + chunkSize + FramingSize - 1),
message.toOpenArray(offset, offset + chunkSize - 1))
cipherFrames.toOpenArray(woffset, woffset + chunkSize + FramingSize - 1),
message.toOpenArray(offset, offset + chunkSize - 1),
)
except NoiseNonceMaxError as exc:
debug "Noise nonce exceeded"
let fut = newFuture[void]("noise.write.nonce")
@@ -518,8 +500,10 @@ method write*(
when defined(libp2p_dump):
dumpMessage(
sconn, FlowDirection.Outgoing,
message.toOpenArray(offset, offset + chunkSize - 1))
sconn,
FlowDirection.Outgoing,
message.toOpenArray(offset, offset + chunkSize - 1),
)
left = left - chunkSize
offset += chunkSize
@@ -532,10 +516,7 @@ method write*(
sconn.stream.write(cipherFrames)
method handshake*(
p: Noise,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]
p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerId]
): Future[SecureConn] {.async: (raises: [CancelledError, LPStreamError]).} =
trace "Starting Noise handshake", conn, initiator
@@ -543,14 +524,14 @@ method handshake*(
conn.timeout = HandshakeTimeout
# https://github.com/libp2p/specs/tree/master/noise#libp2p-data-in-handshake-messages
let signedPayload = p.localPrivateKey.sign(
PayloadString & p.noiseKeys.publicKey.getBytes)
let signedPayload =
p.localPrivateKey.sign(PayloadString & p.noiseKeys.publicKey.getBytes)
if signedPayload.isErr():
raise (ref NoiseHandshakeError)(msg:
"Failed to sign public key: " & $signedPayload.error())
raise (ref NoiseHandshakeError)(
msg: "Failed to sign public key: " & $signedPayload.error()
)
var
libp2pProof = initProtoBuffer()
var libp2pProof = initProtoBuffer()
libp2pProof.write(1, p.localPublicKey)
libp2pProof.write(2, signedPayload.get().getBytes())
# data field also there but not used!
@@ -562,70 +543,76 @@ method handshake*(
else:
await handshakeXXInbound(p, conn, libp2pProof.buffer)
var secure = try:
var
remoteProof = initProtoBuffer(handshakeRes.remoteP2psecret)
remotePubKey: PublicKey
remotePubKeyBytes: seq[byte]
remoteSig: Signature
remoteSigBytes: seq[byte]
var secure =
try:
var
remoteProof = initProtoBuffer(handshakeRes.remoteP2psecret)
remotePubKey: PublicKey
remotePubKeyBytes: seq[byte]
remoteSig: Signature
remoteSigBytes: seq[byte]
if not remoteProof.getField(1, remotePubKeyBytes).valueOr(false):
raise (ref NoiseHandshakeError)(msg:
"Failed to deserialize remote public key bytes. (initiator: " &
$initiator & ")")
if not remoteProof.getField(2, remoteSigBytes).valueOr(false):
raise (ref NoiseHandshakeError)(msg:
"Failed to deserialize remote signature bytes. (initiator: " &
$initiator & ")")
if not remoteProof.getField(1, remotePubKeyBytes).valueOr(false):
raise (ref NoiseHandshakeError)(
msg:
"Failed to deserialize remote public key bytes. (initiator: " & $initiator &
")"
)
if not remoteProof.getField(2, remoteSigBytes).valueOr(false):
raise (ref NoiseHandshakeError)(
msg:
"Failed to deserialize remote signature bytes. (initiator: " & $initiator &
")"
)
if not remotePubKey.init(remotePubKeyBytes):
raise (ref NoiseHandshakeError)(msg:
"Failed to decode remote public key. (initiator: " & $initiator & ")")
if not remoteSig.init(remoteSigBytes):
raise (ref NoiseHandshakeError)(msg:
"Failed to decode remote signature. (initiator: " & $initiator & ")")
if not remotePubKey.init(remotePubKeyBytes):
raise (ref NoiseHandshakeError)(
msg: "Failed to decode remote public key. (initiator: " & $initiator & ")"
)
if not remoteSig.init(remoteSigBytes):
raise (ref NoiseHandshakeError)(
msg: "Failed to decode remote signature. (initiator: " & $initiator & ")"
)
let verifyPayload = PayloadString & handshakeRes.rs.getBytes
if not remoteSig.verify(verifyPayload, remotePubKey):
raise (ref NoiseHandshakeError)(msg:
"Noise handshake signature verify failed.")
else:
trace "Remote signature verified", conn
let verifyPayload = PayloadString & handshakeRes.rs.getBytes
if not remoteSig.verify(verifyPayload, remotePubKey):
raise (ref NoiseHandshakeError)(msg: "Noise handshake signature verify failed.")
else:
trace "Remote signature verified", conn
let pid = PeerId.init(remotePubKey).valueOr:
raise (ref NoiseHandshakeError)(msg:
"Invalid remote peer id: " & $error)
let pid = PeerId.init(remotePubKey).valueOr:
raise (ref NoiseHandshakeError)(msg: "Invalid remote peer id: " & $error)
trace "Remote peer id", pid = $pid
trace "Remote peer id", pid = $pid
peerId.withValue(targetPid):
if not targetPid.validate():
raise (ref NoiseHandshakeError)(msg:
"Failed to validate expected peerId.")
peerId.withValue(targetPid):
if not targetPid.validate():
raise (ref NoiseHandshakeError)(msg: "Failed to validate expected peerId.")
if pid != targetPid:
var
failedKey: PublicKey
discard extractPublicKey(targetPid, failedKey)
debug "Noise handshake, peer id doesn't match!",
initiator, dealt_peer = conn,
dealt_key = $failedKey, received_peer = $pid,
received_key = $remotePubKey
raise (ref NoiseHandshakeError)(msg:
"Noise handshake, peer id don't match! " & $pid & " != " & $targetPid)
conn.peerId = pid
if pid != targetPid:
var failedKey: PublicKey
discard extractPublicKey(targetPid, failedKey)
debug "Noise handshake, peer id doesn't match!",
initiator,
dealt_peer = conn,
dealt_key = $failedKey,
received_peer = $pid,
received_key = $remotePubKey
raise (ref NoiseHandshakeError)(
msg: "Noise handshake, peer id don't match! " & $pid & " != " & $targetPid
)
conn.peerId = pid
var tmp = NoiseConnection.new(conn, conn.peerId, conn.observedAddr)
if initiator:
tmp.readCs = handshakeRes.cs2
tmp.writeCs = handshakeRes.cs1
else:
tmp.readCs = handshakeRes.cs1
tmp.writeCs = handshakeRes.cs2
tmp
finally:
burnMem(handshakeRes)
var tmp = NoiseConnection.new(conn, conn.peerId, conn.observedAddr)
if initiator:
tmp.readCs = handshakeRes.cs2
tmp.writeCs = handshakeRes.cs1
else:
tmp.readCs = handshakeRes.cs1
tmp.writeCs = handshakeRes.cs2
tmp
finally:
burnMem(handshakeRes)
trace "Noise handshake completed!", initiator, peer = shortLog(secure.peerId)
@@ -648,10 +635,13 @@ proc new*(
rng: ref HmacDrbgContext,
privateKey: PrivateKey,
outgoing: bool = true,
commonPrologue: seq[byte] = @[]): T =
let pkBytes = privateKey.getPublicKey()
commonPrologue: seq[byte] = @[],
): T =
let pkBytes = privateKey
.getPublicKey()
.expect("Expected valid Private Key")
.getBytes().expect("Couldn't get public Key bytes")
.getBytes()
.expect("Couldn't get public Key bytes")
var noise = Noise(
rng: rng,

View File

@@ -14,13 +14,12 @@ import secure, ../../stream/connection
const PlainTextCodec* = "/plaintext/1.0.0"
type
PlainText* = ref object of Secure
type PlainText* = ref object of Secure
method init(p: PlainText) {.gcsafe.} =
proc handle(conn: Connection, proto: string)
{.async.} = discard
proc handle(conn: Connection, proto: string) {.async.} =
## plain text doesn't do anything
discard
p.codec = PlainTextCodec
p.handler = handle

View File

@@ -13,20 +13,20 @@
import std/[strformat]
import stew/results
import chronos, chronicles
import ../protocol,
../../stream/streamseq,
../../stream/connection,
../../multiaddress,
../../peerinfo,
../../errors
import
../protocol,
../../stream/streamseq,
../../stream/connection,
../../multiaddress,
../../peerinfo,
../../errors
export protocol, results
logScope:
topics = "libp2p secure"
const
SecureConnTrackerName* = "SecureConn"
const SecureConnTrackerName* = "SecureConn"
type
Secure* = ref object of LPProtocol # base type for secure managers
@@ -37,25 +37,31 @@ type
func shortLog*(conn: SecureConn): auto =
try:
if conn == nil: "SecureConn(nil)"
else: &"{shortLog(conn.peerId)}:{conn.oid}"
if conn == nil:
"SecureConn(nil)"
else:
&"{shortLog(conn.peerId)}:{conn.oid}"
except ValueError as exc:
raiseAssert(exc.msg)
chronicles.formatIt(SecureConn): shortLog(it)
chronicles.formatIt(SecureConn):
shortLog(it)
proc new*(
T: type SecureConn,
conn: Connection,
peerId: PeerId,
observedAddr: Opt[MultiAddress],
timeout: Duration = DefaultConnectionTimeout): T =
result = T(stream: conn,
peerId: peerId,
observedAddr: observedAddr,
closeEvent: conn.closeEvent,
timeout: timeout,
dir: conn.dir)
timeout: Duration = DefaultConnectionTimeout,
): T =
result = T(
stream: conn,
peerId: peerId,
observedAddr: observedAddr,
closeEvent: conn.closeEvent,
timeout: timeout,
dir: conn.dir,
)
result.initStream()
method initStream*(s: SecureConn) =
@@ -73,26 +79,23 @@ method closeImpl*(s: SecureConn) {.async: (raises: []).} =
method readMessage*(
c: SecureConn
): Future[seq[byte]] {.async: (raises: [
CancelledError, LPStreamError], raw: true), base.} =
): Future[seq[byte]] {.
async: (raises: [CancelledError, LPStreamError], raw: true), base
.} =
raiseAssert("Not implemented!")
method getWrapped*(s: SecureConn): Connection = s.stream
method getWrapped*(s: SecureConn): Connection =
s.stream
method handshake*(
s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]
): Future[SecureConn] {.async: (raises: [
CancelledError, LPStreamError], raw: true), base.} =
s: Secure, conn: Connection, initiator: bool, peerId: Opt[PeerId]
): Future[SecureConn] {.
async: (raises: [CancelledError, LPStreamError], raw: true), base
.} =
raiseAssert("Not implemented!")
proc handleConn(
s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]
s: Secure, conn: Connection, initiator: bool, peerId: Opt[PeerId]
): Future[Connection] {.async: (raises: [CancelledError, LPStreamError]).} =
var sconn = await s.handshake(conn, initiator, peerId)
# mark connection bottom level transport direction
@@ -106,20 +109,26 @@ proc handleConn(
let
fut1 = conn.join()
fut2 = sconn.join()
try: # https://github.com/status-im/nim-chronos/issues/516
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(fut1, fut2)
except ValueError: raiseAssert("Futures list is not empty")
except ValueError:
raiseAssert("Futures list is not empty")
# at least one join() completed, cancel pending one, if any
if not fut1.finished: await fut1.cancelAndWait()
if not fut2.finished: await fut2.cancelAndWait()
if not fut1.finished:
await fut1.cancelAndWait()
if not fut2.finished:
await fut2.cancelAndWait()
block:
let
fut1 = sconn.close()
fut2 = conn.close()
await allFutures(fut1, fut2)
static: doAssert typeof(fut1).E is void # Cannot fail
static: doAssert typeof(fut2).E is void # Cannot fail
static:
doAssert typeof(fut1).E is void
# Cannot fail
static:
doAssert typeof(fut2).E is void
# Cannot fail
except CancelledError:
# This is top-level procedure which will work as separate task, so it
# do not need to propagate CancelledError.
@@ -152,17 +161,14 @@ method init*(s: Secure) =
s.handler = handle
method secure*(
s: Secure,
conn: Connection,
peerId: Opt[PeerId]
): Future[Connection] {.async: (raises: [
CancelledError, LPStreamError], raw: true), base.} =
s: Secure, conn: Connection, peerId: Opt[PeerId]
): Future[Connection] {.
async: (raises: [CancelledError, LPStreamError], raw: true), base
.} =
s.handleConn(conn, conn.dir == Direction.Out, peerId)
method readOnce*(
s: SecureConn,
pbytes: pointer,
nbytes: int
s: SecureConn, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
doAssert(nbytes > 0, "nbytes must be positive integer")
@@ -182,9 +188,7 @@ method readOnce*(
raise exc
except LPStreamError as err:
debug "Error while reading message from secure connection, closing.",
error = err.name,
message = err.msg,
connection = s
error = err.name, message = err.msg, connection = s
await s.close()
raise err
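
The handleConn hunk keeps the documented workaround for nim-chronos issue 516: `race` is wrapped in a try because it raises ValueError for an empty futures list, and whichever `join` is still pending afterwards gets cancelled. The pattern in isolation (plain timer futures stand in for the two connections):

import chronos

proc awaitEither(fut1, fut2: Future[void]) {.async.} =
  try: # race() raises ValueError on an empty list; cannot happen here
    discard await race(fut1, fut2)
  except ValueError:
    raiseAssert "Futures list is not empty"
  # At least one side finished; cancel the one still pending, if any.
  if not fut1.finished:
    await fut1.cancelAndWait()
  if not fut2.finished:
    await fut2.cancelAndWait()

proc demo() {.async.} =
  let
    quick = sleepAsync(10.milliseconds)
    slow = sleepAsync(10.seconds)
  await awaitEither(quick, slow) # returns promptly; `slow` is cancelled

waitFor demo()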

View File

@@ -13,12 +13,7 @@
import std/[sequtils, times]
import pkg/stew/results
import
multiaddress,
multicodec,
peerid,
protobuf/minprotobuf,
signed_envelope
import multiaddress, multicodec, peerid, protobuf/minprotobuf, signed_envelope
export peerid, multiaddress, signed_envelope
@@ -32,23 +27,22 @@ type
addresses*: seq[AddressInfo]
proc decode*(
T: typedesc[PeerRecord],
buffer: seq[byte]): Result[PeerRecord, ProtoError] =
T: typedesc[PeerRecord], buffer: seq[byte]
): Result[PeerRecord, ProtoError] =
let pb = initProtoBuffer(buffer)
var record = PeerRecord()
? pb.getRequiredField(1, record.peerId)
? pb.getRequiredField(2, record.seqNo)
?pb.getRequiredField(1, record.peerId)
?pb.getRequiredField(2, record.seqNo)
var addressInfos: seq[seq[byte]]
if ? pb.getRepeatedField(3, addressInfos):
if ?pb.getRepeatedField(3, addressInfos):
for address in addressInfos:
var addressInfo = AddressInfo()
let subProto = initProtoBuffer(address)
let f = subProto.getField(1, addressInfo.address)
if f.get(false):
record.addresses &= addressInfo
record.addresses &= addressInfo
if record.addresses.len == 0:
return err(ProtoError.RequiredFieldMissing)
@@ -69,24 +63,25 @@ proc encode*(record: PeerRecord): seq[byte] =
pb.finish()
pb.buffer
proc init*(T: typedesc[PeerRecord],
peerId: PeerId,
addresses: seq[MultiAddress],
seqNo = getTime().toUnix().uint64 # follows the recommended implementation, using unix epoch as seq no.
): T =
proc init*(
T: typedesc[PeerRecord],
peerId: PeerId,
addresses: seq[MultiAddress],
seqNo = getTime().toUnix().uint64,
# follows the recommended implementation, using unix epoch as seq no.
): T =
PeerRecord(
peerId: peerId,
seqNo: seqNo,
addresses: addresses.mapIt(AddressInfo(address: it))
peerId: peerId, seqNo: seqNo, addresses: addresses.mapIt(AddressInfo(address: it))
)
## Functions related to signed peer records
type SignedPeerRecord* = SignedPayload[PeerRecord]
proc payloadDomain*(T: typedesc[PeerRecord]): string = $multiCodec("libp2p-peer-record")
proc payloadType*(T: typedesc[PeerRecord]): seq[byte] = @[(byte) 0x03, (byte) 0x01]
proc payloadDomain*(T: typedesc[PeerRecord]): string =
$multiCodec("libp2p-peer-record")
proc payloadType*(T: typedesc[PeerRecord]): seq[byte] =
@[(byte) 0x03, (byte) 0x01]
proc checkValid*(spr: SignedPeerRecord): Result[void, EnvelopeError] =
if not spr.data.peerId.match(spr.envelope.publicKey):
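
The record above round-trips through protobuf with the peer id and a monotonically increasing seqNo; `init` defaults seqNo to the unix timestamp so that fresher records supersede stale ones. A hedged round-trip sketch (module paths assumed from this file's imports):

import libp2p/crypto/crypto
import libp2p/[multiaddress, peerid, routing_record] # assumed paths

let
  rng = newRng()
  key = PrivateKey.random(Ed25519, rng[]).tryGet()
  pid = PeerId.init(key).tryGet()
  addrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/4040").tryGet()]
  rec = PeerRecord.init(pid, addrs) # seqNo defaults to getTime().toUnix()

# decode() requires peerId, seqNo and at least one address.
doAssert PeerRecord.decode(rec.encode()).tryGet().peerId == pid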

View File

@@ -10,14 +10,13 @@
{.push raises: [].}
import chronos, chronicles, times, tables, sequtils
import ../switch,
../protocols/connectivity/relay/[client, utils]
import ../switch, ../protocols/connectivity/relay/[client, utils]
logScope:
topics = "libp2p autorelay"
type
OnReservationHandler = proc (addresses: seq[MultiAddress]) {.gcsafe, raises: [].}
OnReservationHandler = proc(addresses: seq[MultiAddress]) {.gcsafe, raises: [].}
AutoRelayService* = ref object of Service
running: bool
@@ -36,16 +35,17 @@ proc isRunning*(self: AutoRelayService): bool =
return self.running
proc addressMapper(
self: AutoRelayService,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
self: AutoRelayService, listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
return concat(toSeq(self.relayAddresses.values)) & listenAddrs
proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch) {.async.} =
proc reserveAndUpdate(
self: AutoRelayService, relayPid: PeerId, switch: Switch
) {.async.} =
while self.running:
let
rsvp = await self.client.reserve(relayPid).wait(chronos.seconds(5))
relayedAddr = rsvp.addrs.mapIt(
MultiAddress.init($it & "/p2p-circuit").tryGet())
relayedAddr = rsvp.addrs.mapIt(MultiAddress.init($it & "/p2p-circuit").tryGet())
ttl = rsvp.expire.int64 - times.now().utc.toTime.toUnix
if ttl <= 60:
# A reservation under a minute is basically useless
@@ -59,7 +59,9 @@ proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch)
await sleepAsync chronos.seconds(ttl - 30)
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
self.addressMapper = proc(
listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, listenAddrs)
let hasBeenSetUp = await procCall Service(self).setup(switch)
@@ -68,10 +70,12 @@ method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
trace "Peer Joined", peerId
if self.relayPeers.len < self.numRelays:
self.peerAvailable.fire()
proc handlePeerLeft(peerId: PeerId, event: PeerEvent) {.async.} =
trace "Peer Left", peerId
self.relayPeers.withValue(peerId, future):
future[].cancel()
switch.addPeerEventHandler(handlePeerJoined, Joined)
switch.addPeerEventHandler(handlePeerLeft, Left)
switch.peerInfo.addressMappers.add(self.addressMapper)
@@ -100,9 +104,10 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
# Get all connected relayPeers
self.peerAvailable.clear()
var connectedPeers = switch.connectedPeers(Direction.Out)
connectedPeers.keepItIf(RelayV2HopCodec in switch.peerStore[ProtoBook][it] and
it notin self.relayPeers and
it notin self.backingOff)
connectedPeers.keepItIf(
RelayV2HopCodec in switch.peerStore[ProtoBook][it] and it notin self.relayPeers and
it notin self.backingOff
)
self.rng.shuffle(connectedPeers)
for relayPid in connectedPeers:
@@ -135,13 +140,17 @@ method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
proc getAddresses*(self: AutoRelayService): seq[MultiAddress] =
result = concat(toSeq(self.relayAddresses.values))
proc new*(T: typedesc[AutoRelayService],
numRelays: int,
client: RelayClient,
onReservation: OnReservationHandler,
rng: ref HmacDrbgContext): T =
T(numRelays: numRelays,
proc new*(
T: typedesc[AutoRelayService],
numRelays: int,
client: RelayClient,
onReservation: OnReservationHandler,
rng: ref HmacDrbgContext,
): T =
T(
numRelays: numRelays,
client: client,
onReservation: onReservation,
peerAvailable: newAsyncEvent(),
rng: rng)
rng: rng,
)
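
reserveAndUpdate's refresh policy is easy to miss in the reflow: reservations under 60 seconds are treated as useless, and anything longer is renewed 30 seconds before expiry. The decision extracted into a pure helper (a sketch; the name is not from the codebase):

import chronos
import stew/results

proc renewalDelay(expireUnix, nowUnix: int64): Opt[Duration] =
  let ttl = expireUnix - nowUnix
  if ttl <= 60:
    Opt.none(Duration) # under a minute: drop the reservation and back off
  else:
    Opt.some(chronos.seconds(ttl - 30)) # refresh shortly before expiry

doAssert renewalDelay(100, 50).isNone()
doAssert renewalDelay(1000, 0).get() == chronos.seconds(970)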

View File

@@ -24,17 +24,22 @@ import ../multicodec
logScope:
topics = "libp2p hpservice"
type
HPService* = ref object of Service
newConnectedPeerHandler: PeerEventHandler
onNewStatusHandler: StatusAndConfidenceHandler
autoRelayService: AutoRelayService
autonatService: AutonatService
type HPService* = ref object of Service
newConnectedPeerHandler: PeerEventHandler
onNewStatusHandler: StatusAndConfidenceHandler
autoRelayService: AutoRelayService
autonatService: AutonatService
proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayService: AutoRelayService): T =
proc new*(
T: typedesc[HPService],
autonatService: AutonatService,
autoRelayService: AutoRelayService,
): T =
return T(autonatService: autonatService, autoRelayService: autoRelayService)
proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Future[bool] {.async.} =
proc tryStartingDirectConn(
self: HPService, switch: Switch, peerId: PeerId
): Future[bool] {.async.} =
proc tryConnect(address: MultiAddress): Future[bool] {.async.} =
debug "Trying to create direct connection", peerId, address
await switch.connect(peerId, @[address], true, false)
@@ -56,7 +61,9 @@ proc closeRelayConn(relayedConn: Connection) {.async.} =
await sleepAsync(2000.milliseconds) # grace period before closing relayed connection
await relayedConn.close()
proc newConnectedPeerHandler(self: HPService, switch: Switch, peerId: PeerId, event: PeerEvent) {.async.} =
proc newConnectedPeerHandler(
self: HPService, switch: Switch, peerId: PeerId, event: PeerEvent
) {.async.} =
try:
# Get all connections to the peer. If there is at least one non-relayed connection, return.
let connections = switch.connManager.getConnections()[peerId].mapIt(it.connection)
@@ -75,7 +82,8 @@ proc newConnectedPeerHandler(self: HPService, switch: Switch, peerId: PeerId, ev
let dcutrClient = DcutrClient.new()
var natAddrs = switch.peerStore.getMostObservedProtosAndPorts()
if natAddrs.len == 0:
natAddrs = switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it))
natAddrs =
switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it))
await dcutrClient.startSync(switch, peerId, natAddrs)
await closeRelayConn(relayedConn)
except CatchableError as err:
@@ -89,15 +97,21 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
let dcutrProto = Dcutr.new(switch)
switch.mount(dcutrProto)
self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent) {.async.} =
self.newConnectedPeerHandler = proc(peerId: PeerId, event: PeerEvent) {.async.} =
await newConnectedPeerHandler(self, switch, peerId, event)
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
switch.connManager.addPeerEventHandler(
self.newConnectedPeerHandler, PeerEventKind.Joined
)
self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and not self.autoRelayService.isRunning():
self.onNewStatusHandler = proc(
networkReachability: NetworkReachability, confidence: Opt[float]
) {.async.} =
if networkReachability == NetworkReachability.NotReachable and
not self.autoRelayService.isRunning():
discard await self.autoRelayService.setup(switch)
elif networkReachability == NetworkReachability.Reachable and self.autoRelayService.isRunning():
elif networkReachability == NetworkReachability.Reachable and
self.autoRelayService.isRunning():
discard await self.autoRelayService.stop(switch)
# We do it here instead of in the AutonatService because this is useful only when hole punching.
@@ -113,4 +127,6 @@ method run*(self: HPService, switch: Switch) {.async, public.} =
method stop*(self: HPService, switch: Switch): Future[bool] {.async, public.} =
discard await self.autonatService.stop(switch)
if not isNil(self.newConnectedPeerHandler):
switch.connManager.removePeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
switch.connManager.removePeerEventHandler(
self.newConnectedPeerHandler, PeerEventKind.Joined
)

View File

@@ -13,18 +13,15 @@
import std/sugar
import pkg/stew/[results, byteutils]
import multicodec,
crypto/crypto,
protobuf/minprotobuf,
vbuffer
import multicodec, crypto/crypto, protobuf/minprotobuf, vbuffer
export crypto
type
EnvelopeError* = enum
EnvelopeInvalidProtobuf,
EnvelopeFieldMissing,
EnvelopeInvalidSignature,
EnvelopeInvalidProtobuf
EnvelopeFieldMissing
EnvelopeInvalidSignature
EnvelopeWrongType
Envelope* = object
@@ -35,11 +32,9 @@ type
signature*: Signature
proc mapProtobufError(e: ProtoError): EnvelopeError =
case e:
of RequiredFieldMissing:
EnvelopeFieldMissing
else:
EnvelopeInvalidProtobuf
case e
of RequiredFieldMissing: EnvelopeFieldMissing
else: EnvelopeInvalidProtobuf
proc getSignatureBuffer(e: Envelope): seq[byte] =
var buffer = initVBuffer()
@@ -48,40 +43,42 @@ proc getSignatureBuffer(e: Envelope): seq[byte] =
buffer.writeSeq(domainBytes)
buffer.writeSeq(e.payloadType)
buffer.writeSeq(e.payload)
buffer.buffer
proc decode*(T: typedesc[Envelope],
buf: seq[byte],
domain: string): Result[Envelope, EnvelopeError] =
proc decode*(
T: typedesc[Envelope], buf: seq[byte], domain: string
): Result[Envelope, EnvelopeError] =
let pb = initProtoBuffer(buf)
var envelope = Envelope()
envelope.domain = domain
? pb.getRequiredField(1, envelope.publicKey).mapErr(mapProtobufError)
discard ? pb.getField(2, envelope.payloadType).mapErr(mapProtobufError)
? pb.getRequiredField(3, envelope.payload).mapErr(mapProtobufError)
? pb.getRequiredField(5, envelope.signature).mapErr(mapProtobufError)
?pb.getRequiredField(1, envelope.publicKey).mapErr(mapProtobufError)
discard ?pb.getField(2, envelope.payloadType).mapErr(mapProtobufError)
?pb.getRequiredField(3, envelope.payload).mapErr(mapProtobufError)
?pb.getRequiredField(5, envelope.signature).mapErr(mapProtobufError)
if envelope.signature.verify(envelope.getSignatureBuffer(), envelope.publicKey) == false:
if envelope.signature.verify(envelope.getSignatureBuffer(), envelope.publicKey) ==
false:
err(EnvelopeInvalidSignature)
else:
ok(envelope)
proc init*(T: typedesc[Envelope],
proc init*(
T: typedesc[Envelope],
privateKey: PrivateKey,
payloadType: seq[byte],
payload: seq[byte],
domain: string): Result[Envelope, CryptoError] =
domain: string,
): Result[Envelope, CryptoError] =
var envelope = Envelope(
publicKey: ? privateKey.getPublicKey(),
publicKey: ?privateKey.getPublicKey(),
domain: domain,
payloadType: payloadType,
payload: payload,
)
envelope.signature = ? privateKey.sign(envelope.getSignatureBuffer())
envelope.signature = ?privateKey.sign(envelope.getSignatureBuffer())
ok(envelope)
@@ -103,66 +100,61 @@ proc payload*(env: Envelope): seq[byte] =
# Payload is readonly
env.payload
proc getField*(pb: ProtoBuffer, field: int,
value: var Envelope,
domain: string): ProtoResult[bool] {.
inline.} =
proc getField*(
pb: ProtoBuffer, field: int, value: var Envelope, domain: string
): ProtoResult[bool] {.inline.} =
var buffer: seq[byte]
let res = ? pb.getField(field, buffer)
if not(res):
let res = ?pb.getField(field, buffer)
if not (res):
ok(false)
else:
value = Envelope.decode(buffer, domain).valueOr: return err(ProtoError.IncorrectBlob)
value = Envelope.decode(buffer, domain).valueOr:
return err(ProtoError.IncorrectBlob)
ok(true)
proc write*(pb: var ProtoBuffer, field: int, env: Envelope): Result[void, CryptoError] =
let e = ? env.encode()
let e = ?env.encode()
pb.write(field, e)
ok()
type
SignedPayload*[T] = object
# T needs to have .encode(), .decode(), .payloadType(), .domain()
envelope*: Envelope
data*: T
type SignedPayload*[T] = object
# T needs to have .encode(), .decode(), .payloadType(), .domain()
envelope*: Envelope
data*: T
proc init*[T](_: typedesc[SignedPayload[T]],
privateKey: PrivateKey,
data: T): Result[SignedPayload[T], CryptoError] =
proc init*[T](
_: typedesc[SignedPayload[T]], privateKey: PrivateKey, data: T
): Result[SignedPayload[T], CryptoError] =
mixin encode
let envelope = ? Envelope.init(privateKey,
T.payloadType(),
data.encode(),
T.payloadDomain)
let envelope =
?Envelope.init(privateKey, T.payloadType(), data.encode(), T.payloadDomain)
ok(SignedPayload[T](data: data, envelope: envelope))
proc getField*[T](pb: ProtoBuffer, field: int,
value: var SignedPayload[T]): ProtoResult[bool] {.
inline.} =
if not ? getField(pb, field, value.envelope, T.payloadDomain):
proc getField*[T](
pb: ProtoBuffer, field: int, value: var SignedPayload[T]
): ProtoResult[bool] {.inline.} =
if not ?getField(pb, field, value.envelope, T.payloadDomain):
ok(false)
else:
mixin decode
value.data = ? T.decode(value.envelope.payload).mapErr(x => ProtoError.IncorrectBlob)
value.data = ?T.decode(value.envelope.payload).mapErr(x => ProtoError.IncorrectBlob)
ok(true)
proc decode*[T](
_: typedesc[SignedPayload[T]],
buffer: seq[byte]
): Result[SignedPayload[T], EnvelopeError] =
_: typedesc[SignedPayload[T]], buffer: seq[byte]
): Result[SignedPayload[T], EnvelopeError] =
let
envelope = ? Envelope.decode(buffer, T.payloadDomain)
data = ? T.decode(envelope.payload).mapErr(x => EnvelopeInvalidProtobuf)
envelope = ?Envelope.decode(buffer, T.payloadDomain)
data = ?T.decode(envelope.payload).mapErr(x => EnvelopeInvalidProtobuf)
signedPayload = SignedPayload[T](envelope: envelope, data: data)
if envelope.payloadType != T.payloadType:
return err(EnvelopeWrongType)
when compiles(? signedPayload.checkValid()):
? signedPayload.checkValid()
when compiles(?signedPayload.checkValid()):
?signedPayload.checkValid()
ok(signedPayload)
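
SignedPayload[T] works for any T exposing the encode/decode plus payloadType/payloadDomain hooks noted above. A sketch with a hypothetical record type (not part of this commit):

import stew/results
import libp2p/[crypto/crypto, signed_envelope]

type Greeting = object # hypothetical record type
  msg: string

proc payloadDomain*(T: typedesc[Greeting]): string =
  "greeting-domain-v1" # hypothetical domain

proc payloadType*(T: typedesc[Greeting]): seq[byte] =
  @[byte 0x47] # hypothetical type tag

proc encode*(g: Greeting): seq[byte] =
  cast[seq[byte]](g.msg)

proc decode*(T: typedesc[Greeting], buf: seq[byte]): Result[Greeting, cstring] =
  ok(Greeting(msg: cast[string](buf)))

let
  rng = newRng()
  key = PrivateKey.random(rng[]).tryGet()
  signed = SignedPayload[Greeting].init(key, Greeting(msg: "hi")).tryGet()
  back = SignedPayload[Greeting].decode(signed.envelope.encode().tryGet()).tryGet()
doAssert back.data.msg == "hi"
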

View File

@@ -20,29 +20,32 @@ export connection
logScope:
topics = "libp2p bufferstream"
const
BufferStreamTrackerName* = "BufferStream"
const BufferStreamTrackerName* = "BufferStream"
type
BufferStream* = ref object of Connection
readQueue*: AsyncQueue[seq[byte]] # read queue for managing backpressure
readBuf*: StreamSeq # overflow buffer for readOnce
pushing*: bool # is there an ongoing push? (only allow one)
reading*: bool # is there an ongoing read? (only allow one)
pushedEof*: bool # eof marker has been put on readQueue
returnedEof*: bool # 0-byte readOnce has been completed
type BufferStream* = ref object of Connection
readQueue*: AsyncQueue[seq[byte]] # read queue for managing backpressure
readBuf*: StreamSeq # overflow buffer for readOnce
pushing*: bool # is there an ongoing push? (only allow one)
reading*: bool # is there an ongoing read? (only allow one)
pushedEof*: bool # eof marker has been put on readQueue
returnedEof*: bool # 0-byte readOnce has been completed
func shortLog*(s: BufferStream): auto =
try:
if s == nil: "BufferStream(nil)"
else: &"{shortLog(s.peerId)}:{s.oid}"
if s == nil:
"BufferStream(nil)"
else:
&"{shortLog(s.peerId)}:{s.oid}"
except ValueError as exc:
raiseAssert(exc.msg)
chronicles.formatIt(BufferStream): shortLog(it)
chronicles.formatIt(BufferStream):
shortLog(it)
proc len*(s: BufferStream): int =
s.readBuf.len + (if s.readQueue.len > 0: s.readQueue[0].len() else: 0)
s.readBuf.len + (if s.readQueue.len > 0: s.readQueue[0].len()
else: 0
)
method initStream*(s: BufferStream) =
if s.objName.len == 0:
@@ -54,16 +57,13 @@ method initStream*(s: BufferStream) =
trace "BufferStream created", s
proc new*(
T: typedesc[BufferStream],
timeout: Duration = DefaultConnectionTimeout): T =
proc new*(T: typedesc[BufferStream], timeout: Duration = DefaultConnectionTimeout): T =
let bufferStream = T(timeout: timeout)
bufferStream.initStream()
bufferStream
method pushData*(
s: BufferStream,
data: seq[byte]
s: BufferStream, data: seq[byte]
) {.base, async: (raises: [CancelledError, LPStreamError]).} =
## Write bytes to the internal read buffer; use this to fill the
## buffer with data.
@@ -71,8 +71,7 @@ method pushData*(
## `pushData` will block if the queue is full, thus maintaining backpressure.
##
doAssert(not s.pushing,
"Only one concurrent push allowed for stream " & s.shortLog())
doAssert(not s.pushing, "Only one concurrent push allowed for stream " & s.shortLog())
if s.isClosed or s.pushedEof:
raise newLPStreamClosedError()
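
A sketch of the push/read flow this guards (not part of this commit); the single-chunk interleaving is only for brevity, a real producer and consumer would run as separate tasks:

import chronos
import libp2p/stream/bufferstream

proc demo() {.async.} =
  let stream = BufferStream.new()

  # pushData suspends once the internal queue is full, which is the
  # backpressure described above
  await stream.pushData(@[byte 1, 2, 3])

  var buf = newSeq[byte](3)
  let n = await stream.readOnce(addr buf[0], buf.len)
  doAssert n == 3 and buf == @[byte 1, 2, 3]

  await stream.pushEof() # subsequent reads will report EOF
  await stream.close()

waitFor demo()
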
@@ -95,8 +94,7 @@ method pushEof*(
if s.pushedEof:
return
doAssert(not s.pushing,
"Only one concurrent push allowed for stream " & s.shortLog())
doAssert(not s.pushing, "Only one concurrent push allowed for stream " & s.shortLog())
s.pushedEof = true
@@ -113,19 +111,15 @@ method atEof*(s: BufferStream): bool =
s.isEof and s.readBuf.len == 0
method readOnce*(
s: BufferStream,
pbytes: pointer,
nbytes: int
s: BufferStream, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
doAssert(nbytes > 0, "nbytes must be positive integer")
doAssert(not s.reading,
"Only one concurrent read allowed for stream " & s.shortLog())
doAssert(not s.reading, "Only one concurrent read allowed for stream " & s.shortLog())
if s.returnedEof:
raise newLPStreamEOFError()
var
p = cast[ptr UncheckedArray[byte]](pbytes)
var p = cast[ptr UncheckedArray[byte]](pbytes)
# First consume leftovers from previous read
var rbytes = s.readBuf.consumeTo(toOpenArray(p, 0, nbytes - 1))
@@ -149,7 +143,7 @@ method readOnce*(
s.isEof = true
else:
let remaining = min(buf.len, nbytes - rbytes)
toOpenArray(p, rbytes, nbytes - 1)[0..<remaining] =
toOpenArray(p, rbytes, nbytes - 1)[0 ..< remaining] =
buf.toOpenArray(0, remaining - 1)
rbytes += remaining
@@ -171,8 +165,7 @@ method readOnce*(
return rbytes
method closeImpl*(
s: BufferStream): Future[void] {.async: (raises: [], raw: true).} =
method closeImpl*(s: BufferStream): Future[void] {.async: (raises: [], raw: true).} =
## close the stream and clear the buffer
trace "Closing BufferStream", s, len = s.len
@@ -199,7 +192,7 @@ method closeImpl*(
# Reading | Push Eof | Na
# Pushing | Na | Pop
try:
if not(s.reading and s.pushing):
if not (s.reading and s.pushing):
if s.reading:
if s.readQueue.empty():
# There is an active reader

View File

@@ -24,31 +24,29 @@ const
DefaultChronosStreamTimeout = 10.minutes
ChronosStreamTrackerName* = "ChronosStream"
type
ChronosStream* = ref object of Connection
client: StreamTransport
when defined(libp2p_agents_metrics):
tracked: bool
type ChronosStream* = ref object of Connection
client: StreamTransport
when defined(libp2p_agents_metrics):
tracked: bool
when defined(libp2p_agents_metrics):
declareGauge libp2p_peers_identity,
"peers identities", labels = ["agent"]
declareCounter libp2p_peers_traffic_read,
"incoming traffic", labels = ["agent"]
declareCounter libp2p_peers_traffic_write,
"outgoing traffic", labels = ["agent"]
declareGauge libp2p_peers_identity, "peers identities", labels = ["agent"]
declareCounter libp2p_peers_traffic_read, "incoming traffic", labels = ["agent"]
declareCounter libp2p_peers_traffic_write, "outgoing traffic", labels = ["agent"]
declareCounter libp2p_network_bytes,
"total traffic", labels = ["direction"]
declareCounter libp2p_network_bytes, "total traffic", labels = ["direction"]
func shortLog*(conn: ChronosStream): auto =
try:
if conn == nil: "ChronosStream(nil)"
else: &"{shortLog(conn.peerId)}:{conn.oid}"
if conn == nil:
"ChronosStream(nil)"
else:
&"{shortLog(conn.peerId)}:{conn.oid}"
except ValueError as exc:
raiseAssert(exc.msg)
chronicles.formatIt(ChronosStream): shortLog(it)
chronicles.formatIt(ChronosStream):
shortLog(it)
method initStream*(s: ChronosStream) =
if s.objName.len == 0:
@@ -65,11 +63,9 @@ proc init*(
client: StreamTransport,
dir: Direction,
timeout = DefaultChronosStreamTimeout,
observedAddr: Opt[MultiAddress]): ChronosStream =
result = C(client: client,
timeout: timeout,
dir: dir,
observedAddr: observedAddr)
observedAddr: Opt[MultiAddress],
): ChronosStream =
result = C(client: client, timeout: timeout, dir: dir, observedAddr: observedAddr)
result.initStream()
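
A sketch of wrapping an outgoing chronos StreamTransport in a libp2p Connection via this constructor (not part of this commit); the address is a placeholder:

import chronos, stew/results
import libp2p/[multiaddress, stream/connection, stream/chronosstream]

proc dial() {.async.} =
  let transp = await connect(initTAddress("127.0.0.1:4040")) # placeholder address
  let conn = ChronosStream.init(
    client = transp, dir = Direction.Out, observedAddr = Opt.none(MultiAddress)
  )
  await conn.write(@[byte 1, 2, 3]) # goes through the completeWrite path below
  await conn.close() # also closes the wrapped transport

waitFor dial()
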
template withExceptions(body: untyped) =
@@ -100,9 +96,7 @@ when defined(libp2p_agents_metrics):
s.tracked = false
method readOnce*(
s: ChronosStream,
pbytes: pointer,
nbytes: int
s: ChronosStream, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
if s.atEof:
raise newLPStreamEOFError()
@@ -118,7 +112,7 @@ method readOnce*(
proc completeWrite(
s: ChronosStream,
fut: Future[int].Raising([TransportError, CancelledError]),
msgLen: int
msgLen: int,
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
withExceptions:
# StreamTransport will only return written < msg.len on fatal failures where
@@ -137,10 +131,8 @@ proc completeWrite(
libp2p_peers_traffic_write.inc(msgLen.int64, labelValues = [s.shortAgent])
method write*(
s: ChronosStream,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
s: ChronosStream, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
# Avoid a copy of msg being kept in the closure created by `{.async.}` as this
# drives up memory usage
if msg.len == 0:
@@ -161,8 +153,7 @@ method closed*(s: ChronosStream): bool =
method atEof*(s: ChronosStream): bool =
s.client.atEof()
method closeImpl*(
s: ChronosStream) {.async: (raises: []).} =
method closeImpl*(s: ChronosStream) {.async: (raises: []).} =
trace "Shutting down chronos stream", address = $s.client.remoteAddress(), s
if not s.client.closed():
@@ -176,4 +167,5 @@ method closeImpl*(
await procCall Connection(s).closeImpl()
method getWrapped*(s: ChronosStream): Connection = nil
method getWrapped*(s: ChronosStream): Connection =
nil

View File

@@ -12,10 +12,7 @@
import std/[hashes, oids, strformat]
import stew/results
import chronicles, chronos, metrics
import lpstream,
../multiaddress,
../peerinfo,
../errors
import lpstream, ../multiaddress, ../peerinfo, ../errors
export lpstream, peerinfo, errors, results
@@ -30,14 +27,14 @@ type
TimeoutHandler* = proc(): Future[void] {.async: (raises: []).}
Connection* = ref object of LPStream
activity*: bool # reset every time data is sent or received
timeout*: Duration # channel timeout if no activity
timerTaskFut: Future[void].Raising([]) # the current timer instance
activity*: bool # reset every time data is sent or received
timeout*: Duration # channel timeout if no activity
timerTaskFut: Future[void].Raising([]) # the current timer instance
timeoutHandler*: TimeoutHandler # timeout handler
peerId*: PeerId
observedAddr*: Opt[MultiAddress]
protocol*: string # protocol used by the connection, used as metrics tag
transportDir*: Direction # underlying transport (usually socket) direction
protocol*: string # protocol used by the connection, used as metrics tag
transportDir*: Direction # underlying transport (usually socket) direction
when defined(libp2p_agents_metrics):
shortAgent*: string
@@ -45,12 +42,15 @@ proc timeoutMonitor(s: Connection) {.async: (raises: []).}
func shortLog*(conn: Connection): string =
try:
if conn == nil: "Connection(nil)"
else: &"{shortLog(conn.peerId)}:{conn.oid}"
if conn == nil:
"Connection(nil)"
else:
&"{shortLog(conn.peerId)}:{conn.oid}"
except ValueError as exc:
raiseAssert(exc.msg)
chronicles.formatIt(Connection): shortLog(it)
chronicles.formatIt(Connection):
shortLog(it)
method initStream*(s: Connection) =
if s.objName.len == 0:
@@ -65,10 +65,9 @@ method initStream*(s: Connection) =
s.timerTaskFut = s.timeoutMonitor()
if s.timeoutHandler == nil:
s.timeoutHandler =
proc(): Future[void] {.async: (raises: [], raw: true).} =
trace "Idle timeout expired, closing connection", s
s.close()
s.timeoutHandler = proc(): Future[void] {.async: (raises: [], raw: true).} =
trace "Idle timeout expired, closing connection", s
s.close()
method closeImpl*(s: Connection): Future[void] {.async: (raises: []).} =
# Cleanup timeout timer
@@ -140,11 +139,14 @@ proc new*(
dir: Direction,
observedAddr: Opt[MultiAddress],
timeout: Duration = DefaultConnectionTimeout,
timeoutHandler: TimeoutHandler = nil): Connection =
result = C(peerId: peerId,
dir: dir,
timeout: timeout,
timeoutHandler: timeoutHandler,
observedAddr: observedAddr)
timeoutHandler: TimeoutHandler = nil,
): Connection =
result = C(
peerId: peerId,
dir: dir,
timeout: timeout,
timeoutHandler: timeoutHandler,
observedAddr: observedAddr,
)
result.initStream()
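
A sketch of this constructor with a custom idle-timeout handler (not part of this commit); the peer id and duration are placeholders. Unlike the default handler installed by initStream, which closes the connection, a custom handler decides its own policy:

import chronos, stew/results
import libp2p/[peerid, multiaddress, crypto/crypto, stream/connection]

proc onIdle(): Future[void] {.async: (raises: []).} =
  echo "connection idle, not closing" # custom policy

let
  rng = newRng()
  peerId = PeerId.init(PrivateKey.random(rng[]).tryGet()).tryGet()
  conn = Connection.new(
    peerId = peerId,
    dir = Direction.In,
    observedAddr = Opt.none(MultiAddress),
    timeout = 5.minutes, # placeholder idle timeout
    timeoutHandler = onIdle,
  )
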

Some files were not shown because too many files have changed in this diff.