From 55ee9199068ec1632b5b147eb0770cc7742ff5ed Mon Sep 17 00:00:00 2001
From: parazyd
Date: Mon, 26 Jun 2023 15:48:38 +0200
Subject: [PATCH] net: Perform full p2p code cleanup and improve certain pieces.

Notable changes:

* Rewritten transport protocols into Dialer and Listener (Nym is TODO)

  This greatly simplifies transport usage, as can be seen for example in
  src/rpc and generally around the p2p library. It also defines features
  for each transport (all of which are enabled by default). We drop the
  SOCKS client for Tor and Nym in favour of first-class support through
  the Arti Tor library and nym-sphinx/nym-websockets (to be used with
  nym-client).

* Outbound session healing

  The outbound session now polls and tries to fill all the requested slots
  more efficiently, and if needed activates peer discovery to find more
  peers when we can't connect to any known ones. Peers we are unable to
  connect to are dropped from our set. Additionally, transport mixing is
  enabled by default: when mixing is allowed and we use Tor, we can also
  connect to other transports that Tor can reach (e.g. tcp://).

* Unix socket transport dropped

  We haven't been using this and are not going down this path, so the code
  has been obsoleted and removed.

* TLS session verification

  We fully verify both server and client TLS certificates upon connection,
  so we can perform TLS 1.3 with forward secrecy.

* lilith pruning

  lilith now periodically prunes known peers from its sets if it is unable
  to connect to them.
---
 .gitignore | 1 +
 Cargo.lock | 2531 ++++++++++++++---
 Cargo.toml | 40 +-
 Makefile | 8 +-
 bin/darkfid/Cargo.toml | 3 +-
 bin/darkfid/src/main.rs | 40 +-
 bin/darkfid/src/rpc_tx.rs | 5 +-
 bin/darkfid/src/rpc_wallet.rs | 10 +-
 bin/darkirc/Cargo.toml | 2 +-
 bin/darkirc/darkirc_config.toml | 4 +-
 bin/darkirc/src/irc/server.rs | 4 +-
 bin/darkirc/src/main.rs | 3 +-
 bin/dnetview/src/main.rs | 4 +-
 bin/drk/Cargo.toml | 1 -
 bin/drk/src/main.rs | 4 +-
 bin/faucetd/src/main.rs | 18 +-
 bin/lilith/src/main.rs | 4 +
 src/consensus/block.rs | 32 +-
 src/consensus/proto/protocol_proposal.rs | 19 +-
 src/consensus/proto/protocol_sync.rs | 28 +-
 .../proto/protocol_sync_consensus.rs | 6 +-
 src/consensus/proto/protocol_tx.rs | 27 +-
 src/consensus/state.rs | 88 +-
 src/consensus/task/block_sync.rs | 8 +-
 src/consensus/task/consensus_sync.rs | 11 +-
 src/consensus/task/proposal.rs | 24 +-
 src/contract/dao/Cargo.toml | 2 +-
 src/contract/money/Cargo.toml | 2 +-
 src/dht/messages.rs | 37 +-
 src/dht/mod.rs | 17 +-
 src/dht/protocol.rs | 28 +-
 src/dht2/net_hashmap.rs | 12 +-
 src/error.rs | 36 +-
 src/event_graph/protocol_event.rs | 48 +-
 src/lib.rs | 2 +
 src/net/acceptor.rs | 140 +-
 src/net/channel.rs | 426 ++-
 src/net/connector.rs | 99 +-
 src/net/constants.rs | 74 -
 src/net/hosts.rs | 599 ++--
 src/net/message.rs | 185 +-
 src/net/message_subscriber.rs | 355 ++-
 src/net/mod.rs | 165 +-
 src/net/p2p.rs | 570 ++--
 src/net/protocol/mod.rs | 114 +-
 src/net/protocol/protocol_address.rs | 184 +-
 src/net/protocol/protocol_base.rs | 3 +-
 src/net/protocol/protocol_jobs_manager.rs | 59 +-
 src/net/protocol/protocol_ping.rs | 155 +-
 src/net/protocol/protocol_registry.rs | 40 +-
 src/net/protocol/protocol_seed.rs | 79 +-
 src/net/protocol/protocol_version.rs | 201 +-
 src/net/session/inbound_session.rs | 135 +-
 src/net/session/manual_session.rs | 219 +-
 src/net/session/mod.rs | 145 +-
 src/net/session/outbound_session.rs | 432 +--
 src/net/session/seedsync_session.rs | 197 +-
 src/net/settings.rs | 225 +-
src/net/transport.rs | 321 ++- src/net/transport/nym.rs | 139 +- src/net/transport/tcp.rs | 208 +- src/net/transport/{upgrade_tls.rs => tls.rs} | 34 +- src/net/transport/tor.rs | 318 +-- src/net/transport/unix.rs | 142 - src/rpc/client.rs | 62 +- src/rpc/server.rs | 70 +- src/system/stoppable_task.rs | 1 + src/system/subscriber.rs | 4 +- tests/network_transports.rs | 238 +- 69 files changed, 4894 insertions(+), 4553 deletions(-) delete mode 100644 src/net/constants.rs rename src/net/transport/{upgrade_tls.rs => tls.rs} (92%) delete mode 100644 src/net/transport/unix.rs diff --git a/.gitignore b/.gitignore index 2dbe9e824..a00879585 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ .#* *.profraw +/vendor/* /target/* /tmp/* diff --git a/Cargo.lock b/Cargo.lock index 13d46b720..1db992dd3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,7 +8,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" dependencies = [ - "gimli 0.27.2", + "gimli 0.27.3", ] [[package]] @@ -27,13 +27,25 @@ dependencies = [ "generic-array", ] +[[package]] +name = "aes" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", + "zeroize", +] + [[package]] name = "ahash" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom", + "getrandom 0.2.10", "once_cell", "version_check", ] @@ -60,9 +72,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4f263788a35611fba42eb41ff811c5d0360c58b97402570312a350736e2542e" +checksum = "56fc6cf8dc8c4158eed8649f9b8b0ea1518eb62b544fe9490d66fa0b349eafe9" [[package]] name = "alsa" @@ -71,7 +83,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8512c9117059663fb5606788fbca3619e2a91dac0e3fe516242eab1fa6be5e44" dependencies = [ "alsa-sys", - "bitflags", + "bitflags 1.3.2", "libc", "nix 0.24.3", ] @@ -86,6 +98,47 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "amplify" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26966af46e0d200e8bf2b7f16230997c1c3f2d141bc27ccc091c012ed527b58" +dependencies = [ + "amplify_derive", + "amplify_num", + "ascii", + "wasm-bindgen", +] + +[[package]] +name = "amplify_derive" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c87df0f28e6eb1f2d355f29ba6793fa9ca643967528609608d5cbd70bd68f9d1" +dependencies = [ + "amplify_syn", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "amplify_num" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddce3bc63e807ea02065e8d8b702695f3d302ae4158baddff8b0ce5c73947251" + +[[package]] +name = "amplify_syn" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7736fb8d473c0d83098b5bac44df6a561e20470375cd8bcae30516dc889fd62a" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -127,15 +180,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.0" +version = "1.0.1" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" +checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" [[package]] name = "anstyle-parse" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee" +checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" dependencies = [ "utf8parse", ] @@ -173,9 +226,58 @@ checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8868f09ff8cea88b079da74ae569d9b8c62a23c68c746240b704ee6f7525c89c" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" + +[[package]] +name = "arti-client" +version = "0.9.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "cfg-if", + "derive_builder_fork_arti", + "derive_more", + "directories", + "educe", + "fs-mistrust", + "futures", + "hostname-validator", + "humantime-serde", + "libc", + "pin-project", + "postage", + "safelog", + "serde", + "thiserror", + "tor-async-utils", + "tor-basic-utils", + "tor-cell", + "tor-chanmgr", + "tor-checkable", + "tor-circmgr", + "tor-config", + "tor-dirmgr", + "tor-error", + "tor-guardmgr", + "tor-hsclient", + "tor-hscrypto", + "tor-keymgr", + "tor-llcrypto", + "tor-netdir", + "tor-netdoc", + "tor-persist", + "tor-proto", + "tor-rtcompat", + "tracing", + "void", +] + +[[package]] +name = "ascii" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16" [[package]] name = "asn1-rs" @@ -237,6 +339,19 @@ dependencies = [ "futures-core", ] +[[package]] +name = "async-compression" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0122885821398cc923ece939e24d1056a2384ee719432397fa9db87230ff11" +dependencies = [ + "flate2", + "futures-core", + "futures-io", + "memchr", + "pin-project-lite", +] + [[package]] name = "async-executor" version = "1.5.1" @@ -345,7 +460,17 @@ checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.20", +] + +[[package]] +name = "async-rustls" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29479d362e242e320fa8f5c831940a5b83c1679af014068196cd20d4bf497b6b" +dependencies = [ + "futures-io", + "rustls", ] [[package]] @@ -390,31 +515,43 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.20", ] [[package]] -name = "async-tungstenite" -version = "0.22.2" +name = "async_executors" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce01ac37fdc85f10a43c43bc582cbd566720357011578a935761075f898baf58" +checksum = "c0b2463773401e1f684136f9cdb956cf611f22172472cf3f049e72123f59e359" dependencies = [ - "futures-io", + "async-std", + "blanket", + "futures-core", + "futures-task", "futures-util", - "log", - "pin-project-lite", - "tungstenite", + "pin-project", + "rustc_version 0.4.0", ] [[package]] -name = 
"atoi" -version = "1.0.0" +name = "asynchronous-codec" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c57d12312ff59c811c0643f4d80830505833c9ffaebd193d819392b265be8e" +checksum = "06a0daa378f5fd10634e44b0a29b2a87b890657658e072a30d6f26e57ddee182" dependencies = [ - "num-traits", + "bytes", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite", ] +[[package]] +name = "atomic" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" + [[package]] name = "atomic-waker" version = "1.1.1" @@ -453,6 +590,12 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + [[package]] name = "base64" version = "0.13.1" @@ -465,13 +608,19 @@ version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + [[package]] name = "bindgen" version = "0.64.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4243e6031260db77ede97ad86c27e501d646a27ab57b59a574f725d98ab1fb4" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr", "clang-sys", "lazy_static", @@ -506,6 +655,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbe3c979c178231552ecba20214a8272df4e09f232a87aef4320cf06539aded" + [[package]] name = "bitvec" version = "1.0.1" @@ -544,6 +699,26 @@ dependencies = [ "rayon", ] +[[package]] +name = "blanket" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b04ce3d2372d05d1ef4ea3fdf427da6ae3c17ca06d688a107b5344836276bc3" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "generic-array", +] + [[package]] name = "block-buffer" version = "0.10.4" @@ -568,6 +743,12 @@ dependencies = [ "log", ] +[[package]] +name = "bounded-vec-deque" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2225b558afc76c596898f5f1b3fc35cfce0eb1b13635cbd7d1b2a7177dc10ccd" + [[package]] name = "bridgetree" version = "0.3.0" @@ -592,6 +773,12 @@ version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +[[package]] +name = "by_address" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf8dba2868114ed769a1f2590fc9ae5eb331175b44313b6c9b922f8f7ca813d0" + [[package]] name = "bytecheck" version = "0.6.11" @@ -647,6 +834,11 @@ dependencies = [ "serde", ] +[[package]] +name = "caret" +version = "0.4.1" +source = 
"git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" + [[package]] name = "cargo-platform" version = "0.1.2" @@ -739,6 +931,7 @@ dependencies = [ "iana-time-zone", "js-sys", "num-traits", + "serde", "time 0.1.45", "wasm-bindgen", "winapi", @@ -774,7 +967,7 @@ checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "ansi_term", "atty", - "bitflags", + "bitflags 1.3.2", "strsim 0.8.0", "textwrap 0.11.0", "unicode-width", @@ -783,9 +976,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.3" +version = "4.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8f255e4b8027970e78db75e78831229c9815fdbfa67eb1a1b777a62e24b4a0" +checksum = "d9394150f5b4273a1763355bd1c2ec54cc5a2593f790587bcd6b2c947cfa9211" dependencies = [ "clap_builder", "clap_derive", @@ -794,13 +987,13 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.3" +version = "4.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd4f3c17c83b0ba34ffbc4f8bbd74f079413f747f84a6f89292f138057e36ab" +checksum = "9a78fbdd3cc2914ddf37ba444114bc7765bbdcb55ec9cbe6fa054f0137400717" dependencies = [ "anstream", "anstyle", - "bitflags", + "bitflags 1.3.2", "clap_lex", "strsim 0.10.0", ] @@ -811,7 +1004,7 @@ version = "4.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f6b5c519bab3ea61843a7923d074b04245624bb84a64a8c150f5deb014e388b" dependencies = [ - "clap 4.3.3", + "clap 4.3.8", ] [[package]] @@ -823,7 +1016,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.20", ] [[package]] @@ -841,6 +1034,18 @@ dependencies = [ "cc", ] +[[package]] +name = "coarsetime" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a90d114103adbc625300f346d4d09dfb4ab1c4a8df6868435dd903392ecf4354" +dependencies = [ + "libc", + "once_cell", + "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -883,6 +1088,20 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "config" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7" +dependencies = [ + "async-trait", + "lazy_static", + "nom", + "pathdiff", + "serde", + "toml 0.5.11", +] + [[package]] name = "console" version = "0.15.7" @@ -902,12 +1121,24 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed3d0b5ff30645a68f35ece8cea4556ca14ef8a1651455f789a099a0513532a6" +[[package]] +name = "const-oid" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" + [[package]] name = "constant_time_eq" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21a53c0a4d288377e7415b53dcfc3c04da5cdc2cc95c8d5ac178b58f0b861ad6" +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "core-foundation" version = "0.9.3" @@ -936,7 +1167,7 @@ version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2581bbab3b8ffc6fcbd550bf46c355135d16e9ff2a6ea032ad6b9bf1d7efe4fb" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-graphics-types", "foreign-types", @@ -945,13 +1176,12 @@ dependencies = [ [[package]] name = "core-graphics-types" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a68b68b3446082644c91ac778bf50cd4104bfb002b5a6a7c44cca5a2c70788b" +checksum = "2bb142d41022986c1d8ff29103a1411c8a3dfad3552f87a4f8dc50d61d4f4e33" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", - "foreign-types", "libc", ] @@ -973,7 +1203,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb17e2d1795b1996419648915df94bc7103c28f7b48062d7acf4652fc371b2ff" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation-sys 0.6.2", "coreaudio-sys", ] @@ -1027,9 +1257,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" +checksum = "03e69e28e9f7f77debdedbaafa2866e1de9ba56df55a8bd7cfc724c25a09987c" dependencies = [ "libc", ] @@ -1088,7 +1318,7 @@ dependencies = [ "cranelift-entity", "fxhash", "hashbrown 0.12.3", - "indexmap", + "indexmap 1.9.3", "log", "smallvec", ] @@ -1117,21 +1347,6 @@ version = "0.91.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "393bc73c451830ff8dbb3a07f61843d6cb41a084f9996319917c0b291ed785bb" -[[package]] -name = "crc" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" -dependencies = [ - "crc-catalog", -] - -[[package]] -name = "crc-catalog" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" - [[package]] name = "crc32fast" version = "1.3.2" @@ -1200,7 +1415,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e64e6c0fbe2c17357405f7c758c1ef960fce08bdfb2c03d88d2a18d7e09c4b67" dependencies = [ - "bitflags", + "bitflags 1.3.2", "crossterm_winapi", "libc", "mio", @@ -1225,6 +1440,18 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -1261,7 +1488,7 @@ dependencies = [ "chacha20", "chacha20poly1305", "salsa20", - "x25519-dalek", + "x25519-dalek 1.1.1", "xsalsa20poly1305", "zeroize", ] @@ -1293,6 +1520,15 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3b7eb4404b8195a9abb6356f4ac07d8ba267045c8d6d220ac4dc992e6cc75df" +[[package]] +name = "ctr" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher", +] + [[package]] name = "ctrlc" version = "3.4.0" @@ -1316,18 +1552,34 @@ dependencies = [ "zeroize", ] +[[package]] +name = 
"curve25519-dalek" +version = "4.0.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585" +dependencies = [ + "cfg-if", + "fiat-crypto", + "packed_simd_2", + "platforms", + "subtle", + "zeroize", +] + [[package]] name = "darkfi" version = "0.4.1" dependencies = [ + "arti-client", + "async-rustls", "async-std", "async-trait", - "async-tungstenite", "blake3", "bs58", "chrono", - "clap 4.3.3", + "clap 4.3.8", "crypto_api_chachapoly", + "curve25519-dalek 4.0.0-rc.2", "darkfi-derive", "darkfi-derive-internal", "darkfi-sdk", @@ -1335,13 +1587,11 @@ dependencies = [ "dashu", "easy-parallel", "ed25519-compact", - "fast-socks5", "futures", - "futures-rustls 0.24.0", "halo2_gadgets", "halo2_proofs", "hex", - "indexmap", + "indexmap 1.9.3", "indicatif", "ipnet", "iprange", @@ -1351,10 +1601,12 @@ dependencies = [ "log", "plotters", "prettytable-rs", - "rand", + "rand 0.8.5", "rcgen", "ripemd", + "rusqlite", "rustls-pemfile", + "semver 1.0.17", "serde", "serde_json", "simplelog", @@ -1362,11 +1614,10 @@ dependencies = [ "sled-overlay", "smol", "socket2 0.5.3", - "sqlx", "structopt", "structopt-toml", "thiserror", - "toml 0.7.4", + "toml 0.7.5", "url", "wasmer", "wasmer-compiler-singlepass", @@ -1387,10 +1638,10 @@ dependencies = [ "darkfi-money-contract", "darkfi-sdk", "darkfi-serial", - "getrandom", + "getrandom 0.2.10", "halo2_proofs", "log", - "rand", + "rand 0.8.5", "simplelog", "sled", "thiserror", @@ -1410,7 +1661,7 @@ dependencies = [ "darkfi-sdk", "darkfi-serial", "log", - "rand", + "rand 0.8.5", "simplelog", "sled", ] @@ -1426,13 +1677,12 @@ dependencies = [ "darkfi-money-contract", "darkfi-sdk", "darkfi-serial", - "getrandom", + "getrandom 0.2.10", "halo2_proofs", "log", - "rand", + "rand 0.8.5", "simplelog", "sled", - "sqlx", "thiserror", ] @@ -1442,7 +1692,7 @@ version = "0.4.1" dependencies = [ "darkfi-sdk", "darkfi-serial", - "getrandom", + "getrandom 0.2.10", "thiserror", ] @@ -1476,13 +1726,12 @@ dependencies = [ "darkfi-contract-test-harness", "darkfi-sdk", "darkfi-serial", - "getrandom", + "getrandom 0.2.10", "halo2_proofs", "log", - "rand", + "rand 0.8.5", "simplelog", "sled", - "sqlx", "thiserror", ] @@ -1502,9 +1751,9 @@ dependencies = [ "num-bigint", "num-traits", "pasta_curves", - "rand", + "rand 0.8.5", "rand_core 0.6.4", - "sha2", + "sha2 0.10.7", "subtle", "thiserror", ] @@ -1517,7 +1766,7 @@ dependencies = [ "darkfi-sdk", "halo2_gadgets", "pyo3", - "rand", + "rand 0.8.5", ] [[package]] @@ -1529,6 +1778,7 @@ dependencies = [ "darkfi-derive", "futures-lite", "pasta_curves", + "semver 1.0.17", "url", ] @@ -1551,7 +1801,6 @@ dependencies = [ "simplelog", "sled", "smol", - "sqlx", "structopt", "structopt-toml", "url", @@ -1561,20 +1810,20 @@ dependencies = [ name = "darkirc" version = "0.4.1" dependencies = [ + "async-rustls", "async-std", "async-trait", "bs58", "chrono", - "clap 4.3.3", + "clap 4.3.8", "crypto_box", "darkfi", "darkfi-serial", "easy-parallel", "futures", - "futures-rustls 0.24.0", "hex", "log", - "rand", + "rand 0.8.5", "ripemd", "rustls-pemfile", "serde", @@ -1586,7 +1835,7 @@ dependencies = [ "smol", "structopt", "structopt-toml", - "toml 0.7.4", + "toml 0.7.5", "url", ] @@ -1600,6 +1849,16 @@ dependencies = [ "darling_macro 0.10.2", ] +[[package]] +name = "darling" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +dependencies = [ + "darling_core 
0.14.4", + "darling_macro 0.14.4", +] + [[package]] name = "darling" version = "0.20.1" @@ -1624,6 +1883,20 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "darling_core" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn 1.0.109", +] + [[package]] name = "darling_core" version = "0.20.1" @@ -1634,7 +1907,8 @@ dependencies = [ "ident_case", "proc-macro2", "quote", - "syn 2.0.18", + "strsim 0.10.0", + "syn 2.0.20", ] [[package]] @@ -1648,6 +1922,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "darling_macro" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +dependencies = [ + "darling_core 0.14.4", + "quote", + "syn 1.0.109", +] + [[package]] name = "darling_macro" version = "0.20.1" @@ -1656,7 +1941,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core 0.20.1", "quote", - "syn 2.0.18", + "syn 2.0.20", ] [[package]] @@ -1700,7 +1985,7 @@ dependencies = [ "dashu-base", "dashu-int", "num-traits", - "rand", + "rand 0.8.5", "static_assertions", ] @@ -1714,7 +1999,7 @@ dependencies = [ "dashu-base", "num-order", "num-traits", - "rand", + "rand 0.8.5", "static_assertions", ] @@ -1742,7 +2027,7 @@ dependencies = [ "dashu-float", "dashu-int", "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -1771,10 +2056,32 @@ dependencies = [ "serde_json", "simplelog", "smol", - "toml 0.7.4", + "toml 0.7.5", "url", ] +[[package]] +name = "der" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +dependencies = [ + "const-oid", + "pem-rfc7468 0.6.0", + "zeroize", +] + +[[package]] +name = "der" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56acb310e15652100da43d130af8d97b509e95af61aab1c5a7939ef24337ee17" +dependencies = [ + "const-oid", + "pem-rfc7468 0.7.0", + "zeroize", +] + [[package]] name = "der-parser" version = "8.2.0" @@ -1825,6 +2132,50 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive_builder_core_fork_arti" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24c1b715c79be6328caa9a5e1a387a196ea503740f0722ec3dd8f67a9e72314d" +dependencies = [ + "darling 0.14.4", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder_fork_arti" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3eae24d595f4d0ecc90a9a5a6d11c2bd8dafe2375ec4a1ec63250e5ade7d228" +dependencies = [ + "derive_builder_macro_fork_arti", +] + +[[package]] +name = "derive_builder_macro_fork_arti" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69887769a2489cd946bf782eb2b1bb2cb7bc88551440c94a765d4f040c08ebf3" +dependencies = [ + "derive_builder_core_fork_arti", + "syn 1.0.109", +] + +[[package]] +name = "derive_more" +version = "0.99.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "rustc_version 0.4.0", + "syn 1.0.109", +] + 
[[package]] name = "dhtd" version = "0.4.1" @@ -1836,7 +2187,7 @@ dependencies = [ "darkfi-serial", "easy-parallel", "log", - "rand", + "rand 0.8.5", "simplelog", "smol", "url", @@ -1857,11 +2208,30 @@ version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer", + "block-buffer 0.10.4", + "const-oid", "crypto-common", "subtle", ] +[[package]] +name = "directories" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + [[package]] name = "dirs-next" version = "2.0.0" @@ -1872,6 +2242,18 @@ dependencies = [ "dirs-sys-next", ] +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + [[package]] name = "dirs-sys-next" version = "0.1.2" @@ -1891,7 +2273,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.20", ] [[package]] @@ -1909,12 +2291,12 @@ version = "0.4.1" dependencies = [ "async-channel", "async-std", - "clap 4.3.3", + "clap 4.3.8", "darkfi", "easy-parallel", "hex", "log", - "rand", + "rand 0.8.5", "serde", "serde_json", "simplelog", @@ -1926,10 +2308,10 @@ dependencies = [ ] [[package]] -name = "dotenvy" -version = "0.15.7" +name = "downcast-rs" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" [[package]] name = "drk" @@ -1939,7 +2321,7 @@ dependencies = [ "async-std", "blake3", "bs58", - "clap 4.3.3", + "clap 4.3.8", "clap_complete", "darkfi", "darkfi-dao-contract", @@ -1947,14 +2329,13 @@ dependencies = [ "darkfi-sdk", "darkfi-serial", "prettytable-rs", - "rand", + "rand 0.8.5", "rodio", "serde_json", "signal-hook", "signal-hook-async-std", "simplelog", "smol", - "sqlx", "url", ] @@ -1970,13 +2351,19 @@ dependencies = [ "wio", ] +[[package]] +name = "dyn-clone" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30" + [[package]] name = "dynasm" version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "add9a102807b524ec050363f09e06f1504214b0e1c7797f64261c891022dce8b" dependencies = [ - "bitflags", + "bitflags 1.3.2", "byteorder", "lazy_static", "proc-macro-error", @@ -1993,7 +2380,7 @@ checksum = "64fba5a42bd76a17cad4bfa00de168ee1cbfa06a5e8ce992ae880218c05641a9" dependencies = [ "byteorder", "dynasm", - "memmap2", + "memmap2 0.5.10", ] [[package]] @@ -2002,6 +2389,27 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4604923390fcaf8b65a1e10b430cc34a3f87958a3b35ebea978b529d776e001" +[[package]] +name = "ecdsa" +version = "0.14.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" +dependencies = [ + "der 0.6.1", + "elliptic-curve", + "rfc6979", + "signature 1.6.4", +] + +[[package]] +name = "ed25519" +version = "1.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" +dependencies = [ + "signature 1.6.4", +] + [[package]] name = "ed25519-compact" version = "2.0.4" @@ -2009,7 +2417,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a3d382e8464107391c8706b4c14b087808ecb909f6c15c34114bc42e53a9e4c" dependencies = [ "ct-codecs", - "getrandom", + "getrandom 0.2.10", +] + +[[package]] +name = "ed25519-dalek" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +dependencies = [ + "curve25519-dalek 3.2.0", + "ed25519", + "merlin", + "rand 0.7.3", + "serde", + "sha2 0.9.9", + "zeroize", +] + +[[package]] +name = "educe" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "079044df30bb07de7d846d41a184c4b00e66ebdac93ee459253474f3a47e50ae" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] @@ -2018,6 +2453,25 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +[[package]] +name = "elliptic-curve" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +dependencies = [ + "base16ct", + "crypto-bigint", + "der 0.6.1", + "digest 0.10.7", + "ff 0.12.1", + "generic-array", + "group 0.12.1", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "encode_unicode" version = "0.3.6" @@ -2050,6 +2504,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "enum-ordinalize" +version = "3.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4f76552f53cefc9a7f64987c3701b99d982f7690606fd67de1d09712fbf52f1" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.20", +] + [[package]] name = "enumset" version = "1.1.2" @@ -2068,9 +2535,15 @@ dependencies = [ "darling 0.20.1", "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.20", ] +[[package]] +name = "equivalent" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" + [[package]] name = "errno" version = "0.3.1" @@ -2114,17 +2587,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] -name = "fast-socks5" -version = "0.4.3" +name = "fallible-streaming-iterator" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba337793c1ee49731350a8d971d791651ed51d6e814ab4ddabd79c12b5366140" -dependencies = [ - "anyhow", - "async-std", - "futures", - "log", - "thiserror", -] +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" @@ -2149,7 +2615,8 @@ dependencies = [ "darkfi-serial", "easy-parallel", "log", - "rand", + "rand 0.8.5", + "rusqlite", "serde", "serde_json", 
"signal-hook", @@ -2157,7 +2624,6 @@ dependencies = [ "simplelog", "sled", "smol", - "sqlx", "structopt", "structopt-toml", "url", @@ -2172,6 +2638,16 @@ dependencies = [ "simd-adler32", ] +[[package]] +name = "ff" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "ff" version = "0.13.0" @@ -2183,6 +2659,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "fiat-crypto" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" + [[package]] name = "flate2" version = "1.0.26" @@ -2200,16 +2682,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7bad48618fdb549078c333a7a8528acb57af271d0433bdecd523eb620628364e" [[package]] -name = "flume" -version = "0.10.14" +name = "fluid-let" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577" -dependencies = [ - "futures-core", - "futures-sink", - "pin-project", - "spin 0.9.8", -] +checksum = "749cff877dc1af878a0b31a41dd221a753634401ea0ef2f87b62d3171522485a" [[package]] name = "fnv" @@ -2223,7 +2699,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21fe28504d371085fae9ac7a3450f0b289ab71e07c8e57baa3fb68b9e57d6ce5" dependencies = [ - "bitflags", + "bitflags 1.3.2", "byteorder", "core-foundation", "core-graphics", @@ -2287,6 +2763,22 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "fs-mistrust" +version = "0.7.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "derive_builder_fork_arti", + "dirs", + "educe", + "libc", + "once_cell", + "serde", + "thiserror", + "users", + "walkdir", +] + [[package]] name = "fs2" version = "0.4.3" @@ -2297,12 +2789,22 @@ dependencies = [ "winapi", ] +[[package]] +name = "fslock" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04412b8935272e3a9bae6f48c7bfff74c2911f60525404edfdd28e49884c3bfb" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "fu" version = "0.4.1" dependencies = [ "async-std", - "clap 4.3.3", + "clap 4.3.8", "darkfi", "log", "serde_json", @@ -2379,17 +2881,6 @@ dependencies = [ "futures-util", ] -[[package]] -name = "futures-intrusive" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5" -dependencies = [ - "futures-core", - "lock_api", - "parking_lot 0.11.2", -] - [[package]] name = "futures-io" version = "0.3.28" @@ -2419,18 +2910,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", -] - -[[package]] -name = "futures-rustls" -version = "0.22.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" -dependencies = [ - "futures-io", - "rustls 0.20.8", - "webpki", + "syn 2.0.20", ] [[package]] @@ -2440,7 +2920,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" 
dependencies = [ "futures-io", - "rustls 0.21.1", + "rustls", ] [[package]] @@ -2497,7 +2977,7 @@ name = "genev" version = "0.4.1" dependencies = [ "async-std", - "clap 4.3.3", + "clap 4.3.8", "darkfi", "darkfi-serial", "log", @@ -2527,6 +3007,17 @@ dependencies = [ "url", ] +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + [[package]] name = "getrandom" version = "0.2.10" @@ -2534,15 +3025,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] name = "gif" -version = "0.11.4" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3edd93c6756b4dfaf2709eafcc345ba2636565295c198a9cfbf75fa5e3e00b06" +checksum = "80792593675e051cf94a4b111980da2ba60d4a83e43e0048c5693baab3977045" dependencies = [ "color_quant", "weezl", @@ -2555,15 +3048,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" dependencies = [ "fallible-iterator", - "indexmap", + "indexmap 1.9.3", "stable_deref_trait", ] [[package]] name = "gimli" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" [[package]] name = "glob" @@ -2583,13 +3076,24 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff 0.12.1", + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff", + "ff 0.13.0", "rand_core 0.6.4", "subtle", ] @@ -2601,14 +3105,14 @@ source = "git+https://github.com/parazyd/halo2?branch=v3#481f00066dbfa5199195062 dependencies = [ "arrayvec", "bitvec", - "ff", - "group", + "ff 0.13.0", + "group 0.13.0", "halo2_proofs", "lazy_static", "pasta_curves", "plotters", "proptest", - "rand", + "rand 0.8.5", "subtle", "uint", ] @@ -2620,8 +3124,8 @@ source = "git+https://github.com/parazyd/halo2?branch=v3#481f00066dbfa5199195062 dependencies = [ "backtrace", "blake2b_simd", - "ff", - "group", + "ff 0.13.0", + "group 0.13.0", "maybe-rayon", "pasta_curves", "plotters", @@ -2672,9 +3176,6 @@ name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" -dependencies = [ - "unicode-segmentation", -] [[package]] name = "hermit-abi" @@ -2706,6 +3207,30 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hkdf" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +dependencies = [ + "hmac", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "hostname-validator" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f558a64ac9af88b5ba400d99b579451af0d39c6d360980045b91aac966d705e2" + [[package]] name = "http" version = "0.2.9" @@ -2723,6 +3248,28 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +[[package]] +name = "httpdate" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "humantime-serde" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" +dependencies = [ + "humantime", + "serde", +] + [[package]] name = "iana-time-zone" version = "0.1.57" @@ -2794,6 +3341,17 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "indexmap" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown 0.14.0", ] [[package]] @@ -2868,17 +3426,17 @@ dependencies = [ "async-trait", "bs58", "chrono", - "clap 4.3.3", + "clap 4.3.8", "crypto_box", "ctrlc", "darkfi", "darkfi-serial", "easy-parallel", "futures", - "futures-rustls 0.24.0", + "futures-rustls", "hex", "log", - "rand", + "rand 0.8.5", "ripemd", "rustls-pemfile", "serde", @@ -2887,7 +3445,7 @@ dependencies = [ "smol", "structopt", "structopt-toml", - "toml 0.7.4", + "toml 0.7.5", "url", ] @@ -2976,6 +3534,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "keccak" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +dependencies = [ + "cpufeatures", +] + [[package]] name = "kv-log-macro" version = "1.0.7" @@ -2991,7 +3558,7 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" dependencies = [ - "spin 0.5.2", + "spin", ] [[package]] @@ -3032,6 +3599,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "libm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" + [[package]] name = "libm" version = "0.2.7" @@ -3040,11 +3613,10 @@ checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "libsqlite3-sys" -version = "0.24.2" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "898745e570c7d0453cc1fbc4a701eb6c662ed54e8fec8b7d14be137ebeeb9d14" +checksum = 
"afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" dependencies = [ - "cc", "pkg-config", "vcpkg", ] @@ -3055,17 +3627,19 @@ version = "0.4.1" dependencies = [ "async-std", "async-trait", - "ctrlc", "darkfi", "easy-parallel", + "futures", "log", "serde", "serde_json", + "signal-hook", + "signal-hook-async-std", "simplelog", "smol", "structopt", "structopt-toml", - "toml 0.7.4", + "toml 0.7.5", "url", ] @@ -3137,6 +3711,15 @@ dependencies = [ "libc", ] +[[package]] +name = "memmap2" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180d4b35be83d33392d1d1bfbd2ae1eca7ff5de1a94d3fc87faaa99a069e7cbd" +dependencies = [ + "libc", +] + [[package]] name = "memoffset" version = "0.8.0" @@ -3155,6 +3738,18 @@ dependencies = [ "autocfg", ] +[[package]] +name = "merlin" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.5.1", + "zeroize", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -3224,10 +3819,10 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "451422b7e4718271c8b5b3aadf5adedba43dc76312454b387e98fae0fc951aa0" dependencies = [ - "bitflags", + "bitflags 1.3.2", "jni-sys", "ndk-sys", - "num_enum", + "num_enum 0.5.11", "raw-window-handle", "thiserror", ] @@ -3253,7 +3848,7 @@ version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "libc", ] @@ -3264,7 +3859,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "libc", "static_assertions", @@ -3291,6 +3886,23 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-bigint-dig" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2399c9463abc5f909349d8aa9ba080e0b88b3ce2885389b60b993f39b1a56905" +dependencies = [ + "byteorder", + "lazy_static", + "libm 0.2.7", + "num-integer", + "num-iter", + "num-traits", + "rand 0.8.5", + "smallvec", + "zeroize", +] + [[package]] name = "num-derive" version = "0.3.3" @@ -3312,6 +3924,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-modular" version = "0.5.1" @@ -3350,7 +3973,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", - "libm", + "libm 0.2.7", ] [[package]] @@ -3369,7 +3992,16 @@ version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9" dependencies = [ - "num_enum_derive", + "num_enum_derive 0.5.11", +] + +[[package]] +name = "num_enum" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1" +dependencies = [ + 
"num_enum_derive 0.6.1", ] [[package]] @@ -3384,6 +4016,18 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "num_enum_derive" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.20", +] + [[package]] name = "num_threads" version = "0.1.6" @@ -3458,6 +4102,44 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "p256" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +dependencies = [ + "ecdsa", + "elliptic-curve", + "sha2 0.10.7", +] + +[[package]] +name = "p384" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" +dependencies = [ + "ecdsa", + "elliptic-curve", + "sha2 0.10.7", +] + +[[package]] +name = "packed_simd_2" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1914cd452d8fccd6f9db48147b29fd4ae05bea9dc5d9ad578509f72415de282" +dependencies = [ + "cfg-if", + "libm 0.1.4", +] + [[package]] name = "parking" version = "2.1.0" @@ -3519,10 +4201,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" dependencies = [ "blake2b_simd", - "ff", - "group", + "ff 0.13.0", + "group 0.13.0", "lazy_static", - "rand", + "rand 0.8.5", "static_assertions", "subtle", ] @@ -3533,6 +4215,12 @@ version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" +[[package]] +name = "pathdiff" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" + [[package]] name = "pathfinder_geometry" version = "0.5.1" @@ -3549,7 +4237,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39fe46acc5503595e5949c17b818714d26fdf9b4920eacf3b2947f0199f4a6ff" dependencies = [ - "rustc_version", + "rustc_version 0.3.3", ] [[package]] @@ -3567,6 +4255,24 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem-rfc7468" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac" +dependencies = [ + "base64ct", +] + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.0" @@ -3575,14 +4281,56 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70" +checksum = "f73935e4d55e2abf7f130186537b19e7a4abc886a0252380b59248af473a3fc9" dependencies = [ "thiserror", "ucd-trie", ] +[[package]] +name = "phf" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_macros", + "phf_shared", +] + +[[package]] +name = "phf_generator" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +dependencies = [ + "phf_shared", + "rand 0.8.5", +] + +[[package]] +name = "phf_macros" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +dependencies = [ + "phf_generator", + "phf_shared", + "proc-macro2", + "quote", + "syn 2.0.20", +] + +[[package]] +name = "phf_shared" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.1.0" @@ -3600,7 +4348,7 @@ checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.20", ] [[package]] @@ -3615,6 +4363,49 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs1" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eff33bdbdfc54cc98a2eca766ebdec3e1b8fb7387523d5c9c9a2891da856f719" +dependencies = [ + "der 0.6.1", + "pkcs8 0.9.0", + "spki 0.6.0", + "zeroize", +] + +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der 0.7.6", + "pkcs8 0.10.2", + "spki 0.7.2", +] + +[[package]] +name = "pkcs8" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der 0.6.1", + "spki 0.6.0", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der 0.7.6", + "spki 0.7.2", +] + [[package]] name = "pkg-config" version = "0.3.27" @@ -3622,10 +4413,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] -name = "plotters" -version = "0.3.4" +name = "platforms" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" +checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" + +[[package]] +name = "plotters" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" dependencies = [ "chrono", "font-kit", @@ -3643,15 +4440,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = 
"0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" [[package]] name = "plotters-bitmap" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4a1f21490a6cf4a84c272ad20bd7844ed99a3178187a4c5ab7f2051295beef" +checksum = "0cebbe1f70205299abc69e8b295035bb52a6a70ee35474ad10011f0a4efb8543" dependencies = [ "gif", "image", @@ -3660,9 +4457,9 @@ dependencies = [ [[package]] name = "plotters-svg" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" dependencies = [ "plotters-backend", ] @@ -3673,7 +4470,7 @@ version = "0.17.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59871cc5b6cce7eaccca5a802b4173377a1c2ba90654246789a8fa2334426d11" dependencies = [ - "bitflags", + "bitflags 1.3.2", "crc32fast", "fdeflate", "flate2", @@ -3687,7 +4484,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg", - "bitflags", + "bitflags 1.3.2", "cfg-if", "concurrent-queue", "libc", @@ -3713,6 +4510,21 @@ version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "767eb9f07d4a5ebcb39bbf2d452058a93c011373abf6832e24194a1c3f004794" +[[package]] +name = "postage" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af3fb618632874fb76937c2361a7f22afd393c982a2165595407edc75b06d3c1" +dependencies = [ + "atomic", + "crossbeam-queue", + "futures", + "parking_lot 0.12.1", + "pin-project", + "static_assertions", + "thiserror", +] + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -3783,12 +4595,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" dependencies = [ "bit-set", - "bitflags", + "bitflags 1.3.2", "byteorder", "lazy_static", "num-traits", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_xorshift", "regex-syntax 0.6.29", "rusty-fork", @@ -3822,7 +4634,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a1a2f1f0a7ecff9c31abbe177637be0e97a0aef46cf8738ece09327985d998" dependencies = [ - "bitflags", + "bitflags 1.3.2", "memchr", "unicase", ] @@ -3908,6 +4720,19 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc", +] + [[package]] name = "rand" version = "0.8.5" @@ -3915,10 +4740,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", + "rand_chacha 0.3.1", "rand_core 0.6.4", ] +[[package]] +name = "rand_chacha" +version = "0.2.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] + [[package]] name = "rand_chacha" version = "0.3.1" @@ -3934,6 +4769,9 @@ name = "rand_core" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom 0.1.16", +] [[package]] name = "rand_core" @@ -3941,7 +4779,16 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.10", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", ] [[package]] @@ -3999,7 +4846,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -4008,7 +4855,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -4026,7 +4873,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom", + "getrandom 0.2.10", "redox_syscall 0.2.16", "thiserror", ] @@ -4072,7 +4919,7 @@ version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76e189c2369884dce920945e2ddf79b3dff49e071a167dd1817fa9c4c00d512e" dependencies = [ - "bitflags", + "bitflags 1.3.2", "libc", "mach", "winapi", @@ -4087,6 +4934,28 @@ dependencies = [ "bytecheck", ] +[[package]] +name = "retain_mut" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0" + +[[package]] +name = "retry-error" +version = "0.4.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" + +[[package]] +name = "rfc6979" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" +dependencies = [ + "crypto-bigint", + "hmac", + "zeroize", +] + [[package]] name = "ring" version = "0.16.20" @@ -4096,7 +4965,7 @@ dependencies = [ "cc", "libc", "once_cell", - "spin 0.5.2", + "spin", "untrusted", "web-sys", "winapi", @@ -4120,7 +4989,7 @@ dependencies = [ "bitvec", "bytecheck", "hashbrown 0.12.3", - "indexmap", + "indexmap 1.9.3", "ptr_meta", "rend", "rkyv_derive", @@ -4150,6 +5019,64 @@ dependencies = [ "minimp3", ] +[[package]] +name = "rsa" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "094052d5470cbcef561cb848a7209968c9f12dfa6d668f4bca048ac5de51099c" +dependencies = [ + "byteorder", + "digest 0.10.7", + "num-bigint-dig", + "num-integer", + "num-iter", + "num-traits", + "pkcs1 0.4.1", + "pkcs8 0.9.0", + "rand_core 0.6.4", + "signature 1.6.4", + "smallvec", + 
"subtle", + "zeroize", +] + +[[package]] +name = "rsa" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ab43bb47d23c1a631b4b680199a45255dce26fa9ab2fa902581f624ff13e6a8" +dependencies = [ + "byteorder", + "const-oid", + "digest 0.10.7", + "num-bigint-dig", + "num-integer", + "num-iter", + "num-traits", + "pkcs1 0.7.5", + "pkcs8 0.10.2", + "rand_core 0.6.4", + "signature 2.1.0", + "spki 0.7.2", + "subtle", + "zeroize", +] + +[[package]] +name = "rusqlite" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" +dependencies = [ + "bitflags 2.3.2", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", + "time 0.3.22", +] + [[package]] name = "rustc-demangle" version = "0.1.23" @@ -4171,6 +5098,15 @@ dependencies = [ "semver 0.11.0", ] +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.17", +] + [[package]] name = "rusticata-macros" version = "4.1.0" @@ -4186,7 +5122,7 @@ version = "0.37.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b96e891d04aa506a6d1f318d2771bcb1c7dfda84e126660ace067c9b474bb2c0" dependencies = [ - "bitflags", + "bitflags 1.3.2", "errno", "io-lifetimes", "libc", @@ -4196,21 +5132,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.8" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" -dependencies = [ - "log", - "ring", - "sct", - "webpki", -] - -[[package]] -name = "rustls" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e" +checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" dependencies = [ "log", "ring", @@ -4261,6 +5185,18 @@ version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" +[[package]] +name = "safelog" +version = "0.3.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "derive_more", + "educe", + "either", + "fluid-let", + "thiserror", +] + [[package]] name = "salsa20" version = "0.10.2" @@ -4279,6 +5215,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "sanitize-filename" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c502bdb638f1396509467cb0580ef3b29aa2a45c5d43e5d84928241280296c" +dependencies = [ + "lazy_static", + "regex", +] + [[package]] name = "scopeguard" version = "1.1.0" @@ -4301,6 +5247,20 @@ version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" +[[package]] +name = "sec1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +dependencies = [ + "base16ct", + "der 0.6.1", + "generic-array", + "pkcs8 0.9.0", + "subtle", + "zeroize", +] + [[package]] name = "semver" version = 
"0.11.0" @@ -4356,14 +5316,23 @@ checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.20", +] + +[[package]] +name = "serde_ignored" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94eb4a4087ba8bdf14a9208ac44fddbf55c01a6195f7edfc511ddaff6cae45a6" +dependencies = [ + "serde", ] [[package]] name = "serde_json" -version = "1.0.96" +version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" +checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3" dependencies = [ "itoa", "ryu", @@ -4372,13 +5341,41 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93107647184f6027e3b7dcb2e11034cf95ffa1e3a682c67951963ac69c1c007d" +checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" dependencies = [ "serde", ] +[[package]] +name = "serde_with" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f02d8aa6e3c385bf084924f660ce2a3a6bd333ba55b35e8590b321f35d88513" +dependencies = [ + "base64 0.21.2", + "chrono", + "hex", + "indexmap 1.9.3", + "serde", + "serde_json", + "serde_with_macros", + "time 0.3.22", +] + +[[package]] +name = "serde_with_macros" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edc7d5d3932fb12ce722ee5e64dd38c504efba37567f0c402f6ca728c3b8b070" +dependencies = [ + "darling 0.20.1", + "proc-macro2", + "quote", + "syn 2.0.20", +] + [[package]] name = "sha1" version = "0.10.5" @@ -4392,15 +5389,47 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.6" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures", + "digest 0.9.0", + "opaque-debug", +] + +[[package]] +name = "sha2" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if", "cpufeatures", "digest 0.10.7", ] +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + +[[package]] +name = "shellexpand" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da03fa3b94cc19e3ebfc88c4229c49d8f08cdbd1228870a45f0ffdf84988e14b" +dependencies = [ + "dirs", +] + [[package]] name = "shlex" version = "1.1.0" @@ -4449,6 +5478,26 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + +[[package]] +name = "signature" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +dependencies = [ + "digest 
0.10.7", + "rand_core 0.6.4", +] + [[package]] name = "simd-adler32" version = "0.3.5" @@ -4461,6 +5510,18 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" +[[package]] +name = "simple_asn1" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" +dependencies = [ + "num-bigint", + "num-traits", + "thiserror", + "time 0.3.22", +] + [[package]] name = "simplelog" version = "0.12.1" @@ -4472,6 +5533,12 @@ dependencies = [ "time 0.3.22", ] +[[package]] +name = "siphasher" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" + [[package]] name = "skeptic" version = "0.13.7" @@ -4538,6 +5605,15 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7" +[[package]] +name = "slotmap" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1e08e261d0e8f5c43123b7adf3e4ca1690d655377ac93a03b2c9d3e98de1342" +dependencies = [ + "version_check", +] + [[package]] name = "smallvec" version = "1.10.0" @@ -4594,107 +5670,52 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] -name = "spin" -version = "0.9.8" +name = "spki" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ - "lock_api", + "base64ct", + "der 0.6.1", ] [[package]] -name = "sqlformat" -version = "0.2.1" +name = "spki" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c12bc9199d1db8234678b7051747c07f517cdcf019262d1847b94ec8b1aee3e" +checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ - "itertools", - "nom", - "unicode_categories", + "base64ct", + "der 0.7.6", ] [[package]] -name = "sqlx" -version = "0.6.3" +name = "ssh-encoding" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8de3b03a925878ed54a954f621e64bf55a3c1bd29652d0d1a17830405350188" +checksum = "19cfdc32e0199062113edf41f344fbf784b8205a94600233c84eb838f45191e1" dependencies = [ - "sqlx-core", - "sqlx-macros", + "base64ct", + "pem-rfc7468 0.6.0", + "sha2 0.10.7", ] [[package]] -name = "sqlx-core" -version = "0.6.3" +name = "ssh-key" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa8241483a83a3f33aa5fff7e7d9def398ff9990b2752b6c6112b83c6d246029" +checksum = "288d8f5562af5a3be4bda308dd374b2c807b940ac370b5efa1c99311da91d9a1" dependencies = [ - "ahash 0.7.6", - "atoi", - "bitflags", - "byteorder", - "bytes", - "crc", - "crossbeam-queue", - "dotenvy", - "either", - "event-listener", - "flume", - "futures-channel", - "futures-core", - "futures-executor", - "futures-intrusive", - "futures-util", - "hashlink", - "hex", - "indexmap", - "itoa", - "libc", - "libsqlite3-sys", - "log", - "memchr", - "once_cell", - "paste", - "percent-encoding", - "rustls 0.20.8", - "rustls-pemfile", - "sha2", - "smallvec", - "sqlformat", - "sqlx-rt", 
- "stringprep", - "thiserror", - "url", - "webpki-roots", -] - -[[package]] -name = "sqlx-macros" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9966e64ae989e7e575b19d7265cb79d7fc3cbbdf179835cb0d716f294c2049c9" -dependencies = [ - "dotenvy", - "either", - "heck 0.4.1", - "once_cell", - "proc-macro2", - "quote", - "sha2", - "sqlx-core", - "sqlx-rt", - "syn 1.0.109", - "url", -] - -[[package]] -name = "sqlx-rt" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "804d3f245f894e61b1e6263c84b23ca675d96753b5abfd5cc8597d86806e8024" -dependencies = [ - "async-std", - "futures-rustls 0.22.2", + "ed25519-dalek", + "p256", + "p384", + "rand_core 0.6.4", + "rsa 0.7.2", + "sec1", + "sha2 0.10.7", + "signature 1.6.4", + "ssh-encoding", + "zeroize", ] [[package]] @@ -4709,16 +5730,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "stringprep" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "strsim" version = "0.8.0" @@ -4789,6 +5800,28 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "strum" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe9f3bd7d2e45dcc5e265fbb88d6513e4747d8ef9444cf01a533119bce28a157" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.20", +] + [[package]] name = "subtle" version = "2.5.0" @@ -4808,9 +5841,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.18" +version = "2.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" +checksum = "fcb8d4cebc40aa517dfb69618fa647a346562e67228e2236ae0042ee6ac14775" dependencies = [ "proc-macro2", "quote", @@ -4848,9 +5881,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.12.7" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd1ba337640d60c3e96bc6f0638a939b9c9a7f2c316a1598c279828b3d1dc8c5" +checksum = "1b1c7f239eb94671427157bd93b3694320f3668d4e1eff08c7285366fd777fac" [[package]] name = "tau" @@ -4858,7 +5891,7 @@ version = "0.4.1" dependencies = [ "async-std", "chrono", - "clap 4.3.3", + "clap 4.3.8", "colored", "darkfi", "log", @@ -4888,7 +5921,7 @@ dependencies = [ "hex", "libc", "log", - "rand", + "rand 0.8.5", "serde", "serde_json", "simplelog", @@ -4896,7 +5929,7 @@ dependencies = [ "structopt", "structopt-toml", "thiserror", - "toml 0.7.4", + "toml 0.7.5", "url", ] @@ -5004,7 +6037,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.20", ] [[package]] @@ -5047,6 +6080,15 @@ dependencies = [ "time-core", ] +[[package]] +name = "tinystr" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7ac3f5b6856e931e15e07b478e98c8045239829a65f9156d4fa7e7788197a5ef" +dependencies = [ + "displaydoc", +] + [[package]] name = "tinyvec" version = "1.6.0" @@ -5073,9 +6115,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6135d499e69981f9ff0ef2167955a5333c35e36f6937d382974566b3d5b94ec" +checksum = "1ebafdf5ad1220cb59e7d17cf4d2c72015297b75b19a10472f99b89225089240" dependencies = [ "serde", "serde_spanned", @@ -5085,26 +6127,678 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.10" +version = "0.19.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739" +checksum = "266f016b7f039eec8a1a80dfe6156b633d208b9fccca5e4db1d6775b0c4e34a7" dependencies = [ - "indexmap", + "indexmap 2.0.0", "serde", "serde_spanned", "toml_datetime", "winnow", ] +[[package]] +name = "tor-async-utils" +version = "0.1.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "futures", + "pin-project", + "postage", + "thiserror", + "void", +] + +[[package]] +name = "tor-basic-utils" +version = "0.7.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "hex", + "libc", + "paste", + "rand 0.8.5", + "rand_chacha 0.3.1", + "slab", + "thiserror", +] + +[[package]] +name = "tor-bytes" +version = "0.7.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "bytes", + "digest 0.10.7", + "educe", + "generic-array", + "getrandom 0.2.10", + "signature 1.6.4", + "thiserror", + "tor-error", + "tor-llcrypto", + "zeroize", +] + +[[package]] +name = "tor-cell" +version = "0.11.0" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "bitflags 2.3.2", + "bytes", + "caret", + "derive_more", + "educe", + "paste", + "rand 0.8.5", + "thiserror", + "tor-basic-utils", + "tor-bytes", + "tor-cert", + "tor-error", + "tor-hscrypto", + "tor-linkspec", + "tor-llcrypto", + "tor-units", +] + +[[package]] +name = "tor-cert" +version = "0.7.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "caret", + "digest 0.10.7", + "signature 1.6.4", + "thiserror", + "tor-bytes", + "tor-checkable", + "tor-llcrypto", +] + +[[package]] +name = "tor-chanmgr" +version = "0.9.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "async-trait", + "derive_builder_fork_arti", + "derive_more", + "educe", + "futures", + "postage", + "rand 0.8.5", + "safelog", + "serde", + "thiserror", + "tor-basic-utils", + "tor-cell", + "tor-config", + "tor-error", + "tor-linkspec", 
+ "tor-llcrypto", + "tor-netdir", + "tor-proto", + "tor-rtcompat", + "tor-socksproto", + "tor-units", + "tracing", + "void", +] + +[[package]] +name = "tor-checkable" +version = "0.5.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "humantime", + "signature 1.6.4", + "thiserror", + "tor-llcrypto", +] + +[[package]] +name = "tor-circmgr" +version = "0.9.0" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "async-trait", + "bounded-vec-deque", + "derive_builder_fork_arti", + "derive_more", + "downcast-rs", + "dyn-clone", + "educe", + "futures", + "humantime-serde", + "itertools", + "once_cell", + "pin-project", + "rand 0.8.5", + "retry-error", + "safelog", + "serde", + "static_assertions", + "thiserror", + "tor-basic-utils", + "tor-chanmgr", + "tor-config", + "tor-error", + "tor-guardmgr", + "tor-hscrypto", + "tor-linkspec", + "tor-netdir", + "tor-netdoc", + "tor-persist", + "tor-proto", + "tor-rtcompat", + "tracing", + "weak-table", +] + +[[package]] +name = "tor-config" +version = "0.9.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "config", + "derive_builder_fork_arti", + "directories", + "educe", + "either", + "fs-mistrust", + "itertools", + "once_cell", + "paste", + "regex", + "serde", + "serde_ignored", + "shellexpand", + "strum", + "thiserror", + "toml 0.7.5", + "tor-basic-utils", + "tor-error", + "tracing", +] + +[[package]] +name = "tor-consdiff" +version = "0.5.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "digest 0.10.7", + "hex", + "thiserror", + "tor-llcrypto", +] + +[[package]] +name = "tor-dirclient" +version = "0.7.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "async-compression", + "base64ct", + "derive_more", + "futures", + "hex", + "http", + "httparse", + "httpdate", + "itertools", + "memchr", + "thiserror", + "tor-circmgr", + "tor-error", + "tor-hscrypto", + "tor-linkspec", + "tor-llcrypto", + "tor-netdoc", + "tor-proto", + "tor-rtcompat", + "tracing", +] + +[[package]] +name = "tor-dirmgr" +version = "0.10.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "async-trait", + "base64ct", + "derive_builder_fork_arti", + "derive_more", + "digest 0.10.7", + "educe", + "event-listener", + "fs-mistrust", + "fslock", + "futures", + "hex", + "humantime", + "humantime-serde", + "itertools", + "memmap2 0.7.0", + "once_cell", + "paste", + "postage", + "rand 0.8.5", + "retry-error", + "rusqlite", + "safelog", + "scopeguard", + "serde", + "signature 1.6.4", + "strum", + "thiserror", + "time 0.3.22", + "tor-basic-utils", + "tor-checkable", + "tor-circmgr", + "tor-config", + "tor-consdiff", + "tor-dirclient", + "tor-error", + "tor-guardmgr", + "tor-llcrypto", + "tor-netdir", + "tor-netdoc", + "tor-proto", + "tor-rtcompat", + "tracing", +] + +[[package]] +name = "tor-error" +version = "0.5.1" +source = 
"git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "backtrace", + "derive_more", + "futures", + "once_cell", + "strum", + "thiserror", +] + +[[package]] +name = "tor-guardmgr" +version = "0.9.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "base64ct", + "derive_builder_fork_arti", + "derive_more", + "dyn-clone", + "educe", + "futures", + "humantime", + "humantime-serde", + "itertools", + "num_enum 0.6.1", + "pin-project", + "postage", + "rand 0.8.5", + "retain_mut", + "safelog", + "serde", + "strum", + "thiserror", + "tor-basic-utils", + "tor-config", + "tor-error", + "tor-linkspec", + "tor-llcrypto", + "tor-netdir", + "tor-netdoc", + "tor-persist", + "tor-proto", + "tor-rtcompat", + "tor-units", + "tracing", +] + +[[package]] +name = "tor-hsclient" +version = "0.2.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "async-trait", + "derive_more", + "educe", + "either", + "futures", + "itertools", + "postage", + "rand 0.8.5", + "rand_core 0.6.4", + "retry-error", + "safelog", + "slotmap", + "strum", + "thiserror", + "tor-bytes", + "tor-cell", + "tor-checkable", + "tor-circmgr", + "tor-config", + "tor-dirclient", + "tor-error", + "tor-hscrypto", + "tor-keymgr", + "tor-linkspec", + "tor-llcrypto", + "tor-netdir", + "tor-netdoc", + "tor-proto", + "tor-rtcompat", + "tracing", +] + +[[package]] +name = "tor-hscrypto" +version = "0.2.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "data-encoding", + "derive_more", + "digest 0.10.7", + "itertools", + "paste", + "rand 0.8.5", + "rand_core 0.6.4", + "safelog", + "serde", + "signature 1.6.4", + "thiserror", + "tor-basic-utils", + "tor-bytes", + "tor-error", + "tor-llcrypto", + "tor-units", +] + +[[package]] +name = "tor-keymgr" +version = "0.1.0" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "derive_more", + "fs-mistrust", + "ssh-key", + "thiserror", + "tor-error", + "tor-hscrypto", + "tor-llcrypto", + "zeroize", +] + +[[package]] +name = "tor-linkspec" +version = "0.8.0" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "base64ct", + "by_address", + "caret", + "cfg-if", + "derive_builder_fork_arti", + "derive_more", + "educe", + "hex", + "itertools", + "safelog", + "serde", + "serde_with", + "strum", + "thiserror", + "tor-basic-utils", + "tor-bytes", + "tor-config", + "tor-llcrypto", + "tor-protover", +] + +[[package]] +name = "tor-llcrypto" +version = "0.5.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "aes", + "base64ct", + "ctr", + "curve25519-dalek 3.2.0", + "derive_more", + "digest 0.10.7", + "ed25519-dalek", + "getrandom 0.2.10", + "hex", + "rand_core 0.5.1", + "rand_core 0.6.4", + "rsa 0.9.2", + "safelog", + "serde", + "sha1", + "sha2 0.10.7", + "sha3", + "signature 1.6.4", + "simple_asn1", + "subtle", + "thiserror", + "x25519-dalek 
2.0.0-rc.2", + "zeroize", +] + +[[package]] +name = "tor-netdir" +version = "0.9.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "bitflags 2.3.2", + "derive_builder_fork_arti", + "derive_more", + "digest 0.10.7", + "futures", + "hex", + "humantime", + "itertools", + "num_enum 0.6.1", + "rand 0.8.5", + "serde", + "signature 1.6.4", + "static_assertions", + "strum", + "thiserror", + "time 0.3.22", + "tor-basic-utils", + "tor-checkable", + "tor-config", + "tor-error", + "tor-hscrypto", + "tor-linkspec", + "tor-llcrypto", + "tor-netdoc", + "tor-protover", + "tor-units", + "tracing", + "typed-index-collections", +] + +[[package]] +name = "tor-netdoc" +version = "0.8.0" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "amplify", + "base64ct", + "bitflags 2.3.2", + "cipher", + "derive_builder_fork_arti", + "derive_more", + "digest 0.10.7", + "educe", + "hex", + "humantime", + "itertools", + "once_cell", + "phf", + "rand 0.8.5", + "serde", + "serde_with", + "signature 1.6.4", + "smallvec", + "subtle", + "thiserror", + "time 0.3.22", + "tinystr", + "tor-basic-utils", + "tor-bytes", + "tor-cert", + "tor-checkable", + "tor-error", + "tor-hscrypto", + "tor-linkspec", + "tor-llcrypto", + "tor-protover", + "tor-units", + "weak-table", + "zeroize", +] + +[[package]] +name = "tor-persist" +version = "0.7.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "derive_more", + "fs-mistrust", + "fslock", + "sanitize-filename", + "serde", + "serde_json", + "thiserror", + "tor-error", + "tracing", +] + +[[package]] +name = "tor-proto" +version = "0.11.0" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "asynchronous-codec", + "bytes", + "cipher", + "coarsetime", + "derive_builder_fork_arti", + "derive_more", + "digest 0.10.7", + "educe", + "futures", + "generic-array", + "hkdf", + "hmac", + "pin-project", + "rand 0.8.5", + "rand_core 0.6.4", + "safelog", + "subtle", + "thiserror", + "tor-async-utils", + "tor-basic-utils", + "tor-bytes", + "tor-cell", + "tor-cert", + "tor-checkable", + "tor-config", + "tor-error", + "tor-hscrypto", + "tor-linkspec", + "tor-llcrypto", + "tor-protover", + "tor-rtcompat", + "tor-rtmock", + "tor-units", + "tracing", + "typenum", + "visibility", + "zeroize", +] + +[[package]] +name = "tor-protover" +version = "0.5.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "caret", + "thiserror", +] + +[[package]] +name = "tor-rtcompat" +version = "0.9.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "async-io", + "async-rustls", + "async-std", + "async-trait", + "async_executors", + "educe", + "futures", + "pin-project", + "rustls", + "thiserror", + "x509-signature", +] + +[[package]] +name = "tor-rtmock" +version = "0.8.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + 
"async-trait", + "futures", + "humantime", + "pin-project", + "thiserror", + "tor-rtcompat", + "tracing", +] + +[[package]] +name = "tor-socksproto" +version = "0.7.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "caret", + "subtle", + "thiserror", + "tor-bytes", + "tor-error", +] + +[[package]] +name = "tor-units" +version = "0.6.1" +source = "git+https://gitlab.torproject.org/tpo/core/arti?rev=08d1155cb92568176d8b54b85ec5437dff112e01#08d1155cb92568176d8b54b85ec5437dff112e01" +dependencies = [ + "derive_more", + "thiserror", +] + [[package]] name = "tracing" version = "0.1.37" @@ -5119,13 +6813,13 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.20", ] [[package]] @@ -5139,9 +6833,9 @@ dependencies = [ [[package]] name = "ttf-parser" -version = "0.15.2" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b3e06c9b9d80ed6b745c7159c40b311ad2916abb34a49e9be2653b90db0d8dd" +checksum = "375812fa44dab6df41c195cd2f7fecb488f6c09fbaafb62807488cefab642bff" [[package]] name = "tui" @@ -5149,7 +6843,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccdd26cbd674007e649a272da4475fb666d3aa0ad0531da7136db6fab0e5bad1" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cassowary", "crossterm", "termion 1.5.6", @@ -5158,23 +6852,10 @@ dependencies = [ ] [[package]] -name = "tungstenite" -version = "0.19.0" +name = "typed-index-collections" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15fba1a6d6bb030745759a9a2a588bfe8490fc8b4751a277db3a0be1c9ebbf67" -dependencies = [ - "byteorder", - "bytes", - "data-encoding", - "http", - "httparse", - "log", - "rand", - "sha1", - "thiserror", - "url", - "utf-8", -] +checksum = "183496e014253d15abbe6235677b1392dba2d40524c88938991226baa38ac7c4" [[package]] name = "typenum" @@ -5264,12 +6945,6 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" -[[package]] -name = "unicode_categories" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" - [[package]] name = "unindent" version = "0.1.11" @@ -5305,10 +6980,14 @@ dependencies = [ ] [[package]] -name = "utf-8" -version = "0.7.6" +name = "users" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +checksum = "24cc0f6d6f267b73e5a2cadf007ba8f9bc39c6a6f9666f8cf25ea809a153b032" +dependencies = [ + "libc", + "log", +] [[package]] name = "utf8parse" @@ -5333,12 +7012,12 @@ name = "vanityaddr" version = "0.4.1" dependencies = [ "bs58", - "clap 4.3.3", + "clap 4.3.8", "ctrlc", "darkfi", "darkfi-sdk", "indicatif", - "rand", + "rand 0.8.5", "rayon", ] @@ -5360,6 +7039,23 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "visibility" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8881d5cc0ae34e3db2f1de5af81e5117a420d2f937506c2dc20d6f4cfb069051" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + [[package]] name = "wait-timeout" version = "0.2.0" @@ -5385,6 +7081,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + [[package]] name = "wasi" version = "0.10.0+wasi-snapshot-preview1" @@ -5418,7 +7120,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.20", "wasm-bindgen-shared", ] @@ -5475,7 +7177,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.20", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5504,7 +7206,7 @@ dependencies = [ "bytes", "cfg-if", "derivative", - "indexmap", + "indexmap 1.9.3", "js-sys", "more-asserts", "rustc-demangle", @@ -5535,7 +7237,7 @@ dependencies = [ "enumset", "lazy_static", "leb128", - "memmap2", + "memmap2 0.5.10", "more-asserts", "region", "smallvec", @@ -5616,7 +7318,7 @@ dependencies = [ "bytecheck", "enum-iterator", "enumset", - "indexmap", + "indexmap 1.9.3", "more-asserts", "rkyv", "target-lexicon", @@ -5637,7 +7339,7 @@ dependencies = [ "derivative", "enum-iterator", "fnv", - "indexmap", + "indexmap 1.9.3", "lazy_static", "libc", "mach", @@ -5656,7 +7358,7 @@ version = "0.95.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2ea896273ea99b15132414be1da01ab0d8836415083298ecaffbe308eaac87a" dependencies = [ - "indexmap", + "indexmap 1.9.3", "url", ] @@ -5681,6 +7383,12 @@ dependencies = [ "wast", ] +[[package]] +name = "weak-table" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "323f4da9523e9a669e1eaf9c6e763892769b1d38c623913647bfdc1532fe4549" + [[package]] name = "web-sys" version = "0.3.64" @@ -5691,25 +7399,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "webpki-roots" -version = "0.22.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" -dependencies = [ - "webpki", -] - [[package]] name = "weezl" version = "0.1.7" @@ -5942,9 +7631,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" +checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" dependencies = [ "memchr", ] @@ -5973,11 +7662,23 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5a0c105152107e3b96f6a00a65e86ce82d9b125230e1c4302940eca58ff71f4f" dependencies = [ - "curve25519-dalek", + "curve25519-dalek 3.2.0", "rand_core 0.5.1", "zeroize", ] +[[package]] +name = "x25519-dalek" +version = "2.0.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fabd6e16dd08033932fc3265ad4510cc2eab24656058a6dcb107ffe274abcc95" +dependencies = [ + "curve25519-dalek 4.0.0-rc.2", + "rand_core 0.6.4", + "serde", + "zeroize", +] + [[package]] name = "x509-parser" version = "0.15.0" @@ -5996,6 +7697,16 @@ dependencies = [ "time 0.3.22", ] +[[package]] +name = "x509-signature" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb2bc2a902d992cd5f471ee3ab0ffd6603047a4207384562755b9d6de977518" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "xsalsa20poly1305" version = "0.9.1" @@ -6047,14 +7758,14 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.20", ] [[package]] name = "zkas" version = "0.4.1" dependencies = [ - "clap 4.3.3", + "clap 4.3.8", "darkfi", ] @@ -6062,7 +7773,7 @@ dependencies = [ name = "zktool" version = "0.4.1" dependencies = [ - "clap 4.3.3", + "clap 4.3.8", "darkfi", "darkfi-sdk", ] diff --git a/Cargo.toml b/Cargo.toml index 7857fbfeb..7508548d5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,11 +71,15 @@ futures = {version = "0.3.28", optional = true} smol = {version = "1.3.0", optional = true} # Networking -futures-rustls = {version = "0.24.0", features = ["dangerous_configuration"], optional = true} +async-rustls = {version = "0.4.0", features = ["dangerous_configuration"], optional = true} iprange = {version = "0.6.7", optional = true} ipnet = {version = "2.7.2", optional = true} socket2 = {version = "0.5.3", optional = true, features = ["all"]} +# Pluggable Transports +arti-client = {version = "0.9.1", default-features = false, features = ["async-std", "rustls", "onion-service-client"], optional = true} +# TODO: nym ( Read this to figure out impl https://github.com/ChainSafe/rust-libp2p-nym ) + # TLS cert utilities ed25519-compact = {version = "2.0.4", optional = true} rcgen = {version = "0.10.0", optional = true} @@ -87,6 +91,7 @@ bs58 = {version = "0.5.0", optional = true} hex = {version = "0.4.3", optional = true} serde_json = {version = "1.0.96", optional = true} serde = {version = "1.0.164", features = ["derive"], optional = true} +semver = {version = "1.0.17", optional = true} structopt = {version= "0.3.26", optional = true} structopt-toml = {version= "0.5.1", optional = true} toml = {version = "0.7.4", optional = true} @@ -111,12 +116,6 @@ indicatif = {version = "0.17.5", optional = true} simplelog = {version = "0.12.1", optional = true} ripemd = {version = "0.1.3", optional = true} -# Websockets -async-tungstenite = {version = "0.22.2", optional = true} - -# socks5 -fast-socks5 = {version = "0.4.3", optional = true} - # Crypto rand = {version = "0.8.5", optional = true} blake3 = {version = "1.4.0", features = ["rayon"], optional = true} @@ -131,12 +130,15 @@ wasmer-compiler-singlepass = {version = "3.3.0", optional = true} wasmer-middlewares = {version = "3.3.0", optional = true} # Wallet management -sqlx = {version = "0.6.3", features = ["runtime-async-std-rustls", "sqlite"], optional = true} +rusqlite = {version = "0.29.0", optional = true} # Blockchain store sled = {version = "0.34.7", optional = true} sled-overlay = {version = "0.0.7", optional = true} +# 
Temporary version lock +curve25519-dalek = {version = "=4.0.0-rc.2", default-features = false, optional = true} + [dev-dependencies] clap = {version = "4.3.3", features = ["derive"]} halo2_proofs = {version = "0.3.0", features = ["dev-graph", "gadget-traces", "sanity-checks"]} @@ -147,6 +149,10 @@ prettytable-rs = "0.10.0" # -----BEGIN LIBRARY FEATURES----- [features] +p2p-transport-tcp = [] +p2p-transport-tor = ["arti-client"] +p2p-transport-nym = [] + async-runtime = [ "async-std", "async-trait", @@ -156,7 +162,7 @@ async-runtime = [ blockchain = [ "blake3", - "bs58", # <-- remove after we get rid of json for notifications + "bs58", # <-- TODO: remove after we get rid of json for notifications "chrono", "crypto_api_chachapoly", "dashu", @@ -165,7 +171,6 @@ blockchain = [ "rand", "sled", "sled-overlay", - "sqlx", "url", "async-runtime", @@ -204,8 +209,7 @@ event-graph = [ net = [ "ed25519-compact", - "fast-socks5", - "futures-rustls", + "async-rustls", "hex", "iprange", "ipnet", @@ -215,6 +219,7 @@ net = [ "rcgen", "rustls-pemfile", "x509-parser", + "semver", "serde", "serde_json", "socket2", @@ -226,6 +231,10 @@ net = [ "darkfi-serial/url", "system", "util", + + "p2p-transport-tcp", + "p2p-transport-tor", + "p2p-transport-nym", ] raft = [ @@ -281,7 +290,7 @@ util = [ wallet = [ "async-std", "rand", - "sqlx", + "rusqlite", "darkfi-serial", "util", @@ -296,10 +305,6 @@ wasm-runtime = [ "darkfi-sdk", ] -websockets = [ - "async-tungstenite", -] - zk = [ "halo2_proofs", "halo2_gadgets", @@ -333,5 +338,6 @@ path = "example/zk-inclusion-proof.rs" required-features = ["zk"] [patch.crates-io] +arti-client = {git="https://gitlab.torproject.org/tpo/core/arti", rev="08d1155cb92568176d8b54b85ec5437dff112e01"} halo2_proofs = {git="https://github.com/parazyd/halo2", branch="v3"} halo2_gadgets = {git="https://github.com/parazyd/halo2", branch="v3"} diff --git a/Makefile b/Makefile index 39c22fb17..c64735cf3 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ CARGO = cargo #TARGET_PRFX = --target= # Binaries to be built -BINS = drk darkfid ircd dnetview faucetd vanityaddr +BINS = darkfid faucetd drk darkirc dnetview vanityaddr # zkas dependencies ZKASDEPS = \ @@ -36,8 +36,7 @@ BINDEPS = \ all: $(BINS) zkas: $(ZKASDEPS) - $(CARGO) build $(TARGET_PRFX)$(RUST_TARGET) \ - --all-features --release --package $@ + $(CARGO) build $(TARGET_PRFX)$(RUST_TARGET) --all-features --release --package $@ cp -f target/$(RUST_TARGET)/release/$@ $@ $(PROOFS_BIN): zkas $(PROOFS_SRC) @@ -50,8 +49,7 @@ contracts: zkas $(MAKE) -C src/contract/deployooor $(BINS): contracts $(PROOFS_BIN) $(BINDEPS) - $(CARGO) build $(TARGET_PRFX)$(RUST_TARGET) \ - --all-features --release --package $@ + $(CARGO) build $(TARGET_PRFX)$(RUST_TARGET) --all-features --release --package $@ cp -f target/$(RUST_TARGET)/release/$@ $@ check: contracts $(PROOFS_BIN) diff --git a/bin/darkfid/Cargo.toml b/bin/darkfid/Cargo.toml index 59f597c2d..811974242 100644 --- a/bin/darkfid/Cargo.toml +++ b/bin/darkfid/Cargo.toml @@ -14,7 +14,7 @@ async-trait = "0.1.68" blake3 = "1.4.0" bs58 = "0.5.0" ctrlc = { version = "3.4.0", features = ["termination"] } -darkfi = {path = "../../", features = ["blockchain", "wallet", "rpc", "net"]} +darkfi = {path = "../../", features = ["blockchain", "wallet", "rpc", "net", "zkas"]} darkfi-sdk = {path = "../../src/sdk"} darkfi-serial = {path = "../../src/serial"} easy-parallel = "3.3.0" @@ -23,7 +23,6 @@ serde_json = "1.0.96" simplelog = "0.12.1" sled = "0.34.7" smol = "1.3.0" -sqlx = {version = "0.6.3", features = 
["runtime-async-std-rustls", "sqlite"]} url = "2.4.0" # Argument parsing diff --git a/bin/darkfid/src/main.rs b/bin/darkfid/src/main.rs index 49007ec51..4a92c7e86 100644 --- a/bin/darkfid/src/main.rs +++ b/bin/darkfid/src/main.rs @@ -49,7 +49,7 @@ use darkfi::{ server::{listen_and_serve, RequestHandler}, }, util::path::expand_path, - wallet::{walletdb::init_wallet, WalletPtr}, + wallet::{WalletDb, WalletPtr}, Error, Result, }; @@ -105,7 +105,7 @@ struct Args { #[structopt(long, default_value = "8")] /// Connection slots for the consensus protocol - consensus_slots: u32, + consensus_slots: usize, #[structopt(long)] /// Connect to peer for the consensus protocol (repeatable flag) @@ -137,7 +137,7 @@ struct Args { #[structopt(long, default_value = "8")] /// Connection slots for the syncing protocol - sync_slots: u32, + sync_slots: usize, #[structopt(long)] /// Connect to peer for the syncing protocol (repeatable flag) @@ -297,7 +297,7 @@ async fn realmain(args: Args, ex: Arc>) -> Result<()> { .unwrap(); // Initialize or load wallet - let wallet = init_wallet(&args.wallet_path, &args.wallet_pass).await?; + let wallet = WalletDb::new(Some(expand_path(&args.wallet_path)?), &args.wallet_pass).await?; // Initialize or open sled database let db_path = @@ -357,14 +357,13 @@ async fn realmain(args: Args, ex: Arc>) -> Result<()> { let sync_p2p = { info!("Registering block sync P2P protocols..."); let sync_network_settings = net::Settings { - inbound: args.sync_p2p_accept, + inbound_addrs: args.sync_p2p_accept, outbound_connections: args.sync_slots, - external_addr: args.sync_p2p_external, + external_addrs: args.sync_p2p_external, peers: args.sync_p2p_peer.clone(), seeds: args.sync_p2p_seed.clone(), - outbound_transports: net::settings::get_outbound_transports(args.sync_p2p_transports), + allowed_transports: args.sync_p2p_transports, localnet: args.localnet, - channel_log: args.channel_log, ..Default::default() }; @@ -401,16 +400,13 @@ async fn realmain(args: Args, ex: Arc>) -> Result<()> { } else { info!("Registering consensus P2P protocols..."); let consensus_network_settings = net::Settings { - inbound: args.consensus_p2p_accept, + inbound_addrs: args.consensus_p2p_accept, outbound_connections: args.consensus_slots, - external_addr: args.consensus_p2p_external, + external_addrs: args.consensus_p2p_external, peers: args.consensus_p2p_peer.clone(), seeds: args.consensus_p2p_seed.clone(), - outbound_transports: net::settings::get_outbound_transports( - args.consensus_p2p_transports, - ), + allowed_transports: args.consensus_p2p_transports, localnet: args.localnet, - channel_log: args.channel_log, ..Default::default() }; let p2p = net::P2p::new(consensus_network_settings).await; @@ -457,8 +453,9 @@ async fn realmain(args: Args, ex: Arc>) -> Result<()> { }) .detach(); - info!("Waiting for sync P2P outbound connections"); - sync_p2p.clone().unwrap().wait_for_outbound(ex.clone()).await?; + // TODO: I think this is not necessary anymore + //info!("Waiting for sync P2P outbound connections"); + //sync_p2p.clone().unwrap().wait_for_outbound(ex.clone()).await?; match block_sync_task(sync_p2p.clone().unwrap(), state.clone()).await { Ok(()) => *darkfid.synced.lock().await = true, @@ -478,8 +475,9 @@ async fn realmain(args: Args, ex: Arc>) -> Result<()> { }) .detach(); - info!("Waiting for consensus P2P outbound connections"); - consensus_p2p.clone().unwrap().wait_for_outbound(ex.clone()).await?; + // TODO: I think this is not necessary anymore + //info!("Waiting for consensus P2P outbound connections"); + 
//consensus_p2p.clone().unwrap().wait_for_outbound(ex.clone()).await?; info!("Starting consensus protocol task"); let _ex = ex.clone(); @@ -493,13 +491,11 @@ async fn realmain(args: Args, ex: Arc>) -> Result<()> { print!("\r"); info!("Caught termination signal, cleaning up and exiting..."); + // TODO: STOP P2P NETS + info!("Flushing sled database..."); let flushed_bytes = sled_db.flush_async().await?; info!("Flushed {} bytes", flushed_bytes); - info!("Closing wallet connection..."); - wallet.conn.close().await; - info!("Closed wallet connection"); - Ok(()) } diff --git a/bin/darkfid/src/rpc_tx.rs b/bin/darkfid/src/rpc_tx.rs index 9346da851..bbcc824e1 100644 --- a/bin/darkfid/src/rpc_tx.rs +++ b/bin/darkfid/src/rpc_tx.rs @@ -143,8 +143,9 @@ impl Darkfid { } if let Some(sync_p2p) = &self.sync_p2p { - if let Err(e) = sync_p2p.broadcast(tx.clone()).await { - error!("[RPC] tx.broadcast: Failed broadcasting transaction: {}", e); + sync_p2p.broadcast(&tx).await; + if sync_p2p.channels().lock().await.is_empty() { + error!("[RPC] tx.broadcast: Failed broadcasting tx, no connected channels"); return server_error(RpcError::TxBroadcastFail, id, None) } } else { diff --git a/bin/darkfid/src/rpc_wallet.rs b/bin/darkfid/src/rpc_wallet.rs index 2d5c34abc..294927867 100644 --- a/bin/darkfid/src/rpc_wallet.rs +++ b/bin/darkfid/src/rpc_wallet.rs @@ -18,7 +18,6 @@ use log::{debug, error}; use serde_json::{json, Value}; -use sqlx::Row; use darkfi::{ rpc::jsonrpc::{ @@ -51,6 +50,8 @@ impl Darkfid { // --> {"jsonrpc": "2.0", "method": "wallet.query_row_single", "params": [...], "id": 1} // <-- {"jsonrpc": "2.0", "result": ["va", "lu", "es", ...], "id": 1} pub async fn wallet_query_row_single(&self, id: Value, params: &[Value]) -> JsonResult { + todo!(); + /* TODO: This will be abstracted away // We need at least 3 params for something we want to fetch, and we want them in pairs. // Also the first param should be a String if params.len() < 3 || params[1..].len() % 2 != 0 || !params[0].is_string() { @@ -189,6 +190,7 @@ impl Darkfid { } JsonResponse::new(json!(ret), id).into() + */ } // RPCAPI: @@ -201,6 +203,8 @@ impl Darkfid { // --> {"jsonrpc": "2.0", "method": "wallet.query_row_multi", "params": [...], "id": 1} // <-- {"jsonrpc": "2.0", "result": [["va", "lu"], ["es", "es"], ...], "id": 1} pub async fn wallet_query_row_multi(&self, id: Value, params: &[Value]) -> JsonResult { + todo!(); + /* TODO: This will be abstracted away // We need at least 3 params for something we want to fetch, and we want them in pairs. // Also the first param (the query) should be a String. 
if params.len() < 3 || params[1..].len() % 2 != 0 || !params[0].is_string() { @@ -313,6 +317,7 @@ impl Darkfid { } JsonResponse::new(json!(ret), id).into() + */ } // RPCAPI: @@ -322,6 +327,8 @@ impl Darkfid { // --> {"jsonrpc": "2.0", "method": "wallet.exec_sql", "params": ["CREATE TABLE ..."], "id": 1} // <-- {"jsonrpc": "2.0", "result": true, "id": 1} pub async fn wallet_exec_sql(&self, id: Value, params: &[Value]) -> JsonResult { + todo!(); + /* TODO: This will be abstracted away if params.is_empty() || !params[0].is_string() { return JsonError::new(InvalidParams, None, id).into() } @@ -423,5 +430,6 @@ impl Darkfid { }; JsonResponse::new(json!(true), id).into() + */ } } diff --git a/bin/darkirc/Cargo.toml b/bin/darkirc/Cargo.toml index 2fe88af8a..3bf9770a2 100644 --- a/bin/darkirc/Cargo.toml +++ b/bin/darkirc/Cargo.toml @@ -15,7 +15,7 @@ darkfi-serial = {path = "../../src/serial"} # Async smol = "1.3.0" futures = "0.3.28" -futures-rustls = "0.24.0" +async-rustls = "0.4.0" rustls-pemfile = "1.0.2" async-std = "1.12.0" async-trait = "0.1.68" diff --git a/bin/darkirc/darkirc_config.toml b/bin/darkirc/darkirc_config.toml index ce3314298..2c8b05aca 100644 --- a/bin/darkirc/darkirc_config.toml +++ b/bin/darkirc/darkirc_config.toml @@ -40,10 +40,10 @@ outbound_connections=5 #peers = ["tls://127.0.0.1:26661"] ## Seed nodes to connect to -seeds = ["tls://lilith0.dark.fi:26661", "tls://lilith1.dark.fi:26661"] +seeds = ["tcp+tls://lilith0.dark.fi:26661", "tcp+tls://lilith1.dark.fi:26661"] # Prefered transports for outbound connections -outbound_transports = ["tls"] +outbound_transports = ["tcp", "tcp+tls"] ## Only used for debugging. Compromises privacy when set. #node_id = "foo" diff --git a/bin/darkirc/src/irc/server.rs b/bin/darkirc/src/irc/server.rs index 72cd7b790..f75a60a15 100644 --- a/bin/darkirc/src/irc/server.rs +++ b/bin/darkirc/src/irc/server.rs @@ -18,12 +18,12 @@ use std::{fs::File, net::SocketAddr}; +use async_rustls::{rustls, TlsAcceptor}; use async_std::{ net::TcpListener, sync::{Arc, Mutex}, }; use futures::{io::BufReader, AsyncRead, AsyncReadExt, AsyncWrite}; -use futures_rustls::{rustls, TlsAcceptor}; use log::{error, info}; use darkfi::{ @@ -177,7 +177,7 @@ impl IrcServer { continue } - p2p.broadcast(event).await?; + p2p.broadcast(&event).await; } NotifierMsg::UpdateConfig => { diff --git a/bin/darkirc/src/main.rs b/bin/darkirc/src/main.rs index 6f3acb306..531daa2cc 100644 --- a/bin/darkirc/src/main.rs +++ b/bin/darkirc/src/main.rs @@ -106,8 +106,7 @@ async fn realmain(settings: Args, executor: Arc>) -> Result<( let seen_inv = Seen::new(); // Check the version - let mut net_settings = settings.net.clone(); - net_settings.app_version = Some(option_env!("CARGO_PKG_VERSION").unwrap_or("").to_string()); + let net_settings = settings.net.clone(); // New p2p let p2p = net::P2p::new(net_settings.into()).await; diff --git a/bin/dnetview/src/main.rs b/bin/dnetview/src/main.rs index 6ff767e85..6a0c1e618 100644 --- a/bin/dnetview/src/main.rs +++ b/bin/dnetview/src/main.rs @@ -130,8 +130,8 @@ async fn main() -> DnetViewResult<()> { //debug!(target: "dnetview", "main() START"); let args = Args::parse(); - let log_level = get_log_level(args.verbose.into()); - let log_config = get_log_config(); + let log_level = get_log_level(args.verbose); + let log_config = get_log_config(args.verbose); let log_file_path = expand_path(&args.log_path)?; if let Some(parent) = log_file_path.parent() { diff --git a/bin/drk/Cargo.toml b/bin/drk/Cargo.toml index 82fa8a6c6..091bf15e5 100644 --- 
a/bin/drk/Cargo.toml +++ b/bin/drk/Cargo.toml @@ -27,6 +27,5 @@ smol = "1.3.0" simplelog = "0.12.1" signal-hook-async-std = "0.2.2" signal-hook = "0.3.15" -sqlx = {version = "0.6.3", features = ["runtime-async-std-rustls", "sqlite"]} url = "2.4.0" rodio = {version = "0.17.1", default-features = false, features = ["minimp3"]} diff --git a/bin/drk/src/main.rs b/bin/drk/src/main.rs index 399ea81b4..4c47914b4 100644 --- a/bin/drk/src/main.rs +++ b/bin/drk/src/main.rs @@ -484,8 +484,8 @@ async fn main() -> Result<()> { let args = Args::parse(); if args.verbose > 0 { - let log_level = get_log_level(args.verbose.into()); - let log_config = get_log_config(); + let log_level = get_log_level(args.verbose); + let log_config = get_log_config(args.verbose); TermLogger::init(log_level, log_config, TerminalMode::Mixed, ColorChoice::Auto)?; } diff --git a/bin/faucetd/src/main.rs b/bin/faucetd/src/main.rs index c7ea27170..c44268634 100644 --- a/bin/faucetd/src/main.rs +++ b/bin/faucetd/src/main.rs @@ -133,7 +133,7 @@ struct Args { #[structopt(long, default_value = "8")] /// Connection slots for the syncing protocol - sync_slots: u32, + sync_slots: usize, #[structopt(long)] /// Connect to seed for the syncing protocol (repeatable flag) @@ -151,10 +151,6 @@ struct Args { /// Enable localnet hosts localnet: bool, - #[structopt(long)] - /// Enable channel log - channel_log: bool, - #[structopt(long)] /// Whitelisted cashier address (repeatable flag) cashier_pub: Vec, @@ -719,14 +715,13 @@ async fn realmain(args: Args, ex: Arc>) -> Result<()> { // P2P network. The faucet doesn't participate in consensus, so we only // build the sync protocol. let network_settings = net::Settings { - inbound: args.sync_p2p_accept, + inbound_addrs: args.sync_p2p_accept, outbound_connections: args.sync_slots, - external_addr: args.sync_p2p_external, + external_addrs: args.sync_p2p_external, peers: args.sync_p2p_peer.clone(), seeds: args.sync_p2p_seed.clone(), - outbound_transports: net::settings::get_outbound_transports(args.sync_p2p_transports), + allowed_transports: args.sync_p2p_transports, localnet: args.localnet, - channel_log: args.channel_log, ..Default::default() }; @@ -788,8 +783,9 @@ async fn realmain(args: Args, ex: Arc>) -> Result<()> { }) .detach(); - info!("Waiting for sync P2P outbound connections"); - sync_p2p.clone().wait_for_outbound(ex).await?; + // TODO: I think this is not needed anymore + //info!("Waiting for sync P2P outbound connections"); + //sync_p2p.clone().wait_for_outbound(ex).await?; match block_sync_task(sync_p2p, state.clone()).await { Ok(()) => *faucetd.synced.lock().await = true, diff --git a/bin/lilith/src/main.rs b/bin/lilith/src/main.rs index f0600e849..99de7c31f 100644 --- a/bin/lilith/src/main.rs +++ b/bin/lilith/src/main.rs @@ -441,6 +441,10 @@ async fn realmain(args: Args, ex: Arc>) -> Result<()> { // Spawn configured networks let mut networks = vec![]; for (name, info) in &configured_nets { + // TODO: Here we could actually differentiate between network versions + // e.g. p2p_v3, p2p_v4, etc. Therefore we can spawn multiple networks + // and they would all be version-checked, so we avoid mismatches when + // seeding peers. 
match spawn_net( name.to_string(), info, diff --git a/src/consensus/block.rs b/src/consensus/block.rs index e34d77488..2d0561acf 100644 --- a/src/consensus/block.rs +++ b/src/consensus/block.rs @@ -28,7 +28,7 @@ use super::{ constants::{BLOCK_MAGIC_BYTES, BLOCK_VERSION}, LeadInfo, }; -use crate::{net, tx::Transaction, util::time::Timestamp}; +use crate::{impl_p2p_message, net::Message, tx::Transaction, util::time::Timestamp}; /// This struct represents a tuple of the form (version, previous, epoch, slot, timestamp, merkle_root). #[derive(Debug, Clone, PartialEq, Eq, SerialEncodable, SerialDecodable)] @@ -100,11 +100,7 @@ pub struct Block { pub lead_info: LeadInfo, } -impl net::Message for Block { - fn name() -> &'static str { - "block" - } -} +impl_p2p_message!(Block, "block"); impl Block { pub fn new( @@ -146,11 +142,7 @@ pub struct BlockOrder { pub block: blake3::Hash, } -impl net::Message for BlockOrder { - fn name() -> &'static str { - "blockorder" - } -} +impl_p2p_message!(BlockOrder, "blockorder"); /// Structure representing full block data. #[derive(Debug, Clone, SerialEncodable, SerialDecodable)] @@ -172,11 +164,7 @@ impl Default for BlockInfo { } } -impl net::Message for BlockInfo { - fn name() -> &'static str { - "blockinfo" - } -} +impl_p2p_message!(BlockInfo, "blockinfo"); impl BlockInfo { pub fn new(header: Header, txs: Vec, lead_info: LeadInfo) -> Self { @@ -210,11 +198,7 @@ pub struct BlockResponse { pub blocks: Vec, } -impl net::Message for BlockResponse { - fn name() -> &'static str { - "blockresponse" - } -} +impl_p2p_message!(BlockResponse, "blockresponse"); /// This struct represents a block proposal, used for consensus. #[derive(Debug, Clone, SerialEncodable, SerialDecodable)] @@ -260,11 +244,7 @@ impl fmt::Display for BlockProposal { } } -impl net::Message for BlockProposal { - fn name() -> &'static str { - "proposal" - } -} +impl_p2p_message!(BlockProposal, "proposal"); impl From for BlockInfo { fn from(block: BlockProposal) -> BlockInfo { diff --git a/src/consensus/proto/protocol_proposal.rs b/src/consensus/proto/protocol_proposal.rs index db7105dbd..bb51d3407 100644 --- a/src/consensus/proto/protocol_proposal.rs +++ b/src/consensus/proto/protocol_proposal.rs @@ -46,25 +46,22 @@ impl ProtocolProposal { p2p: P2pPtr, ) -> Result { debug!(target: "consensus::protocol_proposal::init()", "Adding ProtocolProposal to the protocol registry"); - let msg_subsystem = channel.get_message_subsystem(); + let msg_subsystem = channel.message_subsystem(); msg_subsystem.add_dispatch::().await; let proposal_sub = channel.subscribe_msg::().await?; - let channel_address = channel.address(); - Ok(Arc::new(Self { proposal_sub, - jobsman: ProtocolJobsManager::new("ProposalProtocol", channel), + jobsman: ProtocolJobsManager::new("ProposalProtocol", channel.clone()), state, p2p, - channel_address, + channel_address: channel.address().clone(), })) } async fn handle_receive_proposal(self: Arc) -> Result<()> { debug!(target: "consensus::protocol_proposal::handle_receive_proposal()", "START"); - let exclude_list = vec![self.channel_address.clone()]; loop { let proposal = match self.proposal_sub.receive().await { @@ -104,15 +101,7 @@ impl ProtocolProposal { Ok(broadcast) => { if broadcast { // Broadcast proposal to rest of nodes - if let Err(e) = - self.p2p.broadcast_with_exclude(proposal_copy, &exclude_list).await - { - error!( - target: "consensus::protocol_proposal::handle_receive_proposal()", - "proposal broadcast fail: {}", - e - ); - }; + self.p2p.broadcast_with_exclude(&proposal_copy, 
&exclude_list).await; } } Err(e) => { diff --git a/src/consensus/proto/protocol_sync.rs b/src/consensus/proto/protocol_sync.rs index b1293a9c7..742d5bc46 100644 --- a/src/consensus/proto/protocol_sync.rs +++ b/src/consensus/proto/protocol_sync.rs @@ -58,7 +58,7 @@ impl ProtocolSync { p2p: P2pPtr, consensus_mode: bool, ) -> Result { - let msg_subsystem = channel.get_message_subsystem(); + let msg_subsystem = channel.message_subsystem(); msg_subsystem.add_dispatch::().await; msg_subsystem.add_dispatch::().await; msg_subsystem.add_dispatch::().await; @@ -129,7 +129,7 @@ impl ProtocolSync { let blocks = vec![BlockInfo::default()]; let response = BlockResponse { blocks }; - if let Err(e) = self.channel.send(response).await { + if let Err(e) = self.channel.send(&response).await { error!( target: "consensus::protocol_sync::handle_receive_request()", "channel send fail: {}", @@ -203,15 +203,7 @@ impl ProtocolSync { target: "consensus::protocol_sync::handle_receive_block()", "block processed successfully, broadcasting..." ); - if let Err(e) = - self.p2p.broadcast_with_exclude(info_copy, &exclude_list).await - { - error!( - target: "consensus::protocol_sync::handle_receive_block()", - "p2p broadcast fail: {}", - e - ); - }; + self.p2p.broadcast_with_exclude(&info_copy, &exclude_list).await; } } Err(e) => { @@ -270,7 +262,7 @@ impl ProtocolSync { ); let response = SlotResponse { slots }; - if let Err(e) = self.channel.send(response).await { + if let Err(e) = self.channel.send(&response).await { error!( target: "consensus::protocol_sync::handle_receive_slot_request()", "channel send fail: {}", @@ -285,7 +277,7 @@ impl ProtocolSync { target: "consensus::protocol_sync::handle_receive_slot()", "START" ); - let exclude_list = vec![self.channel.address()]; + let exclude_list = vec![self.channel.address().clone()]; loop { let slot = match self.slots_sub.receive().await { Ok(v) => v, @@ -346,15 +338,7 @@ impl ProtocolSync { target: "consensus::protocol_sync::handle_receive_slot()", "slot processed successfully, broadcasting..." 
); - if let Err(e) = - self.p2p.broadcast_with_exclude(slot_copy, &exclude_list).await - { - error!( - target: "consensus::protocol_sync::handle_receive_slot()", - "p2p broadcast fail: {}", - e - ); - }; + self.p2p.broadcast_with_exclude(&slot_copy, &exclude_list).await; } } Err(e) => { diff --git a/src/consensus/proto/protocol_sync_consensus.rs b/src/consensus/proto/protocol_sync_consensus.rs index ebfbe2244..9059e29f4 100644 --- a/src/consensus/proto/protocol_sync_consensus.rs +++ b/src/consensus/proto/protocol_sync_consensus.rs @@ -47,7 +47,7 @@ impl ProtocolSyncConsensus { state: ValidatorStatePtr, _p2p: P2pPtr, ) -> Result { - let msg_subsystem = channel.get_message_subsystem(); + let msg_subsystem = channel.message_subsystem(); msg_subsystem.add_dispatch::().await; msg_subsystem.add_dispatch::().await; @@ -128,7 +128,7 @@ impl ProtocolSyncConsensus { err_history, nullifiers, }; - if let Err(e) = self.channel.send(response).await { + if let Err(e) = self.channel.send(&response).await { error!( target: "consensus::protocol_sync_consensus::handle_receive_request()", "channel send fail: {}", @@ -168,7 +168,7 @@ impl ProtocolSyncConsensus { let proposing = lock.consensus.proposing; let is_empty = lock.consensus.slots_is_empty(); let response = ConsensusSyncResponse { bootstrap_slot, proposing, is_empty }; - if let Err(e) = self.channel.send(response).await { + if let Err(e) = self.channel.send(&response).await { error!( target: "consensus::protocol_sync_consensus::handle_receive_sync_request()", "channel send fail: {}", diff --git a/src/consensus/proto/protocol_tx.rs b/src/consensus/proto/protocol_tx.rs index 4821a6480..b1d8d1947 100644 --- a/src/consensus/proto/protocol_tx.rs +++ b/src/consensus/proto/protocol_tx.rs @@ -18,15 +18,15 @@ use async_std::sync::Arc; use async_trait::async_trait; -use log::{debug, error}; +use log::debug; use smol::Executor; use url::Url; use crate::{ consensus::ValidatorStatePtr, - net, + impl_p2p_message, net::{ - ChannelPtr, MessageSubscription, P2pPtr, ProtocolBase, ProtocolBasePtr, + ChannelPtr, Message, MessageSubscription, P2pPtr, ProtocolBase, ProtocolBasePtr, ProtocolJobsManager, ProtocolJobsManagerPtr, }, tx::Transaction, @@ -41,11 +41,7 @@ pub struct ProtocolTx { channel_address: Url, } -impl net::Message for Transaction { - fn name() -> &'static str { - "tx" - } -} +impl_p2p_message!(Transaction, "tx"); impl ProtocolTx { pub async fn init( @@ -57,18 +53,17 @@ impl ProtocolTx { target: "consensus::protocol_tx::init()", "Adding ProtocolTx to the protocol registry" ); - let msg_subsystem = channel.get_message_subsystem(); + let msg_subsystem = channel.message_subsystem(); msg_subsystem.add_dispatch::().await; let tx_sub = channel.subscribe_msg::().await?; - let channel_address = channel.address(); Ok(Arc::new(Self { tx_sub, - jobsman: ProtocolJobsManager::new("TxProtocol", channel), + jobsman: ProtocolJobsManager::new("TxProtocol", channel.clone()), state, p2p, - channel_address, + channel_address: channel.address().clone(), })) } @@ -104,13 +99,7 @@ impl ProtocolTx { // Nodes use unconfirmed_txs vector as seen_txs pool. 
if self.state.write().await.append_tx(tx_copy.clone()).await { - if let Err(e) = self.p2p.broadcast_with_exclude(tx_copy, &exclude_list).await { - error!( - target: "consensus::protocol_tx::handle_receive_tx()", - "p2p broadcast fail: {}", - e - ); - }; + self.p2p.broadcast_with_exclude(&tx_copy, &exclude_list).await; } } } diff --git a/src/consensus/state.rs b/src/consensus/state.rs index c87ed0649..4a056cc5e 100644 --- a/src/consensus/state.rs +++ b/src/consensus/state.rs @@ -33,7 +33,8 @@ use super::{ }; use crate::{ blockchain::Blockchain, - net, + impl_p2p_message, + net::Message, tx::Transaction, util::time::{TimeKeeper, Timestamp}, wallet::WalletPtr, @@ -631,12 +632,7 @@ impl ConsensusState { /// Auxiliary structure used for consensus syncing. #[derive(Debug, Clone, SerialEncodable, SerialDecodable)] pub struct ConsensusRequest {} - -impl net::Message for ConsensusRequest { - fn name() -> &'static str { - "consensusrequest" - } -} +impl_p2p_message!(ConsensusRequest, "consensusrequest"); /// Auxiliary structure used for consensus syncing. #[derive(Debug, Clone, SerialEncodable, SerialDecodable)] @@ -661,21 +657,13 @@ pub struct ConsensusResponse { pub nullifiers: Vec, } -impl net::Message for ConsensusResponse { - fn name() -> &'static str { - "consensusresponse" - } -} +impl_p2p_message!(ConsensusResponse, "consensusresponse"); /// Auxiliary structure used for consensus syncing. #[derive(Debug, SerialEncodable, SerialDecodable)] pub struct ConsensusSyncRequest {} -impl net::Message for ConsensusSyncRequest { - fn name() -> &'static str { - "consensussyncrequest" - } -} +impl_p2p_message!(ConsensusSyncRequest, "consensussyncrequest"); /// Auxiliary structure used for consensus syncing. #[derive(Debug, Clone, SerialEncodable, SerialDecodable)] @@ -688,17 +676,55 @@ pub struct ConsensusSyncResponse { pub is_empty: bool, } -impl net::Message for ConsensusSyncResponse { - fn name() -> &'static str { - "consensussyncresponse" +impl_p2p_message!(ConsensusSyncResponse, "consensussyncresponse"); +impl_p2p_message!(Slot, "slot"); + +/// Auxiliary structure used to keep track of slot validation parameters. +#[derive(Debug, Clone, SerialEncodable, SerialDecodable)] +pub struct SlotCheckpoint { + /// Slot UID + pub slot: u64, + /// Previous slot eta + pub previous_eta: pallas::Base, + /// Previous slot forks last proposal/block hashes, + /// as observed by the validator + pub fork_hashes: Vec, + /// Previous slot second to last proposal/block hashes, + /// as observed by the validator + pub fork_previous_hashes: Vec, + /// Slot sigma1 + pub sigma1: pallas::Base, + /// Slot sigma2 + pub sigma2: pallas::Base, +} + +impl SlotCheckpoint { + pub fn new( + slot: u64, + previous_eta: pallas::Base, + fork_hashes: Vec, + fork_previous_hashes: Vec, + sigma1: pallas::Base, + sigma2: pallas::Base, + ) -> Self { + Self { slot, previous_eta, fork_hashes, fork_previous_hashes, sigma1, sigma2 } + } + + /// Generate the genesis slot checkpoint. + pub fn genesis_slot_checkpoint(genesis_block: blake3::Hash) -> Self { + let previous_eta = pallas::Base::ZERO; + let fork_hashes = vec![]; + // Since genesis block has no previous, + // we will use its own hash as its previous. 
+ let fork_previous_hashes = vec![genesis_block]; + let sigma1 = pallas::Base::ZERO; + let sigma2 = pallas::Base::ZERO; + + Self::new(0, previous_eta, fork_hashes, fork_previous_hashes, sigma1, sigma2) } } -impl net::Message for Slot { - fn name() -> &'static str { - "slot" - } -} +impl_p2p_message!(SlotCheckpoint, "slotcheckpoint"); /// Auxiliary structure used for slots syncing #[derive(Debug, Clone, SerialEncodable, SerialDecodable)] @@ -707,11 +733,7 @@ pub struct SlotRequest { pub slot: u64, } -impl net::Message for SlotRequest { - fn name() -> &'static str { - "slotrequest" - } -} +impl_p2p_message!(SlotRequest, "slotrequest"); /// Auxiliary structure used for slots syncing #[derive(Debug, Clone, SerialEncodable, SerialDecodable)] @@ -720,11 +742,7 @@ pub struct SlotResponse { pub slots: Vec, } -impl net::Message for SlotResponse { - fn name() -> &'static str { - "slotresponse" - } -} +impl_p2p_message!(SlotResponse, "slotresponse"); /// Auxiliary structure used to keep track of consensus state checkpoints. #[derive(Debug, Clone)] diff --git a/src/consensus/task/block_sync.rs b/src/consensus/task/block_sync.rs index 0d527a280..ad5ce7d25 100644 --- a/src/consensus/task/block_sync.rs +++ b/src/consensus/task/block_sync.rs @@ -30,9 +30,9 @@ use log::{debug, info, warn}; pub async fn block_sync_task(p2p: net::P2pPtr, state: ValidatorStatePtr) -> Result<()> { info!(target: "consensus::block_sync", "Starting blockchain sync..."); // Getting a random connected channel to ask from peers - match p2p.clone().random_channel().await { + match p2p.random_channel().await { Some(channel) => { - let msg_subsystem = channel.get_message_subsystem(); + let msg_subsystem = channel.message_subsystem(); // Communication setup for slots msg_subsystem.add_dispatch::().await; @@ -54,7 +54,7 @@ pub async fn block_sync_task(p2p: net::P2pPtr, state: ValidatorStatePtr) -> Resu loop { // Node creates a `SlotRequest` and sends it let request = SlotRequest { slot: last.id }; - channel.send(request).await?; + channel.send(&request).await?; // Node stores response data. let resp = slot_response_sub.receive().await?; @@ -88,7 +88,7 @@ pub async fn block_sync_task(p2p: net::P2pPtr, state: ValidatorStatePtr) -> Resu loop { // Node creates a `BlockOrder` and sends it let order = BlockOrder { slot: last.0, block: last.1 }; - channel.send(order).await?; + channel.send(&order).await?; // Node stores response data. let _resp = block_response_sub.receive().await?; diff --git a/src/consensus/task/consensus_sync.rs b/src/consensus/task/consensus_sync.rs index 2984f0743..4ee3af543 100644 --- a/src/consensus/task/consensus_sync.rs +++ b/src/consensus/task/consensus_sync.rs @@ -52,12 +52,13 @@ pub async fn consensus_sync_task(p2p: P2pPtr, state: ValidatorStatePtr) -> Resul let mut peer = None; for channel in values { // Communication setup - let msg_subsystem = channel.get_message_subsystem(); + let msg_subsystem = channel.message_subsystem(); msg_subsystem.add_dispatch::().await; let response_sub = channel.subscribe_msg::().await?; // Node creates a `ConsensusSyncRequest` and sends it let request = ConsensusSyncRequest {}; - channel.send(request).await?; + channel.send(&request).await?; + // Node checks response let response = response_sub.receive().await?; if response.bootstrap_slot == current_slot { @@ -103,11 +104,11 @@ pub async fn consensus_sync_task(p2p: P2pPtr, state: ValidatorStatePtr) -> Resul // This ensures that the received state always consists of 1 fork with one proposal. 
info!(target: "consensus::consensus_sync", "Finalization signal received, requesting consensus state..."); // Communication setup - let msg_subsystem = peer.get_message_subsystem(); + let msg_subsystem = peer.message_subsystem(); msg_subsystem.add_dispatch::().await; let response_sub = peer.subscribe_msg::().await?; // Node creates a `ConsensusRequest` and sends it - peer.send(ConsensusRequest {}).await?; + peer.send(&ConsensusRequest {}).await?; // Node verifies response came from a participating node. // Extra validations can be added here. @@ -117,7 +118,7 @@ pub async fn consensus_sync_task(p2p: P2pPtr, state: ValidatorStatePtr) -> Resul if !response.forks.is_empty() { warn!(target: "consensus::consensus_sync", "Peer has not finished finalization, retrying..."); sleep(1).await; - peer.send(ConsensusRequest {}).await?; + peer.send(&ConsensusRequest {}).await?; response = response_sub.receive().await?; continue } diff --git a/src/consensus/task/proposal.rs b/src/consensus/task/proposal.rs index 388468427..111387a79 100644 --- a/src/consensus/task/proposal.rs +++ b/src/consensus/task/proposal.rs @@ -258,14 +258,7 @@ async fn propose_period(consensus_p2p: P2pPtr, state: ValidatorStatePtr) -> bool // will always be true, since the node is able to produce proposals info!(target: "consensus::proposal", "consensus: Block proposal saved successfully"); // Broadcast proposal to other consensus nodes - match consensus_p2p.broadcast(proposal).await { - Ok(()) => { - info!(target: "consensus::proposal", "consensus: Proposal broadcasted successfully") - } - Err(e) => { - error!(target: "consensus::proposal", "consensus: Failed broadcasting proposal: {}", e) - } - } + consensus_p2p.broadcast(&proposal).await; } Err(e) => { error!(target: "consensus::proposal", "consensus: Block proposal save failed: {}", e); @@ -311,21 +304,15 @@ async fn finalization_period( // Broadcast finalized blocks info, if any: info!(target: "consensus::proposal", "consensus: Broadcasting finalized blocks"); for info in to_broadcast_block { - match sync_p2p.broadcast(info).await { - Ok(()) => info!(target: "consensus::proposal", "consensus: Broadcasted block"), - Err(e) => error!(target: "consensus::proposal", "consensus: Failed broadcasting block: {}", e), - } + sync_p2p.broadcast(&info).await; } // Broadcast finalized slots, if any: info!(target: "consensus::proposal", "consensus: Broadcasting finalized slots"); for slot in to_broadcast_slots { - match sync_p2p.broadcast(slot).await { - Ok(()) => info!(target: "consensus::proposal", "consensus: Broadcasted slot"), - Err(e) => { - error!(target: "consensus::proposal", "consensus: Failed broadcasting slot: {}", e) - } - } + sync_p2p.broadcast(slot).await; + info!(target: "consensus::proposal", "consensus: Broadcasted slot"); + // TODO: You can give an error if you query P2P and check if there are any connected channels } }) .detach(); @@ -338,7 +325,6 @@ async fn finalization_period( } } */ - // Verify node didn't skip next slot completed_slot != state.read().await.consensus.time_keeper.current_slot() } diff --git a/src/contract/dao/Cargo.toml b/src/contract/dao/Cargo.toml index 8de1d062c..583525007 100644 --- a/src/contract/dao/Cargo.toml +++ b/src/contract/dao/Cargo.toml @@ -30,7 +30,7 @@ darkfi = {path = "../../../", features = ["tx", "blockchain"]} darkfi-money-contract = { path = "../money", features = ["client", "no-entrypoint"] } simplelog = "0.12.1" sled = "0.34.7" -sqlx = {version = "0.6.3", features = ["runtime-async-std-rustls", "sqlite"]} +#sqlx = {version = 
"0.6.3", features = ["runtime-async-std-rustls", "sqlite"]} # We need to disable random using "custom" which makes the crate a noop # so the wasm32-unknown-unknown target is enabled. diff --git a/src/contract/money/Cargo.toml b/src/contract/money/Cargo.toml index 08d4fa755..040b9416e 100644 --- a/src/contract/money/Cargo.toml +++ b/src/contract/money/Cargo.toml @@ -28,7 +28,7 @@ async-std = {version = "1.12.0", features = ["attributes"]} darkfi = {path = "../../../", features = ["tx", "blockchain"]} simplelog = "0.12.1" sled = "0.34.7" -sqlx = {version = "0.6.3", features = ["runtime-async-std-rustls", "sqlite"]} +#sqlx = {version = "0.6.3", features = ["runtime-async-std-rustls", "sqlite"]} darkfi-contract-test-harness = {path = "../test-harness"} # We need to disable random using "custom" which makes the crate a noop diff --git a/src/dht/messages.rs b/src/dht/messages.rs index 2c9231f67..f7db2559b 100644 --- a/src/dht/messages.rs +++ b/src/dht/messages.rs @@ -21,7 +21,7 @@ use std::collections::{HashMap, HashSet}; use darkfi_serial::{serialize, SerialDecodable, SerialEncodable}; use rand::Rng; -use crate::net; +use crate::{impl_p2p_message, net::Message}; /// This struct represents a DHT key request #[derive(Debug, Clone, SerialDecodable, SerialEncodable)] @@ -45,12 +45,7 @@ impl KeyRequest { Self { id, from, to, key } } } - -impl net::Message for KeyRequest { - fn name() -> &'static str { - "keyrequest" - } -} +impl_p2p_message!(KeyRequest, "keyrequest"); /// This struct represents a DHT key request response #[derive(Debug, Clone, SerialDecodable, SerialEncodable)] @@ -76,12 +71,7 @@ impl KeyResponse { Self { id, from, to, key, value } } } - -impl net::Message for KeyResponse { - fn name() -> &'static str { - "keyresponse" - } -} +impl_p2p_message!(KeyResponse, "keyresponse"); /// This struct represents a lookup map request #[derive(Debug, Clone, SerialDecodable, SerialEncodable)] @@ -105,12 +95,7 @@ impl LookupRequest { Self { id, daemon, key, req_type } } } - -impl net::Message for LookupRequest { - fn name() -> &'static str { - "lookuprequest" - } -} +impl_p2p_message!(LookupRequest, "lookuprequest"); /// Auxiliary structure used for lookup map syncing. #[derive(Debug, SerialEncodable, SerialDecodable)] @@ -130,12 +115,7 @@ impl LookupMapRequest { Self { id, daemon } } } - -impl net::Message for LookupMapRequest { - fn name() -> &'static str { - "lookupmaprequest" - } -} +impl_p2p_message!(LookupMapRequest, "lookupmaprequest"); /// Auxiliary structure used for consensus syncing. 
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)] @@ -155,9 +135,4 @@ impl LookupMapResponse { Self { id, lookup } } } - -impl net::Message for LookupMapResponse { - fn name() -> &'static str { - "lookupmapresponse" - } -} +impl_p2p_message!(LookupMapResponse, "lookupmapresponse"); diff --git a/src/dht/mod.rs b/src/dht/mod.rs index d51ec2549..38dc7fc1d 100644 --- a/src/dht/mod.rs +++ b/src/dht/mod.rs @@ -130,10 +130,7 @@ impl Dht { }; let request = LookupRequest::new(self.id, key, 0); - if let Err(e) = self.p2p.broadcast(request).await { - error!(target: "dht", "Failed broadcasting request: {}", e); - return Err(e) - } + self.p2p.broadcast(&request).await; Ok(Some(key)) } @@ -145,7 +142,7 @@ impl Dht { Some(_) => { debug!(target: "dht", "Key removed: {}", key); let request = LookupRequest::new(self.id, key, 1); - if let Err(e) = self.p2p.broadcast(request).await { + if let Err(e) = self.p2p.broadcast(&request).await { error!(target: "dht", "Failed broadcasting request: {}", e); return Err(e) } @@ -247,13 +244,13 @@ impl Dht { // Node iterates the channel peers to ask for their lookup map for channel in values { // Communication setup - let msg_subsystem = channel.get_message_subsystem(); + let msg_subsystem = channel.message_subsystem(); msg_subsystem.add_dispatch::().await; let response_sub = channel.subscribe_msg::().await?; // Node creates a `LookupMapRequest` and sends it let order = LookupMapRequest::new(self.id); - channel.send(order).await?; + channel.send(&order).await?; // Node stores response data. let resp = response_sub.receive().await?; @@ -285,11 +282,7 @@ impl Dht { pub async fn waiting_for_response(dht: DhtPtr) -> Result> { let (p2p_recv_channel, stop_signal, timeout) = { let _dht = dht.read().await; - ( - _dht.p2p_recv_channel.clone(), - _dht.stop_signal.clone(), - _dht.p2p.settings().connect_timeout_seconds as u64, - ) + (_dht.p2p_recv_channel.clone(), _dht.stop_signal.clone(), 666) }; let ex = Arc::new(Executor::new()); let (timeout_s, timeout_r) = smol::channel::unbounded::<()>(); diff --git a/src/dht/protocol.rs b/src/dht/protocol.rs index 709815793..f66baa0e0 100644 --- a/src/dht/protocol.rs +++ b/src/dht/protocol.rs @@ -81,7 +81,7 @@ impl Protocol { async fn handle_receive_request(self: Arc) -> Result<()> { debug!(target: "dht::protocol", "Protocol::handle_receive_request() [START]"); - let exclude_list = vec![self.channel.address()]; + let exclude_list = vec![self.channel.address().clone()]; loop { let req = match self.req_sub.receive().await { Ok(v) => v, @@ -109,12 +109,7 @@ impl Protocol { let daemon = self.dht.read().await.id; if daemon != req_copy.to { - if let Err(e) = - self.p2p.broadcast_with_exclude(req_copy.clone(), &exclude_list).await - { - error!(target: "dht::protocol", "Protocol::handle_receive_response(): p2p broadcast fail: {}", e); - }; - continue + self.p2p.broadcast_with_exclude(&req_copy, &exclude_list).await; } match self.dht.read().await.map.get(&req_copy.key) { @@ -122,7 +117,7 @@ impl Protocol { let response = KeyResponse::new(daemon, req_copy.from, req_copy.key, value.clone()); debug!(target: "dht::protocol", "Protocol::handle_receive_request(): sending response: {:?}", response); - if let Err(e) = self.channel.send(response).await { + if let Err(e) = self.channel.send(&response).await { error!(target: "dht::protocol", "Protocol::handle_receive_request(): p2p broadcast of response failed: {}", e); }; } @@ -135,7 +130,7 @@ impl Protocol { async fn handle_receive_response(self: Arc) -> Result<()> { debug!(target: "dht::protocol", 
"Protocol::handle_receive_response() [START]"); - let exclude_list = vec![self.channel.address()]; + let exclude_list = vec![self.channel.address().clone()]; loop { let resp = match self.resp_sub.receive().await { Ok(v) => v, @@ -162,12 +157,7 @@ impl Protocol { } if self.dht.read().await.id != resp_copy.to { - if let Err(e) = - self.p2p.broadcast_with_exclude(resp_copy.clone(), &exclude_list).await - { - error!(target: "dht::protocol", "Protocol::handle_receive_response(): p2p broadcast fail: {}", e); - }; - continue + self.p2p.broadcast_with_exclude(&resp_copy, &exclude_list).await; } self.notify_queue_sender.send(resp_copy.clone()).await?; @@ -176,7 +166,7 @@ impl Protocol { async fn handle_receive_lookup_request(self: Arc) -> Result<()> { debug!(target: "dht::protocol", "Protocol::handle_receive_lookup_request() [START]"); - let exclude_list = vec![self.channel.address()]; + let exclude_list = vec![self.channel.address().clone()]; loop { let req = match self.lookup_sub.receive().await { Ok(v) => v, @@ -217,9 +207,7 @@ impl Protocol { continue }; - if let Err(e) = self.p2p.broadcast_with_exclude(req_copy, &exclude_list).await { - error!(target: "dht::protocol", "Protocol::handle_receive_lookup_request(): p2p broadcast fail: {}", e); - }; + self.p2p.broadcast_with_exclude(&req_copy, &exclude_list).await; } } @@ -252,7 +240,7 @@ impl Protocol { // Extra validations can be added here. let lookup = self.dht.read().await.lookup.clone(); let response = LookupMapResponse::new(lookup); - if let Err(e) = self.channel.send(response).await { + if let Err(e) = self.channel.send(&response).await { error!(target: "dht::protocol", "Protocol::handle_receive_lookup_map_request() channel send fail: {}", e); }; } diff --git a/src/dht2/net_hashmap.rs b/src/dht2/net_hashmap.rs index 9e9c94367..a69d09696 100644 --- a/src/dht2/net_hashmap.rs +++ b/src/dht2/net_hashmap.rs @@ -84,7 +84,7 @@ where /// Additionally, this change will be broadcasted to the P2P network. 
pub async fn insert(&mut self, k: K, v: V) -> Result> { let message = NetHashMapInsert { k: k.clone(), v: v.clone() }; - self.p2p.broadcast(message).await?; + self.p2p.broadcast(&message).await; Ok(self.hashmap.insert(k, v)) } @@ -101,7 +101,7 @@ where Q: Hash + Eq + Send + Sync + Encodable + Decodable + 'static, { let message = NetHashMapRemove { k: k.clone() }; - self.p2p.broadcast(message).await?; + self.p2p.broadcast(&message).await; Ok(self.hashmap.remove(&k)) } @@ -138,9 +138,7 @@ where K: Encodable + Decodable + Send + Sync + 'static, V: Encodable + Decodable + Send + Sync + 'static, { - fn name() -> &'static str { - "nethashmap_insert" - } + const NAME: &'static str = "nethashmap_insert"; } #[derive(Debug, Clone, SerialDecodable, SerialEncodable)] @@ -152,7 +150,5 @@ impl net::Message for NetHashMapRemove where K: Encodable + Decodable + Send + Sync + 'static, { - fn name() -> &'static str { - "nethashmap_remove" - } + const NAME: &'static str = "nethashmap_remove"; } diff --git a/src/error.rs b/src/error.rs index cf5b69828..5f1a0bbb8 100644 --- a/src/error.rs +++ b/src/error.rs @@ -100,6 +100,12 @@ pub enum Error { // ====================== // Network-related errors // ====================== + #[error("Invalid Dialer scheme")] + InvalidDialerScheme, + + #[error("Invalid Listener scheme")] + InvalidListenerScheme, + #[error("Unsupported network transport: {0}")] UnsupportedTransport(String), @@ -136,6 +142,10 @@ pub enum Error { #[error("Network operation failed")] NetworkOperationFailed, + #[cfg(feature = "arti-client")] + #[error(transparent)] + ArtiError(#[from] arti_client::Error), + #[error("Malformed packet")] MalformedPacket, @@ -189,11 +199,11 @@ pub enum Error { #[error("Invalid DarkFi address")] InvalidAddress, - #[cfg(feature = "futures-rustls")] + #[cfg(feature = "async-rustls")] #[error(transparent)] - RustlsError(#[from] futures_rustls::rustls::Error), + RustlsError(#[from] async_rustls::rustls::Error), - #[cfg(feature = "futures-rustls")] + #[cfg(feature = "async-rustls")] #[error("Invalid DNS Name {0}")] RustlsInvalidDns(String), @@ -279,9 +289,9 @@ pub enum Error { // =============== // Database errors // =============== - #[cfg(feature = "sqlx")] - #[error("Sqlx error: {0}")] - SqlxError(String), + #[cfg(feature = "rusqlite")] + #[error("rusqlite error: {0}")] + RusqliteError(String), #[cfg(feature = "sled")] #[error(transparent)] @@ -589,10 +599,10 @@ impl From for Error { } } -#[cfg(feature = "sqlx")] -impl From for Error { - fn from(err: sqlx::error::Error) -> Self { - Self::SqlxError(err.to_string()) +#[cfg(feature = "rusqlite")] +impl From for Error { + fn from(err: rusqlite::Error) -> Self { + Self::RusqliteError(err.to_string()) } } @@ -619,9 +629,9 @@ impl From for Error { } } -#[cfg(feature = "futures-rustls")] -impl From for Error { - fn from(err: futures_rustls::rustls::client::InvalidDnsNameError) -> Self { +#[cfg(feature = "async-rustls")] +impl From for Error { + fn from(err: async_rustls::rustls::client::InvalidDnsNameError) -> Self { Self::RustlsInvalidDns(err.to_string()) } } diff --git a/src/event_graph/protocol_event.rs b/src/event_graph/protocol_event.rs index 64baf961f..02eb224c0 100644 --- a/src/event_graph/protocol_event.rs +++ b/src/event_graph/protocol_event.rs @@ -26,7 +26,8 @@ use log::debug; use super::EventMsg; use crate::{ event_graph::model::{Event, EventId, ModelPtr}, - net, + impl_p2p_message, net, + net::Message, util::{async_util::sleep, ringbuffer::RingBuffer}, Result, }; @@ -42,16 +43,19 @@ pub struct InvItem { pub struct Inv { 
pub invs: Vec, } +impl_p2p_message!(Inv, "inv"); #[derive(SerialDecodable, SerialEncodable, Clone, Debug)] struct SyncEvent { leaves: Vec, } +impl_p2p_message!(SyncEvent, "syncevent"); #[derive(SerialDecodable, SerialEncodable, Clone, Debug)] struct GetData { events: Vec, } +impl_p2p_message!(GetData, "getdata"); pub type SeenPtr = Arc>; @@ -101,7 +105,7 @@ where seen_event: SeenPtr, seen_inv: SeenPtr, ) -> net::ProtocolBasePtr { - let message_subsytem = channel.get_message_subsystem(); + let message_subsytem = channel.message_subsystem(); message_subsytem.add_dispatch::>().await; message_subsytem.add_dispatch::().await; message_subsytem.add_dispatch::().await; @@ -137,7 +141,7 @@ where async fn handle_receive_event(self: Arc) -> Result<()> { debug!(target: "event_graph", "ProtocolEvent::handle_receive_event() [START]"); - let exclude_list = vec![self.channel.address()]; + let exclude_list = vec![self.channel.address().clone()]; loop { let event = self.event_sub.receive().await?; let event = (*event).to_owned(); @@ -152,13 +156,13 @@ where self.send_inv(&event).await?; // Broadcast the msg - self.p2p.broadcast_with_exclude(event, &exclude_list).await?; + self.p2p.broadcast_with_exclude(&event, &exclude_list).await; } } async fn handle_receive_inv(self: Arc) -> Result<()> { debug!(target: "event_graph", "ProtocolEvent::handle_receive_inv() [START]"); - let exclude_list = vec![self.channel.address()]; + let exclude_list = vec![self.channel.address().clone()]; loop { let inv = self.inv_sub.receive().await?; let inv = (*inv).to_owned(); @@ -176,7 +180,7 @@ where // } // Broadcast the inv msg - self.p2p.broadcast_with_exclude(inv, &exclude_list).await?; + self.p2p.broadcast_with_exclude(&inv, &exclude_list).await; } } async fn handle_receive_getdata(self: Arc) -> Result<()> { @@ -188,7 +192,7 @@ where for event_id in events { let model_event = self.model.lock().await.get_event(&event_id); if let Some(event) = model_event { - self.channel.send(event).await?; + self.channel.send(&event).await?; } } } @@ -214,7 +218,7 @@ where let children = model.get_offspring(leaf); for child in children { - self.channel.send(child).await?; + self.channel.send(&child).await?; } } } @@ -226,7 +230,7 @@ where loop { sleep(6).await; let leaves = self.model.lock().await.find_leaves(); - self.channel.send(SyncEvent { leaves }).await?; + self.channel.send(&SyncEvent { leaves }).await?; } } @@ -240,14 +244,14 @@ where async fn send_inv(&self, event: &Event) -> Result<()> { debug!(target: "event_graph", "ProtocolEvent::send_inv()"); - self.p2p.broadcast(Inv { invs: vec![InvItem { hash: event.hash() }] }).await?; + self.p2p.broadcast(&Inv { invs: vec![InvItem { hash: event.hash() }] }).await; Ok(()) } async fn send_getdata(&self, events: Vec) -> Result<()> { debug!(target: "event_graph", "ProtocolEvent::send_getdata()"); - self.channel.send(GetData { events }).await?; + self.channel.send(&GetData { events }).await?; Ok(()) } } @@ -278,25 +282,5 @@ impl net::Message for Event where T: Send + Sync + Decodable + Encodable + 'static, { - fn name() -> &'static str { - "event" - } -} - -impl net::Message for Inv { - fn name() -> &'static str { - "inv" - } -} - -impl net::Message for SyncEvent { - fn name() -> &'static str { - "syncevent" - } -} - -impl net::Message for GetData { - fn name() -> &'static str { - "getdata" - } + const NAME: &'static str = "event"; } diff --git a/src/lib.rs b/src/lib.rs index a7878d99f..8567deb5d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -16,6 +16,8 @@ * along with this program. If not, see . 
*/ +#![feature(ip)] + pub mod error; pub use error::{ClientFailed, ClientResult, Error, Result}; diff --git a/src/net/acceptor.rs b/src/net/acceptor.rs index 7b0bffa2b..36c0b2dce 100644 --- a/src/net/acceptor.rs +++ b/src/net/acceptor.rs @@ -16,27 +16,25 @@ * along with this program. If not, see . */ -use std::{env, fs}; - use async_std::sync::{Arc, Mutex}; -use log::{error, info}; +use log::error; use smol::Executor; use url::Url; use super::{ - transport::{TcpTransport, TorTransport, Transport, TransportListener, TransportName}, - Channel, ChannelPtr, SessionWeakPtr, + channel::{Channel, ChannelPtr}, + session::SessionWeakPtr, + transport::{Listener, PtListener}, }; use crate::{ - net::transport::NymTransport, system::{StoppableTask, StoppableTaskPtr, Subscriber, SubscriberPtr, Subscription}, Error, Result, }; -/// Atomic pointer to Acceptor class. +/// Atomic pointer to Acceptor pub type AcceptorPtr = Arc; -/// Create inbound socket connections. +/// Create inbound socket connections pub struct Acceptor { channel_subscriber: SubscriberPtr>, task: StoppableTaskPtr, @@ -45,105 +43,22 @@ pub struct Acceptor { impl Acceptor { /// Create new Acceptor object. - pub fn new(session: Mutex>) -> Arc { + pub fn new(session: Mutex>) -> AcceptorPtr { Arc::new(Self { channel_subscriber: Subscriber::new(), task: StoppableTask::new(), session, }) } - /// Start accepting inbound socket connections. Creates a listener to start - /// listening on a local socket address. Then runs an accept loop in a new - /// thread, erroring if a connection problem occurs. - pub async fn start( - self: Arc, - accept_url: Url, - executor: Arc>, - ) -> Result<()> { - let transport_name = TransportName::try_from(accept_url.clone())?; - macro_rules! accept { - ($listener:expr, $transport:expr, $upgrade:expr) => {{ - if let Err(err) = $listener { - error!(target: "net::acceptor", "Setup for {} failed: {}", accept_url, err); - return Err(Error::BindFailed(accept_url.as_str().into())) - } - - let listener = $listener?.await; - - if let Err(err) = listener { - error!(target: "net::acceptor", "Bind listener to {} failed: {}", accept_url, err); - return Err(Error::BindFailed(accept_url.as_str().into())) - } - - let listener = listener?; - - match $upgrade { - None => { - self.accept(Box::new(listener), executor); - } - Some(u) if u == "tls" => { - let tls_listener = $transport.upgrade_listener(listener)?.await?; - self.accept(Box::new(tls_listener), executor); - } - Some(u) => return Err(Error::UnsupportedTransportUpgrade(u)), - } - }}; - } - - match transport_name { - TransportName::Tcp(upgrade) => { - let transport = TcpTransport::new(None, 1024); - let listener = transport.listen_on(accept_url.clone()); - accept!(listener, transport, upgrade); - } - TransportName::Tor(upgrade) => { - let socks5_url = Url::parse( - &env::var("DARKFI_TOR_SOCKS5_URL") - .unwrap_or_else(|_| "socks5://127.0.0.1:9050".to_string()), - )?; - - let torc_url = Url::parse( - &env::var("DARKFI_TOR_CONTROL_URL") - .unwrap_or_else(|_| "tcp://127.0.0.1:9051".to_string()), - )?; - - let auth_cookie = env::var("DARKFI_TOR_COOKIE"); - - if auth_cookie.is_err() { - return Err(Error::TorError( - "Please set the env var DARKFI_TOR_COOKIE to the configured tor cookie file. 
\ - For example: \ - \'export DARKFI_TOR_COOKIE=\"/var/lib/tor/control_auth_cookie\"\'".to_string(), - )); - } - - let auth_cookie = auth_cookie.unwrap(); - let auth_cookie = hex::encode(fs::read(auth_cookie).unwrap()); - let transport = TorTransport::new(socks5_url, Some((torc_url, auth_cookie)))?; - - // generate EHS pointing to local address - let hurl = transport.create_ehs(accept_url.clone())?; - - info!(target: "net::acceptor", "EHS TOR: {}", hurl.to_string()); - - let listener = transport.clone().listen_on(accept_url.clone()); - - accept!(listener, transport, upgrade); - } - TransportName::Nym(upgrade) => { - let transport = NymTransport::new()?; - - let listener = transport.clone().listen_on(accept_url.clone()); - - accept!(listener, transport, upgrade); - } - _ => unimplemented!(), - } + /// Start accepting inbound socket connections + pub async fn start(self: Arc, endpoint: Url, ex: Arc>) -> Result<()> { + let listener = Listener::new(endpoint).await?.listen().await?; + self.accept(listener, ex); Ok(()) } - /// Stop accepting inbound socket connections. + /// Stop accepting inbound socket connections pub async fn stop(&self) { // Send stop signal self.task.stop().await; @@ -154,20 +69,19 @@ impl Acceptor { self.channel_subscriber.clone().subscribe().await } - /// Run the accept loop in a new thread and error if a connection problem - /// occurs. - fn accept(self: Arc, listener: Box, executor: Arc>) { - let self2 = self.clone(); + /// Run the accept loop in a new thread and error if a connection problem occurs + fn accept(self: Arc, listener: Box, ex: Arc>) { + let self_ = self.clone(); self.task.clone().start( - self.clone().run_accept_loop(listener), - |result| self2.handle_stop(result), + self.run_accept_loop(listener), + |result| self_.handle_stop(result), Error::NetworkServiceStopped, - executor, + ex, ); } /// Run the accept loop. - async fn run_accept_loop(self: Arc, listener: Box) -> Result<()> { + async fn run_accept_loop(self: Arc, listener: Box) -> Result<()> { loop { match listener.next().await { Ok((stream, url)) => { @@ -175,23 +89,23 @@ impl Acceptor { Channel::new(stream, url, self.session.lock().await.clone().unwrap()).await; self.channel_subscriber.notify(Ok(channel)).await; } + Err(e) => { - error!(target: "net::acceptor", "Error listening for new connection: {}", e); + error!( + target: "net::acceptor::run_accept_loop()", + "[P2P] Acceptor failed listening: {}", e, + ); } } } } - /// Handles network errors. Panics if error passes silently, otherwise - /// broadcasts the error. + /// Handles network errors. Panics if errors pass silently, otherwise broadcasts it + /// to all channel subscribers. 
async fn handle_stop(self: Arc, result: Result<()>) { match result { Ok(()) => panic!("Acceptor task should never complete without error status"), - Err(err) => { - // Send this error to all channel subscribers - let result = Err(err); - self.channel_subscriber.notify(result).await; - } + Err(err) => self.channel_subscriber.notify(Err(err)).await, } } } diff --git a/src/net/channel.rs b/src/net/channel.rs index 753302273..8232d4009 100644 --- a/src/net/channel.rs +++ b/src/net/channel.rs @@ -17,21 +17,24 @@ */ use async_std::sync::{Arc, Mutex}; +use darkfi_serial::serialize; use futures::{ io::{ReadHalf, WriteHalf}, AsyncReadExt, }; use log::{debug, error, info}; -use rand::Rng; +use rand::{rngs::OsRng, Rng}; use serde_json::json; use smol::Executor; use url::Url; use super::{ message, + message::Packet, message_subscriber::{MessageSubscription, MessageSubsystem}, - transport::TransportStream, - Session, SessionBitflag, SessionWeakPtr, + p2p::{dnet, P2pPtr}, + session::{Session, SessionBitFlag, SessionWeakPtr}, + transport::PtStream, }; use crate::{ system::{StoppableTask, StoppableTaskPtr, Subscriber, SubscriberPtr, Subscription}, @@ -39,78 +42,85 @@ use crate::{ Error, Result, }; -/// Atomic pointer to async channel. +/// Atomic pointer to async channel pub type ChannelPtr = Arc; -const SIZE_OF_BUFFER: usize = 65536; +const RINGBUFFER_SIZE: usize = 512; +/// Channel debug info struct ChannelInfo { random_id: u32, remote_node_id: String, - last_msg: String, - last_status: String, - // Message log which is cleared on querying get_info - log: Option>>, + log: Mutex>, } impl ChannelInfo { - fn new(channel_log: bool) -> Self { - let log = match channel_log { - true => Some(Mutex::new(RingBuffer::new(SIZE_OF_BUFFER))), - false => None, - }; - + fn new() -> Self { Self { - random_id: rand::thread_rng().gen(), + random_id: OsRng.gen(), remote_node_id: String::new(), - last_msg: String::new(), - last_status: String::new(), - log, + log: Mutex::new(RingBuffer::new(RINGBUFFER_SIZE)), } } - // ANCHOR: get_info + /// Get available debug info, resets the ringbuffer when called. async fn get_info(&self) -> serde_json::Value { - let log = match &self.log { - Some(l) => { - let mut lock = l.lock().await; - let ret = lock.clone(); - *lock = RingBuffer::new(SIZE_OF_BUFFER); - ret + let mut lock = self.log.lock().await; + let log = lock.clone(); + *lock = RingBuffer::new(RINGBUFFER_SIZE); + drop(lock); + + let (last_msg, last_status) = { + match log.back() { + Some((_, m, s)) => (m.clone(), s.clone()), + None => (String::new(), String::new()), } - None => RingBuffer::new(0), }; json!({ "random_id": self.random_id, "remote_node_id": self.remote_node_id, - "last_msg": self.last_msg, - "last_status": self.last_status, + "last_msg": last_msg, + "last_status": last_status, "log": log, }) } - // ANCHOR_END: get_info } /// Async channel for communication between nodes. 
pub struct Channel { - reader: Mutex>>, - writer: Mutex>>, + /// The reading half of the transport stream + reader: Mutex>>, + /// The writing half of the transport stream + writer: Mutex>>, + /// Socket address address: Url, + /// The message subsystem instance for this channel message_subsystem: MessageSubsystem, + /// Subscriber listening for stop signal for closing this channel stop_subscriber: SubscriberPtr, + /// Task that is listening for the stop signal receive_task: StoppableTaskPtr, + /// A boolean marking if this channel is stopped stopped: Mutex, - info: Mutex, + /// Weak pointer to respective session session: SessionWeakPtr, + /// Channel debug info + info: Mutex>, +} + +impl std::fmt::Debug for Channel { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.address) + } } impl Channel { - /// Sets up a new channel. Creates a reader and writer TCP stream and - /// summons the message subscriber subsystem. Performs a network - /// handshake on the subsystem dispatchers. + /// Sets up a new channel. Creates a reader and writer [`PtStream`] and + /// summons the message subscriber subsystem. Performs a network handshake + /// on the subsystem dispatchers. pub async fn new( - stream: Box, + stream: Box, address: Url, session: SessionWeakPtr, ) -> Arc { @@ -121,7 +131,11 @@ impl Channel { let message_subsystem = MessageSubsystem::new(); Self::setup_dispatchers(&message_subsystem).await; - let channel_log = session.upgrade().unwrap().p2p().settings().channel_log; + let info = if *session.upgrade().unwrap().p2p().dnet_enabled.lock().await { + Mutex::new(Some(ChannelInfo::new())) + } else { + Mutex::new(None) + }; Arc::new(Self { reader, @@ -131,259 +145,235 @@ impl Channel { stop_subscriber: Subscriber::new(), receive_task: StoppableTask::new(), stopped: Mutex::new(false), - info: Mutex::new(ChannelInfo::new(channel_log)), session, + info, }) } - pub async fn get_info(&self) -> serde_json::Value { - self.info.lock().await.get_info().await + /// Perform network handshake for message subsystem dispatchers. + async fn setup_dispatchers(subsystem: &MessageSubsystem) { + subsystem.add_dispatch::().await; + subsystem.add_dispatch::().await; + subsystem.add_dispatch::().await; + subsystem.add_dispatch::().await; + subsystem.add_dispatch::().await; + subsystem.add_dispatch::().await; } - /// Starts the channel. Runs a receive loop to start receiving messages or - /// handles a network failure. + /// Fetch debug info, if any + pub async fn get_info(&self) -> serde_json::Value { + if *self.p2p().dnet_enabled.lock().await { + // Maybe here we should panic? It probably should never be + // the case that dnet is enabled, but this stuff is empty. + // However it's possible that it somehow happens through a + // race condition, so let's be safe. + match self.info.lock().await.as_ref() { + Some(info) => info.get_info().await, + None => json!({}), + } + } else { + json!({}) + } + } + + /// Starts the channel. Runs a receive loop to start receiving messages + /// or handles a network failure. 
pub fn start(self: Arc, executor: Arc>) { - debug!(target: "net::channel::start()", "START, address={}", self.address()); - let self2 = self.clone(); + debug!(target: "net::channel::start()", "START => address={}", self.address()); + + let self_ = self.clone(); self.receive_task.clone().start( self.clone().main_receive_loop(), - |result| self2.handle_stop(result), + |result| self_.handle_stop(result), Error::NetworkServiceStopped, executor, ); - debug!(target: "net::channel::start()", "END, address={}", self.address()); + + debug!(target: "net::channel::start()", "END => address={}", self.address()); } - /// Stops the channel. Steps through each component of the channel - /// connection and sends a stop signal. Notifies all subscribers that - /// the channel has been closed. + /// Stops the channel. Steps through each component of the channel connection + /// and sends a stop signal. Notifies all subscribers that the channel has + /// been closed. pub async fn stop(&self) { - debug!(target: "net::channel::stop()", "START, address={}", self.address()); - if !(*self.stopped.lock().await) { + debug!(target: "net::channel::stop()", "START => address={}", self.address()); + + if !*self.stopped.lock().await { *self.stopped.lock().await = true; self.stop_subscriber.notify(Error::ChannelStopped).await; self.receive_task.stop().await; self.message_subsystem.trigger_error(Error::ChannelStopped).await; - debug!(target: "net::channel::stop()", "END, address={}", self.address()); } + + debug!(target: "net::channel::stop()", "END => address={}", self.address()); } /// Creates a subscription to a stopped signal. /// If the channel is stopped then this will return a ChannelStopped error. pub async fn subscribe_stop(&self) -> Result> { - debug!(target: "net::channel::subscribe_stop()", "START, address={}", self.address()); + debug!(target: "net::channel::subscribe_stop()", "START => address={}", self.address()); - { - let stopped = *self.stopped.lock().await; - if stopped { - return Err(Error::ChannelStopped) - } + if *self.stopped.lock().await { + return Err(Error::ChannelStopped) } let sub = self.stop_subscriber.clone().subscribe().await; - debug!(target: "net::channel::subscribe_stop()", "END, address={}", self.address()); + + debug!(target: "net::channel::subscribe_stop()", "END => address={}", self.address()); Ok(sub) } - /// Sends a message across a channel. Calls function 'send_message' that - /// creates a new payload and sends it over the TCP connection as a - /// packet. Returns an error if something goes wrong. - pub async fn send(&self, message: M) -> Result<()> { + /// Sends a message across a channel. Calls `send_message` that creates + /// a new payload and sends it over the network transport as a packet. + /// Returns an error if something goes wrong. 
+ pub async fn send(&self, message: &M) -> Result<()> { debug!( - target: "net::channel::send()", - "START, command={:?}, address={}", - M::name(), - self.address() + target: "net::channel::send()", "[START] command={} => address={}", + M::NAME, self.address(), ); - { - let stopped = *self.stopped.lock().await; - if stopped { - return Err(Error::ChannelStopped) - } + if *self.stopped.lock().await { + return Err(Error::ChannelStopped) } // Catch failure and stop channel, return a net error - let result = match self.send_message(message).await { - Ok(()) => Ok(()), - Err(err) => { - error!(target: "net::channel::send()", "Channel send error for [{}]: {}", self.address(), err); - self.stop().await; - Err(Error::ChannelStopped) - } - }; + if let Err(e) = self.send_message(message).await { + error!( + target: "net::channel::send()", "[P2P]Channel send error for [{}]: {}", + self.address(), e + ); + self.stop().await; + return Err(Error::ChannelStopped) + } debug!( - target: "net::channel::send()", - "END, command={:?}, address={}", - M::name(), - self.address() + target: "net::channel::send()", "[END] command={} => address={}", + M::NAME,self.address(), ); - { - let info = &mut *self.info.lock().await; - info.last_msg = M::name().to_string(); - info.last_status = "sent".to_string(); - } - result + Ok(()) } - /// Implements send message functionality. Creates a new payload and encodes - /// it. Then creates a message packet- the base type of the network- and - /// copies the payload into it. Then we send the packet over the TCP - /// stream. - async fn send_message(&self, message: M) -> Result<()> { - let mut payload = Vec::new(); - message.encode(&mut payload)?; - let packet = message::Packet { command: String::from(M::name()), payload }; - let time = NanoTimestamp::current_time(); - //let time = time::unix_timestamp()?; + /// Implements send message functionality. Creates a new payload and + /// encodes it. Then creates a message packet (the base type of the + /// network) and copies the payload into it. Then we send the packet + /// over the network stream. + async fn send_message(&self, message: &M) -> Result<()> { + let packet = Packet { command: M::NAME.to_string(), payload: serialize(message) }; - { - let info = &mut *self.info.lock().await; - if let Some(l) = &info.log { - l.lock().await.push((time, "send".to_string(), packet.command.clone())); - }; - } + dnet!(self, + let time = NanoTimestamp::current_time(); + let info_lock = self.info.lock().await; + let mut log = info_lock.as_ref().unwrap().log.lock().await; + log.push((time, "send".to_string(), packet.command.clone())); + ); let stream = &mut *self.writer.lock().await; - message::send_packet(stream, packet).await + let _written = message::send_packet(stream, packet).await?; + + Ok(()) } - /// Subscribe to a messages on the message subsystem. + /// Subscribe to a message on the message subsystem. pub async fn subscribe_msg(&self) -> Result> { debug!( - target: "net::channel::subscribe_msg()", - "START, command={:?}, address={}", - M::name(), - self.address() + target: "net::channel::subscribe_msg()", "[START] command={} => address={}", + M::NAME, self.address(), ); + let sub = self.message_subsystem.subscribe::().await; + debug!( - target: "net::channel::subscribe_msg()", - "END, command={:?}, address={}", - M::name(), - self.address() + target: "net::channel::subscribe_msg()", "[END] command={} => address={}", + M::NAME, self.address(), ); + sub } - /// Return the local socket address. 
- pub fn address(&self) -> Url { - self.address.clone() - } - - pub async fn remote_node_id(&self) -> String { - self.info.lock().await.remote_node_id.clone() - } - pub async fn set_remote_node_id(&self, remote_node_id: String) { - self.info.lock().await.remote_node_id = remote_node_id; - } - - /// End of file error. Triggered when unexpected end of file occurs. - fn is_eof_error(err: Error) -> bool { - match err { - Error::Io(io_err) => io_err == std::io::ErrorKind::UnexpectedEof, - _ => false, - } - } - - /// Perform network handshake for message subsystem dispatchers. - async fn setup_dispatchers(message_subsystem: &MessageSubsystem) { - message_subsystem.add_dispatch::().await; - message_subsystem.add_dispatch::().await; - message_subsystem.add_dispatch::().await; - message_subsystem.add_dispatch::().await; - message_subsystem.add_dispatch::().await; - message_subsystem.add_dispatch::().await; - message_subsystem.add_dispatch::().await; - } - - /// Convenience function that returns the Message Subsystem. - pub fn get_message_subsystem(&self) -> &MessageSubsystem { - &self.message_subsystem - } - - /// Run the receive loop. Start receiving messages or handle network - /// failure. - async fn main_receive_loop(self: Arc) -> Result<()> { - debug!(target: "net::channel::main_receive_loop()", "START, address={}", self.address()); - - let reader = &mut *self.reader.lock().await; - - loop { - let packet = match message::read_packet(reader).await { - Ok(packet) => packet, - Err(err) => { - if Self::is_eof_error(err.clone()) { - info!( - target: "net::channel::main_receive_loop()", - "Inbound connection {} disconnected", - self.address() - ); - } else { - error!( - target: "net::channel::main_receive_loop()", - "Read error on channel {}: {}", - self.address(), - err - ); - } - debug!( - target: "net::channel::main_receive_loop()", - "Channel::receive_loop() stopping channel {}", - self.address() - ); - self.stop().await; - return Err(Error::ChannelStopped) - } - }; - { - let info = &mut *self.info.lock().await; - info.last_msg = packet.command.clone(); - info.last_status = "recv".to_string(); - let time = NanoTimestamp::current_time(); - //let time = time::unix_timestamp()?; - if let Some(l) = &info.log { - l.lock().await.push((time, "recv".to_string(), packet.command.clone())); - }; - } - - // Send result to our subscribers - self.message_subsystem.notify(&packet.command, packet.payload).await; - } - } - /// Handle network errors. Panic if error passes silently, otherwise /// broadcast the error. async fn handle_stop(self: Arc, result: Result<()>) { - debug!( - target: "net::channel::handle_stop()", - "START, address={}", - self.address() - ); + debug!(target: "net::channel::handle_stop()", "[START] address={}", self.address()); + match result { Ok(()) => panic!("Channel task should never complete without error status"), - Err(err) => { - // Send this error to all channel subscribers - self.message_subsystem.trigger_error(err).await; - } + // Send this error to all channel subscribers + Err(e) => self.message_subsystem.trigger_error(e).await, } - debug!( - target: "net::channel::handle_stop()", - "END, address={}", - self.address() - ); + + debug!(target: "net::channel::handle_stop()", "[END] address={}", self.address()); + } + + /// Run the receive loop. Start receiving messages or handle network failure. 
+ async fn main_receive_loop(self: Arc) -> Result<()> { + debug!(target: "net::channel::main_receive_loop()", "[START] address={}", self.address()); + + // Acquire reader lock + let reader = &mut *self.reader.lock().await; + + // Run loop + loop { + let packet = match message::read_packet(reader).await { + Ok(packet) => packet, + Err(err) => { + if Self::is_eof_error(&err) { + info!( + target: "net::channel::main_receive_loop()", + "[net] Channel inbound connecion {} disconnected", + self.address(), + ); + } else { + error!( + target: "net::channel::main_receive_loop()", + "Read error on channel {}: {}", + self.address(), err, + ); + } + + debug!( + target: "net::channel::main_receive_loop()", + "Stopping channel {}", self.address(), + ); + self.stop().await; + return Err(Error::ChannelStopped) + } + }; + + // Send result to our subscribers + self.message_subsystem.notify(&packet.command, &packet.payload).await; + } + } + + /// Returns the local socket address + pub fn address(&self) -> &Url { + &self.address + } + + /// Returns the inner [`MessageSubsystem`] reference + pub fn message_subsystem(&self) -> &MessageSubsystem { + &self.message_subsystem } fn session(&self) -> Arc { self.session.upgrade().unwrap() } - pub fn session_type_id(&self) -> SessionBitflag { + pub fn session_type_id(&self) -> SessionBitFlag { let session = self.session(); session.type_id() } + + fn p2p(&self) -> P2pPtr { + self.session().p2p() + } + + fn is_eof_error(err: &Error) -> bool { + match err { + Error::Io(ioerr) => ioerr == &std::io::ErrorKind::UnexpectedEof, + _ => false, + } + } } diff --git a/src/net/connector.rs b/src/net/connector.rs index b0fe3ef2d..aede203ba 100644 --- a/src/net/connector.rs +++ b/src/net/connector.rs @@ -16,104 +16,39 @@ * along with this program. If not, see . */ -use std::{env, time::Duration}; +use std::time::Duration; -use async_std::sync::Arc; -use log::error; use url::Url; use super::{ - transport::{NymTransport, TcpTransport, TorTransport, Transport, TransportName}, - Channel, ChannelPtr, SessionWeakPtr, SettingsPtr, + channel::{Channel, ChannelPtr}, + session::SessionWeakPtr, + settings::SettingsPtr, + transport::Dialer, }; -use crate::{Error, Result}; +use crate::Result; -/// Create outbound socket connections. +/// Create outbound socket connections pub struct Connector { + /// P2P settings settings: SettingsPtr, + /// Weak pointer to the session pub session: SessionWeakPtr, } impl Connector { - /// Create a new connector with default network settings. + /// Create a new connector with given network settings pub fn new(settings: SettingsPtr, session: SessionWeakPtr) -> Self { Self { settings, session } } - /// Establish an outbound connection. - pub async fn connect(&self, connect_url: Url) -> Result { - let transport_name = TransportName::try_from(connect_url.clone())?; - self.connect_channel( - connect_url, - transport_name, - Duration::from_secs(self.settings.connect_timeout_seconds.into()), - ) - .await - } + /// Establish an outbound connection + pub async fn connect(&self, endpoint: Url) -> Result { + let dialer = Dialer::new(endpoint.clone()).await?; + let timeout = Duration::from_secs(self.settings.outbound_connect_timeout); + let ptstream = dialer.dial(Some(timeout)).await?; - async fn connect_channel( - &self, - connect_url: Url, - transport_name: TransportName, - timeout: Duration, - ) -> Result> { - macro_rules! 
connect { - ($stream:expr, $transport:expr, $upgrade:expr) => {{ - if let Err(err) = $stream { - error!(target: "net::connector", "Setup for {} failed: {}", connect_url, err); - return Err(Error::ConnectFailed) - } - - let stream = $stream?.await; - - if let Err(err) = stream { - error!(target: "net::connector", "Connection to {} failed: {}", connect_url, err); - return Err(Error::ConnectFailed) - } - - let channel = match $upgrade { - // session - None => { - Channel::new(Box::new(stream?), connect_url.clone(), self.session.clone()) - .await - } - Some(u) if u == "tls" => { - let stream = $transport.upgrade_dialer(stream?)?.await; - Channel::new(Box::new(stream?), connect_url, self.session.clone()).await - } - Some(u) => return Err(Error::UnsupportedTransportUpgrade(u)), - }; - - Ok(channel) - }}; - } - - match transport_name { - TransportName::Tcp(upgrade) => { - let transport = TcpTransport::new(None, 1024); - let stream = transport.dial(connect_url.clone(), Some(timeout)); - connect!(stream, transport, upgrade) - } - TransportName::Tor(upgrade) => { - let socks5_url = Url::parse( - &env::var("DARKFI_TOR_SOCKS5_URL") - .unwrap_or_else(|_| "socks5://127.0.0.1:9050".to_string()), - )?; - - let transport = TorTransport::new(socks5_url, None)?; - - let stream = transport.clone().dial(connect_url.clone(), None); - - connect!(stream, transport, upgrade) - } - TransportName::Nym(upgrade) => { - let transport = NymTransport::new()?; - - let stream = transport.clone().dial(connect_url.clone(), None); - - connect!(stream, transport, upgrade) - } - _ => unimplemented!(), - } + let channel = Channel::new(ptstream, endpoint, self.session.clone()).await; + Ok(channel) } } diff --git a/src/net/constants.rs b/src/net/constants.rs deleted file mode 100644 index 2c0aaf2bd..000000000 --- a/src/net/constants.rs +++ /dev/null @@ -1,74 +0,0 @@ -/* This file is part of DarkFi (https://dark.fi) - * - * Copyright (C) 2020-2023 Dyne.org foundation - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -/// Localnet addresses -pub const LOCALNET: [&str; 5] = ["localhost", "0.0.0.0", "[::]", "127.0.0.1", "[::1]"]; - -/// Illegal IPv6 addresses -pub const IP6_PRIV_RANGES: [&str; 2] = ["fc00::/7", "fec0::/10"]; - -/// Illegal IPv4 addresses -pub const IP4_PRIV_RANGES: [&str; 47] = [ - "0.0.0.0/8", - "10.0.0.0/8", - "127.0.0.0/8", - "224.0.0.0/8", - "225.0.0.0/8", - "226.0.0.0/8", - "227.0.0.0/8", - "228.0.0.0/8", - "229.0.0.0/8", - "230.0.0.0/8", - "231.0.0.0/8", - "232.0.0.0/8", - "233.0.0.0/8", - "234.0.0.0/8", - "235.0.0.0/8", - "236.0.0.0/8", - "237.0.0.0/8", - "238.0.0.0/8", - "239.0.0.0/8", - "240.0.0.0/8", - "241.0.0.0/8", - "242.0.0.0/8", - "243.0.0.0/8", - "244.0.0.0/8", - "245.0.0.0/8", - "246.0.0.0/8", - "247.0.0.0/8", - "248.0.0.0/8", - "249.0.0.0/8", - "250.0.0.0/8", - "251.0.0.0/8", - "252.0.0.0/8", - "253.0.0.0/8", - "254.0.0.0/8", - "255.0.0.0/8", - "100.64.0.0/10", - "169.254.0.0/16", - "172.16.0.0/12", - "192.0.0.0/24", - "192.0.2.0/24", - "192.88.99.0/24", - "192.168.0.0/16", - "198.18.0.0/15", - "198.51.100.0/24", - "203.0.113.0/24", - "233.252.0.0/24", - "255.255.255.255/32", -]; diff --git a/src/net/hosts.rs b/src/net/hosts.rs index 76c0c7eb9..4d0bef545 100644 --- a/src/net/hosts.rs +++ b/src/net/hosts.rs @@ -16,470 +16,233 @@ * along with this program. If not, see . */ -use std::{ - collections::{HashMap, HashSet}, - net::IpAddr, -}; +use std::collections::HashSet; -use async_std::sync::{Arc, Mutex}; -use ipnet::{Ipv4Net, Ipv6Net}; -use iprange::IpRange; -use log::{debug, error, warn}; +use async_std::sync::{Arc, RwLock}; +use log::debug; +use rand::{prelude::IteratorRandom, rngs::OsRng}; use url::Url; -use super::constants::{IP4_PRIV_RANGES, IP6_PRIV_RANGES, LOCALNET}; -use crate::util::encoding::base32; +use super::settings::SettingsPtr; -/// Pointer to hosts class. +/// Atomic pointer to hosts object pub type HostsPtr = Arc; -/// Manages a store of network addresses. +/// Manages a store of network addresses pub struct Hosts { - addrs: Mutex>, - localnet: bool, - ipv4_range: IpRange, - ipv6_range: IpRange, + /// Set of stored addresses + addrs: RwLock>, + /// Pointer to configured P2P settings + settings: SettingsPtr, } impl Hosts { - /// Create a new host list. - pub fn new(localnet: bool) -> Arc { - // Initialize ipv4_range and ipv6_range if needed - let mut ipv4_range: IpRange = - IP4_PRIV_RANGES.iter().map(|s| s.parse().unwrap()).collect(); - let mut ipv6_range: IpRange = - IP6_PRIV_RANGES.iter().map(|s| s.parse().unwrap()).collect(); - - // These will make the trie potentially smaller - ipv4_range.simplify(); - ipv6_range.simplify(); - - Arc::new(Self { addrs: Mutex::new(HashSet::new()), localnet, ipv4_range, ipv6_range }) + /// Create a new hosts list. Also initializes private IP ranges used + /// for filtering. + pub fn new(settings: SettingsPtr) -> HostsPtr { + Arc::new(Self { addrs: RwLock::new(HashSet::new()), settings }) } - /// Add a new host to the host list, after filtering. - pub async fn store(&self, input_addrs: Vec) { - debug!(target: "net::hosts::store()", "hosts::store() [Start]"); - let addrs = if !self.localnet { - let filtered = filter_localnet(input_addrs); - let filtered = filter_invalid(&self.ipv4_range, &self.ipv6_range, filtered); - filtered.into_keys().collect() - } else { - debug!(target: "net::hosts::store()", "hosts::store() [Localnet mode, skipping filterring.]"); - input_addrs - }; - let mut addrs_map = self.addrs.lock().await; - for addr in addrs { - addrs_map.insert(addr); + /// Append given addrs to the known set. 
Filtering should be done externally. + pub async fn store(&self, addrs: &[Url]) { + debug!(target: "net::hosts::store()", "hosts::store() [START]"); + + let filtered_addrs = self.filter_addresses(addrs).await; + + if !filtered_addrs.is_empty() { + let mut addrs_map = self.addrs.write().await; + for addr in filtered_addrs { + addrs_map.insert(addr); + } } - debug!(target: "net::hosts::store()", "hosts::store() [End]"); + + debug!(target: "net::hosts::store()", "hosts::store() [END]"); } - /// Add a new hosts external adders to the host list, after filtering and verifying - /// the address url resolves to the provided connection address. - pub async fn store_ext(&self, connection_addr: Url, input_addrs: Vec) { - debug!(target: "net::hosts::store_ext()", "hosts::store_ext() [Start]"); - let addrs = if !self.localnet { - let filtered = filter_localnet(input_addrs); - let filtered = filter_invalid(&self.ipv4_range, &self.ipv6_range, filtered); - filter_non_resolving(connection_addr, filtered) - } else { - debug!(target: "net::hosts::store_ext()", "hosts::store_ext() [Localnet mode, skipping filterring.]"); - input_addrs - }; - let mut addrs_map = self.addrs.lock().await; - for addr in addrs { - addrs_map.insert(addr); + /// Filter given addresses based on certain rulesets and validity. + async fn filter_addresses(&self, addrs: &[Url]) -> Vec { + let mut ret = vec![]; + + for _addr in addrs { + // Validate that the format is `scheme://host_str:port` + if _addr.host_str().is_none() || + _addr.port().is_none() || + _addr.cannot_be_a_base() || + _addr.path_segments().is_some() + { + continue + } + + // Our own addresses should never enter the hosts set. + let host_str = _addr.host_str().unwrap(); + let mut got_own = false; + for ext in &self.settings.external_addrs { + if host_str == ext.host_str().unwrap() { + got_own = true; + break + } + } + if got_own { + continue + }; + + // We do this hack in order to parse IPs properly. + // https://github.com/whatwg/url/issues/749 + let addr = Url::parse(&_addr.as_str().replace(_addr.scheme(), "http")).unwrap(); + + // Filter non-global ranges if we're not allowing localnet. + // Should never be allowed in production, so we don't really care + // about some of them (e.g. 0.0.0.0, or broadcast, etc.). + if !self.settings.localnet { + // Filter private IP ranges + match addr.host().unwrap() { + url::Host::Ipv4(ip) => { + if !ip.is_global() { + continue + } + } + url::Host::Ipv6(ip) => { + if !ip.is_global() { + continue + } + } + url::Host::Domain(d) => { + // TODO: This could perhaps be more exhaustive? + if d == "localhost" { + continue + } + } + } + } + + // TODO: Should find a way to test the hosts are live without DNS leaks. + // Historically there is some code for this in cb73861bc13d3d5b43a6af931f29ce937e6fe681 + // We could try to instantiate a channel and perform a handshake, + // although this seems kinda "heavy". Open to suggestions :) + + ret.push(_addr.clone()); } - debug!(target: "net::hosts::store_ext()", "hosts::store_ext() [End]"); + + ret } - /// Return the list of hosts. - pub async fn load_all(&self) -> Vec { - self.addrs.lock().await.iter().cloned().collect() - } - - /// Remove an Url from the list pub async fn remove(&self, url: &Url) -> bool { - self.addrs.lock().await.remove(url) + self.addrs.write().await.remove(url) } /// Check if the host list is empty. pub async fn is_empty(&self) -> bool { - self.addrs.lock().await.is_empty() - } -} - -/// Auxiliary function to filter localnet hosts. 
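The scheme-swap in filter_addresses() exists because the url crate only parses hosts as IP addresses for "special" schemes (see whatwg/url#749): with a custom scheme such as tcp://, an IPv4 host comes back as Host::Domain. A small standalone illustration of the behaviour the hack relies on, using only the url crate:

    use url::{Host, Url};

    fn main() {
        let raw = Url::parse("tcp://127.0.0.1:13337").unwrap();
        // Non-special scheme: the host is treated as an opaque domain string.
        assert!(matches!(raw.host(), Some(Host::Domain(_))));

        // Re-parse with a special scheme so IPs are recognized, as store() does.
        let swapped = Url::parse(&raw.as_str().replace(raw.scheme(), "http")).unwrap();
        assert!(matches!(swapped.host(), Some(Host::Ipv4(_))));
    }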
-fn filter_localnet(input_addrs: Vec) -> Vec { - debug!(target: "net::hosts::filter_localnet()", "hosts::filter_localnet() [Input addresses: {:?}]", input_addrs); - let mut filtered = vec![]; - - for addr in &input_addrs { - if let Some(host_str) = addr.host_str() { - if !LOCALNET.contains(&host_str) { - filtered.push(addr.clone()); - continue - } - debug!(target: "net::hosts::filter_localnet()", "hosts::filter_localnet() [Filtered localnet addr: {}]", addr); - continue - } - warn!(target: "net::hosts::filter_localnet()", "hosts::filter_localnet() [{} addr.host_str is empty, skipping.]", addr); + self.addrs.read().await.is_empty() } - debug!(target: "net::hosts::filter_localnet()", "hosts::filter_localnet() [Filtered addresses: {:?}]", filtered); - filtered -} - -/// Auxiliary function to filter invalid(unresolvable) hosts. -fn filter_invalid( - ipv4_range: &IpRange, - ipv6_range: &IpRange, - input_addrs: Vec, -) -> HashMap> { - debug!(target: "net::hosts::filter_invalid()", "hosts::filter_invalid() [Input addresses: {:?}]", input_addrs); - let mut filtered = HashMap::new(); - for addr in &input_addrs { - // Discard domainless Urls - let domain = match addr.domain() { - Some(d) => d, - None => { - debug!(target: "net::hosts::filter_invalid()", "hosts::filter_invalid() [Filtered domainless url: {}]", addr); - continue - } - }; - - // Validate onion domain - if domain.ends_with("onion") { - match is_valid_onion(domain) { - true => { - filtered.insert(addr.clone(), vec![]); - } - false => { - warn!(target: "net::hosts::filter_invalid()", "hosts::filter_invalid() [Got invalid onion address: {}]", addr) - } - } - continue - } - - // Validate Internet domains and IPs. socket_addrs() does a resolution - // with the local DNS resolver (i.e. /etc/resolv.conf), so the admin has - // to take care of any DNS leaks by properly configuring their system for - // DNS resolution. - if let Ok(socket_addrs) = addr.socket_addrs(|| None) { - // Check if domain resolved to anything - if socket_addrs.is_empty() { - debug!(target: "net::hosts::filter_invalid()", "hosts::filter_invalid() [Filtered unresolvable URL: {}]", addr); - continue - } - - // Checking resolved IP validity - let mut resolves = vec![]; - for i in socket_addrs { - let ip = i.ip(); - match ip { - IpAddr::V4(a) => { - if ipv4_range.contains(&a) { - debug!(target: "net::hosts::filter_invalid()", "hosts::filter_invalid() [Filtered private-range IPv4: {}]", a); - continue - } - } - IpAddr::V6(a) => { - if ipv6_range.contains(&a) { - debug!(target: "net::hosts::filter_invalid()", "hosts::filter_invalid() [Filtered private range IPv6: {}]", a); - continue - } - } - } - resolves.push(ip); - } - - if resolves.is_empty() { - debug!(target: "net::hosts::filter_invalid()", "hosts::filter_invalid() [Filtered unresolvable URL: {}]", addr); - continue - } - - filtered.insert(addr.clone(), resolves); - } else { - warn!(target: "net::hosts::filter_invalid()", "hosts::filter_invalid() [Failed resolving socket_addrs for {}]", addr); - continue - } + /// Check if host is already in the set + pub async fn contains(&self, addr: &Url) -> bool { + self.addrs.read().await.contains(addr) } - debug!(target: "net::hosts::filter_invalid()", "hosts::filter_invalid() [Filtered addresses: {:?}]", filtered); - filtered -} + /// Return all known hosts + pub async fn load_all(&self) -> Vec { + self.addrs.read().await.iter().cloned().collect() + } -/// Filters `input_addrs` keys to whatever has at least one `IpAddr` that is -/// the same as `connection_addr`'s IP address. 
-/// Skips .onion domains. -fn filter_non_resolving(connection_addr: Url, input_addrs: HashMap>) -> Vec { - debug!(target: "net::hosts::filter_non_resolving()", "hosts::filter_non_resolving() [Input addresses: {:?}]", input_addrs); - debug!(target: "net::hosts::filter_non_resolving()", "hosts::filter_non_resolving() [Connection address: {}]", connection_addr); + /// Get up to n random hosts from the hosts set. + pub async fn get_n_random(&self, n: u32) -> Vec { + let n = n as usize; + let addrs = self.addrs.read().await; + let urls = addrs.iter().choose_multiple(&mut OsRng, n.min(addrs.len())); + let urls = urls.iter().map(|&url| url.clone()).collect(); + urls + } - // Retrieve connection IPs - let mut ipv4_range = vec![]; - let mut ipv6_range = vec![]; + /// Get all peers that match the given transport schemes from the hosts set. + pub async fn load_with_schemes(&self, schemes: &[String]) -> Vec { + let mut ret = vec![]; - match connection_addr.socket_addrs(|| None) { - Ok(v) => { - for i in v { - match i.ip() { - IpAddr::V4(a) => ipv4_range.push(a), - IpAddr::V6(a) => ipv6_range.push(a), - } - } - } - Err(e) => { - error!(target: "net::hosts::filter_non_resolving()", "hosts::filter_non_resolving() [Failed resolving connection_addr {}: {}]", connection_addr, e); - return vec![] - } - }; - - debug!(target: "net::hosts::filter_non_resolving()", "hosts::filter_non_resolving() [{} IPv4: {:?}]", connection_addr, ipv4_range); - debug!(target: "net::hosts::filter_non_resolving()", "hosts::filter_non_resolving() [{} IPv6: {:?}]", connection_addr, ipv6_range); - - let mut filtered = vec![]; - for (addr, resolves) in &input_addrs { - // Keep onion domains. It's assumed that the .onion addresses - // have already been validated. - let addr_domain = addr.domain().unwrap(); - if addr_domain.ends_with(".onion") { - filtered.push(addr.clone()); - continue - } - - // Checking IP validity. If at least one IP matches, we consider it fine. - let mut valid = false; - for ip in resolves { - match ip { - IpAddr::V4(a) => { - if ipv4_range.contains(a) { - valid = true; - break - } - } - IpAddr::V6(a) => { - if ipv6_range.contains(a) { - valid = true; - break - } - } + for addr in self.addrs.read().await.iter() { + if schemes.contains(&addr.scheme().to_string()) { + ret.push(addr.clone()); } } - if !valid { - debug!(target: "net::hosts::filter_non_resolving()", "hosts::filter_non_resolving() [Filtered unresolvable url: {}]", addr); - continue - } - - filtered.push(addr.clone()); + ret } - - debug!(target: "net::hosts::filter_non_resolving()", "hosts::filter_non_resolving() [Filtered addresses: {:?}]", filtered); - filtered -} - -/// Validate a given .onion address. Currently it just checks that the -/// length and encoding are ok, and does not do any deeper check. Should -/// be fixed in the future when arti is ready. 
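Taken together, the new Hosts type is a filtered address set with a few selection helpers. A rough usage sketch; the seed and outbound sessions are the real consumers, and Settings::default() plus the example URL are only for illustration:

    use async_std::sync::Arc;
    use url::Url;
    use crate::net::{hosts::Hosts, settings::Settings};

    async fn hosts_example() {
        let hosts = Hosts::new(Arc::new(Settings::default()));

        // Addresses are filtered on insert (scheme://host:port, not our own, global IPs).
        let peers = vec![Url::parse("tcp://dark.fi:8342").unwrap()];
        hosts.store(&peers).await;

        // Pick up to 5 random peers for outbound connection attempts.
        let candidates = hosts.get_n_random(5).await;

        // Or only peers reachable over the transports we allow.
        let tor_peers = hosts.load_with_schemes(&["tor".to_string()]).await;

        log::debug!("{} candidates, {} tor peers", candidates.len(), tor_peers.len());
    }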
-fn is_valid_onion(onion: &str) -> bool { - let onion = match onion.strip_suffix(".onion") { - Some(s) => s, - None => onion, - }; - - if onion.len() != 56 { - return false - } - - base32::decode(&onion.to_uppercase()).is_some() } #[cfg(test)] mod tests { - use std::{ - collections::{HashMap, HashSet}, - net::{IpAddr, Ipv4Addr}, - }; + use super::{super::settings::Settings, *}; - use ipnet::{Ipv4Net, Ipv6Net}; - use iprange::IpRange; - use url::Url; + #[async_std::test] + async fn test_store_localnet() { + let mut settings = Settings::default(); + settings.localnet = true; + settings.external_addrs = vec![ + Url::parse("tcp://foo.bar:123").unwrap(), + Url::parse("tcp://lol.cat:321").unwrap(), + ]; - use crate::net::{ - constants::{IP4_PRIV_RANGES, IP6_PRIV_RANGES}, - hosts::{filter_invalid, filter_localnet, filter_non_resolving, is_valid_onion}, - }; + let hosts = Hosts::new(Arc::new(settings.clone())); + hosts.store(&settings.external_addrs).await; + assert!(hosts.is_empty().await); - #[test] - fn test_filter_localnet() { - // Uncomment for inner logging - /* - simplelog::TermLogger::init( - simplelog::LevelFilter::Debug, - simplelog::Config::default(), - simplelog::TerminalMode::Mixed, - simplelog::ColorChoice::Auto, - ) - .unwrap(); - */ + let local_hosts = vec![ + Url::parse("tcp://localhost:3921").unwrap(), + Url::parse("tcp://127.0.0.1:23957").unwrap(), + Url::parse("tcp://[::1]:21481").unwrap(), + Url::parse("tcp://192.168.10.65:311").unwrap(), + Url::parse("tcp://0.0.0.0:2312").unwrap(), + Url::parse("tcp://255.255.255.255:2131").unwrap(), + ]; + hosts.store(&local_hosts).await; + for i in local_hosts { + assert!(hosts.contains(&i).await); + } - // Create addresses to test - let valid = Url::parse("tls://facebook.com:13333").unwrap(); - let onion = Url::parse( - "tor://facebookwkhpilnemxj7asaniu7vnjjbiltxjqhye3mhbshg7kx5tfyd.onion:13333", - ) - .unwrap(); - let localhost = Url::parse("tls://localhost:13333").unwrap(); - let localip = Url::parse("tls://127.0.0.1:13333").unwrap(); - - // Create input addresses vector - let input_addrs = vec![valid.clone(), onion.clone(), localhost, localip]; - - // Create expected output addresses vector - let output_addrs = vec![valid, onion]; - let output_addrs: HashSet<&Url> = HashSet::from_iter(output_addrs.iter()); - - // Execute filtering for v4 addr - let filtered = filter_localnet(input_addrs); - let filtered: HashSet<&Url> = HashSet::from_iter(filtered.iter()); - // Validate filtered addresses - assert_eq!(output_addrs, filtered); + let remote_hosts = vec![ + Url::parse("tcp://dark.fi:80").unwrap(), + Url::parse("tcp://top.kek:111").unwrap(), + Url::parse("tcp://http.cat:401").unwrap(), + ]; + hosts.store(&remote_hosts).await; + for i in remote_hosts { + assert!(hosts.contains(&i).await); + } } - #[test] - fn test_filter_invalid() { - // Uncomment for inner logging - /* - TermLogger::init( - LevelFilter::Debug, - Config::default(), - TerminalMode::Mixed, - ColorChoice::Auto, - ) - .unwrap(); - */ + #[async_std::test] + async fn test_store() { + let mut settings = Settings::default(); + settings.localnet = false; + settings.external_addrs = vec![ + Url::parse("tcp://foo.bar:123").unwrap(), + Url::parse("tcp://lol.cat:321").unwrap(), + ]; - // Initialize ipv4_range and ipv6_range if needed - let mut ipv4_range: IpRange = - IP4_PRIV_RANGES.iter().map(|s| s.parse().unwrap()).collect(); - let mut ipv6_range: IpRange = - IP6_PRIV_RANGES.iter().map(|s| s.parse().unwrap()).collect(); + let hosts = Hosts::new(Arc::new(settings.clone())); + 
hosts.store(&settings.external_addrs).await; + assert!(hosts.is_empty().await); - // These will make the trie potentially smaller - ipv4_range.simplify(); - ipv6_range.simplify(); + let local_hosts = vec![ + Url::parse("tcp://localhost:3921").unwrap(), + Url::parse("tcp://127.0.0.1:23957").unwrap(), + Url::parse("tor://[::1]:21481").unwrap(), + Url::parse("tcp://192.168.10.65:311").unwrap(), + Url::parse("tcp+tls://0.0.0.0:2312").unwrap(), + Url::parse("tcp://255.255.255.255:2131").unwrap(), + ]; + hosts.store(&local_hosts).await; + for i in local_hosts { + assert!(!hosts.contains(&i).await); + } - // Create addresses to test - let valid = Url::parse("tls://facebook.com:13333").unwrap(); - let domainless = Url::parse("unix:/run/foo.socket").unwrap(); - let mut hostless = Url::parse("tls://185.60.216.35:13333").unwrap(); - hostless.set_host(None).unwrap(); - let onion = Url::parse( - "tor://facebookwkhpilnemxj7asaniu7vnjjbiltxjqhye3mhbshg7kx5tfyd.onion:13333", - ) - .unwrap(); - let invalid_onion = - Url::parse("tor://facebookwemxj7asaniu7vnjjbiltxjqhye3mhbshg7kx5tfyd.onion:13333") - .unwrap(); - - // Create input addresses vector - let input_addrs = vec![valid.clone(), domainless, hostless, onion.clone(), invalid_onion]; - - // Create expected output addresses vector - let output_addrs = vec![valid, onion]; - let output_addrs: HashSet<&Url> = HashSet::from_iter(output_addrs.iter()); - - // Execute filtering for v4 addr - let filtered = filter_invalid(&ipv4_range, &ipv6_range, input_addrs); - let filtered: Vec = filtered.into_iter().map(|(k, _)| k).collect(); - let filtered: HashSet<&Url> = HashSet::from_iter(filtered.iter()); - // Validate filtered addresses - assert_eq!(output_addrs, filtered); - } - - #[test] - fn test_filter_non_resolving() { - // Uncomment for inner logging - /* - TermLogger::init( - LevelFilter::Debug, - Config::default(), - TerminalMode::Mixed, - ColorChoice::Auto, - ) - .unwrap(); - */ - - // Create addresses to test - let connection_url_v4 = Url::parse("tls://185.60.216.35:13333").unwrap(); - let connection_url_v6 = - Url::parse("tls://[2a03:2880:f12d:83:face:b00c:0:25de]:13333").unwrap(); - let fake_connection_url = Url::parse("tls://185.199.109.153:13333").unwrap(); - let resolving_url = Url::parse("tls://facebook.com:13333").unwrap(); - let random_url = Url::parse("tls://facebookkk.com:13333").unwrap(); - let onion = Url::parse( - "tor://facebookwkhpilnemxj7asaniu7vnjjbiltxjqhye3mhbshg7kx5tfyd.onion:13333", - ) - .unwrap(); - - // Create input addresses hashmap, containing created addresses, excluding connection url - let mut input_addrs = HashMap::new(); - input_addrs.insert( - resolving_url.clone(), - vec![ - IpAddr::V4(Ipv4Addr::new(185, 60, 216, 35)), - "2a03:2880:f12d:83:face:b00c:0:25de".parse().unwrap(), - ], - ); - input_addrs.insert(random_url, vec![]); - input_addrs.insert(onion.clone(), vec![]); - - // Create expected output addresses hashset - let mut output_addrs = HashMap::new(); - output_addrs.insert( - resolving_url, - vec![ - IpAddr::V4(Ipv4Addr::new(185, 60, 216, 35)), - "2a03:2880:f12d:83:face:b00c:0:25de".parse().unwrap(), - ], - ); - output_addrs.insert(onion.clone(), vec![]); - // Convert hashmap to Vec = output_addrs.into_iter().map(|(k, _)| k).collect(); - let output_addrs: HashSet<&Url> = HashSet::from_iter(output_addrs.iter()); - - let mut fake_output_addrs: HashMap> = HashMap::new(); - // Onion addresses don't get filtered, as we can't resolve them - fake_output_addrs.insert(onion, vec![]); - let fake_output_addrs: Vec = 
fake_output_addrs.into_iter().map(|(k, _)| k).collect(); - let fake_output_addrs: HashSet<&Url> = HashSet::from_iter(fake_output_addrs.iter()); - - // Execute filtering for v4 addr - let filtered = filter_non_resolving(connection_url_v4, input_addrs.clone()); - let filtered = HashSet::from_iter(filtered.iter()); - // Validate filtered addresses - assert_eq!(output_addrs, filtered); - - // Execute filtering for v6 addr - let filtered = filter_non_resolving(connection_url_v6, input_addrs.clone()); - let filtered = HashSet::from_iter(filtered.iter()); - assert_eq!(output_addrs, filtered); - - // Execute filtering for fake addr - let filtered = filter_non_resolving(fake_connection_url, input_addrs); - let filtered = HashSet::from_iter(filtered.iter()); - assert_eq!(fake_output_addrs, filtered); - } - - #[test] - fn test_is_valid_onion() { - // Valid onion - assert!(is_valid_onion("facebookwkhpilnemxj7asaniu7vnjjbiltxjqhye3mhbshg7kx5tfyd.onion"),); - // Valid onion without .onion suffix - assert!(is_valid_onion("facebookwkhpilnemxj7asaniu7vnjjbiltxjqhye3mhbshg7kx5tfyd"),); - // Invalid onion - assert!(!is_valid_onion("facebook.com")); + let remote_hosts = vec![ + Url::parse("tcp://dark.fi:80").unwrap(), + Url::parse("tcp://http.cat:401").unwrap(), + Url::parse("tcp://foo.bar:111").unwrap(), + ]; + hosts.store(&remote_hosts).await; + assert!(hosts.contains(&remote_hosts[0]).await); + assert!(hosts.contains(&remote_hosts[1]).await); + assert!(!hosts.contains(&remote_hosts[2]).await); } } diff --git a/src/net/message.rs b/src/net/message.rs index ccb035a8f..1b0e831fb 100644 --- a/src/net/message.rs +++ b/src/net/message.rs @@ -26,157 +26,134 @@ use crate::{Error, Result}; const MAGIC_BYTES: [u8; 4] = [0xd9, 0xef, 0xb6, 0x7d]; /// Generic message template. -pub trait Message: 'static + Encodable + Decodable + Send + Sync { - fn name() -> &'static str; +pub trait Message: 'static + Send + Sync + Encodable + Decodable { + const NAME: &'static str; } -/// Outbound keep-alive message. -#[derive(SerialEncodable, SerialDecodable)] +#[macro_export] +macro_rules! impl_p2p_message { + ($st:ty, $nm:expr) => { + impl Message for $st { + const NAME: &'static str = $nm; + } + }; +} + +/// Outbound keepalive message. +#[derive(Debug, Copy, Clone, SerialEncodable, SerialDecodable)] pub struct PingMessage { - pub nonce: u32, + pub nonce: u16, } +impl_p2p_message!(PingMessage, "ping"); -/// Inbound keep-alive message. -#[derive(SerialEncodable, SerialDecodable)] +/// Inbound keepalive message. +#[derive(Debug, Copy, Clone, SerialEncodable, SerialDecodable)] pub struct PongMessage { - pub nonce: u32, + pub nonce: u16, } +impl_p2p_message!(PongMessage, "pong"); -/// Requests address of outbound connection. -#[derive(SerialEncodable, SerialDecodable)] -pub struct GetAddrsMessage {} +/// Requests address of outbound connecction. +#[derive(Debug, Copy, Clone, SerialEncodable, SerialDecodable)] +pub struct GetAddrsMessage { + /// Maximum number of addresses to receive + pub max: u32, +} +impl_p2p_message!(GetAddrsMessage, "getaddr"); -/// Sends address information to inbound connection. Response to GetAddrs -/// message. -#[derive(SerialEncodable, SerialDecodable)] +/// Sends address information to inbound connection. +/// Response to `GetAddrsMessage`. +#[derive(Debug, Clone, SerialEncodable, SerialDecodable)] pub struct AddrsMessage { pub addrs: Vec, } - -/// Sends external address information to inbound connection. 
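With the NAME associated constant, wiring up a new message type is one derive plus the impl_p2p_message! macro, mirroring what the unit test at the bottom of message_subscriber.rs does. A hypothetical application-level message as an example:

    use darkfi_serial::{SerialDecodable, SerialEncodable};
    use crate::{impl_p2p_message, net::Message};

    /// Hypothetical gossip message carried by an application protocol.
    #[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
    pub struct GossipMessage {
        pub topic: String,
        pub data: Vec<u8>,
    }
    impl_p2p_message!(GossipMessage, "gossip");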
-#[derive(SerialEncodable, SerialDecodable)] -pub struct ExtAddrsMessage { - pub ext_addrs: Vec, -} +impl_p2p_message!(AddrsMessage, "addr"); /// Requests version information of outbound connection. -#[derive(SerialEncodable, SerialDecodable)] +#[derive(Debug, Clone, SerialEncodable, SerialDecodable)] pub struct VersionMessage { + /// Only used for debugging. Compromises privacy when set. pub node_id: String, } +impl_p2p_message!(VersionMessage, "version"); -/// Sends version information to inbound connection. Response to VersionMessage. -#[derive(SerialEncodable, SerialDecodable)] +/// Sends version information to inbound connection. +/// Response to `VersionMessage`. +#[derive(Debug, Clone, SerialEncodable, SerialDecodable)] pub struct VerackMessage { - // app version - pub app: String, + /// App version + pub app_version: semver::Version, } +impl_p2p_message!(VerackMessage, "verack"); -impl Message for PingMessage { - fn name() -> &'static str { - "ping" - } -} - -impl Message for PongMessage { - fn name() -> &'static str { - "pong" - } -} - -impl Message for GetAddrsMessage { - fn name() -> &'static str { - "getaddr" - } -} - -impl Message for AddrsMessage { - fn name() -> &'static str { - "addr" - } -} - -impl Message for ExtAddrsMessage { - fn name() -> &'static str { - "extaddr" - } -} - -impl Message for VersionMessage { - fn name() -> &'static str { - "version" - } -} - -impl Message for VerackMessage { - fn name() -> &'static str { - "verack" - } -} - -/// Packets are the base type read from the network. Converted to messages and -/// passed to event loop. +/// Packets are the base type read from the network. +/// Converted to messages and passed to event loop. +#[derive(Debug, SerialEncodable, SerialDecodable)] pub struct Packet { pub command: String, pub payload: Vec, } -/// Reads and decodes an inbound payload. +/// Reads and decodes an inbound payload from the given async stream. +/// Returns decoded [`Packet`]. pub async fn read_packet(stream: &mut R) -> Result { - // Packets have a 4 byte header of magic digits - // This is used for network debugging + // Packets should have a 4 byte header of magic digits. + // This is used for network debugging. let mut magic = [0u8; 4]; - debug!(target: "net::message", "reading magic..."); - + debug!(target: "net::message", "Reading magic..."); stream.read_exact(&mut magic).await?; - debug!(target: "net::message", "read magic {:?}", magic); + debug!(target: "net::message", "Read magic {:?}", magic); if magic != MAGIC_BYTES { + debug!(target: "net::message", "Error: Magic bytes mismatch"); return Err(Error::MalformedPacket) } - // The type of the message + // The type of the message. 
let command_len = VarInt::decode_async(stream).await?.0 as usize; let mut cmd = vec![0u8; command_len]; - if command_len > 0 { - stream.read_exact(&mut cmd).await?; - } - let cmd = String::from_utf8(cmd)?; - debug!(target: "net::message", "read command: {}", cmd); - - let payload_len = VarInt::decode_async(stream).await?.0 as usize; + stream.read_exact(&mut cmd).await?; + let command = String::from_utf8(cmd)?; + debug!(target: "net::message", "Read command: {}", command); // The message-dependent data (see message types) + let payload_len = VarInt::decode_async(stream).await?.0 as usize; let mut payload = vec![0u8; payload_len]; - if payload_len > 0 { - stream.read_exact(&mut payload).await?; - } - debug!(target: "net::message", "read payload {} bytes", payload_len); + stream.read_exact(&mut payload).await?; + debug!(target: "net::message", "Read payload {} bytes", payload_len); - Ok(Packet { command: cmd, payload }) + Ok(Packet { command, payload }) } -/// Sends an outbound packet by writing data to TCP stream. +/// Sends an outbound packet by writing data to the given async stream. +/// Returns the total written bytes. pub async fn send_packet( stream: &mut W, packet: Packet, -) -> Result<()> { - debug!(target: "net::message", "sending magic..."); - stream.write_all(&MAGIC_BYTES).await?; - debug!(target: "net::message", "sent magic..."); - - VarInt(packet.command.len() as u64).encode_async(stream).await?; +) -> Result { assert!(!packet.command.is_empty()); - stream.write_all(packet.command.as_bytes()).await?; - debug!(target: "net::message", "sent command: {}", packet.command); - + assert!(!packet.payload.is_empty()); assert!(std::mem::size_of::() <= std::mem::size_of::()); - VarInt(packet.payload.len() as u64).encode_async(stream).await?; - if !packet.payload.is_empty() { - stream.write_all(&packet.payload).await?; - } - debug!(target: "net::message", "sent payload {} bytes", packet.payload.len() as u64); + let mut written: usize = 0; - Ok(()) + debug!(target: "net::message", "Sending magic..."); + stream.write_all(&MAGIC_BYTES).await?; + written += MAGIC_BYTES.len(); + debug!(target: "net::message", "Sent magic"); + + debug!(target: "net::message", "Sending command..."); + written += VarInt(packet.command.len() as u64).encode_async(stream).await?; + let cmd_ref = packet.command.as_bytes(); + stream.write_all(cmd_ref).await?; + written += cmd_ref.len(); + debug!(target: "net::message", "Sent command: {}", packet.command); + + debug!(target: "net::message", "Sending payload..."); + written += VarInt(packet.payload.len() as u64).encode_async(stream).await?; + stream.write_all(&packet.payload).await?; + written += packet.payload.len(); + debug!(target: "net::message", "Sent payload {} bytes", packet.payload.len() as u64); + + Ok(written) } diff --git a/src/net/message_subscriber.rs b/src/net/message_subscriber.rs index 58a8dda6b..a86c0fdca 100644 --- a/src/net/message_subscriber.rs +++ b/src/net/message_subscriber.rs @@ -16,23 +16,119 @@ * along with this program. If not, see . */ -use std::{any::Any, collections::HashMap, io::Cursor, sync::Arc}; +use std::{any::Any, collections::HashMap, io::Cursor}; -use async_std::sync::Mutex; +use async_std::sync::{Arc, Mutex}; use async_trait::async_trait; +use futures::stream::{FuturesUnordered, StreamExt}; use log::{debug, warn}; -use rand::Rng; - -use crate::{Error, Result}; +use rand::{rngs::OsRng, Rng}; use super::message::Message; +use crate::{Error, Result}; -/// 64bit identifier for message subscription. 
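For reference, the wire format written by send_packet() is: the 4 magic bytes, a VarInt command length, the ASCII command, a VarInt payload length, and the serialized message payload. A round-trip sketch using an in-memory buffer; it assumes the stream bounds (not spelled out in the hunk above) accept a futures::io::Cursor:

    use darkfi_serial::serialize;
    use futures::io::Cursor;
    use crate::net::message::{read_packet, send_packet, Packet, PingMessage};

    async fn packet_roundtrip() -> crate::Result<()> {
        let ping = PingMessage { nonce: 42 };
        let packet = Packet { command: "ping".to_string(), payload: serialize(&ping) };

        // Encode into an in-memory writer: magic || len || "ping" || len || nonce.
        let mut buf = Cursor::new(Vec::new());
        let written = send_packet(&mut buf, packet).await?;
        assert!(written >= 4);

        // Decode it back and check the command survived.
        buf.set_position(0);
        let decoded = read_packet(&mut buf).await?;
        assert_eq!(decoded.command, "ping");
        Ok(())
    }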
+/// 64-bit identifier for message subscription. pub type MessageSubscriptionId = u64; type MessageResult = Result>; -/// Handles message subscriptions through a subscription ID and a receiver -/// channel. +/// A dispatcher that is unique to every [`Message`]. +/// Maintains a list of subscribers that are subscribed to that +/// unique Message type and handles sending messages across these +/// subscriptions. +#[derive(Debug)] +struct MessageDispatcher { + subs: Mutex>>>, +} + +impl MessageDispatcher { + /// Create a new message dispatcher + fn new() -> Self { + Self { subs: Mutex::new(HashMap::new()) } + } + + /// Create a random ID. + fn random_id() -> MessageSubscriptionId { + //let mut rng = rand::thread_rng(); + OsRng.gen() + } + + /// Subscribe to a channel. + /// Assigns a new ID and adds it to the list of subscribers. + pub async fn subscribe(self: Arc) -> MessageSubscription { + let (sender, recv_queue) = smol::channel::unbounded(); + // Guard against overwriting + let mut id = Self::random_id(); + let mut subs = self.subs.lock().await; + loop { + if subs.contains_key(&id) { + id = Self::random_id(); + continue + } + + subs.insert(id, sender); + break + } + + drop(subs); + MessageSubscription { id, recv_queue, parent: self } + } + + /// Unsubscribe from a channel. + /// Removes the associated ID from the subscriber list. + async fn unsubscribe(&self, sub_id: MessageSubscriptionId) { + self.subs.lock().await.remove(&sub_id); + } + + /// Private function to concurrently transmit a message to all subscriber channels. + /// Automatically clear all inactive channels. Strictly used internally. + async fn _trigger_all(&self, message: MessageResult) { + let mut subs = self.subs.lock().await; + + debug!( + target: "net::message_subscriber::_trigger_all()", "START msg={}({}), subs={}", + if message.is_ok() { "Ok" } else {"Err"}, + M::NAME, subs.len(), + ); + + let mut futures = FuturesUnordered::new(); + let mut garbage_ids = vec![]; + + // Prep the futures for concurrent execution + for (sub_id, sub) in &*subs { + let sub_id = *sub_id; + let sub = sub.clone(); + let message = message.clone(); + futures.push(async move { + match sub.send(message).await { + Ok(res) => Ok((sub_id, res)), + Err(err) => Err((sub_id, err)), + } + }); + } + + // Start polling + while let Some(r) = futures.next().await { + if let Err((sub_id, _err)) = r { + garbage_ids.push(sub_id); + } + } + + // Garbage cleanup + for sub_id in garbage_ids { + subs.remove(&sub_id); + } + + debug!( + target: "net::message_subscriber::_trigger_all()", "END msg={}({}), subs={}", + if message.is_ok() { "Ok" } else { "Err" }, + M::NAME, subs.len(), + ); + } +} + +/// Handles message subscriptions through a subscription ID and +/// a receiver channel. +#[derive(Debug)] pub struct MessageSubscription { id: MessageSubscriptionId, recv_queue: smol::channel::Receiver>, @@ -44,141 +140,65 @@ impl MessageSubscription { pub async fn receive(&self) -> MessageResult { match self.recv_queue.recv().await { Ok(message) => message, - Err(err) => { - panic!("MessageSubscription::receive() recv_queue failed! {}", err); - } + Err(e) => panic!("MessageSubscription::receive(): recv_queue failed! {}", e), } } /// Unsubscribe from a message subscription. Must be called manually. pub async fn unsubscribe(&self) { - self.parent.clone().unsubscribe(self.id).await + self.parent.unsubscribe(self.id).await } } +/// Generic interface for the message dispatcher. #[async_trait] -/// Generic interface for message dispatcher. 
trait MessageDispatcherInterface: Send + Sync { - async fn trigger(&self, payload: Vec); + async fn trigger(&self, payload: &[u8]); async fn trigger_error(&self, err: Error); fn as_any(self: Arc) -> Arc; } -/// A dispatchers that is unique to every Message. Maintains a list of subscribers that are subscribed to that unique Message type and handles sending messages across these subscriptions. -struct MessageDispatcher { - subs: Mutex>>>, -} - -impl MessageDispatcher { - /// Create a new message dispatcher. - fn new() -> Self { - MessageDispatcher { subs: Mutex::new(HashMap::new()) } - } - - /// Create a random ID. - fn random_id() -> MessageSubscriptionId { - let mut rng = rand::thread_rng(); - rng.gen() - } - - /// Subscribe to a channel. Assigns a new ID and adds it to the list of - /// subscribers. - pub async fn subscribe(self: Arc) -> MessageSubscription { - let (sender, recvr) = smol::channel::unbounded(); - let sub_id = Self::random_id(); - self.subs.lock().await.insert(sub_id, sender); - - MessageSubscription { id: sub_id, recv_queue: recvr, parent: self } - } - - /// Unsubcribe from a channel. Removes the associated ID from the subscriber - /// list. - async fn unsubscribe(&self, sub_id: MessageSubscriptionId) { - self.subs.lock().await.remove(&sub_id); - } - - /// Private function to transmit a message to all subscriber channels. Automatically clear inactive - /// channels. Used strictly internally. - async fn _trigger_all(&self, message: MessageResult) { - debug!( - target: "net::message_subscriber::_trigger_all()", - "START, message={}({}), subs={}", - if message.is_ok() { "Ok" } else { "Err" }, - M::name(), - self.subs.lock().await.len() - ); - let mut garbage_ids = Vec::new(); - - for (sub_id, sub) in &*self.subs.lock().await { - match sub.send(message.clone()).await { - Ok(()) => {} - Err(_err) => { - // Automatically clean out closed channels - garbage_ids.push(*sub_id); - // panic!("Error returned sending message in notify() call! - // {}", err); - } - } - } - - self.collect_garbage(garbage_ids).await; - - debug!( - target: "net::message_subscriber::_trigger_all()", - "END, msg={}({}), subs={}", - if message.is_ok() { "Ok" } else { "Err" }, - M::name(), - self.subs.lock().await.len() - ); - } - - /// Remove inactive channels. - async fn collect_garbage(&self, ids: Vec) { - let mut subs = self.subs.lock().await; - for id in &ids { - subs.remove(id); - } - } -} - +/// Local implementation of the Message Dispatcher Interface #[async_trait] -// Local implementation of the Message Dispatcher Interface. impl MessageDispatcherInterface for MessageDispatcher { - /// Internal function to deserialize data into a message type and dispatch it across subscriber channels. - async fn trigger(&self, payload: Vec) { - // deserialize data into type - // send down the pipes + /// Internal function to deserialize data into a message type + /// and dispatch it across subscriber channels. + async fn trigger(&self, payload: &[u8]) { + // Deserialize data into type, send down the pipes. let cursor = Cursor::new(payload); match M::decode(cursor) { Ok(message) => { let message = Ok(Arc::new(message)); self._trigger_all(message).await } + Err(err) => { debug!( target: "net::message_subscriber::trigger()", "Unable to decode data. Dropping...: {}", - err + err, ); } } } - /// Interal function that sends a Error message to all subscriber channels. + /// Internal function that sends an error message to all subscriber channels. 
async fn trigger_error(&self, err: Error) { self._trigger_all(Err(err)).await; } - /// Converts to Any trait. Enables the dynamic modification of static types. + /// Converts to `Any` trait. Enables the dynamic modification of static types. fn as_any(self: Arc) -> Arc { self } } -/// Generic publish/subscribe class that maintains a list of dispatchers. Dispatchers transmit -/// messages to subscribers and are specific to one message type. +/// Generic publish/subscribe class that maintains a list of dispatchers. +/// Dispatchers transmit messages to subscribers and are specific to one +/// message type. +#[derive(Default)] pub struct MessageSubsystem { dispatchers: Mutex>>, } @@ -186,18 +206,19 @@ pub struct MessageSubsystem { impl MessageSubsystem { /// Create a new message subsystem. pub fn new() -> Self { - MessageSubsystem { dispatchers: Mutex::new(HashMap::new()) } + Self { dispatchers: Mutex::new(HashMap::new()) } } - /// Add a new dispatcher for specified Message. + /// Add a new dispatcher for specified [`Message`]. pub async fn add_dispatch(&self) { - self.dispatchers.lock().await.insert(M::name(), Arc::new(MessageDispatcher::::new())); + self.dispatchers.lock().await.insert(M::NAME, Arc::new(MessageDispatcher::::new())); } - /// Subscribes to a Message. Using the Message name, the method returns an the associated MessageDispatcher from the list of - /// dispatchers and calls subscribe(). + /// Subscribes to a [`Message`]. Using the Message name, the method + /// returns the associated [`MessageDispatcher`] from the list of + /// dispatchers and calls `subscribe()`. pub async fn subscribe(&self) -> Result> { - let dispatcher = self.dispatchers.lock().await.get(M::name()).cloned(); + let dispatcher = self.dispatchers.lock().await.get(M::NAME).cloned(); let sub = match dispatcher { Some(dispatcher) => { @@ -208,9 +229,9 @@ impl MessageSubsystem { dispatcher.subscribe().await } + None => { - // normall return failure here - // for now panic + // Normal return failure here return Err(Error::NetworkOperationFailed) } }; @@ -218,102 +239,72 @@ impl MessageSubsystem { Ok(sub) } - /// Transmits a payload to a dispatcher. Returns an error if the payload - /// fails to transmit. - pub async fn notify(&self, command: &str, payload: Vec) { - let dispatcher = self.dispatchers.lock().await.get(command).cloned(); + /// Transmits a payload to a dispatcher. + /// Returns an error if the payload fails to transmit. + pub async fn notify(&self, command: &str, payload: &[u8]) { + let Some(dispatcher) = self.dispatchers.lock().await.get(command).cloned() else { + warn!( + target: "net::message_subscriber::notify", + "message_subscriber::notify: Command '{}' did not find a dispatcher", + command, + ); + return + }; - match dispatcher { - Some(dispatcher) => { - dispatcher.trigger(payload).await; - } - None => { - warn!( - target: "net::message_subscriber::notify()", - "Command '{}' did not find a dispatcher", - command - ); - } - } + dispatcher.trigger(payload).await; } - /// Transmits an error message across dispatchers. + /// Concurrently transmits an error message across dispatchers. 
pub async fn trigger_error(&self, err: Error) { - // TODO: this could be parallelized - for dispatcher in self.dispatchers.lock().await.values() { - dispatcher.trigger_error(err.clone()).await; + let mut futures = FuturesUnordered::new(); + + let dispatchers = self.dispatchers.lock().await; + + for dispatcher in dispatchers.values() { + let dispatcher = dispatcher.clone(); + let error = err.clone(); + futures.push(async move { dispatcher.trigger_error(error).await }); } + + drop(dispatchers); + + while let Some(_r) = futures.next().await {} } } -impl Default for MessageSubsystem { - fn default() -> Self { - Self::new() - } -} - -/// Test functions for message subsystem. -// This is a test function for the message subsystem code above -// Normall we would use the #[test] macro but cannot since it is async code -// Instead we call it using smol::block_on() in the unit test code after this -// func #[cfg(test)] mod tests { use super::*; - use darkfi_serial::{Decodable, Encodable}; - use std::io; + use darkfi_serial::{serialize, SerialDecodable, SerialEncodable}; #[async_std::test] async fn message_subscriber_test() { - struct MyVersionMessage { - x: u32, - } - - impl Message for MyVersionMessage { - fn name() -> &'static str { - "verver" - } - } - - impl Encodable for MyVersionMessage { - fn encode(&self, mut s: S) -> core::result::Result { - let mut len = 0; - len += self.x.encode(&mut s)?; - Ok(len) - } - } - - impl Decodable for MyVersionMessage { - fn decode(mut d: D) -> core::result::Result { - Ok(Self { x: Decodable::decode(&mut d)? }) - } - } - println!("hello"); + #[derive(SerialEncodable, SerialDecodable)] + struct MyVersionMessage(pub u32); + crate::impl_p2p_message!(MyVersionMessage, "verver"); let subsystem = MessageSubsystem::new(); subsystem.add_dispatch::().await; - // subscribe - // 1. get dispatcher - // 2. cast to specific type - // 3. do sub, return sub + // Subscribe: + // 1. Get dispatcher + // 2. Cast to specific type + // 3. Do sub, return sub let sub = subsystem.subscribe::().await.unwrap(); - let msg = MyVersionMessage { x: 110 }; - let mut payload = Vec::new(); - msg.encode(&mut payload).unwrap(); + // Receive message and publish: + // 1. Based on string, lookup relevant dispatcher interface + // 2. Publish data there + let msg = MyVersionMessage(110); + let payload = serialize(&msg); + subsystem.notify("verver", &payload).await; - // receive message and publish - // 1. based on string, lookup relevant dispatcher interface - // 2. publish data there - subsystem.notify("verver", payload).await; - - // receive - // 1. do a get easy + // Receive: + // 1. Do a get easy let msg2 = sub.receive().await.unwrap(); - assert_eq!(msg2.x, 110); - println!("{}", msg2.x); + assert_eq!(msg.0, msg2.0); + // Trigger an error subsystem.trigger_error(Error::ChannelStopped).await; let msg2 = sub.receive().await; diff --git a/src/net/mod.rs b/src/net/mod.rs index 7004425d5..5fa827042 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -16,30 +16,17 @@ * along with this program. If not, see . */ -/// Acceptor class handles the acceptance of inbound socket connections. It's -/// used to start listening on a local socket address, to accept incoming -/// connections and to handle network errors. -pub mod acceptor; - -/// Async channel that handles the sending of messages across the network. -/// Public interface is used to create new channels, to stop and start -/// a channel, and to send messages. 
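Seen from a protocol's point of view, the subsystem boils down to: subscribe to a concrete Message type on a channel, then await receive() until the channel errors out. A rough sketch; real protocols register through the ProtocolRegistry and jobs manager rather than hand-rolling this loop, and a PingMessage dispatcher is assumed to have been registered during channel setup:

    use crate::{
        net::{message::PingMessage, ChannelPtr},
        Result,
    };

    async fn pump_pings(channel: ChannelPtr) -> Result<()> {
        // Look up the PingMessage dispatcher on this channel and subscribe to it.
        let sub = channel.message_subsystem().subscribe::<PingMessage>().await?;

        loop {
            // Err(ChannelStopped) propagates once the receive loop shuts the channel down.
            let ping = sub.receive().await?;
            log::debug!("got ping nonce={}", ping.nonce);
        }
    }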
+/// Defines how to decode generic messages as well as implementing the +/// common network messages that are sent between nodes as described +/// by the [`protocol`] submodule. /// -/// Implements message functionality and the message subscriber subsystem. -pub mod channel; +/// Implements a type called [`Packet`] which is the base message type. +/// Packets are converted into messages and passed to an event loop. +pub mod message; +pub use message::Message; -/// Handles the creation of outbound connections. Used to establish an outbound -/// connection. -pub mod connector; - -/// Hosts are a list of network addresses used when establishing an outbound -/// connection. Hosts are shared across the network through the address -/// protocol. When attempting to connect, a node will loop through addresses in -/// the host store until it finds ones to connect to. -pub mod hosts; - -/// Generic publish/subscribe class that can dispatch any kind of message to a -/// subscribed list of dispatchers. Dispatchers subscribe to a single +/// Generic publish/subscribe class that can dispatch any kind of message +/// to a subscribed list of dispatchers. Dispatchers subscribe to a single /// message format of any type. This is a generalized version of the simple /// publish-subscribe class in system::Subscriber. /// @@ -48,77 +35,85 @@ pub mod hosts; /// /// Message Subsystem maintains a list of dispatchers, which is a generalized /// version of a subscriber. Pub-sub is called on dispatchers through the -/// functions 'subscribe' and 'notify'. Whereas system::Subscriber only allows +/// functions `subscribe` and `notify`. Whereas system::Subscriber only allows /// messages of a single type, dispatchers can handle any kind of message. This /// generic message is called a payload and is processed and decoded by the /// Message Dispatcher. /// -/// The Message Dispatcher is a class of subscribers that implements a -/// generic trait called Message Dispatcher Interface, which allows us to -/// process any kind of payload as a message. +/// The Message Dispatcher is a class of subscribers that implement a generic +/// trait called Message Dispatcher Interface, which allows us to process any +/// kind of payload as a message. pub mod message_subscriber; +pub use message_subscriber::MessageSubscription; -/// Defines how to decode generic messages as well as implementing the common -/// network messages that are sent between nodes as described by the Protocol -/// submodule. -/// -/// Implements a type called Packet which is the base message type. Packets are -/// converted into messages and passed to an event loop. -pub mod message; - -/// P2P provides all core functionality to interact with the peer-to-peer -/// network. -/// -/// Used to create a network, to start and run it, to broadcast messages across -/// all channels, and to manage the channel store. -/// -/// The channel store is a hashmap of channel address that we can use to add and -/// remove channels or check whether a channel is already is in the store. -pub mod p2p; - -/// Defines the networking protocol used at each stage in a connection. Consists -/// of a series of messages that are sent across the network at the different -/// connection stages. -/// -/// When a node connects to a network for the first time, it must follow a seed -/// protocol, which provides it with a list of network hosts to connect to. To -/// establish a connection to another node, nodes must send version and version -/// acknowledgement messages. 
During a connection, nodes continually get address -/// and get-address messages to inform eachother about what nodes are on the -/// network. Nodes also send out a ping and pong message which keeps the network -/// from shutting down. -/// -/// Protocol submodule also implements a jobs manager than handles the -/// asynchronous execution of the protocols. -pub mod protocol; - -/// Defines the interaction between nodes during a connection. Consists of an -/// inbound session, which describes how to set up an incoming connection, and -/// an outbound session, which describes setting up an outbound connection. Also -/// describes the seed session, which is the type of connection used when a node -/// connects to the network for the first time. Implements the session trait -/// which describes the common functions across all sessions. -pub mod session; - -/// Network configuration settings. -pub mod settings; - -/// Network transport implementations. +/// Network transports, holds implementations of pluggable transports. +/// Exposes agnostic dialers and agnostic listeners. pub mod transport; -/// Network constants for various validations. -pub mod constants; +/// Hosts are a list of network addresses used when establishing outbound +/// connections. Hosts are shared across the network through the address +/// protocol. When attempting to connect, a node will loop through addresses +/// in the hosts store until it finds ones to connect to. +pub mod hosts; -pub use acceptor::{Acceptor, AcceptorPtr}; -pub use channel::{Channel, ChannelPtr}; -pub use connector::Connector; -pub use hosts::{Hosts, HostsPtr}; -pub use message::Message; -pub use message_subscriber::MessageSubscription; +/// Async channel that handles the sending of messages across the network. +/// Public interface is used to create new channels, to stop and start a +/// channel, and to send messages. +pub mod channel; +pub use channel::ChannelPtr; + +/// P2P provides all core functionality to interact with the P2P network. +/// +/// Used to create a network, to start and run it, to broadcast messages +/// across all channels, and to manage the channel store. +/// +/// The channel store is a hashmap of channel addresses that we can use +/// to add and remove channels or check whether a channel is already in +/// the store. +#[macro_use] +pub mod p2p; pub use p2p::{P2p, P2pPtr}; -pub use protocol::{ProtocolBase, ProtocolBasePtr, ProtocolJobsManager, ProtocolJobsManagerPtr}; -pub use session::{ - Session, SessionBitflag, SessionWeakPtr, SESSION_ALL, SESSION_INBOUND, SESSION_MANUAL, - SESSION_OUTBOUND, SESSION_SEED, + +/// Defines the networking protocol used at each stage in a connection. +/// Consists of a series of messages that are sent across the network at +/// the different connection stages. +/// +/// When a node connects to a network for the first time, it must follow +/// a seed protocol, which provides it with a list of network hosts to +/// connect to. To establish a connection to another node, nodes must send +/// version and version acknowledgement messages. During a connection, nodes +/// continually get address and get-address messages to inform each other +/// about what nodes are on the network. Nodes also send out a ping and pong +/// message which keeps the network from shutting down. +/// +/// Protocol submodule also implements a jobs manager that handles the +/// asynchronous execution of the protocols. 
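In terms of the public API further down in this patch, a node's lifecycle is roughly: construct P2p from Settings, seed it with start(), then block in run() until a stop signal arrives. A condensed, hypothetical sketch with the executor type and error handling simplified:

    use async_std::sync::Arc;
    use smol::Executor;
    use crate::{
        net::{P2p, Settings},
        Result,
    };

    async fn start_node(ex: Arc<Executor<'_>>) -> Result<()> {
        // Peers, seeds, inbound addresses, slots, etc. are configured here.
        let settings = Settings::default();

        // Registers the default protocols and all sessions.
        let p2p = P2p::new(settings).await;

        // Seed-sync session: blocks until all seed queries finish.
        p2p.clone().start(ex.clone()).await?;

        // Manual, inbound and outbound sessions; returns after a stop signal.
        p2p.run(ex).await
    }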
+pub mod protocol; +pub use protocol::{ + protocol_base::{ProtocolBase, ProtocolBasePtr}, + protocol_jobs_manager::{ProtocolJobsManager, ProtocolJobsManagerPtr}, }; -pub use settings::{Settings, SettingsPtr}; + +/// Defines the interaction between nodes during a connection. +/// Consists of an inbound session, which describes how to set up an +/// incoming connection, and an outbound session, which describes setting +/// up an outbound connection. Also describes the sesd session, which is +/// the type of connection used when a node connects to the network for +/// the first time. Implements the [`Session`] trait which describes the +/// common functions across all sessions. +pub mod session; +pub use session::SESSION_ALL; + +/// Handles the acceptance of inbound socket connections. +/// Used to start listening on a local socket, to accept incoming connections, +/// and to handle network errors. +pub mod acceptor; + +/// Handles the creation of outbound connections. +/// Used to establish an outbound connection. +pub mod connector; + +/// Network configuration settings. This holds the configured P2P instance +/// behaviour and is controlled by clients of this API. +pub mod settings; +pub use settings::Settings; diff --git a/src/net/p2p.rs b/src/net/p2p.rs index c1e27c13f..b6dd49b6b 100644 --- a/src/net/p2p.rs +++ b/src/net/p2p.rs @@ -16,52 +16,58 @@ * along with this program. If not, see . */ -use std::{ - collections::{HashMap, HashSet}, - fmt, -}; +use std::collections::{HashMap, HashSet}; -use async_std::sync::{Arc, Mutex}; -use futures::{select, stream::FuturesUnordered, try_join, FutureExt, StreamExt, TryFutureExt}; -use log::{debug, error, warn}; -use rand::Rng; +use async_std::{ + stream::StreamExt, + sync::{Arc, Mutex}, +}; +use futures::{stream::FuturesUnordered, TryFutureExt}; +use log::{debug, error, info, warn}; +use rand::{prelude::IteratorRandom, rngs::OsRng}; use serde_json::json; use smol::Executor; use url::Url; +use super::{ + channel::ChannelPtr, + hosts::{Hosts, HostsPtr}, + message::Message, + protocol::{protocol_registry::ProtocolRegistry, register_default_protocols}, + session::{ + InboundSession, InboundSessionPtr, ManualSession, ManualSessionPtr, OutboundSession, + OutboundSessionPtr, SeedSyncSession, Session, + }, + settings::{Settings, SettingsPtr}, +}; use crate::{ system::{Subscriber, SubscriberPtr, Subscription}, - util::async_util::sleep, Result, }; -use super::{ - message::Message, - protocol::{register_default_protocols, ProtocolRegistry}, - session::{InboundSession, ManualSession, OutboundSession, SeedSyncSession, Session}, - Channel, ChannelPtr, Hosts, HostsPtr, Settings, SettingsPtr, -}; - -/// List of channels that are awaiting connection. +/// Set of channels that are awaiting connection pub type PendingChannels = Mutex>; -/// List of connected channels. -pub type ConnectedChannels = Mutex>>; -/// Atomic pointer to p2p interface. +/// Set of connected channels +pub type ConnectedChannels = Mutex>; +/// Atomic pointer to the p2p interface pub type P2pPtr = Arc; +/// Representations of the p2p state enum P2pState { - // The p2p object has been created but not yet started. + /// The P2P object has been created but not yet started Open, - // We are performing the initial seed session + /// We are performing the initial seed session Start, - // Seed session finished, but not yet running + /// Seed session finished, but not yet running Started, - // p2p is running and the network is active. 
+ /// P2P is running and the network is active Run, + /// The P2P network has been stopped + Stopped, } -impl fmt::Display for P2pState { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl std::fmt::Display for P2pState { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, "{}", @@ -70,43 +76,54 @@ impl fmt::Display for P2pState { Self::Start => "start", Self::Started => "started", Self::Run => "run", + Self::Stopped => "stopped", } ) } } -/// Top level peer-to-peer networking interface. +/// Toplevel peer-to-peer networking interface pub struct P2p { + /// Channels pending connection pending: PendingChannels, + /// Connected channels channels: ConnectedChannels, + /// Subscriber for notifications of new channels channel_subscriber: SubscriberPtr>, - // Used both internally and externally + /// Subscriber for stop notifications stop_subscriber: SubscriberPtr<()>, + /// Known hosts (peers) hosts: HostsPtr, + /// Protocol registry protocol_registry: ProtocolRegistry, + /// The state of the interface + state: Mutex, + /// P2P network settings + settings: SettingsPtr, + /// Boolean lock marking if peer discovery is active + pub peer_discovery_running: Mutex, - // We keep a reference to the sessions used for get info + /// Reference to configured [`ManualSession`] session_manual: Mutex>>, + /// Reference to configured [`InboundSession`] session_inbound: Mutex>>, + /// Reference to configured [`OutboundSession`] session_outbound: Mutex>>, - state: Mutex, - - settings: SettingsPtr, - - /// Flag to check if on discovery mode - discovery: Mutex, + /// Enable network debugging + pub dnet_enabled: Mutex, } impl P2p { /// Initialize a new p2p network. /// - /// Initializes all sessions and protocols. Adds the protocols to the protocol registry, along - /// with a bitflag session selector that includes or excludes sessions from seed, version, and - /// address protocols. + /// Initializes all sessions and protocols. Adds the protocols to the protocol + /// registry, along with a bitflag session selector that includes or excludes + /// sessions from seed, version, and address protocols. /// - /// Creates a weak pointer to self that is used by all sessions to access the p2p parent class. - pub async fn new(settings: Settings) -> Arc { + /// Creates a weak pointer to self that is used by all sessions to access the + /// p2p parent class. 
+ pub async fn new(settings: Settings) -> P2pPtr { let settings = Arc::new(settings); let self_ = Arc::new(Self { @@ -114,20 +131,23 @@ impl P2p { channels: Mutex::new(HashMap::new()), channel_subscriber: Subscriber::new(), stop_subscriber: Subscriber::new(), - hosts: Hosts::new(settings.localnet), + hosts: Hosts::new(settings.clone()), protocol_registry: ProtocolRegistry::new(), + state: Mutex::new(P2pState::Open), + settings, + peer_discovery_running: Mutex::new(false), + session_manual: Mutex::new(None), session_inbound: Mutex::new(None), session_outbound: Mutex::new(None), - state: Mutex::new(P2pState::Open), - settings, - discovery: Mutex::new(false), + + dnet_enabled: Mutex::new(false), }); let parent = Arc::downgrade(&self_); *self_.session_manual.lock().await = Some(ManualSession::new(parent.clone())); - *self_.session_inbound.lock().await = Some(InboundSession::new(parent.clone()).await); + *self_.session_inbound.lock().await = Some(InboundSession::new(parent.clone())); *self_.session_outbound.lock().await = Some(OutboundSession::new(parent)); register_default_protocols(self_.clone()).await; @@ -135,375 +155,213 @@ impl P2p { self_ } - // ANCHOR: get_info pub async fn get_info(&self) -> serde_json::Value { - // Building ext_addr_vec string - let mut ext_addr_vec = vec![]; - for ext_addr in &self.settings.external_addr { - ext_addr_vec.push(ext_addr.as_ref().to_string()); + let mut ext = vec![]; + for addr in &self.settings.external_addrs { + ext.push(addr.to_string()); } json!({ - "external_addr": format!("{:?}", ext_addr_vec), - "session_manual": self.session_manual().await.get_info().await, + "external_addrs": format!("{:?}", ext), + //"session_manual": self.session_manual().await.get_info().await, "session_inbound": self.session_inbound().await.get_info().await, "session_outbound": self.session_outbound().await.get_info().await, "state": self.state.lock().await.to_string(), }) } - // ANCHOR_END: get_info /// Invoke startup and seeding sequence. Call from constructing thread. - // ANCHOR: start - pub async fn start(self: Arc, executor: Arc>) -> Result<()> { - debug!(target: "net::p2p::start()", "P2p::start() [BEGIN]"); - + pub async fn start(self: Arc, ex: Arc>) -> Result<()> { + debug!(target: "net::p2p::start()", "P2P::start() [BEGIN]"); + info!(target: "net::p2p::start()", "[P2P] Seeding P2P subsystem"); *self.state.lock().await = P2pState::Start; // Start seed session let seed = SeedSyncSession::new(Arc::downgrade(&self)); // This will block until all seed queries have finished - seed.start(executor.clone()).await?; + seed.start(ex.clone()).await?; *self.state.lock().await = P2pState::Started; - debug!(target: "net::p2p::start()", "P2p::start() [END]"); + debug!(target: "net::p2p::start()", "P2P::start() [END]"); Ok(()) } - // ANCHOR_END: start - pub async fn session_manual(&self) -> Arc { - self.session_manual.lock().await.as_ref().unwrap().clone() - } - pub async fn session_inbound(&self) -> Arc { - self.session_inbound.lock().await.as_ref().unwrap().clone() - } - pub async fn session_outbound(&self) -> Arc { - self.session_outbound.lock().await.as_ref().unwrap().clone() - } - - /// Runs the network. Starts inbound, outbound and manual sessions. + /// Runs the network. Starts inbound, outbound, and manual sessions. /// Waits for a stop signal and stops the network if received. 
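new() builds the Arc first and then hands Arc::downgrade(&self_) to each session, so sessions can reach their parent without forming a strong reference cycle. A minimal sketch of that ownership shape, using hypothetical Parent/Child types rather than the real P2p and session structs:

    use std::sync::{Arc, Mutex, Weak};

    /// Parent owns its child, analogous to P2p owning its sessions.
    struct Parent {
        child: Mutex<Option<Arc<Child>>>,
    }

    /// The child only keeps a weak back-pointer, so no cycle forms.
    struct Child {
        parent: Weak<Parent>,
    }

    impl Parent {
        fn new() -> Arc<Self> {
            let parent = Arc::new(Self { child: Mutex::new(None) });
            // Downgrade before handing the pointer to the child.
            let child = Arc::new(Child { parent: Arc::downgrade(&parent) });
            *parent.child.lock().unwrap() = Some(child);
            parent
        }
    }

    impl Child {
        fn parent(&self) -> Arc<Parent> {
            // Upgrading fails only if the parent was already dropped.
            self.parent.upgrade().expect("parent dropped")
        }
    }

    fn main() {
        let parent = Parent::new();
        assert_eq!(Arc::strong_count(&parent), 1);

        let child = parent.child.lock().unwrap().as_ref().unwrap().clone();
        let upgraded = child.parent();
        assert_eq!(Arc::strong_count(&upgraded), 2);
        println!("weak parent pointer works");
    }

Because only weak pointers flow downward, dropping the P2p object is enough to tear the whole tree down; nothing keeps it alive behind its back.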
- // ANCHOR: run - pub async fn run(self: Arc, executor: Arc>) -> Result<()> { - debug!(target: "net::p2p::run()", "P2p::run() [BEGIN]"); - + pub async fn run(self: Arc, ex: Arc>) -> Result<()> { + debug!(target: "net::p2p::run()", "P2P::run() [BEGIN]"); + info!(target: "net::p2p::run()", "[P2P] Running P2P subsystem"); *self.state.lock().await = P2pState::Run; + // First attempt any set manual connections let manual = self.session_manual().await; for peer in &self.settings.peers { - manual.clone().connect(peer, executor.clone()).await; + manual.clone().connect(peer.clone(), ex.clone()).await; } + // Start the inbound session let inbound = self.session_inbound().await; - inbound.clone().start(executor.clone()).await?; + inbound.clone().start(ex.clone()).await?; + // Start the outbound session let outbound = self.session_outbound().await; - outbound.clone().start(executor.clone()).await?; + outbound.clone().start(ex.clone()).await?; + + info!(target: "net::p2p::run()", "[P2P] P2P subsystem started"); - let stop_sub = self.subscribe_stop().await; // Wait for stop signal + let stop_sub = self.subscribe_stop().await; stop_sub.receive().await; + info!(target: "net::p2p::run()", "[P2P] Received P2P subsystem stop signal. Shutting down."); + // Stop the sessions manual.stop().await; inbound.stop().await; outbound.stop().await; - debug!(target: "net::p2p::run()", "P2p::run() [END]"); + *self.state.lock().await = P2pState::Stopped; + + debug!(target: "net::p2p::run()", "P2P::run() [END]"); Ok(()) } - // ANCHOR_END: run - - /// Wait for outbound connections to be established. - pub async fn wait_for_outbound(self: Arc, executor: Arc>) -> Result<()> { - debug!(target: "net::p2p::wait_for_outbound()", "P2p::wait_for_outbound() [BEGIN]"); - // To verify that the network needs initialization, we check if we have seeds or peers configured, - // and have configured outbound slots. 
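run() starts the manual, inbound and outbound sessions, then simply blocks on the stop subscriber until stop() notifies it, after which it tears the sessions down and flips the state to Stopped. A small self-contained sketch of that "run until a stop signal" shape, using a plain smol channel as a stand-in for the stop Subscriber:

    use std::time::Duration;

    use smol::{channel, Timer};

    fn main() {
        smol::block_on(async {
            // Stand-in for the stop Subscriber.
            let (stop_tx, stop_rx) = channel::unbounded::<()>();

            // "Session" work that runs until it is cancelled.
            let worker = smol::spawn(async {
                loop {
                    Timer::after(Duration::from_millis(50)).await;
                    println!("session tick");
                }
            });

            // Somewhere else, stop() notifies the subscribers.
            smol::spawn(async move {
                Timer::after(Duration::from_millis(200)).await;
                stop_tx.send(()).await.unwrap();
            })
            .detach();

            // run() blocks here until the stop signal arrives, then shuts down.
            stop_rx.recv().await.unwrap();
            let _ = worker.cancel().await;
            println!("stopped");
        });
    }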
- if !(self.settings.seeds.is_empty() && self.settings.peers.is_empty()) && - self.settings.outbound_connections > 0 - { - debug!(target: "net::p2p::wait_for_outbound()", "P2p::wait_for_outbound(): seeds are configured, waiting for outbound initialization..."); - // Retrieve P2P network settings; - let settings = self.settings(); - - // Retrieve our own inbound addresses - let self_inbound_addr = &settings.external_addr; - - // Retrieve timeout config - let timeout = settings.connect_timeout_seconds as u64; - - // Retrieve outbound addresses to connect to (including manual peers) - let peers = &settings.peers; - let outbound = &self.hosts().load_all().await; - - // Enable manual channel subscriber notifications - self.session_manual().await.clone().enable_notify().await; - - // Retrieve manual channel subscriber ptr - let manual_sub = - self.session_manual.lock().await.as_ref().unwrap().subscribe_channel().await; - - // Enable outbound channel subscriber notifications - self.session_outbound().await.clone().enable_notify().await; - - // Retrieve outbound channel subscriber ptr - let outbound_sub = - self.session_outbound.lock().await.as_ref().unwrap().subscribe_channel().await; - - // Create tasks for peers and outbound - let peers_task = Self::outbound_addr_loop( - self_inbound_addr, - timeout, - self.subscribe_stop().await, - peers, - manual_sub, - executor.clone(), - ); - let outbound_task = Self::outbound_addr_loop( - self_inbound_addr, - timeout, - self.subscribe_stop().await, - outbound, - outbound_sub, - executor, - ); - // Wait for both tasks completion - try_join!(peers_task, outbound_task)?; - - // Disable manual channel subscriber notifications - self.session_manual().await.disable_notify().await; - - // Disable outbound channel subscriber notifications - self.session_outbound().await.disable_notify().await; - } - - debug!(target: "net::p2p::wait_for_outbound()", "P2p::wait_for_outbound() [END]"); - Ok(()) - } - - // Wait for the process for each of the provided addresses, excluding our own inbound addresses - async fn outbound_addr_loop( - self_inbound_addr: &[Url], - timeout: u64, - stop_sub: Subscription<()>, - addrs: &Vec, - subscriber: Subscription>, - executor: Arc>, - ) -> Result<()> { - // Process addresses - for addr in addrs { - if self_inbound_addr.contains(addr) { - continue - } - - // Wait for address to be processed. - // We use a timeout to eliminate the following cases: - // 1. Network timeout - // 2. Thread reaching the receiver after peer has signal it - let (timeout_s, timeout_r) = smol::channel::unbounded::<()>(); - executor - .spawn(async move { - sleep(timeout).await; - timeout_s.send(()).await.unwrap_or(()); - }) - .detach(); - - select! { - msg = subscriber.receive().fuse() => { - if let Err(e) = msg { - warn!( - target: "net::p2p::outbound_addr_loop()", - "P2p::wait_for_outbound(): Outbound connection failed [{}]: {}", - addr, e - ); - } - }, - _ = stop_sub.receive().fuse() => debug!(target: "net::p2p::outbound_addr_loop()", "P2p::wait_for_outbound(): stop signal received!"), - _ = timeout_r.recv().fuse() => { - warn!(target: "net::p2p::outbound_addr_loop()", "P2p::wait_for_outbound(): Timeout on outbound connection: {}", addr); - continue - }, - } - } - - Ok(()) - } - - // ANCHOR: stop - pub async fn stop(&self) { - self.stop_subscriber.notify(()).await - } - // ANCHOR_END: stop - - /// Broadcasts a message concurrently across all channels. 
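The removed wait_for_outbound() helper raced each pending connection against a stop signal and a timeout with futures::select!. A compact stand-alone sketch of that race, with channels standing in for the subscriber types and a 1-second timeout chosen arbitrarily:

    use std::time::Duration;

    use futures::{pin_mut, select, FutureExt};
    use smol::{channel, Timer};

    fn main() {
        smol::block_on(async {
            let (conn_tx, conn_rx) = channel::unbounded::<&str>();
            // Keep the stop sender alive so the stop branch stays pending.
            let (_stop_tx, stop_rx) = channel::unbounded::<()>();

            // Pretend a connection attempt succeeds after 30ms.
            smol::spawn(async move {
                Timer::after(Duration::from_millis(30)).await;
                let _ = conn_tx.send("connected").await;
            })
            .detach();

            // Race the connection result against a stop signal and a timeout.
            let conn = conn_rx.recv().fuse();
            let stop = stop_rx.recv().fuse();
            let timeout = Timer::after(Duration::from_secs(1)).fuse();
            pin_mut!(conn, stop, timeout);

            select! {
                msg = conn => println!("outbound result: {:?}", msg),
                _ = stop => println!("stop signal received"),
                _ = timeout => println!("timed out waiting for the connection"),
            }
        });
    }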
- // ANCHOR: broadcast - pub async fn broadcast(&self, message: M) -> Result<()> { - let chans = self.channels.lock().await; - let iter = chans.values(); - let mut futures = FuturesUnordered::new(); - - for channel in iter { - futures.push(channel.send(message.clone()).map_err(|e| { - format!( - "P2P::broadcast: Broadcasting message to {} failed: {}", - channel.address(), - e - ) - })); - } - - if futures.is_empty() { - error!(target: "net::p2p::broadcast()", "P2P::broadcast: No connected channels found"); - return Ok(()) - } - - while let Some(entry) = futures.next().await { - if let Err(e) = entry { - error!(target: "net::p2p::broadcast()", "{}", e); - } - } - - Ok(()) - } - // ANCHOR_END: broadcast - - /// Broadcasts a message concurrently across all channels. - /// Excludes channels provided in `exclude_list`. - pub async fn broadcast_with_exclude( - &self, - message: M, - exclude_list: &[Url], - ) -> Result<()> { - let chans = self.channels.lock().await; - let iter = chans.values(); - let mut futures = FuturesUnordered::new(); - - for channel in iter { - if !exclude_list.contains(&channel.address()) { - futures.push(channel.send(message.clone()).map_err(|e| { - format!( - "P2P::broadcast_with_exclude: Broadcasting message to {} failed: {}", - channel.address(), - e - ) - })); - } - } - - if futures.is_empty() { - error!(target: "net::p2p::broadcast_with_exclude()", "P2P::broadcast_with_exclude: No connected channels found"); - return Ok(()) - } - - while let Some(entry) = futures.next().await { - if let Err(e) = entry { - error!(target: "net::p2p::broadcast_with_exclude()", "{}", e); - } - } - - Ok(()) - } - - /// Add channel address to the list of connected channels. - pub async fn store(&self, channel: ChannelPtr) { - self.channels.lock().await.insert(channel.address(), channel.clone()); - self.channel_subscriber.notify(Ok(channel)).await; - } - - /// Remove a channel from the list of connected channels. - pub async fn remove(&self, channel: ChannelPtr) { - self.channels.lock().await.remove(&channel.address()); - } - - /// Check whether a channel is stored in the list of connected channels. - /// If key is not contained, we also check if we are connected with a different transport. - pub async fn exists(&self, addr: &Url) -> Result { - let channels = self.channels.lock().await; - if channels.contains_key(addr) { - return Ok(true) - } - - let mut addr = addr.clone(); - for transport in &self.settings.outbound_transports { - addr.set_scheme(&transport.to_scheme())?; - if channels.contains_key(&addr) { - return Ok(true) - } - } - - Ok(false) - } - - /// Add a channel to the list of pending channels. - pub async fn add_pending(&self, addr: Url) -> bool { - self.pending.lock().await.insert(addr) - } - - /// Remove a channel from the list of pending channels. - pub async fn remove_pending(&self, addr: &Url) { - self.pending.lock().await.remove(addr); - } - - /// Return the number of connected channels. - pub async fn connections_count(&self) -> usize { - self.channels.lock().await.len() - } - - /// Return an atomic pointer to the default network settings. - pub fn settings(&self) -> SettingsPtr { - self.settings.clone() - } - - /// Return an atomic pointer to the list of hosts. - pub fn hosts(&self) -> HostsPtr { - self.hosts.clone() - } - - pub fn protocol_registry(&self) -> &ProtocolRegistry { - &self.protocol_registry - } - - /// Subscribe to a channel. 
- pub async fn subscribe_channel(&self) -> Subscription> { - self.channel_subscriber.clone().subscribe().await - } /// Subscribe to a stop signal. pub async fn subscribe_stop(&self) -> Subscription<()> { self.stop_subscriber.clone().subscribe().await } - /// Retrieve channels + /// Stop the running P2P subsystem + pub async fn stop(&self) { + self.stop_subscriber.notify(()).await + } + + /// Add a channel to the set of connected channels + pub async fn store(&self, channel: ChannelPtr) { + // TODO: Check the code path for this, and potentially also insert the remote + // into the hosts list? + self.channels.lock().await.insert(channel.address().clone(), channel.clone()); + self.channel_subscriber.notify(Ok(channel)).await; + } + + /// Broadcasts a message concurrently across all active channels. + pub async fn broadcast(&self, message: &M) { + self.broadcast_with_exclude(message, &[]).await + } + + /// Broadcasts a message concurrently across active channels, excluding + /// the ones provided in `exclude_list`. + pub async fn broadcast_with_exclude(&self, message: &M, exclude_list: &[Url]) { + let chans = self.channels.lock().await; + let iter = chans.values(); + let mut futures = FuturesUnordered::new(); + + for channel in iter { + if exclude_list.contains(channel.address()) { + continue + } + + futures.push(channel.send(message).map_err(|e| { + format!("P2P: Broadcasting message to {} failed: {}", channel.address(), e) + })); + } + + if futures.is_empty() { + warn!(target: "net::p2p::broadcast()", "P2P: No connected channels found for broadcast"); + return + } + + while let Some(entry) = futures.next().await { + // TODO: Here we can close the channels. + // See message_subscriber::_trigger_all on how to do it. + if let Err(e) = entry { + error!(target: "net::p2p::broadcast()", "{}", e); + } + } + } + + /// Check whether we're connected to a given address + pub async fn exists(&self, addr: &Url) -> bool { + self.channels.lock().await.contains_key(addr) + } + + /// Remove a channel from the set of connected channels + pub async fn remove(&self, channel: ChannelPtr) { + self.channels.lock().await.remove(channel.address()); + } + + /// Add an address to the list of pending channels. + pub async fn add_pending(&self, addr: &Url) -> bool { + self.pending.lock().await.insert(addr.clone()) + } + + /// Remove a channel from the list of pending channels. + pub async fn remove_pending(&self, addr: &Url) { + self.pending.lock().await.remove(addr); + } + + /// Return reference to connected channels map pub fn channels(&self) -> &ConnectedChannels { &self.channels } - /// Try to start discovery mode. - /// Returns false if already on discovery mode. - pub async fn start_discovery(self: Arc) -> bool { - if *self.discovery.lock().await { - return false - } - *self.discovery.lock().await = true; - true + /// Retrieve a random connected channel from the + pub async fn random_channel(&self) -> Option { + let channels = self.channels().lock().await; + channels.values().choose(&mut OsRng).cloned() } - /// Stops discovery mode. 
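broadcast() now just delegates to broadcast_with_exclude(), which pushes one send future per non-excluded channel into a FuturesUnordered and drains it, logging failures instead of aborting. A self-contained sketch of that fan-out, with a toy Chan type standing in for the real ChannelPtr:

    use futures::stream::{FuturesUnordered, StreamExt};

    /// Stand-in for a connected channel; sending just pretends to do I/O.
    struct Chan {
        addr: String,
    }

    impl Chan {
        async fn send(&self, msg: &str) -> Result<(), String> {
            if self.addr.contains("dead") {
                return Err(format!("sending to {} failed", self.addr))
            }
            println!("{} <- {}", self.addr, msg);
            Ok(())
        }
    }

    fn main() {
        smol::block_on(async {
            let chans = vec![
                Chan { addr: "tcp://a".into() },
                Chan { addr: "tcp://dead".into() },
                Chan { addr: "tcp://b".into() },
            ];
            let exclude = ["tcp://b".to_string()];

            // One send future per non-excluded channel, drained concurrently.
            let mut futures = FuturesUnordered::new();
            for chan in chans.iter().filter(|c| !exclude.contains(&c.addr)) {
                futures.push(chan.send("hello"));
            }

            // Failures are logged instead of aborting the broadcast.
            while let Some(res) = futures.next().await {
                if let Err(e) = res {
                    eprintln!("broadcast error: {}", e);
                }
            }
        });
    }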
- pub async fn stop_discovery(self: Arc) { - *self.discovery.lock().await = false; + /// Return an atomic pointer to the set network settings + pub fn settings(&self) -> SettingsPtr { + self.settings.clone() } - /// Retrieves a random connected channel, exluding seeds - pub async fn random_channel(self: Arc) -> Option> { - let mut channels_map = self.channels().lock().await.clone(); - channels_map.retain(|c, _| !self.settings.seeds.contains(c)); - let mut values = channels_map.values(); + /// Return an atomic pointer to the list of hosts + pub fn hosts(&self) -> HostsPtr { + self.hosts.clone() + } - if values.len() == 0 { - return None - } + /// Return a reference to the internal protocol registry + pub fn protocol_registry(&self) -> &ProtocolRegistry { + &self.protocol_registry + } - Some(values.nth(rand::thread_rng().gen_range(0..values.len())).unwrap().clone()) + /// Get pointer to manual session + pub async fn session_manual(&self) -> ManualSessionPtr { + self.session_manual.lock().await.as_ref().unwrap().clone() + } + + /// Get pointer to inbound session + pub async fn session_inbound(&self) -> InboundSessionPtr { + self.session_inbound.lock().await.as_ref().unwrap().clone() + } + + /// Get pointer to outbound session + pub async fn session_outbound(&self) -> OutboundSessionPtr { + self.session_outbound.lock().await.as_ref().unwrap().clone() + } + + /// Enable network debugging + pub async fn enable_dnet(&self) { + *self.dnet_enabled.lock().await = true; + warn!("[P2P] Network debugging enabled!"); + } + + /// Disable network debugging + pub async fn disable_dnet(&self) { + *self.dnet_enabled.lock().await = false; + warn!("[P2P] Network debugging disabled!"); } } + +macro_rules! dnet { + ($self:expr, $($code:tt)*) => { + { + if *$self.p2p().dnet_enabled.lock().await { + $($code)* + } + } + }; +} +pub(crate) use dnet; diff --git a/src/net/protocol/mod.rs b/src/net/protocol/mod.rs index 4dc307584..a9a009e86 100644 --- a/src/net/protocol/mod.rs +++ b/src/net/protocol/mod.rs @@ -16,71 +16,63 @@ * along with this program. If not, see . */ -/// Protocol for address and get-address messages. Implements how nodes exchange -/// connection information about other nodes on the network. Address and -/// get-address messages are exchanged continually alongside ping-pong messages -/// as part of a network connection. -/// -/// Protocol starts by creating a subscription to address and get address -/// messages. Then the protocol sends out a get address message and waits for an -/// address message. Upon receiving an address messages, nodes add the -/// address information to their local store. -pub mod protocol_address; - -/// Manages the tasks for the network protocol. Used by other connection -/// protocols to handle asynchronous task execution across the network. Runs all -/// tasks that are handed to it on an executor that has stopping functionality. -pub mod protocol_jobs_manager; - -/// Protocol for ping-pong keep-alive messages. Implements ping message and pong -/// response. These messages are like the network heartbeat- they are sent -/// continually between nodes, to ensure each node is still alive and active. -/// Ping-pong messages ensure that the network doesn't -/// time out. -/// -/// Protocol starts by creating a subscription to ping and pong messages. Then -/// it starts a loop with a timer and runs ping-pong in the task manager. It -/// sends out a ping and waits for pong reply. Then waits for ping and replies -/// with a pong. -pub mod protocol_ping; - -/// Seed server protocol. 
Seed server is used when connecting to the network for -/// the first time. Returns a list of IP addresses that nodes can connect to. -/// -/// To start the seed protocol, we create a subscription to the address message, -/// and send our address to the seed server. Then we send a get-address message -/// and receive an address message. We add these addresses to our internal -/// store. -pub mod protocol_seed; - -/// Protocol for version information handshake between nodes at the start of a -/// connection. Implements the process for exchanging version information -/// between nodes. This is the first step when establishing a p2p connection. -/// -/// The version protocol starts of by instantiating the protocol and creating a -/// new subscription to version and version acknowledgement messages. Then we -/// run the protocol. Nodes send a version message and wait for a version -/// acknowledgement, while asynchronously waiting for version info from the -/// other node and sending the version acknowledgement. -pub mod protocol_version; - -pub mod protocol_base; -pub mod protocol_registry; - -pub use protocol_address::ProtocolAddress; -pub use protocol_jobs_manager::{ProtocolJobsManager, ProtocolJobsManagerPtr}; -pub use protocol_ping::ProtocolPing; -pub use protocol_seed::ProtocolSeed; -pub use protocol_version::ProtocolVersion; - -pub use protocol_base::{ProtocolBase, ProtocolBasePtr}; -pub use protocol_registry::ProtocolRegistry; - use super::{ + p2p::P2pPtr, session::{SESSION_ALL, SESSION_SEED}, - P2pPtr, }; +/// Manages the tasks for the network protocol. Used by other connection +/// protocols to handle asynchronous task execution across the network. +/// Runs all tasks that are handed to it on an executor that has stopping +/// functionality. +pub mod protocol_jobs_manager; + +/// Protocol for version information handshake between nodes at the start +/// of a connection. This is the first step when establishing a p2p conn. +/// +/// The version protocol starts by instantiating the protocol and creating +/// a new subscription to version and version acknowledgement messages. +/// Then we run the protocol. Nodes send a version message and wait for a +/// version acknowledgement, while asynchronously waiting for version info +/// from the other node and sending the version acknowledgement. +pub mod protocol_version; +pub use protocol_version::ProtocolVersion; + +/// Protocol for ping-pong keepalive messages. Implements ping message and +/// pong response. These messages are like the network heartbeat - they are +/// sent continually between nodes, to ensure each node is still alive and +/// active. Ping-pong messages ensure that the network doesn't time out. +pub mod protocol_ping; +pub use protocol_ping::ProtocolPing; + +/// Protocol for address and get-address messages. Implements how nodes +/// exchange connection information about other nodes on the network. +/// Address and get-address messages are exchanged continually alongside +/// ping-pong messages as part of a network connection. +/// +/// Protocol starts by creating a subscription to address and get-address +/// messages. Then the protocol sends out a get-address message and waits +/// for an address message. Upon receiving address messages, nodes validate +/// and add the address information to their local store. +pub mod protocol_address; +pub use protocol_address::ProtocolAddress; + +/// Seed servere protocol. Seed server is used when connecting to the network +/// for the first time. 
Returns a list of peers that nodes can connect to. +/// +/// To start the seed protocol, we create a subscription to the address +/// message, and send our address to the seed server. Then we send a +/// get-address message and receive an address message. We add these addresses +/// to our internal store. +pub mod protocol_seed; +pub use protocol_seed::ProtocolSeed; + +/// Base trait for implementing P2P protocols +pub mod protocol_base; +/// Interface for registering arbitrary P2P protocols +pub mod protocol_registry; + +/// Register the default network protocols for a p2p instance. pub async fn register_default_protocols(p2p: P2pPtr) { let registry = p2p.protocol_registry(); registry.register(SESSION_ALL, ProtocolPing::init).await; diff --git a/src/net/protocol/protocol_address.rs b/src/net/protocol/protocol_address.rs index e56e62cba..62523ec99 100644 --- a/src/net/protocol/protocol_address.rs +++ b/src/net/protocol/protocol_address.rs @@ -16,143 +16,125 @@ * along with this program. If not, see . */ -use std::sync::Arc; - +use async_std::sync::Arc; use async_trait::async_trait; use log::debug; -use rand::seq::SliceRandom; use smol::Executor; -use crate::{util::async_util, Result}; - use super::{ super::{ - message, message_subscriber::MessageSubscription, ChannelPtr, HostsPtr, P2pPtr, - SettingsPtr, SESSION_OUTBOUND, + channel::ChannelPtr, + hosts::HostsPtr, + message::{AddrsMessage, GetAddrsMessage}, + message_subscriber::MessageSubscription, + p2p::P2pPtr, + session::SESSION_OUTBOUND, + settings::SettingsPtr, }, - ProtocolBase, ProtocolBasePtr, ProtocolJobsManager, ProtocolJobsManagerPtr, + protocol_base::{ProtocolBase, ProtocolBasePtr}, + protocol_jobs_manager::{ProtocolJobsManager, ProtocolJobsManagerPtr}, }; +use crate::{util::async_util::sleep, Result}; -const SEND_ADDR_SLEEP_SECONDS: u64 = 900; - -/// Defines address and get-address messages. +/// Defines address and get-address messages pub struct ProtocolAddress { channel: ChannelPtr, - addrs_sub: MessageSubscription, - ext_addrs_sub: MessageSubscription, - get_addrs_sub: MessageSubscription, + addrs_sub: MessageSubscription, + get_addrs_sub: MessageSubscription, hosts: HostsPtr, - jobsman: ProtocolJobsManagerPtr, settings: SettingsPtr, + jobsman: ProtocolJobsManagerPtr, } +const PROTO_NAME: &str = "ProtocolAddress"; + impl ProtocolAddress { - /// Create a new address protocol. Makes an address, an external address - /// and a get-address subscription and adds them to the address protocol instance. + /// Creates a new address protocol. Makes an address, an external address + /// and a get-address subscription and adds them to the address protocol + /// instance. pub async fn init(channel: ChannelPtr, p2p: P2pPtr) -> ProtocolBasePtr { let settings = p2p.settings(); let hosts = p2p.hosts(); - // Creates a subscription to address message. - let addrs_sub = channel - .clone() - .subscribe_msg::() - .await - .expect("Missing addrs dispatcher!"); + // Creates a subscription to address message + let addrs_sub = + channel.subscribe_msg::().await.expect("Missing addrs dispatcher!"); - // Creates a subscription to external address message. - let ext_addrs_sub = channel - .clone() - .subscribe_msg::() - .await - .expect("Missing ext_addrs dispatcher!"); - - // Creates a subscription to get-address message. 
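register_default_protocols() (earlier in this hunk) registers each protocol under a session bitflag, and ProtocolRegistry::attach() later keeps only the constructors whose flags intersect the channel's session type. A sketch of that selection logic; the flag values below are made up for illustration, the real constants live in net::session:

    // Hypothetical flag values; only the bitwise-AND filtering matters here.
    type SessionBitFlag = u32;
    const SESSION_INBOUND: SessionBitFlag = 0b001;
    const SESSION_OUTBOUND: SessionBitFlag = 0b010;
    const SESSION_SEED: SessionBitFlag = 0b100;
    const SESSION_ALL: SessionBitFlag = 0b111;

    fn main() {
        // (protocol name, sessions it was registered for)
        let registry = [
            ("ProtocolPing", SESSION_ALL),
            ("ProtocolAddress", SESSION_INBOUND | SESSION_OUTBOUND),
            ("ProtocolSeed", SESSION_SEED),
        ];

        // A new outbound channel only attaches protocols whose registered
        // flags intersect with its own session type.
        let selector = SESSION_OUTBOUND;
        for (name, flags) in registry {
            if selector & flags == 0 {
                println!("skipping {}", name);
                continue
            }
            println!("attaching {}", name);
        }
    }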
- let get_addrs_sub = channel - .clone() - .subscribe_msg::() - .await - .expect("Missing getaddrs dispatcher!"); + // Creates a subscription to get-address message + let get_addrs_sub = + channel.subscribe_msg::().await.expect("Missing getaddrs dispatcher!"); Arc::new(Self { channel: channel.clone(), addrs_sub, - ext_addrs_sub, get_addrs_sub, hosts, - jobsman: ProtocolJobsManager::new("ProtocolAddress", channel), + jobsman: ProtocolJobsManager::new(PROTO_NAME, channel), settings, }) } - /// Handles receiving the address message. Loops to continually recieve - /// address messages on the address subsciption. Adds the recieved - /// addresses to the list of hosts. + /// Handles receiving the address message. Loops to continually receive + /// address messages on the address subscription. Validates and adds the + /// received addresses to the hosts set. async fn handle_receive_addrs(self: Arc) -> Result<()> { - debug!(target: "net::protocol_address::handle_receive_addrs()", "START"); + debug!( + target: "net::protocol_address::handle_receive_addrs()", + "[START] address={}", self.channel.address(), + ); + loop { let addrs_msg = self.addrs_sub.receive().await?; debug!( target: "net::protocol_address::handle_receive_addrs()", - "received {} addrs", - addrs_msg.addrs.len() + "Received {} addrs from {}", addrs_msg.addrs.len(), self.channel.address(), ); - self.hosts.store(addrs_msg.addrs.clone()).await; + + // TODO: We might want to close the channel here if we're getting + // corrupted addresses. + self.hosts.store(&addrs_msg.addrs).await; } } - /// Handles receiving the external address message. Loops to continually recieve - /// external address messages on the address subsciption. Adds the recieved - /// external addresses to the list of hosts. - async fn handle_receive_ext_addrs(self: Arc) -> Result<()> { - debug!(target: "net::protocol_address::handle_receive_ext_addrs()", "START"); - loop { - let ext_addrs_msg = self.ext_addrs_sub.receive().await?; - debug!( - target: "net::protocol_address::handle_receive_ext_addrs()", - "ProtocolAddress::handle_receive_ext_addrs() received {} addrs", - ext_addrs_msg.ext_addrs.len() - ); - self.hosts.store_ext(self.channel.address(), ext_addrs_msg.ext_addrs.clone()).await; - } - } - - /// Handles receiving the get-address message. Continually recieves - /// get-address messages on the get-address subsciption. Then replies - /// with an address message. + /// Handles receiving the get-address message. Continually receives get-address + /// messages on the get-address subscription. Then replies with an address message. async fn handle_receive_get_addrs(self: Arc) -> Result<()> { - debug!(target: "net::protocol_address::handle_receive_get_addrs()", "START"); + debug!( + target: "net::protocol_address::handle_receive_get_addrs()", + "[START] address={}", self.channel.address(), + ); + loop { - let _get_addrs = self.get_addrs_sub.receive().await?; + let get_addrs_msg = self.get_addrs_sub.receive().await?; debug!( target: "net::protocol_address::handle_receive_get_addrs()", - "Received GetAddrs message" + "Received GetAddrs({}) message from {}", get_addrs_msg.max, self.channel.address(), ); - // Loads the list of hosts. 
- let mut addrs = self.hosts.load_all().await; - // Shuffling list of hosts - addrs.shuffle(&mut rand::thread_rng()); + let addrs = self.hosts.get_n_random(get_addrs_msg.max).await; debug!( target: "net::protocol_address::handle_receive_get_addrs()", - "Sending {} addrs", - addrs.len() + "Sending {} addresses to {}", addrs.len(), self.channel.address(), ); - // Creates an address messages containing host address. - let addrs_msg = message::AddrsMessage { addrs }; - // Sends the address message across the channel. - self.channel.clone().send(addrs_msg).await?; + + let addrs_msg = AddrsMessage { addrs }; + self.channel.send(&addrs_msg).await?; } } + /// Periodically send our external addresses through the channel. async fn send_my_addrs(self: Arc) -> Result<()> { - debug!(target: "net::protocol_address::send_my_addrs()", "START"); + debug!( + target: "net::protocol_address::send_my_addrs()", + "[START] address={}", self.channel.address(), + ); + + // FIXME: Revisit this. Why do we keep sending it? loop { - let ext_addrs = self.settings.external_addr.clone(); - let ext_addr_msg = message::ExtAddrsMessage { ext_addrs }; - self.channel.clone().send(ext_addr_msg).await?; - async_util::sleep(SEND_ADDR_SLEEP_SECONDS).await; + let ext_addr_msg = AddrsMessage { addrs: self.settings.external_addrs.clone() }; + self.channel.send(&ext_addr_msg).await?; + sleep(900).await; } } } @@ -160,32 +142,36 @@ impl ProtocolAddress { #[async_trait] impl ProtocolBase for ProtocolAddress { /// Starts the address protocol. Runs receive address and get address - /// protocols on the protocol task manager. Then sends get-address - /// message. - async fn start(self: Arc, executor: Arc>) -> Result<()> { + /// protocols on the protocol task manager. Then sends get-address msg. + async fn start(self: Arc, ex: Arc>) -> Result<()> { + debug!(target: "net::protocol_address::start()", "START => address={}", self.channel.address()); + let type_id = self.channel.session_type_id(); - // if it's an outbound session + has an external address - // send our address - if type_id == SESSION_OUTBOUND && !self.settings.external_addr.is_empty() { - self.jobsman.clone().start(executor.clone()); - self.jobsman.clone().spawn(self.clone().send_my_addrs(), executor.clone()).await; + let mut jobsman_started = false; + + // If it's an outbound session + has an extern_addr, send our address. + if type_id == SESSION_OUTBOUND && !self.settings.external_addrs.is_empty() { + self.jobsman.clone().start(ex.clone()); + jobsman_started = true; + self.jobsman.clone().spawn(self.clone().send_my_addrs(), ex.clone()).await; } - debug!(target: "net::protocol_address::start()", "START"); - self.jobsman.clone().start(executor.clone()); - self.jobsman.clone().spawn(self.clone().handle_receive_addrs(), executor.clone()).await; - self.jobsman.clone().spawn(self.clone().handle_receive_ext_addrs(), executor.clone()).await; - self.jobsman.clone().spawn(self.clone().handle_receive_get_addrs(), executor).await; + if !jobsman_started { + self.jobsman.clone().start(ex.clone()); + } + self.jobsman.clone().spawn(self.clone().handle_receive_addrs(), ex.clone()).await; + self.jobsman.spawn(self.clone().handle_receive_get_addrs(), ex).await; // Send get_address message. 
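handle_receive_get_addrs() now answers with hosts.get_n_random(max) instead of shuffling and sending the whole host list. A stand-alone sketch of bounded random sampling with rand's IteratorRandom; get_n_random itself is assumed to behave roughly like this, the real implementation lives in net::hosts:

    use rand::{rngs::OsRng, seq::IteratorRandom};

    fn get_n_random(known: &[&str], n: usize) -> Vec<String> {
        // Sample up to n distinct peers; fewer are returned if we know fewer.
        known.iter().map(|s| s.to_string()).choose_multiple(&mut OsRng, n)
    }

    fn main() {
        let known = ["tcp://a:1", "tcp://b:1", "tcp://c:1", "tcp://d:1"];
        // A peer asked for at most 2 addresses (GetAddrsMessage { max: 2 }).
        let reply = get_n_random(&known, 2);
        println!("sending {} addrs: {:?}", reply.len(), reply);
    }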
- let get_addrs = message::GetAddrsMessage {}; - let _ = self.channel.clone().send(get_addrs).await; - debug!(target: "net::protocol_address::start()", "END"); + let get_addrs = GetAddrsMessage { max: self.settings.outbound_connections as u32 }; + self.channel.send(&get_addrs).await?; + + debug!(target: "net::protocol_address::start()", "END => address={}", self.channel.address()); Ok(()) } fn name(&self) -> &'static str { - "ProtocolAddress" + PROTO_NAME } } diff --git a/src/net/protocol/protocol_base.rs b/src/net/protocol/protocol_base.rs index d2c375ad9..cdbf3ac4f 100644 --- a/src/net/protocol/protocol_base.rs +++ b/src/net/protocol/protocol_base.rs @@ -16,8 +16,7 @@ * along with this program. If not, see . */ -use std::sync::Arc; - +use async_std::sync::Arc; use async_trait::async_trait; use smol::Executor; diff --git a/src/net/protocol/protocol_jobs_manager.rs b/src/net/protocol/protocol_jobs_manager.rs index 3ba9ec2f4..1ac673424 100644 --- a/src/net/protocol/protocol_jobs_manager.rs +++ b/src/net/protocol/protocol_jobs_manager.rs @@ -16,23 +16,17 @@ * along with this program. If not, see . */ -use async_std::sync::Mutex; -use std::sync::Arc; - +use async_std::sync::{Arc, Mutex}; use futures::Future; -use log::*; -use smol::Task; +use log::{debug, trace}; +use smol::{Executor, Task}; -use crate::{system::ExecutorPtr, Result}; +use super::super::channel::ChannelPtr; +use crate::Result; -use super::super::ChannelPtr; - -/// Pointer to protocol jobs manager. +/// Pointer to protocol jobs manager pub type ProtocolJobsManagerPtr = Arc; -/// Manages the tasks for the network protocol. Used by other connection -/// protocols to handle asynchronous task execution across the network. Runs all -/// tasks that are handed to it on an executor that has stopping functionality. pub struct ProtocolJobsManager { name: &'static str, channel: ChannelPtr, @@ -40,29 +34,29 @@ pub struct ProtocolJobsManager { } impl ProtocolJobsManager { - /// Create a new protocol jobs manager. - pub fn new(name: &'static str, channel: ChannelPtr) -> Arc { - Arc::new(Self { name, channel, tasks: Mutex::new(Vec::new()) }) + /// Create a new protocol jobs manager + pub fn new(name: &'static str, channel: ChannelPtr) -> ProtocolJobsManagerPtr { + Arc::new(Self { name, channel, tasks: Mutex::new(vec![]) }) } - /// Runs the task on an executor. Prepares to stop all tasks when the - /// channel is closed. - pub fn start(self: Arc, executor: ExecutorPtr<'_>) { + /// Runs the task on an executor + pub fn start(self: Arc, executor: Arc>) { executor.spawn(self.handle_stop()).detach() } - /// Spawns a new task and adds it to the internal queue. - pub async fn spawn<'a, F>(&self, future: F, executor: ExecutorPtr<'a>) + /// Spawns a new task and adds it to the internal queue + pub async fn spawn<'a, F>(&self, future: F, executor: Arc>) where F: Future> + Send + 'a, { self.tasks.lock().await.push(executor.spawn(future)) } - /// Waits for a stop signal, then closes all tasks. Insures that all tasks - /// are stopped when a channel closes. Called in start(). + /// Waits for a stop signal, then closes all tasks. + /// Ensures that all tasks are stopped when a channel closes. + /// Called in `start()` async fn handle_stop(self: Arc) { - let stop_sub = self.channel.clone().subscribe_stop().await; + let stop_sub = self.channel.subscribe_stop().await; if stop_sub.is_ok() { // Wait for the stop signal @@ -72,19 +66,24 @@ impl ProtocolJobsManager { self.close_all_tasks().await } - /// Closes all open tasks. 
Takes all the tasks from the internal queue and - /// closes them. + /// Closes all open tasks. Takes all the tasks from the internal queue. async fn close_all_tasks(self: Arc) { - debug!(target: "net::protocol_jobs_manager", + debug!( + target: "net::protocol_jobs_manager", "ProtocolJobsManager::close_all_tasks() [START, name={}, addr={}]", - self.name, - self.channel.address() + self.name, self.channel.address(), ); - // Take all the tasks from our internal queue... + let tasks = std::mem::take(&mut *self.tasks.lock().await); + + trace!(target: "net::protocol_jobs_manager", "Cancelling {} tasks", tasks.len()); + let mut i = 0; + #[allow(clippy::explicit_counter_loop)] for task in tasks { - // ... and cancel them + trace!(target: "net::protocol_jobs_manager", "Cancelling task #{}", i); let _ = task.cancel().await; + trace!(target: "net::protocol_jobs_manager", "Cancelled task #{}", i); + i += 1; } } } diff --git a/src/net/protocol/protocol_ping.rs b/src/net/protocol/protocol_ping.rs index 1d3a95ff0..fd420795d 100644 --- a/src/net/protocol/protocol_ping.rs +++ b/src/net/protocol/protocol_ping.rs @@ -16,135 +16,152 @@ * along with this program. If not, see . */ -use std::{sync::Arc, time::Instant}; +use std::time::Instant; +use async_std::sync::Arc; use async_trait::async_trait; use log::{debug, error}; -use rand::Rng; +use rand::{rngs::OsRng, Rng}; use smol::Executor; +use super::{ + super::{ + channel::ChannelPtr, + message::{PingMessage, PongMessage}, + message_subscriber::MessageSubscription, + p2p::P2pPtr, + settings::SettingsPtr, + }, + protocol_base::{ProtocolBase, ProtocolBasePtr}, + protocol_jobs_manager::{ProtocolJobsManager, ProtocolJobsManagerPtr}, +}; use crate::{util::async_util::sleep, Error, Result}; -use super::{ - super::{message, message_subscriber::MessageSubscription, ChannelPtr, P2pPtr, SettingsPtr}, - ProtocolBase, ProtocolBasePtr, ProtocolJobsManager, ProtocolJobsManagerPtr, -}; - -/// Defines ping and pong messages. +/// Defines ping and pong messages pub struct ProtocolPing { channel: ChannelPtr, - ping_sub: MessageSubscription, - pong_sub: MessageSubscription, + ping_sub: MessageSubscription, + pong_sub: MessageSubscription, settings: SettingsPtr, jobsman: ProtocolJobsManagerPtr, } +const PROTO_NAME: &str = "ProtocolPing"; + impl ProtocolPing { /// Create a new ping-pong protocol. pub async fn init(channel: ChannelPtr, p2p: P2pPtr) -> ProtocolBasePtr { let settings = p2p.settings(); - // Creates a subscription to ping message. - let ping_sub = channel - .clone() - .subscribe_msg::() - .await - .expect("Missing ping dispatcher!"); + // Creates a subscription to ping message + let ping_sub = + channel.subscribe_msg::().await.expect("Missing ping dispatcher!"); - // Creates a subscription to pong message. - let pong_sub = channel - .clone() - .subscribe_msg::() - .await - .expect("Missing pong dispatcher!"); + // Creates a subscription to pong message + let pong_sub = + channel.subscribe_msg::().await.expect("Missing pong dispatcher!"); Arc::new(Self { channel: channel.clone(), ping_sub, pong_sub, settings, - jobsman: ProtocolJobsManager::new("ProtocolPing", channel), + jobsman: ProtocolJobsManager::new(PROTO_NAME, channel), }) } - /// Runs ping-pong protocol. Creates a subscription to pong, then starts a - /// loop. Loop sleeps for the duration of the channel heartbeat, then - /// sends a ping message with a random nonce. Loop starts a timer, waits - /// for the pong reply and insures the nonce is the same. + /// Runs the ping-pong protocol. 
Creates a subscription to pong, then + /// starts a loop. Loop sleeps for the duration of the channel heartbeat, + /// then sends a ping message with a random nonce. Loop starts a timer, + /// waits for the pong reply and ensures the nonce is the same. async fn run_ping_pong(self: Arc) -> Result<()> { - debug!(target: "net::protocol_ping::run_ping_pong()", "START"); - loop { - // Wait channel_heartbeat amount of time. - sleep(self.settings.channel_heartbeat_seconds.into()).await; + debug!( + target: "net::protocol_ping::run_ping_pong()", + "START => address={}", self.channel.address(), + ); + loop { // Create a random nonce. let nonce = Self::random_nonce(); // Send ping message. - let ping = message::PingMessage { nonce }; - self.channel.clone().send(ping).await?; - debug!(target: "net::protocol_ping::run_ping_pong()", "Send Ping message"); - // Start the timer for ping timer. - let start = Instant::now(); + let ping = PingMessage { nonce }; + self.channel.send(&ping).await?; + + // Start the timer for the ping timer + let timer = Instant::now(); // Wait for pong, check nonce matches. let pong_msg = self.pong_sub.receive().await?; if pong_msg.nonce != nonce { - // TODO: this is too extreme error!( target: "net::protocol_ping::run_ping_pong()", - "Wrong nonce for ping reply. Disconnecting from channel." + "[P2P] Wrong nonce in pingpong, disconnecting {}", + self.channel.address(), ); self.channel.stop().await; return Err(Error::ChannelStopped) } - let duration = start.elapsed().as_millis(); + debug!( target: "net::protocol_ping::run_ping_pong()", - "Received Pong message {}ms from [{:?}]", - duration, - self.channel.address() + "Received Pong from {}: {}ms", + timer.elapsed().as_millis(), + self.channel.address(), + ); + + // Sleep until next heartbeat + sleep(self.settings.channel_heartbeat_interval).await; + } + } + + /// Waits for ping, then replies with pong. + /// Copies ping's nonce into the pong reply. + async fn reply_to_ping(self: Arc) -> Result<()> { + debug!( + target: "net::protocol_ping::reply_to_ping()", + "START => address={}", self.channel.address(), + ); + + loop { + // Wait for ping, reply with pong that has a matching nonce. + let ping = self.ping_sub.receive().await?; + debug!( + target: "net::protocol_ping::reply_to_ping()", + "Received Ping from {}", self.channel.address(), + ); + + // Send pong message + let pong = PongMessage { nonce: ping.nonce }; + self.channel.send(&pong).await?; + + debug!( + target: "net::protocol_ping::reply_to_ping()", + "Sent Pong reply to {}", self.channel.address(), ); } } - /// Waits for ping, then replies with pong. Copies ping's nonce into the - /// pong reply. - async fn reply_to_ping(self: Arc) -> Result<()> { - debug!(target: "net::protocol_ping::reply_to_ping()", "START"); - loop { - // Wait for ping, reply with pong that has a matching nonce. - let ping = self.ping_sub.receive().await?; - debug!(target: "net::protocol_ping::reply_to_ping()", "Received Ping message"); - - // Send pong message. - let pong = message::PongMessage { nonce: ping.nonce }; - self.channel.clone().send(pong).await?; - debug!(target: "net::protocol_ping::reply_to_ping()", "Sent Pong reply"); - } - } - - fn random_nonce() -> u32 { - let mut rng = rand::thread_rng(); - rng.gen() + fn random_nonce() -> u16 { + OsRng::gen(&mut OsRng) } } #[async_trait] impl ProtocolBase for ProtocolPing { - /// Starts ping-pong keep-alive messages exchange. Runs ping-pong in the + /// Starts ping-pong keepalive messages exchange. 
Runs ping-pong in the /// protocol task manager, then queues the reply. Sends out a ping and /// waits for pong reply. Waits for ping and replies with a pong. - async fn start(self: Arc, executor: Arc>) -> Result<()> { - debug!(target: "net::protocol_ping::start()", "START"); - self.jobsman.clone().start(executor.clone()); - self.jobsman.clone().spawn(self.clone().run_ping_pong(), executor.clone()).await; - self.jobsman.clone().spawn(self.reply_to_ping(), executor).await; - debug!(target: "net::protocol_ping::start()", "END"); + async fn start(self: Arc, ex: Arc>) -> Result<()> { + debug!(target: "net::protocol_ping::start()", "START => address={}", self.channel.address()); + self.jobsman.clone().start(ex.clone()); + self.jobsman.clone().spawn(self.clone().run_ping_pong(), ex.clone()).await; + self.jobsman.clone().spawn(self.clone().reply_to_ping(), ex).await; + debug!(target: "net::protocol_ping::start()", "END => address={}", self.channel.address()); Ok(()) } fn name(&self) -> &'static str { - "ProtocolPing" + PROTO_NAME } } diff --git a/src/net/protocol/protocol_registry.rs b/src/net/protocol/protocol_registry.rs index b2169b56b..451261b18 100644 --- a/src/net/protocol/protocol_registry.rs +++ b/src/net/protocol/protocol_registry.rs @@ -17,36 +17,30 @@ */ use async_std::sync::Mutex; -use std::future::Future; - -use futures::future::BoxFuture; +use futures::{future::BoxFuture, Future}; use log::debug; use super::{ - super::{session::SessionBitflag, ChannelPtr, P2pPtr}, - ProtocolBasePtr, + super::{channel::ChannelPtr, p2p::P2pPtr, session::SessionBitFlag}, + protocol_base::ProtocolBasePtr, }; type Constructor = Box BoxFuture<'static, ProtocolBasePtr> + Send + Sync>; +#[derive(Default)] pub struct ProtocolRegistry { - protocol_constructors: Mutex>, -} - -impl Default for ProtocolRegistry { - fn default() -> Self { - Self::new() - } + constructors: Mutex>, } impl ProtocolRegistry { + /// Instantiate a new [`ProtocolRegistry`] pub fn new() -> Self { - Self { protocol_constructors: Mutex::new(Vec::new()) } + Self::default() } - // add_protocol()? 
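run_ping_pong() (above) sends a random nonce, waits for the echoed pong and stops the channel if the nonce does not match, while reply_to_ping() does the mirror image. A minimal synchronous sketch of the nonce check, with a local function standing in for the remote peer:

    use rand::{rngs::OsRng, Rng};

    struct Ping { nonce: u16 }
    struct Pong { nonce: u16 }

    /// Stand-in for the remote side: a well-behaved peer echoes the nonce.
    fn peer_reply(ping: &Ping, well_behaved: bool) -> Pong {
        if well_behaved {
            Pong { nonce: ping.nonce }
        } else {
            Pong { nonce: ping.nonce.wrapping_add(1) }
        }
    }

    fn main() {
        let nonce: u16 = OsRng.gen();
        let ping = Ping { nonce };

        let pong = peer_reply(&ping, true);
        // The check the protocol performs before trusting the heartbeat:
        if pong.nonce != nonce {
            eprintln!("wrong nonce in pingpong, disconnecting");
            return
        }
        println!("pong ok, channel is alive");
    }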
- pub async fn register(&self, session_flags: SessionBitflag, constructor: C) + /// `add_protocol()?` + pub async fn register(&self, session_flags: SessionBitFlag, constructor: C) where C: 'static + Fn(ChannelPtr, P2pPtr) -> F + Send + Sync, F: 'static + Future + Send, @@ -54,28 +48,30 @@ impl ProtocolRegistry { let constructor = move |channel, p2p| { Box::pin(constructor(channel, p2p)) as BoxFuture<'static, ProtocolBasePtr> }; - self.protocol_constructors.lock().await.push((session_flags, Box::new(constructor))); + + self.constructors.lock().await.push((session_flags, Box::new(constructor))); } pub async fn attach( &self, - selector_id: SessionBitflag, + selector_id: SessionBitFlag, channel: ChannelPtr, p2p: P2pPtr, ) -> Vec { - let mut protocols: Vec = Vec::new(); - for (session_flags, construct) in self.protocol_constructors.lock().await.iter() { + let mut protocols = vec![]; + + for (session_flags, construct) in self.constructors.lock().await.iter() { // Skip protocols that are not registered for this session if selector_id & session_flags == 0 { debug!(target: "net::protocol_registry", "Skipping {selector_id:#b}, {session_flags:#b}"); continue } - let protocol: ProtocolBasePtr = construct(channel.clone(), p2p.clone()).await; + let protocol = construct(channel.clone(), p2p.clone()).await; debug!(target: "net::protocol_registry", "Attached {}", protocol.name()); - - protocols.push(protocol) + protocols.push(protocol); } + protocols } } diff --git a/src/net/protocol/protocol_seed.rs b/src/net/protocol/protocol_seed.rs index 089db89a1..f6d4ea74c 100644 --- a/src/net/protocol/protocol_seed.rs +++ b/src/net/protocol/protocol_seed.rs @@ -16,87 +16,98 @@ * along with this program. If not, see . */ -use std::sync::Arc; - +use async_std::sync::Arc; use async_trait::async_trait; use log::debug; use smol::Executor; -use crate::Result; - use super::{ super::{ - message, message_subscriber::MessageSubscription, ChannelPtr, HostsPtr, P2pPtr, SettingsPtr, + channel::ChannelPtr, + hosts::HostsPtr, + message::{AddrsMessage, GetAddrsMessage}, + message_subscriber::MessageSubscription, + p2p::P2pPtr, + settings::SettingsPtr, }, - ProtocolBase, ProtocolBasePtr, + protocol_base::{ProtocolBase, ProtocolBasePtr}, }; +use crate::Result; -/// Implements the seed protocol. +/// Implements the seed protocol pub struct ProtocolSeed { channel: ChannelPtr, hosts: HostsPtr, settings: SettingsPtr, - addr_sub: MessageSubscription, + addr_sub: MessageSubscription, } +const PROTO_NAME: &str = "ProtocolSeed"; + impl ProtocolSeed { /// Create a new seed protocol. pub async fn init(channel: ChannelPtr, p2p: P2pPtr) -> ProtocolBasePtr { let hosts = p2p.hosts(); let settings = p2p.settings(); - //// Create a subscription to address message. - let addr_sub = channel - .clone() - .subscribe_msg::() - .await - .expect("Missing addr dispatcher!"); + // Create a subscription to address message + let addr_sub = + channel.subscribe_msg::().await.expect("Missing addr dispatcher!"); Arc::new(Self { channel, hosts, settings, addr_sub }) } /// Sends own external addresses over a channel. Imports own external addresses - /// from settings, then adds that addresses to an address message and - /// sends it out over the channel. + /// from settings, then adds those addresses to an addrs message and sends it + /// out over the channel. 
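ProtocolRegistry keeps every registered constructor as a boxed closure returning a BoxFuture, so async constructors with different concrete types can share one Vec. A self-contained sketch of that storage trick, with Strings standing in for ProtocolBasePtr and plain u32 flags for SessionBitFlag:

    use futures::future::{BoxFuture, FutureExt};

    type Constructor = Box<dyn Fn(u32) -> BoxFuture<'static, String> + Send + Sync>;

    struct Registry {
        constructors: Vec<(u32, Constructor)>,
    }

    impl Registry {
        fn new() -> Self {
            Self { constructors: vec![] }
        }

        fn register<C, F>(&mut self, flags: u32, constructor: C)
        where
            C: Fn(u32) -> F + Send + Sync + 'static,
            F: std::future::Future<Output = String> + Send + 'static,
        {
            // Wrap the concrete async constructor so every entry has one type.
            let boxed: Constructor = Box::new(move |id| constructor(id).boxed());
            self.constructors.push((flags, boxed));
        }

        async fn attach(&self, selector: u32, id: u32) -> Vec<String> {
            let mut out = vec![];
            for (flags, construct) in &self.constructors {
                // Skip constructors not registered for this session type.
                if selector & flags == 0 {
                    continue
                }
                out.push(construct(id).await);
            }
            out
        }
    }

    fn main() {
        smol::block_on(async {
            let mut registry = Registry::new();
            registry.register(0b01, |id| async move { format!("ProtocolPing for channel {}", id) });
            registry.register(0b10, |id| async move { format!("ProtocolSeed for channel {}", id) });

            // Only the protocols whose flags match the selector get built.
            let attached = registry.attach(0b01, 7).await;
            println!("{:?}", attached); // ["ProtocolPing for channel 7"]
        });
    }

Boxing the future (rather than the protocol) is what lets the registry stay object-safe while each protocol keeps an ordinary async constructor.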
pub async fn send_self_address(&self) -> Result<()> { + debug!(target: "net::protocol_seed::send_self_address()", "[START]"); // Do nothing if external addresses are not configured - if self.settings.external_addr.is_empty() { + if self.settings.external_addrs.is_empty() { return Ok(()) } - let ext_addrs = self.settings.external_addr.clone(); - debug!(target: "net::protocol_seed::send_self_address()", "ext_addrs={:?}", ext_addrs); - let ext_addr_msg = message::ExtAddrsMessage { ext_addrs }; - self.channel.clone().send(ext_addr_msg).await + let addrs = self.settings.external_addrs.clone(); + debug!( + target: "net::protocol_seed::send_self_address()", + "ext_addrs={:?}, dest={}", addrs, self.channel.address(), + ); + + let ext_addr_msg = AddrsMessage { addrs }; + self.channel.send(&ext_addr_msg).await?; + debug!(target: "net::protocol_seed::send_self_address()", "[END]"); + Ok(()) } } #[async_trait] impl ProtocolBase for ProtocolSeed { /// Starts the seed protocol. Creates a subscription to the address message, - /// then sends our address to the seed server. Sends a get-address - /// message and receives an address message. - async fn start(self: Arc, _executor: Arc>) -> Result<()> { - debug!(target: "net::protocol_seed::start()", "START"); + /// then sends our address to the seed server. Sends a get-address message + /// and receives an address messsage. + async fn start(self: Arc, _ex: Arc>) -> Result<()> { + debug!(target: "net::protocol_seed::start()", "START => address={}", self.channel.address()); - // Send own address to the seed server. + // Send own address to the seed server self.send_self_address().await?; - // Send get address message. - let get_addr = message::GetAddrsMessage {}; - self.channel.clone().send(get_addr).await?; + // Send get address message + let get_addr = GetAddrsMessage { max: self.settings.outbound_connections as u32 }; + self.channel.send(&get_addr).await?; - // Receive addresses. + // Receive addresses let addrs_msg = self.addr_sub.receive().await?; - debug!(target: "net::protocol_seed::start()", "Received {} addrs", addrs_msg.addrs.len() + debug!( + target: "net::protocol_seed::start()", + "Received {} addrs from {}", addrs_msg.addrs.len(), self.channel.address(), ); - self.hosts.store(addrs_msg.addrs.clone()).await; + self.hosts.store(&addrs_msg.addrs).await; - debug!(target: "net::protocol_seed::start()", "END"); + debug!(target: "net::protocol_seed::start()", "END => address={}", self.channel.address()); Ok(()) } fn name(&self) -> &'static str { - "ProtocolSeed" + PROTO_NAME } } diff --git a/src/net/protocol/protocol_version.rs b/src/net/protocol/protocol_version.rs index d87f096b9..e6bf6eb6e 100644 --- a/src/net/protocol/protocol_version.rs +++ b/src/net/protocol/protocol_version.rs @@ -16,164 +16,165 @@ * along with this program. If not, see . */ -use async_std::future::timeout; -use std::{sync::Arc, time::Duration}; +use std::time::Duration; -use log::*; +use async_std::{future::timeout, sync::Arc}; +use log::{debug, error}; use smol::Executor; +use super::super::{ + channel::ChannelPtr, + hosts::HostsPtr, + message::{VerackMessage, VersionMessage}, + message_subscriber::MessageSubscription, + settings::SettingsPtr, +}; use crate::{Error, Result}; -use super::super::{ - message, message_subscriber::MessageSubscription, ChannelPtr, HostsPtr, SettingsPtr, -}; - -/// Implements the protocol version handshake sent out by nodes at the beginning -/// of a connection. 
+/// Implements the protocol version handshake sent out by nodes at +/// the beginning of a connection. pub struct ProtocolVersion { channel: ChannelPtr, - version_sub: MessageSubscription, - verack_sub: MessageSubscription, + version_sub: MessageSubscription, + verack_sub: MessageSubscription, settings: SettingsPtr, hosts: HostsPtr, } impl ProtocolVersion { - /// Create a new version protocol. Makes a version and version - /// acknowledgement subscription, then adds them to a version protocol - /// instance. + /// Create a new version protocol. Makes a version and version ack + /// subscription, then adds them to a version protocol instance. pub async fn new(channel: ChannelPtr, settings: SettingsPtr, hosts: HostsPtr) -> Arc { - // Creates a version subscription. - let version_sub = channel - .clone() - .subscribe_msg::() - .await - .expect("Missing version dispatcher!"); + // Creates a versi5on subscription + let version_sub = + channel.subscribe_msg::().await.expect("Missing version dispatcher!"); - // Creates a version acknowledgement subscription. - let verack_sub = channel - .clone() - .subscribe_msg::() - .await - .expect("Missing verack dispatcher!"); + // Creates a version acknowledgement subscription + let verack_sub = + channel.subscribe_msg::().await.expect("Missing verack dispatcher!"); Arc::new(Self { channel, version_sub, verack_sub, settings, hosts }) } - /// Start version information exchange. Start the timer. Send version info - /// and wait for version acknowledgement. Wait for version info and send - /// version acknowledgement. + /// Start version information exchange. Start the timer. Send version + /// info and wait for version ack. Wait for version info and send + /// version ack. pub async fn run(self: Arc, executor: Arc>) -> Result<()> { - debug!(target: "net::protocol_version::run()", "START"); + debug!(target: "net::protocol_version::run()", "START => address={}", self.channel.address()); // Start timer // Send version, wait for verack // Wait for version, send verack // Fin. let result = timeout( - Duration::from_secs(self.settings.channel_handshake_seconds.into()), + Duration::from_secs(self.settings.channel_handshake_timeout), self.clone().exchange_versions(executor), ) .await; - if let Err(_e) = result { + if let Err(e) = result { + error!( + target: "net::protocol_version::run()", + "[P2P] Version Exchange failed [{}]: {}", + self.channel.address(), e, + ); + + // Remove from hosts + self.hosts.remove(self.channel.address()).await; + self.channel.stop().await; return Err(Error::ChannelTimeout) } - debug!(target: "net::protocol_version::run()", "END"); + debug!(target: "net::protocol_version::run()", "END => address={}", self.channel.address()); Ok(()) } - /// Send and recieve version information. 
+ /// Send and receive version information async fn exchange_versions(self: Arc, executor: Arc>) -> Result<()> { - debug!(target: "net::protocol_version::exchange_versions()", "START"); + debug!( + target: "net::protocol_version::exchange_versions()", + "START => address={}", self.channel.address(), + ); let send = executor.spawn(self.clone().send_version()); - let recv = executor.spawn(self.recv_version()); + let recv = executor.spawn(self.clone().recv_version()); send.await?; recv.await?; - debug!(target: "net::protocol_version::exchange_versions()", "END"); + debug!( + target: "net::protocol_version::exchange_versions()", + "END => address={}", self.channel.address(), + ); Ok(()) } - /// Send version info and wait for version acknowledgement - /// and ensures the app version is the same, if configured. + /// Send version info and wait for version acknowledgement. + /// Ensures that the app version is the same. async fn send_version(self: Arc) -> Result<()> { - debug!(target: "net::protocol_version::send_version()", "START"); + debug!( + target: "net::protocol_version::send_version()", + "START => address={}", self.channel.address(), + ); - let version = message::VersionMessage { node_id: self.settings.node_id.clone() }; + let version = VersionMessage { node_id: self.settings.node_id.clone() }; + self.channel.send(&version).await?; - self.channel.clone().send(version).await?; - - // Wait for version acknowledgement + // Wait for verack let verack_msg = self.verack_sub.receive().await?; - // Validate peer received version against our version, if configured. - // Seeds version gets ignored. - if !self.settings.seeds.contains(&self.channel.address()) { - match &self.settings.app_version { - Some(app_version) => { - debug!( - target: "net::protocol_version::send_version()", - "App version: {}, received version: {}", - app_version, - verack_msg.app - ); - // Version format: MAJOR.MINOR.PATCH - let app_versions: Vec<&str> = app_version.split('.').collect(); - let verack_msg_versions: Vec<&str> = verack_msg.app.split('.').collect(); - // Check for malformed versions - if app_versions.len() != 3 || verack_msg_versions.len() != 3 { - error!( - target: "net::protocol_version::send_version()", - "Malformed version detected. Disconnecting from channel." - ); - self.hosts.remove(&self.channel.address()).await; - self.channel.stop().await; - return Err(Error::ChannelStopped) - } - // Ignore PATCH version - if app_versions[0] != verack_msg_versions[0] || - app_versions[1] != verack_msg_versions[1] - { - error!( - target: "net::protocol_version::send_version()", - "Wrong app version from ({}). Disconnecting from channel.", - self.channel.address() - ); - self.hosts.remove(&self.channel.address()).await; - self.channel.stop().await; - return Err(Error::ChannelStopped) - } - } - None => { - debug!( - target: "net::protocol_version::send_version()", - "App version not set, ignoring received" - ) - } - } + // Validate peer received version against our version. 
+ // Seeds get ignored + if self.settings.seeds.contains(self.channel.address()) { + debug!(target: "net::protocol_version::send_version()", "Peer is a seed, skipping version"); + debug!(target: "net::protocol_version::send_version()", "END => address={}", self.channel.address()); + return Ok(()) } - debug!(target: "net::protocol_version::send_version()", "END"); + debug!( + target: "net::protocol_version::send_version()", + "App version: {}, Recv version: {}", + self.settings.app_version, verack_msg.app_version, + ); + + // MAJOR and MINOR should be the same. + if self.settings.app_version.major != verack_msg.app_version.major || + self.settings.app_version.minor != verack_msg.app_version.minor + { + error!( + target: "net::protocol_version::send_version()", + "[P2P] Version mismatch from {}. Disconnecting...", + self.channel.address(), + ); + + self.hosts.remove(self.channel.address()).await; + self.channel.stop().await; + return Err(Error::ChannelStopped) + } + + // Versions are compatible Ok(()) } - /// Recieve version info, check the message is okay and send version - /// acknowledgement with app version attached. + /// Receive version info, check the message is okay and send verack + /// with app version attached. async fn recv_version(self: Arc) -> Result<()> { - debug!(target: "net::protocol_version::recv_version()", "START"); + debug!( + target: "net::protocol_version::recv_version()", + "START => address={}", self.channel.address(), + ); + // Receive version message - let version = self.version_sub.receive().await?; - self.channel.set_remote_node_id(version.node_id.clone()).await; + let _version = self.version_sub.receive().await?; + //self.channel.set_remote_node_id(version.node_id.clone()).await; - // Send version acknowledgement - let verack = - message::VerackMessage { app: self.settings.app_version.clone().unwrap_or_default() }; - self.channel.clone().send(verack).await?; + // Send verack + let verack = VerackMessage { app_version: self.settings.app_version.clone() }; + self.channel.send(&verack).await?; - debug!(target: "net::protocol_version::recv_version()", "END"); + debug!( + target: "net::protocol_version::recv_version()", + "END => address={}", self.channel.address(), + ); Ok(()) } } diff --git a/src/net/session/inbound_session.rs b/src/net/session/inbound_session.rs index 0bcc89b8b..9cabe3f79 100644 --- a/src/net/session/inbound_session.rs +++ b/src/net/session/inbound_session.rs @@ -15,6 +15,14 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ + +//! Inbound connections session. Manages the creation of inbound sessions. +//! Used to create an inbound session and start and stop the session. +//! +//! Class consists of 3 pointers: a weak pointer to the p2p parent class, +//! an acceptor pointer, and a stoppable task pointer. Using a weak pointer +//! to P2P allows us to avoid circular dependencies. 
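For readers new to this layout, a minimal sketch of the weak-pointer pattern the module doc above describes, with simplified placeholder types rather than the real structs from this patch:

use std::sync::{Arc, Mutex, Weak};

struct P2p {
    inbound: Mutex<Option<Arc<InboundSession>>>,
}

struct InboundSession {
    // Weak back-reference: the session does not keep the parent alive,
    // so no Arc reference cycle is formed.
    p2p: Weak<P2p>,
}

impl InboundSession {
    fn p2p(&self) -> Arc<P2p> {
        // Upgrading succeeds for as long as the parent P2p object is alive.
        self.p2p.upgrade().unwrap()
    }
}

fn wire_up(p2p: &Arc<P2p>) -> Arc<InboundSession> {
    let session = Arc::new(InboundSession { p2p: Arc::downgrade(p2p) });
    *p2p.inbound.lock().unwrap() = Some(session.clone());
    session
}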
+ use std::collections::HashMap; use async_std::sync::{Arc, Mutex, Weak}; @@ -24,16 +32,22 @@ use serde_json::json; use smol::Executor; use url::Url; +use super::{ + super::{ + acceptor::{Acceptor, AcceptorPtr}, + channel::ChannelPtr, + p2p::{P2p, P2pPtr}, + }, + Session, SessionBitFlag, SESSION_INBOUND, +}; use crate::{ system::{StoppableTask, StoppableTaskPtr}, Error, Result, }; -use super::{ - super::{Acceptor, AcceptorPtr, ChannelPtr, P2p}, - Session, SessionBitflag, SESSION_INBOUND, -}; +pub type InboundSessionPtr = Arc; +/// Channel debug info struct InboundInfo { channel: ChannelPtr, } @@ -44,7 +58,7 @@ impl InboundInfo { } } -/// Defines inbound connections session. +/// Defines inbound connections session pub struct InboundSession { p2p: Weak, acceptors: Mutex>, @@ -53,39 +67,39 @@ pub struct InboundSession { } impl InboundSession { - /// Create a new inbound session. - pub async fn new(p2p: Weak) -> Arc { + /// Create a new inbound session + pub fn new(p2p: Weak) -> InboundSessionPtr { Arc::new(Self { p2p, - acceptors: Mutex::new(Vec::new()), - accept_tasks: Mutex::new(Vec::new()), - connect_infos: Mutex::new(Vec::new()), + acceptors: Mutex::new(vec![]), + accept_tasks: Mutex::new(vec![]), + connect_infos: Mutex::new(vec![]), }) } - /// Starts the inbound session. Begins by accepting connections and fails if - /// the addresses are not configured. Then runs the channel subscription + /// Starts the inbound session. Begins by accepting connections and fails + /// if the addresses are not configured. Then runs the channel subscription /// loop. - pub async fn start(self: Arc, executor: Arc>) -> Result<()> { - if self.p2p().settings().inbound.is_empty() { - info!(target: "net::inbound_session", "Not configured for accepting incoming connections."); + pub async fn start(self: Arc, ex: Arc>) -> Result<()> { + if self.p2p().settings().inbound_addrs.is_empty() { + info!(target: "net::inbound_session", "[P2P] Not configured for inbound connections."); return Ok(()) } // Activate mutex lock on accept tasks. 
let mut accept_tasks = self.accept_tasks.lock().await; - for (index, accept_addr) in self.p2p().settings().inbound.iter().enumerate() { - self.clone().start_accept_session(index, accept_addr.clone(), executor.clone()).await?; + for (index, accept_addr) in self.p2p().settings().inbound_addrs.iter().enumerate() { + self.clone().start_accept_session(index, accept_addr.clone(), ex.clone()).await?; let task = StoppableTask::new(); task.clone().start( - self.clone().channel_sub_loop(index, executor.clone()), + self.clone().channel_sub_loop(index, ex.clone()), // Ignore stop handler |_| async {}, Error::NetworkServiceStopped, - executor.clone(), + ex.clone(), ); self.connect_infos.lock().await.push(HashMap::new()); @@ -113,79 +127,75 @@ impl InboundSession { self: Arc, index: usize, accept_addr: Url, - executor: Arc>, + ex: Arc>, ) -> Result<()> { - info!(target: "net::inbound_session", "#{} starting inbound session on {}", index, accept_addr); + info!(target: "net::inbound_session", "[P2P] Starting Inbound session #{} on {}", index, accept_addr); // Generate a new acceptor for this inbound session let acceptor = Acceptor::new(Mutex::new(None)); let parent = Arc::downgrade(&self); *acceptor.session.lock().await = Some(Arc::new(parent)); // Start listener - let result = acceptor.clone().start(accept_addr, executor).await; - if let Err(err) = result.clone() { - error!(target: "net::inbound_session", "#{} error starting listener: {}", index, err); + let result = acceptor.clone().start(accept_addr, ex).await; + if let Err(e) = result.clone() { + error!(target: "net::inbound_session", "[P2P] Error starting listener #{}: {}", index, e); + acceptor.stop().await; + } else { + self.acceptors.lock().await.push(acceptor); } - self.acceptors.lock().await.push(acceptor); - result } - /// Wait for all new channels created by the acceptor and call - /// setup_channel() on them. - async fn channel_sub_loop( - self: Arc, - index: usize, - executor: Arc>, - ) -> Result<()> { + /// Wait for all new channels created by the acceptor and call setup_channel() on them. + async fn channel_sub_loop(self: Arc, index: usize, ex: Arc>) -> Result<()> { let channel_sub = self.acceptors.lock().await[index].clone().subscribe().await; + loop { let channel = channel_sub.receive().await?; - // Spawn a detached task to process the channel + // Spawn a detached task to process the channel. // This will just perform the channel setup then exit. - executor.spawn(self.clone().setup_channel(index, channel, executor.clone())).detach(); + ex.spawn(self.clone().setup_channel(index, channel, ex.clone())).detach(); } } - /// Registers the channel. First performs a network handshake and starts the - /// channel. Then starts sending keep-alive and address messages across the - /// channel. + /// Registers the channel. First performs a network handshake and starts the channel. + /// Then starts sending keep-alive and address messages across the channel. 
async fn setup_channel( self: Arc, index: usize, channel: ChannelPtr, - executor: Arc>, + ex: Arc>, ) -> Result<()> { - info!(target: "net::inbound_session", "#{} connected inbound [{}]", index, channel.address()); + info!(target: "net::inbound_session", "[P2P] Connected Inbound #{} [{}]", index, channel.address()); + self.register_channel(channel.clone(), ex.clone()).await?; - self.clone().register_channel(channel.clone(), executor.clone()).await?; + let addr = channel.address().clone(); + self.connect_infos.lock().await[index] + .insert(addr.clone(), InboundInfo { channel: channel.clone() }); - self.manage_channel_for_get_info(index, channel).await; + let stop_sub = channel.subscribe_stop().await?; + stop_sub.receive().await; + + self.connect_infos.lock().await[index].remove(&addr); Ok(()) } - - async fn manage_channel_for_get_info(&self, index: usize, channel: ChannelPtr) { - let key = channel.address(); - self.connect_infos.lock().await[index] - .insert(key.clone(), InboundInfo { channel: channel.clone() }); - - let stop_sub = channel.subscribe_stop().await; - - if stop_sub.is_ok() { - stop_sub.unwrap().receive().await; - } - - self.connect_infos.lock().await[index].remove(&key); - } } #[async_trait] impl Session for InboundSession { + fn p2p(&self) -> P2pPtr { + self.p2p.upgrade().unwrap() + } + + fn type_id(&self) -> SessionBitFlag { + SESSION_INBOUND + } + async fn get_info(&self) -> serde_json::Value { let mut infos = HashMap::new(); - for (index, accept_addr) in self.p2p().settings().inbound.iter().enumerate() { + for (index, accept_addr) in self.p2p().settings().inbound_addrs.iter().enumerate() { let connect_infos = &self.connect_infos.lock().await[index]; for (addr, info) in connect_infos { let json_addr = json!({ "accept_addr": accept_addr }); @@ -193,16 +203,7 @@ impl Session for InboundSession { infos.insert(addr.to_string(), info); } } - json!({ - "connected": infos, - }) - } - fn p2p(&self) -> Arc { - self.p2p.upgrade().unwrap() - } - - fn type_id(&self) -> SessionBitflag { - SESSION_INBOUND + json!({ "connected": infos }) } } diff --git a/src/net/session/manual_session.rs b/src/net/session/manual_session.rs index d9606877d..5f06e7095 100644 --- a/src/net/session/manual_session.rs +++ b/src/net/session/manual_session.rs @@ -16,26 +16,42 @@ * along with this program. If not, see . */ -use async_std::sync::{Arc, Mutex, Weak}; +//! Manual connections session. Manages the creation of manual sessions. +//! Used to create a manual session and to stop and start the session. +//! +//! A manual session is a type of outbound session in which we attempt +//! connection to a predefined set of peers. +//! +//! Class consists of a weak pointer to the p2p interface and a vector of +//! outbound connection slots. Using a weak pointer to p2p allows us to +//! avoid circular dependencies. The vector of slots is wrapped in a mutex +//! lock. This is switched on every time we instantiate a connection slot +//! and ensures that no other part of the program uses the slots at the +//! same time.
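As a rough, hedged sketch of how a predefined peer set could be fed to such a session, using the field names of the `Settings` struct further down in this patch (the helper function and the addresses are purely illustrative, and `Settings` is assumed to be in scope from src/net/settings.rs):

use url::Url;

// Hypothetical helper: build settings with two manually-pinned peers.
fn manual_peer_settings() -> Settings {
    Settings {
        // Peers a manual session would keep trying to reach.
        peers: vec![
            Url::parse("tcp://127.0.0.1:25551").unwrap(),
            Url::parse("tcp+tls://peer.example.org:25551").unwrap(),
        ],
        // 0 means "retry forever", per the manual_attempt_limit docs.
        manual_attempt_limit: 0,
        ..Default::default()
    }
}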
+use async_std::sync::{Arc, Mutex, Weak}; use async_trait::async_trait; use log::{info, warn}; -use serde_json::json; use smol::Executor; use url::Url; +use super::{ + super::{ + channel::ChannelPtr, + connector::Connector, + p2p::{P2p, P2pPtr}, + }, + Session, SessionBitFlag, SESSION_MANUAL, +}; use crate::{ - net::transport::TransportName, - system::{StoppableTask, StoppableTaskPtr, Subscriber, SubscriberPtr, Subscription}, + system::{StoppableTask, StoppableTaskPtr, Subscriber, SubscriberPtr}, util::async_util::sleep, Error, Result, }; -use super::{ - super::{ChannelPtr, Connector, P2p}, - Session, SessionBitflag, SESSION_MANUAL, -}; +pub type ManualSessionPtr = Arc; +/// Defines manual connections session. pub struct ManualSession { p2p: Weak, connect_slots: Mutex>, @@ -46,17 +62,17 @@ pub struct ManualSession { } impl ManualSession { - /// Create a new inbound session. - pub fn new(p2p: Weak) -> Arc { + /// Create a new manual session. + pub fn new(p2p: Weak) -> ManualSessionPtr { Arc::new(Self { p2p, - connect_slots: Mutex::new(Vec::new()), + connect_slots: Mutex::new(vec![]), channel_subscriber: Subscriber::new(), notify: Mutex::new(false), }) } - /// Stop the outbound session. + /// Stops the manual session. pub async fn stop(&self) { let connect_slots = &*self.connect_slots.lock().await; @@ -65,115 +81,115 @@ impl ManualSession { } } - pub async fn connect(self: Arc, addr: &Url, executor: Arc>) { + /// Connect the manual session to the given address + pub async fn connect(self: Arc, addr: Url, ex: Arc>) { let task = StoppableTask::new(); task.clone().start( - self.clone().channel_connect_loop(addr.clone(), executor.clone()), + self.clone().channel_connect_loop(addr, ex.clone()), // Ignore stop handler |_| async {}, Error::NetworkServiceStopped, - executor.clone(), + ex, ); self.connect_slots.lock().await.push(task); } + /// Creates a connector object and tries to connect using it pub async fn channel_connect_loop( self: Arc, addr: Url, - executor: Arc>, + ex: Arc>, ) -> Result<()> { let parent = Arc::downgrade(&self); - let settings = self.p2p().settings(); - let connector = Connector::new(settings.clone(), Arc::new(parent)); let attempts = settings.manual_attempt_limit; let mut remaining = attempts; - // Retrieve preferent outbound transports - let outbound_transports = &settings.outbound_transports; - - // Check that addr transport is in configured outbound transport - let addr_transport = TransportName::try_from(addr.clone())?; - let transports = if outbound_transports.contains(&addr_transport) { - vec![addr_transport] - } else { - warn!(target: "net::manual_session", "Manual outbound address {} transport is not in accepted outbound transports, will try with: {:?}", addr, outbound_transports); - outbound_transports.clone() - }; + // Add the peer to list of pending channels + self.p2p().add_pending(&addr).await; + // Loop forever if attempts==0, otherwise loop attempts number of times. 
+ let mut tried_attempts = 0; loop { - // Loop forever if attempts is 0 - // Otherwise loop attempts number of times + tried_attempts += 1; + info!( + target: "net::manual_session", + "[P2P] Connecting to manual outbound [{}] (attempt #{})", + addr, tried_attempts, + ); + match connector.connect(addr.clone()).await { + Ok(channel) => { + info!( + target: "net::manual_session", + "[P2P] Manual outbound connected [{}]", addr, + ); + + let stop_sub = + channel.subscribe_stop().await.expect("Channel should not be stopped"); + + // Register the new channel + self.register_channel(channel.clone(), ex.clone()).await?; + + // Channel is now connected but not yet setup + // Remove pending lock since register_channel will add the channel to p2p + self.p2p().remove_pending(&addr).await; + + // Notify that channel processing has finished + if *self.notify.lock().await { + self.channel_subscriber.notify(Ok(channel)).await; + } + + // Wait for channel to close + stop_sub.receive().await; + info!( + target: "net::manual_session", + "[P2P] Manual outbound disconnected [{}]", addr, + ); + // DEV NOTE: Here we can choose to attempt reconnection again + return Ok(()) + } + Err(e) => { + warn!( + target: "net::manual_session", + "[P2P] Unable to connect to manual outbound [{}]: {}", + addr, e, + ); + } + } + + // Wait and try again. + // TODO: Should we notify about the failure now, or after all attempts + // have failed? + if *self.notify.lock().await { + self.channel_subscriber.notify(Err(Error::ConnectFailed)).await; + } + remaining = if attempts == 0 { 1 } else { remaining - 1 }; if remaining == 0 { break } - self.p2p().add_pending(addr.clone()).await; - - for transport in &transports { - // Replace addr transport - let mut transport_addr = addr.clone(); - transport_addr.set_scheme(&transport.to_scheme())?; - info!(target: "net::manual_session", "Connecting to manual outbound [{}]", transport_addr); - match connector.connect(transport_addr.clone()).await { - Ok(channel) => { - // Blacklist goes here - info!(target: "net::manual_session", "Connected to manual outbound [{}]", transport_addr); - - let stop_sub = channel.subscribe_stop().await; - if stop_sub.is_err() { - continue - } - - self.clone().register_channel(channel.clone(), executor.clone()).await?; - - // Channel is now connected but not yet setup - - // Remove pending lock since register_channel will add the channel to p2p - self.p2p().remove_pending(&addr).await; - - //self.clone().attach_protocols(channel, executor.clone()).await?; - - // Notify that channel processing has been finished - if *self.notify.lock().await { - self.channel_subscriber.notify(Ok(channel)).await; - } - - // Wait for channel to close - stop_sub.unwrap().receive().await; - } - Err(err) => { - info!(target: "net::manual_session", "Unable to connect to manual outbound [{}]: {}", addr, err); - } - } - } - - // Notify that channel processing has been finished (failed) - if *self.notify.lock().await { - self.channel_subscriber.notify(Err(Error::ConnectFailed)).await; - } - - sleep(settings.connect_timeout_seconds.into()).await; + info!( + target: "net::manual_session", + "[P2P] Waiting {} seconds until next manual outbound connection attempt [{}]", + settings.outbound_connect_timeout, addr, + ); + sleep(settings.outbound_connect_timeout).await; } warn!( - target: "net::manual_session", - "Suspending manual connection to [{}] after {} failed attempts.", - &addr, - attempts + target: "net::manual_session", + "[P2P] Suspending manual connection to {} after {} failed attempts", + 
addr, attempts, ); - Ok(()) - } + self.p2p().remove_pending(&addr).await; - /// Subscribe to a channel. - pub async fn subscribe_channel(&self) -> Subscription> { - self.channel_subscriber.clone().subscribe().await + Ok(()) } /// Enable channel_subscriber notifications. @@ -185,38 +201,19 @@ impl ManualSession { pub async fn disable_notify(self: Arc) { *self.notify.lock().await = false; } - - // Starts sending keep-alive and address messages across the channels. - /*async fn attach_protocols( - self: Arc, - channel: ChannelPtr, - executor: Arc>, - ) -> Result<()> { - let hosts = self.p2p().hosts(); - - let protocol_ping = ProtocolPing::new(channel.clone(), self.p2p()); - let protocol_addr = ProtocolAddress::new(channel, hosts).await; - - protocol_ping.start(executor.clone()).await; - protocol_addr.start(executor).await; - - Ok(()) - }*/ } #[async_trait] impl Session for ManualSession { - async fn get_info(&self) -> serde_json::Value { - json!({ - "key": 110 - }) - } - - fn p2p(&self) -> Arc { + fn p2p(&self) -> P2pPtr { self.p2p.upgrade().unwrap() } - fn type_id(&self) -> SessionBitflag { + fn type_id(&self) -> SessionBitFlag { SESSION_MANUAL } + + async fn get_info(&self) -> serde_json::Value { + todo!() + } } diff --git a/src/net/session/mod.rs b/src/net/session/mod.rs index 1493beb5f..b67bcc42d 100644 --- a/src/net/session/mod.rs +++ b/src/net/session/mod.rs @@ -16,77 +16,37 @@ * along with this program. If not, see . */ -use std::sync::{Arc, Weak}; - +use async_std::sync::{Arc, Weak}; use async_trait::async_trait; use log::debug; use smol::Executor; +use super::{channel::ChannelPtr, p2p::P2pPtr, protocol::ProtocolVersion}; use crate::Result; -use super::{p2p::P2pPtr, protocol::ProtocolVersion, ChannelPtr}; - -/// Seed sync session creates a connection to the seed nodes specified in settings. -/// A new seed sync session is created every time we call p2p::start(). The seed -/// sync session loops through all the configured seeds and tries to connect to -/// them using a Connector. Seed sync either connects successfully, -/// fails with an error or times out. -/// -/// If a seed node connects successfully, it runs a version exchange protocol, -/// stores the channel in the p2p list of channels, and disconnects, removing -/// the channel from the channel list. -/// -/// The channel is registered using Session trait method, register_channel(). -/// This invokes the Protocol Registry method attach(). Usually this returns a -/// list of protocols that we loop through and start. In this case, attach() -/// uses the bitflag selector to identify seed sessions and exclude them. -/// -/// The version exchange occurs inside register_channel(). We create a handshake -/// task that runs the version exchange with the function -/// perform_handshake_protocols(). This runs the version exchange protocol, -/// stores the channel in the p2p list of channels, and subscribes to a stop -/// signal. -pub mod seedsync_session; - -pub mod manual_session; - -/// Inbound connections session. Manages the creation of inbound sessions. Used -/// to create an inbound session and start and stop the session. -/// -/// Class consists of 3 pointers: a weak pointer to the p2p parent class, an -/// acceptor pointer, and a stoppable task pointer. Using a weak pointer to P2P -/// allows us to avoid circular dependencies. pub mod inbound_session; - -/// Outbound connections session. Manages the creation of outbound sessions. -/// Used to create an outbound session and stop and start the session. 
-/// -/// Class consists of a weak pointer to the p2p interface and a vector -/// of outbound connection slots. Using a weak pointer to p2p allows us to avoid -/// circular dependencies. The vector of slots is wrapped in a mutex lock. This -/// is switched on everytime we instantiate a connection slot and insures that -/// no other part of the program uses the slots at the same time. +pub use inbound_session::{InboundSession, InboundSessionPtr}; +pub mod manual_session; +pub use manual_session::{ManualSession, ManualSessionPtr}; pub mod outbound_session; +pub use outbound_session::{OutboundSession, OutboundSessionPtr}; +pub mod seedsync_session; +pub use seedsync_session::{SeedSyncSession, SeedSyncSessionPtr}; -// bitwise selectors for the protocol_registry -pub type SessionBitflag = u32; -pub const SESSION_INBOUND: SessionBitflag = 0b0001; -pub const SESSION_OUTBOUND: SessionBitflag = 0b0010; -pub const SESSION_MANUAL: SessionBitflag = 0b0100; -pub const SESSION_SEED: SessionBitflag = 0b1000; -pub const SESSION_ALL: SessionBitflag = 0b1111; - -pub use inbound_session::InboundSession; -pub use manual_session::ManualSession; -pub use outbound_session::OutboundSession; -pub use seedsync_session::SeedSyncSession; +/// Bitwise selectors for the `protocol_registry` +pub type SessionBitFlag = u32; +pub const SESSION_INBOUND: SessionBitFlag = 0b0001; +pub const SESSION_OUTBOUND: SessionBitFlag = 0b0010; +pub const SESSION_MANUAL: SessionBitFlag = 0b0100; +pub const SESSION_SEED: SessionBitFlag = 0b1000; +pub const SESSION_ALL: SessionBitFlag = 0b1111; pub type SessionWeakPtr = Arc>; -/// Removes channel from the list of connected channels when a stop signal is -/// received. -async fn remove_sub_on_stop(p2p: P2pPtr, channel: ChannelPtr) { - debug!(target: "net", "remove_sub_on_stop() [START]"); +/// Removes channel from the list of connected channels when a stop signal +/// is received. +pub async fn remove_sub_on_stop(p2p: P2pPtr, channel: ChannelPtr) { + debug!(target: "net::session::remove_sub_on_stop()", "[START]"); // Subscribe to stop events let stop_sub = channel.clone().subscribe_stop().await; @@ -95,37 +55,40 @@ async fn remove_sub_on_stop(p2p: P2pPtr, channel: ChannelPtr) { stop_sub.unwrap().receive().await; } - debug!(target: "net", - "remove_sub_on_stop(): received stop event. Removing channel {}", - channel.address() + debug!( + target: "net::session::remove_sub_on_stop()", + "Received stop event. Removing channel {}", channel.address(), ); + // Remove channel from p2p p2p.remove(channel).await; - debug!(target: "net", "remove_sub_on_stop() [END]"); + debug!(target: "net::session::remove_sub_on_stop()", "[END]"); } +/// Session trait. Defines methods that are used across sessions. +/// Implements registering the channel and initializing the channel by +/// performing a network handshake. #[async_trait] -/// Session trait. -/// Defines methods that are used across sessions. Implements registering the -/// channel and initializing the channel by performing a network handshake. pub trait Session: Sync { - /// Registers a new channel with the session. Performs a network handshake - /// and starts the channel. - // if we need to pass Self as an Arc we can do so like this: - // pub trait MyTrait: Send + Sync { - // async fn foo(&self, self_: Arc) {} - // } + /// Registers a new channel with the session. + /// Performs a network handshake and starts the channel. 
+ /// If we need to pass `Self` as an `Arc` we can do so like this: + /// ``` + /// pub trait MyTrait: Send + Sync { + /// async fn foo(&self, self_: Arc) {} + /// } + /// ``` async fn register_channel( &self, channel: ChannelPtr, executor: Arc>, ) -> Result<()> { - debug!(target: "net", "Session::register_channel() [START]"); + debug!(target: "net::session::register_channel()", "[START]"); - // Protocols should all be initialized but not started - // We do this so that the protocols can begin receiving and buffering messages - // while the handshake protocol is ongoing. - // They are currently in sleep mode. + // Protocols should all be initialized but not started. + // We do this so that the protocols can begin receiving and buffering + // messages while the handshake protocol is ongoing. They are currently + // in sleep mode. let p2p = self.p2p(); let protocols = p2p.protocol_registry().attach(self.type_id(), channel.clone(), p2p.clone()).await; @@ -144,23 +107,23 @@ pub trait Session: Sync { handshake_task.await?; // Now the channel is ready - debug!(target: "net", "Session handshake complete. Activating remaining protocols"); + debug!(target: "net::session::register_channel()", "Session handshake complete"); + debug!(target: "net::session::register_channel()", "Activating remaining protocols"); - // Now start all the protocols - // They are responsible for managing their own lifetimes and - // correctly self destructing when the channel ends. + // Now start all the protocols. They are responsible for managing their own + // lifetimes and correctly selfdestructing when the channel ends. for protocol in protocols { - // Activate protocol protocol.start(executor.clone()).await?; } - debug!(target: "net", "Session::register_channel() [END]"); + debug!(target: "net::session::register_channel()", "[END]"); + Ok(()) } /// Performs network handshake to initialize channel. Adds the channel to - /// the list of connected channels, and prepares to remove the channel - /// when a stop signal is received. + /// the list of connected channels, and prepares to remove the channel when + /// a stop signal is received. async fn perform_handshake_protocols( &self, protocol_version: Arc, @@ -175,17 +138,19 @@ pub trait Session: Sync { // Add channel to p2p self.p2p().store(channel.clone()).await; - // Subscribe to stop, so can remove from p2p + // Subscribe to stop, so we can remove from p2p executor.spawn(remove_sub_on_stop(self.p2p(), channel)).detach(); // Channel is ready for use Ok(()) } - async fn get_info(&self) -> serde_json::Value; - - /// Returns a pointer to the p2p network interface. + /// Returns a pointer to the p2p network interface fn p2p(&self) -> P2pPtr; - fn type_id(&self) -> u32; + /// Return the session bit flag for the session type + fn type_id(&self) -> SessionBitFlag; + + /// Get network debug info + async fn get_info(&self) -> serde_json::Value; } diff --git a/src/net/session/outbound_session.rs b/src/net/session/outbound_session.rs index 91947640b..1acb2ab6d 100644 --- a/src/net/session/outbound_session.rs +++ b/src/net/session/outbound_session.rs @@ -16,27 +16,41 @@ * along with this program. If not, see . */ -use std::fmt; +//! Outbound connections session. Manages the creation of outbound sessions. +//! Used to create an outbound session and to stop and start the session. +//! +//! Class consists of a weak pointer to the p2p interface and a vector of +//! outbound connection slots. Using a weak pointer to p2p allows us to +//! avoid circular dependencies. 
The vector of slots is wrapped in a mutex +//! lock. This is switched on every time we instantiate a connection slot +//! and ensures that no other part of the program uses the slots at the +//! same time. + +use std::collections::HashSet; use async_std::sync::{Arc, Mutex, Weak}; use async_trait::async_trait; -use log::{debug, error, info, warn}; -use rand::seq::SliceRandom; -use serde_json::{json, Value}; +use log::{debug, error, info}; +use serde_json::json; use smol::Executor; use url::Url; +use super::{ + super::{ + channel::ChannelPtr, + connector::Connector, + message::GetAddrsMessage, + p2p::{P2p, P2pPtr}, + }, + Session, SessionBitFlag, SESSION_OUTBOUND, +}; use crate::{ - net::{message, transport::TransportName}, - system::{StoppableTask, StoppableTaskPtr, Subscriber, SubscriberPtr, Subscription}, - util::async_util, + system::{StoppableTask, StoppableTaskPtr, Subscriber, SubscriberPtr}, + util::async_util::sleep, Error, Result, }; -use super::{ - super::{ChannelPtr, Connector, P2p}, - Session, SessionBitflag, SESSION_OUTBOUND, -}; +pub type OutboundSessionPtr = Arc; #[derive(Clone)] enum OutboundState { @@ -45,8 +59,8 @@ enum OutboundState { Connected, } -impl fmt::Display for OutboundState { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl std::fmt::Display for OutboundState { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, "{}", @@ -96,43 +110,44 @@ impl Default for OutboundInfo { pub struct OutboundSession { p2p: Weak, connect_slots: Mutex>, - slot_info: Mutex>, /// Subscriber used to signal channels processing channel_subscriber: SubscriberPtr>, /// Flag to toggle channel_subscriber notifications notify: Mutex, + /// Channel debug info + slot_info: Mutex>, } impl OutboundSession { /// Create a new outbound session. - pub fn new(p2p: Weak) -> Arc { + pub fn new(p2p: Weak) -> OutboundSessionPtr { Arc::new(Self { p2p, - connect_slots: Mutex::new(Vec::new()), - slot_info: Mutex::new(Vec::new()), + connect_slots: Mutex::new(vec![]), channel_subscriber: Subscriber::new(), notify: Mutex::new(false), + slot_info: Mutex::new(vec![]), }) } /// Start the outbound session. Runs the channel connect loop. - pub async fn start(self: Arc, executor: Arc>) -> Result<()> { - let slots_count = self.p2p().settings().outbound_connections; - info!(target: "net::outbound_session", "Starting {} outbound connection slots.", slots_count); + pub async fn start(self: Arc, ex: Arc>) -> Result<()> { + let n_slots = self.p2p().settings().outbound_connections; + info!(target: "net::outbound_session", "[P2P] Starting {} outbound connection slots.", n_slots); // Activate mutex lock on connection slots. let mut connect_slots = self.connect_slots.lock().await; - self.slot_info.lock().await.resize(slots_count as usize, Default::default()); + self.slot_info.lock().await.resize(n_slots, Default::default()); - for i in 0..slots_count { + for i in 0..n_slots { let task = StoppableTask::new(); task.clone().start( - self.clone().channel_connect_loop(i, executor.clone()), + self.clone().channel_connect_loop(i, ex.clone()), // Ignore stop handler |_| async {}, Error::NetworkServiceStopped, - executor.clone(), + ex.clone(), ); connect_slots.push(task); @@ -141,7 +156,7 @@ impl OutboundSession { Ok(()) } - /// Stop the outbound session. + /// Stops the outbound session. pub async fn stop(&self) { let connect_slots = &*self.connect_slots.lock().await; @@ -153,115 +168,129 @@ impl OutboundSession { /// Creates a connector object and tries to connect using it.
pub async fn channel_connect_loop( self: Arc, - slot_number: u32, - executor: Arc>, + slot_number: usize, + ex: Arc>, ) -> Result<()> { let parent = Arc::downgrade(&self); - connector = Connector::new(self.p2p().settings(), Arc::new(parent)); - // Retrieve preferent outbound transports - let outbound_transports = &self.p2p().settings().outbound_transports; + // Retrieve whitelisted outbound transports + let transports = &self.p2p().settings().allowed_transports; + // This is the main outbound connection loop where we try to establish + // a connection in the slot. The `try_connect` function will block in + // case the connection was successfully established. If it fails, then + // we will wait for a defined number of seconds and try to fill the + // slot again. This function should never exit during the lifetime of + // the P2P network, as it is supposed to represent an outbound slot we + // want to fill. + // The actual connection logic and peer selection is in `try_connect`. + // If the connection is successful, `try_connect` will wait for a stop + // signal and then exit. Once it exits, we'll run `try_connect` again + // and attempt to fill the slot with another peer. loop { - match self - .try_connect(slot_number, executor.clone(), &connector, outbound_transports) - .await - { - Ok(_) => { - info!(target: "net::outbound_session", "#{} slot disconnected", slot_number) + match self.try_connect(slot_number, &connector, transports, ex.clone()).await { + Ok(()) => { + info!( + target: "net::outbound_session", + "[P2P] Outbound slot #{} disconnected", + slot_number + ); } - Err(err) => { - error!(target: "net::outbound_session", "#{} slot connection failed: {}", slot_number, err) + Err(e) => { + error!( + target: "net::outbound_session", + "[P2P] Outbound slot #{} connection failed: {}", + slot_number, e, + ); } } - - async_util::sleep(self.p2p().settings().outbound_retry_seconds).await; } } - /// Start making an outbound connection, using provided connector. - /// Loads a valid address then tries to connect. Once connected, - /// registers the channel, removes it from the list of pending channels, - /// and starts sending messages across the channel, otherwise returns a network error. + /// Start making an outbound connection, using provided [`Connector`]. + /// Tries to find a valid address to connect to, otherwise does peer + /// discovery. The peer discovery loops until some peer we can connect + /// to is found. Once connected, registers the channel, removes it from + /// the list of pending channels, and starts sending messages across the + /// channel. In case of any failures, a network error is returned and the + /// main connect loop (parent of this function) will iterate again.
async fn try_connect( &self, - slot_number: u32, - executor: Arc>, + slot_number: usize, connector: &Connector, - outbound_transports: &Vec, + transports: &[String], + ex: Arc>, ) -> Result<()> { - let addr = self.load_address(slot_number).await?; - info!(target: "net::outbound_session", "#{} processing outbound [{}]", slot_number, addr); - { - let info = &mut self.slot_info.lock().await[slot_number as usize]; - info.addr = Some(addr.clone()); - info.state = OutboundState::Pending; - } + debug!( + target: "net::outbound_session::try_connect()", + "[P2P] Finding a host to connect to for outbound slot #{}", + slot_number, + ); - // Check that addr transport is in configured outbound transport - let addr_transport = TransportName::try_from(addr.clone())?; - let transports = if outbound_transports.contains(&addr_transport) { - vec![addr_transport] - } else { - warn!(target: "net::outbound_session", "#{} address {} transport is not in accepted outbound transports, will try with: {:?}", slot_number, addr, outbound_transports); - outbound_transports.clone() - }; + // Find an address to connect to. We also do peer discovery here if needed. + let addr = self.load_address(slot_number, transports).await?; + info!( + target: "net::outbound_session::try_connect()", + "[P2P] Connecting outbound slot #{} [{}]", + slot_number, addr, + ); - for transport in transports { - // Replace addr transport - let mut transport_addr = addr.clone(); - transport_addr.set_scheme(&transport.to_scheme())?; - info!(target: "net::outbound_session", "#{} connecting to outbound [{}]", slot_number, transport_addr); - match connector.connect(transport_addr.clone()).await { - Ok(channel) => { - // Blacklist goes here - info!(target: "net::outbound_session", "#{} connected to outbound [{}]", slot_number, transport_addr); + match connector.connect(addr.clone()).await { + Ok(channel) => { + info!( + target: "net::outbound_session::try_connect()", + "[P2P] Outbound slot #{} connected [{}]", + slot_number, addr + ); - let stop_sub = channel.subscribe_stop().await; - if stop_sub.is_err() { - continue - } + let stop_sub = + channel.subscribe_stop().await.expect("Channel should not be stopped"); - self.register_channel(channel.clone(), executor.clone()).await?; + // Register the new channel + self.register_channel(channel.clone(), ex.clone()).await?; - // Channel is now connected but not yet setup + // Channel is now connected but not yet setup + // Remove pending lock since register_channel will add the channel to p2p + self.p2p().remove_pending(&addr).await; - // Remove pending lock since register_channel will add the channel to p2p - self.p2p().remove_pending(&addr).await; - { - let info = &mut self.slot_info.lock().await[slot_number as usize]; - info.channel = Some(channel.clone()); - info.state = OutboundState::Connected; - } + dnet!(self, + let info = &mut self.slot_info.lock().await[slot_number]; + info.channel = Some(channel.clone()); + info.state = OutboundState::Connected; + ); - // Notify that channel processing has been finished - if *self.notify.lock().await { - self.channel_subscriber.notify(Ok(channel)).await; - } - - // Wait for channel to close - stop_sub.unwrap().receive().await; - - return Ok(()) - } - Err(err) => { - error!(target: "net::outbound_session", "Unable to connect to outbound [{}]: {}", &transport_addr, err); + // Notify that channel processing has been finished + if *self.notify.lock().await { + self.channel_subscriber.notify(Ok(channel)).await; } + + // Wait for channel to close + 
stop_sub.receive().await; + return Ok(()) + } + + Err(e) => { + error!( + target: "net::outbound_session::try_connect()", + "[P2P] Unable to connect outbound slot #{} [{}]: {}", + slot_number, addr, e + ); } } - // Remove url from hosts + // At this point we failed to connect. We'll drop this peer now. + // TODO: We could potentially implement a quarantine zone for this. self.p2p().hosts().remove(&addr).await; - { - let info = &mut self.slot_info.lock().await[slot_number as usize]; + dnet!(self, + let info = &mut self.slot_info.lock().await[slot_number]; info.addr = None; info.channel = None; info.state = OutboundState::Open; - } + ); - // Notify that channel processing has been finished (failed) + // Notify that channel processing failed if *self.notify.lock().await { self.channel_subscriber.notify(Err(Error::ConnectFailed)).await; } @@ -269,94 +298,139 @@ impl OutboundSession { Err(Error::ConnectFailed) } - /// Loops through host addresses to find a outbound address that we can - /// connect to. Checks whether address is valid by making sure it isn't + /// Loops through host addresses to find an outbound address that we can + /// connect to. Check whether the address is valid by making sure it isn't /// our own inbound address, then checks whether it is already connected - /// (exists) or connecting (pending). If no address was found, we try to - /// to discover new peers. Keeps looping until address is found that passes all checks. - async fn load_address(&self, slot_number: u32) -> Result { + /// (exists) or connecting (pending). If no address was found, we'll attempt + /// to do peer discovery and try to fill the slot again. + async fn load_address(&self, slot_number: usize, transports: &[String]) -> Result { loop { let p2p = self.p2p(); - let self_inbound_addr = p2p.settings().external_addr.clone(); + let retry_sleep = p2p.settings().outbound_connect_timeout; - let mut addrs; - - { - let hosts = p2p.hosts().load_all().await; - addrs = hosts; + if *p2p.peer_discovery_running.lock().await { + debug!( + target: "net::outbound_session::load_address()", + "[P2P] #{} Peer discovery active, waiting {} seconds...", + slot_number, retry_sleep, + ); + sleep(retry_sleep).await; } - addrs.shuffle(&mut rand::thread_rng()); + // Collect hosts + let mut hosts = HashSet::new(); - for addr in addrs { - if p2p.exists(&addr).await? { - continue - } + // If transport mixing is enabled, then for example we're allowed to + // use tor:// to connect to tcp:// and tor+tls:// to connect to tcp+tls://. + // However, **do not** mix tor:// and tcp+tls://, nor tor+tls:// and tcp://. + let transport_mixing = self.p2p().settings().transport_mixing; + macro_rules! 
mix_transport { + ($a:expr, $b:expr) => { + if transports.contains(&$a.to_string()) && transport_mixing { + let mut a_to_b = p2p.hosts().load_with_schemes(&[$b.to_string()]).await; + for addr in a_to_b.iter_mut() { + addr.set_scheme($a).unwrap(); + hosts.insert(addr.clone()); + } + } + }; + } + mix_transport!("tor", "tcp"); + mix_transport!("tor+tls", "tcp+tls"); + mix_transport!("nym", "tcp"); + mix_transport!("nym+tls", "tcp+tls"); - // Check if address is in peers list - if p2p.settings().peers.contains(&addr) { - continue - } - - // Obtain a lock on this address to prevent duplicate connections - if !p2p.add_pending(addr.clone()).await { - continue - } - - if self_inbound_addr.contains(&addr) { - continue - } - - return Ok(addr) + // And now the actual requested transports + for addr in p2p.hosts().load_with_schemes(transports).await { + hosts.insert(addr); } - // Peer discovery - if p2p.settings().peer_discovery { - debug!(target: "net::outbound_session", "#{} No available address found, entering peer discovery mode.", slot_number); - self.peer_discovery(slot_number).await?; - debug!(target: "net::outbound_session", "#{} Discovery mode ended.", slot_number); + // Try to find an unused host in the set. + for host in &hosts { + // Check if we already have this connection established + if p2p.exists(host).await { + continue + } + + // Check if we already have this configured as a manual peer + if p2p.settings().peers.contains(host) { + continue + } + + // Obtain a lock on this address to prevent duplicate connection + if !p2p.add_pending(host).await { + continue + } + + dnet!(self, + let info = &mut self.slot_info.lock().await[slot_number]; + info.addr = Some(host.clone()); + info.state = OutboundState::Pending; + ); + + return Ok(host.clone()) } - // Sleep and then retry - debug!(target: "net::outbound_session", "Retrying connect slot #{}", slot_number); - async_util::sleep(p2p.settings().outbound_retry_seconds).await; + // We didn't find a host to connect to, let's try to find more peers. + info!( + target: "net::outbound_session::load_address()", + "[P2P] Outbound #{}: No peers found. Starting peer discovery...", + slot_number, + ); + // NOTE: A design decision here is to do a sleep inside peer_discovery() + // so that there's a certain period (outbound_connect_timeout) of time + // to send the GetAddr, receive Addrs, and sort things out. By sleeping + // inside peer_discovery, it will block here in the slot sessions, while + // other slots can keep trying to find hosts. This is also why we sleep + // in the beginning of this loop if peer discovery is currently active. + self.peer_discovery(slot_number).await; } } - /// Try to find new peers to update available hosts. - async fn peer_discovery(&self, slot_number: u32) -> Result<()> { - // Check that another slot(thread) already tries to update hosts + /// Activate peer discovery if not active already. This will loop through all + /// connected P2P channels and send out a `GetAddrs` message to request more + /// peers. Other parts of the P2P stack will then handle the incoming addresses + /// and place them in the hosts list. + /// This function will also sleep [`Settings:outbound_connect_timeout`] seconds + /// after broadcasting in order to let the P2P stack receive and work through + /// the addresses it is expecting. 
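A small self-contained illustration of the scheme rewriting that the `mix_transport!` macro above relies on, using only the `url` crate; the peer address below is made up:

use url::Url;

fn rewrite_scheme_example() {
    // A host that was advertised over plain TCP...
    let mut addr = Url::parse("tcp://peer.example.org:25551").unwrap();

    // ...may be dialed over Tor when transport mixing is enabled.
    // set_scheme() only succeeds here because both schemes are non-special.
    addr.set_scheme("tor").unwrap();

    assert_eq!(addr.scheme(), "tor");
    assert_eq!(addr.host_str(), Some("peer.example.org"));
    assert_eq!(addr.port(), Some(25551));
}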
+ async fn peer_discovery(&self, slot_number: usize) { let p2p = self.p2p(); - if !p2p.clone().start_discovery().await { - debug!(target: "net::outbound_session", "#{} P2P already on discovery mode.", slot_number); - return Ok(()) + + if *p2p.peer_discovery_running.lock().await { + info!( + target: "net::outbound_session::peer_discovery()", + "[P2P] Outbound #{}: Peer discovery already active", + slot_number, + ); + return } - debug!(target: "net::outbound_session", "#{} Discovery mode started.", slot_number); + info!( + target: "net::outbound_session::peer_discovery()", + "[P2P] Outbound #{}: Started peer discovery", + slot_number, + ); + *p2p.peer_discovery_running.lock().await = true; - // Getting a random connected channel to ask for peers - let channel = match p2p.clone().random_channel().await { - Some(c) => c, - None => { - debug!(target: "net::outbound_session", "#{} No peers found.", slot_number); - p2p.clone().stop_discovery().await; - return Ok(()) - } - }; + // Broadcast the GetAddrs message to all active channels + let get_addrs = GetAddrsMessage { max: p2p.settings().outbound_connections as u32 }; + info!( + target: "net::outbound_session::peer_discovery()", + "[P2P] Outbound #{}: Broadcasting GetAddrs across active channels", + slot_number, + ); + p2p.broadcast(&get_addrs).await; - // Ask peer - debug!(target: "net::outbound_session", "#{} Asking peer: {}", slot_number, channel.address()); - let get_addr_msg = message::GetAddrsMessage {}; - channel.send(get_addr_msg).await?; - - p2p.stop_discovery().await; - - Ok(()) - } - - /// Subscribe to a channel. - pub async fn subscribe_channel(&self) -> Subscription> { - self.channel_subscriber.clone().subscribe().await + // Now sleep to let the GetAddrs propagate, and hopefully + // in the meantime we'll get some peers. + debug!( + target: "net::outbound_session::peer_discovery()", + "[P2P] Outbound #{}: Sleeping {} seconds", + slot_number, p2p.settings().outbound_connect_timeout, + ); + sleep(p2p.settings().outbound_connect_timeout).await; + *p2p.peer_discovery_running.lock().await = false; } /// Enable channel_subscriber notifications. @@ -372,14 +446,22 @@ impl OutboundSession { #[async_trait] impl Session for OutboundSession { + fn p2p(&self) -> P2pPtr { + self.p2p.upgrade().unwrap() + } + + fn type_id(&self) -> SessionBitFlag { + SESSION_OUTBOUND + } + async fn get_info(&self) -> serde_json::Value { - let mut slots = Vec::new(); + let mut slots = vec![]; for info in &*self.slot_info.lock().await { slots.push(info.get_info().await); } let hosts = self.p2p().hosts().load_all().await; - let addrs: Vec = + let addrs: Vec = hosts.iter().map(|addr| serde_json::Value::String(addr.to_string())).collect(); json!({ @@ -387,12 +469,4 @@ impl Session for OutboundSession { "hosts": serde_json::Value::Array(addrs), }) } - - fn p2p(&self) -> Arc { - self.p2p.upgrade().unwrap() - } - - fn type_id(&self) -> SessionBitflag { - SESSION_OUTBOUND - } } diff --git a/src/net/session/seedsync_session.rs b/src/net/session/seedsync_session.rs index 7ab3bb080..885d42b7b 100644 --- a/src/net/session/seedsync_session.rs +++ b/src/net/session/seedsync_session.rs @@ -16,6 +16,27 @@ * along with this program. If not, see . */ +//! Seed sync session creates a connection to the seed nodes specified in settings. +//! A new seed sync session is created every time we call [`P2p::start()`]. The +//! seed sync session loops through all the configured seeds and tries to connect +//! to them using a [`Connector`]. Seed sync either connects successfully, fails +//! 
with an error, or times out. +//! +//! If a seed node connects successfully, it runs a version exchange protocol, +//! stores the channel in the p2p list of channels, and disconnects, removing +//! the channel from the channel list. +//! +//! The channel is registered using the [`Session::register_channel()`] trait +//! method. This invokes the Protocol Registry method `attach()`. Usually this +//! returns a list of protocols that we loop through and start. In this case, +//! `attach()` uses the bitflag selector to identify seed sessions and exclude +//! them. +//! +//! The version exchange occurs inside `register_channel()`. We create a handshake +//! task that runs the version exchange with the `perform_handshake_protocols()` +//! function. This runs the version exchange protocol, stores the channel in the +//! p2p list of channels, and subscribes to a stop signal. + use std::time::Duration; use async_std::{ @@ -24,161 +45,161 @@ use async_std::{ }; use async_trait::async_trait; use futures::future::join_all; -use log::*; -use serde_json::json; +use log::{debug, error, info, warn}; use smol::Executor; use url::Url; +use super::{ + super::{connector::Connector, p2p::P2p}, + P2pPtr, Session, SessionBitFlag, SESSION_SEED, +}; use crate::Result; -use super::{ - super::{Connector, P2p}, - Session, SessionBitflag, SESSION_SEED, -}; +pub type SeedSyncSessionPtr = Arc; -/// Defines seed connections session. +/// Defines seed connections session pub struct SeedSyncSession { p2p: Weak, } impl SeedSyncSession { - /// Create a new seed sync session instance. - pub fn new(p2p: Weak) -> Arc { + /// Create a new seed sync session instance + pub fn new(p2p: Weak) -> SeedSyncSessionPtr { Arc::new(Self { p2p }) } - /// Start the seed sync session. Creates a new task for every seed connection and - /// starts the seed on each task. + /// Start the seed sync session. Creates a new task for every seed + /// connection and starts the seed on each task. pub async fn start(self: Arc, executor: Arc>) -> Result<()> { - debug!(target: "net::seedsync_session", "SeedSyncSession::start() [START]"); + debug!(target: "net::session::seedsync_session", "SeedSyncSession::start() [START]"); let settings = self.p2p().settings(); if settings.seeds.is_empty() { - warn!(target: "net::seedsync_session", "Skipping seed sync process since no seeds are configured."); - // Store external addresses in hosts explicitly - if !settings.external_addr.is_empty() { - self.p2p().hosts().store(settings.external_addr.clone()).await - } + warn!( + target: "net::session::seedsync_session", + "[P2P] Skipping seed sync process since no seeds are configured.", + ); return Ok(()) } - // if cached addresses then quit - - let mut tasks = Vec::new(); - - // This loops through all the seeds and tries to start them. - // If the seed_query_timeout_seconds times out before they are finished, - // it will return an error.
+ // Gather tasks so we can execute concurrently + let mut tasks = Vec::with_capacity(settings.seeds.len()); + let conn_timeout = Duration::from_secs(settings.seed_query_timeout); for (i, seed) in settings.seeds.iter().enumerate() { - let ex2 = executor.clone(); - let self2 = self.clone(); - let sett2 = settings.clone(); - tasks.push(async move { - let task = self2.clone().start_seed(i, seed.clone(), ex2.clone()); + let ex_ = executor.clone(); + let self_ = self.clone(); - let result = - timeout(Duration::from_secs(sett2.seed_query_timeout_seconds.into()), task) - .await; + tasks.push(async move { + let task = self_.clone().start_seed(i, seed.clone(), ex_.clone()); + let result = timeout(conn_timeout, task).await; match result { Ok(t) => match t { Ok(()) => { - info!(target: "net::seedsync_session", "Seed #{} connected successfully", i) + info!( + target: "net::session::seedsync_session", + "[P2P] Seed #{} connected successfully", i, + ); } Err(err) => { - warn!(target: "net::seedsync_session", "Seed #{} failed for reason {}", i, err) + warn!( + target: "net::session::seedsync_session", + "[P2P] Seed #{} connection failed: {}", i, err, + ); } }, - Err(_err) => error!(target: "net::seedsync_session", "Seed #{} timed out", i), + Err(_) => { + error!( + target: "net::session::seedsync_session", + "[P2P] Seed #{} timed out", i + ); + } } }); } + // Poll concurrently join_all(tasks).await; // Seed process complete if self.p2p().hosts().is_empty().await { - warn!(target: "net::seedsync_session", "Hosts pool still empty after seeding"); + warn!(target: "net::session::seedsync_session", "[P2P] Hosts pool empty after seeding"); } - debug!(target: "net::seedsync_session", "SeedSyncSession::start() [END]"); + debug!(target: "net::session::seedsync_session", "SeedSyncSession::start() [END]"); Ok(()) } - /// Connects to a seed socket address. 
+ /// Connects to a seed socket address async fn start_seed( self: Arc, seed_index: usize, seed: Url, - executor: Arc>, + ex: Arc>, ) -> Result<()> { - debug!(target: "net::seedsync_session", "SeedSyncSession::start_seed(i={}) [START]", seed_index); - let (_hosts, settings) = { - let p2p = self.p2p.upgrade().unwrap(); - (p2p.hosts(), p2p.settings()) - }; + debug!( + target: "net::session::seedsync_session", "SeedSyncSession::start_seed(i={}) [START]", + seed_index + ); + let settings = self.p2p.upgrade().unwrap().settings(); let parent = Arc::downgrade(&self); let connector = Connector::new(settings.clone(), Arc::new(parent)); + match connector.connect(seed.clone()).await { - Ok(channel) => { - // Blacklist goes here + Ok(ch) => { + info!( + target: "net::session::seedsync_session", + "[P2P] Connected seed #{} [{}]", seed_index, seed, + ); - info!(target: "net::seedsync_session", "Connected seed #{} [{}]", seed_index, seed); - - if let Err(err) = - self.clone().register_channel(channel.clone(), executor.clone()).await - { - warn!(target: "net::seedsync_session", "Failure during seed sync session #{} [{}]: {}", seed_index, seed, err); + if let Err(e) = self.clone().register_channel(ch.clone(), ex.clone()).await { + warn!( + target: "net::session::seedsync_session", + "[P2P] Failure during sync seed session #{} [{}]: {}", + seed_index, seed, e, + ); } - info!(target: "net::seedsync_session", "Disconnecting from seed #{} [{}]", seed_index, seed); - channel.stop().await; - - debug!(target: "net::seedsync_session", "SeedSyncSession::start_seed(i={}) [END]", seed_index); - Ok(()) + info!( + target: "net::session::seedsync_session", + "[P2P] Disconnecting from seed #{} [{}]", + seed_index, seed, + ); + ch.stop().await; } - Err(err) => { - warn!(target: "net::seedsync_session", "Failure contacting seed #{} [{}]: {}", seed_index, seed, err); - Err(err) + + Err(e) => { + warn!( + target: "net::session::seedsync_session", + "[P2P] Failure contacting seed #{} [{}]: {}", + seed_index, seed, e + ); + return Err(e) + } } + + debug!( + target: "net::session::seedsync_session", + "SeedSyncSession::start_seed(i={}) [END]", + seed_index + ); + + Ok(()) } - - // Starts keep-alive messages and seed protocol. - /*async fn attach_protocols( - self: Arc, - channel: ChannelPtr, - hosts: HostsPtr, - settings: SettingsPtr, - executor: Arc>, - ) -> Result<()> { - let protocol_ping = ProtocolPing::new(channel.clone(), self.p2p()); - protocol_ping.start(executor.clone()).await; - - let protocol_seed = ProtocolSeed::new(channel.clone(), hosts, settings.clone()); - // This will block until seed process is complete - protocol_seed.start(executor.clone()).await?; - - channel.stop().await; - - Ok(()) - }*/ } #[async_trait] impl Session for SeedSyncSession { - async fn get_info(&self) -> serde_json::Value { - json!({ - "key": 110 - }) - } - - fn p2p(&self) -> Arc { + fn p2p(&self) -> P2pPtr { self.p2p.upgrade().unwrap() } - fn type_id(&self) -> SessionBitflag { + fn type_id(&self) -> SessionBitFlag { SESSION_SEED } + + async fn get_info(&self) -> serde_json::Value { + todo!() + } } diff --git a/src/net/settings.rs b/src/net/settings.rs index 904b384d5..a089fa1be 100644 --- a/src/net/settings.rs +++ b/src/net/settings.rs @@ -16,210 +16,171 @@ * along with this program. If not, see . */ -use std::sync::Arc; - -use serde::Deserialize; +use async_std::sync::Arc; use structopt::StructOpt; -use structopt_toml::StructOptToml; use url::Url; -use crate::net::transport::TransportName; - -/// Atomic pointer to network settings.
+/// Atomic pointer to network settings pub type SettingsPtr = Arc; -/// Default settings for the network. Can be manually configured. -#[derive(Clone, Debug)] +/// P2P network settings. The scope of this is a P2P network instance +/// configured by the library user. +#[derive(Debug, Clone)] pub struct Settings { - /// P2P accept addresses node listens to for inbound connections - pub inbound: Vec, - /// Outbound connection slots number - pub outbound_connections: u32, - /// Manual connections retry limit, 0 for forever looping - pub manual_attempt_limit: u32, - /// Seed connection establishment timeout - pub seed_query_timeout_seconds: u32, - /// Connection establishment timeout - pub connect_timeout_seconds: u32, - /// Exchange versions (handshake) timeout - pub channel_handshake_seconds: u32, - /// Ping-pong exhange execution interval - pub channel_heartbeat_seconds: u32, - /// Try to fill an outbound slot interval - pub outbound_retry_seconds: u64, - /// P2P external addresses node advertises so other peers can reach us - /// and connect to us, as long us inbound addresses are also configured - pub external_addr: Vec, + /// Only used for debugging, compromises privacy when set + pub node_id: String, + /// P2P accept addresses the instance listens on for inbound connections + pub inbound_addrs: Vec, + /// P2P external addresses the instance advertises so other peers can + /// reach us and connect to us, as long as inbound addrs are configured + pub external_addrs: Vec, /// Peer nodes to manually connect to pub peers: Vec, - /// Seed nodes to connect to for peers retrieval and/or advertising our own - /// external address + /// Seed nodes to connect to for peer discovery and/or advertising our + /// own external addresses pub seeds: Vec, - /// Only used for debugging. Compromises privacy when set. - pub node_id: String, - /// Application version, used for verification between peers - pub app_version: Option, - /// Prefered transports for outbound connections - pub outbound_transports: Vec, + /// Application version, used for convenient protocol matching + pub app_version: semver::Version, + /// Whitelisted network transports for outbound connections + pub allowed_transports: Vec, + /// Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`) + pub transport_mixing: bool, + /// Outbound connection slots number, this many connections will be + /// attempted.
(This does not include manual connections) + pub outbound_connections: usize, + /// Manual connections retry limit, 0 for forever looping + pub manual_attempt_limit: usize, + /// Seed connection establishment timeout (in seconds) + pub seed_query_timeout: u64, + /// Outbound connection establishment timeout (in seconds) + pub outbound_connect_timeout: u64, + /// Exchange versions (handshake) timeout (in seconds) + pub channel_handshake_timeout: u64, + /// Ping-pong exchange execution interval (in seconds) + pub channel_heartbeat_interval: u64, /// Allow localnet hosts pub localnet: bool, - /// Enable peer discovery - pub peer_discovery: bool, - /// Enable channel logging - pub channel_log: bool, } impl Default for Settings { fn default() -> Self { + let version = option_env!("CARGO_PKG_VERSION").unwrap_or("0.0.0"); + let app_version = semver::Version::parse(version).unwrap(); + Self { - inbound: Vec::new(), + node_id: String::new(), + inbound_addrs: vec![], + external_addrs: vec![], + peers: vec![], + seeds: vec![], + app_version, + allowed_transports: vec![], + transport_mixing: true, outbound_connections: 0, manual_attempt_limit: 0, - seed_query_timeout_seconds: 8, - connect_timeout_seconds: 10, - channel_handshake_seconds: 4, - channel_heartbeat_seconds: 10, - outbound_retry_seconds: 20, - external_addr: Vec::new(), - peers: Vec::new(), - seeds: Vec::new(), - node_id: String::new(), - app_version: Some(option_env!("CARGO_PKG_VERSION").unwrap_or("").to_string()), - outbound_transports: get_outbound_transports(vec![]), + seed_query_timeout: 30, + outbound_connect_timeout: 15, + channel_handshake_timeout: 4, + channel_heartbeat_interval: 10, localnet: false, - peer_discovery: true, - channel_log: false, } } } +// The following is used so we can have P2P settings configurable +// from TOML files. + /// Defines the network settings. 
-#[derive(Clone, Debug, Deserialize, StructOpt, StructOptToml)] +#[derive(Clone, Debug, serde::Deserialize, structopt::StructOpt, structopt_toml::StructOptToml)] #[structopt()] pub struct SettingsOpt { - /// P2P accept addresses node listens to for inbound connections + /// P2P accept address node listens to for inbound connections #[serde(default)] #[structopt(long = "accept")] pub inbound: Vec, /// Outbound connection slots number #[structopt(long = "slots")] - pub outbound_connections: Option, + pub outbound_connections: Option, - /// P2P external addresses node advertises so other peers can reach us - /// and connect to us, as long us inbound addresses are also configured + /// P2P external addresses node advertises so other peers can + /// reach us and connect to us, as long as inbound addresses + /// are also configured #[serde(default)] #[structopt(long)] - pub external_addr: Vec, + pub external_addrs: Vec, /// Peer nodes to manually connect to #[serde(default)] #[structopt(long)] pub peers: Vec, - /// Seed nodes to connect to for peers retrieval and/or advertising our own - /// external address + /// Seed nodes to connect to for peers retrieval and/or + /// advertising our own external addresses #[serde(default)] #[structopt(long)] pub seeds: Vec, /// Manual connections retry limit #[structopt(skip)] - pub manual_attempt_limit: Option, + pub manual_attempt_limit: Option, - /// Seed connection establishment timeout + /// Seed connection establishment timeout in seconds #[structopt(skip)] - pub seed_query_timeout_seconds: Option, + pub seed_query_timeout: Option, - /// Connection establishment timeout + /// Connection establishment timeout in seconds #[structopt(skip)] - pub connect_timeout_seconds: Option, + pub outbound_connect_timeout: Option, - /// Exchange versions (handshake) timeout + /// Exchange versions (handshake) timeout in seconds #[structopt(skip)] - pub channel_handshake_seconds: Option, + pub channel_handshake_timeout: Option, - /// Ping-pong exhange execution interval + /// Ping-pong exchange execution interval in seconds #[structopt(skip)] - pub channel_heartbeat_seconds: Option, - - /// Try to fill an outbound slot interval - #[structopt(skip)] - pub outbound_retry_seconds: Option, + pub channel_heartbeat_interval: Option, /// Only used for debugging. Compromises privacy when set. 
#[serde(default)] #[structopt(skip)] pub node_id: String, - /// Application version, used for verification between peers - #[serde(default)] - #[structopt(skip)] - pub app_version: Option, - - /// Prefered transports for outbound connections + /// Preferred transports for outbound connections #[serde(default)] #[structopt(long = "transports")] - pub outbound_transports: Vec, + pub allowed_transports: Vec, + + #[structopt(long)] + pub transport_mixing: bool, /// Allow localnet hosts #[serde(default)] #[structopt(long)] pub localnet: bool, - - /// Enable peer discovery - #[serde(default = "default_as_true")] - #[structopt(long)] - pub peer_discovery: bool, - - /// Enable channel logging - #[serde(default)] - #[structopt(long)] - pub channel_log: bool, } impl From for Settings { - fn from(settings_opt: SettingsOpt) -> Self { + fn from(opt: SettingsOpt) -> Self { + let version = option_env!("CARGO_PKG_VERSION").unwrap_or("0.0.0"); + let app_version = semver::Version::parse(version).unwrap(); + Self { - inbound: settings_opt.inbound, - outbound_connections: settings_opt.outbound_connections.unwrap_or(0), - manual_attempt_limit: settings_opt.manual_attempt_limit.unwrap_or(0), - seed_query_timeout_seconds: settings_opt.seed_query_timeout_seconds.unwrap_or(8), - connect_timeout_seconds: settings_opt.connect_timeout_seconds.unwrap_or(10), - channel_handshake_seconds: settings_opt.channel_handshake_seconds.unwrap_or(4), - channel_heartbeat_seconds: settings_opt.channel_heartbeat_seconds.unwrap_or(10), - outbound_retry_seconds: settings_opt.outbound_retry_seconds.unwrap_or(1200), - external_addr: settings_opt.external_addr, - peers: settings_opt.peers, - seeds: settings_opt.seeds, - node_id: settings_opt.node_id, - app_version: settings_opt.app_version, - outbound_transports: get_outbound_transports(settings_opt.outbound_transports), - localnet: settings_opt.localnet, - peer_discovery: settings_opt.peer_discovery, - channel_log: settings_opt.channel_log, + node_id: opt.node_id, + inbound_addrs: opt.inbound, + external_addrs: opt.external_addrs, + peers: opt.peers, + seeds: opt.seeds, + app_version, + allowed_transports: opt.allowed_transports, + transport_mixing: opt.transport_mixing, + outbound_connections: opt.outbound_connections.unwrap_or(0), + manual_attempt_limit: opt.manual_attempt_limit.unwrap_or(0), + seed_query_timeout: opt.seed_query_timeout.unwrap_or(30), + outbound_connect_timeout: opt.outbound_connect_timeout.unwrap_or(15), + channel_handshake_timeout: opt.channel_handshake_timeout.unwrap_or(4), + channel_heartbeat_interval: opt.channel_heartbeat_interval.unwrap_or(10), + localnet: opt.localnet, } } } - -/// Auxiliary function to convert outbound transport `Vec` -/// to `Vec`, using defaults if empty. -pub fn get_outbound_transports(opt_outbound_transports: Vec) -> Vec { - let mut outbound_transports = vec![]; - for transport in opt_outbound_transports { - let transport_name = TransportName::try_from(transport.as_str()).unwrap(); - outbound_transports.push(transport_name); - } - - if outbound_transports.is_empty() { - let tls = TransportName::Tcp(Some("tls".into())); - outbound_transports.push(tls); - } - - outbound_transports -} - -/// Auxiliary function to set serde bool value to true. -fn default_as_true() -> bool { - true -} diff --git a/src/net/transport.rs b/src/net/transport.rs index 2bb0334bf..8127964aa 100644 --- a/src/net/transport.rs +++ b/src/net/transport.rs @@ -16,120 +16,275 @@ * along with this program. If not, see . 
*/ -use std::{net::SocketAddr, time::Duration}; +use std::time::Duration; use async_trait::async_trait; -use futures::prelude::*; -use futures_rustls::{TlsAcceptor, TlsStream}; +use futures::{AsyncRead, AsyncWrite}; use url::Url; -use crate::Result; +use crate::{Error, Result}; -mod upgrade_tls; -pub use upgrade_tls::TlsUpgrade; +/// TLS Upgrade Mechanism +pub(crate) mod tls; -mod tcp; -pub use tcp::TcpTransport; +#[cfg(feature = "p2p-transport-tcp")] +/// TCP Transport +pub(crate) mod tcp; -mod tor; -pub use tor::TorTransport; +#[cfg(feature = "p2p-transport-tor")] +/// Tor transport +pub(crate) mod tor; -mod unix; -pub use unix::UnixTransport; +#[cfg(feature = "p2p-transport-nym")] +/// Nym transport +pub(crate) mod nym; -mod nym; -pub use nym::NymTransport; +/// Dialer variants +#[derive(Debug, Clone)] +pub enum DialerVariant { + #[cfg(feature = "p2p-transport-tcp")] + /// Plain TCP + Tcp(tcp::TcpDialer), -/// A helper function to convert SocketAddr to Url and add scheme -pub(crate) fn socket_addr_to_url(addr: SocketAddr, scheme: &str) -> Result { - let url = Url::parse(&format!("{}://{}", scheme, addr))?; - Ok(url) + #[cfg(feature = "p2p-transport-tcp")] + /// TCP with TLS + TcpTls(tcp::TcpDialer), + + #[cfg(feature = "p2p-transport-tor")] + /// Tor + Tor(tor::TorDialer), + + #[cfg(feature = "p2p-transport-tor")] + /// Tor with TLS + TorTls(tor::TorDialer), + + #[cfg(feature = "p2p-transport-nym")] + /// Nym + Nym(nym::NymDialer), + + #[cfg(feature = "p2p-transport-nym")] + /// Nym with TLS + NymTls(nym::NymDialer), } -/// Used as wrapper for stream used by Transport trait -pub trait TransportStream: AsyncWrite + AsyncRead + Unpin + Send + Sync {} +/// Listener variants +#[derive(Debug, Clone)] +pub enum ListenerVariant { + #[cfg(feature = "p2p-transport-tcp")] + /// Plain TCP + Tcp(tcp::TcpListener), -/// Used as wrapper for listener used by Transport trait -#[async_trait] -pub trait TransportListener: Send + Sync + Unpin { - async fn next(&self) -> Result<(Box, Url)>; + #[cfg(feature = "p2p-transport-tcp")] + /// TCP with TLS + TcpTls(tcp::TcpListener), } -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum TransportName { - Tcp(Option), - Tor(Option), - Nym(Option), - Unix, +/// A dialer that is able to transparently operate over arbitrary transports. +pub struct Dialer { + /// The endpoint to connect to + endpoint: Url, + /// The dialer variant (transport protocol) + variant: DialerVariant, } -impl TransportName { - pub fn to_scheme(&self) -> String { - match self { - Self::Tcp(None) => "tcp".into(), - Self::Tcp(Some(opt)) => format!("tcp+{}", opt), - Self::Tor(None) => "tor".into(), - Self::Tor(Some(opt)) => format!("tor+{}", opt), - Self::Nym(None) => "nym".into(), - Self::Nym(Some(opt)) => format!("nym+{}", opt), - Self::Unix => "unix".into(), +impl Dialer { + /// Instantiate a new [`Dialer`] with the given [`Url`]. + /// Must contain a scheme, host string, and a port. 
+ pub async fn new(endpoint: Url) -> Result { + if endpoint.host_str().is_none() || endpoint.port().is_none() { + return Err(Error::InvalidDialerScheme) + } + + match endpoint.scheme().to_lowercase().as_str() { + #[cfg(feature = "p2p-transport-tcp")] + "tcp" => { + // Build a TCP dialer + let variant = tcp::TcpDialer::new(None).await?; + let variant = DialerVariant::Tcp(variant); + Ok(Self { endpoint, variant }) + } + + #[cfg(feature = "p2p-transport-tcp")] + "tcp+tls" => { + // Build a TCP dialer wrapped with TLS + let variant = tcp::TcpDialer::new(None).await?; + let variant = DialerVariant::TcpTls(variant); + Ok(Self { endpoint, variant }) + } + + #[cfg(feature = "p2p-transport-tor")] + "tor" => { + // Build a Tor dialer + let variant = tor::TorDialer::new().await?; + let variant = DialerVariant::Tor(variant); + Ok(Self { endpoint, variant }) + } + + #[cfg(feature = "p2p-transport-tor")] + "tor+tls" => { + // Build a Tor dialer wrapped with TLS + let variant = tor::TorDialer::new().await?; + let variant = DialerVariant::TorTls(variant); + Ok(Self { endpoint, variant }) + } + + #[cfg(feature = "p2p-transport-nym")] + "nym" => { + // Build a Nym dialer + let variant = nym::NymDialer::new().await?; + let variant = DialerVariant::Nym(variant); + Ok(Self { endpoint, variant }) + } + + #[cfg(feature = "p2p-transport-nym")] + "nym+tls" => { + // Build a Nym dialer wrapped with TLS + let variant = nym::NymDialer::new().await?; + let variant = DialerVariant::NymTls(variant); + Ok(Self { endpoint, variant }) + } + + x => Err(Error::UnsupportedTransport(x.to_string())), } } -} -impl TryFrom<&str> for TransportName { - type Error = crate::Error; + /// Dial an instantiated [`Dialer`]. This creates a connection and returns a stream. + pub async fn dial(&self, timeout: Option) -> Result> { + match &self.variant { + #[cfg(feature = "p2p-transport-tcp")] + DialerVariant::Tcp(dialer) => { + // NOTE: sockaddr here is an array, can contain both ipv4 and ipv6 + let sockaddr = self.endpoint.socket_addrs(|| None)?; + let stream = dialer.do_dial(sockaddr[0], timeout).await?; + Ok(Box::new(stream)) + } - fn try_from(scheme: &str) -> Result { - let transport_name = match scheme { - "tcp" => Self::Tcp(None), - "tcp+tls" | "tls" => Self::Tcp(Some("tls".into())), - "tor" => Self::Tor(None), - "tor+tls" => Self::Tor(Some("tls".into())), - "nym" => Self::Nym(None), - "nym+tls" => Self::Nym(Some("tls".into())), - "unix" => Self::Unix, - n => return Err(crate::Error::UnsupportedTransport(n.into())), - }; - Ok(transport_name) + #[cfg(feature = "p2p-transport-tcp")] + DialerVariant::TcpTls(dialer) => { + let sockaddr = self.endpoint.socket_addrs(|| None)?; + let stream = dialer.do_dial(sockaddr[0], timeout).await?; + let tlsupgrade = tls::TlsUpgrade::new(); + let stream = tlsupgrade.upgrade_dialer_tls(stream).await?; + Ok(Box::new(stream)) + } + + #[cfg(feature = "p2p-transport-tor")] + DialerVariant::Tor(dialer) => { + let host = self.endpoint.host_str().unwrap(); + let port = self.endpoint.port().unwrap(); + let stream = dialer.do_dial(host, port, timeout).await?; + Ok(Box::new(stream)) + } + + #[cfg(feature = "p2p-transport-tor")] + DialerVariant::TorTls(dialer) => { + let host = self.endpoint.host_str().unwrap(); + let port = self.endpoint.port().unwrap(); + let stream = dialer.do_dial(host, port, timeout).await?; + let tlsupgrade = tls::TlsUpgrade::new(); + let stream = tlsupgrade.upgrade_dialer_tls(stream).await?; + Ok(Box::new(stream)) + } + + #[cfg(feature = "p2p-transport-nym")] + DialerVariant::Nym(dialer) => { 
+ todo!(); + } + + #[cfg(feature = "p2p-transport-nym")] + DialerVariant::NymTls(dialer) => { + todo!(); + } + } + } + + /// Return a reference to the `Dialer` endpoint + pub fn endpoint(&self) -> &Url { + &self.endpoint } } -impl TryFrom for TransportName { - type Error = crate::Error; +/// A listener that is able to transparently listen over arbitrary transports. +pub struct Listener { + /// The address to open the listener on + endpoint: Url, + /// The listener variant (transport protocol) + variant: ListenerVariant, +} - fn try_from(url: Url) -> Result { - Self::try_from(url.scheme()) +impl Listener { + /// Instantiate a new [`Listener`] with the given [`Url`]. + /// Must contain a scheme, host string, and a port. + pub async fn new(endpoint: Url) -> Result { + if endpoint.host_str().is_none() || endpoint.port().is_none() { + return Err(Error::InvalidListenerScheme) + } + + match endpoint.scheme().to_lowercase().as_str() { + #[cfg(feature = "p2p-transport-tcp")] + "tcp" => { + // Build a TCP listener + let variant = tcp::TcpListener::new(1024).await?; + let variant = ListenerVariant::Tcp(variant); + Ok(Self { endpoint, variant }) + } + + #[cfg(feature = "p2p-transport-tcp")] + "tcp+tls" => { + // Build a TCP listener wrapped with TLS + let variant = tcp::TcpListener::new(1024).await?; + let variant = ListenerVariant::TcpTls(variant); + Ok(Self { endpoint, variant }) + } + + x => Err(Error::UnsupportedTransport(x.to_string())), + } + } + + /// Listen on an instantiated [`Listener`]. + /// This will open a socket and return the listener. + pub async fn listen(&self) -> Result> { + match &self.variant { + #[cfg(feature = "p2p-transport-tcp")] + ListenerVariant::Tcp(listener) => { + let sockaddr = self.endpoint.socket_addrs(|| None)?; + let l = listener.do_listen(sockaddr[0]).await?; + Ok(Box::new(l)) + } + + #[cfg(feature = "p2p-transport-tcp")] + ListenerVariant::TcpTls(listener) => { + let sockaddr = self.endpoint.socket_addrs(|| None)?; + let l = listener.do_listen(sockaddr[0]).await?; + let tlsupgrade = tls::TlsUpgrade::new(); + let l = tlsupgrade.upgrade_listener_tcp_tls(l).await?; + Ok(Box::new(l)) + } + } + } + + pub fn endpoint(&self) -> &Url { + &self.endpoint } } -/// The `Transport` trait serves as a base for implementing transport protocols. -/// Base transports can optionally be upgraded with TLS in order to support encryption. -/// The implementation of our TLS authentication can be found in the -/// [`upgrade_tls`](TlsUpgrade) module. 
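The removed `Transport` trait below is superseded by the `Dialer`/`Listener` pair above. A minimal sketch of how the new API is driven, modelled on the updated tests/network_transports.rs; the address, the echo task, and the `roundtrip` helper name are illustrative only and assume the async-std/smol test setup used elsewhere in this patch:

    use async_std::{io, io::{ReadExt, WriteExt}, task};
    use darkfi::net::transport::{Dialer, Listener};
    use url::Url;

    async fn roundtrip() -> darkfi::Result<()> {
        // The URL scheme alone selects the transport variant (tcp, tcp+tls, tor, ...).
        let url = Url::parse("tcp://127.0.0.1:5432")?;

        // Listen side: Listener::new() validates the endpoint, listen() binds the socket.
        let listener = Listener::new(url.clone()).await?.listen().await?;
        task::spawn(async move {
            // next() yields a boxed PtStream plus the peer's URL; echo bytes back.
            let (stream, _peer) = listener.next().await.unwrap();
            let (mut rd, mut wr) = smol::io::split(stream);
            io::copy(&mut rd, &mut wr).await.unwrap();
        });

        // Dial side: same URL, optional connect timeout.
        let mut stream = Dialer::new(url).await?.dial(None).await?;
        stream.write_all(b"ping").await?;
        let mut buf = [0u8; 4];
        stream.read_exact(&mut buf).await?;
        Ok(())
    }

Keeping transport selection URL-driven is what lets src/rpc and the p2p sessions drop their per-transport macros further down in this patch.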
-pub trait Transport { - type Acceptor; - type Connector; +/// Wrapper trait for async streams +pub trait PtStream: AsyncRead + AsyncWrite + Unpin + Send {} - type Listener: Future>; - type Dial: Future>; +#[cfg(feature = "p2p-transport-tcp")] +impl PtStream for async_std::net::TcpStream {} - type TlsListener: Future>; - type TlsDialer: Future>>; +#[cfg(feature = "p2p-transport-tcp")] +impl PtStream for async_rustls::TlsStream {} - fn listen_on(self, url: Url) -> Result - where - Self: Sized; +#[cfg(feature = "p2p-transport-tor")] +impl PtStream for arti_client::DataStream {} - fn upgrade_listener(self, acceptor: Self::Acceptor) -> Result - where - Self: Sized; +#[cfg(feature = "p2p-transport-tor")] +impl PtStream for async_rustls::TlsStream {} - fn dial(self, url: Url, timeout: Option) -> Result - where - Self: Sized; - - fn upgrade_dialer(self, stream: Self::Connector) -> Result - where - Self: Sized; +/// Wrapper trait for async listeners +#[async_trait] +pub trait PtListener: Send + Sync + Unpin { + async fn next(&self) -> Result<(Box, Url)>; } diff --git a/src/net/transport/nym.rs b/src/net/transport/nym.rs index e2e0a1e34..356a50e63 100644 --- a/src/net/transport/nym.rs +++ b/src/net/transport/nym.rs @@ -16,114 +16,55 @@ * along with this program. If not, see . */ -use std::{io, net::SocketAddr, pin::Pin, time::Duration}; +use std::time::Duration; -use async_std::net::{TcpListener, TcpStream}; -use fast_socks5::client::{Config, Socks5Stream}; -use futures::prelude::*; -use futures_rustls::{TlsAcceptor, TlsStream}; -use socket2::{Domain, Socket, TcpKeepalive, Type}; +use rand::{rngs::OsRng, RngCore}; use url::Url; -use crate::{Error, Result}; +use crate::{util::encoding::base32, Result}; -use super::{TlsUpgrade, Transport}; +/// Unique, randomly-generated per-connection ID that's used to +/// identify which connection a message belongs to. +#[derive(Clone, Eq, PartialEq, Hash)] +struct ConnectionId([u8; 32]); -#[derive(Clone)] -pub struct NymTransport { - socks_url: Url, -} - -impl NymTransport { - pub fn new() -> Result { - let socks_url = Url::parse("socks5://127.0.0.1:1080")?; - Ok(Self { socks_url }) +impl ConnectionId { + fn generate() -> Self { + let mut bytes = [0u8; 32]; + OsRng.fill_bytes(&mut bytes); + Self(bytes) } - pub async fn do_dial(self, url: Url) -> Result> { - let socks_url_str = self.socks_url.socket_addrs(|| None)?[0].to_string(); - let host = url.host().unwrap().to_string(); - let port = url.port().unwrap_or(80); - let config = Config::default(); - let stream = if !self.socks_url.username().is_empty() && self.socks_url.password().is_some() - { - Socks5Stream::connect_with_password( - socks_url_str, - host, - port, - self.socks_url.username().to_string(), - self.socks_url.password().unwrap().to_string(), - config, - ) - .await? - } else { - Socks5Stream::connect(socks_url_str, host, port, config).await? - }; - Ok(stream) - } - - fn create_socket(&self, socket_addr: SocketAddr) -> io::Result { - let domain = if socket_addr.is_ipv4() { Domain::IPV4 } else { Domain::IPV6 }; - let socket = Socket::new(domain, Type::STREAM, Some(socket2::Protocol::TCP))?; - - if socket_addr.is_ipv6() { - socket.set_only_v6(true)?; - } - - // TODO: Perhaps make these configurable - socket.set_nodelay(true)?; - let keepalive = TcpKeepalive::new().with_time(Duration::from_secs(30)); - socket.set_tcp_keepalive(&keepalive)?; - // TODO: Make sure to disallow running multiple instances of a program using this. 
- socket.set_reuse_port(true)?; - - Ok(socket) - } - - pub async fn do_listen(self, url: Url) -> Result { - let socket_addr = url.socket_addrs(|| None)?[0]; - let socket = self.create_socket(socket_addr)?; - socket.bind(&socket_addr.into())?; - socket.listen(1024)?; - socket.set_nonblocking(true)?; - Ok(TcpListener::from(std::net::TcpListener::from(socket))) + fn from_bytes(bytes: &[u8]) -> Self { + let mut id = [0u8; 32]; + id[..].copy_from_slice(&bytes[0..32]); + ConnectionId(id) } } -impl Transport for NymTransport { - type Acceptor = TcpListener; - type Connector = Socks5Stream; - - type Listener = Pin> + Send>>; - type Dial = Pin> + Send>>; - - type TlsListener = Pin> + Send>>; - type TlsDialer = Pin>> + Send>>; - - fn listen_on(self, url: Url) -> Result { - match url.scheme() { - "nym" | "nym+tls" => {} - x => return Err(Error::UnsupportedTransport(x.to_string())), - } - Ok(Box::pin(self.do_listen(url))) - } - - fn upgrade_listener(self, acceptor: Self::Acceptor) -> Result { - let tlsupgrade = TlsUpgrade::new(); - Ok(Box::pin(tlsupgrade.upgrade_listener_tls(acceptor))) - } - - fn dial(self, url: Url, _timeout: Option) -> Result { - match url.scheme() { - "nym" | "nym+tls" => {} - x => return Err(Error::UnsupportedTransport(x.to_string())), - } - - Ok(Box::pin(self.do_dial(url))) - } - - fn upgrade_dialer(self, connector: Self::Connector) -> Result { - let tlsupgrade = TlsUpgrade::new(); - Ok(Box::pin(tlsupgrade.upgrade_dialer_tls(connector))) +impl std::fmt::Debug for ConnectionId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", base32::encode(false, &self.0).to_ascii_lowercase()) + } +} + +/// Nym Dialer implementation +#[derive(Debug, Clone)] +pub struct NymDialer; + +impl NymDialer { + /// Instantiate a new [`NymDialer`] object + pub(crate) async fn new() -> Result { + Ok(Self {}) + } + + pub(crate) async fn do_dial( + &self, + endpoint: Url, // Recipient + timeout: Option, + ) -> Result<()> { + let id = ConnectionId::generate(); + + Ok(()) } } diff --git a/src/net/transport/tcp.rs b/src/net/transport/tcp.rs index 9c0161327..ba9101ca3 100644 --- a/src/net/transport/tcp.rs +++ b/src/net/transport/tcp.rs @@ -16,118 +16,32 @@ * along with this program. If not, see . 
*/ -use async_std::net::{TcpListener, TcpStream}; -use std::{io, net::SocketAddr, pin::Pin, time::Duration}; +use std::{io, time::Duration}; +use async_rustls::{TlsAcceptor, TlsStream}; +use async_std::net::{SocketAddr, TcpListener as AsyncStdTcpListener, TcpStream}; use async_trait::async_trait; -use futures::prelude::*; -use futures_rustls::{TlsAcceptor, TlsStream}; -use log::{debug, error}; use socket2::{Domain, Socket, TcpKeepalive, Type}; use url::Url; -use super::{socket_addr_to_url, TlsUpgrade, Transport, TransportListener, TransportStream}; -use crate::{Error, Result}; +use super::{PtListener, PtStream}; +use crate::Result; -impl TransportStream for TcpStream {} -impl TransportStream for TlsStream {} - -#[async_trait] -impl TransportListener for TcpListener { - async fn next(&self) -> Result<(Box, Url)> { - let (stream, peer_addr) = match self.accept().await { - Ok((s, a)) => (s, a), - Err(err) => { - error!(target: "net::tcp", "Error listening for connections: {}", err); - return Err(Error::AcceptConnectionFailed(self.local_addr()?.to_string())) - } - }; - let url = socket_addr_to_url(peer_addr, "tcp")?; - Ok((Box::new(stream), url)) - } -} - -#[async_trait] -impl TransportListener for (TlsAcceptor, TcpListener) { - async fn next(&self) -> Result<(Box, Url)> { - let (stream, peer_addr) = match self.1.accept().await { - Ok((s, a)) => (s, a), - Err(err) => { - error!(target: "net::tcp", "Error listening for connections: {}", err); - return Err(Error::AcceptConnectionFailed(self.1.local_addr()?.to_string())) - } - }; - - let stream = self.0.accept(stream).await; - - let url = socket_addr_to_url(peer_addr, "tcp+tls")?; - - if let Err(err) = stream { - error!(target: "net::tcp", "Error wrapping the connection {} with tls: {}", url, err); - return Err(Error::AcceptTlsConnectionFailed(self.1.local_addr()?.to_string())) - } - - Ok((Box::new(TlsStream::Server(stream?)), url)) - } -} - -#[derive(Copy, Clone)] -pub struct TcpTransport { - /// TTL to set for opened sockets, or `None` for default +/// TCP Dialer implementation +#[derive(Debug, Clone)] +pub struct TcpDialer { + /// TTL to set for opened sockets, or `None` for default. ttl: Option, - /// Size of the listen backlog for listen sockets - backlog: i32, } -impl Transport for TcpTransport { - type Acceptor = TcpListener; - type Connector = TcpStream; - - type Listener = Pin> + Send>>; - type Dial = Pin> + Send>>; - - type TlsListener = Pin> + Send>>; - type TlsDialer = Pin>> + Send>>; - - fn listen_on(self, url: Url) -> Result { - match url.scheme() { - "tcp" | "tcp+tls" | "tls" => {} - x => return Err(Error::UnsupportedTransport(x.to_string())), - } - - let socket_addr = url.socket_addrs(|| None)?[0]; - debug!(target: "net::tcp", "{} transport: listening on {}", url.scheme(), socket_addr); - Ok(Box::pin(self.do_listen(socket_addr))) +impl TcpDialer { + /// Instantiate a new [`TcpDialer`] with optional TTL. 
+ pub(crate) async fn new(ttl: Option) -> Result { + Ok(Self { ttl }) } - fn upgrade_listener(self, acceptor: Self::Acceptor) -> Result { - let tlsupgrade = TlsUpgrade::new(); - Ok(Box::pin(tlsupgrade.upgrade_listener_tls(acceptor))) - } - - fn dial(self, url: Url, timeout: Option) -> Result { - match url.scheme() { - "tcp" | "tcp+tls" | "tls" => {} - x => return Err(Error::UnsupportedTransport(x.to_string())), - } - - let socket_addr = url.socket_addrs(|| None)?[0]; - debug!(target: "net::tcp", "{} transport: dialing {}", url.scheme(), socket_addr); - Ok(Box::pin(self.do_dial(socket_addr, timeout))) - } - - fn upgrade_dialer(self, connector: Self::Connector) -> Result { - let tlsupgrade = TlsUpgrade::new(); - Ok(Box::pin(tlsupgrade.upgrade_dialer_tls(connector))) - } -} - -impl TcpTransport { - pub fn new(ttl: Option, backlog: i32) -> Self { - Self { ttl, backlog } - } - - fn create_socket(&self, socket_addr: SocketAddr) -> io::Result { + /// Internal helper function to create a TCP socket. + async fn create_socket(&self, socket_addr: SocketAddr) -> io::Result { let domain = if socket_addr.is_ipv4() { Domain::IPV4 } else { Domain::IPV6 }; let socket = Socket::new(domain, Type::STREAM, Some(socket2::Protocol::TCP))?; @@ -139,30 +53,21 @@ impl TcpTransport { socket.set_ttl(ttl)?; } - // TODO: Perhaps make these configurable socket.set_nodelay(true)?; let keepalive = TcpKeepalive::new().with_time(Duration::from_secs(20)); socket.set_tcp_keepalive(&keepalive)?; - // TODO: Make sure to disallow running multiple instances of a program using this. socket.set_reuse_port(true)?; Ok(socket) } - async fn do_listen(self, socket_addr: SocketAddr) -> Result { - let socket = self.create_socket(socket_addr)?; - socket.bind(&socket_addr.into())?; - socket.listen(self.backlog)?; - socket.set_nonblocking(true)?; - Ok(TcpListener::from(std::net::TcpListener::from(socket))) - } - - async fn do_dial( - self, + /// Internal dial function + pub(crate) async fn do_dial( + &self, socket_addr: SocketAddr, timeout: Option, ) -> Result { - let socket = self.create_socket(socket_addr)?; + let socket = self.create_socket(socket_addr).await?; let connection = if timeout.is_some() { socket.connect_timeout(&socket_addr.into(), timeout.unwrap()) @@ -175,10 +80,83 @@ impl TcpTransport { Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) => {} Err(err) if err.kind() == io::ErrorKind::WouldBlock => {} Err(err) => return Err(err.into()), - }; + } socket.set_nonblocking(true)?; let stream = TcpStream::from(std::net::TcpStream::from(socket)); Ok(stream) } } + +/// TCP Listener implementation +#[derive(Debug, Clone)] +pub struct TcpListener { + /// Size of the listen backlog for listen sockets + backlog: i32, +} + +impl TcpListener { + /// Instantiate a new [`TcpListener`] with given backlog size. + pub async fn new(backlog: i32) -> Result { + Ok(Self { backlog }) + } + + /// Internal helper function to create a TCP socket. 
+ async fn create_socket(&self, socket_addr: SocketAddr) -> io::Result { + let domain = if socket_addr.is_ipv4() { Domain::IPV4 } else { Domain::IPV6 }; + let socket = Socket::new(domain, Type::STREAM, Some(socket2::Protocol::TCP))?; + + if socket_addr.is_ipv6() { + socket.set_only_v6(true)?; + } + + socket.set_nodelay(true)?; + let keepalive = TcpKeepalive::new().with_time(Duration::from_secs(20)); + socket.set_tcp_keepalive(&keepalive)?; + socket.set_reuse_port(true)?; + + Ok(socket) + } + + /// Internal listen function + pub(crate) async fn do_listen(&self, socket_addr: SocketAddr) -> Result { + let socket = self.create_socket(socket_addr).await?; + socket.bind(&socket_addr.into())?; + socket.listen(self.backlog)?; + socket.set_nonblocking(true)?; + Ok(AsyncStdTcpListener::from(std::net::TcpListener::from(socket))) + } +} + +#[async_trait] +impl PtListener for AsyncStdTcpListener { + async fn next(&self) -> Result<(Box, Url)> { + let (stream, peer_addr) = match self.accept().await { + Ok((s, a)) => (s, a), + Err(e) => return Err(e.into()), + }; + + let url = Url::parse(&format!("tcp://{}", peer_addr))?; + Ok((Box::new(stream), url)) + } +} + +#[async_trait] +impl PtListener for (TlsAcceptor, AsyncStdTcpListener) { + async fn next(&self) -> Result<(Box, Url)> { + let (stream, peer_addr) = match self.1.accept().await { + Ok((s, a)) => (s, a), + Err(e) => return Err(e.into()), + }; + + let stream = self.0.accept(stream).await; + + let url = Url::parse(&format!("tcp+tls://{}", peer_addr))?; + + if let Err(e) = stream { + return Err(e.into()) + } + + Ok((Box::new(TlsStream::Server(stream.unwrap())), url)) + } +} diff --git a/src/net/transport/upgrade_tls.rs b/src/net/transport/tls.rs similarity index 92% rename from src/net/transport/upgrade_tls.rs rename to src/net/transport/tls.rs index fdc8deaf1..45210377c 100644 --- a/src/net/transport/upgrade_tls.rs +++ b/src/net/transport/tls.rs @@ -18,9 +18,7 @@ use std::time::SystemTime; -use async_std::{net::TcpListener, sync::Arc}; -use futures::prelude::*; -use futures_rustls::{ +use async_rustls::{ rustls, rustls::{ client::{ServerCertVerified, ServerCertVerifier}, @@ -31,6 +29,7 @@ use futures_rustls::{ }, TlsAcceptor, TlsConnector, TlsStream, }; +use async_std::sync::Arc; use log::error; use rustls_pemfile::pkcs8_private_keys; use x509_parser::{ @@ -201,14 +200,19 @@ impl TlsUpgrade { let secret_key = pkcs8_private_keys(&mut keypair_pem.as_bytes()).unwrap(); let secret_key = rustls::PrivateKey(secret_key[0].clone()); - let altname = base32::encode(false, keypair.pk.as_slice()).to_ascii_lowercase(); - let altnames = vec![altname]; + let altnames = vec![base32::encode(false, keypair.pk.as_slice())]; + let mut cert_params = rcgen::CertificateParams::new(altnames); cert_params.alg = &rcgen::PKCS_ED25519; cert_params.key_pair = Some(rcgen::KeyPair::from_pem(&keypair_pem).unwrap()); + cert_params.extended_key_usages = vec![ + rcgen::ExtendedKeyUsagePurpose::ClientAuth, + rcgen::ExtendedKeyUsagePurpose::ServerAuth, + ]; let certificate = rcgen::Certificate::from_params(cert_params).unwrap(); - let certificate = rustls::Certificate(certificate.serialize_der().unwrap()); + let certificate = certificate.serialize_der().unwrap(); + let certificate = rustls::Certificate(certificate); let client_cert_verifier = Arc::new(ClientCertificateVerifier {}); let server_config = Arc::new( @@ -237,22 +241,24 @@ impl TlsUpgrade { Self { server_config, client_config } } - pub async fn upgrade_listener_tls( - self, - listener: TcpListener, - ) -> Result<(TlsAcceptor, 
TcpListener)> { - Ok((TlsAcceptor::from(self.server_config), listener)) - } - pub async fn upgrade_dialer_tls(self, stream: IO) -> Result> where - IO: AsyncRead + AsyncWrite + Unpin, + IO: super::PtStream, { let server_name = ServerName::try_from("dark.fi").unwrap(); let connector = TlsConnector::from(self.client_config); let stream = connector.connect(server_name, stream).await?; Ok(TlsStream::Client(stream)) } + + // FIXME: Try to find a transparent way for this instead of implementing separately for all + #[cfg(feature = "p2p-transport-tcp")] + pub async fn upgrade_listener_tcp_tls( + self, + listener: async_std::net::TcpListener, + ) -> Result<(TlsAcceptor, async_std::net::TcpListener)> { + Ok((TlsAcceptor::from(self.server_config), listener)) + } } impl Default for TlsUpgrade { diff --git a/src/net/transport/tor.rs b/src/net/transport/tor.rs index d17779cfc..0c20677e6 100644 --- a/src/net/transport/tor.rs +++ b/src/net/transport/tor.rs @@ -16,307 +16,39 @@ * along with this program. If not, see . */ -use std::{ - io, - io::{BufRead, BufReader, Write}, - net::SocketAddr, - pin::Pin, - time::Duration, -}; +use std::time::Duration; -use async_std::{ - net::{TcpListener, TcpStream}, - sync::Arc, -}; -use fast_socks5::client::{Config, Socks5Stream}; -use futures::prelude::*; -use futures_rustls::{TlsAcceptor, TlsStream}; -use socket2::{Domain, Socket, TcpKeepalive, Type}; -use url::Url; +use arti_client::{BootstrapBehavior, DataStream, TorClient}; +use async_std::future; -use crate::{Error, Result}; +use crate::Result; -use super::{TlsUpgrade, Transport, TransportStream}; +/// Tor Dialer implementation +#[derive(Debug, Clone)] +pub struct TorDialer; -/// Implements communication through the tor proxy service. -/// -/// ## Dialing -/// -/// The tor service must be running for dialing to work. Url of it has to be passed to the -/// constructor. -/// -/// ## Listening -/// -/// Two ways of setting up hidden services are allowed: hidden services manually set up by the user -/// in the torc file or ephemereal hidden services created and deleted on the fly. For the latter, -/// the user must set up the tor control port[^controlport]. -/// -/// Having manually configured services forces the program to use pre-defined ports, i.e. it has no -/// way of changing them. -/// -/// Before calling [listen_on][transportlisten] on a local address, make sure that either a hidden -/// service pointing to that address was configured or that [create_ehs][torcreateehs] was called -/// with this address. -/// -/// [^controlport] [Open control port](https://wiki.archlinux.org/title/tor#Open_Tor_ControlPort) -/// -/// ### Warning on cloning -/// Cloning this structure increments the reference count to the already open -/// socket, which means ephemereal hidden services opened with the cloned instance will live as -/// long as there are clones. For this reason, I'd clone it only when you are sure you want this -/// behaviour. Don't be lazy! 
-/// -/// [transportlisten]: Transport -/// [torcreateehs]: TorTransport::create_ehs -#[derive(Clone)] -pub struct TorTransport { - socks_url: Url, - tor_controller: Option, -} - -/// Represents information needed to communicate with the Tor control socket -#[derive(Clone)] -struct TorController { - socket: Arc, // Need to hold this socket open as long as the tor trasport is alive, so ephemeral services are dropped when TorTransport is dropped - auth: String, -} - -/// Contains the configuration to communicate with the Tor Controler -/// -/// When cloned, the socket is not reopened since we use reference count. -/// The hidden services created live as long as clones of the struct. -impl TorController { - /// Creates a new TorTransport - /// - /// # Arguments - /// - /// * `url` - url to connect to the tor control. For example tcp://127.0.0.1:9051 - /// - /// * `auth` - either authentication cookie bytes (32 bytes) as hex in a string - /// or a password as a quoted string. - /// - /// Cookie string: `assert_eq!(auth,"886b9177aec471965abd34b6a846dc32cf617dcff0625cba7a414e31dd4b75a0")` - /// - /// Password string: `assert_eq!(auth,"\"mypassword\"")` - pub fn new(url: Url, auth: String) -> Result { - let socket_addr = url.socket_addrs(|| None)?[0]; - let domain = if socket_addr.is_ipv4() { Domain::IPV4 } else { Domain::IPV6 }; - let socket = Socket::new(domain, Type::STREAM, Some(socket2::Protocol::TCP))?; - if socket_addr.is_ipv6() { - socket.set_only_v6(true)?; - } - - match socket.connect(&socket_addr.into()) { - Ok(()) => {} - Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) => {} - Err(err) if err.kind() == io::ErrorKind::WouldBlock => {} - Err(err) => return Err(err.into()), - }; - Ok(Self { socket: Arc::new(socket), auth }) +impl TorDialer { + /// Instantiate a new [`TorDialer`] object + pub(crate) async fn new() -> Result { + Ok(Self {}) } - /// Creates an ephemeral hidden service pointing to local address, returns onion address - /// - /// # Arguments - /// - /// * `url` - url that the hidden service maps to. - pub fn create_ehs(&self, url: Url) -> Result { - let local_socket = self.socket.try_clone()?; - let mut stream = std::net::TcpStream::from(local_socket); - stream.set_write_timeout(Some(Duration::from_secs(2)))?; - let host = url.host().unwrap(); - let port = url.port().unwrap(); + /// Internal dial function + pub(crate) async fn do_dial( + &self, + host: &str, + port: u16, + timeout: Option, + ) -> Result { + let client = TorClient::builder() + .bootstrap_behavior(BootstrapBehavior::OnDemand) + .create_unbootstrapped()?; - let payload = format!( - "AUTHENTICATE {a}\r\nADD_ONION NEW:ED25519-V3 Flags=DiscardPK Port={p},{h}:{p}\r\n", - a = self.auth, - p = port, - h = host - ); - stream.write_all(payload.as_bytes())?; - // 1s is maybe a bit too much. Gives tor time to reply - stream.set_read_timeout(Some(Duration::from_secs(1)))?; - let mut reader = BufReader::new(stream); - let mut repl = String::new(); - while let Ok(nbytes) = reader.read_line(&mut repl) { - if nbytes == 0 { - break - } + if timeout.is_some() { + let res = future::timeout(timeout.unwrap(), client.connect((host, port))).await?; + return Ok(res?) 
} - let spl: Vec<&str> = repl.split('\n').collect(); - if spl.len() != 4 { - return Err(Error::TorError(format!("Unsuccessful reply from TorControl: {:?}", spl))) - } - - let onion: Vec<&str> = spl[1].split('=').collect(); - if onion.len() != 2 { - return Err(Error::TorError(format!("Unsuccessful reply from TorControl: {:?}", spl))) - } - - let onion = &onion[1][..onion[1].len() - 1]; - let hurl = format!("tcp://{}.onion:{}", onion, port); - Ok(Url::parse(&hurl)?) - } -} - -impl TorTransport { - /// Creates a new TorTransport - /// - /// # Arguments - /// - /// * `socks_url` - url to connect to the tor service. For example socks5://127.0.0.1:9050 - /// - /// * `control_info` - Possibility to open a control connection to create ephemeral hidden - /// services that live as long as the TorTransport. - /// It is a tuple of the control socket url and authentication cookie as string - /// represented in hex. - pub fn new(socks_url: Url, control_info: Option<(Url, String)>) -> Result { - match control_info { - Some(info) => { - let (url, auth) = info; - let tor_controller = Some(TorController::new(url, auth)?); - Ok(Self { socks_url, tor_controller }) - } - None => Ok(Self { socks_url, tor_controller: None }), - } - } - - /// Query the environment for listener Tor variables, or fallback to defaults - pub fn get_listener_env() -> Result<(Url, Url, String)> { - let socks5_url = Url::parse( - &std::env::var("DARKFI_TOR_SOCKS5_URL") - .unwrap_or_else(|_| "socks5://127.0.0.1:9050".to_string()), - )?; - - let torc_url = Url::parse( - &std::env::var("DARKFI_TOR_CONTROL_URL") - .unwrap_or_else(|_| "tcp://127.0.0.1:9051".to_string()), - )?; - - let auth_cookie = std::env::var("DARKFI_TOR_COOKIE"); - if auth_cookie.is_err() { - return Err(Error::TorError( - "Please set the env var DARKFI_TOR_COOKIE to the configured Tor cookie file.\n\ - For example:\n\ - export DARKFI_TOR_COOKIE='/var/lib/tor/control_auth_cookie'" - .to_string(), - )) - } - - Ok((socks5_url, torc_url, auth_cookie.unwrap())) - } - - /// Query the environment for the dialer Tor variables, or fallback to defaults - pub fn get_dialer_env() -> Result { - Ok(Url::parse( - &std::env::var("DARKFI_TOR_SOCKS5_URL") - .unwrap_or_else(|_| "socks5://127.0.0.1:9050".to_string()), - )?) - } - - /// Creates an ephemeral hidden service pointing to local address, returns onion address - /// when successful. - /// - /// # Arguments - /// - /// * `url` - url that the hidden service maps to. - pub fn create_ehs(&self, url: Url) -> Result { - let tor_controller = self.tor_controller.as_ref(); - - if tor_controller.is_none() { - return Err(Error::TorError("No controller configured for this transport".to_string())) - }; - - tor_controller.unwrap().create_ehs(url) - } - - pub async fn do_dial(self, url: Url) -> Result> { - let socks_url_str = self.socks_url.socket_addrs(|| None)?[0].to_string(); - let host = url.host().unwrap().to_string(); - let port = url.port().unwrap_or(80); - let config = Config::default(); - let stream = if !self.socks_url.username().is_empty() && self.socks_url.password().is_some() - { - Socks5Stream::connect_with_password( - socks_url_str, - host, - port, - self.socks_url.username().to_string(), - self.socks_url.password().unwrap().to_string(), - config, - ) - .await? - } else { - Socks5Stream::connect(socks_url_str, host, port, config).await? 
- }; - Ok(stream) - } - - fn create_socket(&self, socket_addr: SocketAddr) -> io::Result { - let domain = if socket_addr.is_ipv4() { Domain::IPV4 } else { Domain::IPV6 }; - let socket = Socket::new(domain, Type::STREAM, Some(socket2::Protocol::TCP))?; - - if socket_addr.is_ipv6() { - socket.set_only_v6(true)?; - } - - // TODO: Perhaps make these configurable - socket.set_nodelay(true)?; - let keepalive = TcpKeepalive::new().with_time(Duration::from_secs(30)); - socket.set_tcp_keepalive(&keepalive)?; - // TODO: Make sure to disallow running multiple instances of a program using this. - socket.set_reuse_port(true)?; - - Ok(socket) - } - - pub async fn do_listen(self, url: Url) -> Result { - let socket_addr = url.socket_addrs(|| None)?[0]; - let socket = self.create_socket(socket_addr)?; - socket.bind(&socket_addr.into())?; - socket.listen(1024)?; - socket.set_nonblocking(true)?; - Ok(TcpListener::from(std::net::TcpListener::from(socket))) - } -} - -impl TransportStream for Socks5Stream {} - -impl Transport for TorTransport { - type Acceptor = TcpListener; - type Connector = Socks5Stream; - - type Listener = Pin> + Send>>; - type Dial = Pin> + Send>>; - - type TlsListener = Pin> + Send>>; - type TlsDialer = Pin>> + Send>>; - - fn listen_on(self, url: Url) -> Result { - match url.scheme() { - "tor" | "tor+tls" => {} - x => return Err(Error::UnsupportedTransport(x.to_string())), - } - Ok(Box::pin(self.do_listen(url))) - } - - fn upgrade_listener(self, acceptor: Self::Acceptor) -> Result { - let tlsupgrade = TlsUpgrade::new(); - Ok(Box::pin(tlsupgrade.upgrade_listener_tls(acceptor))) - } - - fn dial(self, url: Url, _timeout: Option) -> Result { - match url.scheme() { - "tor" | "tor+tls" => {} - "tcp" | "tcp+tls" | "tls" => {} - x => return Err(Error::UnsupportedTransport(x.to_string())), - } - Ok(Box::pin(self.do_dial(url))) - } - - fn upgrade_dialer(self, connector: Self::Connector) -> Result { - let tlsupgrade = TlsUpgrade::new(); - Ok(Box::pin(tlsupgrade.upgrade_dialer_tls(connector))) + Ok(client.connect((host, port)).await?) } } diff --git a/src/net/transport/unix.rs b/src/net/transport/unix.rs deleted file mode 100644 index 789dd84ba..000000000 --- a/src/net/transport/unix.rs +++ /dev/null @@ -1,142 +0,0 @@ -/* This file is part of DarkFi (https://dark.fi) - * - * Copyright (C) 2020-2023 Dyne.org foundation - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -use std::{os::unix::net::SocketAddr, pin::Pin, time::Duration}; - -use async_std::os::unix::net::{UnixListener, UnixStream}; -use async_trait::async_trait; -use futures::prelude::*; -use futures_rustls::{TlsAcceptor, TlsStream}; -use log::{debug, error}; -use url::Url; - -use super::{Transport, TransportListener, TransportStream}; -use crate::{Error, Result}; - -fn unix_socket_addr_to_string(addr: std::os::unix::net::SocketAddr) -> String { - addr.as_pathname() - .unwrap_or(&std::path::PathBuf::from("unix:///")) - .to_str() - .unwrap_or("unix:///") - .into() -} - -impl TransportStream for UnixStream {} - -#[async_trait] -impl TransportListener for UnixListener { - async fn next(&self) -> Result<(Box, Url)> { - let (stream, peer_addr) = match self.accept().await { - Ok((s, a)) => (s, a), - Err(err) => { - error!(target: "net::unix", "Error listening for connections: {}", err); - return Err(Error::AcceptConnectionFailed(unix_socket_addr_to_string( - self.local_addr()?, - ))) - } - }; - let url = Url::parse(&unix_socket_addr_to_string(peer_addr))?; - Ok((Box::new(stream), url)) - } -} - -#[async_trait] -impl TransportListener for (TlsAcceptor, UnixListener) { - async fn next(&self) -> Result<(Box, Url)> { - unimplemented!("TLS not supported for Unix sockets"); - } -} - -#[derive(Copy, Clone)] -pub struct UnixTransport {} - -impl Transport for UnixTransport { - type Acceptor = UnixListener; - type Connector = UnixStream; - - type Listener = Pin> + Send>>; - type Dial = Pin> + Send>>; - - type TlsListener = Pin> + Send>>; - type TlsDialer = Pin>> + Send>>; - - fn listen_on(self, url: Url) -> Result { - match url.scheme() { - "unix" => {} - x => return Err(Error::UnsupportedTransport(x.to_string())), - } - - let socket_path = url.path(); - let socket_addr = SocketAddr::from_pathname(socket_path)?; - debug!(target: "net::unix", "{} transport: listening on {}", url.scheme(), socket_path); - Ok(Box::pin(self.do_listen(socket_addr))) - } - - fn upgrade_listener(self, _acceptor: Self::Acceptor) -> Result { - unimplemented!("TLS not supported for Unix sockets"); - } - - fn dial(self, url: Url, timeout: Option) -> Result { - match url.scheme() { - "unix" => {} - x => return Err(Error::UnsupportedTransport(x.to_string())), - } - - let socket_path = url.path(); - let socket_addr = SocketAddr::from_pathname(socket_path)?; - debug!(target: "net::unix", "{} transport: dialing {}", url.scheme(), socket_path); - Ok(Box::pin(self.do_dial(socket_addr, timeout))) - } - - fn upgrade_dialer(self, _connector: Self::Connector) -> Result { - unimplemented!("TLS not supported for Unix sockets"); - } -} - -impl Default for UnixTransport { - fn default() -> Self { - Self::new() - } -} - -impl UnixTransport { - pub fn new() -> Self { - Self {} - } - - async fn do_listen(self, socket_addr: SocketAddr) -> Result { - // We're a bit rough here and delete the socket. 
- let socket_path = socket_addr.as_pathname().unwrap(); - if std::fs::metadata(socket_path).is_ok() { - std::fs::remove_file(socket_path)?; - } - - let socket = UnixListener::bind(socket_path).await?; - Ok(socket) - } - - async fn do_dial( - self, - socket_addr: SocketAddr, - _timeout: Option, - ) -> Result { - let socket_path = socket_addr.as_pathname().unwrap(); - let stream = UnixStream::connect(&socket_path).await?; - Ok(stream) - } -} diff --git a/src/rpc/client.rs b/src/rpc/client.rs index 2617cbf6a..8156b4d49 100644 --- a/src/rpc/client.rs +++ b/src/rpc/client.rs @@ -27,9 +27,7 @@ use url::Url; use super::jsonrpc::{ErrorCode, JsonError, JsonRequest, JsonResult}; use crate::{ - net::transport::{ - TcpTransport, TorTransport, Transport, TransportName, TransportStream, UnixTransport, - }, + net::transport::{Dialer, PtStream}, system::SubscriberPtr, Error, Result, }; @@ -171,63 +169,17 @@ impl RpcClient { let (result_send, result_recv) = smol::channel::unbounded(); let (stop_send, stop_recv) = smol::channel::unbounded(); - let transport_name = TransportName::try_from(uri.clone())?; - - macro_rules! reqrep { - ($stream:expr, $transport:expr, $upgrade:expr) => {{ - if let Err(err) = $stream { - error!(target: "rpc::client", "JSON-RPC client setup for {} failed: {}", uri, err); - return Err(Error::ConnectFailed) - } - - let stream = $stream?.await; - if let Err(err) = stream { - error!(target: "rpc::client", "JSON-RPC client connection to {} failed: {}", uri, err); - return Err(Error::ConnectFailed) - } - - let stream = stream?; - match $upgrade { - None => { - smol::spawn(Self::reqrep_loop(stream, result_send, data_recv, stop_recv)) - .detach(); - } - Some(u) if u == "tls" => { - let stream = $transport.upgrade_dialer(stream)?.await?; - smol::spawn(Self::reqrep_loop(stream, result_send, data_recv, stop_recv)) - .detach(); - } - Some(u) => return Err(Error::UnsupportedTransportUpgrade(u)), - } - }}; - } - - match transport_name { - TransportName::Tcp(upgrade) => { - let transport = TcpTransport::new(None, 1024); - let stream = transport.dial(uri.clone(), None); - reqrep!(stream, transport, upgrade); - } - TransportName::Tor(upgrade) => { - let socks5_url = TorTransport::get_dialer_env()?; - let transport = TorTransport::new(socks5_url, None)?; - let stream = transport.clone().dial(uri.clone(), None); - reqrep!(stream, transport, upgrade); - } - TransportName::Unix => { - let transport = UnixTransport::new(); - let stream = transport.dial(uri.clone(), None); - reqrep!(stream, transport, None); - } - _ => unimplemented!(), - } + let dialer = Dialer::new(uri.clone()).await?; + // TODO: Revisit the timeout here + let stream = dialer.dial(None).await?; + smol::spawn(Self::reqrep_loop(stream, result_send, data_recv, stop_recv)).detach(); Ok((data_send, result_recv, stop_send)) } /// Internal function that loops on a given stream and multiplexes the data. 
- async fn reqrep_loop( - mut stream: T, + async fn reqrep_loop( + mut stream: Box, result_send: smol::channel::Sender, data_recv: smol::channel::Receiver<(Value, bool)>, stop_recv: smol::channel::Receiver<()>, diff --git a/src/rpc/server.rs b/src/rpc/server.rs index 1cd4723dd..03f925320 100644 --- a/src/rpc/server.rs +++ b/src/rpc/server.rs @@ -25,11 +25,8 @@ use url::Url; use super::jsonrpc::{JsonRequest, JsonResult}; use crate::{ - net::transport::{ - TcpTransport, TorTransport, Transport, TransportListener, TransportName, TransportStream, - UnixTransport, - }, - Error, Result, + net::transport::{Listener, PtListener, PtStream}, + Result, }; /// Asynchronous trait implementing a handler for incoming JSON-RPC requests. @@ -43,7 +40,7 @@ pub trait RequestHandler: Sync + Send { /// Internal accept function that runs inside a loop for accepting incoming /// JSON-RPC requests and passing them to the [`RequestHandler`]. async fn accept( - mut stream: Box, + mut stream: Box, peer_addr: Url, rh: Arc, ) -> Result<()> { @@ -115,7 +112,7 @@ async fn accept( /// Wrapper function around [`accept()`] to take the incoming connection and /// pass it forward. async fn run_accept_loop( - listener: Box, + listener: Box, rh: Arc, ex: Arc>, ) -> Result<()> { @@ -142,63 +139,8 @@ pub async fn listen_and_serve( ) -> Result<()> { debug!(target: "rpc::server", "Trying to bind listener on {}", accept_url); - macro_rules! accept { - ($listener:expr, $transport:expr, $upgrade:expr) => {{ - if let Err(err) = $listener { - error!(target: "rpc::server", "JSON-RPC server setup for {} failed: {}", accept_url, err); - return Err(Error::BindFailed(accept_url.as_str().into())) - } - - let listener = $listener?.await; - if let Err(err) = listener { - error!(target: "rpc::server", "JSON-RPC listener bind to {} failed: {}", accept_url, err); - return Err(Error::BindFailed(accept_url.as_str().into())) - } - - let listener = listener?; - match $upgrade { - None => { - info!(target: "rpc::server", "JSON-RPC listener bound to {}", accept_url); - run_accept_loop(Box::new(listener), rh, ex.clone()).await?; - } - Some(u) if u == "tls" => { - let tls_listener = $transport.upgrade_listener(listener)?.await?; - info!(target: "rpc::server", "JSON-RPC listener bound to {}", accept_url); - run_accept_loop(Box::new(tls_listener), rh, ex.clone()).await?; - } - Some(u) => return Err(Error::UnsupportedTransportUpgrade(u)), - } - }}; - } - - let transport_name = TransportName::try_from(accept_url.clone())?; - match transport_name { - TransportName::Tcp(upgrade) => { - let transport = TcpTransport::new(None, 1024); - let listener = transport.listen_on(accept_url.clone()); - accept!(listener, transport, upgrade); - } - - TransportName::Tor(upgrade) => { - let (socks5_url, torc_url, auth_cookie) = TorTransport::get_listener_env()?; - let auth_cookie = hex::encode(std::fs::read(auth_cookie).unwrap()); - let transport = TorTransport::new(socks5_url, Some((torc_url, auth_cookie)))?; - - // Generate EHS pointing to local address - let hurl = transport.create_ehs(accept_url.clone())?; - info!(target: "rpc::server", "Created ephemeral hidden service: {}", hurl.to_string()); - - let listener = transport.clone().listen_on(accept_url.clone()); - accept!(listener, transport, upgrade); - } - - TransportName::Unix => { - let transport = UnixTransport::new(); - let listener = transport.listen_on(accept_url.clone()); - accept!(listener, transport, None); - } - _ => unimplemented!(), - } + let listener = Listener::new(accept_url).await?.listen().await?; + 
run_accept_loop(listener, rh, ex.clone()).await?; Ok(()) } diff --git a/src/system/stoppable_task.rs b/src/system/stoppable_task.rs index fd1825ba9..c08476f64 100644 --- a/src/system/stoppable_task.rs +++ b/src/system/stoppable_task.rs @@ -23,6 +23,7 @@ use smol::Executor; pub type StoppableTaskPtr = Arc; +#[derive(Debug)] pub struct StoppableTask { stop_send: smol::channel::Sender<()>, stop_recv: smol::channel::Receiver<()>, diff --git a/src/system/subscriber.rs b/src/system/subscriber.rs index 1dac2a12c..824502d0d 100644 --- a/src/system/subscriber.rs +++ b/src/system/subscriber.rs @@ -26,6 +26,7 @@ pub type SubscriberPtr = Arc>; pub type SubscriptionId = u64; +#[derive(Debug)] pub struct Subscription { id: SubscriptionId, recv_queue: smol::channel::Receiver, @@ -54,7 +55,8 @@ impl Subscription { } } -// Simple broadcast (publish-subscribe) class +/// Simple broadcast (publish-subscribe) class +#[derive(Debug)] pub struct Subscriber { subs: Mutex>>, } diff --git a/tests/network_transports.rs b/tests/network_transports.rs index f045a51df..3634e1871 100644 --- a/tests/network_transports.rs +++ b/tests/network_transports.rs @@ -16,66 +16,31 @@ * along with this program. If not, see . */ -use std::{env::var, fs}; - use async_std::{ io, io::{ReadExt, WriteExt}, - stream::StreamExt, task, }; use url::Url; -use darkfi::net::transport::{NymTransport, TcpTransport, TorTransport, Transport, UnixTransport}; - -#[async_std::test] -async fn unix_transport() { - let unix = UnixTransport::new(); - let url = Url::parse("unix:///tmp/darkfi_test.sock").unwrap(); - - let listener = unix.listen_on(url.clone()).unwrap().await.unwrap(); - - task::spawn(async move { - let mut incoming = listener.incoming(); - while let Some(stream) = incoming.next().await { - let stream = stream.unwrap(); - let (reader, writer) = &mut (&stream, &stream); - io::copy(reader, writer).await.unwrap(); - } - }); - - let payload = b"ohai unix"; - - let mut client = unix.dial(url, None).unwrap().await.unwrap(); - client.write_all(payload).await.unwrap(); - let mut buf = vec![0_u8; 9]; - client.read_exact(&mut buf).await.unwrap(); - - std::fs::remove_file("/tmp/darkfi_test.sock").unwrap(); - assert_eq!(buf, payload); -} +use darkfi::net::transport::{Dialer, Listener}; #[async_std::test] async fn tcp_transport() { - let tcp = TcpTransport::new(None, 1024); let url = Url::parse("tcp://127.0.0.1:5432").unwrap(); - - let listener = tcp.listen_on(url.clone()).unwrap().await.unwrap(); - + let listener = Listener::new(url.clone()).await.unwrap().listen().await.unwrap(); task::spawn(async move { - let mut incoming = listener.incoming(); - while let Some(stream) = incoming.next().await { - let stream = stream.unwrap(); - let (reader, writer) = &mut (&stream, &stream); - io::copy(reader, writer).await.unwrap(); - } + let (stream, _) = listener.next().await.unwrap(); + let (mut reader, mut writer) = smol::io::split(stream); + io::copy(&mut reader, &mut writer).await.unwrap(); }); let payload = b"ohai tcp"; - let mut client = tcp.dial(url, None).unwrap().await.unwrap(); + let dialer = Dialer::new(url).await.unwrap(); + let mut client = dialer.dial(None).await.unwrap(); client.write_all(payload).await.unwrap(); - let mut buf = vec![0_u8; 8]; + let mut buf = vec![0u8; 8]; client.read_exact(&mut buf).await.unwrap(); assert_eq!(buf, payload); @@ -83,193 +48,20 @@ async fn tcp_transport() { #[async_std::test] async fn tcp_tls_transport() { - let tcp = TcpTransport::new(None, 1024); let url = Url::parse("tcp+tls://127.0.0.1:5433").unwrap(); - - let 
-    let listener = tcp.listen_on(url.clone()).unwrap().await.unwrap();
-    let (acceptor, listener) = tcp.upgrade_listener(listener).unwrap().await.unwrap();
-
+    let listener = Listener::new(url.clone()).await.unwrap().listen().await.unwrap();
     task::spawn(async move {
-        let mut incoming = listener.incoming();
-        while let Some(stream) = incoming.next().await {
-            let stream = stream.unwrap();
-            let stream = acceptor.accept(stream).await.unwrap();
-            let (mut reader, mut writer) = smol::io::split(stream);
-            match io::copy(&mut reader, &mut writer).await {
-                Ok(_) => {}
-                Err(e) => {
-                    if e.kind() != std::io::ErrorKind::UnexpectedEof {
-                        panic!("{}", e);
-                    }
-                }
-            }
-        }
+        let (stream, _) = listener.next().await.unwrap();
+        let (mut reader, mut writer) = smol::io::split(stream);
+        io::copy(&mut reader, &mut writer).await.unwrap();
     });
 
     let payload = b"ohai tls";
 
-    let client = tcp.dial(url, None).unwrap().await.unwrap();
-    let mut client = tcp.upgrade_dialer(client).unwrap().await.unwrap();
+    let dialer = Dialer::new(url).await.unwrap();
+    let mut client = dialer.dial(None).await.unwrap();
     client.write_all(payload).await.unwrap();
-    let mut buf = vec![0_u8; 8];
-    client.read_exact(&mut buf).await.unwrap();
-
-    assert_eq!(buf, payload);
-}
-
-#[async_std::test]
-#[ignore]
-async fn tor_transport_no_control() {
-    let url = Url::parse("socks5://127.0.0.1:9050").unwrap();
-    let hurl = var("DARKFI_TOR_LOCAL_ADDRESS")
-.expect("Please set the env var DARKFI_TOR_LOCAL_ADDRESS to the configured local address in hidden service. \
-For example: \'export DARKFI_TOR_LOCAL_ADDRESS=\"tcp://127.0.0.1:8080\"\'");
-    let hurl = Url::parse(&hurl).unwrap();
-
-    let onion = var("DARKFI_TOR_PUBLIC_ADDRESS").expect(
-        "Please set the env var DARKFI_TOR_PUBLIC_ADDRESS to the configured onion address. \
-For example: \'export DARKFI_TOR_PUBLIC_ADDRESS=\"tor://abcdefghij234567.onion\"\'",
-    );
-
-    let tor = TorTransport::new(url, None).unwrap();
-    let listener = tor.clone().listen_on(hurl).unwrap().await.unwrap();
-
-    task::spawn(async move {
-        let mut incoming = listener.incoming();
-        while let Some(stream) = incoming.next().await {
-            let stream = stream.unwrap();
-            let (reader, writer) = &mut (&stream, &stream);
-            io::copy(reader, writer).await.unwrap();
-        }
-    });
-
-    let payload = b"ohai tor";
-    let url = Url::parse(&onion).unwrap();
-    let mut client = tor.dial(url, None).unwrap().await.unwrap();
-    client.write_all(payload).await.unwrap();
-    let mut buf = vec![0_u8; 8];
-    client.read_exact(&mut buf).await.unwrap();
-    assert_eq!(buf, payload);
-}
-
-#[async_std::test]
-#[ignore]
-async fn tor_transport_with_control() {
-    let auth_cookie = var("DARKFI_TOR_COOKIE").expect(
-        "Please set the env var DARKFI_TOR_COOKIE to the configured tor cookie file. \
-For example: \'export DARKFI_TOR_COOKIE=\"/var/lib/tor/control_auth_cookie\"\'",
-    );
-    let auth_cookie = hex::encode(fs::read(auth_cookie).unwrap());
-    let socks_url = Url::parse("socks5://127.0.0.1:9050").unwrap();
-    let torc_url = Url::parse("tcp://127.0.0.1:9051").unwrap();
-    let local_url = Url::parse("tcp://127.0.0.1:8787").unwrap();
-
-    let tor = TorTransport::new(socks_url, Some((torc_url, auth_cookie))).unwrap();
-    // generate EHS pointing to local address
-    let hurl = tor.create_ehs(local_url.clone()).unwrap();
-
-    let listener = tor.clone().listen_on(local_url).unwrap().await.unwrap();
-
-    task::spawn(async move {
-        let mut incoming = listener.incoming();
-        while let Some(stream) = incoming.next().await {
-            let stream = stream.unwrap();
-            let (reader, writer) = &mut (&stream, &stream);
-            io::copy(reader, writer).await.unwrap();
-        }
-    });
-
-    let payload = b"ohai tor";
-
-    let mut client = tor.dial(hurl, None).unwrap().await.unwrap();
-    client.write_all(payload).await.unwrap();
-    let mut buf = vec![0_u8; 8];
-    client.read_exact(&mut buf).await.unwrap();
-    assert_eq!(buf, payload);
-}
-
-#[async_std::test]
-#[should_panic(expected = "Socks5Error(ReplyError(HostUnreachable))")]
-#[ignore]
-async fn tor_transport_with_control_dropped() {
-    let auth_cookie = var("DARKFI_TOR_COOKIE").expect(
-        "Please set the env var DARKFI_TOR_COOKIE to the configured tor cookie file. \
-For example: \'export DARKFI_TOR_COOKIE=\"/var/lib/tor/control_auth_cookie\"\'",
-    );
-    let auth_cookie = hex::encode(fs::read(auth_cookie).unwrap());
-    let socks_url = Url::parse("socks5://127.0.0.1:9050").unwrap();
-    let torc_url = Url::parse("tcp://127.0.0.1:9051").unwrap();
-    let local_url = Url::parse("tcp://127.0.0.1:8787").unwrap();
-    let hurl;
-    // We create a new scope for the Transport, to see if when we drop it, the host is still reachable
-    {
-        let tor = TorTransport::new(socks_url.clone(), Some((torc_url, auth_cookie))).unwrap();
-        // generate EHS pointing to local address
-        hurl = tor.create_ehs(local_url.clone()).unwrap();
-        // Nothing is listening, but the host is reachable.
-        // In this case, dialing should fail with Socks5Error(ReplyError(GeneralFailure));
-        // And not with Socks5Error(ReplyError(HostUnreachable))
-    }
-
-    let tor_client = TorTransport::new(socks_url, None).unwrap();
-    // Try to reach the host
-    let _client = tor_client.dial(hurl, None).unwrap().await.unwrap();
-}
-
-#[async_std::test]
-#[ignore]
-async fn nym_transport() {
-    let target_url = Url::parse("nym://127.0.0.1:25553").unwrap();
-
-    let nym = NymTransport::new().unwrap();
-
-    let listener = nym.clone().listen_on(target_url.clone()).unwrap().await.unwrap();
-
-    task::spawn(async move {
-        let mut incoming = listener.incoming();
-        while let Some(stream) = incoming.next().await {
-            let stream = stream.unwrap();
-            let (reader, writer) = &mut (&stream, &stream);
-            io::copy(reader, writer).await.unwrap();
-        }
-    });
-
-    let payload = b"ohai nym";
-
-    let mut client = nym.dial(target_url, None).unwrap().await.unwrap();
-    client.write_all(payload).await.unwrap();
-    let mut buf = vec![0_u8; 8];
-    client.read_exact(&mut buf).await.unwrap();
-
-    assert_eq!(buf, payload);
-}
-
-#[async_std::test]
-#[ignore]
-async fn nym_tls_transport() {
-    let target_url = Url::parse("nym+tls://127.0.0.1:25553").unwrap();
-
-    let nym = NymTransport::new().unwrap();
-
-    let listener = nym.clone().listen_on(target_url.clone()).unwrap().await.unwrap();
-    let (acceptor, listener) = nym.clone().upgrade_listener(listener).unwrap().await.unwrap();
-
-    task::spawn(async move {
-        let mut incoming = listener.incoming();
-        while let Some(stream) = incoming.next().await {
-            let stream = stream.unwrap();
-            let stream = acceptor.accept(stream).await.unwrap();
-            let (mut reader, mut writer) = smol::io::split(stream);
-            io::copy(&mut reader, &mut writer).await.unwrap();
-        }
-    });
-
-    let payload = b"ohai nymtls";
-
-    let client = nym.clone().dial(target_url, None).unwrap().await.unwrap();
-    let mut client = nym.upgrade_dialer(client).unwrap().await.unwrap();
-    client.write_all(payload).await.unwrap();
-    let mut buf = vec![0_u8; 11];
+    let mut buf = vec![0u8; 8];
     client.read_exact(&mut buf).await.unwrap();
 
     assert_eq!(buf, payload);
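
For reference, the round-trip pattern the rewritten tests exercise is sketched below. It is a minimal sketch that only uses the calls appearing in this patch (Listener::new/listen/next, Dialer::new/dial, smol::io::split); the address, payload, and helper name are made up for illustration, and which URL schemes actually resolve depends on the transport features enabled at build time.

    use async_std::io::{self, ReadExt, WriteExt};
    use url::Url;

    use darkfi::net::transport::{Dialer, Listener};

    // Illustrative helper: bind a listener for the given URL, echo one
    // connection back, and dial it with the matching Dialer.
    async fn echo_once(url: Url) {
        // Bind a pluggable-transport listener for the URL's scheme.
        let listener = Listener::new(url.clone()).await.unwrap().listen().await.unwrap();

        async_std::task::spawn(async move {
            // Accept a single connection; next() yields the stream and the peer address.
            let (stream, _peer_addr) = listener.next().await.unwrap();
            let (mut reader, mut writer) = smol::io::split(stream);
            io::copy(&mut reader, &mut writer).await.unwrap();
        });

        // The same dialing code works for any scheme the Dialer supports
        // (e.g. tcp:// or tcp+tls:// as in the tests above).
        let dialer = Dialer::new(url).await.unwrap();
        let mut client = dialer.dial(None).await.unwrap();
        client.write_all(b"ping").await.unwrap();

        let mut buf = vec![0u8; 4];
        client.read_exact(&mut buf).await.unwrap();
        assert_eq!(&buf, b"ping");
    }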