diff --git a/Cargo.lock b/Cargo.lock
index ff9b9c3f6..ad4ec8ac5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1931,7 +1931,6 @@ dependencies = [
"darkfi",
"darkfi-sdk",
"darkfi-serial",
- "darkfi_consensus_contract",
"darkfi_dao_contract",
"darkfi_deployooor_contract",
"darkfi_money_contract",
@@ -2032,28 +2031,6 @@ dependencies = [
"url",
]
-[[package]]
-name = "darkfi_consensus_contract"
-version = "0.4.1"
-dependencies = [
- "blake3 1.5.0",
- "bs58",
- "chacha20poly1305",
- "darkfi",
- "darkfi-contract-test-harness",
- "darkfi-sdk",
- "darkfi-serial",
- "darkfi_money_contract",
- "getrandom 0.2.12",
- "halo2_proofs",
- "log",
- "rand 0.8.5",
- "simplelog",
- "sled",
- "smol",
- "thiserror",
-]
-
[[package]]
name = "darkfi_dao_contract"
version = "0.4.1"
@@ -2114,7 +2091,7 @@ dependencies = [
]
[[package]]
-name = "darkfid2"
+name = "darkfid"
version = "0.4.1"
dependencies = [
"async-trait",
@@ -2124,7 +2101,6 @@ dependencies = [
"darkfi-contract-test-harness",
"darkfi-sdk",
"darkfi-serial",
- "darkfi_consensus_contract",
"darkfi_money_contract",
"easy-parallel",
"log",
diff --git a/Cargo.toml b/Cargo.toml
index 194099dd4..6a644f3dc 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -20,8 +20,7 @@ doctest = false
[workspace]
members = [
"bin/zkas",
- #"bin/darkfid",
- "bin/darkfid2",
+ "bin/darkfid",
"bin/darkfi-mmproxy",
"bin/drk",
#"bin/faucetd",
@@ -46,7 +45,6 @@ members = [
"src/contract/test-harness",
"src/contract/money",
"src/contract/dao",
- "src/contract/consensus",
"src/contract/deployooor",
"example/dchat/dchatd",
diff --git a/Makefile b/Makefile
index 1843864e0..49d4a5a59 100644
--- a/Makefile
+++ b/Makefile
@@ -18,7 +18,7 @@ PROOFS_BIN = $(PROOFS_SRC:=.bin)
# List of all binaries built
BINS = \
zkas \
- darkfid2 \
+ darkfid \
darkfi-mmproxy \
darkirc \
genev \
@@ -42,11 +42,10 @@ $(PROOFS_BIN): zkas $(PROOFS_SRC)
contracts: zkas
$(MAKE) -C src/contract/money
- $(MAKE) -C src/contract/consensus
$(MAKE) -C src/contract/dao
$(MAKE) -C src/contract/deployooor
-darkfid2: contracts
+darkfid: contracts
$(MAKE) -C bin/$@ \
PREFIX="$(PREFIX)" \
CARGO="$(CARGO)" \
@@ -147,11 +146,10 @@ coverage: contracts $(PROOFS_BIN)
clean:
$(MAKE) -C src/contract/money clean
- $(MAKE) -C src/contract/consensus clean
$(MAKE) -C src/contract/dao clean
$(MAKE) -C src/contract/deployooor clean
$(MAKE) -C bin/zkas clean
- $(MAKE) -C bin/darkfid2 clean
+ $(MAKE) -C bin/darkfid clean
$(MAKE) -C bin/darkfi-mmproxy clean
$(MAKE) -C bin/darkirc clean
$(MAKE) -C bin/genev/genev-cli clean
diff --git a/bin/darkfid/Cargo.toml b/bin/darkfid/Cargo.toml
index 1792e06a6..983770170 100644
--- a/bin/darkfid/Cargo.toml
+++ b/bin/darkfid/Cargo.toml
@@ -9,14 +9,24 @@ license = "AGPL-3.0-only"
edition = "2021"
[dependencies]
-async-trait = "0.1.77"
+# Darkfi
+darkfi = {path = "../../", features = ["async-daemonize", "bs58"]}
+darkfi_money_contract = {path = "../../src/contract/money"}
+darkfi-contract-test-harness = {path = "../../src/contract/test-harness"}
+darkfi-sdk = {path = "../../src/sdk"}
+darkfi-serial = {path = "../../src/serial"}
+
+# Misc
blake3 = "1.5.0"
bs58 = "0.5.0"
-darkfi = {path = "../../", features = ["async-daemonize", "validator"]}
-darkfi-sdk = {path = "../../src/sdk", features = ["async"]}
-darkfi-serial = {path = "../../src/serial"}
log = "0.4.20"
+num-bigint = "0.4.4"
+rand = "0.8.5"
sled = "0.34.7"
+toml = "0.8.8"
+
+# JSON-RPC
+async-trait = "0.1.77"
tinyjson = "2.5.1"
url = "2.5.0"
diff --git a/bin/darkfid/Makefile b/bin/darkfid/Makefile
index dbe2bb0a9..c5a759666 100644
--- a/bin/darkfid/Makefile
+++ b/bin/darkfid/Makefile
@@ -6,29 +6,37 @@ PREFIX = $(HOME)/.cargo
# Cargo binary
CARGO = cargo +nightly
+# Compile target
+RUST_TARGET = $(shell rustc -Vv | grep '^host: ' | cut -d' ' -f2)
+# Uncomment when doing musl static builds
+#RUSTFLAGS = -C target-feature=+crt-static -C link-self-contained=yes
+
SRC = \
Cargo.toml \
../../Cargo.toml \
- $(shell find src -type f) \
- $(shell find ../../src -type f) \
+ $(shell find src -type f -name '*.rs') \
+ $(shell find ../../src -type f -name '*.rs') \
+ $(shell find ../../src/contract -type f -name '*.wasm')
-BIN = ../../darkfid
+BIN = $(shell grep '^name = ' Cargo.toml | cut -d' ' -f3 | tr -d '"')
all: $(BIN)
$(BIN): $(SRC)
- $(CARGO) build $(TARGET_PRFX)$(RUST_TARGET) --release --package darkfid
- cp -f ../../target/$(RUST_TARGET)/release/darkfid $@
+ RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) build --target=$(RUST_TARGET) --release --package $@
+ cp -f ../../target/$(RUST_TARGET)/release/$@ $@
+ cp -f ../../target/$(RUST_TARGET)/release/$@ ../../$@
clean:
- rm -f $(BIN)
+ RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) clean --target=$(RUST_TARGET) --release --package $(BIN)
+ rm -f $(BIN) ../../$(BIN)
install: all
mkdir -p $(DESTDIR)$(PREFIX)/bin
cp -f $(BIN) $(DESTDIR)$(PREFIX)/bin
- chmod 755 $(DESTDIR)$(PREFIX)/bin/darkfid
+ chmod 755 $(DESTDIR)$(PREFIX)/bin/$(BIN)
uninstall:
- rm -f $(DESTDIR)$(PREFIX)/bin/darkfid
+ rm -f $(DESTDIR)$(PREFIX)/bin/$(BIN)
.PHONY: all clean install uninstall
diff --git a/bin/darkfid/darkfid_config.toml b/bin/darkfid/darkfid_config.toml
index 61aeeb883..3d2ffd473 100644
--- a/bin/darkfid/darkfid_config.toml
+++ b/bin/darkfid/darkfid_config.toml
@@ -6,80 +6,470 @@
## The default values are left commented. They can be overridden either by
## uncommenting, or by using the command-line.
-# Chain to use (testnet, mainnet)
-chain = "testnet"
-
-# Path to the wallet database
-wallet_path = "~/.config/darkfi/darkfid_wallet_testnet.db"
-
-# Password for the wallet database
-#wallet_pass = "changeme"
-
-# Path to the blockchain database directory
-database = "~/.config/darkfi/darkfid_blockchain_testnet"
-
# JSON-RPC listen URL
rpc_listen = "tcp://127.0.0.1:8340"
+# Blockchain network to use
+network = "testnet"
+
+# Localnet blockchain network configuration
+[network_config."localnet"]
+# Path to the blockchain database directory
+database = "~/.local/darkfi/darkfid_blockchain_localnet"
+
+# Finalization threshold, denominated by number of blocks
+threshold = 3
+
+# minerd JSON-RPC endpoint
+minerd_endpoint = "tcp://127.0.0.1:28467"
+
+# PoW block production target, in seconds
+pow_target = 10
+
+# Optional fixed PoW difficulty, used for testing
+pow_fixed_difficulty = 1
+
+# Epoch duration, denominated by number of blocks/slots
+epoch_length = 10
+
+# PoS slot duration, in seconds
+slot_time = 10
+
+# Whitelisted faucet addresses
+faucet_pub = []
+
# Participate in the consensus protocol
-consensus = false
+consensus = true
-# Enable single-node mode for local testing
-single_node = false
+# Wallet address to receive consensus rewards.
+# This is a dummy one so the miner can start,
+# replace it with your own.
+recipient = "5ZHfYpt4mpJcwBNxfEyxLzeFJUEeoePs5NQ5jVEgHrMf"
-# P2P accept addresses for the consensus protocol
-#consensus_p2p_accept = ["tls://127.0.0.1:8341"]
+# Skip syncing process and start node right away
+skip_sync = true
-# P2P external addresses for the consensus protocol
-#consensus_p2p_external = ["tls://127.0.0.1:8341"]
+# Enable PoS testing mode for local testing
+pos_testing_mode = true
-# Connection slots for the consensus protocol
-#consensus_slots = 8
+## Localnet sync P2P network settings
+[network_config."localnet".sync_net]
+# P2P accept addresses the instance listens on for inbound connections
+inbound = ["tcp+tls://0.0.0.0:8242"]
-# Seed nodes to connect to for the consensus protocol
-#consensus_p2p_seed = []
+# P2P external addresses the instance advertises so other peers can
+# reach us and connect to us, as long as inbound addrs are configured.
+#external_addrs = []
-# Seed nodes JSON-RPC listen URL for clock synchronization
-#consensus_seed_rpc = []
+# Peer nodes to manually connect to
+#peers = []
-# Peers to connect to for the consensus protocol
-#consensus_p2p_peer = []
+# Seed nodes to connect to for peer discovery and/or advertising our
+# own external addresses
+#seeds = []
-# Peers JSON-RPC listen URL for clock synchronization
-#consensus_peer_rpc = []
+# Whitelisted network transports for outbound connections
+#allowed_transports = ["tcp+tls"]
-# Prefered transports of outbound connections for the consensus protocol
-#consensus_p2p_transports = ["tls", "tcp"]
+# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
+#transport_mixing = true
-# P2P accept addresses for the syncing protocol
-sync_p2p_accept = ["tls://0.0.0.0:8342"]
+# Outbound connection slots number, this many connections will be
+# attempted. (This does not include manual connections)
+#outbound_connections = 8
-# P2P external addresses for the syncing protocol
-#sync_p2p_external = ["tls://127.0.0.1:8342"]
+# Inbound connections slots number, this many active inbound connections
+# will be allowed. (This does not include manual or outbound connections)
+#inbound_connections = 0
-# Connection slots for the syncing protocol
-sync_slots = 8
+# Manual connections retry limit, 0 for forever looping
+#manual_attempt_limit = 0
-# Seed nodes to connect to for the syncing protocol
-sync_p2p_seed = ["tls://lilith0.dark.fi:8342", "tls://lilith1.dark.fi:8342"]
+# Outbound connection timeout (in seconds)
+#outbound_connect_timeout = 10
-# Peers to connect to for the syncing protocol
-#sync_p2p_peer = []
+# Exchange versions (handshake) timeout (in seconds)
+#channel_handshake_timeout = 4
-# Prefered transports of outbound connections for the syncing protocol
-sync_p2p_transports = ["tls"]
+# Ping-pong exchange execution interval (in seconds)
+#channel_heartbeat_interval = 10
-# Enable localnet hosts
-localnet = false
+# Allow localnet hosts
+localnet = true
-# Enable channel log
-#channel_log = false
+# Delete a peer from hosts if they've been quarantined N times
+#hosts_quarantine_limit = 50
-# Whitelisted cashier addresses
-#cashier_pub = []
+# Cooling off time for peer discovery when unsuccessful
+#outbound_peer_discovery_cooloff_time = 30
+
+# Time between peer discovery attempts
+#outbound_peer_discovery_attempt_time = 5
+
+## Localnet consensus P2P network settings
+[network_config."localnet".consensus_net]
+# P2P accept addresses the instance listens on for inbound connections
+#inbound = ["tcp+tls://0.0.0.0:8241"]
+
+# P2P external addresses the instance advertises so other peers can
+# reach us and connect to us, as long as inbound addrs are configured.
+#external_addrs = []
+
+# Peer nodes to manually connect to
+#peers = []
+
+# Seed nodes to connect to for peer discovery and/or advertising our
+# own external addresses
+#seeds = []
+
+# Whitelisted network transports for outbound connections
+#allowed_transports = ["tcp+tls"]
+
+# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
+#transport_mixing = true
+
+# Outbound connection slots number, this many connections will be
+# attempted. (This does not include manual connections)
+#outbound_connections = 8
+
+# Manual connections retry limit, 0 for forever looping
+#manual_attempt_limit = 0
+
+# Outbound connection timeout (in seconds)
+#outbound_connect_timeout = 10
+
+# Exchange versions (handshake) timeout (in seconds)
+#channel_handshake_timeout = 4
+
+# Ping-pong exchange execution interval (in seconds)
+#channel_heartbeat_interval = 10
+
+# Allow localnet hosts
+localnet = true
+
+# Delete a peer from hosts if they've been quarantined N times
+#hosts_quarantine_limit = 50
+
+# Cooling off time for peer discovery when unsuccessful
+#outbound_peer_discovery_cooloff_time = 30
+
+# Time between peer discovery attempts
+#outbound_peer_discovery_attempt_time = 5
+
+# Testnet blockchain network configuration
+[network_config."testnet"]
+# Path to the blockchain database directory
+database = "~/.local/darkfi/darkfid_blockchain_testnet"
+
+# Finalization threshold, denominated by number of blocks
+threshold = 6
+
+# minerd JSON-RPC endpoint
+minerd_endpoint = "tcp://127.0.0.1:28467"
+
+# PoW block production target, in seconds
+pow_target = 90
+
+# Epoch duration, denominated by number of blocks/slots
+epoch_length = 10
+
+# PoS slot duration, in seconds
+slot_time = 90
# Whitelisted faucet addresses
faucet_pub = ["3ce5xa3PjuQGFtTaF7AvMJp7fGxqeGRJx7zj3LCwNCkP"]
-# Verify system clock is correct
-#clock_sync = true
+# Participate in the consensus protocol
+consensus = false
+
+# Wallet address to receive consensus rewards
+#recipient = "YOUR_WALLET_ADDRESS_HERE"
+
+# Skip syncing process and start node right away
+skip_sync = false
+
+# Enable PoS testing mode for local testing
+pos_testing_mode = false
+
+## Testnet sync P2P network settings
+[network_config."testnet".sync_net]
+# P2P accept addresses the instance listens on for inbound connections
+# You can also use an IPv6 address
+inbound = ["tcp+tls://0.0.0.0:8342"]
+# IPv6 version:
+#inbound = ["tcp+tls://[::]:8342"]
+# Combined:
+#inbound = ["tcp+tls://0.0.0.0:8342", "tcp+tls://[::]:8342"]
+
+# P2P external addresses the instance advertises so other peers can
+# reach us and connect to us, as long as inbound addrs are configured.
+# You can also use an IPv6 address
+#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8342"]
+# IPv6 version:
+#external_addrs = ["tcp+tls://[ipv6 address here]:8342"]
+# Combined:
+#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8342", "tcp+tls://[ipv6 address here]:8342"]
+
+# Peer nodes to manually connect to
+#peers = []
+
+# Seed nodes to connect to for peer discovery and/or advertising our
+# own external addresses
+seeds = ["tcp+tls://lilith0.dark.fi:8342", "tcp+tls://lilith1.dark.fi:8342"]
+
+# Whitelisted network transports for outbound connections
+allowed_transports = ["tcp+tls"]
+
+# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
+#transport_mixing = true
+
+# Outbound connection slots number, this many connections will be
+# attempted. (This does not include manual connections)
+outbound_connections = 8
+
+# Inbound connections slots number, this many active inbound connections
+# will be allowed. (This does not include manual or outbound connections)
+#inbound_connections = 0
+
+# Manual connections retry limit, 0 for forever looping
+#manual_attempt_limit = 0
+
+# Outbound connection timeout (in seconds)
+#outbound_connect_timeout = 10
+
+# Exchange versions (handshake) timeout (in seconds)
+#channel_handshake_timeout = 4
+
+# Ping-pong exchange execution interval (in seconds)
+#channel_heartbeat_interval = 10
+
+# Allow localnet hosts
+localnet = false
+
+# Delete a peer from hosts if they've been quarantined N times
+#hosts_quarantine_limit = 50
+
+# Cooling off time for peer discovery when unsuccessful
+#outbound_peer_discovery_cooloff_time = 30
+
+# Time between peer discovery attempts
+#outbound_peer_discovery_attempt_time = 5
+
+## Testnet consensus P2P network settings
+[network_config."testnet".consensus_net]
+# P2P accept addresses the instance listens on for inbound connections
+# You can also use an IPv6 address
+inbound = ["tcp+tls://0.0.0.0:8341"]
+# IPv6 version:
+#inbound = ["tcp+tls://[::]:8341"]
+# Combined:
+#inbound = ["tcp+tls://0.0.0.0:8341", "tcp+tls://[::]:8341"]
+
+# P2P external addresses the instance advertises so other peers can
+# reach us and connect to us, as long as inbound addrs are configured.
+# You can also use an IPv6 address
+#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8341"]
+# IPv6 version:
+#external_addrs = ["tcp+tls://[ipv6 address here]:8341"]
+# Combined:
+#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8341", "tcp+tls://[ipv6 address here]:8341"]
+
+# Peer nodes to manually connect to
+#peers = []
+
+# Seed nodes to connect to for peer discovery and/or advertising our
+# own external addresses
+seeds = ["tcp+tls://lilith0.dark.fi:8341", "tcp+tls://lilith1.dark.fi:8341"]
+
+# Whitelisted network transports for outbound connections
+allowed_transports = ["tcp+tls"]
+
+# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
+#transport_mixing = true
+
+# Outbound connection slots number, this many connections will be
+# attempted. (This does not include manual connections)
+#outbound_connections = 8
+
+# Manual connections retry limit, 0 for forever looping
+#manual_attempt_limit = 0
+
+# Outbound connection timeout (in seconds)
+#outbound_connect_timeout = 10
+
+# Exchange versions (handshake) timeout (in seconds)
+#channel_handshake_timeout = 4
+
+# Ping-pong exchange execution interval (in seconds)
+#channel_heartbeat_interval = 10
+
+# Allow localnet hosts
+localnet = false
+
+# Delete a peer from hosts if they've been quarantined N times
+#hosts_quarantine_limit = 50
+
+# Cooling off time for peer discovery when unsuccessful
+#outbound_peer_discovery_cooloff_time = 30
+
+# Time between peer discovery attempts
+#outbound_peer_discovery_attempt_time = 5
+
+# Mainnet blockchain network configuration
+[network_config."mainnet"]
+# Path to the blockchain database directory
+database = "~/.local/darkfi/darkfid_blockchain_mainnet"
+
+# Finalization threshold, denominated by number of blocks
+threshold = 11
+
+# minerd JSON-RPC endpoint
+minerd_endpoint = "tcp://127.0.0.1:28467"
+
+# PoW block production target, in seconds
+pow_target = 90
+
+# Epoch duration, denominated by number of blocks/slots
+epoch_length = 10
+
+# PoS slot duration, in seconds
+slot_time = 90
+
+# Whitelisted faucet addresses
+faucet_pub = []
+
+# Participate in the consensus protocol
+consensus = false
+
+# Wallet address to receive consensus rewards
+#recipient = "YOUR_WALLET_ADDRESS_HERE"
+
+# Skip syncing process and start node right away
+skip_sync = false
+
+# Enable PoS testing mode for local testing
+pos_testing_mode = false
+
+## Mainnet sync P2P network settings
+[network_config."mainnet".sync_net]
+# P2P accept addresses the instance listens on for inbound connections
+# You can also use an IPv6 address
+inbound = ["tcp+tls://0.0.0.0:8442"]
+# IPv6 version:
+#inbound = ["tcp+tls://[::]:8442"]
+# Combined:
+#inbound = ["tcp+tls://0.0.0.0:8442", "tcp+tls://[::]:8442"]
+
+# P2P external addresses the instance advertises so other peers can
+# reach us and connect to us, as long as inbound addrs are configured.
+# You can also use an IPv6 address
+#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8442"]
+# IPv6 version:
+#external_addrs = ["tcp+tls://[ipv6 address here]:8442"]
+# Combined:
+#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8442", "tcp+tls://[ipv6 address here]:8442"]
+
+# Peer nodes to manually connect to
+#peers = []
+
+# Seed nodes to connect to for peer discovery and/or advertising our
+# own external addresses
+seeds = ["tcp+tls://lilith0.dark.fi:8442", "tcp+tls://lilith1.dark.fi:8442"]
+
+# Whitelisted network transports for outbound connections
+allowed_transports = ["tcp+tls"]
+
+# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
+#transport_mixing = true
+
+# Outbound connection slots number, this many connections will be
+# attempted. (This does not include manual connections)
+outbound_connections = 8
+
+# Inbound connections slots number, this many active inbound connections
+# will be allowed. (This does not include manual or outbound connections)
+#inbound_connections = 0
+
+# Manual connections retry limit, 0 for forever looping
+#manual_attempt_limit = 0
+
+# Outbound connection timeout (in seconds)
+#outbound_connect_timeout = 10
+
+# Exchange versions (handshake) timeout (in seconds)
+#channel_handshake_timeout = 4
+
+# Ping-pong exchange execution interval (in seconds)
+#channel_heartbeat_interval = 10
+
+# Allow localnet hosts
+localnet = false
+
+# Delete a peer from hosts if they've been quarantined N times
+#hosts_quarantine_limit = 50
+
+# Cooling off time for peer discovery when unsuccessful
+#outbound_peer_discovery_cooloff_time = 30
+
+# Time between peer discovery attempts
+#outbound_peer_discovery_attempt_time = 5
+
+## Mainnet consensus P2P network settings
+[network_config."mainnet".consensus_net]
+# P2P accept addresses the instance listens on for inbound connections
+# You can also use an IPv6 address
+inbound = ["tcp+tls://0.0.0.0:8441"]
+# IPv6 version:
+#inbound = ["tcp+tls://[::]:8441"]
+# Combined:
+#inbound = ["tcp+tls://0.0.0.0:8441", "tcp+tls://[::]:8441"]
+
+# P2P external addresses the instance advertises so other peers can
+# reach us and connect to us, as long as inbound addrs are configured.
+# You can also use an IPv6 address
+#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8441"]
+# IPv6 version:
+#external_addrs = ["tcp+tls://[ipv6 address here]:8441"]
+# Combined:
+#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8441", "tcp+tls://[ipv6 address here]:8441"]
+
+# Peer nodes to manually connect to
+#peers = []
+
+# Seed nodes to connect to for peer discovery and/or advertising our
+# own external addresses
+seeds = ["tcp+tls://lilith0.dark.fi:8441", "tcp+tls://lilith1.dark.fi:8441"]
+
+# Whitelisted network transports for outbound connections
+allowed_transports = ["tcp+tls"]
+
+# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
+#transport_mixing = true
+
+# Outbound connection slots number, this many connections will be
+# attempted. (This does not include manual connections)
+#outbound_connections = 8
+
+# Manual connections retry limit, 0 for forever looping
+#manual_attempt_limit = 0
+
+# Outbound connection timeout (in seconds)
+#outbound_connect_timeout = 10
+
+# Exchange versions (handshake) timeout (in seconds)
+#channel_handshake_timeout = 4
+
+# Ping-pong exchange execution interval (in seconds)
+#channel_heartbeat_interval = 10
+
+# Allow localnet hosts
+localnet = false
+
+# Delete a peer from hosts if they've been quarantined N times
+#hosts_quarantine_limit = 50
+
+# Cooling off time for peer discovery when unsuccessful
+#outbound_peer_discovery_cooloff_time = 30
+
+# Time between peer discovery attempts
+#outbound_peer_discovery_attempt_time = 5
diff --git a/bin/darkfid2/genesis_block_localnet b/bin/darkfid/genesis_block_localnet
similarity index 100%
rename from bin/darkfid2/genesis_block_localnet
rename to bin/darkfid/genesis_block_localnet
diff --git a/bin/darkfid2/genesis_block_mainnet b/bin/darkfid/genesis_block_mainnet
similarity index 100%
rename from bin/darkfid2/genesis_block_mainnet
rename to bin/darkfid/genesis_block_mainnet
diff --git a/bin/darkfid2/genesis_block_testnet b/bin/darkfid/genesis_block_testnet
similarity index 100%
rename from bin/darkfid2/genesis_block_testnet
rename to bin/darkfid/genesis_block_testnet
diff --git a/bin/darkfid/src/error.rs b/bin/darkfid/src/error.rs
index 16c1b486e..7ed774619 100644
--- a/bin/darkfid/src/error.rs
+++ b/bin/darkfid/src/error.rs
@@ -21,16 +21,6 @@ use darkfi::rpc::jsonrpc::{ErrorCode::ServerError, JsonError, JsonResult};
/// Custom RPC errors available for darkfid.
/// Please sort them sensefully.
pub enum RpcError {
- /*
- // Wallet/Key-related errors
- NoRowsFoundInWallet = -32101,
- Keygen = -32101,
- KeypairFetch = -32102,
- KeypairNotFound = -32103,
- InvalidKeypair = -32104,
- InvalidAddressParam = -32105,
- DecryptionFailed = -32106,
- */
// Transaction-related errors
TxSimulationFail = -32110,
TxBroadcastFail = -32111,
@@ -44,20 +34,13 @@ pub enum RpcError {
// Contract-related errors
ContractZkasDbNotFound = -32200,
+
+ // Misc errors
+ PingFailed = -32300,
}
fn to_tuple(e: RpcError) -> (i32, String) {
let msg = match e {
- /*
- // Wallet/Key-related errors
- RpcError::NoRowsFoundInWallet => "No queried rows found in wallet",
- RpcError::Keygen => "Failed generating keypair",
- RpcError::KeypairFetch => "Failed fetching keypairs from wallet",
- RpcError::KeypairNotFound => "Keypair not found",
- RpcError::InvalidKeypair => "Invalid keypair",
- RpcError::InvalidAddressParam => "Invalid address parameter",
- RpcError::DecryptionFailed => "Decryption failed",
- */
// Transaction-related errors
RpcError::TxSimulationFail => "Failed simulating transaction state change",
RpcError::TxBroadcastFail => "Failed broadcasting transaction",
@@ -68,6 +51,8 @@ fn to_tuple(e: RpcError) -> (i32, String) {
RpcError::ParseError => "Parse error",
// Contract-related errors
RpcError::ContractZkasDbNotFound => "zkas database not found for given contract",
+ // Misc errors
+ RpcError::PingFailed => "Miner daemon ping error",
};
(e as i32, msg.to_string())
diff --git a/bin/darkfid/src/main.rs b/bin/darkfid/src/main.rs
index 79e9cea76..7bfa83f75 100644
--- a/bin/darkfid/src/main.rs
+++ b/bin/darkfid/src/main.rs
@@ -16,49 +16,65 @@
* along with this program. If not, see .
*/
-use std::{collections::HashSet, path::Path, str::FromStr, sync::Arc};
-
-use async_trait::async_trait;
-use darkfi_sdk::crypto::PublicKey;
-use log::{error, info};
-use smol::{
- lock::{Mutex, MutexGuard},
- stream::StreamExt,
+use std::{
+ collections::{HashMap, HashSet},
+ str::FromStr,
+ sync::Arc,
};
+
+use log::{error, info};
+use smol::{lock::Mutex, stream::StreamExt};
use structopt_toml::{serde::Deserialize, structopt::StructOpt, StructOptToml};
use url::Url;
use darkfi::{
- async_daemonize, cli_desc,
- consensus::{
- constants::{
- MAINNET_BOOTSTRAP_TIMESTAMP, MAINNET_GENESIS_HASH_BYTES, MAINNET_GENESIS_TIMESTAMP,
- MAINNET_INITIAL_DISTRIBUTION, TESTNET_BOOTSTRAP_TIMESTAMP, TESTNET_GENESIS_HASH_BYTES,
- TESTNET_GENESIS_TIMESTAMP, TESTNET_INITIAL_DISTRIBUTION,
- },
- proto::{ProtocolProposal, ProtocolSync, ProtocolSyncConsensus, ProtocolTx},
- task::{block_sync_task, proposal_task},
- validator::ValidatorStatePtr,
- ValidatorState,
- },
- net,
- net::P2pPtr,
+ async_daemonize,
+ blockchain::BlockInfo,
+ cli_desc,
+ net::{settings::SettingsOpt, P2pPtr},
rpc::{
- clock_sync::check_clock,
- jsonrpc::{ErrorCode::MethodNotFound, JsonError, JsonRequest, JsonResult},
+ client::RpcClient,
+ jsonrpc::JsonSubscriber,
server::{listen_and_serve, RequestHandler},
},
system::{StoppableTask, StoppableTaskPtr},
- util::path::expand_path,
- wallet::{WalletDb, WalletPtr},
+ util::{path::expand_path, time::TimeKeeper},
+ validator::{utils::genesis_txs_total, Validator, ValidatorConfig, ValidatorPtr},
Error, Result,
};
+use darkfi_sdk::crypto::PublicKey;
+use darkfi_serial::deserialize_async;
+
+#[cfg(test)]
+mod tests;
mod error;
use error::{server_error, RpcError};
+/// JSON-RPC requests handler and methods
+mod rpc;
+mod rpc_blockchain;
+mod rpc_tx;
+
+/// Validator async tasks
+mod task;
+use task::{miner_task, sync_task};
+
+/// P2P net protocols
+mod proto;
+
+/// Utility functions
+mod utils;
+use utils::{parse_blockchain_config, spawn_consensus_p2p, spawn_sync_p2p};
+
const CONFIG_FILE: &str = "darkfid_config.toml";
const CONFIG_FILE_CONTENTS: &str = include_str!("../darkfid_config.toml");
+/// Note:
+/// If you change these don't forget to remove their corresponding database folder,
+/// since if it already has a genesis block, provided one is ignored.
+const GENESIS_BLOCK_LOCALNET: &str = include_str!("../genesis_block_localnet");
+const GENESIS_BLOCK_TESTNET: &str = include_str!("../genesis_block_testnet");
+const GENESIS_BLOCK_MAINNET: &str = include_str!("../genesis_block_mainnet");
#[derive(Clone, Debug, Deserialize, StructOpt, StructOptToml)]
#[serde(default)]
@@ -68,109 +84,13 @@ struct Args {
/// Configuration file to use
config: Option,
- #[structopt(long, default_value = "testnet")]
- /// Chain to use (testnet, mainnet)
- chain: String,
-
- #[structopt(long)]
- /// Participate in consensus
- consensus: bool,
-
- #[structopt(long)]
- /// Enable single-node mode for local testing
- single_node: bool,
-
- #[structopt(long, default_value = "~/.config/darkfi/darkfid_wallet.db")]
- /// Path to wallet database
- wallet_path: String,
-
- #[structopt(long, default_value = "changeme")]
- /// Password for the wallet database
- wallet_pass: String,
-
- #[structopt(long, default_value = "~/.config/darkfi/darkfid_blockchain")]
- /// Path to blockchain database
- database: String,
-
- #[structopt(long, default_value = "tcp://127.0.0.1:8340")]
+ #[structopt(short, long, default_value = "tcp://127.0.0.1:8340")]
/// JSON-RPC listen URL
rpc_listen: Url,
- #[structopt(long)]
- /// P2P accept addresses for the consensus protocol (repeatable flag)
- consensus_p2p_accept: Vec,
-
- #[structopt(long)]
- /// P2P external addresses for the consensus protocol (repeatable flag)
- consensus_p2p_external: Vec,
-
- #[structopt(long, default_value = "8")]
- /// Connection slots for the consensus protocol
- consensus_slots: usize,
-
- #[structopt(long)]
- /// Connect to peer for the consensus protocol (repeatable flag)
- consensus_p2p_peer: Vec,
-
- #[structopt(long)]
- /// Peers JSON-RPC listen URL for clock synchronization (repeatable flag)
- consensus_peer_rpc: Vec,
-
- #[structopt(long)]
- /// Connect to seed for the consensus protocol (repeatable flag)
- consensus_p2p_seed: Vec,
-
- #[structopt(long)]
- /// Seed nodes JSON-RPC listen URL for clock synchronization (repeatable flag)
- consensus_seed_rpc: Vec,
-
- #[structopt(long)]
- /// Prefered transports of outbound connections for the consensus protocol (repeatable flag)
- consensus_p2p_transports: Vec,
-
- #[structopt(long)]
- /// P2P accept addresses for the syncing protocol (repeatable flag)
- sync_p2p_accept: Vec,
-
- #[structopt(long)]
- /// P2P external addresses for the syncing protocol (repeatable flag)
- sync_p2p_external: Vec,
-
- #[structopt(long, default_value = "8")]
- /// Connection slots for the syncing protocol
- sync_slots: usize,
-
- #[structopt(long)]
- /// Connect to peer for the syncing protocol (repeatable flag)
- sync_p2p_peer: Vec,
-
- #[structopt(long)]
- /// Connect to seed for the syncing protocol (repeatable flag)
- sync_p2p_seed: Vec,
-
- #[structopt(long)]
- /// Prefered transports of outbound connections for the syncing protocol (repeatable flag)
- sync_p2p_transports: Vec,
-
- #[structopt(long)]
- /// Enable localnet hosts
- localnet: bool,
-
- #[structopt(long)]
- /// Enable channel log
- channel_log: bool,
-
- #[structopt(long)]
- /// Whitelisted cashier public key (repeatable flag)
- cashier_pub: Vec,
-
- #[structopt(long)]
- /// Whitelisted faucet public key (repeatable flag)
- faucet_pub: Vec,
-
- #[structopt(long)]
- /// Verify system clock is correct
- clock_sync: bool,
+ #[structopt(short, long, default_value = "testnet")]
+ /// Blockchain network to use
+ network: String,
#[structopt(short, long)]
/// Set log file to ouput into
@@ -181,268 +101,230 @@ struct Args {
verbose: u8,
}
-pub struct Darkfid {
- synced: Mutex, // AtomicBool is weird in Arc
- consensus_p2p: Option,
- sync_p2p: Option,
- _wallet: WalletPtr,
- validator_state: ValidatorStatePtr,
- rpc_connections: Mutex>,
+/// Defines a blockchain network configuration.
+/// Default values correspond to a local network.
+#[derive(Clone, Debug, serde::Deserialize, structopt::StructOpt, structopt_toml::StructOptToml)]
+#[structopt()]
+pub struct BlockchainNetwork {
+ #[structopt(long, default_value = "~/.local/darkfi/darkfid_blockchain_localnet")]
+ /// Path to blockchain database
+ pub database: String,
+
+ #[structopt(long, default_value = "3")]
+ /// Finalization threshold, denominated by number of blocks
+ pub threshold: usize,
+
+ #[structopt(long, default_value = "tcp://127.0.0.1:28467")]
+ /// minerd JSON-RPC endpoint
+ pub minerd_endpoint: Url,
+
+ #[structopt(long, default_value = "10")]
+ /// PoW block production target, in seconds
+ pub pow_target: usize,
+
+ #[structopt(long)]
+ /// Optional fixed PoW difficulty, used for testing
+ pub pow_fixed_difficulty: Option,
+
+ #[structopt(long, default_value = "10")]
+ /// Epoch duration, denominated by number of blocks/slots
+ pub epoch_length: u64,
+
+ #[structopt(long, default_value = "10")]
+ /// PoS slot duration, in seconds
+ pub slot_time: u64,
+
+ #[structopt(long)]
+ /// Whitelisted faucet public key (repeatable flag)
+ pub faucet_pub: Vec,
+
+ #[structopt(long)]
+ /// Participate in the consensus protocol
+ pub consensus: bool,
+
+ #[structopt(long)]
+ /// Wallet address to receive consensus rewards
+ pub recipient: Option,
+
+ #[structopt(long)]
+ /// Skip syncing process and start node right away
+ pub skip_sync: bool,
+
+ #[structopt(long)]
+ /// Enable PoS testing mode for local testing
+ pub pos_testing_mode: bool,
+
+ /// Syncing network settings
+ #[structopt(flatten)]
+ pub sync_net: SettingsOpt,
+
+ /// Consensus network settings
+ #[structopt(flatten)]
+ pub consensus_net: SettingsOpt,
}
-// JSON-RPC methods
-mod rpc_blockchain;
-mod rpc_misc;
-mod rpc_tx;
-mod rpc_wallet;
-
-// Internal methods
-//mod internal;
-
-#[async_trait]
-impl RequestHandler for Darkfid {
- async fn handle_request(&self, req: JsonRequest) -> JsonResult {
- match req.method.as_str() {
- // =====================
- // Miscellaneous methods
- // =====================
- "ping" => return self.pong(req.id, req.params).await,
- "clock" => return self.misc_clock(req.id, req.params).await,
- "sync_dnet_switch" => return self.misc_sync_dnet_switch(req.id, req.params).await,
- "consensus_dnet_switch" => {
- return self.misc_consensus_dnet_switch(req.id, req.params).await
- }
-
- // ==================
- // Blockchain methods
- // ==================
- "blockchain.get_slot" => return self.blockchain_get_slot(req.id, req.params).await,
- "blockchain.get_tx" => return self.blockchain_get_tx(req.id, req.params).await,
- "blockchain.last_known_slot" => {
- return self.blockchain_last_known_slot(req.id, req.params).await
- }
- "blockchain.subscribe_blocks" => {
- return self.blockchain_subscribe_blocks(req.id, req.params).await
- }
- "blockchain.subscribe_err_txs" => {
- return self.blockchain_subscribe_err_txs(req.id, req.params).await
- }
- "blockchain.lookup_zkas" => {
- return self.blockchain_lookup_zkas(req.id, req.params).await
- }
-
- // ===================
- // Transaction methods
- // ===================
- "tx.simulate" => return self.tx_simulate(req.id, req.params).await,
- "tx.broadcast" => return self.tx_broadcast(req.id, req.params).await,
-
- // ==============
- // Wallet methods
- // ==============
- "wallet.exec_sql" => return self.wallet_exec_sql(req.id, req.params).await,
- "wallet.query_row_single" => {
- return self.wallet_query_row_single(req.id, req.params).await
- }
- "wallet.query_row_multi" => {
- return self.wallet_query_row_multi(req.id, req.params).await
- }
-
- // ==============
- // Invalid method
- // ==============
- _ => return JsonError::new(MethodNotFound, None, req.id).into(),
- }
- }
-
- async fn connections_mut(&self) -> MutexGuard<'_, HashSet> {
- self.rpc_connections.lock().await
- }
+/// Daemon structure
+pub struct Darkfid {
+ /// Syncing P2P network pointer
+ sync_p2p: P2pPtr,
+ /// Optional consensus P2P network pointer
+ consensus_p2p: Option,
+ /// Validator(node) pointer
+ validator: ValidatorPtr,
+ /// A map of various subscribers exporting live info from the blockchain
+ subscribers: HashMap<&'static str, JsonSubscriber>,
+ /// JSON-RPC connection tracker
+ rpc_connections: Mutex>,
+ /// JSON-RPC client to execute requests to the miner daemon
+ rpc_client: Option,
}
impl Darkfid {
pub async fn new(
- validator_state: ValidatorStatePtr,
+ sync_p2p: P2pPtr,
consensus_p2p: Option,
- sync_p2p: Option,
- _wallet: WalletPtr,
+ validator: ValidatorPtr,
+ subscribers: HashMap<&'static str, JsonSubscriber>,
+ rpc_client: Option,
) -> Self {
Self {
- synced: Mutex::new(false),
- consensus_p2p,
sync_p2p,
- _wallet,
- validator_state,
+ consensus_p2p,
+ validator,
+ subscribers,
rpc_connections: Mutex::new(HashSet::new()),
+ rpc_client,
}
}
}
async_daemonize!(realmain);
async fn realmain(args: Args, ex: Arc>) -> Result<()> {
- if args.consensus && args.clock_sync {
- // We verify that if peer/seed nodes are configured, their rpc config also exists
- if ((!args.consensus_p2p_peer.is_empty() && args.consensus_peer_rpc.is_empty()) ||
- (args.consensus_p2p_peer.is_empty() && !args.consensus_peer_rpc.is_empty())) ||
- ((!args.consensus_p2p_seed.is_empty() && args.consensus_seed_rpc.is_empty()) ||
- (args.consensus_p2p_seed.is_empty() && !args.consensus_seed_rpc.is_empty()))
- {
- error!(
- "Consensus peer/seed nodes misconfigured: both p2p and rpc urls must be present"
- );
- return Err(Error::ConfigInvalid)
+ info!(target: "darkfid", "Initializing DarkFi node...");
+
+ // Grab blockchain network configuration
+ let (blockchain_config, genesis_block) = match args.network.as_str() {
+ "localnet" => {
+ (parse_blockchain_config(args.config, "localnet").await?, GENESIS_BLOCK_LOCALNET)
}
- // We verify that the system clock is valid before initializing
- let peers = [&args.consensus_peer_rpc[..], &args.consensus_seed_rpc[..]].concat();
- if (check_clock(&peers).await).is_err() {
- error!("System clock is invalid, terminating...");
- return Err(Error::InvalidClock)
- };
- }
-
- // Initialize or load wallet
- let wallet = WalletDb::new(Some(expand_path(&args.wallet_path)?), Some(&args.wallet_pass))?;
-
- // Initialize or open sled database
- let db_path =
- Path::new(expand_path(&args.database)?.to_str().unwrap()).join(args.chain.clone());
- let sled_db = sled::open(&db_path)?;
-
- // Initialize validator state
- let (bootstrap_ts, genesis_ts, genesis_data, initial_distribution) = match args.chain.as_str() {
- "mainnet" => (
- *MAINNET_BOOTSTRAP_TIMESTAMP,
- *MAINNET_GENESIS_TIMESTAMP,
- *MAINNET_GENESIS_HASH_BYTES,
- *MAINNET_INITIAL_DISTRIBUTION,
- ),
- "testnet" => (
- *TESTNET_BOOTSTRAP_TIMESTAMP,
- *TESTNET_GENESIS_TIMESTAMP,
- *TESTNET_GENESIS_HASH_BYTES,
- *TESTNET_INITIAL_DISTRIBUTION,
- ),
- x => {
- error!("Unsupported chain `{}`", x);
+ "testnet" => {
+ (parse_blockchain_config(args.config, "testnet").await?, GENESIS_BLOCK_TESTNET)
+ }
+ "mainnet" => {
+ (parse_blockchain_config(args.config, "mainnet").await?, GENESIS_BLOCK_MAINNET)
+ }
+ _ => {
+ error!("Unsupported chain `{}`", args.network);
return Err(Error::UnsupportedChain)
}
};
- // Parse faucet addresses
- let mut faucet_pubkeys = vec![];
- for i in args.cashier_pub {
- let pk = PublicKey::from_str(&i)?;
- faucet_pubkeys.push(pk);
+ if blockchain_config.pos_testing_mode {
+ info!(target: "darkfid", "Node is configured to run in PoS testing mode!");
}
- for i in args.faucet_pub {
- let pk = PublicKey::from_str(&i)?;
- faucet_pubkeys.push(pk);
+ // Parse the genesis block
+ let bytes = bs58::decode(&genesis_block.trim()).into_vec()?;
+ let genesis_block: BlockInfo = deserialize_async(&bytes).await?;
+
+ // Initialize or open sled database
+ let db_path = expand_path(&blockchain_config.database)?;
+ let sled_db = sled::open(&db_path)?;
+
+ // Initialize validator configuration
+ let genesis_txs_total = genesis_txs_total(&genesis_block.txs).await?;
+
+ let time_keeper = TimeKeeper::new(
+ genesis_block.header.timestamp,
+ blockchain_config.epoch_length,
+ blockchain_config.slot_time,
+ 0,
+ );
+
+ let pow_fixed_difficulty = if let Some(diff) = blockchain_config.pow_fixed_difficulty {
+ info!(target: "darkfid", "Node is configured to run with fixed PoW difficulty: {}", diff);
+ Some(diff.into())
+ } else {
+ None
+ };
+
+ let config = ValidatorConfig::new(
+ time_keeper,
+ blockchain_config.threshold,
+ blockchain_config.pow_target,
+ pow_fixed_difficulty,
+ genesis_block,
+ genesis_txs_total,
+ vec![],
+ blockchain_config.pos_testing_mode,
+ false, // TODO: Make configurable
+ );
+
+ // Initialize validator
+ let validator = Validator::new(&sled_db, config).await?;
+
+ // Here we initialize various subscribers that can export live blockchain/consensus data.
+ let mut subscribers = HashMap::new();
+ subscribers.insert("blocks", JsonSubscriber::new("blockchain.subscribe_blocks"));
+ subscribers.insert("txs", JsonSubscriber::new("blockchain.subscribe_txs"));
+ if blockchain_config.consensus {
+ subscribers.insert("proposals", JsonSubscriber::new("blockchain.subscribe_proposals"));
}
- if args.single_node {
- info!("Node is configured to run in single-node mode!");
- }
+ // Initialize syncing P2P network
+ let sync_p2p =
+ spawn_sync_p2p(&blockchain_config.sync_net.into(), &validator, &subscribers, ex.clone())
+ .await;
- // Initialize validator state
- let state = ValidatorState::new(
- &sled_db,
- bootstrap_ts,
- genesis_ts,
- genesis_data,
- initial_distribution,
- wallet.clone(),
- faucet_pubkeys,
- args.consensus,
- args.single_node,
- )
- .await?;
-
- let sync_p2p = {
- info!("Registering block sync P2P protocols...");
- let sync_network_settings = net::Settings {
- inbound_addrs: args.sync_p2p_accept,
- outbound_connections: args.sync_slots,
- external_addrs: args.sync_p2p_external,
- peers: args.sync_p2p_peer.clone(),
- seeds: args.sync_p2p_seed.clone(),
- allowed_transports: args.sync_p2p_transports,
- localnet: args.localnet,
- ..Default::default()
+ // Initialize consensus P2P network
+ let (consensus_p2p, rpc_client) = if blockchain_config.consensus {
+ let Ok(rpc_client) = RpcClient::new(blockchain_config.minerd_endpoint, ex.clone()).await
+ else {
+ error!(target: "darkfid", "Failed to initialize miner daemon rpc client, check if minerd is running");
+ return Err(Error::RpcClientStopped)
};
-
- let p2p = net::P2p::new(sync_network_settings, ex.clone()).await;
- let registry = p2p.protocol_registry();
-
- let _state = state.clone();
- registry
- .register(net::SESSION_ALL, move |channel, p2p| {
- let state = _state.clone();
- async move {
- ProtocolSync::init(channel, state, p2p, args.consensus)
- .await
- .unwrap()
- }
- })
- .await;
-
- let _state = state.clone();
- registry
- .register(net::SESSION_ALL, move |channel, p2p| {
- let state = _state.clone();
- async move { ProtocolTx::init(channel, state, p2p).await.unwrap() }
- })
- .await;
-
- Some(p2p)
+ (
+ Some(
+ spawn_consensus_p2p(
+ &blockchain_config.consensus_net.into(),
+ &validator,
+ &subscribers,
+ ex.clone(),
+ )
+ .await,
+ ),
+ Some(rpc_client),
+ )
+ } else {
+ (None, None)
};
- // P2P network settings for the consensus protocol
- let consensus_p2p = {
- if !args.consensus {
- None
- } else {
- info!("Registering consensus P2P protocols...");
- let consensus_network_settings = net::Settings {
- inbound_addrs: args.consensus_p2p_accept,
- outbound_connections: args.consensus_slots,
- external_addrs: args.consensus_p2p_external,
- peers: args.consensus_p2p_peer.clone(),
- seeds: args.consensus_p2p_seed.clone(),
- allowed_transports: args.consensus_p2p_transports,
- localnet: args.localnet,
- ..Default::default()
- };
- let p2p = net::P2p::new(consensus_network_settings, ex.clone()).await;
- let registry = p2p.protocol_registry();
-
- let _state = state.clone();
- registry
- .register(net::SESSION_ALL, move |channel, p2p| {
- let state = _state.clone();
- async move { ProtocolProposal::init(channel, state, p2p).await.unwrap() }
- })
- .await;
-
- let _state = state.clone();
- registry
- .register(net::SESSION_ALL, move |channel, p2p| {
- let state = _state.clone();
- async move { ProtocolSyncConsensus::init(channel, state, p2p).await.unwrap() }
- })
- .await;
-
- Some(p2p)
- }
- };
-
- // Initialize program state
- let darkfid =
- Darkfid::new(state.clone(), consensus_p2p.clone(), sync_p2p.clone(), wallet.clone()).await;
+ // Initialize node
+ let darkfid = Darkfid::new(
+ sync_p2p.clone(),
+ consensus_p2p.clone(),
+ validator.clone(),
+ subscribers,
+ rpc_client,
+ )
+ .await;
let darkfid = Arc::new(darkfid);
+ info!(target: "darkfid", "Node initialized successfully!");
+
+ // Pinging minerd daemon to verify it listens
+ if blockchain_config.consensus {
+ if let Err(e) = darkfid.ping_miner_daemon().await {
+ error!(target: "darkfid", "Failed to ping miner daemon: {}", e);
+ return Err(Error::RpcClientStopped)
+ }
+ }
// JSON-RPC server
- info!("Starting JSON-RPC server");
+ info!(target: "darkfid", "Starting JSON-RPC server");
+ // Here we create a task variable so we can manually close the
+ // task later. P2P tasks don't need this, since the P2P network has
+ // its own stop() function to shut down, which also terminates the
+ // task we created for it.
let rpc_task = StoppableTask::new();
let darkfid_ = darkfid.clone();
rpc_task.clone().start(
@@ -457,69 +339,81 @@ async fn realmain(args: Args, ex: Arc>) -> Result<()> {
ex.clone(),
);
- info!("Starting sync P2P network");
- sync_p2p.clone().unwrap().start().await?;
-
- // TODO: I think this is not necessary anymore
- //info!("Waiting for sync P2P outbound connections");
- //sync_p2p.clone().unwrap().wait_for_outbound(ex.clone()).await?;
-
- match block_sync_task(sync_p2p.clone().unwrap(), state.clone()).await {
- Ok(()) => *darkfid.synced.lock().await = true,
- Err(e) => error!("Failed syncing blockchain: {}", e),
- }
+ info!(target: "darkfid", "Starting sync P2P network");
+ sync_p2p.clone().start().await?;
// Consensus protocol
- let proposal_task = if args.consensus && *darkfid.synced.lock().await {
- info!("Starting consensus P2P network");
+ if blockchain_config.consensus {
+ info!(target: "darkfid", "Starting consensus P2P network");
let consensus_p2p = consensus_p2p.clone().unwrap();
consensus_p2p.clone().start().await?;
+ } else {
+ info!(target: "darkfid", "Not starting consensus P2P network");
+ }
- // TODO: I think this is not necessary anymore
- //info!("Waiting for consensus P2P outbound connections");
- //consensus_p2p.clone().unwrap().wait_for_outbound(ex.clone()).await?;
+ // Sync blockchain
+ if !blockchain_config.skip_sync {
+ sync_task(&darkfid).await?;
+ } else {
+ *darkfid.validator.synced.write().await = true;
+ }
+
+ // Clean node pending transactions
+ darkfid.validator.purge_pending_txs().await?;
+
+ // Consensus protocol
+ let consensus_task = if blockchain_config.consensus {
+ info!(target: "darkfid", "Starting consensus protocol task");
+ // Grab the rewards recipient public key (address)
+ if blockchain_config.recipient.is_none() {
+ return Err(Error::ParseFailed("Recipient address missing"))
+ }
+ let recipient = match PublicKey::from_str(&blockchain_config.recipient.unwrap()) {
+ Ok(address) => address,
+ Err(_) => return Err(Error::InvalidAddress),
+ };
- info!("Starting consensus protocol task");
let task = StoppableTask::new();
task.clone().start(
- proposal_task(consensus_p2p.clone(), sync_p2p.clone().unwrap(), state, ex.clone()),
+ // Weird hack to prevent lifetimes hell
+ async move { miner_task(&darkfid, &recipient).await },
|res| async {
match res {
- Ok(()) | Err(Error::ProposalTaskStopped) => { /* Do nothing */ }
- Err(e) => error!(target: "darkfid", "Failed starting proposal task: {}", e),
+ Ok(()) | Err(Error::MinerTaskStopped) => { /* Do nothing */ }
+ Err(e) => error!(target: "darkfid", "Failed starting miner task: {}", e),
}
},
- Error::ProposalTaskStopped,
+ Error::MinerTaskStopped,
ex.clone(),
);
Some(task)
} else {
- info!("Not starting consensus P2P network");
+ info!(target: "darkfid", "Not participating in consensus");
None
};
// Signal handling for graceful termination.
let (signals_handler, signals_task) = SignalHandler::new(ex)?;
signals_handler.wait_termination(signals_task).await?;
- info!("Caught termination signal, cleaning up and exiting...");
+ info!(target: "darkfid", "Caught termination signal, cleaning up and exiting...");
info!(target: "darkfid", "Stopping JSON-RPC server...");
rpc_task.stop().await;
info!(target: "darkfid", "Stopping syncing P2P network...");
- sync_p2p.clone().unwrap().stop().await;
-
- if let Some(task) = proposal_task {
- info!(target: "darkfid", "Stopping proposal task...");
- task.stop().await;
+ sync_p2p.stop().await;
+ if blockchain_config.consensus {
info!(target: "darkfid", "Stopping consensus P2P network...");
consensus_p2p.unwrap().stop().await;
+
+ info!(target: "darkfid", "Stopping consensus task...");
+ consensus_task.unwrap().stop().await;
}
- info!("Flushing sled database...");
+ info!(target: "darkfid", "Flushing sled database...");
let flushed_bytes = sled_db.flush_async().await?;
- info!("Flushed {} bytes", flushed_bytes);
+ info!(target: "darkfid", "Flushed {} bytes", flushed_bytes);
Ok(())
}
diff --git a/bin/darkfid2/src/proto/mod.rs b/bin/darkfid/src/proto/mod.rs
similarity index 100%
rename from bin/darkfid2/src/proto/mod.rs
rename to bin/darkfid/src/proto/mod.rs
diff --git a/bin/darkfid2/src/proto/protocol_block.rs b/bin/darkfid/src/proto/protocol_block.rs
similarity index 100%
rename from bin/darkfid2/src/proto/protocol_block.rs
rename to bin/darkfid/src/proto/protocol_block.rs
diff --git a/bin/darkfid2/src/proto/protocol_proposal.rs b/bin/darkfid/src/proto/protocol_proposal.rs
similarity index 100%
rename from bin/darkfid2/src/proto/protocol_proposal.rs
rename to bin/darkfid/src/proto/protocol_proposal.rs
diff --git a/bin/darkfid2/src/proto/protocol_sync.rs b/bin/darkfid/src/proto/protocol_sync.rs
similarity index 100%
rename from bin/darkfid2/src/proto/protocol_sync.rs
rename to bin/darkfid/src/proto/protocol_sync.rs
diff --git a/bin/darkfid2/src/proto/protocol_tx.rs b/bin/darkfid/src/proto/protocol_tx.rs
similarity index 100%
rename from bin/darkfid2/src/proto/protocol_tx.rs
rename to bin/darkfid/src/proto/protocol_tx.rs
diff --git a/bin/darkfid2/src/rpc.rs b/bin/darkfid/src/rpc.rs
similarity index 100%
rename from bin/darkfid2/src/rpc.rs
rename to bin/darkfid/src/rpc.rs
diff --git a/bin/darkfid/src/rpc_blockchain.rs b/bin/darkfid/src/rpc_blockchain.rs
index 2505bd15c..b812f2d47 100644
--- a/bin/darkfid/src/rpc_blockchain.rs
+++ b/bin/darkfid/src/rpc_blockchain.rs
@@ -16,10 +16,10 @@
* along with this program. If not, see .
*/
-use std::str::FromStr;
+use std::{collections::HashMap, str::FromStr};
use darkfi_sdk::crypto::ContractId;
-use darkfi_serial::{deserialize, serialize};
+use darkfi_serial::{deserialize_async, serialize_async};
use log::{debug, error};
use tinyjson::JsonValue;
@@ -32,8 +32,7 @@ use darkfi::{
util::encoding::base64,
};
-use super::Darkfid;
-use crate::{server_error, RpcError};
+use crate::{server_error, Darkfid, RpcError};
impl Darkfid {
// RPCAPI:
@@ -48,7 +47,7 @@ impl Darkfid {
// struct serialized into base64.
//
// --> {"jsonrpc": "2.0", "method": "blockchain.get_slot", "params": ["0"], "id": 1}
- // <-- {"jsonrpc": "2.0", "result": "ABCD...", "id": 1}
+ // <-- {"jsonrpc": "2.0", "result": {...}, "id": 1}
pub async fn blockchain_get_slot(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::>().unwrap();
if params.len() != 1 || !params[0].is_string() {
@@ -60,15 +59,10 @@ impl Darkfid {
Err(_) => return JsonError::new(ParseError, None, id).into(),
};
- let validator_state = self.validator_state.read().await;
-
- let blocks = match validator_state.blockchain.get_blocks_by_slot(&[slot]) {
- Ok(v) => {
- drop(validator_state);
- v
- }
+ let blocks = match self.validator.blockchain.get_blocks_by_slot(&[slot]) {
+ Ok(v) => v,
Err(e) => {
- error!("[RPC] blockchain.get_slot: Failed fetching block by slot: {}", e);
+ error!(target: "darkfid::rpc::blockchain_get_slot", "Failed fetching block by slot: {}", e);
return JsonError::new(InternalError, None, id).into()
}
};
@@ -77,7 +71,7 @@ impl Darkfid {
return server_error(RpcError::UnknownSlot, id, None)
}
- let block = base64::encode(&serialize(&blocks[0]));
+ let block = base64::encode(&serialize_async(&blocks[0]).await);
JsonResponse::new(JsonValue::String(block), id).into()
}
@@ -96,7 +90,7 @@ impl Darkfid {
// <-- {"jsonrpc": "2.0", "result": "ABCD...", "id": 1}
pub async fn blockchain_get_tx(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::>().unwrap();
- if params.len() != 1 || !params[0].is_string() {
+ if params.len() != 1 {
return JsonError::new(InvalidParams, None, id).into()
}
@@ -106,25 +100,19 @@ impl Darkfid {
Err(_) => return JsonError::new(ParseError, None, id).into(),
};
- let validator_state = self.validator_state.read().await;
-
- let txs = match validator_state.blockchain.transactions.get(&[tx_hash], true) {
- Ok(txs) => {
- drop(validator_state);
- txs
- }
+ let txs = match self.validator.blockchain.transactions.get(&[tx_hash], true) {
+ Ok(txs) => txs,
Err(e) => {
- error!("[RPC] blockchain.get_tx: Failed fetching tx by hash: {}", e);
+ error!(target: "darkfid::rpc::blockchain_get_tx", "Failed fetching tx by hash: {}", e);
return JsonError::new(InternalError, None, id).into()
}
};
-
// This would be an logic error somewhere
assert_eq!(txs.len(), 1);
// and strict was used during .get()
let tx = txs[0].as_ref().unwrap();
- let tx_enc = base64::encode(&serialize(tx));
+ let tx_enc = base64::encode(&serialize_async(tx).await);
JsonResponse::new(JsonValue::String(tx_enc), id).into()
}
@@ -145,12 +133,12 @@ impl Darkfid {
return JsonError::new(InvalidParams, None, id).into()
}
- let blockchain = { self.validator_state.read().await.blockchain.clone() };
+ let blockchain = self.validator.blockchain.clone();
let Ok(last_slot) = blockchain.last() else {
return JsonError::new(InternalError, None, id).into()
};
- JsonResponse::new(JsonValue::String(last_slot.0.to_string()), id).into()
+ JsonResponse::new(JsonValue::Number(last_slot.0 as f64), id).into()
}
// RPCAPI:
@@ -166,23 +154,47 @@ impl Darkfid {
return JsonError::new(InvalidParams, None, id).into()
}
- self.validator_state.read().await.subscribers.get("blocks").unwrap().clone().into()
+ self.subscribers.get("blocks").unwrap().clone().into()
}
// RPCAPI:
- // Initializes a subscription to erroneous transactions notifications.
+ // Initializes a subscription to new incoming transactions.
// Once a subscription is established, `darkfid` will send JSON-RPC notifications of
- // erroneous transactions to the subscriber.
+ // new incoming transactions to the subscriber.
//
- // --> {"jsonrpc": "2.0", "method": "blockchain.subscribe_err_txs", "params": [], "id": 1}
- // <-- {"jsonrpc": "2.0", "method": "blockchain.subscribe_err_txs", "params": [`tx_hash`]}
- pub async fn blockchain_subscribe_err_txs(&self, id: u16, params: JsonValue) -> JsonResult {
+ // --> {"jsonrpc": "2.0", "method": "blockchain.subscribe_txs", "params": [], "id": 1}
+ // <-- {"jsonrpc": "2.0", "method": "blockchain.subscribe_txs", "params": [`tx_hash`]}
+ pub async fn blockchain_subscribe_txs(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::>().unwrap();
if !params.is_empty() {
return JsonError::new(InvalidParams, None, id).into()
}
- self.validator_state.read().await.subscribers.get("err_txs").unwrap().clone().into()
+ self.subscribers.get("txs").unwrap().clone().into()
+ }
+
+ // RPCAPI:
+ // Initializes a subscription to new incoming proposals, assuming node participates
+ // in consensus. Once a subscription is established, `darkfid` will send JSON-RPC
+ // notifications of new incoming proposals to the subscriber.
+ //
+ // --> {"jsonrpc": "2.0", "method": "blockchain.subscribe_proposals", "params": [], "id": 1}
+ // <-- {"jsonrpc": "2.0", "method": "blockchain.subscribe_proposals", "params": [`blockinfo`]}
+ pub async fn blockchain_subscribe_proposals(&self, id: u16, params: JsonValue) -> JsonResult {
+ let params = params.get::>().unwrap();
+ if !params.is_empty() {
+ return JsonError::new(InvalidParams, None, id).into()
+ }
+
+ // Since proposals subscriber is only active if we participate in consensus,
+ // we have to check if it actually exists in the subscribers map.
+ let proposals_subscriber = self.subscribers.get("proposals");
+ if proposals_subscriber.is_none() {
+ error!(target: "darkfid::rpc::blockchain_subscribe_proposals", "Proposals subscriber not found");
+ return JsonError::new(InternalError, None, id).into()
+ }
+
+ proposals_subscriber.unwrap().clone().into()
}
// RPCAPI:
@@ -193,7 +205,7 @@ impl Darkfid {
// * `array[0]`: base58-encoded contract ID string
//
// **Returns:**
- // * `array[n]`: Pairs of: `zkas_namespace` string, serialized and base64-encoded
+ // * `array[n]`: Pairs of: `zkas_namespace` string, serialized
// [`ZkBinary`](https://darkrenaissance.github.io/darkfi/development/darkfi/zkas/decoder/struct.ZkBinary.html)
// object
//
@@ -209,12 +221,12 @@ impl Darkfid {
let contract_id = match ContractId::from_str(contract_id) {
Ok(v) => v,
Err(e) => {
- error!("[RPC] blockchain.lookup_zkas: Error decoding string to ContractId: {}", e);
+ error!(target: "darkfid::rpc::blockchain_lookup_zkas", "Error decoding string to ContractId: {}", e);
return JsonError::new(InvalidParams, None, id).into()
}
};
- let blockchain = { self.validator_state.read().await.blockchain.clone() };
+ let blockchain = self.validator.blockchain.clone();
let Ok(zkas_db) = blockchain.contracts.lookup(
&blockchain.sled_db,
@@ -222,7 +234,7 @@ impl Darkfid {
SMART_CONTRACT_ZKAS_DB_NAME,
) else {
error!(
- "[RPC] blockchain.lookup_zkas: Did not find zkas db for ContractId: {}",
+ target: "darkfid::rpc::blockchain_lookup_zkas", "Did not find zkas db for ContractId: {}",
contract_id
);
return server_error(RpcError::ContractZkasDbNotFound, id, None)
@@ -231,13 +243,13 @@ impl Darkfid {
let mut ret = vec![];
for i in zkas_db.iter() {
- debug!("Iterating over zkas db");
+ debug!(target: "darkfid::rpc::blockchain_lookup_zkas", "Iterating over zkas db");
let Ok((zkas_ns, zkas_bytes)) = i else {
- error!("Internal sled error iterating db");
+ error!(target: "darkfid::rpc::blockchain_lookup_zkas", "Internal sled error iterating db");
return JsonError::new(InternalError, None, id).into()
};
- let Ok(zkas_ns) = deserialize(&zkas_ns) else {
+ let Ok(zkas_ns) = deserialize_async(&zkas_ns).await else {
return JsonError::new(InternalError, None, id).into()
};
@@ -250,4 +262,31 @@ impl Darkfid {
JsonResponse::new(JsonValue::Array(ret), id).into()
}
+
+ // RPCAPI:
+ // Returns the `chain_id` used for merge mining. A 32-byte hash of the genesis block.
+ //
+ // --> {"jsonrpc": "2.0", "method": "merge_mining_get_chain_id", "params": [], "id": 0}
+ // <-- {"jsonrpc": "2.0", "result": {"chain_id": 02f8...7863"}, "id": 0}
+ pub async fn merge_mining_get_chain_id(&self, id: u16, _params: JsonValue) -> JsonResult {
+ let chain_id = match self.validator.blockchain.genesis() {
+ Ok((_, v)) => v,
+ Err(e) => {
+ error!(
+ target: "darkfid::rpc::merge_mining_get_chain_id",
+ "[RPC] Error looking up genesis block: {}", e,
+ );
+ return JsonError::new(InternalError, None, id).into()
+ }
+ };
+
+ JsonResponse::new(
+ JsonValue::Object(HashMap::from([(
+ "chain_id".to_string(),
+ chain_id.to_hex().to_string().into(),
+ )])),
+ id,
+ )
+ .into()
+ }
}
diff --git a/bin/darkfid/src/rpc_misc.rs b/bin/darkfid/src/rpc_misc.rs
deleted file mode 100644
index 174d6d1f4..000000000
--- a/bin/darkfid/src/rpc_misc.rs
+++ /dev/null
@@ -1,85 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use tinyjson::JsonValue;
-
-use darkfi::{
- rpc::jsonrpc::{ErrorCode, JsonError, JsonResponse, JsonResult},
- util::time::Timestamp,
-};
-
-use super::Darkfid;
-
-impl Darkfid {
- // RPCAPI:
- // Returns current system clock as u64 (string) timestamp
- //
- // --> {"jsonrpc": "2.0", "method": "clock", "params": [], "id": 1}
- // <-- {"jsonrpc": "2.0", "result": "1234"}, "id": 1}
- pub async fn misc_clock(&self, id: u16, _params: JsonValue) -> JsonResult {
- JsonResponse::new(JsonValue::String(Timestamp::current_time().0.to_string()), id).into()
- }
-
- // RPCAPI:
- // Activate or deactivate dnet in the sync P2P stack.
- // By sending `true`, dnet will be activated, and by sending `false` dnet
- // will be deactivated. Returns `true` on success.
- //
- // --> {"jsonrpc": "2.0", "method": "sync_dnet_switch", "params": [true], "id": 42}
- // <-- {"jsonrpc": "2.0", "result": true, "id": 42}
- pub async fn misc_sync_dnet_switch(&self, id: u16, params: JsonValue) -> JsonResult {
- let params = params.get::>().unwrap();
- if params.len() != 1 || !params[0].is_bool() {
- return JsonError::new(ErrorCode::InvalidParams, None, id).into()
- }
-
- let switch = params[0].get::().unwrap();
-
- if *switch {
- self.sync_p2p.as_ref().unwrap().dnet_enable().await;
- } else {
- self.sync_p2p.as_ref().unwrap().dnet_disable().await;
- }
-
- JsonResponse::new(JsonValue::Boolean(true), id).into()
- }
-
- // RPCAPI:
- // Activate or deactivate dnet in the consensus P2P stack.
- // By sending `true`, dnet will be activated, and by sending `false` dnet
- // will be deactivated. Returns `true` on success.
- //
- // --> {"jsonrpc": "2.0", "method": "consensus_dnet_switch", "params": [true], "id": 42}
- // <-- {"jsonrpc": "2.0", "result": true, "id": 42}
- pub async fn misc_consensus_dnet_switch(&self, id: u16, params: JsonValue) -> JsonResult {
- let params = params.get::>().unwrap();
- if params.len() != 1 || !params[0].is_bool() {
- return JsonError::new(ErrorCode::InvalidParams, None, id).into()
- }
-
- let switch = params[0].get::().unwrap();
-
- if *switch {
- self.consensus_p2p.as_ref().unwrap().dnet_enable().await;
- } else {
- self.consensus_p2p.as_ref().unwrap().dnet_disable().await;
- }
-
- JsonResponse::new(JsonValue::Boolean(true), id).into()
- }
-}
diff --git a/bin/darkfid/src/rpc_tx.rs b/bin/darkfid/src/rpc_tx.rs
index 3ac5ddb54..e551ed8db 100644
--- a/bin/darkfid/src/rpc_tx.rs
+++ b/bin/darkfid/src/rpc_tx.rs
@@ -16,12 +16,15 @@
* along with this program. If not, see .
*/
-use darkfi_serial::deserialize;
-use log::{error, warn};
+use darkfi_serial::deserialize_async;
+use log::error;
use tinyjson::JsonValue;
use darkfi::{
- rpc::jsonrpc::{ErrorCode::InvalidParams, JsonError, JsonResponse, JsonResult},
+ rpc::jsonrpc::{
+ ErrorCode::{InternalError, InvalidParams},
+ JsonError, JsonResponse, JsonResult,
+ },
tx::Transaction,
util::encoding::base64,
};
@@ -35,7 +38,7 @@ impl Darkfid {
// Returns `true` if the transaction is valid, otherwise, a corresponding
// error.
//
- // --> {"jsonrpc": "2.0", "method": "tx.simulate", "params": ["base58encodedTX"], "id": 1}
+ // --> {"jsonrpc": "2.0", "method": "tx.simulate", "params": ["base64encodedTX"], "id": 1}
// <-- {"jsonrpc": "2.0", "result": true, "id": 1}
pub async fn tx_simulate(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::>().unwrap();
@@ -43,43 +46,38 @@ impl Darkfid {
return JsonError::new(InvalidParams, None, id).into()
}
- if !(*self.synced.lock().await) {
- error!("[RPC] tx.simulate: Blockchain is not synced");
+ if !*self.validator.synced.read().await {
+ error!(target: "darkfid::rpc::tx_simulate", "Blockchain is not synced");
return server_error(RpcError::NotSynced, id, None)
}
// Try to deserialize the transaction
-        let tx_enc = params[0].get::<String>().unwrap();
- let tx_bytes = match base64::decode(tx_enc.trim()) {
+ let tx_enc = params[0].get::().unwrap().trim();
+ let tx_bytes = match base64::decode(tx_enc) {
Some(v) => v,
None => {
- error!("[RPC] tx.simulate: Failed decoding base64 transaction");
+ error!(target: "darkfid::rpc::tx_simulate", "Failed decoding base64 transaction");
return server_error(RpcError::ParseError, id, None)
}
};
- let tx: Transaction = match deserialize(&tx_bytes) {
+ let tx: Transaction = match deserialize_async(&tx_bytes).await {
Ok(v) => v,
Err(e) => {
- error!("[RPC] tx.simulate: Failed deserializing bytes into Transaction: {}", e);
+ error!(target: "darkfid::rpc::tx_simulate", "Failed deserializing bytes into Transaction: {}", e);
return server_error(RpcError::ParseError, id, None)
}
};
// Simulate state transition
- let lock = self.validator_state.read().await;
- let current_slot = lock.consensus.time_keeper.current_slot();
- match lock.verify_transactions(&[tx], current_slot, false).await {
- Ok(erroneous_txs) => {
- if !erroneous_txs.is_empty() {
- error!("[RPC] tx.simulate: invalid transaction provided");
- return server_error(RpcError::TxSimulationFail, id, None)
- }
- }
- Err(e) => {
- error!("[RPC] tx.simulate: Failed to validate state transition: {}", e);
- return server_error(RpcError::TxSimulationFail, id, None)
- }
+ let current_slot = self.validator.consensus.time_keeper.current_slot();
+ let result = self.validator.add_transactions(&[tx], current_slot, false).await;
+ if result.is_err() {
+ error!(
+ target: "darkfid::rpc::tx_simulate", "Failed to validate state transition: {}",
+ result.err().unwrap()
+ );
+ return server_error(RpcError::TxSimulationFail, id, None)
};
JsonResponse::new(JsonValue::Boolean(true), id).into()
@@ -91,7 +89,7 @@ impl Darkfid {
// if the transaction is actually valid, and in turn it will return an
// error if this is the case. Otherwise, a transaction ID will be returned.
//
- // --> {"jsonrpc": "2.0", "method": "tx.broadcast", "params": ["base58encodedTX"], "id": 1}
+ // --> {"jsonrpc": "2.0", "method": "tx.broadcast", "params": ["base64encodedTX"], "id": 1}
// <-- {"jsonrpc": "2.0", "result": "txID...", "id": 1}
pub async fn tx_broadcast(&self, id: u16, params: JsonValue) -> JsonResult {
        let params = params.get::<Vec<JsonValue>>().unwrap();
@@ -99,66 +97,124 @@ impl Darkfid {
return JsonError::new(InvalidParams, None, id).into()
}
- if !(*self.synced.lock().await) {
- error!("[RPC] tx.transfer: Blockchain is not synced");
+ if !*self.validator.synced.read().await {
+ error!(target: "darkfid::rpc::tx_broadcast", "Blockchain is not synced");
return server_error(RpcError::NotSynced, id, None)
}
// Try to deserialize the transaction
-        let tx_enc = params[0].get::<String>().unwrap();
- let tx_bytes = match base64::decode(tx_enc.trim()) {
+ let tx_enc = params[0].get::().unwrap().trim();
+ let tx_bytes = match base64::decode(tx_enc) {
Some(v) => v,
None => {
- error!("[RPC] tx.broadcast: Failed decoding base64 transaction");
+ error!(target: "darkfid::rpc::tx_broadcast", "Failed decoding base64 transaction");
return server_error(RpcError::ParseError, id, None)
}
};
- let tx: Transaction = match deserialize(&tx_bytes) {
+ let tx: Transaction = match deserialize_async(&tx_bytes).await {
Ok(v) => v,
Err(e) => {
- error!("[RPC] tx.broadcast: Failed deserializing bytes into Transaction: {}", e);
+ error!(target: "darkfid::rpc::tx_broadcast", "Failed deserializing bytes into Transaction: {}", e);
return server_error(RpcError::ParseError, id, None)
}
};
if self.consensus_p2p.is_some() {
- // Consider we're participating in consensus here?
- // The append_tx function performs a state transition check.
- if !self.validator_state.write().await.append_tx(tx.clone()).await {
- error!("[RPC] tx.broadcast: Failed to append transaction to mempool");
- return server_error(RpcError::TxBroadcastFail, id, None)
+ // Consensus participants can directly perform
+ // the state transition check and append to their
+ // pending transactions store.
+ if self.validator.append_tx(&tx).await.is_err() {
+ error!(target: "darkfid::rpc::tx_broadcast", "Failed to append transaction to mempool");
+ return server_error(RpcError::TxSimulationFail, id, None)
}
} else {
// We'll perform the state transition check here.
- let lock = self.validator_state.read().await;
- let current_slot = lock.consensus.time_keeper.current_slot();
- match lock.verify_transactions(&[tx.clone()], current_slot, false).await {
- Ok(erroneous_txs) => {
- if !erroneous_txs.is_empty() {
- error!("[RPC] tx.broadcast: invalid transaction provided");
- return server_error(RpcError::TxSimulationFail, id, None)
- }
- }
- Err(e) => {
- error!("[RPC] tx.broadcast: Failed to validate state transition: {}", e);
- return server_error(RpcError::TxSimulationFail, id, None)
- }
+ let current_slot = self.validator.consensus.time_keeper.current_slot();
+ let result = self.validator.add_transactions(&[tx.clone()], current_slot, false).await;
+ if result.is_err() {
+ error!(
+ target: "darkfid::rpc::tx_broadcast", "Failed to validate state transition: {}",
+ result.err().unwrap()
+ );
+ return server_error(RpcError::TxSimulationFail, id, None)
};
}
- if let Some(sync_p2p) = &self.sync_p2p {
- sync_p2p.broadcast(&tx).await;
- if sync_p2p.channels().await.is_empty() {
- error!("[RPC] tx.broadcast: Failed broadcasting tx, no connected channels");
- return server_error(RpcError::TxBroadcastFail, id, None)
- }
- } else {
- warn!("[RPC] tx.broadcast: No sync P2P network, not broadcasting transaction.");
+ self.sync_p2p.broadcast(&tx).await;
+ if self.sync_p2p.channels().await.is_empty() {
+ error!(target: "darkfid::rpc::tx_broadcast", "Failed broadcasting tx, no connected channels");
return server_error(RpcError::TxBroadcastFail, id, None)
}
let tx_hash = tx.hash().unwrap().to_string();
JsonResponse::new(JsonValue::String(tx_hash), id).into()
}
+
+ // RPCAPI:
+ // Queries the node pending transactions store to retrieve all transactions.
+ // Returns a vector of hex-encoded transaction hashes.
+ //
+ // --> {"jsonrpc": "2.0", "method": "tx.pending", "params": [], "id": 1}
+ // <-- {"jsonrpc": "2.0", "result": "[TxHash,...]", "id": 1}
+ pub async fn tx_pending(&self, id: u16, params: JsonValue) -> JsonResult {
+        let params = params.get::<Vec<JsonValue>>().unwrap();
+ if !params.is_empty() {
+ return JsonError::new(InvalidParams, None, id).into()
+ }
+
+ if !*self.validator.synced.read().await {
+ error!(target: "darkfid::rpc::tx_pending", "Blockchain is not synced");
+ return server_error(RpcError::NotSynced, id, None)
+ }
+
+ let pending_txs = match self.validator.blockchain.get_pending_txs() {
+ Ok(v) => v,
+ Err(e) => {
+ error!(target: "darkfid::rpc::tx_pending", "Failed fetching pending txs: {}", e);
+ return JsonError::new(InternalError, None, id).into()
+ }
+ };
+
+        let pending_txs: Vec<JsonValue> =
+ pending_txs.iter().map(|x| JsonValue::String(x.hash().unwrap().to_string())).collect();
+
+ JsonResponse::new(JsonValue::Array(pending_txs), id).into()
+ }
+
+ // RPCAPI:
+ // Queries the node pending transactions store to remove all transactions.
+ // Returns a vector of hex-encoded transaction hashes.
+ //
+ // --> {"jsonrpc": "2.0", "method": "tx.clean_pending", "params": [], "id": 1}
+ // <-- {"jsonrpc": "2.0", "result": "[TxHash,...]", "id": 1}
+ pub async fn tx_clean_pending(&self, id: u16, params: JsonValue) -> JsonResult {
+        let params = params.get::<Vec<JsonValue>>().unwrap();
+ if !params.is_empty() {
+ return JsonError::new(InvalidParams, None, id).into()
+ }
+
+ if !*self.validator.synced.read().await {
+ error!(target: "darkfid::rpc::tx_clean_pending", "Blockchain is not synced");
+ return server_error(RpcError::NotSynced, id, None)
+ }
+
+ let pending_txs = match self.validator.blockchain.get_pending_txs() {
+ Ok(v) => v,
+ Err(e) => {
+ error!(target: "darkfid::rpc::tx_clean_pending", "Failed fetching pending txs: {}", e);
+ return JsonError::new(InternalError, None, id).into()
+ }
+ };
+
+ if let Err(e) = self.validator.blockchain.remove_pending_txs(&pending_txs) {
+ error!(target: "darkfid::rpc::tx_clean_pending", "Failed fetching pending txs: {}", e);
+ return JsonError::new(InternalError, None, id).into()
+ };
+
+        let pending_txs: Vec<JsonValue> =
+ pending_txs.iter().map(|x| JsonValue::String(x.hash().unwrap().to_string())).collect();
+
+ JsonResponse::new(JsonValue::Array(pending_txs), id).into()
+ }
}
diff --git a/bin/darkfid/src/rpc_wallet.rs b/bin/darkfid/src/rpc_wallet.rs
deleted file mode 100644
index b14ddbda7..000000000
--- a/bin/darkfid/src/rpc_wallet.rs
+++ /dev/null
@@ -1,441 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
- */
-
-/*
-use log::{debug, error};
-use serde_json::{json, Value};
-
-use darkfi::{
- rpc::jsonrpc::{
- ErrorCode::{InternalError, InvalidParams, ParseError},
- JsonError, JsonResponse, JsonResult,
- },
- wallet::walletdb::QueryType,
-};
-
-use super::{error::RpcError, server_error, Darkfid};
-*/
-use darkfi::rpc::jsonrpc::JsonResult;
-use tinyjson::JsonValue;
-
-use super::Darkfid;
-
-impl Darkfid {
- // RPCAPI:
- // Attempts to query for a single row in a given table.
- // The parameters given contain paired metadata so we know how to decode the SQL data.
- // An example of `params` is as such:
- // ```
- // params[0] -> "sql query"
- // params[1] -> column_type
- // params[2] -> "column_name"
- // ...
- // params[n-1] -> column_type
- // params[n] -> "column_name"
- // ```
- // This function will fetch the first row it finds, if any. The `column_type` field
- // is a type available in the `WalletDb` API as an enum called `QueryType`. If a row
- // is not found, the returned result will be a JSON-RPC error.
- // NOTE: This is obviously vulnerable to SQL injection. Open to interesting solutions.
- //
- // --> {"jsonrpc": "2.0", "method": "wallet.query_row_single", "params": [...], "id": 1}
- // <-- {"jsonrpc": "2.0", "result": ["va", "lu", "es", ...], "id": 1}
- pub async fn wallet_query_row_single(&self, _id: u16, _params: JsonValue) -> JsonResult {
- todo!();
- /* TODO: This will be abstracted away
- // We need at least 3 params for something we want to fetch, and we want them in pairs.
- // Also the first param should be a String
- if params.len() < 3 || params[1..].len() % 2 != 0 || !params[0].is_string() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- // The remaining pairs should be typed properly too
-        let mut types: Vec<QueryType> = vec![];
- let mut names: Vec<&str> = vec![];
- for pair in params[1..].chunks(2) {
- if !pair[0].is_u64() || !pair[1].is_string() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- let typ = pair[0].as_u64().unwrap();
- if typ >= QueryType::Last as u64 {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- types.push((typ as u8).into());
- names.push(pair[1].as_str().unwrap());
- }
-
- // Get a wallet connection
- let mut conn = match self.wallet.conn.acquire().await {
- Ok(v) => v,
- Err(e) => {
- error!("[RPC] wallet.query_row_single: Failed to acquire wallet connection: {}", e);
- return JsonError::new(InternalError, None, id).into()
- }
- };
-
- // Execute the query and see if we find a row
- let row = match sqlx::query(params[0].as_str().unwrap()).fetch_one(&mut conn).await {
- Ok(v) => Some(v),
- Err(_) => None,
- };
-
- // Try to decode the row into what was requested
-        let mut ret: Vec<Value> = vec![];
-
- for (typ, col) in types.iter().zip(names) {
- match typ {
- QueryType::Integer => {
- let Some(ref row) = row else {
- error!("[RPC] wallet.query_row_single: Got None for QueryType::Integer");
- return server_error(RpcError::NoRowsFoundInWallet, id, None)
- };
-
- let value: i32 = match row.try_get(col) {
- Ok(v) => v,
- Err(e) => {
- error!("[RPC] wallet.query_row_single: {}", e);
- return JsonError::new(ParseError, None, id).into()
- }
- };
-
- ret.push(json!(value));
- continue
- }
-
- QueryType::Blob => {
- let Some(ref row) = row else {
- error!("[RPC] wallet.query_row_single: Got None for QueryType::Blob");
- return server_error(RpcError::NoRowsFoundInWallet, id, None)
- };
-
-                    let value: Vec<u8> = match row.try_get(col) {
- Ok(v) => v,
- Err(e) => {
- error!("[RPC] wallet.query_row_single: {}", e);
- return JsonError::new(ParseError, None, id).into()
- }
- };
-
- ret.push(json!(value));
- continue
- }
-
- QueryType::OptionInteger => {
- let Some(ref row) = row else {
-                        ret.push(json!(None::<i32>));
- continue
- };
-
- let value: i32 = match row.try_get(col) {
- Ok(v) => v,
- Err(e) => {
- error!("[RPC] wallet.query_row_single: {}", e);
- return JsonError::new(ParseError, None, id).into()
- }
- };
-
- ret.push(json!(value));
- continue
- }
-
- QueryType::OptionBlob => {
- let Some(ref row) = row else {
-                        ret.push(json!(None::<Vec<u8>>));
- continue
- };
-
-                    let value: Vec<u8> = match row.try_get(col) {
- Ok(v) => v,
- Err(e) => {
- error!("[RPC] wallet.query_row_single: {}", e);
- return JsonError::new(ParseError, None, id).into()
- }
- };
-
- ret.push(json!(value));
- continue
- }
-
- QueryType::Text => {
- let Some(ref row) = row else {
- error!("[RPC] wallet.query_row_single: Got None for QueryType::Text");
- return server_error(RpcError::NoRowsFoundInWallet, id, None)
- };
-
- let value: String = match row.try_get(col) {
- Ok(v) => v,
- Err(e) => {
- error!("[RPC] wallet.query_row_single: {}", e);
- return JsonError::new(ParseError, None, id).into()
- }
- };
-
- ret.push(json!(value));
- continue
- }
-
- _ => unreachable!(),
- }
- }
-
- JsonResponse::new(json!(ret), id).into()
- */
- }
-
- // RPCAPI:
- // Attempts to query for all available rows in a given table.
- // The parameters given contain paired metadata so we know how to decode the SQL data.
- // They're the same as above in `wallet.query_row_single`.
- // If there are any values found, they will be returned in a paired array. If not, an
- // empty array will be returned.
- //
- // --> {"jsonrpc": "2.0", "method": "wallet.query_row_multi", "params": [...], "id": 1}
- // <-- {"jsonrpc": "2.0", "result": [["va", "lu"], ["es", "es"], ...], "id": 1}
- pub async fn wallet_query_row_multi(&self, _id: u16, _params: JsonValue) -> JsonResult {
- todo!();
- /* TODO: This will be abstracted away
- // We need at least 3 params for something we want to fetch, and we want them in pairs.
- // Also the first param (the query) should be a String.
- if params.len() < 3 || params[1..].len() % 2 != 0 || !params[0].is_string() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- // The remaining pairs should be typed properly too
-        let mut types: Vec<QueryType> = vec![];
- let mut names: Vec<&str> = vec![];
- for pair in params[1..].chunks(2) {
- if !pair[0].is_u64() || !pair[1].is_string() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- let typ = pair[0].as_u64().unwrap();
- if typ >= QueryType::Last as u64 {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- types.push((typ as u8).into());
- names.push(pair[1].as_str().unwrap());
- }
-
- // Get a wallet connection
- let mut conn = match self.wallet.conn.acquire().await {
- Ok(v) => v,
- Err(e) => {
- error!("[RPC] wallet.query_row_multi: Failed to acquire wallet connection: {}", e);
- return JsonError::new(InternalError, None, id).into()
- }
- };
-
- // Execute the query and see if we find any rows
- let rows = match sqlx::query(params[0].as_str().unwrap()).fetch_all(&mut conn).await {
- Ok(v) => v,
- Err(e) => {
- error!("[RPC] wallet.query_row_multi: Failed to execute SQL query: {}", e);
- return JsonError::new(InternalError, None, id).into()
- }
- };
-
- debug!("[RPC] wallet.query_row_multi: Found {} rows", rows.len());
-
- // Try to decode whatever we've found
-        let mut ret: Vec<Vec<Value>> = vec![];
-
- for row in rows {
-            let mut row_ret: Vec<Value> = vec![];
- for (typ, col) in types.iter().zip(names.clone()) {
- match typ {
- QueryType::Integer => {
- let value: i32 = match row.try_get(col) {
- Ok(v) => v,
- Err(e) => {
- error!("[RPC] wallet.query_row_multi: {}", e);
- return JsonError::new(ParseError, None, id).into()
- }
- };
-
- row_ret.push(json!(value));
- }
-
- QueryType::Blob => {
-                        let value: Vec<u8> = match row.try_get(col) {
- Ok(v) => v,
- Err(e) => {
- error!("[RPC] wallet.query_row_multi: {}", e);
- return JsonError::new(ParseError, None, id).into()
- }
- };
-
- row_ret.push(json!(value));
- }
-
- QueryType::OptionInteger => {
-                        let value: Option<i32> = match row.try_get(col) {
- Ok(v) => Some(v),
- Err(_) => None,
- };
-
- row_ret.push(json!(value));
- }
-
- QueryType::OptionBlob => {
-                        let value: Option<Vec<u8>> = match row.try_get(col) {
- Ok(v) => Some(v),
- Err(_) => None,
- };
-
- row_ret.push(json!(value));
- }
-
- QueryType::Text => {
- let value: String = match row.try_get(col) {
- Ok(v) => v,
- Err(e) => {
- error!("[RPC] wallet.query_row_multi: {}", e);
- return JsonError::new(ParseError, None, id).into()
- }
- };
-
- row_ret.push(json!(value));
- }
-
- _ => unreachable!(),
- }
- }
-
- ret.push(row_ret);
- }
-
- JsonResponse::new(json!(ret), id).into()
- */
- }
-
- // RPCAPI:
- // Executes an arbitrary SQL query on the wallet, and returns `true` on success.
- // `params[1..]` can optionally be provided in pairs like in `wallet.query_row_single`.
- //
- // --> {"jsonrpc": "2.0", "method": "wallet.exec_sql", "params": ["CREATE TABLE ..."], "id": 1}
- // <-- {"jsonrpc": "2.0", "result": true, "id": 1}
- pub async fn wallet_exec_sql(&self, _id: u16, _params: JsonValue) -> JsonResult {
- todo!();
- /* TODO: This will be abstracted away
- if params.is_empty() || !params[0].is_string() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- if params.len() > 1 && params[1..].len() % 2 != 0 {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- let query = params[0].as_str().unwrap();
- debug!("Executing SQL query: {}", query);
- let mut query = sqlx::query(query);
-
- for pair in params[1..].chunks(2) {
- if !pair[0].is_u64() || pair[0].as_u64().unwrap() >= QueryType::Last as u64 {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- let typ = (pair[0].as_u64().unwrap() as u8).into();
- match typ {
- QueryType::Integer => {
- let val: i32 = match serde_json::from_value(pair[1].clone()) {
- Ok(v) => v,
- Err(e) => {
- error!("[RPC] wallet.exec_sql: Failed casting value to i32: {}", e);
- return JsonError::new(ParseError, None, id).into()
- }
- };
-
- query = query.bind(val);
- }
-
- QueryType::Blob => {
-                    let val: Vec<u8> = match serde_json::from_value(pair[1].clone()) {
- Ok(v) => v,
- Err(e) => {
-                            error!("[RPC] wallet.exec_sql: Failed casting value to Vec<u8>: {}", e);
- return JsonError::new(ParseError, None, id).into()
- }
- };
-
- query = query.bind(val);
- }
-
- QueryType::OptionInteger => {
-                    let val: Option<i32> = match serde_json::from_value(pair[1].clone()) {
- Ok(v) => v,
- Err(e) => {
- error!(
-                                "[RPC] wallet.exec_sql: Failed casting value to Option<i32>: {}",
- e
- );
- return JsonError::new(ParseError, None, id).into()
- }
- };
-
- query = query.bind(val);
- }
-
- QueryType::OptionBlob => {
-                    let val: Option<Vec<u8>> = match serde_json::from_value(pair[1].clone()) {
- Ok(v) => v,
- Err(e) => {
-                            error!("[RPC] wallet.exec_sql: Failed casting value to Option<Vec<u8>>: {}", e);
- return JsonError::new(ParseError, None, id).into()
- }
- };
-
- query = query.bind(val);
- }
-
- QueryType::Text => {
- let val: String = match serde_json::from_value(pair[1].clone()) {
- Ok(v) => v,
- Err(e) => {
- error!("[RPC] wallet.exec_sql: Failed casting value to String: {}", e);
- return JsonError::new(ParseError, None, id).into()
- }
- };
-
- query = query.bind(val);
- }
-
- _ => return JsonError::new(InvalidParams, None, id).into(),
- }
- }
-
- // Get a wallet connection
- let mut conn = match self.wallet.conn.acquire().await {
- Ok(v) => v,
- Err(e) => {
- error!("[RPC] wallet.exec_sql: Failed to acquire wallet connection: {}", e);
- return JsonError::new(InternalError, None, id).into()
- }
- };
-
- if let Err(e) = query.execute(&mut conn).await {
- error!("[RPC] wallet.exec_sql: Failed to execute sql query: {}", e);
- return JsonError::new(InternalError, None, id).into()
- };
-
- JsonResponse::new(json!(true), id).into()
- */
- }
-}
diff --git a/bin/darkfid2/src/task/miner.rs b/bin/darkfid/src/task/miner.rs
similarity index 98%
rename from bin/darkfid2/src/task/miner.rs
rename to bin/darkfid/src/task/miner.rs
index a2acfbadb..4d30271aa 100644
--- a/bin/darkfid2/src/task/miner.rs
+++ b/bin/darkfid/src/task/miner.rs
@@ -29,7 +29,6 @@ use darkfi::{
zkas::ZkBinary,
Result,
};
-use darkfi_consensus_contract::model::SECRET_KEY_PREFIX;
use darkfi_money_contract::{
client::pow_reward_v1::PoWRewardCallBuilder, MoneyFunction, MONEY_CONTRACT_ZKAS_MINT_NS_V1,
};
@@ -161,7 +160,8 @@ async fn generate_next_block(
// We are deriving the next secret key for optimization.
// Next secret is the poseidon hash of:
// [prefix, current(previous) secret, signing(block) height].
- let next_secret = poseidon_hash([SECRET_KEY_PREFIX, secret.inner(), height.into()]);
+ let prefix = pallas::Base::from_raw([4, 0, 0, 0]);
+ let next_secret = poseidon_hash([prefix, secret.inner(), height.into()]);
*secret = SecretKey::from(next_secret);
// Generate reward transaction
diff --git a/bin/darkfid2/src/task/mod.rs b/bin/darkfid/src/task/mod.rs
similarity index 100%
rename from bin/darkfid2/src/task/mod.rs
rename to bin/darkfid/src/task/mod.rs
diff --git a/bin/darkfid2/src/task/sync.rs b/bin/darkfid/src/task/sync.rs
similarity index 100%
rename from bin/darkfid2/src/task/sync.rs
rename to bin/darkfid/src/task/sync.rs
diff --git a/bin/darkfid2/src/tests/forks.rs b/bin/darkfid/src/tests/forks.rs
similarity index 100%
rename from bin/darkfid2/src/tests/forks.rs
rename to bin/darkfid/src/tests/forks.rs
diff --git a/bin/darkfid2/src/tests/harness.rs b/bin/darkfid/src/tests/harness.rs
similarity index 95%
rename from bin/darkfid2/src/tests/harness.rs
rename to bin/darkfid/src/tests/harness.rs
index 4ae04cdac..a39d7bc01 100644
--- a/bin/darkfid2/src/tests/harness.rs
+++ b/bin/darkfid/src/tests/harness.rs
@@ -36,7 +36,7 @@ use num_bigint::BigUint;
use url::Url;
use crate::{
- proto::BlockInfoMessage,
+ // proto::BlockInfoMessage,
task::sync::sync_task,
utils::{spawn_consensus_p2p, spawn_sync_p2p},
Darkfid,
@@ -65,16 +65,13 @@ impl Harness {
ex: &Arc>,
) -> Result {
// Use test harness to generate genesis transactions
- let mut th =
- TestHarness::new(&["money".to_string(), "consensus".to_string()], verify_fees).await?;
- let (genesis_stake_tx, _) = th.genesis_stake(&Holder::Alice, config.alice_initial)?;
+ let mut th = TestHarness::new(&["money".to_string()], verify_fees).await?;
let (genesis_mint_tx, _) = th.genesis_mint(&Holder::Bob, config.bob_initial)?;
// Generate default genesis block
let mut genesis_block = BlockInfo::default();
// Append genesis transactions and calculate their total
- genesis_block.txs.push(genesis_stake_tx);
genesis_block.txs.push(genesis_mint_tx);
let genesis_txs_total = genesis_txs_total(&genesis_block.txs).await?;
genesis_block.slots[0].total_tokens = genesis_txs_total;
@@ -171,12 +168,12 @@ impl Harness {
pub async fn add_blocks(&self, blocks: &[BlockInfo]) -> Result<()> {
// We simply broadcast the block using Alice's sync P2P
- for block in blocks {
- self.alice.sync_p2p.broadcast(&BlockInfoMessage::from(block)).await;
+ for _block in blocks {
+ //self.alice.sync_p2p.broadcast(&BlockInfoMessage::from(block)).await;
}
// and then add it to her chain
- self.alice.validator.add_blocks(blocks).await?;
+ //self.alice.validator.add_blocks(blocks).await?;
Ok(())
}
diff --git a/bin/darkfid2/src/tests/mod.rs b/bin/darkfid/src/tests/mod.rs
similarity index 100%
rename from bin/darkfid2/src/tests/mod.rs
rename to bin/darkfid/src/tests/mod.rs
diff --git a/bin/darkfid2/src/utils.rs b/bin/darkfid/src/utils.rs
similarity index 100%
rename from bin/darkfid2/src/utils.rs
rename to bin/darkfid/src/utils.rs
diff --git a/bin/darkfid2/Cargo.toml b/bin/darkfid2/Cargo.toml
deleted file mode 100644
index 9a2d2fac8..000000000
--- a/bin/darkfid2/Cargo.toml
+++ /dev/null
@@ -1,44 +0,0 @@
-[package]
-name = "darkfid2"
-version = "0.4.1"
-homepage = "https://dark.fi"
-description = "DarkFi node daemon"
-authors = ["Dyne.org foundation <hello@dyne.org>"]
-repository = "https://github.com/darkrenaissance/darkfi"
-license = "AGPL-3.0-only"
-edition = "2021"
-
-[dependencies]
-# Darkfi
-darkfi = {path = "../../", features = ["async-daemonize", "bs58"]}
-darkfi_consensus_contract = {path = "../../src/contract/consensus"}
-darkfi_money_contract = {path = "../../src/contract/money"}
-darkfi-contract-test-harness = {path = "../../src/contract/test-harness"}
-darkfi-sdk = {path = "../../src/sdk"}
-darkfi-serial = {path = "../../src/serial"}
-
-# Misc
-blake3 = "1.5.0"
-bs58 = "0.5.0"
-log = "0.4.20"
-num-bigint = "0.4.4"
-rand = "0.8.5"
-sled = "0.34.7"
-toml = "0.8.8"
-
-# JSON-RPC
-async-trait = "0.1.77"
-tinyjson = "2.5.1"
-url = "2.5.0"
-
-# Daemon
-easy-parallel = "3.3.1"
-signal-hook-async-std = "0.2.2"
-signal-hook = "0.3.17"
-simplelog = "0.12.1"
-smol = "1.3.0"
-
-# Argument parsing
-serde = {version = "1.0.195", features = ["derive"]}
-structopt = "0.3.26"
-structopt-toml = "0.5.1"
diff --git a/bin/darkfid2/Makefile b/bin/darkfid2/Makefile
deleted file mode 100644
index c5a759666..000000000
--- a/bin/darkfid2/Makefile
+++ /dev/null
@@ -1,42 +0,0 @@
-.POSIX:
-
-# Install prefix
-PREFIX = $(HOME)/.cargo
-
-# Cargo binary
-CARGO = cargo +nightly
-
-# Compile target
-RUST_TARGET = $(shell rustc -Vv | grep '^host: ' | cut -d' ' -f2)
-# Uncomment when doing musl static builds
-#RUSTFLAGS = -C target-feature=+crt-static -C link-self-contained=yes
-
-SRC = \
- Cargo.toml \
- ../../Cargo.toml \
- $(shell find src -type f -name '*.rs') \
- $(shell find ../../src -type f -name '*.rs') \
- $(shell find ../../src/contract -type f -name '*.wasm')
-
-BIN = $(shell grep '^name = ' Cargo.toml | cut -d' ' -f3 | tr -d '"')
-
-all: $(BIN)
-
-$(BIN): $(SRC)
- RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) build --target=$(RUST_TARGET) --release --package $@
- cp -f ../../target/$(RUST_TARGET)/release/$@ $@
- cp -f ../../target/$(RUST_TARGET)/release/$@ ../../$@
-
-clean:
- RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) clean --target=$(RUST_TARGET) --release --package $(BIN)
- rm -f $(BIN) ../../$(BIN)
-
-install: all
- mkdir -p $(DESTDIR)$(PREFIX)/bin
- cp -f $(BIN) $(DESTDIR)$(PREFIX)/bin
- chmod 755 $(DESTDIR)$(PREFIX)/bin/$(BIN)
-
-uninstall:
- rm -f $(DESTDIR)$(PREFIX)/bin/$(BIN)
-
-.PHONY: all clean install uninstall
diff --git a/bin/darkfid2/darkfid_config.toml b/bin/darkfid2/darkfid_config.toml
deleted file mode 100644
index 3d2ffd473..000000000
--- a/bin/darkfid2/darkfid_config.toml
+++ /dev/null
@@ -1,475 +0,0 @@
-## darkfid configuration file
-##
-## Please make sure you go through all the settings so you can configure
-## your daemon properly.
-##
-## The default values are left commented. They can be overridden either by
-## uncommenting, or by using the command-line.
-
-# JSON-RPC listen URL
-rpc_listen = "tcp://127.0.0.1:8340"
-
-# Blockchain network to use
-network = "testnet"
-
-# Localnet blockchain network configuration
-[network_config."localnet"]
-# Path to the blockchain database directory
-database = "~/.local/darkfi/darkfid_blockchain_localnet"
-
-# Finalization threshold, denominated by number of blocks
-threshold = 3
-
-# minerd JSON-RPC endpoint
-minerd_endpoint = "tcp://127.0.0.1:28467"
-
-# PoW block production target, in seconds
-pow_target = 10
-
-# Optional fixed PoW difficulty, used for testing
-pow_fixed_difficulty = 1
-
-# Epoch duration, denominated by number of blocks/slots
-epoch_length = 10
-
-# PoS slot duration, in seconds
-slot_time = 10
-
-# Whitelisted faucet addresses
-faucet_pub = []
-
-# Participate in the consensus protocol
-consensus = true
-
-# Wallet address to receive consensus rewards.
-# This is a dummy one so the miner can start,
-# replace with your own one.
-recipient = "5ZHfYpt4mpJcwBNxfEyxLzeFJUEeoePs5NQ5jVEgHrMf"
-
-# Skip syncing process and start node right away
-skip_sync = true
-
-# Enable PoS testing mode for local testing
-pos_testing_mode = true
-
-## Localnet sync P2P network settings
-[network_config."localnet".sync_net]
-# P2P accept addresses the instance listens on for inbound connections
-inbound = ["tcp+tls://0.0.0.0:8242"]
-
-# P2P external addresses the instance advertises so other peers can
-# reach us and connect to us, as long as inbound addrs are configured.
-#external_addrs = []
-
-# Peer nodes to manually connect to
-#peers = []
-
-# Seed nodes to connect to for peer discovery and/or adversising our
-# own external addresses
-#seeds = []
-
-# Whitelisted network transports for outbound connections
-#allowed_transports = ["tcp+tls"]
-
-# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
-#transport_mixing = true
-
-# Outbound connection slots number, this many connections will be
-# attempted. (This does not include manual connections)
-#outbound_connections = 8
-
-# Inbound connections slots number, this many active inbound connections
-# will be allowed. (This does not include manual or outbound connections)
-#inbound_connections = 0
-
-# Manual connections retry limit, 0 for forever looping
-#manual_attempt_limit = 0
-
-# Outbound connection timeout (in seconds)
-#outbound_connect_timeout = 10
-
-# Exchange versions (handshake) timeout (in seconds)
-#channel_handshake_timeout = 4
-
-# Ping-pong exchange execution interval (in seconds)
-#channel_heartbeat_interval = 10
-
-# Allow localnet hosts
-localnet = true
-
-# Delete a peer from hosts if they've been quarantined N times
-#hosts_quarantine_limit = 50
-
-# Cooling off time for peer discovery when unsuccessful
-#outbound_peer_discovery_cooloff_time = 30
-
-# Time between peer discovery attempts
-#outbound_peer_discovery_attempt_time = 5
-
-## Localnet consensus P2P network settings
-[network_config."localnet".consensus_net]
-# P2P accept addresses the instance listens on for inbound connections
-#inbound = ["tcp+tls://0.0.0.0:8241"]
-
-# P2P external addresses the instance advertises so other peers can
-# reach us and connect to us, as long as inbound addrs are configured.
-#external_addrs = []
-
-# Peer nodes to manually connect to
-#peers = []
-
-# Seed nodes to connect to for peer discovery and/or adversising our
-# own external addresses
-#seeds = []
-
-# Whitelisted network transports for outbound connections
-#allowed_transports = ["tcp+tls"]
-
-# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
-#transport_mixing = true
-
-# Outbound connection slots number, this many connections will be
-# attempted. (This does not include manual connections)
-#outbound_connections = 8
-
-# Manual connections retry limit, 0 for forever looping
-#manual_attempt_limit = 0
-
-# Outbound connection timeout (in seconds)
-#outbound_connect_timeout = 10
-
-# Exchange versions (handshake) timeout (in seconds)
-#channel_handshake_timeout = 4
-
-# Ping-pong exchange execution interval (in seconds)
-#channel_heartbeat_interval = 10
-
-# Allow localnet hosts
-localnet = true
-
-# Delete a peer from hosts if they've been quarantined N times
-#hosts_quarantine_limit = 50
-
-# Cooling off time for peer discovery when unsuccessful
-#outbound_peer_discovery_cooloff_time = 30
-
-# Time between peer discovery attempts
-#outbound_peer_discovery_attempt_time = 5
-
-# Testnet blockchain network configuration
-[network_config."testnet"]
-# Path to the blockchain database directory
-database = "~/.local/darkfi/darkfid_blockchain_testnet"
-
-# Finalization threshold, denominated by number of blocks
-threshold = 6
-
-# minerd JSON-RPC endpoint
-minerd_endpoint = "tcp://127.0.0.1:28467"
-
-# PoW block production target, in seconds
-pow_target = 90
-
-# Epoch duration, denominated by number of blocks/slots
-epoch_length = 10
-
-# PoS slot duration, in seconds
-slot_time = 90
-
-# Whitelisted faucet addresses
-faucet_pub = ["3ce5xa3PjuQGFtTaF7AvMJp7fGxqeGRJx7zj3LCwNCkP"]
-
-# Participate in the consensus protocol
-consensus = false
-
-# Wallet address to receive consensus rewards
-#recipient = "YOUR_WALLET_ADDRESS_HERE"
-
-# Skip syncing process and start node right away
-skip_sync = false
-
-# Enable PoS testing mode for local testing
-pos_testing_mode = false
-
-## Testnet sync P2P network settings
-[network_config."testnet".sync_net]
-# P2P accept addresses the instance listens on for inbound connections
-# You can also use an IPv6 address
-inbound = ["tcp+tls://0.0.0.0:8342"]
-# IPv6 version:
-#inbound = ["tcp+tls://[::]:8342"]
-# Combined:
-#inbound = ["tcp+tls://0.0.0.0:8342", "tcp+tls://[::]:8342"]
-
-# P2P external addresses the instance advertises so other peers can
-# reach us and connect to us, as long as inbound addrs are configured.
-# You can also use an IPv6 address
-#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8342"]
-# IPv6 version:
-#external_addrs = ["tcp+tls://[ipv6 address here]:8342"]
-# Combined:
-#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8342", "tcp+tls://[ipv6 address here]:8342"]
-
-# Peer nodes to manually connect to
-#peers = []
-
-# Seed nodes to connect to for peer discovery and/or adversising our
-# own external addresses
-seeds = ["tcp+tls://lilith0.dark.fi:8342", "tcp+tls://lilith1.dark.fi:8342"]
-
-# Whitelisted network transports for outbound connections
-allowed_transports = ["tcp+tls"]
-
-# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
-#transport_mixing = true
-
-# Outbound connection slots number, this many connections will be
-# attempted. (This does not include manual connections)
-outbound_connections = 8
-
-# Inbound connections slots number, this many active inbound connections
-# will be allowed. (This does not include manual or outbound connections)
-#inbound_connections = 0
-
-# Manual connections retry limit, 0 for forever looping
-#manual_attempt_limit = 0
-
-# Outbound connection timeout (in seconds)
-#outbound_connect_timeout = 10
-
-# Exchange versions (handshake) timeout (in seconds)
-#channel_handshake_timeout = 4
-
-# Ping-pong exchange execution interval (in seconds)
-#channel_heartbeat_interval = 10
-
-# Allow localnet hosts
-localnet = false
-
-# Delete a peer from hosts if they've been quarantined N times
-#hosts_quarantine_limit = 50
-
-# Cooling off time for peer discovery when unsuccessful
-#outbound_peer_discovery_cooloff_time = 30
-
-# Time between peer discovery attempts
-#outbound_peer_discovery_attempt_time = 5
-
-## Testnet consensus P2P network settings
-[network_config."testnet".consensus_net]
-# P2P accept addresses the instance listens on for inbound connections
-# You can also use an IPv6 address
-inbound = ["tcp+tls://0.0.0.0:8341"]
-# IPv6 version:
-#inbound = ["tcp+tls://[::]:8341"]
-# Combined:
-#inbound = ["tcp+tls://0.0.0.0:8341", "tcp+tls://[::]:8341"]
-
-# P2P external addresses the instance advertises so other peers can
-# reach us and connect to us, as long as inbound addrs are configured.
-# You can also use an IPv6 address
-#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8341"]
-# IPv6 version:
-#external_addrs = ["tcp+tls://[ipv6 address here]:8341"]
-# Combined:
-#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8341", "tcp+tls://[ipv6 address here]:8341"]
-
-# Peer nodes to manually connect to
-#peers = []
-
-# Seed nodes to connect to for peer discovery and/or adversising our
-# own external addresses
-seeds = ["tcp+tls://lilith0.dark.fi:8341", "tcp+tls://lilith1.dark.fi:8341"]
-
-# Whitelisted network transports for outbound connections
-allowed_transports = ["tcp+tls"]
-
-# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
-#transport_mixing = true
-
-# Outbound connection slots number, this many connections will be
-# attempted. (This does not include manual connections)
-#outbound_connections = 8
-
-# Manual connections retry limit, 0 for forever looping
-#manual_attempt_limit = 0
-
-# Outbound connection timeout (in seconds)
-#outbound_connect_timeout = 10
-
-# Exchange versions (handshake) timeout (in seconds)
-#channel_handshake_timeout = 4
-
-# Ping-pong exchange execution interval (in seconds)
-#channel_heartbeat_interval = 10
-
-# Allow localnet hosts
-localnet = false
-
-# Delete a peer from hosts if they've been quarantined N times
-#hosts_quarantine_limit = 50
-
-# Cooling off time for peer discovery when unsuccessful
-#outbound_peer_discovery_cooloff_time = 30
-
-# Time between peer discovery attempts
-#outbound_peer_discovery_attempt_time = 5
-
-# Mainnet blockchain network configuration
-[network_config."mainnet"]
-# Path to the blockchain database directory
-database = "~/.local/darkfi/darkfid_blockchain_mainnet"
-
-# Finalization threshold, denominated by number of blocks
-threshold = 11
-
-# minerd JSON-RPC endpoint
-minerd_endpoint = "tcp://127.0.0.1:28467"
-
-# PoW block production target, in seconds
-pow_target = 90
-
-# Epoch duration, denominated by number of blocks/slots
-epoch_length = 10
-
-# PoS slot duration, in seconds
-slot_time = 90
-
-# Whitelisted faucet addresses
-faucet_pub = []
-
-# Participate in the consensus protocol
-consensus = false
-
-# Wallet address to receive consensus rewards
-#recipient = "YOUR_WALLET_ADDRESS_HERE"
-
-# Skip syncing process and start node right away
-skip_sync = false
-
-# Enable PoS testing mode for local testing
-pos_testing_mode = false
-
-## Mainnet sync P2P network settings
-[network_config."mainnet".sync_net]
-# P2P accept addresses the instance listens on for inbound connections
-# You can also use an IPv6 address
-inbound = ["tcp+tls://0.0.0.0:8442"]
-# IPv6 version:
-#inbound = ["tcp+tls://[::]:8442"]
-# Combined:
-#inbound = ["tcp+tls://0.0.0.0:8442", "tcp+tls://[::]:8442"]
-
-# P2P external addresses the instance advertises so other peers can
-# reach us and connect to us, as long as inbound addrs are configured.
-# You can also use an IPv6 address
-#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8442"]
-# IPv6 version:
-#external_addrs = ["tcp+tls://[ipv6 address here]:8442"]
-# Combined:
-#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8442", "tcp+tls://[ipv6 address here]:8442"]
-
-# Peer nodes to manually connect to
-#peers = []
-
-# Seed nodes to connect to for peer discovery and/or adversising our
-# own external addresses
-seeds = ["tcp+tls://lilith0.dark.fi:8442", "tcp+tls://lilith1.dark.fi:8442"]
-
-# Whitelisted network transports for outbound connections
-allowed_transports = ["tcp+tls"]
-
-# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
-#transport_mixing = true
-
-# Outbound connection slots number, this many connections will be
-# attempted. (This does not include manual connections)
-outbound_connections = 8
-
-# Inbound connections slots number, this many active inbound connections
-# will be allowed. (This does not include manual or outbound connections)
-#inbound_connections = 0
-
-# Manual connections retry limit, 0 for forever looping
-#manual_attempt_limit = 0
-
-# Outbound connection timeout (in seconds)
-#outbound_connect_timeout = 10
-
-# Exchange versions (handshake) timeout (in seconds)
-#channel_handshake_timeout = 4
-
-# Ping-pong exchange execution interval (in seconds)
-#channel_heartbeat_interval = 10
-
-# Allow localnet hosts
-localnet = false
-
-# Delete a peer from hosts if they've been quarantined N times
-#hosts_quarantine_limit = 50
-
-# Cooling off time for peer discovery when unsuccessful
-#outbound_peer_discovery_cooloff_time = 30
-
-# Time between peer discovery attempts
-#outbound_peer_discovery_attempt_time = 5
-
-## Mainnet consensus P2P network settings
-[network_config."mainnet".consensus_net]
-# P2P accept addresses the instance listens on for inbound connections
-# You can also use an IPv6 address
-inbound = ["tcp+tls://0.0.0.0:8441"]
-# IPv6 version:
-#inbound = ["tcp+tls://[::]:8441"]
-# Combined:
-#inbound = ["tcp+tls://0.0.0.0:8441", "tcp+tls://[::]:8441"]
-
-# P2P external addresses the instance advertises so other peers can
-# reach us and connect to us, as long as inbound addrs are configured.
-# You can also use an IPv6 address
-#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8441"]
-# IPv6 version:
-#external_addrs = ["tcp+tls://[ipv6 address here]:8441"]
-# Combined:
-#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8441", "tcp+tls://[ipv6 address here]:8441"]
-
-# Peer nodes to manually connect to
-#peers = []
-
-# Seed nodes to connect to for peer discovery and/or adversising our
-# own external addresses
-seeds = ["tcp+tls://lilith0.dark.fi:8441", "tcp+tls://lilith1.dark.fi:8441"]
-
-# Whitelisted network transports for outbound connections
-allowed_transports = ["tcp+tls"]
-
-# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
-#transport_mixing = true
-
-# Outbound connection slots number, this many connections will be
-# attempted. (This does not include manual connections)
-#outbound_connections = 8
-
-# Manual connections retry limit, 0 for forever looping
-#manual_attempt_limit = 0
-
-# Outbound connection timeout (in seconds)
-#outbound_connect_timeout = 10
-
-# Exchange versions (handshake) timeout (in seconds)
-#channel_handshake_timeout = 4
-
-# Ping-pong exchange execution interval (in seconds)
-#channel_heartbeat_interval = 10
-
-# Allow localnet hosts
-localnet = false
-
-# Delete a peer from hosts if they've been quarantined N times
-#hosts_quarantine_limit = 50
-
-# Cooling off time for peer discovery when unsuccessful
-#outbound_peer_discovery_cooloff_time = 30
-
-# Time between peer discovery attempts
-#outbound_peer_discovery_attempt_time = 5
diff --git a/bin/darkfid2/src/error.rs b/bin/darkfid2/src/error.rs
deleted file mode 100644
index 7ed774619..000000000
--- a/bin/darkfid2/src/error.rs
+++ /dev/null
@@ -1,69 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use darkfi::rpc::jsonrpc::{ErrorCode::ServerError, JsonError, JsonResult};
-
-/// Custom RPC errors available for darkfid.
-/// Please sort them sensefully.
-pub enum RpcError {
- // Transaction-related errors
- TxSimulationFail = -32110,
- TxBroadcastFail = -32111,
-
- // State-related errors,
- NotSynced = -32120,
- UnknownSlot = -32121,
-
- // Parsing errors
- ParseError = -32190,
-
- // Contract-related errors
- ContractZkasDbNotFound = -32200,
-
- // Misc errors
- PingFailed = -32300,
-}
-
-fn to_tuple(e: RpcError) -> (i32, String) {
- let msg = match e {
- // Transaction-related errors
- RpcError::TxSimulationFail => "Failed simulating transaction state change",
- RpcError::TxBroadcastFail => "Failed broadcasting transaction",
- // State-related errors
- RpcError::NotSynced => "Blockchain is not synced",
- RpcError::UnknownSlot => "Did not find slot",
- // Parsing errors
- RpcError::ParseError => "Parse error",
- // Contract-related errors
- RpcError::ContractZkasDbNotFound => "zkas database not found for given contract",
- // Misc errors
- RpcError::PingFailed => "Miner daemon ping error",
- };
-
- (e as i32, msg.to_string())
-}
-
-pub fn server_error(e: RpcError, id: u16, msg: Option<&str>) -> JsonResult {
- let (code, default_msg) = to_tuple(e);
-
- if let Some(message) = msg {
- return JsonError::new(ServerError(code), Some(message.to_string()), id).into()
- }
-
- JsonError::new(ServerError(code), Some(default_msg), id).into()
-}
diff --git a/bin/darkfid2/src/main.rs b/bin/darkfid2/src/main.rs
deleted file mode 100644
index 7bfa83f75..000000000
--- a/bin/darkfid2/src/main.rs
+++ /dev/null
@@ -1,419 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use std::{
- collections::{HashMap, HashSet},
- str::FromStr,
- sync::Arc,
-};
-
-use log::{error, info};
-use smol::{lock::Mutex, stream::StreamExt};
-use structopt_toml::{serde::Deserialize, structopt::StructOpt, StructOptToml};
-use url::Url;
-
-use darkfi::{
- async_daemonize,
- blockchain::BlockInfo,
- cli_desc,
- net::{settings::SettingsOpt, P2pPtr},
- rpc::{
- client::RpcClient,
- jsonrpc::JsonSubscriber,
- server::{listen_and_serve, RequestHandler},
- },
- system::{StoppableTask, StoppableTaskPtr},
- util::{path::expand_path, time::TimeKeeper},
- validator::{utils::genesis_txs_total, Validator, ValidatorConfig, ValidatorPtr},
- Error, Result,
-};
-use darkfi_sdk::crypto::PublicKey;
-use darkfi_serial::deserialize_async;
-
-#[cfg(test)]
-mod tests;
-
-mod error;
-use error::{server_error, RpcError};
-
-/// JSON-RPC requests handler and methods
-mod rpc;
-mod rpc_blockchain;
-mod rpc_tx;
-
-/// Validator async tasks
-mod task;
-use task::{miner_task, sync_task};
-
-/// P2P net protocols
-mod proto;
-
-/// Utility functions
-mod utils;
-use utils::{parse_blockchain_config, spawn_consensus_p2p, spawn_sync_p2p};
-
-const CONFIG_FILE: &str = "darkfid_config.toml";
-const CONFIG_FILE_CONTENTS: &str = include_str!("../darkfid_config.toml");
-/// Note:
-/// If you change these don't forget to remove their corresponding database folder,
-/// since if it already has a genesis block, provided one is ignored.
-const GENESIS_BLOCK_LOCALNET: &str = include_str!("../genesis_block_localnet");
-const GENESIS_BLOCK_TESTNET: &str = include_str!("../genesis_block_testnet");
-const GENESIS_BLOCK_MAINNET: &str = include_str!("../genesis_block_mainnet");
-
-#[derive(Clone, Debug, Deserialize, StructOpt, StructOptToml)]
-#[serde(default)]
-#[structopt(name = "darkfid", about = cli_desc!())]
-struct Args {
- #[structopt(short, long)]
- /// Configuration file to use
- config: Option,
-
- #[structopt(short, long, default_value = "tcp://127.0.0.1:8340")]
- /// JSON-RPC listen URL
- rpc_listen: Url,
-
- #[structopt(short, long, default_value = "testnet")]
- /// Blockchain network to use
- network: String,
-
- #[structopt(short, long)]
- /// Set log file to ouput into
- log: Option,
-
- #[structopt(short, parse(from_occurrences))]
- /// Increase verbosity (-vvv supported)
- verbose: u8,
-}
-
-/// Defines a blockchain network configuration.
-/// Default values correspond to a local network.
-#[derive(Clone, Debug, serde::Deserialize, structopt::StructOpt, structopt_toml::StructOptToml)]
-#[structopt()]
-pub struct BlockchainNetwork {
- #[structopt(long, default_value = "~/.local/darkfi/darkfid_blockchain_localnet")]
- /// Path to blockchain database
- pub database: String,
-
- #[structopt(long, default_value = "3")]
- /// Finalization threshold, denominated by number of blocks
- pub threshold: usize,
-
- #[structopt(long, default_value = "tcp://127.0.0.1:28467")]
- /// minerd JSON-RPC endpoint
- pub minerd_endpoint: Url,
-
- #[structopt(long, default_value = "10")]
- /// PoW block production target, in seconds
- pub pow_target: usize,
-
- #[structopt(long)]
- /// Optional fixed PoW difficulty, used for testing
- pub pow_fixed_difficulty: Option,
-
- #[structopt(long, default_value = "10")]
- /// Epoch duration, denominated by number of blocks/slots
- pub epoch_length: u64,
-
- #[structopt(long, default_value = "10")]
- /// PoS slot duration, in seconds
- pub slot_time: u64,
-
- #[structopt(long)]
- /// Whitelisted faucet public key (repeatable flag)
- pub faucet_pub: Vec,
-
- #[structopt(long)]
- /// Participate in the consensus protocol
- pub consensus: bool,
-
- #[structopt(long)]
- /// Wallet address to receive consensus rewards
- pub recipient: Option,
-
- #[structopt(long)]
- /// Skip syncing process and start node right away
- pub skip_sync: bool,
-
- #[structopt(long)]
- /// Enable PoS testing mode for local testing
- pub pos_testing_mode: bool,
-
- /// Syncing network settings
- #[structopt(flatten)]
- pub sync_net: SettingsOpt,
-
- /// Consensus network settings
- #[structopt(flatten)]
- pub consensus_net: SettingsOpt,
-}
-
-/// Daemon structure
-pub struct Darkfid {
- /// Syncing P2P network pointer
- sync_p2p: P2pPtr,
- /// Optional consensus P2P network pointer
- consensus_p2p: Option,
- /// Validator(node) pointer
- validator: ValidatorPtr,
- /// A map of various subscribers exporting live info from the blockchain
- subscribers: HashMap<&'static str, JsonSubscriber>,
- /// JSON-RPC connection tracker
- rpc_connections: Mutex>,
- /// JSON-RPC client to execute requests to the miner daemon
- rpc_client: Option,
-}
-
-impl Darkfid {
- pub async fn new(
- sync_p2p: P2pPtr,
- consensus_p2p: Option,
- validator: ValidatorPtr,
- subscribers: HashMap<&'static str, JsonSubscriber>,
- rpc_client: Option,
- ) -> Self {
- Self {
- sync_p2p,
- consensus_p2p,
- validator,
- subscribers,
- rpc_connections: Mutex::new(HashSet::new()),
- rpc_client,
- }
- }
-}
-
-async_daemonize!(realmain);
-async fn realmain(args: Args, ex: Arc>) -> Result<()> {
- info!(target: "darkfid", "Initializing DarkFi node...");
-
- // Grab blockchain network configuration
- let (blockchain_config, genesis_block) = match args.network.as_str() {
- "localnet" => {
- (parse_blockchain_config(args.config, "localnet").await?, GENESIS_BLOCK_LOCALNET)
- }
- "testnet" => {
- (parse_blockchain_config(args.config, "testnet").await?, GENESIS_BLOCK_TESTNET)
- }
- "mainnet" => {
- (parse_blockchain_config(args.config, "mainnet").await?, GENESIS_BLOCK_MAINNET)
- }
- _ => {
- error!("Unsupported chain `{}`", args.network);
- return Err(Error::UnsupportedChain)
- }
- };
-
- if blockchain_config.pos_testing_mode {
- info!(target: "darkfid", "Node is configured to run in PoS testing mode!");
- }
-
- // Parse the genesis block
- let bytes = bs58::decode(&genesis_block.trim()).into_vec()?;
- let genesis_block: BlockInfo = deserialize_async(&bytes).await?;
-
- // Initialize or open sled database
- let db_path = expand_path(&blockchain_config.database)?;
- let sled_db = sled::open(&db_path)?;
-
- // Initialize validator configuration
- let genesis_txs_total = genesis_txs_total(&genesis_block.txs).await?;
-
- let time_keeper = TimeKeeper::new(
- genesis_block.header.timestamp,
- blockchain_config.epoch_length,
- blockchain_config.slot_time,
- 0,
- );
-
- let pow_fixed_difficulty = if let Some(diff) = blockchain_config.pow_fixed_difficulty {
- info!(target: "darkfid", "Node is configured to run with fixed PoW difficulty: {}", diff);
- Some(diff.into())
- } else {
- None
- };
-
- let config = ValidatorConfig::new(
- time_keeper,
- blockchain_config.threshold,
- blockchain_config.pow_target,
- pow_fixed_difficulty,
- genesis_block,
- genesis_txs_total,
- vec![],
- blockchain_config.pos_testing_mode,
- false, // TODO: Make configurable
- );
-
- // Initialize validator
- let validator = Validator::new(&sled_db, config).await?;
-
- // Here we initialize various subscribers that can export live blockchain/consensus data.
- let mut subscribers = HashMap::new();
- subscribers.insert("blocks", JsonSubscriber::new("blockchain.subscribe_blocks"));
- subscribers.insert("txs", JsonSubscriber::new("blockchain.subscribe_txs"));
- if blockchain_config.consensus {
- subscribers.insert("proposals", JsonSubscriber::new("blockchain.subscribe_proposals"));
- }
-
- // Initialize syncing P2P network
- let sync_p2p =
- spawn_sync_p2p(&blockchain_config.sync_net.into(), &validator, &subscribers, ex.clone())
- .await;
-
- // Initialize consensus P2P network
- let (consensus_p2p, rpc_client) = if blockchain_config.consensus {
- let Ok(rpc_client) = RpcClient::new(blockchain_config.minerd_endpoint, ex.clone()).await
- else {
- error!(target: "darkfid", "Failed to initialize miner daemon rpc client, check if minerd is running");
- return Err(Error::RpcClientStopped)
- };
- (
- Some(
- spawn_consensus_p2p(
- &blockchain_config.consensus_net.into(),
- &validator,
- &subscribers,
- ex.clone(),
- )
- .await,
- ),
- Some(rpc_client),
- )
- } else {
- (None, None)
- };
-
- // Initialize node
- let darkfid = Darkfid::new(
- sync_p2p.clone(),
- consensus_p2p.clone(),
- validator.clone(),
- subscribers,
- rpc_client,
- )
- .await;
- let darkfid = Arc::new(darkfid);
- info!(target: "darkfid", "Node initialized successfully!");
-
- // Pinging minerd daemon to verify it listens
- if blockchain_config.consensus {
- if let Err(e) = darkfid.ping_miner_daemon().await {
- error!(target: "darkfid", "Failed to ping miner daemon: {}", e);
- return Err(Error::RpcClientStopped)
- }
- }
-
- // JSON-RPC server
- info!(target: "darkfid", "Starting JSON-RPC server");
- // Here we create a task variable so we can manually close the
- // task later. P2P tasks don't need this since it has its own
- // stop() function to shut down, also terminating the task we
- // created for it.
- let rpc_task = StoppableTask::new();
- let darkfid_ = darkfid.clone();
- rpc_task.clone().start(
- listen_and_serve(args.rpc_listen, darkfid.clone(), None, ex.clone()),
- |res| async move {
- match res {
- Ok(()) | Err(Error::RpcServerStopped) => darkfid_.stop_connections().await,
- Err(e) => error!(target: "darkfid", "Failed starting sync JSON-RPC server: {}", e),
- }
- },
- Error::RpcServerStopped,
- ex.clone(),
- );
-
- info!(target: "darkfid", "Starting sync P2P network");
- sync_p2p.clone().start().await?;
-
- // Consensus protocol
- if blockchain_config.consensus {
- info!(target: "darkfid", "Starting consensus P2P network");
- let consensus_p2p = consensus_p2p.clone().unwrap();
- consensus_p2p.clone().start().await?;
- } else {
- info!(target: "darkfid", "Not starting consensus P2P network");
- }
-
- // Sync blockchain
- if !blockchain_config.skip_sync {
- sync_task(&darkfid).await?;
- } else {
- *darkfid.validator.synced.write().await = true;
- }
-
- // Clean node pending transactions
- darkfid.validator.purge_pending_txs().await?;
-
- // Consensus protocol
- let consensus_task = if blockchain_config.consensus {
- info!(target: "darkfid", "Starting consensus protocol task");
- // Grab rewards recipient public key(address)
- if blockchain_config.recipient.is_none() {
- return Err(Error::ParseFailed("Recipient address missing"))
- }
- let recipient = match PublicKey::from_str(&blockchain_config.recipient.unwrap()) {
- Ok(address) => address,
- Err(_) => return Err(Error::InvalidAddress),
- };
-
- let task = StoppableTask::new();
- task.clone().start(
- // Weird hack to prevent lifetimes hell
- async move { miner_task(&darkfid, &recipient).await },
- |res| async {
- match res {
- Ok(()) | Err(Error::MinerTaskStopped) => { /* Do nothing */ }
- Err(e) => error!(target: "darkfid", "Failed starting miner task: {}", e),
- }
- },
- Error::MinerTaskStopped,
- ex.clone(),
- );
- Some(task)
- } else {
- info!(target: "darkfid", "Not participating in consensus");
- None
- };
-
- // Signal handling for graceful termination.
- let (signals_handler, signals_task) = SignalHandler::new(ex)?;
- signals_handler.wait_termination(signals_task).await?;
- info!(target: "darkfid", "Caught termination signal, cleaning up and exiting...");
-
- info!(target: "darkfid", "Stopping JSON-RPC server...");
- rpc_task.stop().await;
-
- info!(target: "darkfid", "Stopping syncing P2P network...");
- sync_p2p.stop().await;
-
- if blockchain_config.consensus {
- info!(target: "darkfid", "Stopping consensus P2P network...");
- consensus_p2p.unwrap().stop().await;
-
- info!(target: "darkfid", "Stopping consensus task...");
- consensus_task.unwrap().stop().await;
- }
-
- info!(target: "darkfid", "Flushing sled database...");
- let flushed_bytes = sled_db.flush_async().await?;
- info!(target: "darkfid", "Flushed {} bytes", flushed_bytes);
-
- Ok(())
-}
diff --git a/bin/darkfid2/src/rpc_blockchain.rs b/bin/darkfid2/src/rpc_blockchain.rs
deleted file mode 100644
index b812f2d47..000000000
--- a/bin/darkfid2/src/rpc_blockchain.rs
+++ /dev/null
@@ -1,292 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use std::{collections::HashMap, str::FromStr};
-
-use darkfi_sdk::crypto::ContractId;
-use darkfi_serial::{deserialize_async, serialize_async};
-use log::{debug, error};
-use tinyjson::JsonValue;
-
-use darkfi::{
- blockchain::contract_store::SMART_CONTRACT_ZKAS_DB_NAME,
- rpc::jsonrpc::{
- ErrorCode::{InternalError, InvalidParams, ParseError},
- JsonError, JsonResponse, JsonResult,
- },
- util::encoding::base64,
-};
-
-use crate::{server_error, Darkfid, RpcError};
-
-impl Darkfid {
- // RPCAPI:
- // Queries the blockchain database for a block in the given slot.
- // Returns a readable block upon success.
- //
- // **Params:**
- // * `array[0]`: `u64` slot ID (as string)
- //
- // **Returns:**
- // * [`BlockInfo`](https://darkrenaissance.github.io/darkfi/development/darkfi/consensus/block/struct.BlockInfo.html)
- // struct serialized into base64.
- //
- // --> {"jsonrpc": "2.0", "method": "blockchain.get_slot", "params": ["0"], "id": 1}
- // <-- {"jsonrpc": "2.0", "result": {...}, "id": 1}
- pub async fn blockchain_get_slot(&self, id: u16, params: JsonValue) -> JsonResult {
- let params = params.get::>().unwrap();
- if params.len() != 1 || !params[0].is_string() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- let slot = match params[0].get::().unwrap().parse::() {
- Ok(v) => v,
- Err(_) => return JsonError::new(ParseError, None, id).into(),
- };
-
- let blocks = match self.validator.blockchain.get_blocks_by_slot(&[slot]) {
- Ok(v) => v,
- Err(e) => {
- error!(target: "darkfid::rpc::blockchain_get_slot", "Failed fetching block by slot: {}", e);
- return JsonError::new(InternalError, None, id).into()
- }
- };
-
- if blocks.is_empty() {
- return server_error(RpcError::UnknownSlot, id, None)
- }
-
- let block = base64::encode(&serialize_async(&blocks[0]).await);
- JsonResponse::new(JsonValue::String(block), id).into()
- }
-
- // RPCAPI:
- // Queries the blockchain database for a given transaction.
- // Returns a serialized `Transaction` object.
- //
- // **Params:**
- // * `array[0]`: Hex-encoded transaction hash string
- //
- // **Returns:**
- // * Serialized [`Transaction`](https://darkrenaissance.github.io/darkfi/development/darkfi/tx/struct.Transaction.html)
- // object encoded with base64
- //
- // --> {"jsonrpc": "2.0", "method": "blockchain.get_tx", "params": ["TxHash"], "id": 1}
- // <-- {"jsonrpc": "2.0", "result": "ABCD...", "id": 1}
- pub async fn blockchain_get_tx(&self, id: u16, params: JsonValue) -> JsonResult {
- let params = params.get::>().unwrap();
- if params.len() != 1 {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- let tx_hash = params[0].get::().unwrap();
- let tx_hash = match blake3::Hash::from_hex(tx_hash) {
- Ok(v) => v,
- Err(_) => return JsonError::new(ParseError, None, id).into(),
- };
-
- let txs = match self.validator.blockchain.transactions.get(&[tx_hash], true) {
- Ok(txs) => txs,
- Err(e) => {
- error!(target: "darkfid::rpc::blockchain_get_tx", "Failed fetching tx by hash: {}", e);
- return JsonError::new(InternalError, None, id).into()
- }
- };
- // This would be an logic error somewhere
- assert_eq!(txs.len(), 1);
- // and strict was used during .get()
- let tx = txs[0].as_ref().unwrap();
-
- let tx_enc = base64::encode(&serialize_async(tx).await);
- JsonResponse::new(JsonValue::String(tx_enc), id).into()
- }
-
- // RPCAPI:
- // Queries the blockchain database to find the last known slot
- //
- // **Params:**
- // * `None`
- //
- // **Returns:**
- // * `u64` ID of the last known slot, as string
- //
- // --> {"jsonrpc": "2.0", "method": "blockchain.last_known_slot", "params": [], "id": 1}
- // <-- {"jsonrpc": "2.0", "result": "1234", "id": 1}
- pub async fn blockchain_last_known_slot(&self, id: u16, params: JsonValue) -> JsonResult {
- let params = params.get::>().unwrap();
- if !params.is_empty() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- let blockchain = self.validator.blockchain.clone();
- let Ok(last_slot) = blockchain.last() else {
- return JsonError::new(InternalError, None, id).into()
- };
-
- JsonResponse::new(JsonValue::Number(last_slot.0 as f64), id).into()
- }
-
- // RPCAPI:
- // Initializes a subscription to new incoming blocks.
- // Once a subscription is established, `darkfid` will send JSON-RPC notifications of
- // new incoming blocks to the subscriber.
- //
- // --> {"jsonrpc": "2.0", "method": "blockchain.subscribe_blocks", "params": [], "id": 1}
- // <-- {"jsonrpc": "2.0", "method": "blockchain.subscribe_blocks", "params": [`blockinfo`]}
- pub async fn blockchain_subscribe_blocks(&self, id: u16, params: JsonValue) -> JsonResult {
- let params = params.get::>().unwrap();
- if !params.is_empty() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- self.subscribers.get("blocks").unwrap().clone().into()
- }
-
- // RPCAPI:
- // Initializes a subscription to new incoming transactions.
- // Once a subscription is established, `darkfid` will send JSON-RPC notifications of
- // new incoming transactions to the subscriber.
- //
- // --> {"jsonrpc": "2.0", "method": "blockchain.subscribe_txs", "params": [], "id": 1}
- // <-- {"jsonrpc": "2.0", "method": "blockchain.subscribe_txs", "params": [`tx_hash`]}
- pub async fn blockchain_subscribe_txs(&self, id: u16, params: JsonValue) -> JsonResult {
- let params = params.get::>().unwrap();
- if !params.is_empty() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- self.subscribers.get("txs").unwrap().clone().into()
- }
-
- // RPCAPI:
- // Initializes a subscription to new incoming proposals, asuming node participates
- // in consensus. Once a subscription is established, `darkfid` will send JSON-RPC
- // notifications of new incoming proposals to the subscriber.
- //
- // --> {"jsonrpc": "2.0", "method": "blockchain.subscribe_proposals", "params": [], "id": 1}
- // <-- {"jsonrpc": "2.0", "method": "blockchain.subscribe_proposals", "params": [`blockinfo`]}
- pub async fn blockchain_subscribe_proposals(&self, id: u16, params: JsonValue) -> JsonResult {
- let params = params.get::>().unwrap();
- if !params.is_empty() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- // Since proposals subscriber is only active if we participate to consensus,
- // we have to check if it actually exists in the subscribers map.
- let proposals_subscriber = self.subscribers.get("proposals");
- if proposals_subscriber.is_none() {
- error!(target: "darkfid::rpc::blockchain_subscribe_proposals", "Proposals subscriber not found");
- return JsonError::new(InternalError, None, id).into()
- }
-
- proposals_subscriber.unwrap().clone().into()
- }
-
- // RPCAPI:
- // Performs a lookup of zkas bincodes for a given contract ID and returns all of
- // them, including their namespace.
- //
- // **Params:**
- // * `array[0]`: base58-encoded contract ID string
- //
- // **Returns:**
- // * `array[n]`: Pairs of: `zkas_namespace` string, serialized
- // [`ZkBinary`](https://darkrenaissance.github.io/darkfi/development/darkfi/zkas/decoder/struct.ZkBinary.html)
- // object
- //
- // --> {"jsonrpc": "2.0", "method": "blockchain.lookup_zkas", "params": ["6Ef42L1KLZXBoxBuCDto7coi9DA2D2SRtegNqNU4sd74"], "id": 1}
- // <-- {"jsonrpc": "2.0", "result": [["Foo", "ABCD..."], ["Bar", "EFGH..."]], "id": 1}
- pub async fn blockchain_lookup_zkas(&self, id: u16, params: JsonValue) -> JsonResult {
- let params = params.get::>().unwrap();
- if params.len() != 1 || !params[0].is_string() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- let contract_id = params[0].get::().unwrap();
- let contract_id = match ContractId::from_str(contract_id) {
- Ok(v) => v,
- Err(e) => {
- error!(target: "darkfid::rpc::blockchain_lookup_zkas", "Error decoding string to ContractId: {}", e);
- return JsonError::new(InvalidParams, None, id).into()
- }
- };
-
- let blockchain = self.validator.blockchain.clone();
-
- let Ok(zkas_db) = blockchain.contracts.lookup(
- &blockchain.sled_db,
- &contract_id,
- SMART_CONTRACT_ZKAS_DB_NAME,
- ) else {
- error!(
- target: "darkfid::rpc::blockchain_lookup_zkas", "Did not find zkas db for ContractId: {}",
- contract_id
- );
- return server_error(RpcError::ContractZkasDbNotFound, id, None)
- };
-
- let mut ret = vec![];
-
- for i in zkas_db.iter() {
- debug!(target: "darkfid::rpc::blockchain_lookup_zkas", "Iterating over zkas db");
- let Ok((zkas_ns, zkas_bytes)) = i else {
- error!(target: "darkfid::rpc::blockchain_lookup_zkas", "Internal sled error iterating db");
- return JsonError::new(InternalError, None, id).into()
- };
-
- let Ok(zkas_ns) = deserialize_async(&zkas_ns).await else {
- return JsonError::new(InternalError, None, id).into()
- };
-
- let zkas_bincode = base64::encode(&zkas_bytes);
- ret.push(JsonValue::Array(vec![
- JsonValue::String(zkas_ns),
- JsonValue::String(zkas_bincode),
- ]));
- }
-
- JsonResponse::new(JsonValue::Array(ret), id).into()
- }
-
- // RPCAPI:
- // Returns the `chain_id` used for merge mining. A 32-byte hash of the genesis block.
- //
- // --> {"jsonrpc": "2.0", "method": "merge_mining_get_chain_id", "params": [], "id": 0}
- // <-- {"jsonrpc": "2.0", "result": {"chain_id": 02f8...7863"}, "id": 0}
- pub async fn merge_mining_get_chain_id(&self, id: u16, _params: JsonValue) -> JsonResult {
- let chain_id = match self.validator.blockchain.genesis() {
- Ok((_, v)) => v,
- Err(e) => {
- error!(
- target: "darkfid::rpc::merge_mining_get_chain_id",
- "[RPC] Error looking up genesis block: {}", e,
- );
- return JsonError::new(InternalError, None, id).into()
- }
- };
-
- JsonResponse::new(
- JsonValue::Object(HashMap::from([(
- "chain_id".to_string(),
- chain_id.to_hex().to_string().into(),
- )])),
- id,
- )
- .into()
- }
-}
diff --git a/bin/darkfid2/src/rpc_tx.rs b/bin/darkfid2/src/rpc_tx.rs
deleted file mode 100644
index e551ed8db..000000000
--- a/bin/darkfid2/src/rpc_tx.rs
+++ /dev/null
@@ -1,220 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use darkfi_serial::deserialize_async;
-use log::error;
-use tinyjson::JsonValue;
-
-use darkfi::{
- rpc::jsonrpc::{
- ErrorCode::{InternalError, InvalidParams},
- JsonError, JsonResponse, JsonResult,
- },
- tx::Transaction,
- util::encoding::base64,
-};
-
-use super::Darkfid;
-use crate::{server_error, RpcError};
-
-impl Darkfid {
- // RPCAPI:
- // Simulate a network state transition with the given transaction.
- // Returns `true` if the transaction is valid, otherwise, a corresponding
- // error.
- //
- // --> {"jsonrpc": "2.0", "method": "tx.simulate", "params": ["base64encodedTX"], "id": 1}
- // <-- {"jsonrpc": "2.0", "result": true, "id": 1}
- pub async fn tx_simulate(&self, id: u16, params: JsonValue) -> JsonResult {
- let params = params.get::>().unwrap();
- if params.len() != 1 || !params[0].is_string() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- if !*self.validator.synced.read().await {
- error!(target: "darkfid::rpc::tx_simulate", "Blockchain is not synced");
- return server_error(RpcError::NotSynced, id, None)
- }
-
- // Try to deserialize the transaction
- let tx_enc = params[0].get::().unwrap().trim();
- let tx_bytes = match base64::decode(tx_enc) {
- Some(v) => v,
- None => {
- error!(target: "darkfid::rpc::tx_simulate", "Failed decoding base64 transaction");
- return server_error(RpcError::ParseError, id, None)
- }
- };
-
- let tx: Transaction = match deserialize_async(&tx_bytes).await {
- Ok(v) => v,
- Err(e) => {
- error!(target: "darkfid::rpc::tx_simulate", "Failed deserializing bytes into Transaction: {}", e);
- return server_error(RpcError::ParseError, id, None)
- }
- };
-
- // Simulate state transition
- let current_slot = self.validator.consensus.time_keeper.current_slot();
- let result = self.validator.add_transactions(&[tx], current_slot, false).await;
- if result.is_err() {
- error!(
- target: "darkfid::rpc::tx_simulate", "Failed to validate state transition: {}",
- result.err().unwrap()
- );
- return server_error(RpcError::TxSimulationFail, id, None)
- };
-
- JsonResponse::new(JsonValue::Boolean(true), id).into()
- }
-
- // RPCAPI:
- // Broadcast a given transaction to the P2P network.
- // The function will first simulate the state transition in order to see
- // if the transaction is actually valid, and in turn it will return an
- // error if this is the case. Otherwise, a transaction ID will be returned.
- //
- // --> {"jsonrpc": "2.0", "method": "tx.broadcast", "params": ["base64encodedTX"], "id": 1}
- // <-- {"jsonrpc": "2.0", "result": "txID...", "id": 1}
- pub async fn tx_broadcast(&self, id: u16, params: JsonValue) -> JsonResult {
- let params = params.get::>().unwrap();
- if params.len() != 1 || !params[0].is_string() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- if !*self.validator.synced.read().await {
- error!(target: "darkfid::rpc::tx_broadcast", "Blockchain is not synced");
- return server_error(RpcError::NotSynced, id, None)
- }
-
- // Try to deserialize the transaction
- let tx_enc = params[0].get::().unwrap().trim();
- let tx_bytes = match base64::decode(tx_enc) {
- Some(v) => v,
- None => {
- error!(target: "darkfid::rpc::tx_broadcast", "Failed decoding base64 transaction");
- return server_error(RpcError::ParseError, id, None)
- }
- };
-
- let tx: Transaction = match deserialize_async(&tx_bytes).await {
- Ok(v) => v,
- Err(e) => {
- error!(target: "darkfid::rpc::tx_broadcast", "Failed deserializing bytes into Transaction: {}", e);
- return server_error(RpcError::ParseError, id, None)
- }
- };
-
- if self.consensus_p2p.is_some() {
- // Consensus participants can directly perform
- // the state transition check and append to their
- // pending transactions store.
- if self.validator.append_tx(&tx).await.is_err() {
- error!(target: "darkfid::rpc::tx_broadcast", "Failed to append transaction to mempool");
- return server_error(RpcError::TxSimulationFail, id, None)
- }
- } else {
- // We'll perform the state transition check here.
- let current_slot = self.validator.consensus.time_keeper.current_slot();
- let result = self.validator.add_transactions(&[tx.clone()], current_slot, false).await;
- if result.is_err() {
- error!(
- target: "darkfid::rpc::tx_broadcast", "Failed to validate state transition: {}",
- result.err().unwrap()
- );
- return server_error(RpcError::TxSimulationFail, id, None)
- };
- }
-
- self.sync_p2p.broadcast(&tx).await;
- if self.sync_p2p.channels().await.is_empty() {
- error!(target: "darkfid::rpc::tx_broadcast", "Failed broadcasting tx, no connected channels");
- return server_error(RpcError::TxBroadcastFail, id, None)
- }
-
- let tx_hash = tx.hash().unwrap().to_string();
- JsonResponse::new(JsonValue::String(tx_hash), id).into()
- }
-
- // RPCAPI:
- // Queries the node pending transactions store to retrieve all transactions.
- // Returns a vector of hex-encoded transaction hashes.
- //
- // --> {"jsonrpc": "2.0", "method": "tx.pending", "params": [], "id": 1}
- // <-- {"jsonrpc": "2.0", "result": "[TxHash,...]", "id": 1}
- pub async fn tx_pending(&self, id: u16, params: JsonValue) -> JsonResult {
- let params = params.get::>().unwrap();
- if !params.is_empty() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- if !*self.validator.synced.read().await {
- error!(target: "darkfid::rpc::tx_pending", "Blockchain is not synced");
- return server_error(RpcError::NotSynced, id, None)
- }
-
- let pending_txs = match self.validator.blockchain.get_pending_txs() {
- Ok(v) => v,
- Err(e) => {
- error!(target: "darkfid::rpc::tx_pending", "Failed fetching pending txs: {}", e);
- return JsonError::new(InternalError, None, id).into()
- }
- };
-
- let pending_txs: Vec =
- pending_txs.iter().map(|x| JsonValue::String(x.hash().unwrap().to_string())).collect();
-
- JsonResponse::new(JsonValue::Array(pending_txs), id).into()
- }
-
- // RPCAPI:
- // Queries the node pending transactions store to remove all transactions.
- // Returns a vector of hex-encoded transaction hashes.
- //
- // --> {"jsonrpc": "2.0", "method": "tx.clean_pending", "params": [], "id": 1}
- // <-- {"jsonrpc": "2.0", "result": "[TxHash,...]", "id": 1}
- pub async fn tx_clean_pending(&self, id: u16, params: JsonValue) -> JsonResult {
- let params = params.get::>().unwrap();
- if !params.is_empty() {
- return JsonError::new(InvalidParams, None, id).into()
- }
-
- if !*self.validator.synced.read().await {
- error!(target: "darkfid::rpc::tx_clean_pending", "Blockchain is not synced");
- return server_error(RpcError::NotSynced, id, None)
- }
-
- let pending_txs = match self.validator.blockchain.get_pending_txs() {
- Ok(v) => v,
- Err(e) => {
- error!(target: "darkfid::rpc::tx_clean_pending", "Failed fetching pending txs: {}", e);
- return JsonError::new(InternalError, None, id).into()
- }
- };
-
- if let Err(e) = self.validator.blockchain.remove_pending_txs(&pending_txs) {
- error!(target: "darkfid::rpc::tx_clean_pending", "Failed fetching pending txs: {}", e);
- return JsonError::new(InternalError, None, id).into()
- };
-
- let pending_txs: Vec =
- pending_txs.iter().map(|x| JsonValue::String(x.hash().unwrap().to_string())).collect();
-
- JsonResponse::new(JsonValue::Array(pending_txs), id).into()
- }
-}
diff --git a/contrib/docker/riscv.Dockerfile b/contrib/docker/riscv.Dockerfile
index b8a5e3ad2..e0c019302 100644
--- a/contrib/docker/riscv.Dockerfile
+++ b/contrib/docker/riscv.Dockerfile
@@ -67,7 +67,6 @@ ENV TARGET_PRFX="--target=" RUST_TARGET="${RISCV_TARGET}"
RUN make ${BINS} && mkdir compiled-bins && \
(if [ -e zkas ]; then cp -a zkas compiled-bins/; fi;) && \
(if [ -e darkfid ]; then cp -a darkfid compiled-bins/; fi;) && \
- (if [ -e darkfid2 ]; then cp -a darkfid2 compiled-bins/; fi;) && \
(if [ -e faucetd ]; then cp -a faucetd compiled-bins/; fi;) && \
(if [ -e darkirc ]; then cp -a darkirc compiled-bins/; fi;) && \
(if [ -e "genev-cli" ]; then cp -a genev-cli compiled-bins/; fi;) && \
diff --git a/contrib/docker/static.Dockerfile b/contrib/docker/static.Dockerfile
index d518bb598..df2c5ad59 100644
--- a/contrib/docker/static.Dockerfile
+++ b/contrib/docker/static.Dockerfile
@@ -48,7 +48,6 @@ RUN sed -e 's,^#RUSTFLAGS ,RUSTFLAGS ,' -i Makefile
RUN make clean && make ${BINS} && mkdir compiled-bins && \
(if [ -e zkas ]; then cp -a zkas compiled-bins/; fi;) && \
(if [ -e darkfid ]; then cp -a darkfid compiled-bins/; fi;) && \
- (if [ -e darkfid2 ]; then cp -a darkfid2 compiled-bins/; fi;) && \
(if [ -e faucetd ]; then cp -a faucetd compiled-bins/; fi;) && \
(if [ -e darkirc ]; then cp -a darkirc compiled-bins/; fi;) && \
(if [ -e "genev-cli" ]; then cp -a genev-cli compiled-bins/; fi;) && \
diff --git a/src/consensus/block.rs b/src/consensus/block.rs
deleted file mode 100644
index 2e8840faa..000000000
--- a/src/consensus/block.rs
+++ /dev/null
@@ -1,253 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use std::fmt;
-
-use darkfi_sdk::{
- crypto::{MerkleNode, MerkleTree},
- pasta::pallas,
-};
-use darkfi_serial::{async_trait, serialize, SerialDecodable, SerialEncodable};
-
-use super::{
- constants::{BLOCK_MAGIC_BYTES, BLOCK_VERSION},
- LeadInfo,
-};
-use crate::{impl_p2p_message, net::Message, tx::Transaction, util::time::Timestamp};
-
-/// This struct represents a tuple of the form (version, previous, epoch, slot, timestamp, merkle_root).
-#[derive(Debug, Clone, PartialEq, Eq, SerialEncodable, SerialDecodable)]
-pub struct Header {
- /// Block version
- pub version: u8,
- /// Previous block hash
- pub previous: blake3::Hash,
- /// Epoch
- pub epoch: u64,
- /// Slot UID
- pub slot: u64,
- /// Block creation timestamp
- pub timestamp: Timestamp,
- /// Root of the transaction hashes merkle tree
- pub root: MerkleNode,
-}
-
-impl Header {
- pub fn new(
- previous: blake3::Hash,
- epoch: u64,
- slot: u64,
- timestamp: Timestamp,
- root: MerkleNode,
- ) -> Self {
- let version = BLOCK_VERSION;
- Self { version, previous, epoch, slot, timestamp, root }
- }
-
- /// Generate the genesis block.
- pub fn genesis_header(genesis_ts: Timestamp, genesis_data: blake3::Hash) -> Self {
- let tree = MerkleTree::new(100);
- let root = tree.root(0).unwrap();
-
- Self::new(genesis_data, 0, 0, genesis_ts, root)
- }
-
- /// Calculate the header hash
- pub fn headerhash(&self) -> blake3::Hash {
- blake3::hash(&serialize(self))
- }
-}
-
-impl Default for Header {
- fn default() -> Self {
- Header::new(
- blake3::hash(b""),
- 0,
- 0,
- Timestamp::current_time(),
- MerkleNode::from(pallas::Base::zero()),
- )
- }
-}
-
-/// This struct represents a tuple of the form (`magic`, `header`, `counter`, `txs`, `lead_info`).
-/// The header and transactions are stored as hashes, serving as pointers to
-/// the actual data in the sled database.
-#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
-pub struct Block {
- /// Block magic bytes
- pub magic: [u8; 4],
- /// Block header
- pub header: blake3::Hash,
- /// Trasaction hashes
- pub txs: Vec,
- /// Lead Info
- pub lead_info: LeadInfo,
-}
-
-impl_p2p_message!(Block, "block");
-
-impl Block {
- pub fn new(
- previous: blake3::Hash,
- epoch: u64,
- slot: u64,
- txs: Vec,
- root: MerkleNode,
- lead_info: LeadInfo,
- ) -> Self {
- let magic = BLOCK_MAGIC_BYTES;
- let timestamp = Timestamp::current_time();
- let header = Header::new(previous, epoch, slot, timestamp, root);
- let header = header.headerhash();
- Self { magic, header, txs, lead_info }
- }
-
- /// Generate the genesis block.
- pub fn genesis_block(genesis_ts: Timestamp, genesis_data: blake3::Hash) -> Self {
- let magic = BLOCK_MAGIC_BYTES;
- let header = Header::genesis_header(genesis_ts, genesis_data);
- let header = header.headerhash();
- let lead_info = LeadInfo::default();
- Self { magic, header, txs: vec![], lead_info }
- }
-
- /// Calculate the block hash
- pub fn blockhash(&self) -> blake3::Hash {
- blake3::hash(&serialize(self))
- }
-}
-
-/// Auxiliary structure used for blockchain syncing.
-#[derive(Debug, SerialEncodable, SerialDecodable)]
-pub struct BlockOrder {
- /// Slot UID
- pub slot: u64,
- /// Block headerhash of that slot
- pub block: blake3::Hash,
-}
-
-impl_p2p_message!(BlockOrder, "blockorder");
-
-/// Structure representing full block data.
-#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
-pub struct BlockInfo {
- /// BlockInfo magic bytes
- pub magic: [u8; 4],
- /// Block header data
- pub header: Header,
- /// Transactions payload
- pub txs: Vec,
- /// Lead Info,
- pub lead_info: LeadInfo,
-}
-
-impl Default for BlockInfo {
- fn default() -> Self {
- let magic = BLOCK_MAGIC_BYTES;
- Self { magic, header: Header::default(), txs: vec![], lead_info: LeadInfo::default() }
- }
-}
-
-impl_p2p_message!(BlockInfo, "blockinfo");
-
-impl BlockInfo {
- pub fn new(header: Header, txs: Vec, lead_info: LeadInfo) -> Self {
- let magic = BLOCK_MAGIC_BYTES;
- Self { magic, header, txs, lead_info }
- }
-
- /// Calculate the block hash
- pub fn blockhash(&self) -> blake3::Hash {
- let block: Block = self.clone().into();
- block.blockhash()
- }
-}
-
-impl From for Block {
- fn from(block_info: BlockInfo) -> Self {
- let txs = block_info.txs.iter().map(|x| blake3::hash(&serialize(x))).collect();
- Self {
- magic: block_info.magic,
- header: block_info.header.headerhash(),
- txs,
- lead_info: block_info.lead_info,
- }
- }
-}
-
-/// Auxiliary structure used for blockchain syncing
-#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
-pub struct BlockResponse {
- /// Response blocks.
- pub blocks: Vec,
-}
-
-impl_p2p_message!(BlockResponse, "blockresponse");
-
-/// This struct represents a block proposal, used for consensus.
-#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
-pub struct BlockProposal {
- /// Block hash
- pub hash: blake3::Hash,
- /// Block header hash
- pub header: blake3::Hash,
- /// Block data
- pub block: BlockInfo,
-}
-
-impl BlockProposal {
- #[allow(clippy::too_many_arguments)]
- pub fn new(header: Header, txs: Vec, lead_info: LeadInfo) -> Self {
- let block = BlockInfo::new(header, txs, lead_info);
- let hash = block.blockhash();
- let header = block.header.headerhash();
- Self { hash, header, block }
- }
-}
-
-impl PartialEq for BlockProposal {
- fn eq(&self, other: &Self) -> bool {
- self.hash == other.hash &&
- self.header == other.header &&
- self.block.header == other.block.header &&
- self.block.txs == other.block.txs
- }
-}
-
-impl fmt::Display for BlockProposal {
- fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
- formatter.write_fmt(format_args!(
- "BlockProposal {{ leader public key: {}, hash: {}, header: {}, epoch: {}, slot: {}, txs: {} }}",
- self.block.lead_info.public_key,
- self.hash,
- self.header,
- self.block.header.epoch,
- self.block.header.slot,
- self.block.txs.len()
- ))
- }
-}
-
-impl_p2p_message!(BlockProposal, "proposal");
-
-impl From for BlockInfo {
- fn from(block: BlockProposal) -> BlockInfo {
- block.block
- }
-}
diff --git a/src/consensus/clock.rs b/src/consensus/clock.rs
deleted file mode 100644
index ca724e722..000000000
--- a/src/consensus/clock.rs
+++ /dev/null
@@ -1,206 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use crate::{util::time::Timestamp, Result};
-use log::debug;
-use std::{thread, time::Duration};
-use url::Url;
-
-pub enum Ticks {
- GENESIS { e: u64, sl: u64 }, //genesis epoch
- NEWSLOT { e: u64, sl: u64 }, // new slot
- NEWEPOCH { e: u64, sl: u64 }, // new epoch
- TOCKS, //tocks, or slot is ending
- IDLE, // idle clock state
- OUTOFSYNC, //clock, and blockchain are out of sync
-}
-
-const BB_SL: u64 = u64::MAX - 1; //big bang slot time (need to be negative value)
-const BB_E: u64 = 0; //big bang epoch time.
-
-#[derive(Debug)]
-pub struct Clock {
- pub sl: u64, // relative slot index (zero-based) [0-len[
- pub e: u64, // epoch index (zero-based) [0-\inf[
- pub tick_len: u64, // tick length in time (seconds)
- pub sl_len: u64, // slot length in ticks
- pub e_len: u64, // epoch length in slots
- pub peers: Vec,
- pub genesis_time: Timestamp,
-}
-
-impl Clock {
- pub fn new(
- e_len: Option,
- sl_len: Option,
- tick_len: Option,
- peers: Vec,
- ) -> Self {
- let gt: Timestamp = Timestamp::current_time();
- Self {
- sl: BB_SL, //necessary for genesis slot
- e: BB_E,
- tick_len: tick_len.unwrap_or(22), // 22 seconds
- sl_len: sl_len.unwrap_or(22), // ~8 minutes
- e_len: e_len.unwrap_or(3), // 24.2 minutes
- peers,
- genesis_time: gt,
- }
- }
-
- pub fn get_sl_len(&self) -> u64 {
- self.sl_len
- }
-
- pub fn get_e_len(&self) -> u64 {
- self.e_len
- }
-
- async fn time(&self) -> Result {
- //TODO (fix) add more than ntp server to time, and take the avg
- Ok(Timestamp::current_time())
- }
-
- /// returns time since genesis in seconds.
- async fn time_to_genesis(&self) -> Timestamp {
- //TODO this value need to be assigned to kickoff time.
- let genesis_time = self.genesis_time.0;
- let abs_time = self.time().await.unwrap();
- Timestamp(abs_time.0 - genesis_time)
- }
-
- /// return absolute tick to genesis, and relative tick index in the slot.
- async fn tick_time(&self) -> (u64, u64, u64) {
- let time = self.time_to_genesis().await.0;
- let tick_abs: u64 = time / self.tick_len;
- let tick_rel: u64 = time % self.tick_len;
- (time, tick_rel, tick_abs)
- }
-
- /// return true if the clock is at the begining (before 2/3 of the slot).
- async fn ticking(&self) -> bool {
- let (abs, rel, _) = self.tick_time().await;
- debug!(target: "consensus::clock", "abs time to genesis ticks: {}, rel ticks: {}", abs, rel);
- rel < (self.tick_len) * 2 / 3
- }
-
- pub async fn sync(&mut self) -> Result<()> {
- let e = self.epoch_abs().await;
- let sl = self.slot_relative().await;
- self.sl = sl;
- self.e = e;
- Ok(())
- }
-
- /// returns absolute zero based slot index
- async fn slot_abs(&self) -> u64 {
- let sl_abs = self.tick_time().await.0 / self.sl_len;
- debug!(target: "consensus::clock", "[slot_abs] slot len: {} - slot abs: {}", self.sl_len, sl_abs);
- sl_abs
- }
-
- /// returns relative zero based slot index
- async fn slot_relative(&self) -> u64 {
- let e_abs = self.slot_abs().await % self.e_len;
- debug!(target: "consensus::clock", "[slot_relative] slot len: {} - slot relative: {}", self.sl_len, e_abs);
- e_abs
- }
-
- /// returns absolute zero based epoch index.
- async fn epoch_abs(&self) -> u64 {
- let res = self.slot_abs().await / self.e_len;
- debug!(target: "consensus::clock", "[epoch_abs] epoch len: {} - epoch abs: {}", self.e_len, res);
- res
- }
-
- /// return the ticks phase with corresponding phase parameters
- ///
- /// the Ticks enum can include epoch index, and relative slot index (zero-based)
- pub async fn ticks(&mut self) -> Ticks {
- // also debug the failing function.
- let e = self.epoch_abs().await;
- let sl = self.slot_relative().await;
- if self.ticking().await {
- debug!(
- target: "consensus::clock",
- "e/e`: {}/{} sl/sl`: {}/{}, BB_E/BB_SL: {}/{}",
- e, self.e, sl, self.sl, BB_E, BB_SL
- );
- if e == self.e && e == BB_E && self.sl == BB_SL {
- self.sl = sl + 1; // 0
- self.e = e; // 0
- debug!(target: "consensus::clock", "new genesis");
- Ticks::GENESIS { e, sl }
- } else if e == self.e && sl == self.sl + 1 {
- self.sl = sl;
- debug!(target: "consensus::clock", "new slot");
- Ticks::NEWSLOT { e, sl }
- } else if e == self.e + 1 && sl == 0 {
- self.e = e;
- self.sl = sl;
- debug!(target: "consensus::clock", "new epoch");
- Ticks::NEWEPOCH { e, sl }
- } else if e == self.e && sl == self.sl {
- debug!(target: "consensus::clock", "clock is idle");
- thread::sleep(Duration::from_millis(100));
- Ticks::IDLE
- } else {
- debug!(target: "consensus::clock", "clock is out of sync");
- //clock is out of sync
- Ticks::OUTOFSYNC
- }
- } else {
- debug!(target: "consensus::clock", "tocks");
- Ticks::TOCKS
- }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::{Clock, Ticks};
- use futures::executor::block_on;
- use std::{thread, time::Duration};
- #[test]
- fn clock_works() {
- let clock = Clock::new(Some(9), Some(9), Some(9), vec![]);
- //block th for 3 secs
- thread::sleep(Duration::from_millis(1000));
- let ttg = block_on(clock.time_to_genesis()).0;
- assert!((1..2).contains(&ttg));
- }
-
- fn _clock_ticking() {
- let clock = Clock::new(Some(9), Some(9), Some(9), vec![]);
- //block th for 3 secs
- thread::sleep(Duration::from_millis(1000));
- assert!(block_on(clock.ticking()));
- thread::sleep(Duration::from_millis(1000));
- assert!(block_on(clock.ticking()));
- }
-
- fn _clock_ticks() {
- let mut clock = Clock::new(Some(9), Some(9), Some(9), vec![]);
- //
- let tick: Ticks = block_on(clock.ticks());
- assert!(matches!(tick, Ticks::GENESIS { e: 0, sl: 0 }));
- thread::sleep(Duration::from_millis(3000));
- let tock: Ticks = block_on(clock.ticks());
- assert!(matches!(tock, Ticks::TOCKS));
- }
-}
diff --git a/src/consensus/consensus_coin.sql b/src/consensus/consensus_coin.sql
deleted file mode 100644
index 20fbcc245..000000000
--- a/src/consensus/consensus_coin.sql
+++ /dev/null
@@ -1,6 +0,0 @@
--- Wallet definitions for consensus lead coins.
-
--- The consensus lead coin we have and can use
-CREATE TABLE IF NOT EXISTS consensus_coin (
- coin BLOB
-);
diff --git a/src/consensus/constants.rs b/src/consensus/constants.rs
deleted file mode 100644
index 8e9db66ab..000000000
--- a/src/consensus/constants.rs
+++ /dev/null
@@ -1,129 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use lazy_static::lazy_static;
-
-use crate::{consensus::Float10, util::time::Timestamp};
-
-lazy_static! {
- /// Genesis hash for the mainnet chain
- pub static ref MAINNET_GENESIS_HASH_BYTES: blake3::Hash = blake3::hash(b"darkfi_mainnet");
-
- // NOTE: On initial network bootstrap, genesis timestamp should be equal to boostrap timestamp.
- // On network restart only change bootstrap timestamp to schedule when nodes become active.
- /// Genesis timestamp for the mainnet chain
- pub static ref MAINNET_GENESIS_TIMESTAMP: Timestamp = Timestamp(1650887115);
-
- /// Bootstrap timestamp for the mainnet chain
- pub static ref MAINNET_BOOTSTRAP_TIMESTAMP: Timestamp = Timestamp(1650887115);
-
- /// Total sum of initial staking coins for the mainnet chain
- pub static ref MAINNET_INITIAL_DISTRIBUTION: u64 = 0;
-
- /// Genesis hash for the testnet chain
- pub static ref TESTNET_GENESIS_HASH_BYTES: blake3::Hash = blake3::hash(b"darkfi_testnet");
-
- /// Genesis timestamp for the testnet chain
- pub static ref TESTNET_GENESIS_TIMESTAMP: Timestamp = Timestamp(1677531600);
-
- /// Bootstrap timestamp for the testnet chain
- pub static ref TESTNET_BOOTSTRAP_TIMESTAMP: Timestamp = Timestamp(1677531600);
-
- /// Total sum of initial staking coins for the testnet chain
- pub static ref TESTNET_INITIAL_DISTRIBUTION: u64 = 1000;
-
- // Commonly used Float10
- pub static ref FLOAT10_EPSILON: Float10 = Float10::try_from("1").unwrap();
- pub static ref FLOAT10_NEG_TWO: Float10 = Float10::try_from("-2").unwrap();
- pub static ref FLOAT10_NEG_ONE: Float10 = Float10::try_from("-1").unwrap();
- pub static ref FLOAT10_ZERO: Float10 = Float10::try_from("0").unwrap();
- pub static ref FLOAT10_ONE: Float10 = Float10::try_from("1").unwrap();
- pub static ref FLOAT10_TWO: Float10 = Float10::try_from("2").unwrap();
- pub static ref FLOAT10_THREE: Float10 = Float10::try_from("3").unwrap();
- pub static ref FLOAT10_FIVE: Float10 = Float10::try_from("5").unwrap();
- pub static ref FLOAT10_NINE: Float10 = Float10::try_from("9").unwrap();
- pub static ref FLOAT10_TEN: Float10 = Float10::try_from("10").unwrap();
-
- // Consensus parameters
- pub static ref KP: Float10 = Float10::try_from("0.18").unwrap();
- pub static ref KI: Float10 = Float10::try_from("0.02").unwrap();
- pub static ref KD: Float10 = Float10::try_from("-0.1").unwrap();
- pub static ref PID_OUT_STEP: Float10 = Float10::try_from("0.1").unwrap();
- pub static ref MAX_DER: Float10 = Float10::try_from("0.1").unwrap();
- pub static ref MIN_DER: Float10 = Float10::try_from("-0.1").unwrap();
- pub static ref MAX_F: Float10 = Float10::try_from("0.99").unwrap();
- pub static ref MIN_F: Float10 = Float10::try_from("0.01").unwrap();
-
-}
-
-/// Block version number
-pub const BLOCK_VERSION: u8 = 1;
-
-/// Block magic bytes
-pub const BLOCK_MAGIC_BYTES: [u8; 4] = [0x11, 0x6d, 0x75, 0x1f];
-
-/// Block info magic bytes
-pub const BLOCK_INFO_MAGIC_BYTES: [u8; 4] = [0x90, 0x44, 0xf1, 0xf6];
-
-/// Number of slots in one epoch
-pub const EPOCH_LENGTH: usize = 10;
-
-/// Slot time in seconds
-pub const SLOT_TIME: u64 = 90;
-
-/// Finalization sync period duration (should be >=2/3 of slot time)
-pub const FINAL_SYNC_DUR: u64 = 60;
-
-/// Max resync retries duration in epochs
-pub const SYNC_RETRIES_DURATION: u64 = 2;
-
-/// Max resync retries
-pub const SYNC_MAX_RETRIES: u64 = 10;
-
-/// Transactions included in a block cap
-pub const TXS_CAP: usize = 50;
-
-/// Block leader reward
-pub const REWARD: u64 = 1;
-
-/// Leader proofs k for zk proof rows (rows=2^k)
-pub const LEADER_PROOF_K: u32 = 13;
-
-// TODO: Describe these constants
-pub const RADIX_BITS: usize = 76;
-
-pub const P: &str = "28948022309329048855892746252171976963363056481941560715954676764349967630337";
-pub const LOTTERY_HEAD_START: u64 = 1;
-pub const PRF_NULLIFIER_PREFIX: u64 = 0;
-pub const PI_COMMITMENT_X_INDEX: usize = 1;
-pub const PI_COMMITMENT_Y_INDEX: usize = 2;
-pub const PI_COMMITMENT_ROOT: usize = 5;
-pub const PI_NULLIFIER_INDEX: usize = 7;
-pub const PI_MU_Y_INDEX: usize = 8;
-pub const PI_MU_RHO_INDEX: usize = 10;
-pub const PI_SIGMA1_INDEX: usize = 12;
-pub const PI_SIGMA2_INDEX: usize = 13;
-pub const GENESIS_TOTAL_STAKE: u64 = 1;
-
-pub const LEADER_HISTORY_LOG: &str = "/tmp/lead_history.log";
-pub const F_HISTORY_LOG: &str = "/tmp/f_history.log";
-pub const LOTTERY_HISTORY_LOG: &str = "/tmp/lottery_history.log";
-
-// Wallet SQL table constant names. These have to represent the SQL schema.
-pub const CONSENSUS_COIN_TABLE: &str = "consensus_coin";
-pub const CONSENSUS_COIN_COL: &str = "coin";
diff --git a/src/consensus/fees.rs b/src/consensus/fees.rs
deleted file mode 100644
index 60d3b8e92..000000000
--- a/src/consensus/fees.rs
+++ /dev/null
@@ -1,87 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use crate::zkas::{Opcode, VarType, ZkBinary};
-
-/// Calculate the gas use for verifying a given zkas circuit.
-/// This function assumes that the zkbin was properly decoded.
-pub fn circuit_gas_use(zkbin: &ZkBinary) -> u64 {
- let mut accumulator: u64 = 0;
-
- // Constants each with a cost of 10
- accumulator += 10 * zkbin.constants.len() as u64;
-
- // Literals each with a cost of 10 (for now there's only 1 type of literal)
- accumulator += 10 * zkbin.literals.len() as u64;
-
- // Witnesses have cost by type
- for witness in &zkbin.witnesses {
- let cost = match witness {
- VarType::Dummy => unreachable!(),
- VarType::EcPoint => 20,
- VarType::EcFixedPoint => unreachable!(),
- VarType::EcFixedPointShort => unreachable!(),
- VarType::EcFixedPointBase => unreachable!(),
- VarType::EcNiPoint => 20,
- VarType::Base => 10,
- VarType::BaseArray => unreachable!(),
- VarType::Scalar => 20,
- VarType::ScalarArray => unreachable!(),
- VarType::MerklePath => 40,
- VarType::Uint32 => 10,
- VarType::Uint64 => 10,
- VarType::Any => 10,
- };
-
- accumulator += cost;
- }
-
- // Opcodes depending on how heavy they are
- for opcode in &zkbin.opcodes {
- let cost = match opcode.0 {
- Opcode::Noop => unreachable!(),
- Opcode::EcAdd => 30,
- Opcode::EcMul => 30,
- Opcode::EcMulBase => 30,
- Opcode::EcMulShort => 30,
- Opcode::EcMulVarBase => 30,
- Opcode::EcGetX => 5,
- Opcode::EcGetY => 5,
- Opcode::PoseidonHash => 20 + 10 * opcode.1.len() as u64,
- Opcode::MerkleRoot => 50,
- Opcode::BaseAdd => 15,
- Opcode::BaseMul => 15,
- Opcode::BaseSub => 15,
- Opcode::WitnessBase => 10,
- Opcode::RangeCheck => 60,
- Opcode::LessThanStrict => 100,
- Opcode::LessThanLoose => 100,
- Opcode::BoolCheck => 20,
- Opcode::CondSelect => 10,
- Opcode::ZeroCondSelect => 10,
- Opcode::ConstrainEqualBase => 10,
- Opcode::ConstrainEqualPoint => 20,
- Opcode::ConstrainInstance => 10,
- Opcode::DebugPrint => 100,
- };
-
- accumulator += cost;
- }
-
- accumulator
-}
diff --git a/src/consensus/lead_coin.rs b/src/consensus/lead_coin.rs
deleted file mode 100644
index 432d44616..000000000
--- a/src/consensus/lead_coin.rs
+++ /dev/null
@@ -1,509 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use darkfi_sdk::{
- crypto::{
- pedersen::{pedersen_commitment_base, pedersen_commitment_u64},
- poseidon_hash,
- util::fp_mod_fv,
- MerkleNode, MerkleTree, SecretKey,
- },
- pasta::{arithmetic::CurveAffine, group::Curve, pallas},
-};
-use darkfi_serial::{async_trait, SerialDecodable, SerialEncodable};
-use halo2_proofs::{arithmetic::Field, circuit::Value};
-use log::info;
-use rand::rngs::OsRng;
-
-use super::constants::EPOCH_LENGTH;
-use crate::{
- consensus::{constants, utils::fbig2base, Float10, TransferStx, TxRcpt},
- zk::{
- proof::{Proof, ProvingKey},
- vm::ZkCircuit,
- vm_heap::Witness,
- },
- zkas::ZkBinary,
- Result,
-};
-
-use std::{
- fs::File,
- io::{prelude::*, BufWriter},
-};
-
-pub const MERKLE_DEPTH_LEAD_COIN: usize = 32;
-pub const MERKLE_DEPTH: u8 = 32;
-pub const ZERO: pallas::Base = pallas::Base::zero();
-pub const ONE: pallas::Base = pallas::Base::one();
-pub const PREFIX_EVL: u64 = 2;
-pub const PREFIX_SEED: u64 = 3;
-pub const PREFIX_CM: u64 = 4;
-pub const PREFIX_PK: u64 = 5;
-pub const PREFIX_SN: u64 = 6;
-
-// TODO: Unify item names with the names in the ZK proof (those are more descriptive)
-/// Structure representing the consensus leader coin
-#[derive(Debug, Clone, SerialDecodable, SerialEncodable)]
-pub struct LeadCoin {
- /// Coin's stake value
- pub value: u64,
- /// Coin creation slot.
- pub slot: u64,
- /// Coin nonce
- pub nonce: pallas::Base,
- /// Commitment for coin1
- pub coin1_commitment: pallas::Point,
- /// Merkle root of coin1 commitment
- pub coin1_commitment_root: MerkleNode,
- /// Coin commitment position
- pub coin1_commitment_pos: u32,
- /// Merkle path to the coin1's commitment
- pub coin1_commitment_merkle_path: Vec,
- /// coin1 sk
- pub coin1_sk: pallas::Base,
- /// Merkle root of the `coin1` secret key
- pub coin1_sk_root: MerkleNode,
- /// coin1 sk position in merkle tree
- pub coin1_sk_pos: u32,
- /// Merkle path to the secret key of `coin1`
- pub coin1_sk_merkle_path: Vec,
- /// coin1 commitment blinding factor
- pub coin1_blind: pallas::Scalar,
-}
-
-impl LeadCoin {
- /// Create a new `LeadCoin` object using given parameters.
- #[allow(clippy::too_many_arguments)]
- pub fn new(
- // emulation of global random oracle output from previous epoch randomness.
- //eta: pallas::Base,
- // Stake value
- value: u64,
- // Slot absolute index
- slot: u64,
- // coin1 sk
- coin1_sk: pallas::Base,
- // Merkle root of the `coin_1` secret key in the Merkle tree of secret keys
- coin1_sk_root: MerkleNode,
- // sk pos
- coin1_sk_pos: usize,
- // Merkle path to the secret key of `coin_1` in the Merkle tree of secret keys
- coin1_sk_merkle_path: Vec,
- // coin1 nonce
- seed: pallas::Base,
- // Merkle tree of coin commitments
- coin_commitment_tree: &mut MerkleTree,
- ) -> Self {
- // Generate random blinding values for commitments:
- let coin1_blind = pallas::Scalar::random(&mut OsRng);
- //let coin2_blind = pallas::Scalar::random(&mut OsRng);
- // pk
- let pk = Self::util_pk(coin1_sk_root, slot);
- let coin1_commitment = Self::commitment(pk, pallas::Base::from(value), seed, coin1_blind);
- // Hash its coordinates to get a base field element
- let c1_cm_coords = coin1_commitment.to_affine().coordinates().unwrap();
- let c1_base_msg = [*c1_cm_coords.x(), *c1_cm_coords.y()];
- let coin1_commitment_base = poseidon_hash(c1_base_msg);
- // Append the element to the Merkle tree
- coin_commitment_tree.append(MerkleNode::from(coin1_commitment_base));
- let coin1_commitment_pos = coin_commitment_tree.mark().unwrap();
- let coin1_commitment_root = coin_commitment_tree.root(0).unwrap();
- let coin1_commitment_merkle_path =
- coin_commitment_tree.witness(coin1_commitment_pos, 0).unwrap();
-
- Self {
- value,
- slot,
- nonce: seed,
- coin1_commitment,
- coin1_commitment_root,
- coin1_commitment_pos: u32::try_from(u64::from(coin1_commitment_pos)).unwrap(),
- coin1_commitment_merkle_path,
- coin1_sk,
- coin1_sk_root,
- coin1_sk_pos: u32::try_from(coin1_sk_pos).unwrap(),
- coin1_sk_merkle_path,
- coin1_blind,
- }
- }
-
- pub fn sn(&self) -> pallas::Base {
- let sn_msg = [pallas::Base::from(PREFIX_SN), self.coin1_sk_root.inner(), self.nonce, ZERO];
- poseidon_hash(sn_msg)
- }
-
- pub fn election_seeds_u64(eta: pallas::Base, slotu64: u64) -> (pallas::Base, pallas::Base) {
- Self::election_seeds(eta, pallas::Base::from(slotu64))
- }
-
- /// Derive election seeds from given parameters
- pub fn election_seeds(eta: pallas::Base, slot: pallas::Base) -> (pallas::Base, pallas::Base) {
- info!(target: "consensus::leadcoin", "election_seeds: eta: {:?}, slot: {:?}", eta, slot);
- let election_seed_nonce = pallas::Base::from(3);
- let election_seed_lead = pallas::Base::from(22);
-
- // mu_y
- let lead_msg = [election_seed_lead, eta, slot];
- let lead_mu = poseidon_hash(lead_msg);
-
- // mu_rho
- let nonce_msg = [election_seed_nonce, eta, slot];
- let nonce_mu = poseidon_hash(nonce_msg);
-
- (lead_mu, nonce_mu)
- }
-
- /// Create a vector of `pallas::Base` elements from the `LeadCoin` to be
- /// used as public inputs for the ZK proof.
- pub fn public_inputs(
- &self,
- sigma1: pallas::Base,
- sigma2: pallas::Base,
- current_eta: pallas::Base,
- current_slot: pallas::Base,
- derived_blind: pallas::Scalar,
- ) -> Vec {
- // pk
- let pk = self.pk();
- // coin 1-2 cm/commitment
- let c1_cm_coord = self.coin1_commitment.to_affine().coordinates().unwrap();
- let c2_cm_coord = self.derived_commitment(derived_blind).to_affine().coordinates().unwrap();
- // lottery seed
- let seed_msg =
- [pallas::Base::from(PREFIX_SEED), self.coin1_sk_root.inner(), self.nonce, ZERO];
- let seed = poseidon_hash(seed_msg);
- // y
- let (y_mu, rho_mu) = Self::election_seeds(current_eta, current_slot);
- let y_msg = [seed, y_mu];
- let y = poseidon_hash(y_msg);
- // rho
- let rho_msg = [seed, rho_mu];
- let rho = poseidon_hash(rho_msg);
- let public_inputs = vec![
- pk,
- *c1_cm_coord.x(),
- *c1_cm_coord.y(),
- *c2_cm_coord.x(),
- *c2_cm_coord.y(),
- self.coin1_commitment_root.inner(),
- self.coin1_sk_root.inner(),
- self.sn(),
- y_mu,
- y,
- rho_mu,
- rho,
- sigma1,
- sigma2,
- ];
- public_inputs
- }
-
- fn util_pk(sk_root: MerkleNode, slot: u64) -> pallas::Base {
- let pk_msg =
- [pallas::Base::from(PREFIX_PK), sk_root.inner(), pallas::Base::from(slot), ZERO];
-
- poseidon_hash(pk_msg)
- }
- /// calculate coin public key: hash of root coin secret key
- /// and creation slot.
- pub fn pk(&self) -> pallas::Base {
- Self::util_pk(self.coin1_sk_root, self.slot)
- }
-
- fn util_derived_rho(sk_root: MerkleNode, nonce: pallas::Base) -> pallas::Base {
- let rho_msg = [pallas::Base::from(PREFIX_EVL), sk_root.inner(), nonce, ZERO];
-
- poseidon_hash(rho_msg)
- }
- /// calculate derived coin nonce: hash of root coin secret key
- /// and old nonce
- pub fn derived_rho(&self) -> pallas::Base {
- Self::util_derived_rho(self.coin1_sk_root, self.nonce)
- }
-
- pub fn headstart() -> pallas::Base {
- let headstart = constants::MIN_F.clone() * Float10::try_from(constants::P).unwrap();
- fbig2base(headstart)
- }
-
- pub fn is_leader(
- &self,
- sigma1: pallas::Base,
- sigma2: pallas::Base,
- current_eta: pallas::Base,
- current_slot: pallas::Base,
- ) -> bool {
- let y_seed =
- [pallas::Base::from(PREFIX_SEED), self.coin1_sk_root.inner(), self.nonce, ZERO];
- let y_seed_hash = poseidon_hash(y_seed);
- let (y_mu, _) = Self::election_seeds(current_eta, current_slot);
- let y_msg = [y_seed_hash, y_mu];
- let y = poseidon_hash(y_msg);
-
- let value = pallas::Base::from(self.value);
-
- let headstart = Self::headstart();
- let target = sigma1 * value + sigma2 * value * value + headstart;
-
- let y_t_str = format!("{:?},{:?}\n", y, target);
- let f =
- File::options().append(true).create(true).open(constants::LOTTERY_HISTORY_LOG).unwrap();
-
- {
- let mut writer = BufWriter::new(f);
- let _ = writer.write(&y_t_str.into_bytes()).unwrap();
- }
- info!(target: "consensus::leadcoin", "is_leader(): y = {:?}", y);
- info!(target: "consensus::leadcoin", "is_leader(): T = {:?}", target);
-
- y < target
- }
-
- fn commitment(
- pk: pallas::Base,
- value: pallas::Base,
- seed: pallas::Base,
- blind: pallas::Scalar,
- ) -> pallas::Point {
- let commit_msg = [pallas::Base::from(PREFIX_CM), pk, value, seed];
- // Create commitment to coin
- let commit_v = poseidon_hash(commit_msg);
- pedersen_commitment_base(commit_v, blind)
- }
- /// calculated derived coin commitment
- pub fn derived_commitment(&self, blind: pallas::Scalar) -> pallas::Point {
- let pk = self.pk();
- let rho = self.derived_rho();
- Self::commitment(pk, pallas::Base::from(self.value + constants::REWARD), rho, blind)
- }
-
- /// the new coin to be minted after the current coin is spent
- /// in lottery.
- pub fn derive_coin(
- &self,
- coin_commitment_tree: &mut MerkleTree,
- derived_blind: pallas::Scalar,
- ) -> LeadCoin {
- info!(target: "consensus::leadcoin", "derive_coin(): Deriving new coin!");
- let derived_c1_rho = self.derived_rho();
- let derived_c1_cm = self.derived_commitment(derived_blind);
- let derived_c1_cm_coord = derived_c1_cm.to_affine().coordinates().unwrap();
- let derived_c1_cm_msg = [*derived_c1_cm_coord.x(), *derived_c1_cm_coord.y()];
- let derived_c1_cm_base = poseidon_hash(derived_c1_cm_msg);
- coin_commitment_tree.append(MerkleNode::from(derived_c1_cm_base));
- let leaf_pos = coin_commitment_tree.mark().unwrap();
- let commitment_root = coin_commitment_tree.root(0).unwrap();
- let commitment_merkle_path = coin_commitment_tree.witness(leaf_pos, 0).unwrap();
- LeadCoin {
- value: self.value + constants::REWARD,
- slot: self.slot,
- nonce: derived_c1_rho,
- coin1_commitment: derived_c1_cm,
- coin1_commitment_root: commitment_root,
- coin1_commitment_pos: u32::try_from(u64::from(leaf_pos)).unwrap(),
- coin1_commitment_merkle_path: commitment_merkle_path,
- coin1_sk: self.coin1_sk,
- coin1_sk_root: self.coin1_sk_root,
- coin1_sk_pos: self.coin1_sk_pos,
- coin1_sk_merkle_path: self.coin1_sk_merkle_path.clone(),
- coin1_blind: derived_blind,
- }
- }
-
- /// Try to create a ZK proof of consensus leadership
- pub fn create_lead_proof(
- &self,
- sigma1: pallas::Base,
- sigma2: pallas::Base,
- eta: pallas::Base,
- slot: pallas::Base, //current slot index.
- pk: &ProvingKey,
- derived_blind: pallas::Scalar,
- ) -> (Result, Vec) {
- let (y_mu, rho_mu) = Self::election_seeds(eta, slot);
- let bincode = include_bytes!("../../proof/lead.zk.bin");
- let zkbin = ZkBinary::decode(bincode).unwrap();
- let headstart = Self::headstart();
- let coin1_commitment_merkle_path: [MerkleNode; MERKLE_DEPTH_LEAD_COIN] =
- self.coin1_commitment_merkle_path.clone().try_into().unwrap();
- let coin1_sk_merkle_path: [MerkleNode; MERKLE_DEPTH_LEAD_COIN] =
- self.coin1_sk_merkle_path.clone().try_into().unwrap();
- let witnesses = vec![
- Witness::MerklePath(Value::known(coin1_commitment_merkle_path)),
- Witness::Uint32(Value::known(self.coin1_commitment_pos)),
- Witness::Uint32(Value::known(self.coin1_sk_pos)),
- Witness::Base(Value::known(self.coin1_sk)),
- Witness::Base(Value::known(self.coin1_sk_root.inner())),
- Witness::MerklePath(Value::known(coin1_sk_merkle_path)),
- Witness::Base(Value::known(pallas::Base::from(self.slot))),
- Witness::Base(Value::known(self.nonce)),
- Witness::Scalar(Value::known(self.coin1_blind)),
- Witness::Base(Value::known(pallas::Base::from(self.value))),
- Witness::Scalar(Value::known(derived_blind)),
- Witness::Base(Value::known(rho_mu)),
- Witness::Base(Value::known(y_mu)),
- Witness::Base(Value::known(sigma1)),
- Witness::Base(Value::known(sigma2)),
- Witness::Base(Value::known(headstart)),
- ];
- let circuit = ZkCircuit::new(witnesses, &zkbin);
- let public_inputs = self.public_inputs(sigma1, sigma2, eta, slot, derived_blind);
- (Ok(Proof::create(pk, &[circuit], &public_inputs, &mut OsRng).unwrap()), public_inputs)
- }
-
- #[allow(clippy::too_many_arguments)]
- pub fn create_xfer_proof(
- &self,
- pk: &ProvingKey,
- change_coin: TxRcpt,
- change_pk: pallas::Base, //change coin public key
- transfered_coin: TxRcpt,
- transfered_pk: pallas::Base, // recipient coin's public key
- sigma1: pallas::Base,
- sigma2: pallas::Base,
- current_eta: pallas::Base,
- current_slot: pallas::Base,
- derived_blind: pallas::Scalar,
- ) -> Result {
- assert!(change_coin.value + transfered_coin.value == self.value && self.value > 0);
- let bincode = include_bytes!("../../proof/tx.zk.bin");
- let zkbin = ZkBinary::decode(bincode)?;
- let retval = pallas::Base::from(change_coin.value);
- let xferval = pallas::Base::from(transfered_coin.value);
- let pos: u32 = self.coin1_commitment_pos;
- let value = pallas::Base::from(self.value);
- let coin1_sk_merkle_path: [MerkleNode; MERKLE_DEPTH_LEAD_COIN] =
- self.coin1_sk_merkle_path.clone().try_into().unwrap();
- let coin1_commitment_merkle_path: [MerkleNode; MERKLE_DEPTH_LEAD_COIN] =
- self.coin1_commitment_merkle_path.clone().try_into().unwrap();
- let witnesses = vec![
- // coin (1) burned coin
- Witness::Base(Value::known(self.coin1_commitment_root.inner())),
- Witness::Base(Value::known(self.coin1_sk_root.inner())),
- Witness::Base(Value::known(self.coin1_sk)),
- Witness::MerklePath(Value::known(coin1_sk_merkle_path)),
- Witness::Uint32(Value::known(self.coin1_sk_pos)),
- Witness::Base(Value::known(self.nonce)),
- Witness::Scalar(Value::known(self.coin1_blind)),
- Witness::Base(Value::known(value)),
- Witness::MerklePath(Value::known(coin1_commitment_merkle_path)),
- Witness::Uint32(Value::known(pos)),
- Witness::Base(Value::known(self.sn())),
- // coin (3)
- Witness::Base(Value::known(change_pk)),
- Witness::Base(Value::known(change_coin.rho)),
- Witness::Scalar(Value::known(change_coin.opening)),
- Witness::Base(Value::known(retval)),
- // coin (4)
- Witness::Base(Value::known(transfered_pk)),
- Witness::Base(Value::known(transfered_coin.rho)),
- Witness::Scalar(Value::known(transfered_coin.opening)),
- Witness::Base(Value::known(xferval)),
- ];
- let circuit = ZkCircuit::new(witnesses, &zkbin);
- let proof = Proof::create(
- pk,
- &[circuit],
- &self.public_inputs(sigma1, sigma2, current_eta, current_slot, derived_blind),
- &mut OsRng,
- )?;
- let cm3_msg_in = [
- pallas::Base::from(PREFIX_CM),
- change_pk,
- pallas::Base::from(change_coin.value),
- change_coin.rho,
- ];
- let cm3_msg = poseidon_hash(cm3_msg_in);
- let cm3 = pedersen_commitment_base(cm3_msg, change_coin.opening);
- let cm4_msg_in = [
- pallas::Base::from(PREFIX_CM),
- transfered_pk,
- pallas::Base::from(transfered_coin.value),
- transfered_coin.rho,
- ];
- let cm4_msg = poseidon_hash(cm4_msg_in);
- let cm4 = pedersen_commitment_base(cm4_msg, transfered_coin.opening);
- let tx = TransferStx {
- coin_commitment: self.coin1_commitment,
- coin_pk: self.pk(),
- coin_root_sk: self.coin1_sk_root,
- change_coin_commitment: cm3,
- transfered_coin_commitment: cm4,
- nullifier: self.sn(),
- slot: pallas::Base::from(self.slot),
- root: self.coin1_commitment_root,
- proof,
- };
- Ok(tx)
- }
-}
-
-/// This struct holds the secrets for creating LeadCoins during one epoch.
-pub struct LeadCoinSecrets {
- pub secret_keys: Vec,
- pub merkle_roots: Vec,
- pub merkle_paths: Vec>,
-}
-
-impl LeadCoinSecrets {
- /// Generate epoch coins secret keys.
- /// First clot coin secret key is sampled at random, while the secret keys of the
- /// remaining slots derive from the previous slot secret.
- /// Clarification:
- /// ```plaintext
- /// sk[0] -> random,
- /// sk[1] -> derive_function(sk[0]),
- /// ...
- /// sk[n] -> derive_function(sk[n-1]),
- /// ```
- pub fn generate() -> Self {
- let mut tree = MerkleTree::new(EPOCH_LENGTH);
- let mut sks = Vec::with_capacity(EPOCH_LENGTH);
- let mut root_sks = Vec::with_capacity(EPOCH_LENGTH);
- let mut path_sks = Vec::with_capacity(EPOCH_LENGTH);
-
- let mut prev_sk = SecretKey::from(pallas::Base::one());
-
- for i in 0..EPOCH_LENGTH {
- let secret = if i == 0 {
- pedersen_commitment_u64(1, pallas::Scalar::random(&mut OsRng))
- } else {
- pedersen_commitment_u64(1, fp_mod_fv(prev_sk.inner()))
- };
-
- let secret_coords = secret.to_affine().coordinates().unwrap();
- let secret_msg = [*secret_coords.x(), *secret_coords.y()];
- let secret_key = SecretKey::from(poseidon_hash(secret_msg));
-
- sks.push(secret_key);
- prev_sk = secret_key;
-
- let node = MerkleNode::from(secret_key.inner());
- tree.append(node);
- let leaf_pos = tree.mark().unwrap();
- let root = tree.root(0).unwrap();
- let path = tree.witness(leaf_pos, 0).unwrap();
-
- root_sks.push(root);
- path_sks.push(path);
- }
-
- Self { secret_keys: sks, merkle_roots: root_sks, merkle_paths: path_sks }
- }
-}
diff --git a/src/consensus/lead_info.rs b/src/consensus/lead_info.rs
deleted file mode 100644
index 83ff82889..000000000
--- a/src/consensus/lead_info.rs
+++ /dev/null
@@ -1,111 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use darkfi_sdk::{
- crypto::{schnorr::Signature, Keypair, PublicKey},
- pasta::pallas,
-};
-use darkfi_serial::{async_trait, SerialDecodable, SerialEncodable};
-use log::error;
-
-use crate::{
- zk::proof::{Proof, VerifyingKey},
- Result,
-};
-
-// TODO: Replace 'Lead' terms with 'Producer' to make it more clear that
-// we refer to block producer.
-/// This struct represents [`Block`](super::Block) leader information used by the consensus protocol.
-#[derive(Debug, Clone, PartialEq, Eq, SerialEncodable, SerialDecodable)]
-pub struct LeadInfo {
- /// Block producer signature
- pub signature: Signature,
- /// Block producer public_key
- pub public_key: PublicKey, // TODO: remove this(to be derived by proof)
- /// Block producer slot competing coins public inputs
- pub public_inputs: Vec,
- /// Leader coin creation slot
- pub coin_slot: u64,
- /// Leader coin creation eta
- pub coin_eta: pallas::Base,
- /// Leader NIZK proof
- pub proof: LeadProof,
- /// Block producer leaders count
- pub leaders: u64,
-}
-
-impl Default for LeadInfo {
- /// Default LeadInfo used in genesis block generation
- fn default() -> Self {
- let keypair = Keypair::default();
- let signature = Signature::dummy();
- let public_inputs = vec![];
- let coin_slot = 0;
- let coin_eta = pallas::Base::zero();
- let proof = LeadProof::default();
- let leaders = 0;
- Self {
- signature,
- public_key: keypair.public,
- public_inputs,
- coin_slot,
- coin_eta,
- proof,
- leaders,
- }
- }
-}
-
-impl LeadInfo {
- #[allow(clippy::too_many_arguments)]
- pub fn new(
- signature: Signature,
- public_key: PublicKey,
- public_inputs: Vec,
- coin_slot: u64,
- coin_eta: pallas::Base,
- proof: LeadProof,
- leaders: u64,
- ) -> Self {
- Self { signature, public_key, public_inputs, coin_slot, coin_eta, proof, leaders }
- }
-}
-
-/// Wrapper over the Proof, for future additions.
-#[derive(Default, Debug, Clone, PartialEq, Eq, SerialEncodable, SerialDecodable)]
-pub struct LeadProof {
- /// Leadership proof
- pub proof: Proof,
-}
-
-impl LeadProof {
- pub fn verify(&self, vk: &VerifyingKey, public_inputs: &[pallas::Base]) -> Result<()> {
- if let Err(e) = self.proof.verify(vk, public_inputs) {
- error!(target: "consensus::lead_info", "Verification of consensus lead proof failed: {}", e);
- return Err(e.into())
- }
-
- Ok(())
- }
-}
-
-impl From for LeadProof {
- fn from(proof: Proof) -> Self {
- Self { proof }
- }
-}
diff --git a/src/consensus/mod.rs b/src/consensus/mod.rs
deleted file mode 100644
index daa56aefc..000000000
--- a/src/consensus/mod.rs
+++ /dev/null
@@ -1,78 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-/// Block definition
-pub mod block;
-pub use block::{Block, BlockInfo, BlockProposal, Header};
-
-/// Constants
-pub mod constants;
-pub use constants::{
- TESTNET_BOOTSTRAP_TIMESTAMP, TESTNET_GENESIS_HASH_BYTES, TESTNET_GENESIS_TIMESTAMP,
- TESTNET_INITIAL_DISTRIBUTION,
-};
-
-/// Consensus block leader information
-pub mod lead_info;
-pub use lead_info::{LeadInfo, LeadProof};
-
-/// Consensus state
-pub mod state;
-
-/// Consensus validator state
-pub mod validator;
-pub use validator::{ValidatorState, ValidatorStatePtr};
-
-/// Fee calculations
-pub mod fees;
-
-/// P2P net protocols
-pub mod proto;
-
-/// async tasks to utilize the protocols
-pub mod task;
-
-/// Lamport clock
-pub mod clock;
-pub use clock::{Clock, Ticks};
-
-/// Consensus participation coin functions and definitions
-pub mod lead_coin;
-pub use lead_coin::LeadCoin;
-
-/// Utility types
-pub mod types;
-pub use types::Float10;
-
-/// Utility functions
-pub mod utils;
-
-/// Wallet functions
-pub mod wallet;
-
-/// transfered tx proof with public inputs.
-pub mod stx;
-pub use stx::TransferStx;
-
-/// encrypted receipient coin info
-pub mod rcpt;
-pub use rcpt::{EncryptedTxRcpt, TxRcpt};
-
-/// transfer transaction
-pub mod tx;
-pub use tx::Tx;
diff --git a/src/consensus/proto/mod.rs b/src/consensus/proto/mod.rs
deleted file mode 100644
index 67ab86150..000000000
--- a/src/consensus/proto/mod.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-/// Block proposal protocol
-mod protocol_proposal;
-pub use protocol_proposal::ProtocolProposal;
-
-/// Transaction broadcast protocol
-mod protocol_tx;
-pub use protocol_tx::ProtocolTx;
-
-/// Validator + Replicator blockchain sync protocol
-mod protocol_sync;
-pub use protocol_sync::ProtocolSync;
-
-/// Validator consensus sync protocol
-mod protocol_sync_consensus;
-pub use protocol_sync_consensus::ProtocolSyncConsensus;
diff --git a/src/consensus/proto/protocol_proposal.rs b/src/consensus/proto/protocol_proposal.rs
deleted file mode 100644
index a71b5cc65..000000000
--- a/src/consensus/proto/protocol_proposal.rs
+++ /dev/null
@@ -1,134 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use std::sync::Arc;
-
-use async_trait::async_trait;
-use log::{debug, error, trace};
-use smol::Executor;
-use url::Url;
-
-use crate::{
- consensus::{BlockProposal, ValidatorStatePtr},
- net::{
- ChannelPtr, MessageSubscription, P2pPtr, ProtocolBase, ProtocolBasePtr,
- ProtocolJobsManager, ProtocolJobsManagerPtr,
- },
- Result,
-};
-
-pub struct ProtocolProposal {
- proposal_sub: MessageSubscription,
- jobsman: ProtocolJobsManagerPtr,
- state: ValidatorStatePtr,
- p2p: P2pPtr,
- channel_address: Url,
-}
-
-impl ProtocolProposal {
- pub async fn init(
- channel: ChannelPtr,
- state: ValidatorStatePtr,
- p2p: P2pPtr,
- ) -> Result {
- debug!(target: "consensus::protocol_proposal::init()", "Adding ProtocolProposal to the protocol registry");
- let msg_subsystem = channel.message_subsystem();
- msg_subsystem.add_dispatch::().await;
-
- let proposal_sub = channel.subscribe_msg::().await?;
-
- Ok(Arc::new(Self {
- proposal_sub,
- jobsman: ProtocolJobsManager::new("ProposalProtocol", channel.clone()),
- state,
- p2p,
- channel_address: channel.address().clone(),
- }))
- }
-
- async fn handle_receive_proposal(self: Arc) -> Result<()> {
- debug!(target: "consensus::protocol_proposal::handle_receive_proposal()", "START");
- let exclude_list = vec![self.channel_address.clone()];
- loop {
- let proposal = match self.proposal_sub.receive().await {
- Ok(v) => v,
- Err(e) => {
- debug!(
- target: "consensus::protocol_proposal::handle_receive_proposal()",
- "recv fail: {}",
- e
- );
- continue
- }
- };
-
- debug!(
- target: "consensus::protocol_proposal::handle_receive_proposal()",
- "recv: {}", proposal);
- trace!(
- target: "consensus::protocol_proposal::handle_receive_proposal()",
- "Full proposal: {:?}",
- proposal
- );
-
- let proposal_copy = (*proposal).clone();
-
- // Verify we have the proposal already
- let mut lock = self.state.write().await;
- if lock.consensus.proposal_exists(&proposal_copy.hash) {
- debug!(
- target: "consensus::protocol_proposal::handle_receive_proposal()",
- "Proposal already received."
- );
- continue
- }
-
- match lock.receive_proposal(&proposal_copy, None).await {
- Ok(broadcast) => {
- if broadcast {
- // Broadcast proposal to rest of nodes
- self.p2p.broadcast_with_exclude(&proposal_copy, &exclude_list).await;
- }
- }
- Err(e) => {
- error!(
- target: "consensus::protocol_proposal::handle_receive_proposal()",
- "receive_proposal error: {}",
- e
- );
- continue
- }
- }
- }
- }
-}
-
-#[async_trait]
-impl ProtocolBase for ProtocolProposal {
- async fn start(self: Arc, executor: Arc>) -> Result<()> {
- debug!(target: "consensus::protocol_proposal::start()", "START");
- self.jobsman.clone().start(executor.clone());
- self.jobsman.clone().spawn(self.clone().handle_receive_proposal(), executor.clone()).await;
- debug!(target: "consensus::protocol_proposal::start()", "END");
- Ok(())
- }
-
- fn name(&self) -> &'static str {
- "ProtocolProposal"
- }
-}
diff --git a/src/consensus/proto/protocol_sync.rs b/src/consensus/proto/protocol_sync.rs
deleted file mode 100644
index dc3ff267c..000000000
--- a/src/consensus/proto/protocol_sync.rs
+++ /dev/null
@@ -1,374 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use std::sync::Arc;
-
-use async_trait::async_trait;
-use log::{debug, error, info};
-use smol::Executor;
-
-use darkfi_sdk::blockchain::Slot;
-
-use crate::{
- consensus::{
- block::{BlockInfo, BlockOrder, BlockResponse},
- state::{SlotRequest, SlotResponse},
- ValidatorStatePtr,
- },
- net::{
- ChannelPtr, MessageSubscription, P2pPtr, ProtocolBase, ProtocolBasePtr,
- ProtocolJobsManager, ProtocolJobsManagerPtr,
- },
- Result,
-};
-
-// Constant defining how many blocks we send during syncing.
-const BATCH: u64 = 10;
-
-pub struct ProtocolSync {
- channel: ChannelPtr,
- request_sub: MessageSubscription,
- slot_request_sub: MessageSubscription,
- block_sub: MessageSubscription,
- slots_sub: MessageSubscription,
- jobsman: ProtocolJobsManagerPtr,
- state: ValidatorStatePtr,
- p2p: P2pPtr,
- consensus_mode: bool,
-}
-
-impl ProtocolSync {
- pub async fn init(
- channel: ChannelPtr,
- state: ValidatorStatePtr,
- p2p: P2pPtr,
- consensus_mode: bool,
- ) -> Result {
- let msg_subsystem = channel.message_subsystem();
- msg_subsystem.add_dispatch::().await;
- msg_subsystem.add_dispatch::().await;
- msg_subsystem.add_dispatch::().await;
- msg_subsystem.add_dispatch::().await;
-
- let request_sub = channel.subscribe_msg::().await?;
- let slot_request_sub = channel.subscribe_msg::().await?;
- let block_sub = channel.subscribe_msg::().await?;
- let slots_sub = channel.subscribe_msg::().await?;
-
- Ok(Arc::new(Self {
- channel: channel.clone(),
- request_sub,
- slot_request_sub,
- block_sub,
- slots_sub,
- jobsman: ProtocolJobsManager::new("SyncProtocol", channel),
- state,
- p2p,
- consensus_mode,
- }))
- }
-
- async fn handle_receive_request(self: Arc) -> Result<()> {
- debug!(
- target: "consensus::protocol_sync::handle_receive_request()",
- "START"
- );
- loop {
- let order = match self.request_sub.receive().await {
- Ok(v) => v,
- Err(e) => {
- debug!(
- target: "consensus::protocol_sync::handle_receive_request()",
- "recv fail: {}",
- e
- );
- continue
- }
- };
-
- debug!(
- target: "consensus::protocol_sync::handle_receive_request()",
- "received {:?}",
- order
- );
-
- // Extra validations can be added here
- /*
- let key = order.slot;
- let blocks = match self.state.read().await.blockchain.get_blocks_after(key, BATCH) {
- Ok(v) => v,
- Err(e) => {
- error!(
- target: "consensus::protocol_sync::handle_receive_request()",
- "get_blocks_after fail: {}",
- e
- );
- continue
- }
- };
- debug!(
- target: "consensus::protocol_sync::handle_receive_request()",
- "Found {} blocks",
- blocks.len()
- );
- */
- let blocks = vec![BlockInfo::default()];
-
- let response = BlockResponse { blocks };
- if let Err(e) = self.channel.send(&response).await {
- error!(
- target: "consensus::protocol_sync::handle_receive_request()",
- "channel send fail: {}",
- e
- )
- };
- }
- }
-
- async fn handle_receive_block(self: Arc) -> Result<()> {
- debug!(target: "consensus::protocol_sync::handle_receive_block()", "START");
- let _exclude_list = [self.channel.address()];
- loop {
- let info = match self.block_sub.receive().await {
- Ok(v) => v,
- Err(e) => {
- debug!(
- target: "consensus::protocol_sync::handle_receive_block()",
- "recv fail: {}",
- e
- );
- continue
- }
- };
-
- // Check if node has finished syncing its blockchain
- if !self.state.read().await.synced {
- debug!(
- target: "consensus::protocol_sync::handle_receive_block()",
- "Node still syncing blockchain, skipping..."
- );
- continue
- }
-
- // Check if node started participating in consensus.
- // Consensus-mode enabled nodes have already performed these steps,
- // during proposal finalization. They still listen to this sub,
- // in case they go out of sync and become a none-consensus node.
- if self.consensus_mode {
- let lock = self.state.read().await;
- let current = lock.consensus.time_keeper.current_slot();
- let participating = lock.consensus.participating;
- if let Some(slot) = participating {
- if current >= slot {
- debug!(
- target: "consensus::protocol_sync::handle_receive_block()",
- "node runs in consensus mode, skipping..."
- );
- continue
- }
- }
- }
-
- info!(
- target: "consensus::protocol_sync::handle_receive_block()",
- "Received block: {}",
- info.blockhash()
- );
-
- debug!(
- target: "consensus::protocol_sync::handle_receive_block()",
- "Processing received block"
- );
- /*
- let info_copy = (*info).clone();
- match self.state.write().await.receive_finalized_block(info_copy.clone()).await {
- Ok(v) => {
- if v {
- debug!(
- target: "consensus::protocol_sync::handle_receive_block()",
- "block processed successfully, broadcasting..."
- );
- self.p2p.broadcast_with_exclude(&info_copy, &exclude_list).await;
- }
- }
- Err(e) => {
- debug!(
- target: "consensus::protocol_sync::handle_receive_block()",
- "error processing finalized block: {}",
- e
- );
- }
- };
- */
- }
- }
-
- async fn handle_receive_slot_request(self: Arc) -> Result<()> {
- debug!(
- target: "consensus::protocol_sync::handle_receive_slot_request()",
- "START"
- );
- loop {
- let request = match self.slot_request_sub.receive().await {
- Ok(v) => v,
- Err(e) => {
- debug!(
- target: "consensus::protocol_sync::handle_receive_slot_request()",
- "recv fail: {}",
- e
- );
- continue
- }
- };
-
- debug!(
- target: "consensus::protocol_sync::handle_receive_slot_request()",
- "received {:?}",
- request
- );
-
- // Extra validations can be added here
- let key = request.slot;
- let slots = match self.state.read().await.blockchain.get_slots_after(key, BATCH) {
- Ok(v) => v,
- Err(e) => {
- error!(
- target: "consensus::protocol_sync::handle_receive_slot_request()",
- "get_slots_after fail: {}",
- e
- );
- continue
- }
- };
- debug!(
- target: "consensus::protocol_sync::handle_receive_slot_request()",
- "Found {} slots",
- slots.len()
- );
-
- let response = SlotResponse { slots };
- if let Err(e) = self.channel.send(&response).await {
- error!(
- target: "consensus::protocol_sync::handle_receive_slot_request()",
- "channel send fail: {}",
- e
- )
- };
- }
- }
-
- async fn handle_receive_slot(self: Arc) -> Result<()> {
- debug!(
- target: "consensus::protocol_sync::handle_receive_slot()",
- "START"
- );
- let exclude_list = vec![self.channel.address().clone()];
- loop {
- let slot = match self.slots_sub.receive().await {
- Ok(v) => v,
- Err(e) => {
- debug!(
- target: "consensus::protocol_sync::handle_receive_slot()",
- "recv fail: {}",
- e
- );
- continue
- }
- };
-
- // Check if node has finished syncing its blockchain
- if !self.state.read().await.synced {
- debug!(
- target: "consensus::protocol_sync::handle_receive_slot()",
- "Node still syncing blockchain, skipping..."
- );
- continue
- }
-
- // Check if node started participating in consensus.
- // Consensus-mode enabled nodes have already performed these steps,
- // during proposal finalization. They still listen to this sub,
- // in case they go out of sync and become a none-consensus node.
- if self.consensus_mode {
- let lock = self.state.read().await;
- let current = lock.consensus.time_keeper.current_slot();
- let participating = lock.consensus.participating;
- if let Some(slot) = participating {
- if current >= slot {
- debug!(
- target: "consensus::protocol_sync::handle_receive_slot()",
- "node runs in consensus mode, skipping..."
- );
- continue
- }
- }
- }
-
- info!(
- target: "consensus::protocol_sync::handle_receive_slot()",
- "Received slot: {}",
- slot.id
- );
-
- debug!(
- target: "consensus::protocol_sync::handle_receive_slot()",
- "Processing received slot"
- );
- let slot_copy = (*slot).clone();
- match self.state.write().await.receive_finalized_slots(slot_copy.clone()).await {
- Ok(v) => {
- if v {
- debug!(
- target: "consensus::protocol_sync::handle_receive_slot()",
- "slot processed successfully, broadcasting..."
- );
- self.p2p.broadcast_with_exclude(&slot_copy, &exclude_list).await;
- }
- }
- Err(e) => {
- debug!(
- target: "consensus::protocol_sync::handle_receive_slot()",
- "error processing finalized slot: {}",
- e
- );
- }
- };
- }
- }
-}
-
-#[async_trait]
-impl ProtocolBase for ProtocolSync {
- async fn start(self: Arc, executor: Arc>) -> Result<()> {
- debug!(target: "consensus::protocol_sync::start()", "START");
- self.jobsman.clone().start(executor.clone());
- self.jobsman.clone().spawn(self.clone().handle_receive_request(), executor.clone()).await;
- self.jobsman
- .clone()
- .spawn(self.clone().handle_receive_slot_request(), executor.clone())
- .await;
- self.jobsman.clone().spawn(self.clone().handle_receive_block(), executor.clone()).await;
- self.jobsman.clone().spawn(self.clone().handle_receive_slot(), executor.clone()).await;
- debug!(target: "consensus::protocol_sync::start()", "END");
- Ok(())
- }
-
- fn name(&self) -> &'static str {
- "ProtocolSync"
- }
-}
diff --git a/src/consensus/proto/protocol_sync_consensus.rs b/src/consensus/proto/protocol_sync_consensus.rs
deleted file mode 100644
index fe52e0ade..000000000
--- a/src/consensus/proto/protocol_sync_consensus.rs
+++ /dev/null
@@ -1,206 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use std::sync::Arc;
-
-use async_trait::async_trait;
-use log::{debug, error};
-use smol::Executor;
-
-use crate::{
- consensus::{
- state::{ConsensusRequest, ConsensusResponse, ConsensusSyncRequest, ConsensusSyncResponse},
- ValidatorStatePtr,
- },
- net::{
- ChannelPtr, MessageSubscription, P2pPtr, ProtocolBase, ProtocolBasePtr,
- ProtocolJobsManager, ProtocolJobsManagerPtr,
- },
- Result,
-};
-
-pub struct ProtocolSyncConsensus {
- channel: ChannelPtr,
- request_sub: MessageSubscription,
- sync_request_sub: MessageSubscription,
- jobsman: ProtocolJobsManagerPtr,
- state: ValidatorStatePtr,
-}
-
-impl ProtocolSyncConsensus {
- pub async fn init(
- channel: ChannelPtr,
- state: ValidatorStatePtr,
- _p2p: P2pPtr,
- ) -> Result {
- let msg_subsystem = channel.message_subsystem();
- msg_subsystem.add_dispatch::().await;
- msg_subsystem.add_dispatch::().await;
-
- let request_sub = channel.subscribe_msg::().await?;
- let sync_request_sub = channel.subscribe_msg::().await?;
-
- Ok(Arc::new(Self {
- channel: channel.clone(),
- request_sub,
- sync_request_sub,
- jobsman: ProtocolJobsManager::new("SyncConsensusProtocol", channel),
- state,
- }))
- }
-
- async fn handle_receive_request(self: Arc) -> Result<()> {
- debug!(
- target: "consensus::protocol_sync_consensus::handle_receive_request()",
- "START"
- );
- loop {
- let req = match self.request_sub.receive().await {
- Ok(v) => v,
- Err(e) => {
- debug!(
- target: "consensus::protocol_sync_consensus::handle_receive_request()",
- "recv fail: {}",
- e
- );
- continue
- }
- };
-
- debug!(
- target: "consensus::protocol_sync_consensus::handle_receive_request()",
- "received {:?}",
- req
- );
-
- // Extra validations can be added here.
- let lock = self.state.read().await;
- let bootstrap_slot = lock.consensus.bootstrap_slot;
- let current_slot = lock.consensus.time_keeper.current_slot();
- let mut forks = vec![];
- for fork in &lock.consensus.forks {
- forks.push(fork.clone().into());
- }
- let pending_txs = match lock.blockchain.get_pending_txs() {
- Ok(v) => v,
- Err(e) => {
- debug!(
- target: "consensus::protocol_sync_consensus::handle_receive_request()",
- "Failed querying pending txs store: {}",
- e
- );
- vec![]
- }
- };
- let slots = lock.consensus.slots.clone();
- let mut f_history = vec![];
- for f in &lock.consensus.f_history {
- let f_str = format!("{:}", f);
- f_history.push(f_str);
- }
- let mut err_history = vec![];
- for err in &lock.consensus.err_history {
- let err_str = format!("{:}", err);
- err_history.push(err_str);
- }
- let nullifiers = lock.consensus.nullifiers.clone();
- let response = ConsensusResponse {
- bootstrap_slot,
- current_slot,
- forks,
- pending_txs,
- slots,
- f_history,
- err_history,
- nullifiers,
- };
- if let Err(e) = self.channel.send(&response).await {
- error!(
- target: "consensus::protocol_sync_consensus::handle_receive_request()",
- "channel send fail: {}",
- e
- );
- };
- }
- }
-
- async fn handle_receive_sync_request(self: Arc) -> Result<()> {
- debug!(
- target: "consensus::protocol_sync_consensus::handle_receive_sync_request()",
- "START"
- );
- loop {
- let req = match self.sync_request_sub.receive().await {
- Ok(v) => v,
- Err(e) => {
- debug!(
- target: "consensus::protocol_sync_consensus::handle_receive_sync_request()",
- "recv fail: {}",
- e
- );
- continue
- }
- };
-
- debug!(
- target: "consensus::protocol_sync_consensus::handle_receive_sync_request()",
- "received {:?}",
- req
- );
-
- // Extra validations can be added here.
- let lock = self.state.read().await;
- let bootstrap_slot = lock.consensus.bootstrap_slot;
- let proposing = lock.consensus.proposing;
- let is_empty = lock.consensus.slots_is_empty();
- let response = ConsensusSyncResponse { bootstrap_slot, proposing, is_empty };
- if let Err(e) = self.channel.send(&response).await {
- error!(
- target: "consensus::protocol_sync_consensus::handle_receive_sync_request()",
- "channel send fail: {}",
- e
- );
- };
- }
- }
-}
-
-#[async_trait]
-impl ProtocolBase for ProtocolSyncConsensus {
- async fn start(self: Arc, executor: Arc>) -> Result<()> {
- debug!(
- target: "consensus::protocol_sync_consensus::start()",
- "START"
- );
- self.jobsman.clone().start(executor.clone());
- self.jobsman.clone().spawn(self.clone().handle_receive_request(), executor.clone()).await;
- self.jobsman
- .clone()
- .spawn(self.clone().handle_receive_sync_request(), executor.clone())
- .await;
- debug!(
- target: "consensus::protocol_sync_consensus::start()",
- "END"
- );
- Ok(())
- }
-
- fn name(&self) -> &'static str {
- "ProtocolSyncConsensus"
- }
-}
diff --git a/src/consensus/proto/protocol_tx.rs b/src/consensus/proto/protocol_tx.rs
deleted file mode 100644
index f90eb56f2..000000000
--- a/src/consensus/proto/protocol_tx.rs
+++ /dev/null
@@ -1,122 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use std::sync::Arc;
-
-use async_trait::async_trait;
-use log::debug;
-use smol::Executor;
-use url::Url;
-
-use crate::{
- consensus::ValidatorStatePtr,
- impl_p2p_message,
- net::{
- ChannelPtr, Message, MessageSubscription, P2pPtr, ProtocolBase, ProtocolBasePtr,
- ProtocolJobsManager, ProtocolJobsManagerPtr,
- },
- tx::Transaction,
- Result,
-};
-
-impl_p2p_message!(Transaction, "tx");
-
-pub struct ProtocolTx {
- tx_sub: MessageSubscription,
- jobsman: ProtocolJobsManagerPtr,
- state: ValidatorStatePtr,
- p2p: P2pPtr,
- channel_address: Url,
-}
-
-impl ProtocolTx {
- pub async fn init(
- channel: ChannelPtr,
- state: ValidatorStatePtr,
- p2p: P2pPtr,
- ) -> Result {
- debug!(
- target: "consensus::protocol_tx::init()",
- "Adding ProtocolTx to the protocol registry"
- );
- let msg_subsystem = channel.message_subsystem();
- msg_subsystem.add_dispatch::().await;
-
- let tx_sub = channel.subscribe_msg::().await?;
-
- Ok(Arc::new(Self {
- tx_sub,
- jobsman: ProtocolJobsManager::new("TxProtocol", channel.clone()),
- state,
- p2p,
- channel_address: channel.address().clone(),
- }))
- }
-
- async fn handle_receive_tx(self: Arc) -> Result<()> {
- debug!(
- target: "consensus::protocol_tx::handle_receive_tx()",
- "START"
- );
- let exclude_list = vec![self.channel_address.clone()];
- loop {
- let tx = match self.tx_sub.receive().await {
- Ok(v) => v,
- Err(e) => {
- debug!(
- target: "consensus::protocol_tx::handle_receive_tx()",
- "recv fail: {}",
- e
- );
- continue
- }
- };
-
- // Check if node has finished syncing its blockchain
- if !self.state.read().await.synced {
- debug!(
- target: "consensus::protocol_tx::handle_receive_tx()",
- "Node still syncing blockchain, skipping..."
- );
- continue
- }
-
- let tx_copy = (*tx).clone();
-
- // Nodes use unconfirmed_txs vector as seen_txs pool.
- if self.state.write().await.append_tx(tx_copy.clone()).await {
- self.p2p.broadcast_with_exclude(&tx_copy, &exclude_list).await;
- }
- }
- }
-}
-
-#[async_trait]
-impl ProtocolBase for ProtocolTx {
- async fn start(self: Arc, executor: Arc>) -> Result<()> {
- debug!(target: "consensus::protocol_tx::start()", "START");
- self.jobsman.clone().start(executor.clone());
- self.jobsman.clone().spawn(self.clone().handle_receive_tx(), executor.clone()).await;
- debug!(target: "consensus::protocol_tx::start()", "END");
- Ok(())
- }
-
- fn name(&self) -> &'static str {
- "ProtocolTx"
- }
-}
diff --git a/src/consensus/rcpt.rs b/src/consensus/rcpt.rs
deleted file mode 100644
index 6b8925883..000000000
--- a/src/consensus/rcpt.rs
+++ /dev/null
@@ -1,95 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use crypto_api_chachapoly::ChachaPolyIetf;
-use darkfi_sdk::{
- crypto::{
- diffie_hellman::{kdf_sapling, sapling_ka_agree},
- keypair::PublicKey,
- SecretKey,
- },
- pasta::pallas,
-};
-use darkfi_serial::{async_trait, Decodable, Encodable, SerialDecodable, SerialEncodable};
-use rand::rngs::OsRng;
-
-use crate::Error;
-
-/// transfered lead coin is rcpt into two coins,
-/// first coin is transfered rcpt coin.
-/// second coin is the change returning to sender, or different address.
-#[derive(Debug, Clone, Copy, Eq, PartialEq, SerialEncodable, SerialDecodable)]
-pub struct TxRcpt {
- /// rcpt coin nonce
- pub rho: pallas::Base,
- /// rcpt coin commitment opening
- pub opening: pallas::Scalar,
- /// rcpt coin value
- pub value: u64,
-}
-
-pub const PLAINTEXT_SIZE: usize = 32 + 32 + 8;
-pub const AEAD_TAG_SIZE: usize = 16;
-pub const CIPHER_SIZE: usize = PLAINTEXT_SIZE + AEAD_TAG_SIZE;
-
-impl TxRcpt {
- /// encrypt received coin, by recipient public key
- pub fn encrypt(&self, public: &PublicKey) -> EncryptedTxRcpt {
- let ephem_secret = SecretKey::random(&mut OsRng);
- let ephem_public = PublicKey::from_secret(ephem_secret);
- let shared_secret = sapling_ka_agree(&ephem_secret, public);
- let key = kdf_sapling(&shared_secret, &ephem_public);
-
- let mut input = Vec::new();
- self.encode(&mut input).unwrap();
-
- let mut ciphertext = [0u8; CIPHER_SIZE];
- assert_eq!(
- ChachaPolyIetf::aead_cipher()
- .seal_to(&mut ciphertext, &input, &[], key.as_ref(), &[0u8; 12])
- .unwrap(),
- CIPHER_SIZE
- );
-
- EncryptedTxRcpt { ciphertext, ephem_public }
- }
-}
-
-#[derive(Debug, Clone, PartialEq, Eq, SerialEncodable, SerialDecodable)]
-pub struct EncryptedTxRcpt {
- ciphertext: [u8; CIPHER_SIZE],
- ephem_public: PublicKey,
-}
-
-impl EncryptedTxRcpt {
- pub fn decrypt(&self, secret: &SecretKey) -> TxRcpt {
- let shared_secret = sapling_ka_agree(secret, &self.ephem_public);
- let key = kdf_sapling(&shared_secret, &self.ephem_public);
-
- let mut plaintext = [0; CIPHER_SIZE];
- assert_eq!(
- ChachaPolyIetf::aead_cipher()
- .open_to(&mut plaintext, &self.ciphertext, &[], key.as_ref(), &[0u8; 12])
- .map_err(|_| Error::TxRcptDecryptionError)
- .unwrap(),
- PLAINTEXT_SIZE
- );
-
- TxRcpt::decode(&plaintext[..]).unwrap()
- }
-}
diff --git a/src/consensus/state.rs b/src/consensus/state.rs
deleted file mode 100644
index 09f111870..000000000
--- a/src/consensus/state.rs
+++ /dev/null
@@ -1,898 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use darkfi_sdk::{
- blockchain::{PidOutput, PreviousSlot, Slot},
- crypto::MerkleTree,
- pasta::{group::ff::PrimeField, pallas},
-};
-use darkfi_serial::{async_trait, deserialize, serialize, SerialDecodable, SerialEncodable};
-use log::info;
-use rand::{thread_rng, Rng};
-
-use super::{
- constants,
- lead_coin::{LeadCoin, LeadCoinSecrets},
- utils::fbig2base,
- Block, BlockProposal, Float10,
-};
-use crate::{
- blockchain::Blockchain,
- impl_p2p_message,
- net::Message,
- tx::Transaction,
- util::time::{TimeKeeper, Timestamp},
- wallet::WalletPtr,
- Error, Result,
-};
-
-use std::{
- fs::File,
- io::{prelude::*, BufWriter},
-};
-
-/// This struct represents the information required by the consensus algorithm
-pub struct ConsensusState {
- /// Wallet interface
- pub wallet: WalletPtr,
- /// Canonical (finalized) blockchain
- pub blockchain: Blockchain,
- /// Network bootstrap timestamp
- pub bootstrap_ts: Timestamp,
- /// Helper structure to calculate time related operations
- pub time_keeper: TimeKeeper,
- /// Genesis block hash
- pub genesis_block: blake3::Hash,
- /// Total sum of initial staking coins
- pub initial_distribution: u64,
- /// Flag to enable single-node mode
- pub single_node: bool,
- /// Slot the network was bootstrapped
- pub bootstrap_slot: u64,
- /// Participating start slot
- pub participating: Option,
- /// Node is able to propose proposals
- pub proposing: bool,
- /// Last slot node check for finalization
- pub checked_finalization: u64,
- /// Fork chains containing block proposals
- pub forks: Vec,
- /// Current epoch
- pub epoch: u64,
- /// Hot/live slots
- pub slots: Vec,
- /// Last slot leaders count
- pub previous_leaders: u64,
- /// Controller output history
- pub f_history: Vec,
- /// Controller proportional error history
- pub err_history: Vec,
- // TODO: Aren't these already in db after finalization?
- /// Canonical competing coins
- pub coins: Vec,
- /// Canonical coin commitments tree
- pub coins_tree: MerkleTree,
- /// Canonical seen nullifiers from proposals
- pub nullifiers: Vec,
-}
-
-impl ConsensusState {
- pub fn new(
- wallet: WalletPtr,
- blockchain: Blockchain,
- bootstrap_ts: Timestamp,
- genesis_ts: Timestamp,
- genesis_data: blake3::Hash,
- initial_distribution: u64,
- single_node: bool,
- ) -> Self {
- let genesis_block = Block::genesis_block(genesis_ts, genesis_data).blockhash();
- let time_keeper =
- TimeKeeper::new(genesis_ts, constants::EPOCH_LENGTH as u64, constants::SLOT_TIME, 0);
- Self {
- wallet,
- blockchain,
- bootstrap_ts,
- time_keeper,
- genesis_block,
- initial_distribution,
- single_node,
- bootstrap_slot: 0,
- participating: None,
- proposing: false,
- checked_finalization: 0,
- forks: vec![],
- epoch: 0,
- slots: vec![],
- previous_leaders: 0,
- f_history: vec![constants::FLOAT10_ZERO.clone()],
- err_history: vec![constants::FLOAT10_ZERO.clone(), constants::FLOAT10_ZERO.clone()],
- coins: vec![],
- coins_tree: MerkleTree::new(constants::EPOCH_LENGTH * 100),
- nullifiers: vec![],
- }
- }
-
- /// Finds the last slot a proposal or block was generated.
- pub fn last_slot(&self) -> Result {
- let mut slot = 0;
- for chain in &self.forks {
- for state_checkpoint in &chain.sequence {
- if state_checkpoint.proposal.block.header.slot > slot {
- slot = state_checkpoint.proposal.block.header.slot;
- }
- }
- }
-
- // We return here in case proposals exist,
- // so we don't query the sled database.
- if slot > 0 {
- return Ok(slot)
- }
-
- let (last_slot, _) = self.blockchain.last()?;
- Ok(last_slot)
- }
-
- /// Set participating slot to next.
- pub fn set_participating(&mut self) -> Result<()> {
- self.participating = Some(self.time_keeper.current_slot() + 1);
- Ok(())
- }
-
- /// Generate current slot
- fn generate_slot(
- &mut self,
- fork_hashes: Vec,
- fork_previous_hashes: Vec,
- sigma1: pallas::Base,
- sigma2: pallas::Base,
- ) {
- let id = self.time_keeper.current_slot();
- let previous = PreviousSlot::new(0, fork_hashes, fork_previous_hashes, 0.0);
- let pid = PidOutput::new(0.0, 0.0, sigma1, sigma2);
- let slot = Slot::new(id, previous, pid, self.get_last_eta(), 0, 0);
- info!(target: "consensus::state", "generate_slot: {:?}", slot);
- self.slots.push(slot);
- }
-
- // Initialize node lead coins and set current epoch and eta.
- pub async fn init_coins(&mut self) -> Result<()> {
- self.epoch = self.time_keeper.current_epoch();
- self.coins = self.create_coins().await?;
- self.update_forks_checkpoints();
- Ok(())
- }
-
- /// Check if new epoch has started and generate slot.
- /// Returns flag to signify if epoch has changed.
- pub async fn epoch_changed(
- &mut self,
- fork_hashes: Vec,
- fork_previous_hashes: Vec,
- sigma1: pallas::Base,
- sigma2: pallas::Base,
- ) -> Result {
- self.generate_slot(fork_hashes, fork_previous_hashes, sigma1, sigma2);
- let epoch = self.time_keeper.current_epoch();
- if epoch <= self.epoch {
- return Ok(false)
- }
- self.epoch = epoch;
-
- Ok(true)
- }
-
- /// Return 2-term target approximation sigma coefficients.
- pub fn sigmas(&mut self) -> (pallas::Base, pallas::Base) {
- let f = self.win_inv_prob_with_full_stake();
- let total_stake = self.total_stake();
- let total_sigma = Float10::try_from(total_stake).unwrap();
- self.calc_sigmas(f, total_sigma)
- }
-
- fn calc_sigmas(&self, f: Float10, total_sigma: Float10) -> (pallas::Base, pallas::Base) {
- info!(target: "consensus::state", "sigmas(): f: {}", f);
- info!(target: "consensus::state", "sigmas(): total network stake: {:}", total_sigma);
-
- let one = constants::FLOAT10_ONE.clone();
- let neg_one = constants::FLOAT10_NEG_ONE.clone();
- let two = constants::FLOAT10_TWO.clone();
-
- let field_p = Float10::try_from(constants::P).unwrap();
-
- let x = one - f;
- let c = x.ln();
- let neg_c = neg_one * c;
-
- let sigma1_fbig = neg_c.clone() /
- (total_sigma.clone() + constants::FLOAT10_EPSILON.clone()) *
- field_p.clone();
- info!(target: "consensus::state", "sigma1_fbig: {:}", sigma1_fbig);
- let sigma1 = fbig2base(sigma1_fbig);
-
- let sigma2_fbig = (neg_c / (total_sigma + constants::FLOAT10_EPSILON.clone()))
- .powf(two.clone()) *
- (field_p / two);
- info!(target: "consensus::state", "sigma2_fbig: {:}", sigma2_fbig);
- let sigma2 = fbig2base(sigma2_fbig);
-
- (sigma1, sigma2)
- }
-
- /// Generate coins for provided sigmas.
- /// NOTE: The strategy here is having a single competing coin per slot.
- // TODO: DRK coin need to be burned, and consensus coin to be minted.
- async fn create_coins(&mut self) -> Result> {
- // TODO: cleanup LeadCoinSecrets, no need to keep a vector
- let (seeds, epoch_secrets) = {
- let mut rng = thread_rng();
- let mut seeds: Vec = Vec::with_capacity(constants::EPOCH_LENGTH);
- for _ in 0..constants::EPOCH_LENGTH {
- seeds.push(rng.gen());
- }
- (seeds, LeadCoinSecrets::generate())
- };
-
- // LeadCoin matrix containing node competing coins.
- let mut coins: Vec = Vec::with_capacity(constants::EPOCH_LENGTH);
-
- // Retrieve coin from wallet
- // NOTE: In future this will be retrieved from the money contract.
-
- // Execute the query and see if we find any rows
- let query_str = format!("SELECT * FROM {}", constants::CONSENSUS_COIN_TABLE);
- let wallet_conn = self.wallet.conn.lock().await;
- let mut stmt = wallet_conn.prepare(&query_str)?;
-
- let coin = stmt.query_row((), |row| {
- let bytes: Vec = row.get(constants::CONSENSUS_COIN_COL)?;
- let coin = deserialize(&bytes).unwrap();
- Ok(coin)
- });
-
- stmt.finalize()?;
-
- let coin = match coin {
- Ok(c) => c,
- Err(_) => {
- // If no records are found, we generate a new coin and save it to the database
- info!(target: "consensus::state", "create_coins(): No LeadCoin was found in DB, generating new one...");
- // Temporarily, we compete with fixed stake.
- // This stake should be based on how many nodes we want to run, and they all
- // must sum to initial distribution total coins.
- //let stake = self.initial_distribution;
- let c = LeadCoin::new(
- 0,
- self.time_keeper.current_slot(),
- epoch_secrets.secret_keys[0].inner(),
- epoch_secrets.merkle_roots[0],
- 0,
- epoch_secrets.merkle_paths[0].clone(),
- pallas::Base::from(seeds[0]),
- &mut self.coins_tree,
- );
- let query_str = format!(
- "INSERT INTO {} ({}) VALUES (?1);",
- constants::CONSENSUS_COIN_TABLE,
- constants::CONSENSUS_COIN_COL
- );
- let mut stmt = wallet_conn.prepare(&query_str)?;
- stmt.execute([serialize(&c)])?;
- c
- }
- };
-
- info!(target: "consensus::state", "create_coins(): Will use LeadCoin with value: {}", coin.value);
- coins.push(coin);
-
- Ok(coins)
- }
-
- /// Leadership reward, assuming constant reward
- /// TODO (res) implement reward mechanism with accord to DRK,DARK token-economics
- fn reward(&self) -> u64 {
- constants::REWARD
- }
-
- /// Auxillary function to calculate total slot rewards.
- fn slot_rewards(&self) -> u64 {
- // Retrieve existing blocks excluding genesis
- let blocks = (self.blockchain.len() as u64) - 1;
- // Retrieve longest fork length, to include those proposals in the calculation
- let max_fork_length = self.longest_chain_length() as u64;
- // Calculate rewarded slots
- let rewarded_slots = blocks + max_fork_length;
-
- rewarded_slots * self.reward()
- }
-
- /// Network total stake, assuming constant reward.
- /// Only used for fine-tuning. At genesis epoch first slot, of absolute index 0,
- /// if no stake was distributed, the total stake would be 0.
- /// To avoid division by zero, we asume total stake at first division is GENESIS_TOTAL_STAKE(1).
- fn total_stake(&self) -> u64 {
- let total_stake = self.slot_rewards() + self.initial_distribution;
- if total_stake == 0 {
- return constants::GENESIS_TOTAL_STAKE
- }
- total_stake
- }
-
- fn f_err(&mut self) -> Float10 {
- info!(target: "consensus::state", "Previous leaders: {}", self.previous_leaders);
- // Write counter to file
- let mut count_str: String = self.previous_leaders.to_string();
- count_str.push(',');
- let f =
- File::options().append(true).create(true).open(constants::LEADER_HISTORY_LOG).unwrap();
- {
- let mut writer = BufWriter::new(f);
- let _ = writer.write(&count_str.into_bytes()).unwrap();
- }
- // Calculate feedback
- let feedback = Float10::try_from(self.previous_leaders as i64).unwrap();
- // Reset previous leaders counter
- self.previous_leaders = 0;
- let target = constants::FLOAT10_ONE.clone();
- target - feedback
- }
-
- fn discrete_pid(&mut self) -> Float10 {
- let k1 = constants::KP.clone() + constants::KI.clone() + constants::KD.clone();
- let k2 = constants::FLOAT10_NEG_ONE.clone() * constants::KP.clone() +
- constants::FLOAT10_NEG_TWO.clone() * constants::KD.clone();
- let k3 = constants::KD.clone();
- let f_len = self.f_history.len();
- let err = self.f_err();
- let err_len = self.err_history.len();
- let ret = self.f_history[f_len - 1].clone() +
- k1.clone() * err.clone() +
- k2.clone() * self.err_history[err_len - 1].clone() +
- k3.clone() * self.err_history[err_len - 2].clone();
- info!(target: "consensus::state", "pid::f-1: {:}", self.f_history[f_len - 1].clone());
- info!(target: "consensus::state", "pid::err: {:}", err);
- info!(target: "consensus::state", "pid::err-1: {}", self.err_history[err_len - 1].clone());
- info!(target: "consensus::state", "pid::err-2: {}", self.err_history[err_len - 2].clone());
- info!(target: "consensus::state", "pid::k1: {}", k1);
- info!(target: "consensus::state", "pid::k2: {}", k2);
- info!(target: "consensus::state", "pid::k3: {}", k3);
- self.err_history.push(err);
- ret
- }
- /// the probability inverse of winning lottery having all the stake
- /// returns f
- fn win_inv_prob_with_full_stake(&mut self) -> Float10 {
- let mut f = self.discrete_pid();
- if f <= constants::FLOAT10_ZERO.clone() {
- f = constants::MIN_F.clone()
- } else if f >= constants::FLOAT10_ONE.clone() {
- f = constants::MAX_F.clone()
- }
- // log f history
- let file =
- File::options().append(true).create(true).open(constants::F_HISTORY_LOG).unwrap();
- {
- let mut f_history = format!("{:}", f);
- f_history.push(',');
- let mut writer = BufWriter::new(file);
- let _ = writer.write(&f_history.into_bytes()).unwrap();
- }
- self.f_history.push(f.clone());
- f
- }
-
- /// Check that the participant/stakeholder coins win the slot lottery.
- /// If the stakeholder has multiple competing winning coins, only the highest value
- /// coin is selected, since the stakeholder can't give more than one proof per block/slot.
- /// * 'sigma1', 'sigma2': slot sigmas
- /// Returns: (check: bool, idx: usize) where idx is the winning coin's index
- pub fn is_slot_leader(
- &mut self,
- sigma1: pallas::Base,
- sigma2: pallas::Base,
- ) -> (bool, i64, usize) {
- // Check if node can produce proposals
- if !self.proposing {
- return (false, 0, 0)
- }
-
- let fork_index = self.longest_chain_index();
- let competing_coins = if fork_index == -1 {
- self.coins.clone()
- } else {
- self.forks[fork_index as usize].sequence.last().unwrap().coins.clone()
- };
-
- // If on single-node mode, node always proposes by extending the
- // single fork it holds.
- if self.single_node {
- return (true, fork_index, 0)
- }
-
- let mut won = false;
- let mut highest_stake = 0;
- let mut highest_stake_idx = 0;
- let total_stake = self.total_stake();
- for (winning_idx, coin) in competing_coins.iter().enumerate() {
- info!(target: "consensus::state", "is_slot_leader: coin stake: {:?}", coin.value);
- info!(target: "consensus::state", "is_slot_leader: total stake: {}", total_stake);
- info!(target: "consensus::state", "is_slot_leader: relative stake: {}", (coin.value as f64) / total_stake as f64);
-
- let first_winning = coin.is_leader(
- sigma1,
- sigma2,
- self.get_last_eta(),
- pallas::Base::from(self.time_keeper.current_slot()),
- );
-
- if first_winning && !won {
- highest_stake_idx = winning_idx;
- }
-
- won |= first_winning;
- if won && coin.value > highest_stake {
- highest_stake = coin.value;
- highest_stake_idx = winning_idx;
- }
- }
-
- (won, fork_index, highest_stake_idx)
- }
-
- /// Finds the longest forkchain the node holds and
- /// returns its index.
- pub fn longest_chain_index(&self) -> i64 {
- let mut length = 0;
- let mut index = -1;
-
- if !self.forks.is_empty() {
- for (i, chain) in self.forks.iter().enumerate() {
- if chain.sequence.len() > length {
- length = chain.sequence.len();
- index = i as i64;
- }
- }
- }
-
- index
- }
-
- /// Finds the length of longest fork chain the node holds.
- pub fn longest_chain_length(&self) -> usize {
- let mut max = 0;
- for fork in &self.forks {
- if fork.sequence.len() > max {
- max = fork.sequence.len();
- }
- }
-
- max
- }
-
- /// Given a proposal, find the index of the fork chain it extends.
- pub fn find_extended_chain_index(&mut self, proposal: &BlockProposal) -> Result {
- // We iterate through all forks to find which fork to extend
- let mut chain_index = -1;
- let mut state_checkpoint_index = 0;
- for (c_index, chain) in self.forks.iter().enumerate() {
- // Traverse sequence in reverse
- for (sc_index, state_checkpoint) in chain.sequence.iter().enumerate().rev() {
- if proposal.block.header.previous == state_checkpoint.proposal.hash {
- chain_index = c_index as i64;
- state_checkpoint_index = sc_index;
- break
- }
- }
- if chain_index != -1 {
- break
- }
- }
-
- // If no fork was found, we check with canonical
- if chain_index == -1 {
- let (last_slot, last_block) = self.blockchain.last()?;
- if proposal.block.header.previous != last_block ||
- proposal.block.header.slot <= last_slot
- {
- info!(target: "consensus::state", "find_extended_chain_index(): Proposal doesn't extend any known chain");
- return Ok(-2)
- }
-
- // Proposal extends canonical chain
- return Ok(-1)
- }
-
- // Found fork chain
- let chain = &self.forks[chain_index as usize];
- // Proposal extends fork at last proposal
- if state_checkpoint_index == (chain.sequence.len() - 1) {
- return Ok(chain_index)
- }
-
- info!(target: "consensus::state", "find_extended_chain_index(): Proposal to fork a forkchain was received.");
- let mut chain = self.forks[chain_index as usize].clone();
- // We keep all proposals until the one it extends
- chain.sequence.drain((state_checkpoint_index + 1)..);
- self.forks.push(chain);
- Ok(self.forks.len() as i64 - 1)
- }
-
- /// Search the chains we're holding for the given proposal.
- pub fn proposal_exists(&self, input_proposal: &blake3::Hash) -> bool {
- for chain in self.forks.iter() {
- for state_checkpoint in chain.sequence.iter().rev() {
- if input_proposal == &state_checkpoint.proposal.hash {
- return true
- }
- }
- }
-
- false
- }
-
- /// Utility function to extract leader selection lottery randomness(eta),
- /// defined as the hash of the last block, converted to pallas base.
- pub fn get_last_eta(&self) -> pallas::Base {
- let (_, hash) = self.blockchain.last().unwrap();
- let mut bytes: [u8; 32] = *hash.as_bytes();
- // Read first 254 bits
- bytes[30] = 0;
- bytes[31] = 0;
- pallas::Base::from_repr(bytes).unwrap()
- }
-
- /// Auxillary function to retrieve slot of provided slot UID.
- pub fn get_slot(&self, id: u64) -> Result {
- // Check hot/live slotz
- for slot in self.slots.iter().rev() {
- if slot.id == id {
- return Ok(slot.clone())
- }
- }
- // Check if slot is finalized
- if let Ok(slots) = self.blockchain.get_slots_by_id(&[id]) {
- if !slots.is_empty() {
- if let Some(known_slot) = &slots[0] {
- return Ok(known_slot.clone())
- }
- }
- }
- Err(Error::SlotNotFound(id))
- }
-
- /// Auxillary function to check if node has seen current or previous slots.
- /// This check ensures that either the slots exist in memory or node has seen the finalization of these slots.
- pub fn slots_is_empty(&self) -> bool {
- let current_slot = self.time_keeper.current_slot();
- if self.get_slot(current_slot).is_ok() {
- return false
- }
- let previous_slot = current_slot - 1;
- self.get_slot(previous_slot).is_err()
- }
-
- /// Auxillary function to update all fork state checkpoints to nodes coins current canonical states.
- /// Note: This function should only be invoked once on nodes' coins creation.
- pub fn update_forks_checkpoints(&mut self) {
- for fork in &mut self.forks {
- for state_checkpoint in &mut fork.sequence {
- state_checkpoint.coins = self.coins.clone();
- state_checkpoint.coins_tree = self.coins_tree.clone();
- }
- }
- }
-
- /// Retrieve current forks last proposal hashes and their previous
- /// hashes. If node holds no fork, retrieve last canonical hash.
- pub fn fork_hashes(&self) -> (Vec, Vec) {
- let mut hashes = vec![];
- let mut previous_hashes = vec![];
- for fork in &self.forks {
- let proposal = &fork.sequence.last().unwrap().proposal;
- hashes.push(proposal.hash);
- previous_hashes.push(proposal.block.header.previous);
- }
-
- if hashes.is_empty() {
- hashes.push(self.genesis_block);
- previous_hashes.push(self.genesis_block);
- }
-
- (hashes, previous_hashes)
- }
-
- /// Auxiliary structure to reset consensus state for a resync
- pub fn reset(&mut self) {
- self.participating = None;
- self.proposing = false;
- self.forks = vec![];
- self.slots = vec![];
- self.previous_leaders = 0;
- self.f_history = vec![constants::FLOAT10_ZERO.clone()];
- self.err_history = vec![constants::FLOAT10_ZERO.clone(), constants::FLOAT10_ZERO.clone()];
- self.nullifiers = vec![];
- }
-}
-
-/// Auxiliary structure used for consensus syncing.
-#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
-pub struct ConsensusRequest {}
-impl_p2p_message!(ConsensusRequest, "consensusrequest");
-
-/// Auxiliary structure used for consensus syncing.
-#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
-pub struct ConsensusResponse {
- /// Slot the network was bootstrapped
- pub bootstrap_slot: u64,
- /// Current slot
- pub current_slot: u64,
- /// Hot/live data used by the consensus algorithm
- pub forks: Vec,
- /// Pending transactions
- pub pending_txs: Vec,
- /// Hot/live slots
- pub slots: Vec,
- // TODO: When Float10 supports encoding/decoding this should be
- // replaced by directly using Vec
- /// Controller output history
- pub f_history: Vec,
- /// Controller proportional error history
- pub err_history: Vec,
- /// Seen nullifiers from proposals
- pub nullifiers: Vec,
-}
-
-impl_p2p_message!(ConsensusResponse, "consensusresponse");
-
-/// Auxiliary structure used for consensus syncing.
-#[derive(Debug, SerialEncodable, SerialDecodable)]
-pub struct ConsensusSyncRequest {}
-
-impl_p2p_message!(ConsensusSyncRequest, "consensussyncrequest");
-
-/// Auxiliary structure used for consensus syncing.
-#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
-pub struct ConsensusSyncResponse {
- /// Node known bootstrap slot
- pub bootstrap_slot: u64,
- /// Node is able to propose proposals
- pub proposing: bool,
- /// Node has hot/live slots
- pub is_empty: bool,
-}
-
-impl_p2p_message!(ConsensusSyncResponse, "consensussyncresponse");
-impl_p2p_message!(Slot, "slot");
-
-/// Auxiliary structure used for slots syncing
-#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
-pub struct SlotRequest {
- /// Slot UID
- pub slot: u64,
-}
-
-impl_p2p_message!(SlotRequest, "slotrequest");
-
-/// Auxiliary structure used for slots syncing
-#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
-pub struct SlotResponse {
- /// Response blocks.
- pub slots: Vec,
-}
-
-impl_p2p_message!(SlotResponse, "slotresponse");
-
-/// Auxiliary structure used to keep track of consensus state checkpoints.
-#[derive(Debug, Clone)]
-pub struct StateCheckpoint {
- /// Block proposal
- pub proposal: BlockProposal,
- /// Node competing coins current state
- pub coins: Vec,
- /// Coin commitments tree current state
- pub coins_tree: MerkleTree,
- /// Seen nullifiers from proposals current state
- pub nullifiers: Vec,
-}
-
-impl StateCheckpoint {
- pub fn new(
- proposal: BlockProposal,
- coins: Vec,
- coins_tree: MerkleTree,
- nullifiers: Vec,
- ) -> Self {
- Self { proposal, coins, coins_tree, nullifiers }
- }
-}
-
-/// Auxiliary structure used for forked consensus state checkpoints syncing
-#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
-pub struct StateCheckpointInfo {
- /// Block proposal
- pub proposal: BlockProposal,
- /// Seen nullifiers from proposals current state
- pub nullifiers: Vec,
-}
-
-impl From for StateCheckpointInfo {
- fn from(state_checkpoint: StateCheckpoint) -> Self {
- Self { proposal: state_checkpoint.proposal, nullifiers: state_checkpoint.nullifiers }
- }
-}
-
-impl From for StateCheckpoint {
- fn from(state_checkpoint_info: StateCheckpointInfo) -> Self {
- Self {
- proposal: state_checkpoint_info.proposal,
- coins: vec![],
- coins_tree: MerkleTree::new(constants::EPOCH_LENGTH * 100),
- nullifiers: state_checkpoint_info.nullifiers,
- }
- }
-}
-
-/// This struct represents a sequence of consensus state checkpoints.
-#[derive(Debug, Clone)]
-pub struct Fork {
- pub genesis_block: blake3::Hash,
- pub sequence: Vec,
-}
-
-impl Fork {
- pub fn new(genesis_block: blake3::Hash, initial_state_checkpoint: StateCheckpoint) -> Self {
- Self { genesis_block, sequence: vec![initial_state_checkpoint] }
- }
-
- /// Insertion of a valid state checkpoint.
- pub fn add(&mut self, state_checkpoint: &StateCheckpoint) {
- if self.check_state_checkpoint(state_checkpoint, self.sequence.last().unwrap()) {
- self.sequence.push(state_checkpoint.clone());
- }
- }
-
- /// A fork chain is considered valid when every state checkpoint is valid,
- /// based on the `check_state_checkpoint` function
- pub fn check_chain(&self) -> bool {
- for (index, state_checkpoint) in self.sequence[1..].iter().enumerate() {
- if !self.check_state_checkpoint(state_checkpoint, &self.sequence[index]) {
- return false
- }
- }
-
- true
- }
-
- /// A state checkpoint is considered valid when its proposal parent hash is equal to the
- /// hash of the previous checkpoint's proposal and their slots are incremental,
- /// excluding the genesis block proposal.
- pub fn check_state_checkpoint(
- &self,
- state_checkpoint: &StateCheckpoint,
- previous: &StateCheckpoint,
- ) -> bool {
- if state_checkpoint.proposal.block.header.previous == self.genesis_block {
- info!(target: "consensus::state", "check_checkpoint(): Genesis block proposal provided.");
- return false
- }
-
- if state_checkpoint.proposal.block.header.previous != previous.proposal.hash ||
- state_checkpoint.proposal.block.header.slot <= previous.proposal.block.header.slot
- {
- info!(target: "consensus::state", "check_checkpoint(): Provided state checkpoint proposal is invalid.");
- return false
- }
-
- // TODO: validate rest checkpoint info(like nullifiers)
-
- true
- }
-}
-
-/// Auxiliary structure used for forks syncing
-#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
-pub struct ForkInfo {
- pub genesis_block: blake3::Hash,
- pub sequence: Vec,
-}
-
-impl From for ForkInfo {
- fn from(fork: Fork) -> Self {
- let mut sequence = vec![];
- for state_checkpoint in fork.sequence {
- sequence.push(state_checkpoint.into());
- }
- Self { genesis_block: fork.genesis_block, sequence }
- }
-}
-
-impl From for Fork {
- fn from(fork_info: ForkInfo) -> Self {
- let mut sequence = vec![];
- for checkpoint in fork_info.sequence {
- sequence.push(checkpoint.into());
- }
- Self { genesis_block: fork_info.genesis_block, sequence }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use crate::{
- consensus::{
- state::{Blockchain, ConsensusState},
- utils::fbig2base,
- Float10, TESTNET_BOOTSTRAP_TIMESTAMP, TESTNET_GENESIS_HASH_BYTES,
- TESTNET_GENESIS_TIMESTAMP, TESTNET_INITIAL_DISTRIBUTION,
- },
- wallet::WalletDb,
- };
-
- #[test]
- fn calc_sigmas_test() {
- smol::block_on(async {
- // Generate dummy state
- let wallet = WalletDb::new(None, None).unwrap();
- let sled_db = sled::Config::new().temporary(true).open().unwrap();
- let blockchain = Blockchain::new(&sled_db).unwrap();
- let state = ConsensusState::new(
- wallet,
- blockchain,
- *TESTNET_BOOTSTRAP_TIMESTAMP,
- *TESTNET_GENESIS_TIMESTAMP,
- *TESTNET_GENESIS_HASH_BYTES,
- *TESTNET_INITIAL_DISTRIBUTION,
- true,
- );
-
- let precision_diff = Float10::try_from(
- "10000000000000000000000000000000000000000000000000000000000000000000000000",
- )
- .unwrap();
- let precision_diff_base = fbig2base(precision_diff);
- let f = Float10::try_from("0.01").unwrap();
- let total_stake = Float10::try_from("100").unwrap();
- let (sigma1, sigma2) = state.calc_sigmas(f, total_stake);
- let sigma1_rhs = Float10::try_from(
- "2909373465034095801035568917399197865646520818579502832252119592405565440",
- )
- .unwrap();
- let sigma1_rhs_base = fbig2base(sigma1_rhs);
- let sigma2_rhs = Float10::try_from(
- "9137556389643100714432609642916129738741963230846798778430644027392",
- )
- .unwrap();
- let sigma2_rhs_base = fbig2base(sigma2_rhs);
- let sigma1_delta = if sigma1_rhs_base > sigma1 {
- sigma1_rhs_base - sigma1
- } else {
- sigma1 - sigma1_rhs_base
- };
- let sigma2_delta = if sigma2_rhs_base > sigma2 {
- sigma2_rhs_base - sigma2
- } else {
- sigma2 - sigma2_rhs_base
- };
- //note! test cases were generated by low precision python scripts.
- //https://github.com/ertosns/lotterysim/blob/master/pallas_unittests.csv
- assert!(sigma1_delta < precision_diff_base);
- assert!(sigma2_delta < precision_diff_base);
- });
- }
-}
diff --git a/src/consensus/stx.rs b/src/consensus/stx.rs
deleted file mode 100644
index 4fe905d8b..000000000
--- a/src/consensus/stx.rs
+++ /dev/null
@@ -1,79 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use darkfi_sdk::{
- crypto::MerkleNode,
- pasta::{arithmetic::CurveAffine, group::Curve, pallas},
-};
-use darkfi_serial::{async_trait, SerialDecodable, SerialEncodable};
-
-use crate::{
- zk::{proof::VerifyingKey, Proof},
- Error, Result,
-};
-
-#[derive(Debug, Clone, SerialDecodable, SerialEncodable)]
-pub struct TransferStx {
- /// sender's coin, or coin1_commitment in zk
- pub coin_commitment: pallas::Point,
- /// sender's coin pk
- pub coin_pk: pallas::Base,
- /// sender's coin sk's root
- pub coin_root_sk: MerkleNode,
- /// coin3_commitment in zk
- pub change_coin_commitment: pallas::Point,
- /// coin4_commitment in zk
- pub transfered_coin_commitment: pallas::Point,
- /// nullifiers coin1_nullifier
- pub nullifier: pallas::Base,
- /// sk coin creation slot
- pub slot: pallas::Base,
- /// root to coin's commitments
- pub root: MerkleNode,
- /// transfer proof
- pub proof: Proof,
-}
-
-impl TransferStx {
- /// verify the transfer proof.
- pub fn verify(&self, vk: VerifyingKey) -> Result<()> {
- if self.proof.verify(&vk, &self.public_inputs()).is_err() {
- return Err(Error::TransferTxVerification)
- }
- Ok(())
- }
-
- /// arrange public inputs from Stxfer
- pub fn public_inputs(&self) -> Vec {
- let cm1 = self.coin_commitment.to_affine().coordinates().unwrap();
- let cm3 = self.change_coin_commitment.to_affine().coordinates().unwrap();
- let cm4 = self.transfered_coin_commitment.to_affine().coordinates().unwrap();
- vec![
- self.coin_pk,
- *cm1.x(),
- *cm1.y(),
- *cm3.x(),
- *cm3.y(),
- *cm4.x(),
- *cm4.y(),
- self.root.inner(),
- self.coin_root_sk.inner(),
- self.nullifier,
- ]
- }
-}
diff --git a/src/consensus/task/block_sync.rs b/src/consensus/task/block_sync.rs
deleted file mode 100644
index 8a19b2fa9..000000000
--- a/src/consensus/task/block_sync.rs
+++ /dev/null
@@ -1,123 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use crate::{
- consensus::{
- block::{BlockOrder, BlockResponse},
- state::{SlotRequest, SlotResponse},
- ValidatorStatePtr,
- },
- net, Result,
-};
-use log::{debug, info, warn};
-
-/// async task used for block syncing.
-pub async fn block_sync_task(p2p: net::P2pPtr, state: ValidatorStatePtr) -> Result<()> {
- info!(target: "consensus::block_sync", "Starting blockchain sync...");
- // Getting a random connected channel to ask from peers
- match p2p.random_channel().await {
- Some(channel) => {
- let msg_subsystem = channel.message_subsystem();
-
- // Communication setup for slots
- msg_subsystem.add_dispatch::().await;
- let slot_response_sub = channel.subscribe_msg::().await?;
-
- // Communication setup for blocks
- msg_subsystem.add_dispatch::().await;
- let block_response_sub = channel.subscribe_msg::().await?;
-
- // Node loops until both slots and blocks have been synced
- let mut slots_synced = false;
- let mut blocks_synced = false;
- loop {
- // Node sends the last known slot of the canonical blockchain
- // and loops until the response is the same slot (used to utilize batch requests).
- let mut last = state.read().await.blockchain.last_slot()?;
- info!(target: "consensus::block_sync", "Last known slot: {:?}", last.id);
-
- loop {
- // Node creates a `SlotRequest` and sends it
- let request = SlotRequest { slot: last.id };
- channel.send(&request).await?;
-
- // Node stores response data.
- let resp = slot_response_sub.receive().await?;
-
- // Verify and store retrieveds
- debug!(target: "consensus::block_sync", "block_sync_task(): Processing received slots");
- state.write().await.receive_slots(&resp.slots).await?;
-
- let last_received = state.read().await.blockchain.last_slot()?;
- info!(target: "consensus::block_sync", "Last received slot: {:?}", last_received.id);
-
- if last.id == last_received.id {
- break
- }
-
- blocks_synced = false;
- last = last_received;
- }
-
- // We force a recheck of slots after blocks have been synced
- if blocks_synced {
- slots_synced = true;
- }
-
- // Node sends the last known block hash of the canonical blockchain
- // and loops until the response is the same block (used to utilize
- // batch requests).
- let mut last = state.read().await.blockchain.last()?;
- info!(target: "consensus::block_sync", "Last known block: {:?} - {:?}", last.0, last.1);
-
- loop {
- // Node creates a `BlockOrder` and sends it
- let order = BlockOrder { slot: last.0, block: last.1 };
- channel.send(&order).await?;
-
- // Node stores response data.
- let _resp = block_response_sub.receive().await?;
-
- // Verify and store retrieved blocks
- debug!(target: "consensus::block_sync", "block_sync_task(): Processing received blocks");
- //state.write().await.receive_sync_blocks(&resp.blocks).await?;
-
- let last_received = state.read().await.blockchain.last()?;
- info!(target: "consensus::block_sync", "Last received block: {:?} - {:?}", last_received.0, last_received.1);
-
- if last == last_received {
- blocks_synced = true;
- break
- }
-
- slots_synced = false;
- last = last_received;
- }
-
- if slots_synced && blocks_synced {
- break
- }
- }
- }
- None => warn!(target: "consensus::block_sync", "Node is not connected to other nodes"),
- };
-
- state.write().await.synced = true;
- info!(target: "consensus::block_sync", "Blockchain synced!");
- Ok(())
-}
diff --git a/src/consensus/task/consensus_sync.rs b/src/consensus/task/consensus_sync.rs
deleted file mode 100644
index eff8c4b3a..000000000
--- a/src/consensus/task/consensus_sync.rs
+++ /dev/null
@@ -1,160 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use log::{info, warn};
-
-use crate::{
- consensus::{
- state::{ConsensusRequest, ConsensusResponse, ConsensusSyncRequest, ConsensusSyncResponse},
- Float10, ValidatorStatePtr,
- },
- net::P2pPtr,
- system::sleep,
- Result,
-};
-
-/// async task used for consensus state syncing.
-/// Returns flag if node is not connected to other peers or consensus hasn't started,
-/// so it can immediately start proposing proposals.
-pub async fn consensus_sync_task(p2p: P2pPtr, state: ValidatorStatePtr) -> Result {
- info!(target: "consensus::consensus_sync", "Starting consensus state sync...");
- let current_slot = state.read().await.consensus.time_keeper.current_slot();
- // Loop through connected channels
- let channels = p2p.channels().await;
- if channels.is_empty() {
- warn!(target: "consensus::consensus_sync", "Node is not connected to other nodes");
- let mut lock = state.write().await;
- lock.consensus.bootstrap_slot = current_slot;
- lock.consensus.init_coins().await?;
- info!(target: "consensus::consensus_sync", "Consensus state synced!");
- return Ok(true)
- }
-
- // Node iterates the channel peers to check if at least on peer has seen slots
- let mut peer = None;
- for channel in channels {
- // Communication setup
- let msg_subsystem = channel.message_subsystem();
- msg_subsystem.add_dispatch::().await;
- let response_sub = channel.subscribe_msg::().await?;
- // Node creates a `ConsensusSyncRequest` and sends it
- let request = ConsensusSyncRequest {};
- channel.send(&request).await?;
-
- // Node checks response
- let response = response_sub.receive().await?;
- if response.bootstrap_slot == current_slot {
- warn!(target: "consensus::consensus_sync", "Network was just bootstraped, checking rest nodes");
- continue
- }
- if !response.proposing {
- warn!(target: "consensus::consensus_sync", "Node is not proposing, checking rest nodes");
- continue
- }
- if response.is_empty {
- warn!(target: "consensus::consensus_sync", "Node has not seen any slots, retrying...");
- continue
- }
- // Keep peer to ask for consensus state
- peer = Some(channel.clone());
- break
- }
-
- // If no peer knows about any slots, that means that the network was bootstrapped or restarted
- // and no node has started consensus.
- if peer.is_none() {
- warn!(target: "consensus::consensus_sync", "No node that has seen any slots was found, or network was just boostrapped.");
- let mut lock = state.write().await;
- lock.consensus.bootstrap_slot = current_slot;
- lock.consensus.init_coins().await?;
- info!(target: "consensus::consensus_sync", "Consensus state synced!");
- return Ok(true)
- }
- let peer = peer.unwrap();
-
- // Listen for next finalization
- info!(target: "consensus::consensus_sync", "Waiting for next finalization...");
- let subscriber = state.read().await.subscribers.get("blocks").unwrap().clone();
- let subscription = subscriber.sub.subscribe().await;
- subscription.receive().await;
- subscription.unsubscribe().await;
-
- // After finalization occurs, sync our consensus state.
- // This ensures that the received state always consists of 1 fork with one proposal.
- info!(target: "consensus::consensus_sync", "Finalization signal received, requesting consensus state...");
- // Communication setup
- let msg_subsystem = peer.message_subsystem();
- msg_subsystem.add_dispatch::().await;
- let response_sub = peer.subscribe_msg::().await?;
- // Node creates a `ConsensusRequest` and sends it
- peer.send(&ConsensusRequest {}).await?;
-
- // Node verifies response came from a participating node.
- // Extra validations can be added here.
- let mut response = response_sub.receive().await?;
- // Verify that peer has finished finalizing forks
- loop {
- if !response.forks.is_empty() {
- warn!(target: "consensus::consensus_sync", "Peer has not finished finalization, retrying...");
- sleep(1).await;
- peer.send(&ConsensusRequest {}).await?;
- response = response_sub.receive().await?;
- continue
- }
- break
- }
-
- // Verify that the node has received all finalized blocks
- loop {
- if !state.read().await.blockchain.has_slot_order(response.current_slot)? {
- warn!(target: "consensus::consensus_sync", "Node has not finished finalization, retrying...");
- sleep(1).await;
- continue
- }
- break
- }
-
- // Node stores response data.
- let mut lock = state.write().await;
- let mut forks = vec![];
- for fork in &response.forks {
- forks.push(fork.clone().into());
- }
- lock.consensus.bootstrap_slot = response.bootstrap_slot;
- lock.consensus.forks = forks;
- lock.append_pending_txs(&response.pending_txs).await;
- lock.consensus.slots = response.slots.clone();
- lock.consensus.previous_leaders = 1;
- let mut f_history = vec![];
- for f in &response.f_history {
- let f_float = Float10::try_from(f.as_str()).unwrap();
- f_history.push(f_float);
- }
- lock.consensus.f_history = f_history;
- let mut err_history = vec![];
- for err in &response.err_history {
- let err_float = Float10::try_from(err.as_str()).unwrap();
- err_history.push(err_float);
- }
- lock.consensus.err_history = err_history;
- lock.consensus.nullifiers = response.nullifiers.clone();
- lock.consensus.init_coins().await?;
-
- info!(target: "consensus::consensus_sync", "Consensus state synced!");
- Ok(false)
-}
diff --git a/src/consensus/task/mod.rs b/src/consensus/task/mod.rs
deleted file mode 100644
index a0e633297..000000000
--- a/src/consensus/task/mod.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-// TODO: Handle ? with matches in these files. They should be robust.
-
-mod block_sync;
-pub use block_sync::block_sync_task;
-
-mod consensus_sync;
-pub use consensus_sync::consensus_sync_task;
-
-mod proposal;
-pub use proposal::proposal_task;
diff --git a/src/consensus/task/proposal.rs b/src/consensus/task/proposal.rs
deleted file mode 100644
index df6ac3fa2..000000000
--- a/src/consensus/task/proposal.rs
+++ /dev/null
@@ -1,335 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use std::sync::Arc;
-
-use log::{debug, error, info, warn};
-
-use super::consensus_sync_task;
-use crate::{
- consensus::{constants, ValidatorStatePtr},
- net::P2pPtr,
- system::sleep,
- util::time::Timestamp,
- Result,
-};
-
-/// async task used for participating in the consensus protocol
-pub async fn proposal_task(
- consensus_p2p: P2pPtr,
- sync_p2p: P2pPtr,
- state: ValidatorStatePtr,
- ex: Arc>,
-) -> Result<()> {
- // Check if network is configured to start in the future,
- // otherwise wait for current or next slot finalization period for optimal sync conditions.
- // NOTE: Network beign configured to start in the future should always be the case
- // when bootstrapping or restarting a network.
- let current_ts = Timestamp::current_time();
- let bootstrap_ts = state.read().await.consensus.bootstrap_ts;
- if current_ts < bootstrap_ts {
- let diff = bootstrap_ts.0 - current_ts.0;
- info!(target: "consensus::proposal", "consensus: Waiting for network bootstrap: {} seconds", diff);
- sleep(diff).await;
- } else {
- let mut sleep_time = state.read().await.consensus.time_keeper.next_n_slot_start(1);
- let sync_offset = constants::FINAL_SYNC_DUR;
- loop {
- if sleep_time > sync_offset {
- sleep_time -= sync_offset;
- break
- }
- info!(target: "consensus::proposal", "consensus: Waiting for next slot ({:?})", sleep_time);
- sleep(sleep_time).await;
- sleep_time = state.read().await.consensus.time_keeper.next_n_slot_start(1);
- }
- info!(target: "consensus::proposal", "consensus: Waiting for finalization sync period ({:?})", sleep_time);
- sleep(sleep_time).await;
- }
-
- let mut retries = 0;
- // Sync loop
- loop {
- // Resetting consensus state, so node can still follow the finalized blocks by
- // the sync p2p network/protocols
- state.write().await.consensus.reset();
-
- // Checking sync retries
- if retries > constants::SYNC_MAX_RETRIES {
- error!(target: "consensus::proposal", "consensus: Node reached max sync retries ({}) due to not being able to follow up with consensus processing.", constants::SYNC_MAX_RETRIES);
- warn!(target: "consensus::proposal", "consensus: Terminating consensus participation.");
- break
- }
-
- // Node syncs its consensus state
- match consensus_sync_task(consensus_p2p.clone(), state.clone()).await {
- Ok(p) => {
- // Check if node is not connected to other nodes and can
- // start proposing immediately.
- if p {
- info!(target: "consensus::proposal", "consensus: Node can start proposing!");
- state.write().await.consensus.proposing = p;
- }
- }
- Err(e) => {
- error!(target: "consensus::proposal", "consensus: Failed syncing consensus state: {}. Quitting consensus.", e);
- // TODO: Perhaps notify over a channel in order to
- // stop consensus p2p protocols.
- return Ok(())
- }
- };
-
- // Node modifies its participating slot to next.
- match state.write().await.consensus.set_participating() {
- Ok(()) => {
- info!(target: "consensus::proposal", "consensus: Node will start participating in the next slot")
- }
- Err(e) => {
- error!(target: "consensus::proposal", "consensus: Failed to set participation slot: {}", e)
- }
- }
-
- // Record epoch we start the consensus loop
- let start_epoch = state.read().await.consensus.time_keeper.current_epoch();
-
- // Start executing consensus
- consensus_loop(consensus_p2p.clone(), sync_p2p.clone(), state.clone(), ex.clone()).await;
-
- // Reset retries counter if more epochs have passed than sync retries duration
- let break_epoch = state.read().await.consensus.time_keeper.current_epoch();
- if (break_epoch - start_epoch) > constants::SYNC_RETRIES_DURATION {
- retries = 0;
- }
-
- // Increase retries count on consensus loop break
- retries += 1;
- }
-
- Ok(())
-}
-
-/// Consensus protocol loop
-async fn consensus_loop(
- consensus_p2p: P2pPtr,
- sync_p2p: P2pPtr,
- state: ValidatorStatePtr,
- ex: Arc>,
-) {
- // Note: when a node can start produce proposals is only enforced in code,
- // where we verify if the hardware can keep up with the consensus, by
- // counting how many consecutive slots node successfully listened and process
- // everything. Additionally, we check each proposer coin creation slot to be
- // greater than an epoch length. Later, this will be enforced via contract,
- // where it will be explicit when a node can produce proposals,
- // and after which slot they can be considered as valid.
- let mut listened_slots = 0;
- let mut changed_status = false;
- loop {
- // Check if node can start proposing.
- // This code ensures that we only change the status once
- // and listened_slots doesn't increment further.
- if listened_slots > constants::EPOCH_LENGTH {
- if !changed_status {
- info!(target: "consensus::proposal", "consensus: Node can start proposing!");
- state.write().await.consensus.proposing = true;
- changed_status = true;
- }
- } else {
- listened_slots += 1;
- }
-
- // Node waits and execute consensus protocol propose period.
- if propose_period(consensus_p2p.clone(), state.clone()).await {
- // Node needs to resync
- warn!(
- target: "consensus::proposal",
- "consensus: Node missed slot {} due to proposal processing, resyncing...",
- state.read().await.consensus.time_keeper.current_slot()
- );
- break
- }
-
- // Node waits and execute consensus protocol finalization period.
- if finalization_period(sync_p2p.clone(), state.clone(), ex.clone()).await {
- // Node needs to resync
- warn!(
- target: "consensus::proposal",
- "consensus: Node missed slot {} due to finalizated blocks processing, resyncing...",
- state.read().await.consensus.time_keeper.current_slot()
- );
- break
- }
- }
-}
-
-/// async function to wait and execute consensus protocol propose period.
-/// Propose period consists of 2 parts:
-/// - Generate current slot
-/// - Check if slot leader to generate and broadcast proposal
-/// Returns flag in case node needs to resync.
-async fn propose_period(consensus_p2p: P2pPtr, state: ValidatorStatePtr) -> bool {
- // Node sleeps until next slot
- let seconds_next_slot = state.read().await.consensus.time_keeper.next_n_slot_start(1);
- info!(target: "consensus::proposal", "consensus: Waiting for next slot ({} sec)", seconds_next_slot);
- sleep(seconds_next_slot).await;
-
- // Keep a record of slot to verify if next slot got skipped during processing
- let processing_slot = state.read().await.consensus.time_keeper.current_slot();
-
- // Retrieve current forks last and second to last hash
- let (fork_hashes, fork_previous_hashes) = state.read().await.consensus.fork_hashes();
-
- // Retrieve slot sigmas
- let (sigma1, sigma2) = state.write().await.consensus.sigmas();
- // Node checks if epoch has changed and generate slot
- let epoch_changed = state
- .write()
- .await
- .consensus
- .epoch_changed(fork_hashes, fork_previous_hashes, sigma1, sigma2)
- .await;
- match epoch_changed {
- Ok(changed) => {
- if changed {
- info!(target: "consensus::proposal", "consensus: New epoch started: {}", state.read().await.consensus.epoch);
- }
- }
- Err(e) => {
- error!(target: "consensus::proposal", "consensus: Epoch check failed: {}", e);
- return false
- }
- };
-
- // Node checks if it's the slot leader to generate a new proposal
- // for that slot.
- let (won, fork_index, coin_index) =
- state.write().await.consensus.is_slot_leader(sigma1, sigma2);
- let result = if won {
- state.write().await.propose(processing_slot, fork_index, coin_index, sigma1, sigma2).await
- } else {
- Ok(None)
- };
- let (proposal, coin, derived_blind) = match result {
- Ok(pair) => {
- if pair.is_none() {
- info!(target: "consensus::proposal", "consensus: Node is not the slot lead");
- return false
- }
- pair.unwrap()
- }
- Err(e) => {
- error!(target: "consensus::proposal", "consensus: Block proposal failed: {}", e);
- return false
- }
- };
-
- // Node checks if it missed finalization period due to proposal creation
- let next_slot_start = state.read().await.consensus.time_keeper.next_n_slot_start(1);
- if next_slot_start <= constants::FINAL_SYNC_DUR {
- warn!(
- target: "consensus::proposal",
- "consensus: Node missed slot {} finalization period due to proposal creation, resyncing...",
- state.read().await.consensus.time_keeper.current_slot()
- );
- return true
- }
-
- // Node stores the proposal and broadcast to rest nodes
- info!(target: "consensus::proposal", "consensus: Node is the slot leader: Proposed block: {}", proposal);
- debug!(target: "consensus::proposal", "consensus: Full proposal: {:?}", proposal);
- match state
- .write()
- .await
- .receive_proposal(&proposal, Some((coin_index, coin, derived_blind)))
- .await
- {
- Ok(_) => {
- // Here we don't have to check to broadcast, because the flag
- // will always be true, since the node is able to produce proposals
- info!(target: "consensus::proposal", "consensus: Block proposal saved successfully");
- // Broadcast proposal to other consensus nodes
- consensus_p2p.broadcast(&proposal).await;
- }
- Err(e) => {
- error!(target: "consensus::proposal", "consensus: Block proposal save failed: {}", e);
- }
- }
-
- // Verify node didn't skip next slot
- processing_slot != state.read().await.consensus.time_keeper.current_slot()
-}
-
-/// async function to wait and execute consensus protocol finalization period.
-/// Returns flag in case node needs to resync.
-async fn finalization_period(
- _sync_p2p: P2pPtr,
- state: ValidatorStatePtr,
- _ex: Arc>,
-) -> bool {
- // Node sleeps until finalization sync period starts
- let next_slot_start = state.read().await.consensus.time_keeper.next_n_slot_start(1);
- if next_slot_start > constants::FINAL_SYNC_DUR {
- let seconds_sync_period = next_slot_start - constants::FINAL_SYNC_DUR;
- info!(target: "consensus::proposal", "consensus: Waiting for finalization sync period ({} sec)", seconds_sync_period);
- sleep(seconds_sync_period).await;
- } else {
- warn!(
- target: "consensus::proposal",
- "consensus: Node missed slot {} finalization period due to proposals processing, resyncing...",
- state.read().await.consensus.time_keeper.current_slot()
- );
- return true
- }
-
- // Keep a record of slot to verify if next slot got skipped during processing
- let completed_slot = state.read().await.consensus.time_keeper.current_slot();
-
- // Check if any forks can be finalized
- /*
- match state.write().await.chain_finalization().await {
- Ok((to_broadcast_block, to_broadcast_slots)) => {
- // Broadcasting in background
- if !to_broadcast_block.is_empty() || !to_broadcast_slots.is_empty() {
- ex.spawn(async move {
- // Broadcast finalized blocks info, if any:
- info!(target: "consensus::proposal", "consensus: Broadcasting finalized blocks");
- for info in to_broadcast_block {
- sync_p2p.broadcast(&info).await;
- }
-
- // Broadcast finalized slots, if any:
- info!(target: "consensus::proposal", "consensus: Broadcasting finalized slots");
- for slot in to_broadcast_slots {
- sync_p2p.broadcast(slot).await;
- info!(target: "consensus::proposal", "consensus: Broadcasted slot");
- // TODO: You can give an error if you query P2P and check if there are any connected channels
- }
- })
- .detach();
- } else {
- info!(target: "consensus::proposal", "consensus: No finalized blocks or slots to broadcast");
- }
- }
- Err(e) => {
- error!(target: "consensus::proposal", "consensus: Finalization check failed: {}", e);
- }
- }
- */
- // Verify node didn't skip next slot
- completed_slot != state.read().await.consensus.time_keeper.current_slot()
-}
diff --git a/src/consensus/tx.rs b/src/consensus/tx.rs
deleted file mode 100644
index 28833082d..000000000
--- a/src/consensus/tx.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use darkfi_serial::{async_trait, SerialDecodable, SerialEncodable};
-
-use crate::consensus::{EncryptedTxRcpt, TransferStx};
-
-/// transfer transaction
-#[derive(Debug, Clone, SerialDecodable, SerialEncodable)]
-pub struct Tx {
- pub xfer: TransferStx,
- pub cipher: EncryptedTxRcpt,
-}
diff --git a/src/consensus/types.rs b/src/consensus/types.rs
deleted file mode 100644
index a7500e3cf..000000000
--- a/src/consensus/types.rs
+++ /dev/null
@@ -1,118 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-//! Type aliases used in the consensus codebase.
-use std::ops::{Add, AddAssign, Div, Mul, Sub};
-
-use dashu::{
- base::Abs,
- float::{round::mode::Zero, FBig, Repr},
-};
-
-use super::constants::RADIX_BITS;
-
-const B: u64 = 10;
-
-#[derive(Clone, PartialEq, PartialOrd, Debug)]
-pub struct Float10(FBig);
-
-impl Float10 {
- pub fn repr(&self) -> &Repr {
- self.0.repr()
- }
-
- pub fn abs(&self) -> Self {
- Self(self.0.clone().abs())
- }
-
- pub fn powf(&self, exp: Self) -> Self {
- Self(self.0.powf(&exp.0))
- }
-
- pub fn ln(&self) -> Self {
- Self(self.0.ln())
- }
-}
-
-impl Add for Float10 {
- type Output = Self;
-
- fn add(self, other: Self) -> Self {
- Self(self.0 + other.0)
- }
-}
-
-impl AddAssign for Float10 {
- fn add_assign(&mut self, other: Self) {
- *self = Self(self.0.clone() + other.0);
- }
-}
-
-impl Sub for Float10 {
- type Output = Self;
-
- fn sub(self, other: Self) -> Self {
- Self(self.0 - other.0)
- }
-}
-
-impl Mul for Float10 {
- type Output = Self;
-
- fn mul(self, other: Self) -> Self {
- Self(self.0 * other.0)
- }
-}
-
-impl Div for Float10 {
- type Output = Self;
-
- fn div(self, other: Self) -> Self {
- Self(self.0 / other.0)
- }
-}
-
-impl std::fmt::Display for Float10 {
- fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
- write!(f, "{}", self.0)
- }
-}
-
-impl TryFrom<&str> for Float10 {
- type Error = crate::Error;
-
- fn try_from(value: &str) -> Result {
- Ok(Self(FBig::from_str_native(value)?.with_precision(RADIX_BITS).value()))
- }
-}
-
-impl TryFrom for Float10 {
- type Error = crate::Error;
-
- fn try_from(value: u64) -> Result {
- Ok(Self(FBig::from(value)))
- }
-}
-
-impl TryFrom for Float10 {
- type Error = crate::Error;
-
- fn try_from(value: i64) -> Result {
- Ok(Self(FBig::from(value)))
- }
-}
diff --git a/src/consensus/utils.rs b/src/consensus/utils.rs
deleted file mode 100644
index c1b680bc4..000000000
--- a/src/consensus/utils.rs
+++ /dev/null
@@ -1,111 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use darkfi_sdk::pasta::{group::ff::PrimeField, pallas};
-use dashu::integer::{IBig, Sign, UBig};
-use log::debug;
-
-use super::Float10;
-
-pub fn fbig2ibig(f: Float10) -> IBig {
- let rad = IBig::from(10);
- let sig = f.repr().significand();
- let exp = f.repr().exponent();
-
- let val: IBig = if exp >= 0 {
- sig.clone() * rad.pow(exp.unsigned_abs())
- } else {
- sig.clone() / rad.pow(exp.unsigned_abs())
- };
-
- val
-}
-
-/// note! nagative values in pallas field won't wraps, and won't
-/// convert back to same value.
-pub fn fbig2base(f: Float10) -> pallas::Base {
- debug!(target: "consensus::utils", "fbig -> base (f): {}", f);
- let val: IBig = fbig2ibig(f);
- let (sign, word) = val.as_sign_words();
- let mut words: [u64; 4] = [0, 0, 0, 0];
- words[..word.len()].copy_from_slice(word);
- match sign {
- Sign::Positive => pallas::Base::from_raw(words),
- Sign::Negative => pallas::Base::from_raw(words).neg(),
- }
-}
-
-/// note! only support positive conversion, and zero.
-/// used for testing purpose on non-negative values at the moment.
-pub fn base2ibig(base: pallas::Base) -> IBig {
- //
- let byts: [u8; 32] = base.to_repr();
- let words: [u64; 4] = [
- u64::from_le_bytes(byts[0..8].try_into().expect("")),
- u64::from_le_bytes(byts[8..16].try_into().expect("")),
- u64::from_le_bytes(byts[16..24].try_into().expect("")),
- u64::from_le_bytes(byts[24..32].try_into().expect("")),
- ];
- let uparts = UBig::from_words(&words);
- IBig::from_parts(Sign::Positive, uparts)
-}
-
-#[cfg(test)]
-mod tests {
- use dashu::integer::IBig;
-
- use crate::consensus::{
- types::Float10,
- utils::{base2ibig, fbig2base, fbig2ibig},
- };
- use darkfi_sdk::pasta::pallas;
-
- #[test]
- fn dashu_fbig2ibig() {
- let f = Float10::try_from("234234223.000").unwrap();
- let i: IBig = fbig2ibig(f);
- let sig = IBig::from(234234223);
- assert_eq!(i, sig);
- }
-
- #[test]
- fn dashu_test_base2ibig() {
- //
- let fbig: Float10 = Float10::try_from(
- "289480223093290488558927462521719769633630564819415607159546767643499676303",
- )
- .unwrap();
- let ibig = fbig2ibig(fbig.clone());
- let res_base: pallas::Base = fbig2base(fbig.clone());
- let res_ibig: IBig = base2ibig(res_base);
- assert_eq!(res_ibig, ibig);
- }
-
- #[test]
- fn dashu_test2_base2ibig() {
- //assert that field wrapping for negative values won't hold during conversions.
- let fbig: Float10 = Float10::try_from(
- "-20065240046497827215558476051577517633529246907153511707181011345840062564.87",
- )
- .unwrap();
- let ibig = fbig2ibig(fbig.clone());
- let res_base: pallas::Base = fbig2base(fbig.clone());
- let res_ibig: IBig = base2ibig(res_base);
- assert_ne!(res_ibig, ibig);
- }
-}
diff --git a/src/consensus/validator.rs b/src/consensus/validator.rs
deleted file mode 100644
index 3f25a0c13..000000000
--- a/src/consensus/validator.rs
+++ /dev/null
@@ -1,1199 +0,0 @@
-/* This file is part of DarkFi (https://dark.fi)
- *
- * Copyright (C) 2020-2024 Dyne.org foundation
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-use std::{collections::HashMap, io::Cursor, sync::Arc};
-
-use darkfi_sdk::{
- blockchain::Slot,
- crypto::{
- contract_id::{CONSENSUS_CONTRACT_ID, DAO_CONTRACT_ID, MONEY_CONTRACT_ID},
- schnorr::{SchnorrPublic, SchnorrSecret},
- MerkleNode, MerkleTree, PublicKey, SecretKey,
- },
- pasta::{group::ff::PrimeField, pallas},
-};
-use darkfi_serial::{serialize, Decodable, Encodable, WriteExt};
-use halo2_proofs::arithmetic::Field;
-use log::{debug, error, info, warn};
-use rand::rngs::OsRng;
-use smol::lock::RwLock;
-
-use crate::{
- blockchain::{BlockInfo, Blockchain, BlockchainOverlay, BlockchainOverlayPtr},
- rpc::jsonrpc::JsonSubscriber,
- runtime::vm_runtime::Runtime,
- tx::Transaction,
- util::time::{TimeKeeper, Timestamp},
- wallet::WalletPtr,
- zk::{
- proof::{ProvingKey, VerifyingKey},
- vm::ZkCircuit,
- vm_heap::empty_witnesses,
- },
- zkas::ZkBinary,
- Error, Result,
-};
-
-use super::{
- constants,
- lead_coin::LeadCoin,
- state::{ConsensusState, Fork, StateCheckpoint},
- BlockProposal, Header, LeadInfo, LeadProof,
-};
-
-/// Atomic pointer to validator state.
-pub type ValidatorStatePtr = Arc>;
-
-/// This struct represents the state of a validator node.
-pub struct ValidatorState {
- /// Leader proof proving key
- pub lead_proving_key: Option,
- /// Leader proof verifying key
- pub lead_verifying_key: VerifyingKey,
- /// Hot/Live data used by the consensus algorithm
- pub consensus: ConsensusState,
- /// Canonical (finalized) blockchain
- pub blockchain: Blockchain,
- /// A map of various subscribers exporting live info from the blockchain
- pub subscribers: HashMap<&'static str, JsonSubscriber>,
- /// Wallet interface
- pub wallet: WalletPtr,
- /// Flag signalling node has finished initial sync
- pub synced: bool,
- /// Flag to enable single-node mode
- pub single_node: bool,
-}
-
-impl ValidatorState {
- #[allow(clippy::too_many_arguments)]
- pub async fn new(
- db: &sled::Db,
- bootstrap_ts: Timestamp,
- genesis_ts: Timestamp,
- genesis_data: blake3::Hash,
- initial_distribution: u64,
- wallet: WalletPtr,
- faucet_pubkeys: Vec,
- enable_participation: bool,
- single_node: bool,
- ) -> Result {
- debug!(target: "consensus::validator", "Initializing ValidatorState");
-
- debug!(target: "consensus::validator", "Initializing wallet tables for consensus");
-
- // Initialize consensus coin table.
- // NOTE: In future this will be redundant as consensus coins will live in the money contract.
- if enable_participation {
- wallet.exec_sql(include_str!("consensus_coin.sql")).await?;
- }
-
- debug!(target: "consensus::validator", "Generating leader proof keys with k: {}", constants::LEADER_PROOF_K);
- let bincode = include_bytes!("../../proof/lead.zk.bin");
- let zkbin = ZkBinary::decode(bincode)?;
- let circuit = ZkCircuit::new(empty_witnesses(&zkbin)?, &zkbin);
-
- let lead_verifying_key = VerifyingKey::build(constants::LEADER_PROOF_K, &circuit);
- // We only need this proving key if we're going to participate in the consensus.
- let lead_proving_key = if enable_participation {
- Some(ProvingKey::build(constants::LEADER_PROOF_K, &circuit))
- } else {
- None
- };
-
- let blockchain = Blockchain::new(db)?;
- let mut genesis_block = BlockInfo::default();
- genesis_block.header.timestamp = genesis_ts;
- blockchain.add_block(&genesis_block)?;
-
- let consensus = ConsensusState::new(
- wallet.clone(),
- blockchain.clone(),
- bootstrap_ts,
- genesis_ts,
- genesis_data,
- initial_distribution,
- single_node,
- );
-
- // -----NATIVE WASM CONTRACTS-----
- // This is the current place where native contracts are being deployed.
- // When the `Blockchain` object is created, it doesn't care whether it
- // already has the contract data or not. If there's existing data, it
- // will just open the necessary db and trees, and give back what it has.
- // This means that on subsequent runs our native contracts will already
- // be in a deployed state, so what we actually do here is a redeployment.
- // This kind of operation should only modify the contract's state in case
- // it wasn't deployed before (meaning the initial run). Otherwise, it
- // shouldn't touch anything, or just potentially update the db schemas or
- // whatever is necessary. This logic should be handled in the init function
- // of the actual contract, so make sure the native contracts handle this well.
-
- // The faucet pubkeys are pubkeys which are allowed to create clear inputs
- // in the money contract.
- let money_contract_deploy_payload = serialize(&faucet_pubkeys);
- let dao_contract_deploy_payload = vec![];
- let consensus_contract_deploy_payload = vec![];
-
- let native_contracts = vec![
- (
- "Money Contract",
- *MONEY_CONTRACT_ID,
- include_bytes!("../contract/money/darkfi_money_contract.wasm").to_vec(),
- money_contract_deploy_payload,
- ),
- (
- "DAO Contract",
- *DAO_CONTRACT_ID,
- include_bytes!("../contract/dao/darkfi_dao_contract.wasm").to_vec(),
- dao_contract_deploy_payload,
- ),
- (
- "Consensus Contract",
- *CONSENSUS_CONTRACT_ID,
- include_bytes!("../contract/consensus/darkfi_consensus_contract.wasm").to_vec(),
- consensus_contract_deploy_payload,
- ),
- ];
-
- info!(target: "consensus::validator", "Deploying native wasm contracts");
- let blockchain_overlay = BlockchainOverlay::new(&blockchain)?;
- for nc in native_contracts {
- info!(target: "consensus::validator", "Deploying {} with ContractID {}", nc.0, nc.1);
- let mut runtime = Runtime::new(
- &nc.2[..],
- blockchain_overlay.clone(),
- nc.1,
- consensus.time_keeper.clone(),
- )?;
- runtime.deploy(&nc.3)?;
- info!(target: "consensus::validator", "Successfully deployed {}", nc.0);
- }
- blockchain_overlay.lock().unwrap().overlay.lock().unwrap().apply()?;
-
- info!(target: "consensus::validator", "Finished deployment of native wasm contracts");
- // -----END NATIVE WASM CONTRACTS-----
-
- // Here we initialize various subscribers that can export live consensus/blockchain data.
- let mut subscribers = HashMap::new();
- let block_subscriber = JsonSubscriber::new("blockchain.subscribe_blocks");
- let err_txs_subscriber = JsonSubscriber::new("blockchain.subscribe_err_txs");
- subscribers.insert("blocks", block_subscriber);
- subscribers.insert("err_txs", err_txs_subscriber);
-
- let state = Arc::new(RwLock::new(ValidatorState {
- lead_proving_key,
- lead_verifying_key,
- consensus,
- blockchain,
- subscribers,
- wallet,
- synced: false,
- single_node,
- }));
-
- Ok(state)
- }
-
- /// The node retrieves a transaction, validates its state transition,
- /// and appends it to the pending txs store.
- pub async fn append_tx(&mut self, tx: Transaction) -> bool {
- let tx_hash = blake3::hash(&serialize(&tx));
- let tx_in_txstore = match self.blockchain.transactions.contains(&tx_hash) {
- Ok(v) => v,
- Err(e) => {
- error!(target: "consensus::validator", "append_tx(): Failed querying txstore: {}", e);
- return false
- }
- };
-
- let tx_in_pending_txs_store = match self.blockchain.pending_txs.contains(&tx_hash) {
- Ok(v) => v,
- Err(e) => {
- error!(target: "consensus::validator", "append_tx(): Failed querying pending txs store: {}", e);
- return false
- }
- };
-
- if tx_in_txstore || tx_in_pending_txs_store {
- info!(target: "consensus::validator", "append_tx(): We have already seen this tx.");
- return false
- }
-
- info!(target: "consensus::validator", "append_tx(): Starting state transition validation");
- match self
- .verify_transactions(&[tx.clone()], self.consensus.time_keeper.current_slot(), false)
- .await
- {
- Ok(erroneous_txs) => {
- if !erroneous_txs.is_empty() {
- error!(target: "consensus::validator", "append_tx(): Erroneous transaction detected");
- return false
- }
- }
- Err(e) => {
- error!(target: "consensus::validator", "append_tx(): Failed to verify transaction: {}", e);
- return false
- }
- }
-
- if let Err(e) = self.blockchain.add_pending_txs(&[tx]) {
- error!(target: "consensus::validator", "append_tx(): Failed to insert transaction to pending txs store: {}", e);
- return false
- }
- info!(target: "consensus::validator", "append_tx(): Appended tx to pending txs store");
- true
- }
-
- /// The node retrieves transactions vector, validates their state transition,
- /// and appends successfull ones to the pending txs store.
- pub async fn append_pending_txs(&mut self, txs: &[Transaction]) {
- let mut filtered_txs = vec![];
- // Filter already seen transactions
- for tx in txs {
- let tx_hash = blake3::hash(&serialize(tx));
- let tx_in_txstore = match self.blockchain.transactions.contains(&tx_hash) {
- Ok(v) => v,
- Err(e) => {
- error!(target: "consensus::validator", "append_pending_txs(): Failed querying txstore: {}", e);
- continue
- }
- };
-
- let tx_in_pending_txs_store = match self.blockchain.pending_txs.contains(&tx_hash) {
- Ok(v) => v,
- Err(e) => {
- error!(target: "consensus::validator", "append_pending_txs(): Failed querying pending txs store: {}", e);
- continue
- }
- };
-
- if tx_in_txstore || tx_in_pending_txs_store {
- info!(target: "consensus::validator", "append_pending_txs(): We have already seen this tx.");
- continue
- }
-
- filtered_txs.push(tx.clone());
- }
-
- // Verify transactions and filter erroneous ones
- info!(target: "consensus::validator", "append_pending_txs(): Starting state transition validation");
- let erroneous_txs = match self
- .verify_transactions(
- &filtered_txs[..],
- self.consensus.time_keeper.current_slot(),
- false,
- )
- .await
- {
- Ok(erroneous_txs) => erroneous_txs,
- Err(e) => {
- error!(target: "consensus::validator", "append_pending_txs(): Failed to verify transactions: {}", e);
- return
- }
- };
- if !erroneous_txs.is_empty() {
- filtered_txs.retain(|x| !erroneous_txs.contains(x));
- }
-
- if let Err(e) = self.blockchain.add_pending_txs(&filtered_txs) {
- error!(target: "consensus::validator", "append_pending_txs(): Failed to insert transactions to pending txs store: {}", e);
- return
- }
- info!(target: "consensus::validator", "append_pending_txs(): Appended tx to pending txs store");
- }
-
- /// The node removes erroneous transactions from the pending txs store.
- async fn purge_pending_txs(&self) -> Result<()> {
- info!(target: "consensus::validator", "purge_pending_txs(): Removing erroneous transactions from pending transactions store...");
- let pending_txs = self.blockchain.get_pending_txs()?;
- if pending_txs.is_empty() {
- info!(target: "consensus::validator", "purge_pending_txs(): No pending transactions found");
- return Ok(())
- }
- let erroneous_txs = self
- .verify_transactions(&pending_txs[..], self.consensus.time_keeper.current_slot(), false)
- .await?;
- if erroneous_txs.is_empty() {
- info!(target: "consensus::validator", "purge_pending_txs(): No erroneous transactions found");
- return Ok(())
- }
- info!(target: "consensus::validator", "purge_pending_txs(): Removing {} erroneous transactions...", erroneous_txs.len());
- self.blockchain.remove_pending_txs(&erroneous_txs)?;
-
- let _err_txs_subscriber = self.subscribers.get("err_txs").unwrap();
- for err_tx in erroneous_txs {
- let _tx_hash = blake3::hash(&serialize(&err_tx)).to_hex().as_str().to_string();
- info!(target: "consensus::validator", "purge_pending_txs(): Sending notification about erroneous transaction");
- // TODO: err_txs_subscriber.notify(&[tx_hash]).await;
- }
-
- Ok(())
- }
-
- /// Generate a block proposal for the current slot, containing all
- /// pending transactions. Proposal extends the longest fork
- /// chain the node is holding.
- pub async fn propose(
- &mut self,
- slot: u64,
- fork_index: i64,
- coin_index: usize,
- sigma1: pallas::Base,
- sigma2: pallas::Base,
- ) -> Result