Night of the living dead

This commit is contained in:
skoupidi
2024-01-29 21:41:01 +02:00
parent 92387af2ab
commit 16103b84a7
110 changed files with 1001 additions and 13841 deletions

Cargo.lock (generated)

@@ -1931,7 +1931,6 @@ dependencies = [
"darkfi",
"darkfi-sdk",
"darkfi-serial",
"darkfi_consensus_contract",
"darkfi_dao_contract",
"darkfi_deployooor_contract",
"darkfi_money_contract",
@@ -2032,28 +2031,6 @@ dependencies = [
"url",
]
[[package]]
name = "darkfi_consensus_contract"
version = "0.4.1"
dependencies = [
"blake3 1.5.0",
"bs58",
"chacha20poly1305",
"darkfi",
"darkfi-contract-test-harness",
"darkfi-sdk",
"darkfi-serial",
"darkfi_money_contract",
"getrandom 0.2.12",
"halo2_proofs",
"log",
"rand 0.8.5",
"simplelog",
"sled",
"smol",
"thiserror",
]
[[package]]
name = "darkfi_dao_contract"
version = "0.4.1"
@@ -2114,7 +2091,7 @@ dependencies = [
]
[[package]]
name = "darkfid2"
name = "darkfid"
version = "0.4.1"
dependencies = [
"async-trait",
@@ -2124,7 +2101,6 @@ dependencies = [
"darkfi-contract-test-harness",
"darkfi-sdk",
"darkfi-serial",
"darkfi_consensus_contract",
"darkfi_money_contract",
"easy-parallel",
"log",


@@ -20,8 +20,7 @@ doctest = false
[workspace]
members = [
"bin/zkas",
#"bin/darkfid",
"bin/darkfid2",
"bin/darkfid",
"bin/darkfi-mmproxy",
"bin/drk",
#"bin/faucetd",
@@ -46,7 +45,6 @@ members = [
"src/contract/test-harness",
"src/contract/money",
"src/contract/dao",
"src/contract/consensus",
"src/contract/deployooor",
"example/dchat/dchatd",


@@ -18,7 +18,7 @@ PROOFS_BIN = $(PROOFS_SRC:=.bin)
# List of all binaries built
BINS = \
zkas \
darkfid2 \
darkfid \
darkfi-mmproxy \
darkirc \
genev \
@@ -42,11 +42,10 @@ $(PROOFS_BIN): zkas $(PROOFS_SRC)
contracts: zkas
$(MAKE) -C src/contract/money
$(MAKE) -C src/contract/consensus
$(MAKE) -C src/contract/dao
$(MAKE) -C src/contract/deployooor
darkfid2: contracts
darkfid: contracts
$(MAKE) -C bin/$@ \
PREFIX="$(PREFIX)" \
CARGO="$(CARGO)" \
@@ -147,11 +146,10 @@ coverage: contracts $(PROOFS_BIN)
clean:
$(MAKE) -C src/contract/money clean
$(MAKE) -C src/contract/consensus clean
$(MAKE) -C src/contract/dao clean
$(MAKE) -C src/contract/deployooor clean
$(MAKE) -C bin/zkas clean
$(MAKE) -C bin/darkfid2 clean
$(MAKE) -C bin/darkfid clean
$(MAKE) -C bin/darkfi-mmproxy clean
$(MAKE) -C bin/darkirc clean
$(MAKE) -C bin/genev/genev-cli clean
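With the rename applied, the daemon would presumably still be built from the repository root through the target shown above (a usage sketch, not part of this diff):

$ make darkfid

which first builds the money, dao and deployooor contracts and then delegates to bin/darkfid.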


@@ -9,14 +9,24 @@ license = "AGPL-3.0-only"
edition = "2021"
[dependencies]
async-trait = "0.1.77"
# Darkfi
darkfi = {path = "../../", features = ["async-daemonize", "bs58"]}
darkfi_money_contract = {path = "../../src/contract/money"}
darkfi-contract-test-harness = {path = "../../src/contract/test-harness"}
darkfi-sdk = {path = "../../src/sdk"}
darkfi-serial = {path = "../../src/serial"}
# Misc
blake3 = "1.5.0"
bs58 = "0.5.0"
darkfi = {path = "../../", features = ["async-daemonize", "validator"]}
darkfi-sdk = {path = "../../src/sdk", features = ["async"]}
darkfi-serial = {path = "../../src/serial"}
log = "0.4.20"
num-bigint = "0.4.4"
rand = "0.8.5"
sled = "0.34.7"
toml = "0.8.8"
# JSON-RPC
async-trait = "0.1.77"
tinyjson = "2.5.1"
url = "2.5.0"


@@ -6,29 +6,37 @@ PREFIX = $(HOME)/.cargo
# Cargo binary
CARGO = cargo +nightly
# Compile target
RUST_TARGET = $(shell rustc -Vv | grep '^host: ' | cut -d' ' -f2)
# Uncomment when doing musl static builds
#RUSTFLAGS = -C target-feature=+crt-static -C link-self-contained=yes
SRC = \
Cargo.toml \
../../Cargo.toml \
$(shell find src -type f) \
$(shell find ../../src -type f) \
$(shell find src -type f -name '*.rs') \
$(shell find ../../src -type f -name '*.rs') \
$(shell find ../../src/contract -type f -name '*.wasm')
BIN = ../../darkfid
BIN = $(shell grep '^name = ' Cargo.toml | cut -d' ' -f3 | tr -d '"')
all: $(BIN)
$(BIN): $(SRC)
$(CARGO) build $(TARGET_PRFX)$(RUST_TARGET) --release --package darkfid
cp -f ../../target/$(RUST_TARGET)/release/darkfid $@
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) build --target=$(RUST_TARGET) --release --package $@
cp -f ../../target/$(RUST_TARGET)/release/$@ $@
cp -f ../../target/$(RUST_TARGET)/release/$@ ../../$@
clean:
rm -f $(BIN)
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) clean --target=$(RUST_TARGET) --release --package $(BIN)
rm -f $(BIN) ../../$(BIN)
install: all
mkdir -p $(DESTDIR)$(PREFIX)/bin
cp -f $(BIN) $(DESTDIR)$(PREFIX)/bin
chmod 755 $(DESTDIR)$(PREFIX)/bin/darkfid
chmod 755 $(DESTDIR)$(PREFIX)/bin/$(BIN)
uninstall:
rm -f $(DESTDIR)$(PREFIX)/bin/darkfid
rm -f $(DESTDIR)$(PREFIX)/bin/$(BIN)
.PHONY: all clean install uninstall
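For reference, the new BIN derivation simply reads the package name out of Cargo.toml. Assuming the manifest's [package] section carries a line name = "darkfid" (a representative value, not quoted from this diff), the shell pipeline would resolve roughly as:

$ grep '^name = ' Cargo.toml | cut -d' ' -f3 | tr -d '"'
darkfid

so the same Makefile keeps working if the package is ever renamed again.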


@@ -6,80 +6,470 @@
## The default values are left commented. They can be overridden either by
## uncommenting, or by using the command-line.
# Chain to use (testnet, mainnet)
chain = "testnet"
# Path to the wallet database
wallet_path = "~/.config/darkfi/darkfid_wallet_testnet.db"
# Password for the wallet database
#wallet_pass = "changeme"
# Path to the blockchain database directory
database = "~/.config/darkfi/darkfid_blockchain_testnet"
# JSON-RPC listen URL
rpc_listen = "tcp://127.0.0.1:8340"
# Blockchain network to use
network = "testnet"
# Localnet blockchain network configuration
[network_config."localnet"]
# Path to the blockchain database directory
database = "~/.local/darkfi/darkfid_blockchain_localnet"
# Finalization threshold, denominated by number of blocks
threshold = 3
# minerd JSON-RPC endpoint
minerd_endpoint = "tcp://127.0.0.1:28467"
# PoW block production target, in seconds
pow_target = 10
# Optional fixed PoW difficulty, used for testing
pow_fixed_difficulty = 1
# Epoch duration, denominated by number of blocks/slots
epoch_length = 10
# PoS slot duration, in seconds
slot_time = 10
# Whitelisted faucet addresses
faucet_pub = []
# Participate in the consensus protocol
consensus = false
consensus = true
# Enable single-node mode for local testing
single_node = false
# Wallet address to receive consensus rewards.
# This is a dummy address so the miner can start;
# replace it with your own.
recipient = "5ZHfYpt4mpJcwBNxfEyxLzeFJUEeoePs5NQ5jVEgHrMf"
# P2P accept addresses for the consensus protocol
#consensus_p2p_accept = ["tls://127.0.0.1:8341"]
# Skip syncing process and start node right away
skip_sync = true
# P2P external addresses for the consensus protocol
#consensus_p2p_external = ["tls://127.0.0.1:8341"]
# Enable PoS testing mode for local testing
pos_testing_mode = true
# Connection slots for the consensus protocol
#consensus_slots = 8
## Localnet sync P2P network settings
[network_config."localnet".sync_net]
# P2P accept addresses the instance listens on for inbound connections
inbound = ["tcp+tls://0.0.0.0:8242"]
# Seed nodes to connect to for the consensus protocol
#consensus_p2p_seed = []
# P2P external addresses the instance advertises so other peers can
# reach us and connect to us, as long as inbound addrs are configured.
#external_addrs = []
# Seed nodes JSON-RPC listen URL for clock synchronization
#consensus_seed_rpc = []
# Peer nodes to manually connect to
#peers = []
# Peers to connect to for the consensus protocol
#consensus_p2p_peer = []
# Seed nodes to connect to for peer discovery and/or advertising our
# own external addresses
#seeds = []
# Peers JSON-RPC listen URL for clock synchronization
#consensus_peer_rpc = []
# Whitelisted network transports for outbound connections
#allowed_transports = ["tcp+tls"]
# Preferred transports of outbound connections for the consensus protocol
#consensus_p2p_transports = ["tls", "tcp"]
# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
#transport_mixing = true
# P2P accept addresses for the syncing protocol
sync_p2p_accept = ["tls://0.0.0.0:8342"]
# Outbound connection slots number, this many connections will be
# attempted. (This does not include manual connections)
#outbound_connections = 8
# P2P external addresses for the syncing protocol
#sync_p2p_external = ["tls://127.0.0.1:8342"]
# Inbound connections slots number, this many active inbound connections
# will be allowed. (This does not include manual or outbound connections)
#inbound_connections = 0
# Connection slots for the syncing protocol
sync_slots = 8
# Manual connections retry limit, 0 for forever looping
#manual_attempt_limit = 0
# Seed nodes to connect to for the syncing protocol
sync_p2p_seed = ["tls://lilith0.dark.fi:8342", "tls://lilith1.dark.fi:8342"]
# Outbound connection timeout (in seconds)
#outbound_connect_timeout = 10
# Peers to connect to for the syncing protocol
#sync_p2p_peer = []
# Exchange versions (handshake) timeout (in seconds)
#channel_handshake_timeout = 4
# Preferred transports of outbound connections for the syncing protocol
sync_p2p_transports = ["tls"]
# Ping-pong exchange execution interval (in seconds)
#channel_heartbeat_interval = 10
# Enable localnet hosts
localnet = false
# Allow localnet hosts
localnet = true
# Enable channel log
#channel_log = false
# Delete a peer from hosts if they've been quarantined N times
#hosts_quarantine_limit = 50
# Whitelisted cashier addresses
#cashier_pub = []
# Cooling off time for peer discovery when unsuccessful
#outbound_peer_discovery_cooloff_time = 30
# Time between peer discovery attempts
#outbound_peer_discovery_attempt_time = 5
## Localnet consensus P2P network settings
[network_config."localnet".consensus_net]
# P2P accept addresses the instance listens on for inbound connections
#inbound = ["tcp+tls://0.0.0.0:8241"]
# P2P external addresses the instance advertises so other peers can
# reach us and connect to us, as long as inbound addrs are configured.
#external_addrs = []
# Peer nodes to manually connect to
#peers = []
# Seed nodes to connect to for peer discovery and/or advertising our
# own external addresses
#seeds = []
# Whitelisted network transports for outbound connections
#allowed_transports = ["tcp+tls"]
# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
#transport_mixing = true
# Outbound connection slots number, this many connections will be
# attempted. (This does not include manual connections)
#outbound_connections = 8
# Manual connections retry limit, 0 for forever looping
#manual_attempt_limit = 0
# Outbound connection timeout (in seconds)
#outbound_connect_timeout = 10
# Exchange versions (handshake) timeout (in seconds)
#channel_handshake_timeout = 4
# Ping-pong exchange execution interval (in seconds)
#channel_heartbeat_interval = 10
# Allow localnet hosts
localnet = true
# Delete a peer from hosts if they've been quarantined N times
#hosts_quarantine_limit = 50
# Cooling off time for peer discovery when unsuccessful
#outbound_peer_discovery_cooloff_time = 30
# Time between peer discovery attempts
#outbound_peer_discovery_attempt_time = 5
# Testnet blockchain network configuration
[network_config."testnet"]
# Path to the blockchain database directory
database = "~/.local/darkfi/darkfid_blockchain_testnet"
# Finalization threshold, denominated by number of blocks
threshold = 6
# minerd JSON-RPC endpoint
minerd_endpoint = "tcp://127.0.0.1:28467"
# PoW block production target, in seconds
pow_target = 90
# Epoch duration, denominated by number of blocks/slots
epoch_length = 10
# PoS slot duration, in seconds
slot_time = 90
# Whitelisted faucet addresses
faucet_pub = ["3ce5xa3PjuQGFtTaF7AvMJp7fGxqeGRJx7zj3LCwNCkP"]
# Verify system clock is correct
#clock_sync = true
# Participate in the consensus protocol
consensus = false
# Wallet address to receive consensus rewards
#recipient = "YOUR_WALLET_ADDRESS_HERE"
# Skip syncing process and start node right away
skip_sync = false
# Enable PoS testing mode for local testing
pos_testing_mode = false
## Testnet sync P2P network settings
[network_config."testnet".sync_net]
# P2P accept addresses the instance listens on for inbound connections
# You can also use an IPv6 address
inbound = ["tcp+tls://0.0.0.0:8342"]
# IPv6 version:
#inbound = ["tcp+tls://[::]:8342"]
# Combined:
#inbound = ["tcp+tls://0.0.0.0:8342", "tcp+tls://[::]:8342"]
# P2P external addresses the instance advertises so other peers can
# reach us and connect to us, as long as inbound addrs are configured.
# You can also use an IPv6 address
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8342"]
# IPv6 version:
#external_addrs = ["tcp+tls://[ipv6 address here]:8342"]
# Combined:
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8342", "tcp+tls://[ipv6 address here]:8342"]
# Peer nodes to manually connect to
#peers = []
# Seed nodes to connect to for peer discovery and/or advertising our
# own external addresses
seeds = ["tcp+tls://lilith0.dark.fi:8342", "tcp+tls://lilith1.dark.fi:8342"]
# Whitelisted network transports for outbound connections
allowed_transports = ["tcp+tls"]
# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
#transport_mixing = true
# Outbound connection slots number, this many connections will be
# attempted. (This does not include manual connections)
outbound_connections = 8
# Inbound connections slots number, this many active inbound connections
# will be allowed. (This does not include manual or outbound connections)
#inbound_connections = 0
# Manual connections retry limit, 0 for forever looping
#manual_attempt_limit = 0
# Outbound connection timeout (in seconds)
#outbound_connect_timeout = 10
# Exchange versions (handshake) timeout (in seconds)
#channel_handshake_timeout = 4
# Ping-pong exchange execution interval (in seconds)
#channel_heartbeat_interval = 10
# Allow localnet hosts
localnet = false
# Delete a peer from hosts if they've been quarantined N times
#hosts_quarantine_limit = 50
# Cooling off time for peer discovery when unsuccessful
#outbound_peer_discovery_cooloff_time = 30
# Time between peer discovery attempts
#outbound_peer_discovery_attempt_time = 5
## Testnet consensus P2P network settings
[network_config."testnet".consensus_net]
# P2P accept addresses the instance listens on for inbound connections
# You can also use an IPv6 address
inbound = ["tcp+tls://0.0.0.0:8341"]
# IPv6 version:
#inbound = ["tcp+tls://[::]:8341"]
# Combined:
#inbound = ["tcp+tls://0.0.0.0:8341", "tcp+tls://[::]:8341"]
# P2P external addresses the instance advertises so other peers can
# reach us and connect to us, as long as inbound addrs are configured.
# You can also use an IPv6 address
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8341"]
# IPv6 version:
#external_addrs = ["tcp+tls://[ipv6 address here]:8341"]
# Combined:
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8341", "tcp+tls://[ipv6 address here]:8341"]
# Peer nodes to manually connect to
#peers = []
# Seed nodes to connect to for peer discovery and/or advertising our
# own external addresses
seeds = ["tcp+tls://lilith0.dark.fi:8341", "tcp+tls://lilith1.dark.fi:8341"]
# Whitelisted network transports for outbound connections
allowed_transports = ["tcp+tls"]
# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
#transport_mixing = true
# Outbound connection slots number, this many connections will be
# attempted. (This does not include manual connections)
#outbound_connections = 8
# Manual connections retry limit, 0 for forever looping
#manual_attempt_limit = 0
# Outbound connection timeout (in seconds)
#outbound_connect_timeout = 10
# Exchange versions (handshake) timeout (in seconds)
#channel_handshake_timeout = 4
# Ping-pong exchange execution interval (in seconds)
#channel_heartbeat_interval = 10
# Allow localnet hosts
localnet = false
# Delete a peer from hosts if they've been quarantined N times
#hosts_quarantine_limit = 50
# Cooling off time for peer discovery when unsuccessful
#outbound_peer_discovery_cooloff_time = 30
# Time between peer discovery attempts
#outbound_peer_discovery_attempt_time = 5
# Mainnet blockchain network configuration
[network_config."mainnet"]
# Path to the blockchain database directory
database = "~/.local/darkfi/darkfid_blockchain_mainnet"
# Finalization threshold, denominated by number of blocks
threshold = 11
# minerd JSON-RPC endpoint
minerd_endpoint = "tcp://127.0.0.1:28467"
# PoW block production target, in seconds
pow_target = 90
# Epoch duration, denominated by number of blocks/slots
epoch_length = 10
# PoS slot duration, in seconds
slot_time = 90
# Whitelisted faucet addresses
faucet_pub = []
# Participate in the consensus protocol
consensus = false
# Wallet address to receive consensus rewards
#recipient = "YOUR_WALLET_ADDRESS_HERE"
# Skip syncing process and start node right away
skip_sync = false
# Enable PoS testing mode for local testing
pos_testing_mode = false
## Mainnet sync P2P network settings
[network_config."mainnet".sync_net]
# P2P accept addresses the instance listens on for inbound connections
# You can also use an IPv6 address
inbound = ["tcp+tls://0.0.0.0:8442"]
# IPv6 version:
#inbound = ["tcp+tls://[::]:8442"]
# Combined:
#inbound = ["tcp+tls://0.0.0.0:8442", "tcp+tls://[::]:8442"]
# P2P external addresses the instance advertises so other peers can
# reach us and connect to us, as long as inbound addrs are configured.
# You can also use an IPv6 address
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8442"]
# IPv6 version:
#external_addrs = ["tcp+tls://[ipv6 address here]:8442"]
# Combined:
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8442", "tcp+tls://[ipv6 address here]:8442"]
# Peer nodes to manually connect to
#peers = []
# Seed nodes to connect to for peer discovery and/or advertising our
# own external addresses
seeds = ["tcp+tls://lilith0.dark.fi:8442", "tcp+tls://lilith1.dark.fi:8442"]
# Whitelisted network transports for outbound connections
allowed_transports = ["tcp+tls"]
# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
#transport_mixing = true
# Outbound connection slots number, this many connections will be
# attempted. (This does not include manual connections)
outbound_connections = 8
# Inbound connections slots number, this many active inbound connections
# will be allowed. (This does not include manual or outbound connections)
#inbound_connections = 0
# Manual connections retry limit, 0 for forever looping
#manual_attempt_limit = 0
# Outbound connection timeout (in seconds)
#outbound_connect_timeout = 10
# Exchange versions (handshake) timeout (in seconds)
#channel_handshake_timeout = 4
# Ping-pong exchange execution interval (in seconds)
#channel_heartbeat_interval = 10
# Allow localnet hosts
localnet = false
# Delete a peer from hosts if they've been quarantined N times
#hosts_quarantine_limit = 50
# Cooling off time for peer discovery when unsuccessful
#outbound_peer_discovery_cooloff_time = 30
# Time between peer discovery attempts
#outbound_peer_discovery_attempt_time = 5
## Mainnet consensus P2P network settings
[network_config."mainnet".consensus_net]
# P2P accept addresses the instance listens on for inbound connections
# You can also use an IPv6 address
inbound = ["tcp+tls://0.0.0.0:8441"]
# IPv6 version:
#inbound = ["tcp+tls://[::]:8441"]
# Combined:
#inbound = ["tcp+tls://0.0.0.0:8441", "tcp+tls://[::]:8441"]
# P2P external addresses the instance advertises so other peers can
# reach us and connect to us, as long as inbound addrs are configured.
# You can also use an IPv6 address
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8441"]
# IPv6 version:
#external_addrs = ["tcp+tls://[ipv6 address here]:8441"]
# Combined:
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8441", "tcp+tls://[ipv6 address here]:8441"]
# Peer nodes to manually connect to
#peers = []
# Seed nodes to connect to for peer discovery and/or advertising our
# own external addresses
seeds = ["tcp+tls://lilith0.dark.fi:8441", "tcp+tls://lilith1.dark.fi:8441"]
# Whitelisted network transports for outbound connections
allowed_transports = ["tcp+tls"]
# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
#transport_mixing = true
# Outbound connection slots number, this many connections will be
# attempted. (This does not include manual connections)
#outbound_connections = 8
# Manual connections retry limit, 0 for forever looping
#manual_attempt_limit = 0
# Outbound connection timeout (in seconds)
#outbound_connect_timeout = 10
# Exchange versions (handshake) timeout (in seconds)
#channel_handshake_timeout = 4
# Ping-pong exchange execution interval (in seconds)
#channel_heartbeat_interval = 10
# Allow localnet hosts
localnet = false
# Delete a peer from hosts if they've been quarantined N times
#hosts_quarantine_limit = 50
# Cooling off time for peer discovery when unsuccessful
#outbound_peer_discovery_cooloff_time = 30
# Time between peer discovery attempts
#outbound_peer_discovery_attempt_time = 5
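As a rough usage sketch (not part of the diff): with this file in place, a node is pointed at one of the profiles through the --network flag defined in the Args struct further down, for example:

$ ./darkfid --network localnet

The config path and any flag shorthands are assumptions here. Note that whenever a profile sets consensus = true, minerd must already be listening on the configured minerd_endpoint, otherwise startup aborts, as the main.rs changes below show.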


@@ -21,16 +21,6 @@ use darkfi::rpc::jsonrpc::{ErrorCode::ServerError, JsonError, JsonResult};
/// Custom RPC errors available for darkfid.
/// Please sort them sensibly.
pub enum RpcError {
/*
// Wallet/Key-related errors
NoRowsFoundInWallet = -32101,
Keygen = -32101,
KeypairFetch = -32102,
KeypairNotFound = -32103,
InvalidKeypair = -32104,
InvalidAddressParam = -32105,
DecryptionFailed = -32106,
*/
// Transaction-related errors
TxSimulationFail = -32110,
TxBroadcastFail = -32111,
@@ -44,20 +34,13 @@ pub enum RpcError {
// Contract-related errors
ContractZkasDbNotFound = -32200,
// Misc errors
PingFailed = -32300,
}
fn to_tuple(e: RpcError) -> (i32, String) {
let msg = match e {
/*
// Wallet/Key-related errors
RpcError::NoRowsFoundInWallet => "No queried rows found in wallet",
RpcError::Keygen => "Failed generating keypair",
RpcError::KeypairFetch => "Failed fetching keypairs from wallet",
RpcError::KeypairNotFound => "Keypair not found",
RpcError::InvalidKeypair => "Invalid keypair",
RpcError::InvalidAddressParam => "Invalid address parameter",
RpcError::DecryptionFailed => "Decryption failed",
*/
// Transaction-related errors
RpcError::TxSimulationFail => "Failed simulating transaction state change",
RpcError::TxBroadcastFail => "Failed broadcasting transaction",
@@ -68,6 +51,8 @@ fn to_tuple(e: RpcError) -> (i32, String) {
RpcError::ParseError => "Parse error",
// Contract-related errors
RpcError::ContractZkasDbNotFound => "zkas database not found for given contract",
// Misc errors
RpcError::PingFailed => "Miner daemon ping error",
};
(e as i32, msg.to_string())
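To illustrate the mapping added above (a reading of the code, not new behavior):

// to_tuple(RpcError::PingFailed) == (-32300, "Miner daemon ping error".to_string())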


@@ -16,49 +16,65 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::{collections::HashSet, path::Path, str::FromStr, sync::Arc};
use async_trait::async_trait;
use darkfi_sdk::crypto::PublicKey;
use log::{error, info};
use smol::{
lock::{Mutex, MutexGuard},
stream::StreamExt,
use std::{
collections::{HashMap, HashSet},
str::FromStr,
sync::Arc,
};
use log::{error, info};
use smol::{lock::Mutex, stream::StreamExt};
use structopt_toml::{serde::Deserialize, structopt::StructOpt, StructOptToml};
use url::Url;
use darkfi::{
async_daemonize, cli_desc,
consensus::{
constants::{
MAINNET_BOOTSTRAP_TIMESTAMP, MAINNET_GENESIS_HASH_BYTES, MAINNET_GENESIS_TIMESTAMP,
MAINNET_INITIAL_DISTRIBUTION, TESTNET_BOOTSTRAP_TIMESTAMP, TESTNET_GENESIS_HASH_BYTES,
TESTNET_GENESIS_TIMESTAMP, TESTNET_INITIAL_DISTRIBUTION,
},
proto::{ProtocolProposal, ProtocolSync, ProtocolSyncConsensus, ProtocolTx},
task::{block_sync_task, proposal_task},
validator::ValidatorStatePtr,
ValidatorState,
},
net,
net::P2pPtr,
async_daemonize,
blockchain::BlockInfo,
cli_desc,
net::{settings::SettingsOpt, P2pPtr},
rpc::{
clock_sync::check_clock,
jsonrpc::{ErrorCode::MethodNotFound, JsonError, JsonRequest, JsonResult},
client::RpcClient,
jsonrpc::JsonSubscriber,
server::{listen_and_serve, RequestHandler},
},
system::{StoppableTask, StoppableTaskPtr},
util::path::expand_path,
wallet::{WalletDb, WalletPtr},
util::{path::expand_path, time::TimeKeeper},
validator::{utils::genesis_txs_total, Validator, ValidatorConfig, ValidatorPtr},
Error, Result,
};
use darkfi_sdk::crypto::PublicKey;
use darkfi_serial::deserialize_async;
#[cfg(test)]
mod tests;
mod error;
use error::{server_error, RpcError};
/// JSON-RPC requests handler and methods
mod rpc;
mod rpc_blockchain;
mod rpc_tx;
/// Validator async tasks
mod task;
use task::{miner_task, sync_task};
/// P2P net protocols
mod proto;
/// Utility functions
mod utils;
use utils::{parse_blockchain_config, spawn_consensus_p2p, spawn_sync_p2p};
const CONFIG_FILE: &str = "darkfid_config.toml";
const CONFIG_FILE_CONTENTS: &str = include_str!("../darkfid_config.toml");
/// Note:
/// If you change these, don't forget to remove the corresponding database folder,
/// since if it already contains a genesis block, the provided one is ignored.
const GENESIS_BLOCK_LOCALNET: &str = include_str!("../genesis_block_localnet");
const GENESIS_BLOCK_TESTNET: &str = include_str!("../genesis_block_testnet");
const GENESIS_BLOCK_MAINNET: &str = include_str!("../genesis_block_mainnet");
#[derive(Clone, Debug, Deserialize, StructOpt, StructOptToml)]
#[serde(default)]
@@ -68,109 +84,13 @@ struct Args {
/// Configuration file to use
config: Option<String>,
#[structopt(long, default_value = "testnet")]
/// Chain to use (testnet, mainnet)
chain: String,
#[structopt(long)]
/// Participate in consensus
consensus: bool,
#[structopt(long)]
/// Enable single-node mode for local testing
single_node: bool,
#[structopt(long, default_value = "~/.config/darkfi/darkfid_wallet.db")]
/// Path to wallet database
wallet_path: String,
#[structopt(long, default_value = "changeme")]
/// Password for the wallet database
wallet_pass: String,
#[structopt(long, default_value = "~/.config/darkfi/darkfid_blockchain")]
/// Path to blockchain database
database: String,
#[structopt(long, default_value = "tcp://127.0.0.1:8340")]
#[structopt(short, long, default_value = "tcp://127.0.0.1:8340")]
/// JSON-RPC listen URL
rpc_listen: Url,
#[structopt(long)]
/// P2P accept addresses for the consensus protocol (repeatable flag)
consensus_p2p_accept: Vec<Url>,
#[structopt(long)]
/// P2P external addresses for the consensus protocol (repeatable flag)
consensus_p2p_external: Vec<Url>,
#[structopt(long, default_value = "8")]
/// Connection slots for the consensus protocol
consensus_slots: usize,
#[structopt(long)]
/// Connect to peer for the consensus protocol (repeatable flag)
consensus_p2p_peer: Vec<Url>,
#[structopt(long)]
/// Peers JSON-RPC listen URL for clock synchronization (repeatable flag)
consensus_peer_rpc: Vec<Url>,
#[structopt(long)]
/// Connect to seed for the consensus protocol (repeatable flag)
consensus_p2p_seed: Vec<Url>,
#[structopt(long)]
/// Seed nodes JSON-RPC listen URL for clock synchronization (repeatable flag)
consensus_seed_rpc: Vec<Url>,
#[structopt(long)]
/// Preferred transports of outbound connections for the consensus protocol (repeatable flag)
consensus_p2p_transports: Vec<String>,
#[structopt(long)]
/// P2P accept addresses for the syncing protocol (repeatable flag)
sync_p2p_accept: Vec<Url>,
#[structopt(long)]
/// P2P external addresses for the syncing protocol (repeatable flag)
sync_p2p_external: Vec<Url>,
#[structopt(long, default_value = "8")]
/// Connection slots for the syncing protocol
sync_slots: usize,
#[structopt(long)]
/// Connect to peer for the syncing protocol (repeatable flag)
sync_p2p_peer: Vec<Url>,
#[structopt(long)]
/// Connect to seed for the syncing protocol (repeatable flag)
sync_p2p_seed: Vec<Url>,
#[structopt(long)]
/// Preferred transports of outbound connections for the syncing protocol (repeatable flag)
sync_p2p_transports: Vec<String>,
#[structopt(long)]
/// Enable localnet hosts
localnet: bool,
#[structopt(long)]
/// Enable channel log
channel_log: bool,
#[structopt(long)]
/// Whitelisted cashier public key (repeatable flag)
cashier_pub: Vec<String>,
#[structopt(long)]
/// Whitelisted faucet public key (repeatable flag)
faucet_pub: Vec<String>,
#[structopt(long)]
/// Verify system clock is correct
clock_sync: bool,
#[structopt(short, long, default_value = "testnet")]
/// Blockchain network to use
network: String,
#[structopt(short, long)]
/// Set log file to output into
@@ -181,268 +101,230 @@ struct Args {
verbose: u8,
}
pub struct Darkfid {
synced: Mutex<bool>, // AtomicBool is weird in Arc
consensus_p2p: Option<P2pPtr>,
sync_p2p: Option<P2pPtr>,
_wallet: WalletPtr,
validator_state: ValidatorStatePtr,
rpc_connections: Mutex<HashSet<StoppableTaskPtr>>,
/// Defines a blockchain network configuration.
/// Default values correspond to a local network.
#[derive(Clone, Debug, serde::Deserialize, structopt::StructOpt, structopt_toml::StructOptToml)]
#[structopt()]
pub struct BlockchainNetwork {
#[structopt(long, default_value = "~/.local/darkfi/darkfid_blockchain_localnet")]
/// Path to blockchain database
pub database: String,
#[structopt(long, default_value = "3")]
/// Finalization threshold, denominated by number of blocks
pub threshold: usize,
#[structopt(long, default_value = "tcp://127.0.0.1:28467")]
/// minerd JSON-RPC endpoint
pub minerd_endpoint: Url,
#[structopt(long, default_value = "10")]
/// PoW block production target, in seconds
pub pow_target: usize,
#[structopt(long)]
/// Optional fixed PoW difficulty, used for testing
pub pow_fixed_difficulty: Option<usize>,
#[structopt(long, default_value = "10")]
/// Epoch duration, denominated by number of blocks/slots
pub epoch_length: u64,
#[structopt(long, default_value = "10")]
/// PoS slot duration, in seconds
pub slot_time: u64,
#[structopt(long)]
/// Whitelisted faucet public key (repeatable flag)
pub faucet_pub: Vec<String>,
#[structopt(long)]
/// Participate in the consensus protocol
pub consensus: bool,
#[structopt(long)]
/// Wallet address to receive consensus rewards
pub recipient: Option<String>,
#[structopt(long)]
/// Skip syncing process and start node right away
pub skip_sync: bool,
#[structopt(long)]
/// Enable PoS testing mode for local testing
pub pos_testing_mode: bool,
/// Syncing network settings
#[structopt(flatten)]
pub sync_net: SettingsOpt,
/// Consensus network settings
#[structopt(flatten)]
pub consensus_net: SettingsOpt,
}
// JSON-RPC methods
mod rpc_blockchain;
mod rpc_misc;
mod rpc_tx;
mod rpc_wallet;
// Internal methods
//mod internal;
#[async_trait]
impl RequestHandler for Darkfid {
async fn handle_request(&self, req: JsonRequest) -> JsonResult {
match req.method.as_str() {
// =====================
// Miscellaneous methods
// =====================
"ping" => return self.pong(req.id, req.params).await,
"clock" => return self.misc_clock(req.id, req.params).await,
"sync_dnet_switch" => return self.misc_sync_dnet_switch(req.id, req.params).await,
"consensus_dnet_switch" => {
return self.misc_consensus_dnet_switch(req.id, req.params).await
}
// ==================
// Blockchain methods
// ==================
"blockchain.get_slot" => return self.blockchain_get_slot(req.id, req.params).await,
"blockchain.get_tx" => return self.blockchain_get_tx(req.id, req.params).await,
"blockchain.last_known_slot" => {
return self.blockchain_last_known_slot(req.id, req.params).await
}
"blockchain.subscribe_blocks" => {
return self.blockchain_subscribe_blocks(req.id, req.params).await
}
"blockchain.subscribe_err_txs" => {
return self.blockchain_subscribe_err_txs(req.id, req.params).await
}
"blockchain.lookup_zkas" => {
return self.blockchain_lookup_zkas(req.id, req.params).await
}
// ===================
// Transaction methods
// ===================
"tx.simulate" => return self.tx_simulate(req.id, req.params).await,
"tx.broadcast" => return self.tx_broadcast(req.id, req.params).await,
// ==============
// Wallet methods
// ==============
"wallet.exec_sql" => return self.wallet_exec_sql(req.id, req.params).await,
"wallet.query_row_single" => {
return self.wallet_query_row_single(req.id, req.params).await
}
"wallet.query_row_multi" => {
return self.wallet_query_row_multi(req.id, req.params).await
}
// ==============
// Invalid method
// ==============
_ => return JsonError::new(MethodNotFound, None, req.id).into(),
}
}
async fn connections_mut(&self) -> MutexGuard<'_, HashSet<StoppableTaskPtr>> {
self.rpc_connections.lock().await
}
/// Daemon structure
pub struct Darkfid {
/// Syncing P2P network pointer
sync_p2p: P2pPtr,
/// Optional consensus P2P network pointer
consensus_p2p: Option<P2pPtr>,
/// Validator(node) pointer
validator: ValidatorPtr,
/// A map of various subscribers exporting live info from the blockchain
subscribers: HashMap<&'static str, JsonSubscriber>,
/// JSON-RPC connection tracker
rpc_connections: Mutex<HashSet<StoppableTaskPtr>>,
/// JSON-RPC client to execute requests to the miner daemon
rpc_client: Option<RpcClient>,
}
impl Darkfid {
pub async fn new(
validator_state: ValidatorStatePtr,
sync_p2p: P2pPtr,
consensus_p2p: Option<P2pPtr>,
sync_p2p: Option<P2pPtr>,
_wallet: WalletPtr,
validator: ValidatorPtr,
subscribers: HashMap<&'static str, JsonSubscriber>,
rpc_client: Option<RpcClient>,
) -> Self {
Self {
synced: Mutex::new(false),
consensus_p2p,
sync_p2p,
_wallet,
validator_state,
consensus_p2p,
validator,
subscribers,
rpc_connections: Mutex::new(HashSet::new()),
rpc_client,
}
}
}
async_daemonize!(realmain);
async fn realmain(args: Args, ex: Arc<smol::Executor<'static>>) -> Result<()> {
if args.consensus && args.clock_sync {
// We verify that if peer/seed nodes are configured, their rpc config also exists
if ((!args.consensus_p2p_peer.is_empty() && args.consensus_peer_rpc.is_empty()) ||
(args.consensus_p2p_peer.is_empty() && !args.consensus_peer_rpc.is_empty())) ||
((!args.consensus_p2p_seed.is_empty() && args.consensus_seed_rpc.is_empty()) ||
(args.consensus_p2p_seed.is_empty() && !args.consensus_seed_rpc.is_empty()))
{
error!(
"Consensus peer/seed nodes misconfigured: both p2p and rpc urls must be present"
);
return Err(Error::ConfigInvalid)
info!(target: "darkfid", "Initializing DarkFi node...");
// Grab blockchain network configuration
let (blockchain_config, genesis_block) = match args.network.as_str() {
"localnet" => {
(parse_blockchain_config(args.config, "localnet").await?, GENESIS_BLOCK_LOCALNET)
}
// We verify that the system clock is valid before initializing
let peers = [&args.consensus_peer_rpc[..], &args.consensus_seed_rpc[..]].concat();
if (check_clock(&peers).await).is_err() {
error!("System clock is invalid, terminating...");
return Err(Error::InvalidClock)
};
}
// Initialize or load wallet
let wallet = WalletDb::new(Some(expand_path(&args.wallet_path)?), Some(&args.wallet_pass))?;
// Initialize or open sled database
let db_path =
Path::new(expand_path(&args.database)?.to_str().unwrap()).join(args.chain.clone());
let sled_db = sled::open(&db_path)?;
// Initialize validator state
let (bootstrap_ts, genesis_ts, genesis_data, initial_distribution) = match args.chain.as_str() {
"mainnet" => (
*MAINNET_BOOTSTRAP_TIMESTAMP,
*MAINNET_GENESIS_TIMESTAMP,
*MAINNET_GENESIS_HASH_BYTES,
*MAINNET_INITIAL_DISTRIBUTION,
),
"testnet" => (
*TESTNET_BOOTSTRAP_TIMESTAMP,
*TESTNET_GENESIS_TIMESTAMP,
*TESTNET_GENESIS_HASH_BYTES,
*TESTNET_INITIAL_DISTRIBUTION,
),
x => {
error!("Unsupported chain `{}`", x);
"testnet" => {
(parse_blockchain_config(args.config, "testnet").await?, GENESIS_BLOCK_TESTNET)
}
"mainnet" => {
(parse_blockchain_config(args.config, "mainnet").await?, GENESIS_BLOCK_MAINNET)
}
_ => {
error!("Unsupported chain `{}`", args.network);
return Err(Error::UnsupportedChain)
}
};
// Parse faucet addresses
let mut faucet_pubkeys = vec![];
for i in args.cashier_pub {
let pk = PublicKey::from_str(&i)?;
faucet_pubkeys.push(pk);
if blockchain_config.pos_testing_mode {
info!(target: "darkfid", "Node is configured to run in PoS testing mode!");
}
for i in args.faucet_pub {
let pk = PublicKey::from_str(&i)?;
faucet_pubkeys.push(pk);
// Parse the genesis block
let bytes = bs58::decode(&genesis_block.trim()).into_vec()?;
let genesis_block: BlockInfo = deserialize_async(&bytes).await?;
// Initialize or open sled database
let db_path = expand_path(&blockchain_config.database)?;
let sled_db = sled::open(&db_path)?;
// Initialize validator configuration
let genesis_txs_total = genesis_txs_total(&genesis_block.txs).await?;
let time_keeper = TimeKeeper::new(
genesis_block.header.timestamp,
blockchain_config.epoch_length,
blockchain_config.slot_time,
0,
);
let pow_fixed_difficulty = if let Some(diff) = blockchain_config.pow_fixed_difficulty {
info!(target: "darkfid", "Node is configured to run with fixed PoW difficulty: {}", diff);
Some(diff.into())
} else {
None
};
let config = ValidatorConfig::new(
time_keeper,
blockchain_config.threshold,
blockchain_config.pow_target,
pow_fixed_difficulty,
genesis_block,
genesis_txs_total,
vec![],
blockchain_config.pos_testing_mode,
false, // TODO: Make configurable
);
// Initialize validator
let validator = Validator::new(&sled_db, config).await?;
// Here we initialize various subscribers that can export live blockchain/consensus data.
let mut subscribers = HashMap::new();
subscribers.insert("blocks", JsonSubscriber::new("blockchain.subscribe_blocks"));
subscribers.insert("txs", JsonSubscriber::new("blockchain.subscribe_txs"));
if blockchain_config.consensus {
subscribers.insert("proposals", JsonSubscriber::new("blockchain.subscribe_proposals"));
}
if args.single_node {
info!("Node is configured to run in single-node mode!");
}
// Initialize syncing P2P network
let sync_p2p =
spawn_sync_p2p(&blockchain_config.sync_net.into(), &validator, &subscribers, ex.clone())
.await;
// Initialize validator state
let state = ValidatorState::new(
&sled_db,
bootstrap_ts,
genesis_ts,
genesis_data,
initial_distribution,
wallet.clone(),
faucet_pubkeys,
args.consensus,
args.single_node,
)
.await?;
let sync_p2p = {
info!("Registering block sync P2P protocols...");
let sync_network_settings = net::Settings {
inbound_addrs: args.sync_p2p_accept,
outbound_connections: args.sync_slots,
external_addrs: args.sync_p2p_external,
peers: args.sync_p2p_peer.clone(),
seeds: args.sync_p2p_seed.clone(),
allowed_transports: args.sync_p2p_transports,
localnet: args.localnet,
..Default::default()
// Initialize consensus P2P network
let (consensus_p2p, rpc_client) = if blockchain_config.consensus {
let Ok(rpc_client) = RpcClient::new(blockchain_config.minerd_endpoint, ex.clone()).await
else {
error!(target: "darkfid", "Failed to initialize miner daemon rpc client, check if minerd is running");
return Err(Error::RpcClientStopped)
};
let p2p = net::P2p::new(sync_network_settings, ex.clone()).await;
let registry = p2p.protocol_registry();
let _state = state.clone();
registry
.register(net::SESSION_ALL, move |channel, p2p| {
let state = _state.clone();
async move {
ProtocolSync::init(channel, state, p2p, args.consensus)
.await
.unwrap()
}
})
.await;
let _state = state.clone();
registry
.register(net::SESSION_ALL, move |channel, p2p| {
let state = _state.clone();
async move { ProtocolTx::init(channel, state, p2p).await.unwrap() }
})
.await;
Some(p2p)
(
Some(
spawn_consensus_p2p(
&blockchain_config.consensus_net.into(),
&validator,
&subscribers,
ex.clone(),
)
.await,
),
Some(rpc_client),
)
} else {
(None, None)
};
// P2P network settings for the consensus protocol
let consensus_p2p = {
if !args.consensus {
None
} else {
info!("Registering consensus P2P protocols...");
let consensus_network_settings = net::Settings {
inbound_addrs: args.consensus_p2p_accept,
outbound_connections: args.consensus_slots,
external_addrs: args.consensus_p2p_external,
peers: args.consensus_p2p_peer.clone(),
seeds: args.consensus_p2p_seed.clone(),
allowed_transports: args.consensus_p2p_transports,
localnet: args.localnet,
..Default::default()
};
let p2p = net::P2p::new(consensus_network_settings, ex.clone()).await;
let registry = p2p.protocol_registry();
let _state = state.clone();
registry
.register(net::SESSION_ALL, move |channel, p2p| {
let state = _state.clone();
async move { ProtocolProposal::init(channel, state, p2p).await.unwrap() }
})
.await;
let _state = state.clone();
registry
.register(net::SESSION_ALL, move |channel, p2p| {
let state = _state.clone();
async move { ProtocolSyncConsensus::init(channel, state, p2p).await.unwrap() }
})
.await;
Some(p2p)
}
};
// Initialize program state
let darkfid =
Darkfid::new(state.clone(), consensus_p2p.clone(), sync_p2p.clone(), wallet.clone()).await;
// Initialize node
let darkfid = Darkfid::new(
sync_p2p.clone(),
consensus_p2p.clone(),
validator.clone(),
subscribers,
rpc_client,
)
.await;
let darkfid = Arc::new(darkfid);
info!(target: "darkfid", "Node initialized successfully!");
// Pinging minerd daemon to verify it listens
if blockchain_config.consensus {
if let Err(e) = darkfid.ping_miner_daemon().await {
error!(target: "darkfid", "Failed to ping miner daemon: {}", e);
return Err(Error::RpcClientStopped)
}
}
// JSON-RPC server
info!("Starting JSON-RPC server");
info!(target: "darkfid", "Starting JSON-RPC server");
// Here we create a task variable so we can manually close the
// task later. P2P tasks don't need this since the P2P network has its
// own stop() function to shut down, which also terminates the task we
// created for it.
let rpc_task = StoppableTask::new();
let darkfid_ = darkfid.clone();
rpc_task.clone().start(
@@ -457,69 +339,81 @@ async fn realmain(args: Args, ex: Arc<smol::Executor<'static>>) -> Result<()> {
ex.clone(),
);
info!("Starting sync P2P network");
sync_p2p.clone().unwrap().start().await?;
// TODO: I think this is not necessary anymore
//info!("Waiting for sync P2P outbound connections");
//sync_p2p.clone().unwrap().wait_for_outbound(ex.clone()).await?;
match block_sync_task(sync_p2p.clone().unwrap(), state.clone()).await {
Ok(()) => *darkfid.synced.lock().await = true,
Err(e) => error!("Failed syncing blockchain: {}", e),
}
info!(target: "darkfid", "Starting sync P2P network");
sync_p2p.clone().start().await?;
// Consensus protocol
let proposal_task = if args.consensus && *darkfid.synced.lock().await {
info!("Starting consensus P2P network");
if blockchain_config.consensus {
info!(target: "darkfid", "Starting consensus P2P network");
let consensus_p2p = consensus_p2p.clone().unwrap();
consensus_p2p.clone().start().await?;
} else {
info!(target: "darkfid", "Not starting consensus P2P network");
}
// TODO: I think this is not necessary anymore
//info!("Waiting for consensus P2P outbound connections");
//consensus_p2p.clone().unwrap().wait_for_outbound(ex.clone()).await?;
// Sync blockchain
if !blockchain_config.skip_sync {
sync_task(&darkfid).await?;
} else {
*darkfid.validator.synced.write().await = true;
}
// Clean node pending transactions
darkfid.validator.purge_pending_txs().await?;
// Consensus protocol
let consensus_task = if blockchain_config.consensus {
info!(target: "darkfid", "Starting consensus protocol task");
// Grab rewards recipient public key (address)
if blockchain_config.recipient.is_none() {
return Err(Error::ParseFailed("Recipient address missing"))
}
let recipient = match PublicKey::from_str(&blockchain_config.recipient.unwrap()) {
Ok(address) => address,
Err(_) => return Err(Error::InvalidAddress),
};
info!("Starting consensus protocol task");
let task = StoppableTask::new();
task.clone().start(
proposal_task(consensus_p2p.clone(), sync_p2p.clone().unwrap(), state, ex.clone()),
// Weird hack to prevent lifetimes hell
async move { miner_task(&darkfid, &recipient).await },
|res| async {
match res {
Ok(()) | Err(Error::ProposalTaskStopped) => { /* Do nothing */ }
Err(e) => error!(target: "darkfid", "Failed starting proposal task: {}", e),
Ok(()) | Err(Error::MinerTaskStopped) => { /* Do nothing */ }
Err(e) => error!(target: "darkfid", "Failed starting miner task: {}", e),
}
},
Error::ProposalTaskStopped,
Error::MinerTaskStopped,
ex.clone(),
);
Some(task)
} else {
info!("Not starting consensus P2P network");
info!(target: "darkfid", "Not participating in consensus");
None
};
// Signal handling for graceful termination.
let (signals_handler, signals_task) = SignalHandler::new(ex)?;
signals_handler.wait_termination(signals_task).await?;
info!("Caught termination signal, cleaning up and exiting...");
info!(target: "darkfid", "Caught termination signal, cleaning up and exiting...");
info!(target: "darkfid", "Stopping JSON-RPC server...");
rpc_task.stop().await;
info!(target: "darkfid", "Stopping syncing P2P network...");
sync_p2p.clone().unwrap().stop().await;
if let Some(task) = proposal_task {
info!(target: "darkfid", "Stopping proposal task...");
task.stop().await;
sync_p2p.stop().await;
if blockchain_config.consensus {
info!(target: "darkfid", "Stopping consensus P2P network...");
consensus_p2p.unwrap().stop().await;
info!(target: "darkfid", "Stopping consensus task...");
consensus_task.unwrap().stop().await;
}
info!("Flushing sled database...");
info!(target: "darkfid", "Flushing sled database...");
let flushed_bytes = sled_db.flush_async().await?;
info!("Flushed {} bytes", flushed_bytes);
info!(target: "darkfid", "Flushed {} bytes", flushed_bytes);
Ok(())
}
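Tying the startup flow back to the configuration: when a profile opts into consensus, the code above requires both a reachable minerd and a rewards recipient, so the relevant profile section minimally carries something like the following (values taken from the sample config earlier; the localnet recipient there is a dummy):

[network_config."localnet"]
consensus = true
minerd_endpoint = "tcp://127.0.0.1:28467"
recipient = "YOUR_WALLET_ADDRESS_HERE"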


@@ -16,10 +16,10 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::str::FromStr;
use std::{collections::HashMap, str::FromStr};
use darkfi_sdk::crypto::ContractId;
use darkfi_serial::{deserialize, serialize};
use darkfi_serial::{deserialize_async, serialize_async};
use log::{debug, error};
use tinyjson::JsonValue;
@@ -32,8 +32,7 @@ use darkfi::{
util::encoding::base64,
};
use super::Darkfid;
use crate::{server_error, RpcError};
use crate::{server_error, Darkfid, RpcError};
impl Darkfid {
// RPCAPI:
@@ -48,7 +47,7 @@ impl Darkfid {
// struct serialized into base64.
//
// --> {"jsonrpc": "2.0", "method": "blockchain.get_slot", "params": ["0"], "id": 1}
// <-- {"jsonrpc": "2.0", "result": "ABCD...", "id": 1}
// <-- {"jsonrpc": "2.0", "result": {...}, "id": 1}
pub async fn blockchain_get_slot(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if params.len() != 1 || !params[0].is_string() {
@@ -60,15 +59,10 @@ impl Darkfid {
Err(_) => return JsonError::new(ParseError, None, id).into(),
};
let validator_state = self.validator_state.read().await;
let blocks = match validator_state.blockchain.get_blocks_by_slot(&[slot]) {
Ok(v) => {
drop(validator_state);
v
}
let blocks = match self.validator.blockchain.get_blocks_by_slot(&[slot]) {
Ok(v) => v,
Err(e) => {
error!("[RPC] blockchain.get_slot: Failed fetching block by slot: {}", e);
error!(target: "darkfid::rpc::blockchain_get_slot", "Failed fetching block by slot: {}", e);
return JsonError::new(InternalError, None, id).into()
}
};
@@ -77,7 +71,7 @@ impl Darkfid {
return server_error(RpcError::UnknownSlot, id, None)
}
let block = base64::encode(&serialize(&blocks[0]));
let block = base64::encode(&serialize_async(&blocks[0]).await);
JsonResponse::new(JsonValue::String(block), id).into()
}
@@ -96,7 +90,7 @@ impl Darkfid {
// <-- {"jsonrpc": "2.0", "result": "ABCD...", "id": 1}
pub async fn blockchain_get_tx(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if params.len() != 1 || !params[0].is_string() {
if params.len() != 1 {
return JsonError::new(InvalidParams, None, id).into()
}
@@ -106,25 +100,19 @@ impl Darkfid {
Err(_) => return JsonError::new(ParseError, None, id).into(),
};
let validator_state = self.validator_state.read().await;
let txs = match validator_state.blockchain.transactions.get(&[tx_hash], true) {
Ok(txs) => {
drop(validator_state);
txs
}
let txs = match self.validator.blockchain.transactions.get(&[tx_hash], true) {
Ok(txs) => txs,
Err(e) => {
error!("[RPC] blockchain.get_tx: Failed fetching tx by hash: {}", e);
error!(target: "darkfid::rpc::blockchain_get_tx", "Failed fetching tx by hash: {}", e);
return JsonError::new(InternalError, None, id).into()
}
};
// This would be a logic error somewhere
assert_eq!(txs.len(), 1);
// and strict was used during .get()
let tx = txs[0].as_ref().unwrap();
let tx_enc = base64::encode(&serialize(tx));
let tx_enc = base64::encode(&serialize_async(tx).await);
JsonResponse::new(JsonValue::String(tx_enc), id).into()
}
@@ -145,12 +133,12 @@ impl Darkfid {
return JsonError::new(InvalidParams, None, id).into()
}
let blockchain = { self.validator_state.read().await.blockchain.clone() };
let blockchain = self.validator.blockchain.clone();
let Ok(last_slot) = blockchain.last() else {
return JsonError::new(InternalError, None, id).into()
};
JsonResponse::new(JsonValue::String(last_slot.0.to_string()), id).into()
JsonResponse::new(JsonValue::Number(last_slot.0 as f64), id).into()
}
// RPCAPI:
@@ -166,23 +154,47 @@ impl Darkfid {
return JsonError::new(InvalidParams, None, id).into()
}
self.validator_state.read().await.subscribers.get("blocks").unwrap().clone().into()
self.subscribers.get("blocks").unwrap().clone().into()
}
// RPCAPI:
// Initializes a subscription to erroneous transactions notifications.
// Initializes a subscription to new incoming transactions.
// Once a subscription is established, `darkfid` will send JSON-RPC notifications of
// erroneous transactions to the subscriber.
// new incoming transactions to the subscriber.
//
// --> {"jsonrpc": "2.0", "method": "blockchain.subscribe_err_txs", "params": [], "id": 1}
// <-- {"jsonrpc": "2.0", "method": "blockchain.subscribe_err_txs", "params": [`tx_hash`]}
pub async fn blockchain_subscribe_err_txs(&self, id: u16, params: JsonValue) -> JsonResult {
// --> {"jsonrpc": "2.0", "method": "blockchain.subscribe_txs", "params": [], "id": 1}
// <-- {"jsonrpc": "2.0", "method": "blockchain.subscribe_txs", "params": [`tx_hash`]}
pub async fn blockchain_subscribe_txs(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if !params.is_empty() {
return JsonError::new(InvalidParams, None, id).into()
}
self.validator_state.read().await.subscribers.get("err_txs").unwrap().clone().into()
self.subscribers.get("txs").unwrap().clone().into()
}
// RPCAPI:
// Initializes a subscription to new incoming proposals, assuming the node participates
// in consensus. Once a subscription is established, `darkfid` will send JSON-RPC
// notifications of new incoming proposals to the subscriber.
//
// --> {"jsonrpc": "2.0", "method": "blockchain.subscribe_proposals", "params": [], "id": 1}
// <-- {"jsonrpc": "2.0", "method": "blockchain.subscribe_proposals", "params": [`blockinfo`]}
pub async fn blockchain_subscribe_proposals(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if !params.is_empty() {
return JsonError::new(InvalidParams, None, id).into()
}
// Since the proposals subscriber is only active if we participate in consensus,
// we have to check if it actually exists in the subscribers map.
let proposals_subscriber = self.subscribers.get("proposals");
if proposals_subscriber.is_none() {
error!(target: "darkfid::rpc::blockchain_subscribe_proposals", "Proposals subscriber not found");
return JsonError::new(InternalError, None, id).into()
}
proposals_subscriber.unwrap().clone().into()
}
// RPCAPI:
@@ -193,7 +205,7 @@ impl Darkfid {
// * `array[0]`: base58-encoded contract ID string
//
// **Returns:**
// * `array[n]`: Pairs of: `zkas_namespace` string, serialized and base64-encoded
// * `array[n]`: Pairs of: `zkas_namespace` string, serialized
// [`ZkBinary`](https://darkrenaissance.github.io/darkfi/development/darkfi/zkas/decoder/struct.ZkBinary.html)
// object
//
@@ -209,12 +221,12 @@ impl Darkfid {
let contract_id = match ContractId::from_str(contract_id) {
Ok(v) => v,
Err(e) => {
error!("[RPC] blockchain.lookup_zkas: Error decoding string to ContractId: {}", e);
error!(target: "darkfid::rpc::blockchain_lookup_zkas", "Error decoding string to ContractId: {}", e);
return JsonError::new(InvalidParams, None, id).into()
}
};
let blockchain = { self.validator_state.read().await.blockchain.clone() };
let blockchain = self.validator.blockchain.clone();
let Ok(zkas_db) = blockchain.contracts.lookup(
&blockchain.sled_db,
@@ -222,7 +234,7 @@ impl Darkfid {
SMART_CONTRACT_ZKAS_DB_NAME,
) else {
error!(
"[RPC] blockchain.lookup_zkas: Did not find zkas db for ContractId: {}",
target: "darkfid::rpc::blockchain_lookup_zkas", "Did not find zkas db for ContractId: {}",
contract_id
);
return server_error(RpcError::ContractZkasDbNotFound, id, None)
@@ -231,13 +243,13 @@ impl Darkfid {
let mut ret = vec![];
for i in zkas_db.iter() {
debug!("Iterating over zkas db");
debug!(target: "darkfid::rpc::blockchain_lookup_zkas", "Iterating over zkas db");
let Ok((zkas_ns, zkas_bytes)) = i else {
error!("Internal sled error iterating db");
error!(target: "darkfid::rpc::blockchain_lookup_zkas", "Internal sled error iterating db");
return JsonError::new(InternalError, None, id).into()
};
let Ok(zkas_ns) = deserialize(&zkas_ns) else {
let Ok(zkas_ns) = deserialize_async(&zkas_ns).await else {
return JsonError::new(InternalError, None, id).into()
};
@@ -250,4 +262,31 @@ impl Darkfid {
JsonResponse::new(JsonValue::Array(ret), id).into()
}
// RPCAPI:
// Returns the `chain_id` used for merge mining. A 32-byte hash of the genesis block.
//
// --> {"jsonrpc": "2.0", "method": "merge_mining_get_chain_id", "params": [], "id": 0}
// <-- {"jsonrpc": "2.0", "result": {"chain_id": 02f8...7863"}, "id": 0}
pub async fn merge_mining_get_chain_id(&self, id: u16, _params: JsonValue) -> JsonResult {
let chain_id = match self.validator.blockchain.genesis() {
Ok((_, v)) => v,
Err(e) => {
error!(
target: "darkfid::rpc::merge_mining_get_chain_id",
"[RPC] Error looking up genesis block: {}", e,
);
return JsonError::new(InternalError, None, id).into()
}
};
JsonResponse::new(
JsonValue::Object(HashMap::from([(
"chain_id".to_string(),
chain_id.to_hex().to_string().into(),
)])),
id,
)
.into()
}
}


@@ -1,85 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use tinyjson::JsonValue;
use darkfi::{
rpc::jsonrpc::{ErrorCode, JsonError, JsonResponse, JsonResult},
util::time::Timestamp,
};
use super::Darkfid;
impl Darkfid {
// RPCAPI:
// Returns current system clock as u64 (string) timestamp
//
// --> {"jsonrpc": "2.0", "method": "clock", "params": [], "id": 1}
// <-- {"jsonrpc": "2.0", "result": "1234"}, "id": 1}
pub async fn misc_clock(&self, id: u16, _params: JsonValue) -> JsonResult {
JsonResponse::new(JsonValue::String(Timestamp::current_time().0.to_string()), id).into()
}
// RPCAPI:
// Activate or deactivate dnet in the sync P2P stack.
// By sending `true`, dnet will be activated, and by sending `false` dnet
// will be deactivated. Returns `true` on success.
//
// --> {"jsonrpc": "2.0", "method": "sync_dnet_switch", "params": [true], "id": 42}
// <-- {"jsonrpc": "2.0", "result": true, "id": 42}
pub async fn misc_sync_dnet_switch(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if params.len() != 1 || !params[0].is_bool() {
return JsonError::new(ErrorCode::InvalidParams, None, id).into()
}
let switch = params[0].get::<bool>().unwrap();
if *switch {
self.sync_p2p.as_ref().unwrap().dnet_enable().await;
} else {
self.sync_p2p.as_ref().unwrap().dnet_disable().await;
}
JsonResponse::new(JsonValue::Boolean(true), id).into()
}
// RPCAPI:
// Activate or deactivate dnet in the consensus P2P stack.
// By sending `true`, dnet will be activated, and by sending `false` dnet
// will be deactivated. Returns `true` on success.
//
// --> {"jsonrpc": "2.0", "method": "consensus_dnet_switch", "params": [true], "id": 42}
// <-- {"jsonrpc": "2.0", "result": true, "id": 42}
pub async fn misc_consensus_dnet_switch(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if params.len() != 1 || !params[0].is_bool() {
return JsonError::new(ErrorCode::InvalidParams, None, id).into()
}
let switch = params[0].get::<bool>().unwrap();
if *switch {
self.consensus_p2p.as_ref().unwrap().dnet_enable().await;
} else {
self.consensus_p2p.as_ref().unwrap().dnet_disable().await;
}
JsonResponse::new(JsonValue::Boolean(true), id).into()
}
}

View File

@@ -16,12 +16,15 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_serial::deserialize;
use log::{error, warn};
use darkfi_serial::deserialize_async;
use log::error;
use tinyjson::JsonValue;
use darkfi::{
rpc::jsonrpc::{ErrorCode::InvalidParams, JsonError, JsonResponse, JsonResult},
rpc::jsonrpc::{
ErrorCode::{InternalError, InvalidParams},
JsonError, JsonResponse, JsonResult,
},
tx::Transaction,
util::encoding::base64,
};
@@ -35,7 +38,7 @@ impl Darkfid {
// Returns `true` if the transaction is valid, otherwise, a corresponding
// error.
//
// --> {"jsonrpc": "2.0", "method": "tx.simulate", "params": ["base58encodedTX"], "id": 1}
// --> {"jsonrpc": "2.0", "method": "tx.simulate", "params": ["base64encodedTX"], "id": 1}
// <-- {"jsonrpc": "2.0", "result": true, "id": 1}
pub async fn tx_simulate(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
@@ -43,43 +46,38 @@ impl Darkfid {
return JsonError::new(InvalidParams, None, id).into()
}
if !(*self.synced.lock().await) {
error!("[RPC] tx.simulate: Blockchain is not synced");
if !*self.validator.synced.read().await {
error!(target: "darkfid::rpc::tx_simulate", "Blockchain is not synced");
return server_error(RpcError::NotSynced, id, None)
}
// Try to deserialize the transaction
let tx_enc = params[0].get::<String>().unwrap();
let tx_bytes = match base64::decode(tx_enc.trim()) {
let tx_enc = params[0].get::<String>().unwrap().trim();
let tx_bytes = match base64::decode(tx_enc) {
Some(v) => v,
None => {
error!("[RPC] tx.simulate: Failed decoding base64 transaction");
error!(target: "darkfid::rpc::tx_simulate", "Failed decoding base64 transaction");
return server_error(RpcError::ParseError, id, None)
}
};
let tx: Transaction = match deserialize(&tx_bytes) {
let tx: Transaction = match deserialize_async(&tx_bytes).await {
Ok(v) => v,
Err(e) => {
error!("[RPC] tx.simulate: Failed deserializing bytes into Transaction: {}", e);
error!(target: "darkfid::rpc::tx_simulate", "Failed deserializing bytes into Transaction: {}", e);
return server_error(RpcError::ParseError, id, None)
}
};
// Simulate state transition
let lock = self.validator_state.read().await;
let current_slot = lock.consensus.time_keeper.current_slot();
match lock.verify_transactions(&[tx], current_slot, false).await {
Ok(erroneous_txs) => {
if !erroneous_txs.is_empty() {
error!("[RPC] tx.simulate: invalid transaction provided");
return server_error(RpcError::TxSimulationFail, id, None)
}
}
Err(e) => {
error!("[RPC] tx.simulate: Failed to validate state transition: {}", e);
return server_error(RpcError::TxSimulationFail, id, None)
}
let current_slot = self.validator.consensus.time_keeper.current_slot();
let result = self.validator.add_transactions(&[tx], current_slot, false).await;
if result.is_err() {
error!(
target: "darkfid::rpc::tx_simulate", "Failed to validate state transition: {}",
result.err().unwrap()
);
return server_error(RpcError::TxSimulationFail, id, None)
};
JsonResponse::new(JsonValue::Boolean(true), id).into()
@@ -91,7 +89,7 @@ impl Darkfid {
// if the transaction is actually valid, and in turn it will return an
// error if this is the case. Otherwise, a transaction ID will be returned.
//
// --> {"jsonrpc": "2.0", "method": "tx.broadcast", "params": ["base58encodedTX"], "id": 1}
// --> {"jsonrpc": "2.0", "method": "tx.broadcast", "params": ["base64encodedTX"], "id": 1}
// <-- {"jsonrpc": "2.0", "result": "txID...", "id": 1}
pub async fn tx_broadcast(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
@@ -99,66 +97,124 @@ impl Darkfid {
return JsonError::new(InvalidParams, None, id).into()
}
if !(*self.synced.lock().await) {
error!("[RPC] tx.transfer: Blockchain is not synced");
if !*self.validator.synced.read().await {
error!(target: "darkfid::rpc::tx_broadcast", "Blockchain is not synced");
return server_error(RpcError::NotSynced, id, None)
}
// Try to deserialize the transaction
let tx_enc = params[0].get::<String>().unwrap();
let tx_bytes = match base64::decode(tx_enc.trim()) {
let tx_enc = params[0].get::<String>().unwrap().trim();
let tx_bytes = match base64::decode(tx_enc) {
Some(v) => v,
None => {
error!("[RPC] tx.broadcast: Failed decoding base64 transaction");
error!(target: "darkfid::rpc::tx_broadcast", "Failed decoding base64 transaction");
return server_error(RpcError::ParseError, id, None)
}
};
let tx: Transaction = match deserialize(&tx_bytes) {
let tx: Transaction = match deserialize_async(&tx_bytes).await {
Ok(v) => v,
Err(e) => {
error!("[RPC] tx.broadcast: Failed deserializing bytes into Transaction: {}", e);
error!(target: "darkfid::rpc::tx_broadcast", "Failed deserializing bytes into Transaction: {}", e);
return server_error(RpcError::ParseError, id, None)
}
};
if self.consensus_p2p.is_some() {
// Consider we're participating in consensus here?
// The append_tx function performs a state transition check.
if !self.validator_state.write().await.append_tx(tx.clone()).await {
error!("[RPC] tx.broadcast: Failed to append transaction to mempool");
return server_error(RpcError::TxBroadcastFail, id, None)
// Consensus participants can directly perform
// the state transition check and append to their
// pending transactions store.
if self.validator.append_tx(&tx).await.is_err() {
error!(target: "darkfid::rpc::tx_broadcast", "Failed to append transaction to mempool");
return server_error(RpcError::TxSimulationFail, id, None)
}
} else {
// We'll perform the state transition check here.
let lock = self.validator_state.read().await;
let current_slot = lock.consensus.time_keeper.current_slot();
match lock.verify_transactions(&[tx.clone()], current_slot, false).await {
Ok(erroneous_txs) => {
if !erroneous_txs.is_empty() {
error!("[RPC] tx.broadcast: invalid transaction provided");
return server_error(RpcError::TxSimulationFail, id, None)
}
}
Err(e) => {
error!("[RPC] tx.broadcast: Failed to validate state transition: {}", e);
return server_error(RpcError::TxSimulationFail, id, None)
}
let current_slot = self.validator.consensus.time_keeper.current_slot();
let result = self.validator.add_transactions(&[tx.clone()], current_slot, false).await;
if result.is_err() {
error!(
target: "darkfid::rpc::tx_broadcast", "Failed to validate state transition: {}",
result.err().unwrap()
);
return server_error(RpcError::TxSimulationFail, id, None)
};
}
if let Some(sync_p2p) = &self.sync_p2p {
sync_p2p.broadcast(&tx).await;
if sync_p2p.channels().await.is_empty() {
error!("[RPC] tx.broadcast: Failed broadcasting tx, no connected channels");
return server_error(RpcError::TxBroadcastFail, id, None)
}
} else {
warn!("[RPC] tx.broadcast: No sync P2P network, not broadcasting transaction.");
self.sync_p2p.broadcast(&tx).await;
if self.sync_p2p.channels().await.is_empty() {
error!(target: "darkfid::rpc::tx_broadcast", "Failed broadcasting tx, no connected channels");
return server_error(RpcError::TxBroadcastFail, id, None)
}
let tx_hash = tx.hash().unwrap().to_string();
JsonResponse::new(JsonValue::String(tx_hash), id).into()
}
// RPCAPI:
// Queries the node pending transactions store to retrieve all transactions.
// Returns a vector of hex-encoded transaction hashes.
//
// --> {"jsonrpc": "2.0", "method": "tx.pending", "params": [], "id": 1}
// <-- {"jsonrpc": "2.0", "result": "[TxHash,...]", "id": 1}
pub async fn tx_pending(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if !params.is_empty() {
return JsonError::new(InvalidParams, None, id).into()
}
if !*self.validator.synced.read().await {
error!(target: "darkfid::rpc::tx_pending", "Blockchain is not synced");
return server_error(RpcError::NotSynced, id, None)
}
let pending_txs = match self.validator.blockchain.get_pending_txs() {
Ok(v) => v,
Err(e) => {
error!(target: "darkfid::rpc::tx_pending", "Failed fetching pending txs: {}", e);
return JsonError::new(InternalError, None, id).into()
}
};
let pending_txs: Vec<JsonValue> =
pending_txs.iter().map(|x| JsonValue::String(x.hash().unwrap().to_string())).collect();
JsonResponse::new(JsonValue::Array(pending_txs), id).into()
}
// RPCAPI:
// Removes all transactions from the node pending transactions store.
// Returns a vector of the removed transactions' hex-encoded hashes.
//
// --> {"jsonrpc": "2.0", "method": "tx.clean_pending", "params": [], "id": 1}
// <-- {"jsonrpc": "2.0", "result": "[TxHash,...]", "id": 1}
pub async fn tx_clean_pending(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if !params.is_empty() {
return JsonError::new(InvalidParams, None, id).into()
}
if !*self.validator.synced.read().await {
error!(target: "darkfid::rpc::tx_clean_pending", "Blockchain is not synced");
return server_error(RpcError::NotSynced, id, None)
}
let pending_txs = match self.validator.blockchain.get_pending_txs() {
Ok(v) => v,
Err(e) => {
error!(target: "darkfid::rpc::tx_clean_pending", "Failed fetching pending txs: {}", e);
return JsonError::new(InternalError, None, id).into()
}
};
if let Err(e) = self.validator.blockchain.remove_pending_txs(&pending_txs) {
error!(target: "darkfid::rpc::tx_clean_pending", "Failed removing pending txs: {}", e);
return JsonError::new(InternalError, None, id).into()
};
let pending_txs: Vec<JsonValue> =
pending_txs.iter().map(|x| JsonValue::String(x.hash().unwrap().to_string())).collect();
JsonResponse::new(JsonValue::Array(pending_txs), id).into()
}
}
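Since these endpoints now take base64 rather than base58, a client must encode the serialized transaction accordingly before placing it in params[0]. A minimal client-side sketch, reusing the same serialize/base64 helpers this file imports (the function name is hypothetical):

use darkfi::{tx::Transaction, util::encoding::base64};
use darkfi_serial::serialize_async;

// Hypothetical client-side helper: produce the string that goes into
// params[0] of tx.simulate or tx.broadcast.
async fn encode_tx_param(tx: &Transaction) -> String {
    base64::encode(&serialize_async(tx).await)
}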

View File

@@ -1,441 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
/*
use log::{debug, error};
use serde_json::{json, Value};
use darkfi::{
rpc::jsonrpc::{
ErrorCode::{InternalError, InvalidParams, ParseError},
JsonError, JsonResponse, JsonResult,
},
wallet::walletdb::QueryType,
};
use super::{error::RpcError, server_error, Darkfid};
*/
use darkfi::rpc::jsonrpc::JsonResult;
use tinyjson::JsonValue;
use super::Darkfid;
impl Darkfid {
// RPCAPI:
// Attempts to query for a single row in a given table.
// The parameters given contain paired metadata so we know how to decode the SQL data.
// An example of `params` is as such:
// ```
// params[0] -> "sql query"
// params[1] -> column_type
// params[2] -> "column_name"
// ...
// params[n-1] -> column_type
// params[n] -> "column_name"
// ```
// This function will fetch the first row it finds, if any. The `column_type` field
// is a type available in the `WalletDb` API as an enum called `QueryType`. If a row
// is not found, the returned result will be a JSON-RPC error.
// NOTE: This is obviously vulnerable to SQL injection. Open to interesting solutions.
//
// --> {"jsonrpc": "2.0", "method": "wallet.query_row_single", "params": [...], "id": 1}
// <-- {"jsonrpc": "2.0", "result": ["va", "lu", "es", ...], "id": 1}
pub async fn wallet_query_row_single(&self, _id: u16, _params: JsonValue) -> JsonResult {
todo!();
/* TODO: This will be abstracted away
// We need at least 3 params for something we want to fetch, and we want them in pairs.
// Also the first param should be a String
if params.len() < 3 || params[1..].len() % 2 != 0 || !params[0].is_string() {
return JsonError::new(InvalidParams, None, id).into()
}
// The remaining pairs should be typed properly too
let mut types: Vec<QueryType> = vec![];
let mut names: Vec<&str> = vec![];
for pair in params[1..].chunks(2) {
if !pair[0].is_u64() || !pair[1].is_string() {
return JsonError::new(InvalidParams, None, id).into()
}
let typ = pair[0].as_u64().unwrap();
if typ >= QueryType::Last as u64 {
return JsonError::new(InvalidParams, None, id).into()
}
types.push((typ as u8).into());
names.push(pair[1].as_str().unwrap());
}
// Get a wallet connection
let mut conn = match self.wallet.conn.acquire().await {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.query_row_single: Failed to acquire wallet connection: {}", e);
return JsonError::new(InternalError, None, id).into()
}
};
// Execute the query and see if we find a row
let row = match sqlx::query(params[0].as_str().unwrap()).fetch_one(&mut conn).await {
Ok(v) => Some(v),
Err(_) => None,
};
// Try to decode the row into what was requested
let mut ret: Vec<Value> = vec![];
for (typ, col) in types.iter().zip(names) {
match typ {
QueryType::Integer => {
let Some(ref row) = row else {
error!("[RPC] wallet.query_row_single: Got None for QueryType::Integer");
return server_error(RpcError::NoRowsFoundInWallet, id, None)
};
let value: i32 = match row.try_get(col) {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.query_row_single: {}", e);
return JsonError::new(ParseError, None, id).into()
}
};
ret.push(json!(value));
continue
}
QueryType::Blob => {
let Some(ref row) = row else {
error!("[RPC] wallet.query_row_single: Got None for QueryType::Blob");
return server_error(RpcError::NoRowsFoundInWallet, id, None)
};
let value: Vec<u8> = match row.try_get(col) {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.query_row_single: {}", e);
return JsonError::new(ParseError, None, id).into()
}
};
ret.push(json!(value));
continue
}
QueryType::OptionInteger => {
let Some(ref row) = row else {
ret.push(json!(None::<i32>));
continue
};
let value: i32 = match row.try_get(col) {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.query_row_single: {}", e);
return JsonError::new(ParseError, None, id).into()
}
};
ret.push(json!(value));
continue
}
QueryType::OptionBlob => {
let Some(ref row) = row else {
ret.push(json!(None::<Vec<u8>>));
continue
};
let value: Vec<u8> = match row.try_get(col) {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.query_row_single: {}", e);
return JsonError::new(ParseError, None, id).into()
}
};
ret.push(json!(value));
continue
}
QueryType::Text => {
let Some(ref row) = row else {
error!("[RPC] wallet.query_row_single: Got None for QueryType::Text");
return server_error(RpcError::NoRowsFoundInWallet, id, None)
};
let value: String = match row.try_get(col) {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.query_row_single: {}", e);
return JsonError::new(ParseError, None, id).into()
}
};
ret.push(json!(value));
continue
}
_ => unreachable!(),
}
}
JsonResponse::new(json!(ret), id).into()
*/
}
// RPCAPI:
// Attempts to query for all available rows in a given table.
// The parameters given contain paired metadata so we know how to decode the SQL data.
// They're the same as above in `wallet.query_row_single`.
// If there are any values found, they will be returned in a paired array. If not, an
// empty array will be returned.
//
// --> {"jsonrpc": "2.0", "method": "wallet.query_row_multi", "params": [...], "id": 1}
// <-- {"jsonrpc": "2.0", "result": [["va", "lu"], ["es", "es"], ...], "id": 1}
pub async fn wallet_query_row_multi(&self, _id: u16, _params: JsonValue) -> JsonResult {
todo!();
/* TODO: This will be abstracted away
// We need at least 3 params for something we want to fetch, and we want them in pairs.
// Also the first param (the query) should be a String.
if params.len() < 3 || params[1..].len() % 2 != 0 || !params[0].is_string() {
return JsonError::new(InvalidParams, None, id).into()
}
// The remaining pairs should be typed properly too
let mut types: Vec<QueryType> = vec![];
let mut names: Vec<&str> = vec![];
for pair in params[1..].chunks(2) {
if !pair[0].is_u64() || !pair[1].is_string() {
return JsonError::new(InvalidParams, None, id).into()
}
let typ = pair[0].as_u64().unwrap();
if typ >= QueryType::Last as u64 {
return JsonError::new(InvalidParams, None, id).into()
}
types.push((typ as u8).into());
names.push(pair[1].as_str().unwrap());
}
// Get a wallet connection
let mut conn = match self.wallet.conn.acquire().await {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.query_row_multi: Failed to acquire wallet connection: {}", e);
return JsonError::new(InternalError, None, id).into()
}
};
// Execute the query and see if we find any rows
let rows = match sqlx::query(params[0].as_str().unwrap()).fetch_all(&mut conn).await {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.query_row_multi: Failed to execute SQL query: {}", e);
return JsonError::new(InternalError, None, id).into()
}
};
debug!("[RPC] wallet.query_row_multi: Found {} rows", rows.len());
// Try to decode whatever we've found
let mut ret: Vec<Vec<Value>> = vec![];
for row in rows {
let mut row_ret: Vec<Value> = vec![];
for (typ, col) in types.iter().zip(names.clone()) {
match typ {
QueryType::Integer => {
let value: i32 = match row.try_get(col) {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.query_row_multi: {}", e);
return JsonError::new(ParseError, None, id).into()
}
};
row_ret.push(json!(value));
}
QueryType::Blob => {
let value: Vec<u8> = match row.try_get(col) {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.query_row_multi: {}", e);
return JsonError::new(ParseError, None, id).into()
}
};
row_ret.push(json!(value));
}
QueryType::OptionInteger => {
let value: Option<i32> = match row.try_get(col) {
Ok(v) => Some(v),
Err(_) => None,
};
row_ret.push(json!(value));
}
QueryType::OptionBlob => {
let value: Option<Vec<u8>> = match row.try_get(col) {
Ok(v) => Some(v),
Err(_) => None,
};
row_ret.push(json!(value));
}
QueryType::Text => {
let value: String = match row.try_get(col) {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.query_row_multi: {}", e);
return JsonError::new(ParseError, None, id).into()
}
};
row_ret.push(json!(value));
}
_ => unreachable!(),
}
}
ret.push(row_ret);
}
JsonResponse::new(json!(ret), id).into()
*/
}
// RPCAPI:
// Executes an arbitrary SQL query on the wallet, and returns `true` on success.
// `params[1..]` can optionally be provided in pairs like in `wallet.query_row_single`.
//
// --> {"jsonrpc": "2.0", "method": "wallet.exec_sql", "params": ["CREATE TABLE ..."], "id": 1}
// <-- {"jsonrpc": "2.0", "result": true, "id": 1}
pub async fn wallet_exec_sql(&self, _id: u16, _params: JsonValue) -> JsonResult {
todo!();
/* TODO: This will be abstracted away
if params.is_empty() || !params[0].is_string() {
return JsonError::new(InvalidParams, None, id).into()
}
if params.len() > 1 && params[1..].len() % 2 != 0 {
return JsonError::new(InvalidParams, None, id).into()
}
let query = params[0].as_str().unwrap();
debug!("Executing SQL query: {}", query);
let mut query = sqlx::query(query);
for pair in params[1..].chunks(2) {
if !pair[0].is_u64() || pair[0].as_u64().unwrap() >= QueryType::Last as u64 {
return JsonError::new(InvalidParams, None, id).into()
}
let typ = (pair[0].as_u64().unwrap() as u8).into();
match typ {
QueryType::Integer => {
let val: i32 = match serde_json::from_value(pair[1].clone()) {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.exec_sql: Failed casting value to i32: {}", e);
return JsonError::new(ParseError, None, id).into()
}
};
query = query.bind(val);
}
QueryType::Blob => {
let val: Vec<u8> = match serde_json::from_value(pair[1].clone()) {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.exec_sql: Failed casting value to Vec<u8>: {}", e);
return JsonError::new(ParseError, None, id).into()
}
};
query = query.bind(val);
}
QueryType::OptionInteger => {
let val: Option<i32> = match serde_json::from_value(pair[1].clone()) {
Ok(v) => v,
Err(e) => {
error!(
"[RPC] wallet.exec_sql: Failed casting value to Option<i32>: {}",
e
);
return JsonError::new(ParseError, None, id).into()
}
};
query = query.bind(val);
}
QueryType::OptionBlob => {
let val: Option<Vec<u8>> = match serde_json::from_value(pair[1].clone()) {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.exec_sql: Failed casting value to Option<Vec<u8>>: {}", e);
return JsonError::new(ParseError, None, id).into()
}
};
query = query.bind(val);
}
QueryType::Text => {
let val: String = match serde_json::from_value(pair[1].clone()) {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.exec_sql: Failed casting value to String: {}", e);
return JsonError::new(ParseError, None, id).into()
}
};
query = query.bind(val);
}
_ => return JsonError::new(InvalidParams, None, id).into(),
}
}
// Get a wallet connection
let mut conn = match self.wallet.conn.acquire().await {
Ok(v) => v,
Err(e) => {
error!("[RPC] wallet.exec_sql: Failed to acquire wallet connection: {}", e);
return JsonError::new(InternalError, None, id).into()
}
};
if let Err(e) = query.execute(&mut conn).await {
error!("[RPC] wallet.exec_sql: Failed to execute sql query: {}", e);
return JsonError::new(InternalError, None, id).into()
};
JsonResponse::new(json!(true), id).into()
*/
}
}
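For reference, the paired-parameter layout described in the doc comments above could be built on the client side roughly as follows. This is a sketch only: the numeric value of a QueryType variant is left as a caller-supplied placeholder, since the enum's discriminants are not shown here.

use tinyjson::JsonValue;

// Hypothetical sketch of the params layout for wallet.query_row_single:
// [sql, column_type, column_name, ...], where column_type stands in for
// whatever integer the intended QueryType variant maps to.
fn build_query_params(sql: &str, column_type: u64, column_name: &str) -> JsonValue {
    JsonValue::Array(vec![
        JsonValue::String(sql.to_string()),
        JsonValue::Number(column_type as f64),
        JsonValue::String(column_name.to_string()),
    ])
}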

View File

@@ -29,7 +29,6 @@ use darkfi::{
zkas::ZkBinary,
Result,
};
use darkfi_consensus_contract::model::SECRET_KEY_PREFIX;
use darkfi_money_contract::{
client::pow_reward_v1::PoWRewardCallBuilder, MoneyFunction, MONEY_CONTRACT_ZKAS_MINT_NS_V1,
};
@@ -161,7 +160,8 @@ async fn generate_next_block(
// We are deriving the next secret key for optimization.
// Next secret is the poseidon hash of:
// [prefix, current(previous) secret, signing(block) height].
let next_secret = poseidon_hash([SECRET_KEY_PREFIX, secret.inner(), height.into()]);
let prefix = pallas::Base::from_raw([4, 0, 0, 0]);
let next_secret = poseidon_hash([prefix, secret.inner(), height.into()]);
*secret = SecretKey::from(next_secret);
// Generate reward transaction
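Since SECRET_KEY_PREFIX came from the removed consensus contract, the prefix value is now inlined. A self-contained sketch of the same derivation, assuming darkfi_sdk re-exports pasta_curves as `pasta` and exposes `poseidon_hash` (paths may differ):

use darkfi_sdk::{
    crypto::{poseidon_hash, SecretKey},
    pasta::pallas,
};

// Derive the next block-signing secret from the previous one and the
// signing (block) height, using the inlined prefix value 4.
fn next_block_secret(secret: &SecretKey, height: u64) -> SecretKey {
    let prefix = pallas::Base::from_raw([4, 0, 0, 0]);
    SecretKey::from(poseidon_hash([prefix, secret.inner(), height.into()]))
}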

View File

@@ -36,7 +36,7 @@ use num_bigint::BigUint;
use url::Url;
use crate::{
proto::BlockInfoMessage,
// proto::BlockInfoMessage,
task::sync::sync_task,
utils::{spawn_consensus_p2p, spawn_sync_p2p},
Darkfid,
@@ -65,16 +65,13 @@ impl Harness {
ex: &Arc<smol::Executor<'static>>,
) -> Result<Self> {
// Use test harness to generate genesis transactions
let mut th =
TestHarness::new(&["money".to_string(), "consensus".to_string()], verify_fees).await?;
let (genesis_stake_tx, _) = th.genesis_stake(&Holder::Alice, config.alice_initial)?;
let mut th = TestHarness::new(&["money".to_string()], verify_fees).await?;
let (genesis_mint_tx, _) = th.genesis_mint(&Holder::Bob, config.bob_initial)?;
// Generate default genesis block
let mut genesis_block = BlockInfo::default();
// Append genesis transactions and calculate their total
genesis_block.txs.push(genesis_stake_tx);
genesis_block.txs.push(genesis_mint_tx);
let genesis_txs_total = genesis_txs_total(&genesis_block.txs).await?;
genesis_block.slots[0].total_tokens = genesis_txs_total;
@@ -171,12 +168,12 @@ impl Harness {
pub async fn add_blocks(&self, blocks: &[BlockInfo]) -> Result<()> {
// We simply broadcast the block using Alice's sync P2P
for block in blocks {
self.alice.sync_p2p.broadcast(&BlockInfoMessage::from(block)).await;
for _block in blocks {
//self.alice.sync_p2p.broadcast(&BlockInfoMessage::from(block)).await;
}
// and then add it to her chain
self.alice.validator.add_blocks(blocks).await?;
//self.alice.validator.add_blocks(blocks).await?;
Ok(())
}

View File

@@ -1,44 +0,0 @@
[package]
name = "darkfid2"
version = "0.4.1"
homepage = "https://dark.fi"
description = "DarkFi node daemon"
authors = ["Dyne.org foundation <foundation@dyne.org>"]
repository = "https://github.com/darkrenaissance/darkfi"
license = "AGPL-3.0-only"
edition = "2021"
[dependencies]
# Darkfi
darkfi = {path = "../../", features = ["async-daemonize", "bs58"]}
darkfi_consensus_contract = {path = "../../src/contract/consensus"}
darkfi_money_contract = {path = "../../src/contract/money"}
darkfi-contract-test-harness = {path = "../../src/contract/test-harness"}
darkfi-sdk = {path = "../../src/sdk"}
darkfi-serial = {path = "../../src/serial"}
# Misc
blake3 = "1.5.0"
bs58 = "0.5.0"
log = "0.4.20"
num-bigint = "0.4.4"
rand = "0.8.5"
sled = "0.34.7"
toml = "0.8.8"
# JSON-RPC
async-trait = "0.1.77"
tinyjson = "2.5.1"
url = "2.5.0"
# Daemon
easy-parallel = "3.3.1"
signal-hook-async-std = "0.2.2"
signal-hook = "0.3.17"
simplelog = "0.12.1"
smol = "1.3.0"
# Argument parsing
serde = {version = "1.0.195", features = ["derive"]}
structopt = "0.3.26"
structopt-toml = "0.5.1"

View File

@@ -1,42 +0,0 @@
.POSIX:
# Install prefix
PREFIX = $(HOME)/.cargo
# Cargo binary
CARGO = cargo +nightly
# Compile target
RUST_TARGET = $(shell rustc -Vv | grep '^host: ' | cut -d' ' -f2)
# Uncomment when doing musl static builds
#RUSTFLAGS = -C target-feature=+crt-static -C link-self-contained=yes
SRC = \
Cargo.toml \
../../Cargo.toml \
$(shell find src -type f -name '*.rs') \
$(shell find ../../src -type f -name '*.rs') \
$(shell find ../../src/contract -type f -name '*.wasm')
BIN = $(shell grep '^name = ' Cargo.toml | cut -d' ' -f3 | tr -d '"')
all: $(BIN)
$(BIN): $(SRC)
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) build --target=$(RUST_TARGET) --release --package $@
cp -f ../../target/$(RUST_TARGET)/release/$@ $@
cp -f ../../target/$(RUST_TARGET)/release/$@ ../../$@
clean:
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) clean --target=$(RUST_TARGET) --release --package $(BIN)
rm -f $(BIN) ../../$(BIN)
install: all
mkdir -p $(DESTDIR)$(PREFIX)/bin
cp -f $(BIN) $(DESTDIR)$(PREFIX)/bin
chmod 755 $(DESTDIR)$(PREFIX)/bin/$(BIN)
uninstall:
rm -f $(DESTDIR)$(PREFIX)/bin/$(BIN)
.PHONY: all clean install uninstall

View File

@@ -1,475 +0,0 @@
## darkfid configuration file
##
## Please make sure you go through all the settings so you can configure
## your daemon properly.
##
## The default values are left commented. They can be overridden either by
## uncommenting, or by using the command-line.
# JSON-RPC listen URL
rpc_listen = "tcp://127.0.0.1:8340"
# Blockchain network to use
network = "testnet"
# Localnet blockchain network configuration
[network_config."localnet"]
# Path to the blockchain database directory
database = "~/.local/darkfi/darkfid_blockchain_localnet"
# Finalization threshold, denominated by number of blocks
threshold = 3
# minerd JSON-RPC endpoint
minerd_endpoint = "tcp://127.0.0.1:28467"
# PoW block production target, in seconds
pow_target = 10
# Optional fixed PoW difficulty, used for testing
pow_fixed_difficulty = 1
# Epoch duration, denominated by number of blocks/slots
epoch_length = 10
# PoS slot duration, in seconds
slot_time = 10
# Whitelisted faucet addresses
faucet_pub = []
# Participate in the consensus protocol
consensus = true
# Wallet address to receive consensus rewards.
# This is a dummy one so the miner can start,
# replace with your own one.
recipient = "5ZHfYpt4mpJcwBNxfEyxLzeFJUEeoePs5NQ5jVEgHrMf"
# Skip syncing process and start node right away
skip_sync = true
# Enable PoS testing mode for local testing
pos_testing_mode = true
## Localnet sync P2P network settings
[network_config."localnet".sync_net]
# P2P accept addresses the instance listens on for inbound connections
inbound = ["tcp+tls://0.0.0.0:8242"]
# P2P external addresses the instance advertises so other peers can
# reach us and connect to us, as long as inbound addrs are configured.
#external_addrs = []
# Peer nodes to manually connect to
#peers = []
# Seed nodes to connect to for peer discovery and/or advertising our
# own external addresses
#seeds = []
# Whitelisted network transports for outbound connections
#allowed_transports = ["tcp+tls"]
# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
#transport_mixing = true
# Outbound connection slots number, this many connections will be
# attempted. (This does not include manual connections)
#outbound_connections = 8
# Inbound connections slots number, this many active inbound connections
# will be allowed. (This does not include manual or outbound connections)
#inbound_connections = 0
# Manual connections retry limit, 0 for forever looping
#manual_attempt_limit = 0
# Outbound connection timeout (in seconds)
#outbound_connect_timeout = 10
# Exchange versions (handshake) timeout (in seconds)
#channel_handshake_timeout = 4
# Ping-pong exchange execution interval (in seconds)
#channel_heartbeat_interval = 10
# Allow localnet hosts
localnet = true
# Delete a peer from hosts if they've been quarantined N times
#hosts_quarantine_limit = 50
# Cooling off time for peer discovery when unsuccessful
#outbound_peer_discovery_cooloff_time = 30
# Time between peer discovery attempts
#outbound_peer_discovery_attempt_time = 5
## Localnet consensus P2P network settings
[network_config."localnet".consensus_net]
# P2P accept addresses the instance listens on for inbound connections
#inbound = ["tcp+tls://0.0.0.0:8241"]
# P2P external addresses the instance advertises so other peers can
# reach us and connect to us, as long as inbound addrs are configured.
#external_addrs = []
# Peer nodes to manually connect to
#peers = []
# Seed nodes to connect to for peer discovery and/or advertising our
# own external addresses
#seeds = []
# Whitelisted network transports for outbound connections
#allowed_transports = ["tcp+tls"]
# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
#transport_mixing = true
# Outbound connection slots number, this many connections will be
# attempted. (This does not include manual connections)
#outbound_connections = 8
# Manual connections retry limit, 0 for forever looping
#manual_attempt_limit = 0
# Outbound connection timeout (in seconds)
#outbound_connect_timeout = 10
# Exchange versions (handshake) timeout (in seconds)
#channel_handshake_timeout = 4
# Ping-pong exchange execution interval (in seconds)
#channel_heartbeat_interval = 10
# Allow localnet hosts
localnet = true
# Delete a peer from hosts if they've been quarantined N times
#hosts_quarantine_limit = 50
# Cooling off time for peer discovery when unsuccessful
#outbound_peer_discovery_cooloff_time = 30
# Time between peer discovery attempts
#outbound_peer_discovery_attempt_time = 5
# Testnet blockchain network configuration
[network_config."testnet"]
# Path to the blockchain database directory
database = "~/.local/darkfi/darkfid_blockchain_testnet"
# Finalization threshold, denominated by number of blocks
threshold = 6
# minerd JSON-RPC endpoint
minerd_endpoint = "tcp://127.0.0.1:28467"
# PoW block production target, in seconds
pow_target = 90
# Epoch duration, denominated by number of blocks/slots
epoch_length = 10
# PoS slot duration, in seconds
slot_time = 90
# Whitelisted faucet addresses
faucet_pub = ["3ce5xa3PjuQGFtTaF7AvMJp7fGxqeGRJx7zj3LCwNCkP"]
# Participate in the consensus protocol
consensus = false
# Wallet address to receive consensus rewards
#recipient = "YOUR_WALLET_ADDRESS_HERE"
# Skip syncing process and start node right away
skip_sync = false
# Enable PoS testing mode for local testing
pos_testing_mode = false
## Testnet sync P2P network settings
[network_config."testnet".sync_net]
# P2P accept addresses the instance listens on for inbound connections
# You can also use an IPv6 address
inbound = ["tcp+tls://0.0.0.0:8342"]
# IPv6 version:
#inbound = ["tcp+tls://[::]:8342"]
# Combined:
#inbound = ["tcp+tls://0.0.0.0:8342", "tcp+tls://[::]:8342"]
# P2P external addresses the instance advertises so other peers can
# reach us and connect to us, as long as inbound addrs are configured.
# You can also use an IPv6 address
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8342"]
# IPv6 version:
#external_addrs = ["tcp+tls://[ipv6 address here]:8342"]
# Combined:
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8342", "tcp+tls://[ipv6 address here]:8342"]
# Peer nodes to manually connect to
#peers = []
# Seed nodes to connect to for peer discovery and/or advertising our
# own external addresses
seeds = ["tcp+tls://lilith0.dark.fi:8342", "tcp+tls://lilith1.dark.fi:8342"]
# Whitelisted network transports for outbound connections
allowed_transports = ["tcp+tls"]
# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
#transport_mixing = true
# Outbound connection slots number, this many connections will be
# attempted. (This does not include manual connections)
outbound_connections = 8
# Inbound connections slots number, this many active inbound connections
# will be allowed. (This does not include manual or outbound connections)
#inbound_connections = 0
# Manual connections retry limit, 0 for forever looping
#manual_attempt_limit = 0
# Outbound connection timeout (in seconds)
#outbound_connect_timeout = 10
# Exchange versions (handshake) timeout (in seconds)
#channel_handshake_timeout = 4
# Ping-pong exchange execution interval (in seconds)
#channel_heartbeat_interval = 10
# Allow localnet hosts
localnet = false
# Delete a peer from hosts if they've been quarantined N times
#hosts_quarantine_limit = 50
# Cooling off time for peer discovery when unsuccessful
#outbound_peer_discovery_cooloff_time = 30
# Time between peer discovery attempts
#outbound_peer_discovery_attempt_time = 5
## Testnet consensus P2P network settings
[network_config."testnet".consensus_net]
# P2P accept addresses the instance listens on for inbound connections
# You can also use an IPv6 address
inbound = ["tcp+tls://0.0.0.0:8341"]
# IPv6 version:
#inbound = ["tcp+tls://[::]:8341"]
# Combined:
#inbound = ["tcp+tls://0.0.0.0:8341", "tcp+tls://[::]:8341"]
# P2P external addresses the instance advertises so other peers can
# reach us and connect to us, as long as inbound addrs are configured.
# You can also use an IPv6 address
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8341"]
# IPv6 version:
#external_addrs = ["tcp+tls://[ipv6 address here]:8341"]
# Combined:
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8341", "tcp+tls://[ipv6 address here]:8341"]
# Peer nodes to manually connect to
#peers = []
# Seed nodes to connect to for peer discovery and/or advertising our
# own external addresses
seeds = ["tcp+tls://lilith0.dark.fi:8341", "tcp+tls://lilith1.dark.fi:8341"]
# Whitelisted network transports for outbound connections
allowed_transports = ["tcp+tls"]
# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
#transport_mixing = true
# Outbound connection slots number, this many connections will be
# attempted. (This does not include manual connections)
#outbound_connections = 8
# Manual connections retry limit, 0 for forever looping
#manual_attempt_limit = 0
# Outbound connection timeout (in seconds)
#outbound_connect_timeout = 10
# Exchange versions (handshake) timeout (in seconds)
#channel_handshake_timeout = 4
# Ping-pong exchange execution interval (in seconds)
#channel_heartbeat_interval = 10
# Allow localnet hosts
localnet = false
# Delete a peer from hosts if they've been quarantined N times
#hosts_quarantine_limit = 50
# Cooling off time for peer discovery when unsuccessful
#outbound_peer_discovery_cooloff_time = 30
# Time between peer discovery attempts
#outbound_peer_discovery_attempt_time = 5
# Mainnet blockchain network configuration
[network_config."mainnet"]
# Path to the blockchain database directory
database = "~/.local/darkfi/darkfid_blockchain_mainnet"
# Finalization threshold, denominated by number of blocks
threshold = 11
# minerd JSON-RPC endpoint
minerd_endpoint = "tcp://127.0.0.1:28467"
# PoW block production target, in seconds
pow_target = 90
# Epoch duration, denominated by number of blocks/slots
epoch_length = 10
# PoS slot duration, in seconds
slot_time = 90
# Whitelisted faucet addresses
faucet_pub = []
# Participate in the consensus protocol
consensus = false
# Wallet address to receive consensus rewards
#recipient = "YOUR_WALLET_ADDRESS_HERE"
# Skip syncing process and start node right away
skip_sync = false
# Enable PoS testing mode for local testing
pos_testing_mode = false
## Mainnet sync P2P network settings
[network_config."mainnet".sync_net]
# P2P accept addresses the instance listens on for inbound connections
# You can also use an IPv6 address
inbound = ["tcp+tls://0.0.0.0:8442"]
# IPv6 version:
#inbound = ["tcp+tls://[::]:8442"]
# Combined:
#inbound = ["tcp+tls://0.0.0.0:8442", "tcp+tls://[::]:8442"]
# P2P external addresses the instance advertises so other peers can
# reach us and connect to us, as long as inbound addrs are configured.
# You can also use an IPv6 address
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8442"]
# IPv6 version:
#external_addrs = ["tcp+tls://[ipv6 address here]:8442"]
# Combined:
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8442", "tcp+tls://[ipv6 address here]:8442"]
# Peer nodes to manually connect to
#peers = []
# Seed nodes to connect to for peer discovery and/or advertising our
# own external addresses
seeds = ["tcp+tls://lilith0.dark.fi:8442", "tcp+tls://lilith1.dark.fi:8442"]
# Whitelisted network transports for outbound connections
allowed_transports = ["tcp+tls"]
# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
#transport_mixing = true
# Outbound connection slots number, this many connections will be
# attempted. (This does not include manual connections)
outbound_connections = 8
# Inbound connections slots number, this many active inbound connections
# will be allowed. (This does not include manual or outbound connections)
#inbound_connections = 0
# Manual connections retry limit, 0 for forever looping
#manual_attempt_limit = 0
# Outbound connection timeout (in seconds)
#outbound_connect_timeout = 10
# Exchange versions (handshake) timeout (in seconds)
#channel_handshake_timeout = 4
# Ping-pong exchange execution interval (in seconds)
#channel_heartbeat_interval = 10
# Allow localnet hosts
localnet = false
# Delete a peer from hosts if they've been quarantined N times
#hosts_quarantine_limit = 50
# Cooling off time for peer discovery when unsuccessful
#outbound_peer_discovery_cooloff_time = 30
# Time between peer discovery attempts
#outbound_peer_discovery_attempt_time = 5
## Mainnet consensus P2P network settings
[network_config."mainnet".consensus_net]
# P2P accept addresses the instance listens on for inbound connections
# You can also use an IPv6 address
inbound = ["tcp+tls://0.0.0.0:8441"]
# IPv6 version:
#inbound = ["tcp+tls://[::]:8441"]
# Combined:
#inbound = ["tcp+tls://0.0.0.0:8441", "tcp+tls://[::]:8441"]
# P2P external addresses the instance advertises so other peers can
# reach us and connect to us, as long as inbound addrs are configured.
# You can also use an IPv6 address
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8441"]
# IPv6 version:
#external_addrs = ["tcp+tls://[ipv6 address here]:8441"]
# Combined:
#external_addrs = ["tcp+tls://XXX.XXX.XXX.XXX:8441", "tcp+tls://[ipv6 address here]:8441"]
# Peer nodes to manually connect to
#peers = []
# Seed nodes to connect to for peer discovery and/or advertising our
# own external addresses
seeds = ["tcp+tls://lilith0.dark.fi:8441", "tcp+tls://lilith1.dark.fi:8441"]
# Whitelisted network transports for outbound connections
allowed_transports = ["tcp+tls"]
# Allow transport mixing (e.g. Tor would be allowed to connect to `tcp://`)
#transport_mixing = true
# Outbound connection slots number, this many connections will be
# attempted. (This does not include manual connections)
#outbound_connections = 8
# Manual connections retry limit, 0 for forever looping
#manual_attempt_limit = 0
# Outbound connection timeout (in seconds)
#outbound_connect_timeout = 10
# Exchange versions (handshake) timeout (in seconds)
#channel_handshake_timeout = 4
# Ping-pong exchange execution interval (in seconds)
#channel_heartbeat_interval = 10
# Allow localnet hosts
localnet = false
# Delete a peer from hosts if they've been quarantined N times
#hosts_quarantine_limit = 50
# Cooling off time for peer discovery when unsuccessful
#outbound_peer_discovery_cooloff_time = 30
# Time between peer discovery attempts
#outbound_peer_discovery_attempt_time = 5

View File

@@ -1,69 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi::rpc::jsonrpc::{ErrorCode::ServerError, JsonError, JsonResult};
/// Custom RPC errors available for darkfid.
/// Please sort them sensibly.
pub enum RpcError {
// Transaction-related errors
TxSimulationFail = -32110,
TxBroadcastFail = -32111,
// State-related errors,
NotSynced = -32120,
UnknownSlot = -32121,
// Parsing errors
ParseError = -32190,
// Contract-related errors
ContractZkasDbNotFound = -32200,
// Misc errors
PingFailed = -32300,
}
fn to_tuple(e: RpcError) -> (i32, String) {
let msg = match e {
// Transaction-related errors
RpcError::TxSimulationFail => "Failed simulating transaction state change",
RpcError::TxBroadcastFail => "Failed broadcasting transaction",
// State-related errors
RpcError::NotSynced => "Blockchain is not synced",
RpcError::UnknownSlot => "Did not find slot",
// Parsing errors
RpcError::ParseError => "Parse error",
// Contract-related errors
RpcError::ContractZkasDbNotFound => "zkas database not found for given contract",
// Misc errors
RpcError::PingFailed => "Miner daemon ping error",
};
(e as i32, msg.to_string())
}
pub fn server_error(e: RpcError, id: u16, msg: Option<&str>) -> JsonResult {
let (code, default_msg) = to_tuple(e);
if let Some(message) = msg {
return JsonError::new(ServerError(code), Some(message.to_string()), id).into()
}
JsonError::new(ServerError(code), Some(default_msg), id).into()
}
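A minimal sketch of how a handler uses these errors, matching the pattern in the RPC modules above (the guard function is hypothetical; server_error and RpcError are the items defined in this file):

use darkfi::rpc::jsonrpc::JsonResult;

// Hypothetical handler fragment: bail out with a typed darkfid RPC error
// when the blockchain is not synced, otherwise let the caller continue.
fn not_synced_guard(synced: bool, id: u16) -> Option<JsonResult> {
    if !synced {
        return Some(server_error(RpcError::NotSynced, id, None))
    }
    None
}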

View File

@@ -1,419 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::{
collections::{HashMap, HashSet},
str::FromStr,
sync::Arc,
};
use log::{error, info};
use smol::{lock::Mutex, stream::StreamExt};
use structopt_toml::{serde::Deserialize, structopt::StructOpt, StructOptToml};
use url::Url;
use darkfi::{
async_daemonize,
blockchain::BlockInfo,
cli_desc,
net::{settings::SettingsOpt, P2pPtr},
rpc::{
client::RpcClient,
jsonrpc::JsonSubscriber,
server::{listen_and_serve, RequestHandler},
},
system::{StoppableTask, StoppableTaskPtr},
util::{path::expand_path, time::TimeKeeper},
validator::{utils::genesis_txs_total, Validator, ValidatorConfig, ValidatorPtr},
Error, Result,
};
use darkfi_sdk::crypto::PublicKey;
use darkfi_serial::deserialize_async;
#[cfg(test)]
mod tests;
mod error;
use error::{server_error, RpcError};
/// JSON-RPC requests handler and methods
mod rpc;
mod rpc_blockchain;
mod rpc_tx;
/// Validator async tasks
mod task;
use task::{miner_task, sync_task};
/// P2P net protocols
mod proto;
/// Utility functions
mod utils;
use utils::{parse_blockchain_config, spawn_consensus_p2p, spawn_sync_p2p};
const CONFIG_FILE: &str = "darkfid_config.toml";
const CONFIG_FILE_CONTENTS: &str = include_str!("../darkfid_config.toml");
/// Note:
/// If you change these, don't forget to remove their corresponding database folder,
/// since if it already contains a genesis block, the provided one is ignored.
const GENESIS_BLOCK_LOCALNET: &str = include_str!("../genesis_block_localnet");
const GENESIS_BLOCK_TESTNET: &str = include_str!("../genesis_block_testnet");
const GENESIS_BLOCK_MAINNET: &str = include_str!("../genesis_block_mainnet");
#[derive(Clone, Debug, Deserialize, StructOpt, StructOptToml)]
#[serde(default)]
#[structopt(name = "darkfid", about = cli_desc!())]
struct Args {
#[structopt(short, long)]
/// Configuration file to use
config: Option<String>,
#[structopt(short, long, default_value = "tcp://127.0.0.1:8340")]
/// JSON-RPC listen URL
rpc_listen: Url,
#[structopt(short, long, default_value = "testnet")]
/// Blockchain network to use
network: String,
#[structopt(short, long)]
/// Set log file to output into
log: Option<String>,
#[structopt(short, parse(from_occurrences))]
/// Increase verbosity (-vvv supported)
verbose: u8,
}
/// Defines a blockchain network configuration.
/// Default values correspond to a local network.
#[derive(Clone, Debug, serde::Deserialize, structopt::StructOpt, structopt_toml::StructOptToml)]
#[structopt()]
pub struct BlockchainNetwork {
#[structopt(long, default_value = "~/.local/darkfi/darkfid_blockchain_localnet")]
/// Path to blockchain database
pub database: String,
#[structopt(long, default_value = "3")]
/// Finalization threshold, denominated by number of blocks
pub threshold: usize,
#[structopt(long, default_value = "tcp://127.0.0.1:28467")]
/// minerd JSON-RPC endpoint
pub minerd_endpoint: Url,
#[structopt(long, default_value = "10")]
/// PoW block production target, in seconds
pub pow_target: usize,
#[structopt(long)]
/// Optional fixed PoW difficulty, used for testing
pub pow_fixed_difficulty: Option<usize>,
#[structopt(long, default_value = "10")]
/// Epoch duration, denominated by number of blocks/slots
pub epoch_length: u64,
#[structopt(long, default_value = "10")]
/// PoS slot duration, in seconds
pub slot_time: u64,
#[structopt(long)]
/// Whitelisted faucet public key (repeatable flag)
pub faucet_pub: Vec<String>,
#[structopt(long)]
/// Participate in the consensus protocol
pub consensus: bool,
#[structopt(long)]
/// Wallet address to receive consensus rewards
pub recipient: Option<String>,
#[structopt(long)]
/// Skip syncing process and start node right away
pub skip_sync: bool,
#[structopt(long)]
/// Enable PoS testing mode for local testing
pub pos_testing_mode: bool,
/// Syncing network settings
#[structopt(flatten)]
pub sync_net: SettingsOpt,
/// Consensus network settings
#[structopt(flatten)]
pub consensus_net: SettingsOpt,
}
/// Daemon structure
pub struct Darkfid {
/// Syncing P2P network pointer
sync_p2p: P2pPtr,
/// Optional consensus P2P network pointer
consensus_p2p: Option<P2pPtr>,
/// Validator(node) pointer
validator: ValidatorPtr,
/// A map of various subscribers exporting live info from the blockchain
subscribers: HashMap<&'static str, JsonSubscriber>,
/// JSON-RPC connection tracker
rpc_connections: Mutex<HashSet<StoppableTaskPtr>>,
/// JSON-RPC client to execute requests to the miner daemon
rpc_client: Option<RpcClient>,
}
impl Darkfid {
pub async fn new(
sync_p2p: P2pPtr,
consensus_p2p: Option<P2pPtr>,
validator: ValidatorPtr,
subscribers: HashMap<&'static str, JsonSubscriber>,
rpc_client: Option<RpcClient>,
) -> Self {
Self {
sync_p2p,
consensus_p2p,
validator,
subscribers,
rpc_connections: Mutex::new(HashSet::new()),
rpc_client,
}
}
}
async_daemonize!(realmain);
async fn realmain(args: Args, ex: Arc<smol::Executor<'static>>) -> Result<()> {
info!(target: "darkfid", "Initializing DarkFi node...");
// Grab blockchain network configuration
let (blockchain_config, genesis_block) = match args.network.as_str() {
"localnet" => {
(parse_blockchain_config(args.config, "localnet").await?, GENESIS_BLOCK_LOCALNET)
}
"testnet" => {
(parse_blockchain_config(args.config, "testnet").await?, GENESIS_BLOCK_TESTNET)
}
"mainnet" => {
(parse_blockchain_config(args.config, "mainnet").await?, GENESIS_BLOCK_MAINNET)
}
_ => {
error!("Unsupported chain `{}`", args.network);
return Err(Error::UnsupportedChain)
}
};
if blockchain_config.pos_testing_mode {
info!(target: "darkfid", "Node is configured to run in PoS testing mode!");
}
// Parse the genesis block
let bytes = bs58::decode(&genesis_block.trim()).into_vec()?;
let genesis_block: BlockInfo = deserialize_async(&bytes).await?;
// Initialize or open sled database
let db_path = expand_path(&blockchain_config.database)?;
let sled_db = sled::open(&db_path)?;
// Initialize validator configuration
let genesis_txs_total = genesis_txs_total(&genesis_block.txs).await?;
let time_keeper = TimeKeeper::new(
genesis_block.header.timestamp,
blockchain_config.epoch_length,
blockchain_config.slot_time,
0,
);
let pow_fixed_difficulty = if let Some(diff) = blockchain_config.pow_fixed_difficulty {
info!(target: "darkfid", "Node is configured to run with fixed PoW difficulty: {}", diff);
Some(diff.into())
} else {
None
};
let config = ValidatorConfig::new(
time_keeper,
blockchain_config.threshold,
blockchain_config.pow_target,
pow_fixed_difficulty,
genesis_block,
genesis_txs_total,
vec![],
blockchain_config.pos_testing_mode,
false, // TODO: Make configurable
);
// Initialize validator
let validator = Validator::new(&sled_db, config).await?;
// Here we initialize various subscribers that can export live blockchain/consensus data.
let mut subscribers = HashMap::new();
subscribers.insert("blocks", JsonSubscriber::new("blockchain.subscribe_blocks"));
subscribers.insert("txs", JsonSubscriber::new("blockchain.subscribe_txs"));
if blockchain_config.consensus {
subscribers.insert("proposals", JsonSubscriber::new("blockchain.subscribe_proposals"));
}
// Initialize syncing P2P network
let sync_p2p =
spawn_sync_p2p(&blockchain_config.sync_net.into(), &validator, &subscribers, ex.clone())
.await;
// Initialize consensus P2P network
let (consensus_p2p, rpc_client) = if blockchain_config.consensus {
let Ok(rpc_client) = RpcClient::new(blockchain_config.minerd_endpoint, ex.clone()).await
else {
error!(target: "darkfid", "Failed to initialize miner daemon rpc client, check if minerd is running");
return Err(Error::RpcClientStopped)
};
(
Some(
spawn_consensus_p2p(
&blockchain_config.consensus_net.into(),
&validator,
&subscribers,
ex.clone(),
)
.await,
),
Some(rpc_client),
)
} else {
(None, None)
};
// Initialize node
let darkfid = Darkfid::new(
sync_p2p.clone(),
consensus_p2p.clone(),
validator.clone(),
subscribers,
rpc_client,
)
.await;
let darkfid = Arc::new(darkfid);
info!(target: "darkfid", "Node initialized successfully!");
// Ping the minerd daemon to verify it is listening
if blockchain_config.consensus {
if let Err(e) = darkfid.ping_miner_daemon().await {
error!(target: "darkfid", "Failed to ping miner daemon: {}", e);
return Err(Error::RpcClientStopped)
}
}
// JSON-RPC server
info!(target: "darkfid", "Starting JSON-RPC server");
// Here we create a task variable so we can manually close the
// task later. P2P tasks don't need this since P2P has its own
// stop() function to shut down, which also terminates the task we
// created for it.
let rpc_task = StoppableTask::new();
let darkfid_ = darkfid.clone();
rpc_task.clone().start(
listen_and_serve(args.rpc_listen, darkfid.clone(), None, ex.clone()),
|res| async move {
match res {
Ok(()) | Err(Error::RpcServerStopped) => darkfid_.stop_connections().await,
Err(e) => error!(target: "darkfid", "Failed starting sync JSON-RPC server: {}", e),
}
},
Error::RpcServerStopped,
ex.clone(),
);
info!(target: "darkfid", "Starting sync P2P network");
sync_p2p.clone().start().await?;
// Consensus protocol
if blockchain_config.consensus {
info!(target: "darkfid", "Starting consensus P2P network");
let consensus_p2p = consensus_p2p.clone().unwrap();
consensus_p2p.clone().start().await?;
} else {
info!(target: "darkfid", "Not starting consensus P2P network");
}
// Sync blockchain
if !blockchain_config.skip_sync {
sync_task(&darkfid).await?;
} else {
*darkfid.validator.synced.write().await = true;
}
// Clean node pending transactions
darkfid.validator.purge_pending_txs().await?;
// Consensus protocol
let consensus_task = if blockchain_config.consensus {
info!(target: "darkfid", "Starting consensus protocol task");
// Grab rewards recipient public key(address)
if blockchain_config.recipient.is_none() {
return Err(Error::ParseFailed("Recipient address missing"))
}
let recipient = match PublicKey::from_str(&blockchain_config.recipient.unwrap()) {
Ok(address) => address,
Err(_) => return Err(Error::InvalidAddress),
};
let task = StoppableTask::new();
task.clone().start(
// Weird hack to prevent lifetimes hell
async move { miner_task(&darkfid, &recipient).await },
|res| async {
match res {
Ok(()) | Err(Error::MinerTaskStopped) => { /* Do nothing */ }
Err(e) => error!(target: "darkfid", "Failed starting miner task: {}", e),
}
},
Error::MinerTaskStopped,
ex.clone(),
);
Some(task)
} else {
info!(target: "darkfid", "Not participating in consensus");
None
};
// Signal handling for graceful termination.
let (signals_handler, signals_task) = SignalHandler::new(ex)?;
signals_handler.wait_termination(signals_task).await?;
info!(target: "darkfid", "Caught termination signal, cleaning up and exiting...");
info!(target: "darkfid", "Stopping JSON-RPC server...");
rpc_task.stop().await;
info!(target: "darkfid", "Stopping syncing P2P network...");
sync_p2p.stop().await;
if blockchain_config.consensus {
info!(target: "darkfid", "Stopping consensus P2P network...");
consensus_p2p.unwrap().stop().await;
info!(target: "darkfid", "Stopping consensus task...");
consensus_task.unwrap().stop().await;
}
info!(target: "darkfid", "Flushing sled database...");
let flushed_bytes = sled_db.flush_async().await?;
info!(target: "darkfid", "Flushed {} bytes", flushed_bytes);
Ok(())
}

View File

@@ -1,292 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::{collections::HashMap, str::FromStr};
use darkfi_sdk::crypto::ContractId;
use darkfi_serial::{deserialize_async, serialize_async};
use log::{debug, error};
use tinyjson::JsonValue;
use darkfi::{
blockchain::contract_store::SMART_CONTRACT_ZKAS_DB_NAME,
rpc::jsonrpc::{
ErrorCode::{InternalError, InvalidParams, ParseError},
JsonError, JsonResponse, JsonResult,
},
util::encoding::base64,
};
use crate::{server_error, Darkfid, RpcError};
impl Darkfid {
// RPCAPI:
// Queries the blockchain database for a block in the given slot.
// Returns a readable block upon success.
//
// **Params:**
// * `array[0]`: `u64` slot ID (as string)
//
// **Returns:**
// * [`BlockInfo`](https://darkrenaissance.github.io/darkfi/development/darkfi/consensus/block/struct.BlockInfo.html)
// struct serialized into base64.
//
// --> {"jsonrpc": "2.0", "method": "blockchain.get_slot", "params": ["0"], "id": 1}
// <-- {"jsonrpc": "2.0", "result": {...}, "id": 1}
pub async fn blockchain_get_slot(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if params.len() != 1 || !params[0].is_string() {
return JsonError::new(InvalidParams, None, id).into()
}
let slot = match params[0].get::<String>().unwrap().parse::<u64>() {
Ok(v) => v,
Err(_) => return JsonError::new(ParseError, None, id).into(),
};
let blocks = match self.validator.blockchain.get_blocks_by_slot(&[slot]) {
Ok(v) => v,
Err(e) => {
error!(target: "darkfid::rpc::blockchain_get_slot", "Failed fetching block by slot: {}", e);
return JsonError::new(InternalError, None, id).into()
}
};
if blocks.is_empty() {
return server_error(RpcError::UnknownSlot, id, None)
}
let block = base64::encode(&serialize_async(&blocks[0]).await);
JsonResponse::new(JsonValue::String(block), id).into()
}
// RPCAPI:
// Queries the blockchain database for a given transaction.
// Returns a serialized `Transaction` object.
//
// **Params:**
// * `array[0]`: Hex-encoded transaction hash string
//
// **Returns:**
// * Serialized [`Transaction`](https://darkrenaissance.github.io/darkfi/development/darkfi/tx/struct.Transaction.html)
// object encoded with base64
//
// --> {"jsonrpc": "2.0", "method": "blockchain.get_tx", "params": ["TxHash"], "id": 1}
// <-- {"jsonrpc": "2.0", "result": "ABCD...", "id": 1}
pub async fn blockchain_get_tx(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if params.len() != 1 {
return JsonError::new(InvalidParams, None, id).into()
}
let tx_hash = params[0].get::<String>().unwrap();
let tx_hash = match blake3::Hash::from_hex(tx_hash) {
Ok(v) => v,
Err(_) => return JsonError::new(ParseError, None, id).into(),
};
let txs = match self.validator.blockchain.transactions.get(&[tx_hash], true) {
Ok(txs) => txs,
Err(e) => {
error!(target: "darkfid::rpc::blockchain_get_tx", "Failed fetching tx by hash: {}", e);
return JsonError::new(InternalError, None, id).into()
}
};
// Anything other than a single result here would be a logic error somewhere
assert_eq!(txs.len(), 1);
// The unwrap is safe since strict mode was used during .get()
let tx = txs[0].as_ref().unwrap();
let tx_enc = base64::encode(&serialize_async(tx).await);
JsonResponse::new(JsonValue::String(tx_enc), id).into()
}
// RPCAPI:
// Queries the blockchain database to find the last known slot
//
// **Params:**
// * `None`
//
// **Returns:**
// * `u64` ID of the last known slot
//
// --> {"jsonrpc": "2.0", "method": "blockchain.last_known_slot", "params": [], "id": 1}
// <-- {"jsonrpc": "2.0", "result": "1234", "id": 1}
pub async fn blockchain_last_known_slot(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if !params.is_empty() {
return JsonError::new(InvalidParams, None, id).into()
}
let blockchain = self.validator.blockchain.clone();
let Ok(last_slot) = blockchain.last() else {
return JsonError::new(InternalError, None, id).into()
};
JsonResponse::new(JsonValue::Number(last_slot.0 as f64), id).into()
}
// RPCAPI:
// Initializes a subscription to new incoming blocks.
// Once a subscription is established, `darkfid` will send JSON-RPC notifications of
// new incoming blocks to the subscriber.
//
// --> {"jsonrpc": "2.0", "method": "blockchain.subscribe_blocks", "params": [], "id": 1}
// <-- {"jsonrpc": "2.0", "method": "blockchain.subscribe_blocks", "params": [`blockinfo`]}
pub async fn blockchain_subscribe_blocks(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if !params.is_empty() {
return JsonError::new(InvalidParams, None, id).into()
}
self.subscribers.get("blocks").unwrap().clone().into()
}
// RPCAPI:
// Initializes a subscription to new incoming transactions.
// Once a subscription is established, `darkfid` will send JSON-RPC notifications of
// new incoming transactions to the subscriber.
//
// --> {"jsonrpc": "2.0", "method": "blockchain.subscribe_txs", "params": [], "id": 1}
// <-- {"jsonrpc": "2.0", "method": "blockchain.subscribe_txs", "params": [`tx_hash`]}
pub async fn blockchain_subscribe_txs(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if !params.is_empty() {
return JsonError::new(InvalidParams, None, id).into()
}
self.subscribers.get("txs").unwrap().clone().into()
}
// RPCAPI:
// Initializes a subscription to new incoming proposals, assuming the node participates
// in consensus. Once a subscription is established, `darkfid` will send JSON-RPC
// notifications of new incoming proposals to the subscriber.
//
// --> {"jsonrpc": "2.0", "method": "blockchain.subscribe_proposals", "params": [], "id": 1}
// <-- {"jsonrpc": "2.0", "method": "blockchain.subscribe_proposals", "params": [`blockinfo`]}
pub async fn blockchain_subscribe_proposals(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if !params.is_empty() {
return JsonError::new(InvalidParams, None, id).into()
}
// Since the proposals subscriber is only active if we participate in consensus,
// we have to check if it actually exists in the subscribers map.
let proposals_subscriber = self.subscribers.get("proposals");
if proposals_subscriber.is_none() {
error!(target: "darkfid::rpc::blockchain_subscribe_proposals", "Proposals subscriber not found");
return JsonError::new(InternalError, None, id).into()
}
proposals_subscriber.unwrap().clone().into()
}
// RPCAPI:
// Performs a lookup of zkas bincodes for a given contract ID and returns all of
// them, including their namespace.
//
// **Params:**
// * `array[0]`: base58-encoded contract ID string
//
// **Returns:**
// * `array[n]`: Pairs of: `zkas_namespace` string, serialized
// [`ZkBinary`](https://darkrenaissance.github.io/darkfi/development/darkfi/zkas/decoder/struct.ZkBinary.html)
// object
//
// --> {"jsonrpc": "2.0", "method": "blockchain.lookup_zkas", "params": ["6Ef42L1KLZXBoxBuCDto7coi9DA2D2SRtegNqNU4sd74"], "id": 1}
// <-- {"jsonrpc": "2.0", "result": [["Foo", "ABCD..."], ["Bar", "EFGH..."]], "id": 1}
pub async fn blockchain_lookup_zkas(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if params.len() != 1 || !params[0].is_string() {
return JsonError::new(InvalidParams, None, id).into()
}
let contract_id = params[0].get::<String>().unwrap();
let contract_id = match ContractId::from_str(contract_id) {
Ok(v) => v,
Err(e) => {
error!(target: "darkfid::rpc::blockchain_lookup_zkas", "Error decoding string to ContractId: {}", e);
return JsonError::new(InvalidParams, None, id).into()
}
};
let blockchain = self.validator.blockchain.clone();
let Ok(zkas_db) = blockchain.contracts.lookup(
&blockchain.sled_db,
&contract_id,
SMART_CONTRACT_ZKAS_DB_NAME,
) else {
error!(
target: "darkfid::rpc::blockchain_lookup_zkas", "Did not find zkas db for ContractId: {}",
contract_id
);
return server_error(RpcError::ContractZkasDbNotFound, id, None)
};
let mut ret = vec![];
for i in zkas_db.iter() {
debug!(target: "darkfid::rpc::blockchain_lookup_zkas", "Iterating over zkas db");
let Ok((zkas_ns, zkas_bytes)) = i else {
error!(target: "darkfid::rpc::blockchain_lookup_zkas", "Internal sled error iterating db");
return JsonError::new(InternalError, None, id).into()
};
let Ok(zkas_ns) = deserialize_async(&zkas_ns).await else {
return JsonError::new(InternalError, None, id).into()
};
let zkas_bincode = base64::encode(&zkas_bytes);
ret.push(JsonValue::Array(vec![
JsonValue::String(zkas_ns),
JsonValue::String(zkas_bincode),
]));
}
JsonResponse::new(JsonValue::Array(ret), id).into()
}
// RPCAPI:
// Returns the `chain_id` used for merge mining. A 32-byte hash of the genesis block.
//
// --> {"jsonrpc": "2.0", "method": "merge_mining_get_chain_id", "params": [], "id": 0}
// <-- {"jsonrpc": "2.0", "result": {"chain_id": 02f8...7863"}, "id": 0}
pub async fn merge_mining_get_chain_id(&self, id: u16, _params: JsonValue) -> JsonResult {
let chain_id = match self.validator.blockchain.genesis() {
Ok((_, v)) => v,
Err(e) => {
error!(
target: "darkfid::rpc::merge_mining_get_chain_id",
"[RPC] Error looking up genesis block: {}", e,
);
return JsonError::new(InternalError, None, id).into()
}
};
JsonResponse::new(
JsonValue::Object(HashMap::from([(
"chain_id".to_string(),
chain_id.to_hex().to_string().into(),
)])),
id,
)
.into()
}
}

View File

@@ -1,220 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_serial::deserialize_async;
use log::error;
use tinyjson::JsonValue;
use darkfi::{
rpc::jsonrpc::{
ErrorCode::{InternalError, InvalidParams},
JsonError, JsonResponse, JsonResult,
},
tx::Transaction,
util::encoding::base64,
};
use super::Darkfid;
use crate::{server_error, RpcError};
impl Darkfid {
// RPCAPI:
// Simulate a network state transition with the given transaction.
// Returns `true` if the transaction is valid, otherwise, a corresponding
// error.
//
// --> {"jsonrpc": "2.0", "method": "tx.simulate", "params": ["base64encodedTX"], "id": 1}
// <-- {"jsonrpc": "2.0", "result": true, "id": 1}
pub async fn tx_simulate(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if params.len() != 1 || !params[0].is_string() {
return JsonError::new(InvalidParams, None, id).into()
}
if !*self.validator.synced.read().await {
error!(target: "darkfid::rpc::tx_simulate", "Blockchain is not synced");
return server_error(RpcError::NotSynced, id, None)
}
// Try to deserialize the transaction
let tx_enc = params[0].get::<String>().unwrap().trim();
let tx_bytes = match base64::decode(tx_enc) {
Some(v) => v,
None => {
error!(target: "darkfid::rpc::tx_simulate", "Failed decoding base64 transaction");
return server_error(RpcError::ParseError, id, None)
}
};
let tx: Transaction = match deserialize_async(&tx_bytes).await {
Ok(v) => v,
Err(e) => {
error!(target: "darkfid::rpc::tx_simulate", "Failed deserializing bytes into Transaction: {}", e);
return server_error(RpcError::ParseError, id, None)
}
};
// Simulate state transition
let current_slot = self.validator.consensus.time_keeper.current_slot();
let result = self.validator.add_transactions(&[tx], current_slot, false).await;
if result.is_err() {
error!(
target: "darkfid::rpc::tx_simulate", "Failed to validate state transition: {}",
result.err().unwrap()
);
return server_error(RpcError::TxSimulationFail, id, None)
};
JsonResponse::new(JsonValue::Boolean(true), id).into()
}
// RPCAPI:
// Broadcast a given transaction to the P2P network.
// The function will first simulate the state transition in order to see
// if the transaction is actually valid, and it will return an error if
// it is not. Otherwise, the transaction ID will be returned.
//
// --> {"jsonrpc": "2.0", "method": "tx.broadcast", "params": ["base64encodedTX"], "id": 1}
// <-- {"jsonrpc": "2.0", "result": "txID...", "id": 1}
pub async fn tx_broadcast(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if params.len() != 1 || !params[0].is_string() {
return JsonError::new(InvalidParams, None, id).into()
}
if !*self.validator.synced.read().await {
error!(target: "darkfid::rpc::tx_broadcast", "Blockchain is not synced");
return server_error(RpcError::NotSynced, id, None)
}
// Try to deserialize the transaction
let tx_enc = params[0].get::<String>().unwrap().trim();
let tx_bytes = match base64::decode(tx_enc) {
Some(v) => v,
None => {
error!(target: "darkfid::rpc::tx_broadcast", "Failed decoding base64 transaction");
return server_error(RpcError::ParseError, id, None)
}
};
let tx: Transaction = match deserialize_async(&tx_bytes).await {
Ok(v) => v,
Err(e) => {
error!(target: "darkfid::rpc::tx_broadcast", "Failed deserializing bytes into Transaction: {}", e);
return server_error(RpcError::ParseError, id, None)
}
};
if self.consensus_p2p.is_some() {
// Consensus participants can directly perform
// the state transition check and append to their
// pending transactions store.
if self.validator.append_tx(&tx).await.is_err() {
error!(target: "darkfid::rpc::tx_broadcast", "Failed to append transaction to mempool");
return server_error(RpcError::TxSimulationFail, id, None)
}
} else {
// We'll perform the state transition check here.
let current_slot = self.validator.consensus.time_keeper.current_slot();
let result = self.validator.add_transactions(&[tx.clone()], current_slot, false).await;
if result.is_err() {
error!(
target: "darkfid::rpc::tx_broadcast", "Failed to validate state transition: {}",
result.err().unwrap()
);
return server_error(RpcError::TxSimulationFail, id, None)
};
}
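// Broadcast the transaction over the sync P2P network, and report an error if there are no connected channels to send it to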
self.sync_p2p.broadcast(&tx).await;
if self.sync_p2p.channels().await.is_empty() {
error!(target: "darkfid::rpc::tx_broadcast", "Failed broadcasting tx, no connected channels");
return server_error(RpcError::TxBroadcastFail, id, None)
}
let tx_hash = tx.hash().unwrap().to_string();
JsonResponse::new(JsonValue::String(tx_hash), id).into()
}
// RPCAPI:
// Queries the node pending transactions store to retrieve all transactions.
// Returns a vector of hex-encoded transaction hashes.
//
// --> {"jsonrpc": "2.0", "method": "tx.pending", "params": [], "id": 1}
// <-- {"jsonrpc": "2.0", "result": "[TxHash,...]", "id": 1}
pub async fn tx_pending(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if !params.is_empty() {
return JsonError::new(InvalidParams, None, id).into()
}
if !*self.validator.synced.read().await {
error!(target: "darkfid::rpc::tx_pending", "Blockchain is not synced");
return server_error(RpcError::NotSynced, id, None)
}
let pending_txs = match self.validator.blockchain.get_pending_txs() {
Ok(v) => v,
Err(e) => {
error!(target: "darkfid::rpc::tx_pending", "Failed fetching pending txs: {}", e);
return JsonError::new(InternalError, None, id).into()
}
};
let pending_txs: Vec<JsonValue> =
pending_txs.iter().map(|x| JsonValue::String(x.hash().unwrap().to_string())).collect();
JsonResponse::new(JsonValue::Array(pending_txs), id).into()
}
// RPCAPI:
// Queries the node pending transactions store to remove all transactions.
// Returns a vector of hex-encoded transaction hashes.
//
// --> {"jsonrpc": "2.0", "method": "tx.clean_pending", "params": [], "id": 1}
// <-- {"jsonrpc": "2.0", "result": "[TxHash,...]", "id": 1}
pub async fn tx_clean_pending(&self, id: u16, params: JsonValue) -> JsonResult {
let params = params.get::<Vec<JsonValue>>().unwrap();
if !params.is_empty() {
return JsonError::new(InvalidParams, None, id).into()
}
if !*self.validator.synced.read().await {
error!(target: "darkfid::rpc::tx_clean_pending", "Blockchain is not synced");
return server_error(RpcError::NotSynced, id, None)
}
let pending_txs = match self.validator.blockchain.get_pending_txs() {
Ok(v) => v,
Err(e) => {
error!(target: "darkfid::rpc::tx_clean_pending", "Failed fetching pending txs: {}", e);
return JsonError::new(InternalError, None, id).into()
}
};
if let Err(e) = self.validator.blockchain.remove_pending_txs(&pending_txs) {
error!(target: "darkfid::rpc::tx_clean_pending", "Failed fetching pending txs: {}", e);
return JsonError::new(InternalError, None, id).into()
};
let pending_txs: Vec<JsonValue> =
pending_txs.iter().map(|x| JsonValue::String(x.hash().unwrap().to_string())).collect();
JsonResponse::new(JsonValue::Array(pending_txs), id).into()
}
}

View File

@@ -67,7 +67,6 @@ ENV TARGET_PRFX="--target=" RUST_TARGET="${RISCV_TARGET}"
RUN make ${BINS} && mkdir compiled-bins && \
(if [ -e zkas ]; then cp -a zkas compiled-bins/; fi;) && \
(if [ -e darkfid ]; then cp -a darkfid compiled-bins/; fi;) && \
(if [ -e darkfid2 ]; then cp -a darkfid2 compiled-bins/; fi;) && \
(if [ -e faucetd ]; then cp -a faucetd compiled-bins/; fi;) && \
(if [ -e darkirc ]; then cp -a darkirc compiled-bins/; fi;) && \
(if [ -e "genev-cli" ]; then cp -a genev-cli compiled-bins/; fi;) && \

View File

@@ -48,7 +48,6 @@ RUN sed -e 's,^#RUSTFLAGS ,RUSTFLAGS ,' -i Makefile
RUN make clean && make ${BINS} && mkdir compiled-bins && \
(if [ -e zkas ]; then cp -a zkas compiled-bins/; fi;) && \
(if [ -e darkfid ]; then cp -a darkfid compiled-bins/; fi;) && \
(if [ -e darkfid2 ]; then cp -a darkfid2 compiled-bins/; fi;) && \
(if [ -e faucetd ]; then cp -a faucetd compiled-bins/; fi;) && \
(if [ -e darkirc ]; then cp -a darkirc compiled-bins/; fi;) && \
(if [ -e "genev-cli" ]; then cp -a genev-cli compiled-bins/; fi;) && \

View File

@@ -1,253 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::fmt;
use darkfi_sdk::{
crypto::{MerkleNode, MerkleTree},
pasta::pallas,
};
use darkfi_serial::{async_trait, serialize, SerialDecodable, SerialEncodable};
use super::{
constants::{BLOCK_MAGIC_BYTES, BLOCK_VERSION},
LeadInfo,
};
use crate::{impl_p2p_message, net::Message, tx::Transaction, util::time::Timestamp};
/// This struct represents a tuple of the form (version, previous, epoch, slot, timestamp, merkle_root).
#[derive(Debug, Clone, PartialEq, Eq, SerialEncodable, SerialDecodable)]
pub struct Header {
/// Block version
pub version: u8,
/// Previous block hash
pub previous: blake3::Hash,
/// Epoch
pub epoch: u64,
/// Slot UID
pub slot: u64,
/// Block creation timestamp
pub timestamp: Timestamp,
/// Root of the transaction hashes merkle tree
pub root: MerkleNode,
}
impl Header {
pub fn new(
previous: blake3::Hash,
epoch: u64,
slot: u64,
timestamp: Timestamp,
root: MerkleNode,
) -> Self {
let version = BLOCK_VERSION;
Self { version, previous, epoch, slot, timestamp, root }
}
/// Generate the genesis block header.
pub fn genesis_header(genesis_ts: Timestamp, genesis_data: blake3::Hash) -> Self {
let tree = MerkleTree::new(100);
let root = tree.root(0).unwrap();
Self::new(genesis_data, 0, 0, genesis_ts, root)
}
/// Calculate the header hash
pub fn headerhash(&self) -> blake3::Hash {
blake3::hash(&serialize(self))
}
}
impl Default for Header {
fn default() -> Self {
Header::new(
blake3::hash(b""),
0,
0,
Timestamp::current_time(),
MerkleNode::from(pallas::Base::zero()),
)
}
}
/// This struct represents a tuple of the form (`magic`, `header`, `txs`, `lead_info`).
/// The header and transactions are stored as hashes, serving as pointers to
/// the actual data in the sled database.
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct Block {
/// Block magic bytes
pub magic: [u8; 4],
/// Block header
pub header: blake3::Hash,
/// Transaction hashes
pub txs: Vec<blake3::Hash>,
/// Lead Info
pub lead_info: LeadInfo,
}
impl_p2p_message!(Block, "block");
impl Block {
pub fn new(
previous: blake3::Hash,
epoch: u64,
slot: u64,
txs: Vec<blake3::Hash>,
root: MerkleNode,
lead_info: LeadInfo,
) -> Self {
let magic = BLOCK_MAGIC_BYTES;
let timestamp = Timestamp::current_time();
let header = Header::new(previous, epoch, slot, timestamp, root);
let header = header.headerhash();
Self { magic, header, txs, lead_info }
}
/// Generate the genesis block.
pub fn genesis_block(genesis_ts: Timestamp, genesis_data: blake3::Hash) -> Self {
let magic = BLOCK_MAGIC_BYTES;
let header = Header::genesis_header(genesis_ts, genesis_data);
let header = header.headerhash();
let lead_info = LeadInfo::default();
Self { magic, header, txs: vec![], lead_info }
}
/// Calculate the block hash
pub fn blockhash(&self) -> blake3::Hash {
blake3::hash(&serialize(self))
}
}
/// Auxiliary structure used for blockchain syncing.
#[derive(Debug, SerialEncodable, SerialDecodable)]
pub struct BlockOrder {
/// Slot UID
pub slot: u64,
/// Block headerhash of that slot
pub block: blake3::Hash,
}
impl_p2p_message!(BlockOrder, "blockorder");
/// Structure representing full block data.
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct BlockInfo {
/// BlockInfo magic bytes
pub magic: [u8; 4],
/// Block header data
pub header: Header,
/// Transactions payload
pub txs: Vec<Transaction>,
/// Lead Info
pub lead_info: LeadInfo,
}
impl Default for BlockInfo {
fn default() -> Self {
let magic = BLOCK_MAGIC_BYTES;
Self { magic, header: Header::default(), txs: vec![], lead_info: LeadInfo::default() }
}
}
impl_p2p_message!(BlockInfo, "blockinfo");
impl BlockInfo {
pub fn new(header: Header, txs: Vec<Transaction>, lead_info: LeadInfo) -> Self {
let magic = BLOCK_MAGIC_BYTES;
Self { magic, header, txs, lead_info }
}
/// Calculate the block hash
pub fn blockhash(&self) -> blake3::Hash {
let block: Block = self.clone().into();
block.blockhash()
}
}
impl From<BlockInfo> for Block {
fn from(block_info: BlockInfo) -> Self {
let txs = block_info.txs.iter().map(|x| blake3::hash(&serialize(x))).collect();
Self {
magic: block_info.magic,
header: block_info.header.headerhash(),
txs,
lead_info: block_info.lead_info,
}
}
}
/// Auxiliary structure used for blockchain syncing
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct BlockResponse {
/// Response blocks.
pub blocks: Vec<BlockInfo>,
}
impl_p2p_message!(BlockResponse, "blockresponse");
/// This struct represents a block proposal, used for consensus.
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct BlockProposal {
/// Block hash
pub hash: blake3::Hash,
/// Block header hash
pub header: blake3::Hash,
/// Block data
pub block: BlockInfo,
}
impl BlockProposal {
#[allow(clippy::too_many_arguments)]
pub fn new(header: Header, txs: Vec<Transaction>, lead_info: LeadInfo) -> Self {
let block = BlockInfo::new(header, txs, lead_info);
let hash = block.blockhash();
let header = block.header.headerhash();
Self { hash, header, block }
}
}
impl PartialEq for BlockProposal {
fn eq(&self, other: &Self) -> bool {
self.hash == other.hash &&
self.header == other.header &&
self.block.header == other.block.header &&
self.block.txs == other.block.txs
}
}
impl fmt::Display for BlockProposal {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_fmt(format_args!(
"BlockProposal {{ leader public key: {}, hash: {}, header: {}, epoch: {}, slot: {}, txs: {} }}",
self.block.lead_info.public_key,
self.hash,
self.header,
self.block.header.epoch,
self.block.header.slot,
self.block.txs.len()
))
}
}
impl_p2p_message!(BlockProposal, "proposal");
impl From<BlockProposal> for BlockInfo {
fn from(block: BlockProposal) -> BlockInfo {
block.block
}
}

View File

@@ -1,206 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use crate::{util::time::Timestamp, Result};
use log::debug;
use std::{thread, time::Duration};
use url::Url;
pub enum Ticks {
GENESIS { e: u64, sl: u64 }, // genesis epoch
NEWSLOT { e: u64, sl: u64 }, // new slot
NEWEPOCH { e: u64, sl: u64 }, // new epoch
TOCKS, // tocks, i.e. the slot is ending
IDLE, // idle clock state
OUTOFSYNC, // clock and blockchain are out of sync
}
const BB_SL: u64 = u64::MAX - 1; // big bang slot time (needs to be a negative value)
const BB_E: u64 = 0; // big bang epoch time
#[derive(Debug)]
pub struct Clock {
pub sl: u64, // relative slot index (zero-based) [0-len[
pub e: u64, // epoch index (zero-based) [0-\inf[
pub tick_len: u64, // tick length in time (seconds)
pub sl_len: u64, // slot length in ticks
pub e_len: u64, // epoch length in slots
pub peers: Vec<Url>,
pub genesis_time: Timestamp,
}
impl Clock {
pub fn new(
e_len: Option<u64>,
sl_len: Option<u64>,
tick_len: Option<u64>,
peers: Vec<Url>,
) -> Self {
let gt: Timestamp = Timestamp::current_time();
Self {
sl: BB_SL, //necessary for genesis slot
e: BB_E,
tick_len: tick_len.unwrap_or(22), // 22 seconds
sl_len: sl_len.unwrap_or(22), // ~8 minutes
e_len: e_len.unwrap_or(3), // 24.2 minutes
peers,
genesis_time: gt,
}
}
pub fn get_sl_len(&self) -> u64 {
self.sl_len
}
pub fn get_e_len(&self) -> u64 {
self.e_len
}
async fn time(&self) -> Result<Timestamp> {
// TODO (fix): query more than one NTP server for the time, and take the average
Ok(Timestamp::current_time())
}
/// returns time since genesis in seconds.
async fn time_to_genesis(&self) -> Timestamp {
// TODO: this value needs to be assigned to the kickoff time.
let genesis_time = self.genesis_time.0;
let abs_time = self.time().await.unwrap();
Timestamp(abs_time.0 - genesis_time)
}
/// return the time since genesis in seconds, the elapsed seconds within the current tick, and the absolute tick index.
async fn tick_time(&self) -> (u64, u64, u64) {
let time = self.time_to_genesis().await.0;
let tick_abs: u64 = time / self.tick_len;
let tick_rel: u64 = time % self.tick_len;
(time, tick_rel, tick_abs)
}
/// return true if the clock is at the beginning of a tick (before 2/3 of the tick length has elapsed).
async fn ticking(&self) -> bool {
let (abs, rel, _) = self.tick_time().await;
debug!(target: "consensus::clock", "abs time to genesis ticks: {}, rel ticks: {}", abs, rel);
rel < (self.tick_len) * 2 / 3
}
pub async fn sync(&mut self) -> Result<()> {
let e = self.epoch_abs().await;
let sl = self.slot_relative().await;
self.sl = sl;
self.e = e;
Ok(())
}
/// returns absolute zero based slot index
async fn slot_abs(&self) -> u64 {
let sl_abs = self.tick_time().await.0 / self.sl_len;
debug!(target: "consensus::clock", "[slot_abs] slot len: {} - slot abs: {}", self.sl_len, sl_abs);
sl_abs
}
/// returns relative zero based slot index
async fn slot_relative(&self) -> u64 {
let e_abs = self.slot_abs().await % self.e_len;
debug!(target: "consensus::clock", "[slot_relative] slot len: {} - slot relative: {}", self.sl_len, e_abs);
e_abs
}
/// returns absolute zero based epoch index.
async fn epoch_abs(&self) -> u64 {
let res = self.slot_abs().await / self.e_len;
debug!(target: "consensus::clock", "[epoch_abs] epoch len: {} - epoch abs: {}", self.e_len, res);
res
}
/// return the ticks phase with the corresponding phase parameters
///
/// the Ticks enum can include the epoch index and the relative slot index (zero-based)
pub async fn ticks(&mut self) -> Ticks {
// also debug the failing function.
let e = self.epoch_abs().await;
let sl = self.slot_relative().await;
if self.ticking().await {
debug!(
target: "consensus::clock",
"e/e`: {}/{} sl/sl`: {}/{}, BB_E/BB_SL: {}/{}",
e, self.e, sl, self.sl, BB_E, BB_SL
);
if e == self.e && e == BB_E && self.sl == BB_SL {
self.sl = sl + 1; // 0
self.e = e; // 0
debug!(target: "consensus::clock", "new genesis");
Ticks::GENESIS { e, sl }
} else if e == self.e && sl == self.sl + 1 {
self.sl = sl;
debug!(target: "consensus::clock", "new slot");
Ticks::NEWSLOT { e, sl }
} else if e == self.e + 1 && sl == 0 {
self.e = e;
self.sl = sl;
debug!(target: "consensus::clock", "new epoch");
Ticks::NEWEPOCH { e, sl }
} else if e == self.e && sl == self.sl {
debug!(target: "consensus::clock", "clock is idle");
thread::sleep(Duration::from_millis(100));
Ticks::IDLE
} else {
debug!(target: "consensus::clock", "clock is out of sync");
//clock is out of sync
Ticks::OUTOFSYNC
}
} else {
debug!(target: "consensus::clock", "tocks");
Ticks::TOCKS
}
}
}
#[cfg(test)]
mod tests {
use super::{Clock, Ticks};
use futures::executor::block_on;
use std::{thread, time::Duration};
#[test]
fn clock_works() {
let clock = Clock::new(Some(9), Some(9), Some(9), vec![]);
// block the thread for 1 second
thread::sleep(Duration::from_millis(1000));
let ttg = block_on(clock.time_to_genesis()).0;
assert!((1..2).contains(&ttg));
}
fn _clock_ticking() {
let clock = Clock::new(Some(9), Some(9), Some(9), vec![]);
// block the thread for 1 second before each check
thread::sleep(Duration::from_millis(1000));
assert!(block_on(clock.ticking()));
thread::sleep(Duration::from_millis(1000));
assert!(block_on(clock.ticking()));
}
fn _clock_ticks() {
let mut clock = Clock::new(Some(9), Some(9), Some(9), vec![]);
//
let tick: Ticks = block_on(clock.ticks());
assert!(matches!(tick, Ticks::GENESIS { e: 0, sl: 0 }));
thread::sleep(Duration::from_millis(3000));
let tock: Ticks = block_on(clock.ticks());
assert!(matches!(tock, Ticks::TOCKS));
}
}

View File

@@ -1,6 +0,0 @@
-- Wallet definitions for consensus lead coins.
-- The consensus lead coins we have and can use
CREATE TABLE IF NOT EXISTS consensus_coin (
coin BLOB
);

View File

@@ -1,129 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use lazy_static::lazy_static;
use crate::{consensus::Float10, util::time::Timestamp};
lazy_static! {
/// Genesis hash for the mainnet chain
pub static ref MAINNET_GENESIS_HASH_BYTES: blake3::Hash = blake3::hash(b"darkfi_mainnet");
// NOTE: On initial network bootstrap, the genesis timestamp should be equal to the bootstrap timestamp.
// On network restart, only change the bootstrap timestamp to schedule when nodes become active.
/// Genesis timestamp for the mainnet chain
pub static ref MAINNET_GENESIS_TIMESTAMP: Timestamp = Timestamp(1650887115);
/// Bootstrap timestamp for the mainnet chain
pub static ref MAINNET_BOOTSTRAP_TIMESTAMP: Timestamp = Timestamp(1650887115);
/// Total sum of initial staking coins for the mainnet chain
pub static ref MAINNET_INITIAL_DISTRIBUTION: u64 = 0;
/// Genesis hash for the testnet chain
pub static ref TESTNET_GENESIS_HASH_BYTES: blake3::Hash = blake3::hash(b"darkfi_testnet");
/// Genesis timestamp for the testnet chain
pub static ref TESTNET_GENESIS_TIMESTAMP: Timestamp = Timestamp(1677531600);
/// Bootstrap timestamp for the testnet chain
pub static ref TESTNET_BOOTSTRAP_TIMESTAMP: Timestamp = Timestamp(1677531600);
/// Total sum of initial staking coins for the testnet chain
pub static ref TESTNET_INITIAL_DISTRIBUTION: u64 = 1000;
// Commonly used Float10
pub static ref FLOAT10_EPSILON: Float10 = Float10::try_from("1").unwrap();
pub static ref FLOAT10_NEG_TWO: Float10 = Float10::try_from("-2").unwrap();
pub static ref FLOAT10_NEG_ONE: Float10 = Float10::try_from("-1").unwrap();
pub static ref FLOAT10_ZERO: Float10 = Float10::try_from("0").unwrap();
pub static ref FLOAT10_ONE: Float10 = Float10::try_from("1").unwrap();
pub static ref FLOAT10_TWO: Float10 = Float10::try_from("2").unwrap();
pub static ref FLOAT10_THREE: Float10 = Float10::try_from("3").unwrap();
pub static ref FLOAT10_FIVE: Float10 = Float10::try_from("5").unwrap();
pub static ref FLOAT10_NINE: Float10 = Float10::try_from("9").unwrap();
pub static ref FLOAT10_TEN: Float10 = Float10::try_from("10").unwrap();
// Consensus parameters
pub static ref KP: Float10 = Float10::try_from("0.18").unwrap();
pub static ref KI: Float10 = Float10::try_from("0.02").unwrap();
pub static ref KD: Float10 = Float10::try_from("-0.1").unwrap();
pub static ref PID_OUT_STEP: Float10 = Float10::try_from("0.1").unwrap();
pub static ref MAX_DER: Float10 = Float10::try_from("0.1").unwrap();
pub static ref MIN_DER: Float10 = Float10::try_from("-0.1").unwrap();
pub static ref MAX_F: Float10 = Float10::try_from("0.99").unwrap();
pub static ref MIN_F: Float10 = Float10::try_from("0.01").unwrap();
}
/// Block version number
pub const BLOCK_VERSION: u8 = 1;
/// Block magic bytes
pub const BLOCK_MAGIC_BYTES: [u8; 4] = [0x11, 0x6d, 0x75, 0x1f];
/// Block info magic bytes
pub const BLOCK_INFO_MAGIC_BYTES: [u8; 4] = [0x90, 0x44, 0xf1, 0xf6];
/// Number of slots in one epoch
pub const EPOCH_LENGTH: usize = 10;
/// Slot time in seconds
pub const SLOT_TIME: u64 = 90;
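// With EPOCH_LENGTH = 10 and SLOT_TIME = 90, one epoch spans 900 seconds (15 minutes).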
/// Finalization sync period duration (should be >=2/3 of slot time)
pub const FINAL_SYNC_DUR: u64 = 60;
/// Max resync retries duration in epochs
pub const SYNC_RETRIES_DURATION: u64 = 2;
/// Max resync retries
pub const SYNC_MAX_RETRIES: u64 = 10;
/// Transactions included in a block cap
pub const TXS_CAP: usize = 50;
/// Block leader reward
pub const REWARD: u64 = 1;
/// Leader proofs k for zk proof rows (rows=2^k)
pub const LEADER_PROOF_K: u32 = 13;
// TODO: Describe these constants
pub const RADIX_BITS: usize = 76;
pub const P: &str = "28948022309329048855892746252171976963363056481941560715954676764349967630337";
pub const LOTTERY_HEAD_START: u64 = 1;
pub const PRF_NULLIFIER_PREFIX: u64 = 0;
pub const PI_COMMITMENT_X_INDEX: usize = 1;
pub const PI_COMMITMENT_Y_INDEX: usize = 2;
pub const PI_COMMITMENT_ROOT: usize = 5;
pub const PI_NULLIFIER_INDEX: usize = 7;
pub const PI_MU_Y_INDEX: usize = 8;
pub const PI_MU_RHO_INDEX: usize = 10;
pub const PI_SIGMA1_INDEX: usize = 12;
pub const PI_SIGMA2_INDEX: usize = 13;
pub const GENESIS_TOTAL_STAKE: u64 = 1;
pub const LEADER_HISTORY_LOG: &str = "/tmp/lead_history.log";
pub const F_HISTORY_LOG: &str = "/tmp/f_history.log";
pub const LOTTERY_HISTORY_LOG: &str = "/tmp/lottery_history.log";
// Wallet SQL table constant names. These have to represent the SQL schema.
pub const CONSENSUS_COIN_TABLE: &str = "consensus_coin";
pub const CONSENSUS_COIN_COL: &str = "coin";

View File

@@ -1,87 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use crate::zkas::{Opcode, VarType, ZkBinary};
/// Calculate the gas use for verifying a given zkas circuit.
/// This function assumes that the zkbin was properly decoded.
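///
/// Illustrative example (hypothetical circuit): 2 constants, 1 literal, the
/// witnesses `Base` and `MerklePath`, and the opcodes `PoseidonHash` (2 inputs)
/// and `MerkleRoot` would cost 10*2 + 10*1 + (10 + 40) + (20 + 10*2) + 50 = 170 gas.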
pub fn circuit_gas_use(zkbin: &ZkBinary) -> u64 {
let mut accumulator: u64 = 0;
// Constants each with a cost of 10
accumulator += 10 * zkbin.constants.len() as u64;
// Literals each with a cost of 10 (for now there's only 1 type of literal)
accumulator += 10 * zkbin.literals.len() as u64;
// Witnesses have cost by type
for witness in &zkbin.witnesses {
let cost = match witness {
VarType::Dummy => unreachable!(),
VarType::EcPoint => 20,
VarType::EcFixedPoint => unreachable!(),
VarType::EcFixedPointShort => unreachable!(),
VarType::EcFixedPointBase => unreachable!(),
VarType::EcNiPoint => 20,
VarType::Base => 10,
VarType::BaseArray => unreachable!(),
VarType::Scalar => 20,
VarType::ScalarArray => unreachable!(),
VarType::MerklePath => 40,
VarType::Uint32 => 10,
VarType::Uint64 => 10,
VarType::Any => 10,
};
accumulator += cost;
}
// Opcodes depending on how heavy they are
for opcode in &zkbin.opcodes {
let cost = match opcode.0 {
Opcode::Noop => unreachable!(),
Opcode::EcAdd => 30,
Opcode::EcMul => 30,
Opcode::EcMulBase => 30,
Opcode::EcMulShort => 30,
Opcode::EcMulVarBase => 30,
Opcode::EcGetX => 5,
Opcode::EcGetY => 5,
Opcode::PoseidonHash => 20 + 10 * opcode.1.len() as u64,
Opcode::MerkleRoot => 50,
Opcode::BaseAdd => 15,
Opcode::BaseMul => 15,
Opcode::BaseSub => 15,
Opcode::WitnessBase => 10,
Opcode::RangeCheck => 60,
Opcode::LessThanStrict => 100,
Opcode::LessThanLoose => 100,
Opcode::BoolCheck => 20,
Opcode::CondSelect => 10,
Opcode::ZeroCondSelect => 10,
Opcode::ConstrainEqualBase => 10,
Opcode::ConstrainEqualPoint => 20,
Opcode::ConstrainInstance => 10,
Opcode::DebugPrint => 100,
};
accumulator += cost;
}
accumulator
}

View File

@@ -1,509 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_sdk::{
crypto::{
pedersen::{pedersen_commitment_base, pedersen_commitment_u64},
poseidon_hash,
util::fp_mod_fv,
MerkleNode, MerkleTree, SecretKey,
},
pasta::{arithmetic::CurveAffine, group::Curve, pallas},
};
use darkfi_serial::{async_trait, SerialDecodable, SerialEncodable};
use halo2_proofs::{arithmetic::Field, circuit::Value};
use log::info;
use rand::rngs::OsRng;
use super::constants::EPOCH_LENGTH;
use crate::{
consensus::{constants, utils::fbig2base, Float10, TransferStx, TxRcpt},
zk::{
proof::{Proof, ProvingKey},
vm::ZkCircuit,
vm_heap::Witness,
},
zkas::ZkBinary,
Result,
};
use std::{
fs::File,
io::{prelude::*, BufWriter},
};
pub const MERKLE_DEPTH_LEAD_COIN: usize = 32;
pub const MERKLE_DEPTH: u8 = 32;
pub const ZERO: pallas::Base = pallas::Base::zero();
pub const ONE: pallas::Base = pallas::Base::one();
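// Poseidon domain-separation prefixes used in the coin derivations below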
pub const PREFIX_EVL: u64 = 2;
pub const PREFIX_SEED: u64 = 3;
pub const PREFIX_CM: u64 = 4;
pub const PREFIX_PK: u64 = 5;
pub const PREFIX_SN: u64 = 6;
// TODO: Unify item names with the names in the ZK proof (those are more descriptive)
/// Structure representing the consensus leader coin
#[derive(Debug, Clone, SerialDecodable, SerialEncodable)]
pub struct LeadCoin {
/// Coin's stake value
pub value: u64,
/// Coin creation slot.
pub slot: u64,
/// Coin nonce
pub nonce: pallas::Base,
/// Commitment for coin1
pub coin1_commitment: pallas::Point,
/// Merkle root of coin1 commitment
pub coin1_commitment_root: MerkleNode,
/// Coin commitment position
pub coin1_commitment_pos: u32,
/// Merkle path to the coin1's commitment
pub coin1_commitment_merkle_path: Vec<MerkleNode>,
/// coin1 sk
pub coin1_sk: pallas::Base,
/// Merkle root of the `coin1` secret key
pub coin1_sk_root: MerkleNode,
/// coin1 sk position in merkle tree
pub coin1_sk_pos: u32,
/// Merkle path to the secret key of `coin1`
pub coin1_sk_merkle_path: Vec<MerkleNode>,
/// coin1 commitment blinding factor
pub coin1_blind: pallas::Scalar,
}
impl LeadCoin {
/// Create a new `LeadCoin` object using given parameters.
#[allow(clippy::too_many_arguments)]
pub fn new(
// emulation of global random oracle output from previous epoch randomness.
//eta: pallas::Base,
// Stake value
value: u64,
// Slot absolute index
slot: u64,
// coin1 sk
coin1_sk: pallas::Base,
// Merkle root of the `coin_1` secret key in the Merkle tree of secret keys
coin1_sk_root: MerkleNode,
// sk pos
coin1_sk_pos: usize,
// Merkle path to the secret key of `coin_1` in the Merkle tree of secret keys
coin1_sk_merkle_path: Vec<MerkleNode>,
// coin1 nonce
seed: pallas::Base,
// Merkle tree of coin commitments
coin_commitment_tree: &mut MerkleTree,
) -> Self {
// Generate random blinding values for commitments:
let coin1_blind = pallas::Scalar::random(&mut OsRng);
//let coin2_blind = pallas::Scalar::random(&mut OsRng);
// pk
let pk = Self::util_pk(coin1_sk_root, slot);
let coin1_commitment = Self::commitment(pk, pallas::Base::from(value), seed, coin1_blind);
// Hash its coordinates to get a base field element
let c1_cm_coords = coin1_commitment.to_affine().coordinates().unwrap();
let c1_base_msg = [*c1_cm_coords.x(), *c1_cm_coords.y()];
let coin1_commitment_base = poseidon_hash(c1_base_msg);
// Append the element to the Merkle tree
coin_commitment_tree.append(MerkleNode::from(coin1_commitment_base));
let coin1_commitment_pos = coin_commitment_tree.mark().unwrap();
let coin1_commitment_root = coin_commitment_tree.root(0).unwrap();
let coin1_commitment_merkle_path =
coin_commitment_tree.witness(coin1_commitment_pos, 0).unwrap();
Self {
value,
slot,
nonce: seed,
coin1_commitment,
coin1_commitment_root,
coin1_commitment_pos: u32::try_from(u64::from(coin1_commitment_pos)).unwrap(),
coin1_commitment_merkle_path,
coin1_sk,
coin1_sk_root,
coin1_sk_pos: u32::try_from(coin1_sk_pos).unwrap(),
coin1_sk_merkle_path,
coin1_blind,
}
}
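/// Compute the coin's serial number (nullifier): the Poseidon hash of
/// (PREFIX_SN, coin1_sk_root, nonce, 0).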
pub fn sn(&self) -> pallas::Base {
let sn_msg = [pallas::Base::from(PREFIX_SN), self.coin1_sk_root.inner(), self.nonce, ZERO];
poseidon_hash(sn_msg)
}
pub fn election_seeds_u64(eta: pallas::Base, slotu64: u64) -> (pallas::Base, pallas::Base) {
Self::election_seeds(eta, pallas::Base::from(slotu64))
}
/// Derive the election seeds (lead_mu, nonce_mu) by Poseidon-hashing domain-separated (eta, slot) messages
pub fn election_seeds(eta: pallas::Base, slot: pallas::Base) -> (pallas::Base, pallas::Base) {
info!(target: "consensus::leadcoin", "election_seeds: eta: {:?}, slot: {:?}", eta, slot);
let election_seed_nonce = pallas::Base::from(3);
let election_seed_lead = pallas::Base::from(22);
// mu_y
let lead_msg = [election_seed_lead, eta, slot];
let lead_mu = poseidon_hash(lead_msg);
// mu_rho
let nonce_msg = [election_seed_nonce, eta, slot];
let nonce_mu = poseidon_hash(nonce_msg);
(lead_mu, nonce_mu)
}
/// Create a vector of `pallas::Base` elements from the `LeadCoin` to be
/// used as public inputs for the ZK proof.
pub fn public_inputs(
&self,
sigma1: pallas::Base,
sigma2: pallas::Base,
current_eta: pallas::Base,
current_slot: pallas::Base,
derived_blind: pallas::Scalar,
) -> Vec<pallas::Base> {
// pk
let pk = self.pk();
// coin 1-2 cm/commitment
let c1_cm_coord = self.coin1_commitment.to_affine().coordinates().unwrap();
let c2_cm_coord = self.derived_commitment(derived_blind).to_affine().coordinates().unwrap();
// lottery seed
let seed_msg =
[pallas::Base::from(PREFIX_SEED), self.coin1_sk_root.inner(), self.nonce, ZERO];
let seed = poseidon_hash(seed_msg);
// y
let (y_mu, rho_mu) = Self::election_seeds(current_eta, current_slot);
let y_msg = [seed, y_mu];
let y = poseidon_hash(y_msg);
// rho
let rho_msg = [seed, rho_mu];
let rho = poseidon_hash(rho_msg);
let public_inputs = vec![
pk,
*c1_cm_coord.x(),
*c1_cm_coord.y(),
*c2_cm_coord.x(),
*c2_cm_coord.y(),
self.coin1_commitment_root.inner(),
self.coin1_sk_root.inner(),
self.sn(),
y_mu,
y,
rho_mu,
rho,
sigma1,
sigma2,
];
public_inputs
}
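/// Derive the coin public key as the Poseidon hash of (PREFIX_PK, sk_root, slot, 0).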
fn util_pk(sk_root: MerkleNode, slot: u64) -> pallas::Base {
let pk_msg =
[pallas::Base::from(PREFIX_PK), sk_root.inner(), pallas::Base::from(slot), ZERO];
poseidon_hash(pk_msg)
}
/// calculate coin public key: hash of root coin secret key
/// and creation slot.
pub fn pk(&self) -> pallas::Base {
Self::util_pk(self.coin1_sk_root, self.slot)
}
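/// Derive the evolved coin nonce as the Poseidon hash of (PREFIX_EVL, sk_root, nonce, 0).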
fn util_derived_rho(sk_root: MerkleNode, nonce: pallas::Base) -> pallas::Base {
let rho_msg = [pallas::Base::from(PREFIX_EVL), sk_root.inner(), nonce, ZERO];
poseidon_hash(rho_msg)
}
/// calculate derived coin nonce: hash of root coin secret key
/// and old nonce
pub fn derived_rho(&self) -> pallas::Base {
Self::util_derived_rho(self.coin1_sk_root, self.nonce)
}
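/// Compute the lottery headstart term: MIN_F * P, converted to a base field element.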
pub fn headstart() -> pallas::Base {
let headstart = constants::MIN_F.clone() * Float10::try_from(constants::P).unwrap();
fbig2base(headstart)
}
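/// Check whether this coin wins the leader lottery for the given slot:
/// the coin's lottery output `y` must be strictly below the target
/// `T = sigma1 * value + sigma2 * value^2 + headstart`.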
pub fn is_leader(
&self,
sigma1: pallas::Base,
sigma2: pallas::Base,
current_eta: pallas::Base,
current_slot: pallas::Base,
) -> bool {
let y_seed =
[pallas::Base::from(PREFIX_SEED), self.coin1_sk_root.inner(), self.nonce, ZERO];
let y_seed_hash = poseidon_hash(y_seed);
let (y_mu, _) = Self::election_seeds(current_eta, current_slot);
let y_msg = [y_seed_hash, y_mu];
let y = poseidon_hash(y_msg);
let value = pallas::Base::from(self.value);
let headstart = Self::headstart();
let target = sigma1 * value + sigma2 * value * value + headstart;
let y_t_str = format!("{:?},{:?}\n", y, target);
let f =
File::options().append(true).create(true).open(constants::LOTTERY_HISTORY_LOG).unwrap();
{
let mut writer = BufWriter::new(f);
let _ = writer.write(&y_t_str.into_bytes()).unwrap();
}
info!(target: "consensus::leadcoin", "is_leader(): y = {:?}", y);
info!(target: "consensus::leadcoin", "is_leader(): T = {:?}", target);
y < target
}
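/// Compute a coin commitment: a Pedersen commitment to the Poseidon hash of
/// (PREFIX_CM, pk, value, seed), using `blind` as the blinding factor.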
fn commitment(
pk: pallas::Base,
value: pallas::Base,
seed: pallas::Base,
blind: pallas::Scalar,
) -> pallas::Point {
let commit_msg = [pallas::Base::from(PREFIX_CM), pk, value, seed];
// Create commitment to coin
let commit_v = poseidon_hash(commit_msg);
pedersen_commitment_base(commit_v, blind)
}
/// calculate the derived coin commitment
pub fn derived_commitment(&self, blind: pallas::Scalar) -> pallas::Point {
let pk = self.pk();
let rho = self.derived_rho();
Self::commitment(pk, pallas::Base::from(self.value + constants::REWARD), rho, blind)
}
/// derive the new coin to be minted after the current coin is spent
/// in the lottery.
pub fn derive_coin(
&self,
coin_commitment_tree: &mut MerkleTree,
derived_blind: pallas::Scalar,
) -> LeadCoin {
info!(target: "consensus::leadcoin", "derive_coin(): Deriving new coin!");
let derived_c1_rho = self.derived_rho();
let derived_c1_cm = self.derived_commitment(derived_blind);
let derived_c1_cm_coord = derived_c1_cm.to_affine().coordinates().unwrap();
let derived_c1_cm_msg = [*derived_c1_cm_coord.x(), *derived_c1_cm_coord.y()];
let derived_c1_cm_base = poseidon_hash(derived_c1_cm_msg);
coin_commitment_tree.append(MerkleNode::from(derived_c1_cm_base));
let leaf_pos = coin_commitment_tree.mark().unwrap();
let commitment_root = coin_commitment_tree.root(0).unwrap();
let commitment_merkle_path = coin_commitment_tree.witness(leaf_pos, 0).unwrap();
LeadCoin {
value: self.value + constants::REWARD,
slot: self.slot,
nonce: derived_c1_rho,
coin1_commitment: derived_c1_cm,
coin1_commitment_root: commitment_root,
coin1_commitment_pos: u32::try_from(u64::from(leaf_pos)).unwrap(),
coin1_commitment_merkle_path: commitment_merkle_path,
coin1_sk: self.coin1_sk,
coin1_sk_root: self.coin1_sk_root,
coin1_sk_pos: self.coin1_sk_pos,
coin1_sk_merkle_path: self.coin1_sk_merkle_path.clone(),
coin1_blind: derived_blind,
}
}
/// Try to create a ZK proof of consensus leadership
pub fn create_lead_proof(
&self,
sigma1: pallas::Base,
sigma2: pallas::Base,
eta: pallas::Base,
slot: pallas::Base, //current slot index.
pk: &ProvingKey,
derived_blind: pallas::Scalar,
) -> (Result<Proof>, Vec<pallas::Base>) {
let (y_mu, rho_mu) = Self::election_seeds(eta, slot);
let bincode = include_bytes!("../../proof/lead.zk.bin");
let zkbin = ZkBinary::decode(bincode).unwrap();
let headstart = Self::headstart();
let coin1_commitment_merkle_path: [MerkleNode; MERKLE_DEPTH_LEAD_COIN] =
self.coin1_commitment_merkle_path.clone().try_into().unwrap();
let coin1_sk_merkle_path: [MerkleNode; MERKLE_DEPTH_LEAD_COIN] =
self.coin1_sk_merkle_path.clone().try_into().unwrap();
let witnesses = vec![
Witness::MerklePath(Value::known(coin1_commitment_merkle_path)),
Witness::Uint32(Value::known(self.coin1_commitment_pos)),
Witness::Uint32(Value::known(self.coin1_sk_pos)),
Witness::Base(Value::known(self.coin1_sk)),
Witness::Base(Value::known(self.coin1_sk_root.inner())),
Witness::MerklePath(Value::known(coin1_sk_merkle_path)),
Witness::Base(Value::known(pallas::Base::from(self.slot))),
Witness::Base(Value::known(self.nonce)),
Witness::Scalar(Value::known(self.coin1_blind)),
Witness::Base(Value::known(pallas::Base::from(self.value))),
Witness::Scalar(Value::known(derived_blind)),
Witness::Base(Value::known(rho_mu)),
Witness::Base(Value::known(y_mu)),
Witness::Base(Value::known(sigma1)),
Witness::Base(Value::known(sigma2)),
Witness::Base(Value::known(headstart)),
];
let circuit = ZkCircuit::new(witnesses, &zkbin);
let public_inputs = self.public_inputs(sigma1, sigma2, eta, slot, derived_blind);
(Ok(Proof::create(pk, &[circuit], &public_inputs, &mut OsRng).unwrap()), public_inputs)
}
#[allow(clippy::too_many_arguments)]
pub fn create_xfer_proof(
&self,
pk: &ProvingKey,
change_coin: TxRcpt,
change_pk: pallas::Base, //change coin public key
transfered_coin: TxRcpt,
transfered_pk: pallas::Base, // recipient coin's public key
sigma1: pallas::Base,
sigma2: pallas::Base,
current_eta: pallas::Base,
current_slot: pallas::Base,
derived_blind: pallas::Scalar,
) -> Result<TransferStx> {
assert!(change_coin.value + transfered_coin.value == self.value && self.value > 0);
let bincode = include_bytes!("../../proof/tx.zk.bin");
let zkbin = ZkBinary::decode(bincode)?;
let retval = pallas::Base::from(change_coin.value);
let xferval = pallas::Base::from(transfered_coin.value);
let pos: u32 = self.coin1_commitment_pos;
let value = pallas::Base::from(self.value);
let coin1_sk_merkle_path: [MerkleNode; MERKLE_DEPTH_LEAD_COIN] =
self.coin1_sk_merkle_path.clone().try_into().unwrap();
let coin1_commitment_merkle_path: [MerkleNode; MERKLE_DEPTH_LEAD_COIN] =
self.coin1_commitment_merkle_path.clone().try_into().unwrap();
let witnesses = vec![
// coin (1) burned coin
Witness::Base(Value::known(self.coin1_commitment_root.inner())),
Witness::Base(Value::known(self.coin1_sk_root.inner())),
Witness::Base(Value::known(self.coin1_sk)),
Witness::MerklePath(Value::known(coin1_sk_merkle_path)),
Witness::Uint32(Value::known(self.coin1_sk_pos)),
Witness::Base(Value::known(self.nonce)),
Witness::Scalar(Value::known(self.coin1_blind)),
Witness::Base(Value::known(value)),
Witness::MerklePath(Value::known(coin1_commitment_merkle_path)),
Witness::Uint32(Value::known(pos)),
Witness::Base(Value::known(self.sn())),
// coin (3)
Witness::Base(Value::known(change_pk)),
Witness::Base(Value::known(change_coin.rho)),
Witness::Scalar(Value::known(change_coin.opening)),
Witness::Base(Value::known(retval)),
// coin (4)
Witness::Base(Value::known(transfered_pk)),
Witness::Base(Value::known(transfered_coin.rho)),
Witness::Scalar(Value::known(transfered_coin.opening)),
Witness::Base(Value::known(xferval)),
];
let circuit = ZkCircuit::new(witnesses, &zkbin);
let proof = Proof::create(
pk,
&[circuit],
&self.public_inputs(sigma1, sigma2, current_eta, current_slot, derived_blind),
&mut OsRng,
)?;
let cm3_msg_in = [
pallas::Base::from(PREFIX_CM),
change_pk,
pallas::Base::from(change_coin.value),
change_coin.rho,
];
let cm3_msg = poseidon_hash(cm3_msg_in);
let cm3 = pedersen_commitment_base(cm3_msg, change_coin.opening);
let cm4_msg_in = [
pallas::Base::from(PREFIX_CM),
transfered_pk,
pallas::Base::from(transfered_coin.value),
transfered_coin.rho,
];
let cm4_msg = poseidon_hash(cm4_msg_in);
let cm4 = pedersen_commitment_base(cm4_msg, transfered_coin.opening);
let tx = TransferStx {
coin_commitment: self.coin1_commitment,
coin_pk: self.pk(),
coin_root_sk: self.coin1_sk_root,
change_coin_commitment: cm3,
transfered_coin_commitment: cm4,
nullifier: self.sn(),
slot: pallas::Base::from(self.slot),
root: self.coin1_commitment_root,
proof,
};
Ok(tx)
}
}
/// This struct holds the secrets for creating LeadCoins during one epoch.
pub struct LeadCoinSecrets {
pub secret_keys: Vec<SecretKey>,
pub merkle_roots: Vec<MerkleNode>,
pub merkle_paths: Vec<Vec<MerkleNode>>,
}
impl LeadCoinSecrets {
/// Generate epoch coins secret keys.
/// The first slot's coin secret key is sampled at random, while the secret keys of the
/// remaining slots are derived from the previous slot's secret.
/// Clarification:
/// ```plaintext
/// sk[0] -> random,
/// sk[1] -> derive_function(sk[0]),
/// ...
/// sk[n] -> derive_function(sk[n-1]),
/// ```
pub fn generate() -> Self {
let mut tree = MerkleTree::new(EPOCH_LENGTH);
let mut sks = Vec::with_capacity(EPOCH_LENGTH);
let mut root_sks = Vec::with_capacity(EPOCH_LENGTH);
let mut path_sks = Vec::with_capacity(EPOCH_LENGTH);
let mut prev_sk = SecretKey::from(pallas::Base::one());
for i in 0..EPOCH_LENGTH {
let secret = if i == 0 {
pedersen_commitment_u64(1, pallas::Scalar::random(&mut OsRng))
} else {
pedersen_commitment_u64(1, fp_mod_fv(prev_sk.inner()))
};
let secret_coords = secret.to_affine().coordinates().unwrap();
let secret_msg = [*secret_coords.x(), *secret_coords.y()];
let secret_key = SecretKey::from(poseidon_hash(secret_msg));
sks.push(secret_key);
prev_sk = secret_key;
let node = MerkleNode::from(secret_key.inner());
tree.append(node);
let leaf_pos = tree.mark().unwrap();
let root = tree.root(0).unwrap();
let path = tree.witness(leaf_pos, 0).unwrap();
root_sks.push(root);
path_sks.push(path);
}
Self { secret_keys: sks, merkle_roots: root_sks, merkle_paths: path_sks }
}
}

View File

@@ -1,111 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_sdk::{
crypto::{schnorr::Signature, Keypair, PublicKey},
pasta::pallas,
};
use darkfi_serial::{async_trait, SerialDecodable, SerialEncodable};
use log::error;
use crate::{
zk::proof::{Proof, VerifyingKey},
Result,
};
// TODO: Replace 'Lead' terms with 'Producer' to make it clearer that
// we refer to the block producer.
/// This struct represents [`Block`](super::Block) leader information used by the consensus protocol.
#[derive(Debug, Clone, PartialEq, Eq, SerialEncodable, SerialDecodable)]
pub struct LeadInfo {
/// Block producer signature
pub signature: Signature,
/// Block producer public_key
pub public_key: PublicKey, // TODO: remove this (to be derived by the proof)
/// Public inputs of the block producer's competing coins for the slot
pub public_inputs: Vec<pallas::Base>,
/// Leader coin creation slot
pub coin_slot: u64,
/// Leader coin creation eta
pub coin_eta: pallas::Base,
/// Leader NIZK proof
pub proof: LeadProof,
/// Block producer leaders count
pub leaders: u64,
}
impl Default for LeadInfo {
/// Default LeadInfo used in genesis block generation
fn default() -> Self {
let keypair = Keypair::default();
let signature = Signature::dummy();
let public_inputs = vec![];
let coin_slot = 0;
let coin_eta = pallas::Base::zero();
let proof = LeadProof::default();
let leaders = 0;
Self {
signature,
public_key: keypair.public,
public_inputs,
coin_slot,
coin_eta,
proof,
leaders,
}
}
}
impl LeadInfo {
#[allow(clippy::too_many_arguments)]
pub fn new(
signature: Signature,
public_key: PublicKey,
public_inputs: Vec<pallas::Base>,
coin_slot: u64,
coin_eta: pallas::Base,
proof: LeadProof,
leaders: u64,
) -> Self {
Self { signature, public_key, public_inputs, coin_slot, coin_eta, proof, leaders }
}
}
/// Wrapper over the Proof, for future additions.
#[derive(Default, Debug, Clone, PartialEq, Eq, SerialEncodable, SerialDecodable)]
pub struct LeadProof {
/// Leadership proof
pub proof: Proof,
}
impl LeadProof {
pub fn verify(&self, vk: &VerifyingKey, public_inputs: &[pallas::Base]) -> Result<()> {
if let Err(e) = self.proof.verify(vk, public_inputs) {
error!(target: "consensus::lead_info", "Verification of consensus lead proof failed: {}", e);
return Err(e.into())
}
Ok(())
}
}
impl From<Proof> for LeadProof {
fn from(proof: Proof) -> Self {
Self { proof }
}
}

View File

@@ -1,78 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
/// Block definition
pub mod block;
pub use block::{Block, BlockInfo, BlockProposal, Header};
/// Constants
pub mod constants;
pub use constants::{
TESTNET_BOOTSTRAP_TIMESTAMP, TESTNET_GENESIS_HASH_BYTES, TESTNET_GENESIS_TIMESTAMP,
TESTNET_INITIAL_DISTRIBUTION,
};
/// Consensus block leader information
pub mod lead_info;
pub use lead_info::{LeadInfo, LeadProof};
/// Consensus state
pub mod state;
/// Consensus validator state
pub mod validator;
pub use validator::{ValidatorState, ValidatorStatePtr};
/// Fee calculations
pub mod fees;
/// P2P net protocols
pub mod proto;
/// async tasks to utilize the protocols
pub mod task;
/// Lamport clock
pub mod clock;
pub use clock::{Clock, Ticks};
/// Consensus participation coin functions and definitions
pub mod lead_coin;
pub use lead_coin::LeadCoin;
/// Utility types
pub mod types;
pub use types::Float10;
/// Utility functions
pub mod utils;
/// Wallet functions
pub mod wallet;
/// Transferred transaction proof with public inputs
pub mod stx;
pub use stx::TransferStx;
/// Encrypted recipient coin info
pub mod rcpt;
pub use rcpt::{EncryptedTxRcpt, TxRcpt};
/// Transfer transaction
pub mod tx;
pub use tx::Tx;

View File

@@ -1,33 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
/// Block proposal protocol
mod protocol_proposal;
pub use protocol_proposal::ProtocolProposal;
/// Transaction broadcast protocol
mod protocol_tx;
pub use protocol_tx::ProtocolTx;
/// Validator + Replicator blockchain sync protocol
mod protocol_sync;
pub use protocol_sync::ProtocolSync;
/// Validator consensus sync protocol
mod protocol_sync_consensus;
pub use protocol_sync_consensus::ProtocolSyncConsensus;

View File

@@ -1,134 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::sync::Arc;
use async_trait::async_trait;
use log::{debug, error, trace};
use smol::Executor;
use url::Url;
use crate::{
consensus::{BlockProposal, ValidatorStatePtr},
net::{
ChannelPtr, MessageSubscription, P2pPtr, ProtocolBase, ProtocolBasePtr,
ProtocolJobsManager, ProtocolJobsManagerPtr,
},
Result,
};
pub struct ProtocolProposal {
proposal_sub: MessageSubscription<BlockProposal>,
jobsman: ProtocolJobsManagerPtr,
state: ValidatorStatePtr,
p2p: P2pPtr,
channel_address: Url,
}
impl ProtocolProposal {
pub async fn init(
channel: ChannelPtr,
state: ValidatorStatePtr,
p2p: P2pPtr,
) -> Result<ProtocolBasePtr> {
debug!(target: "consensus::protocol_proposal::init()", "Adding ProtocolProposal to the protocol registry");
let msg_subsystem = channel.message_subsystem();
msg_subsystem.add_dispatch::<BlockProposal>().await;
let proposal_sub = channel.subscribe_msg::<BlockProposal>().await?;
Ok(Arc::new(Self {
proposal_sub,
jobsman: ProtocolJobsManager::new("ProposalProtocol", channel.clone()),
state,
p2p,
channel_address: channel.address().clone(),
}))
}
async fn handle_receive_proposal(self: Arc<Self>) -> Result<()> {
debug!(target: "consensus::protocol_proposal::handle_receive_proposal()", "START");
let exclude_list = vec![self.channel_address.clone()];
loop {
let proposal = match self.proposal_sub.receive().await {
Ok(v) => v,
Err(e) => {
debug!(
target: "consensus::protocol_proposal::handle_receive_proposal()",
"recv fail: {}",
e
);
continue
}
};
debug!(
target: "consensus::protocol_proposal::handle_receive_proposal()",
"recv: {}", proposal);
trace!(
target: "consensus::protocol_proposal::handle_receive_proposal()",
"Full proposal: {:?}",
proposal
);
let proposal_copy = (*proposal).clone();
// Verify we have the proposal already
let mut lock = self.state.write().await;
if lock.consensus.proposal_exists(&proposal_copy.hash) {
debug!(
target: "consensus::protocol_proposal::handle_receive_proposal()",
"Proposal already received."
);
continue
}
match lock.receive_proposal(&proposal_copy, None).await {
Ok(broadcast) => {
if broadcast {
// Broadcast proposal to rest of nodes
self.p2p.broadcast_with_exclude(&proposal_copy, &exclude_list).await;
}
}
Err(e) => {
error!(
target: "consensus::protocol_proposal::handle_receive_proposal()",
"receive_proposal error: {}",
e
);
continue
}
}
}
}
}
#[async_trait]
impl ProtocolBase for ProtocolProposal {
async fn start(self: Arc<Self>, executor: Arc<Executor<'_>>) -> Result<()> {
debug!(target: "consensus::protocol_proposal::start()", "START");
self.jobsman.clone().start(executor.clone());
self.jobsman.clone().spawn(self.clone().handle_receive_proposal(), executor.clone()).await;
debug!(target: "consensus::protocol_proposal::start()", "END");
Ok(())
}
fn name(&self) -> &'static str {
"ProtocolProposal"
}
}
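For context, a protocol like this is instantiated per channel through the node's P2P protocol registry. A rough wiring sketch (hypothetical; the protocol_registry()/register() calls and the SESSION_ALL flag are assumptions about darkfi's net API and are not shown in this excerpt):

// Hypothetical registration: construct a ProtocolProposal for every new channel.
p2p.protocol_registry()
    .register(net::SESSION_ALL, move |channel, p2p| {
        let state = state.clone();
        async move { ProtocolProposal::init(channel, state, p2p).await.unwrap() }
    })
    .await;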

View File

@@ -1,374 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::sync::Arc;
use async_trait::async_trait;
use log::{debug, error, info};
use smol::Executor;
use darkfi_sdk::blockchain::Slot;
use crate::{
consensus::{
block::{BlockInfo, BlockOrder, BlockResponse},
state::{SlotRequest, SlotResponse},
ValidatorStatePtr,
},
net::{
ChannelPtr, MessageSubscription, P2pPtr, ProtocolBase, ProtocolBasePtr,
ProtocolJobsManager, ProtocolJobsManagerPtr,
},
Result,
};
// Constant defining how many blocks or slots we send per batch during syncing.
const BATCH: u64 = 10;
pub struct ProtocolSync {
channel: ChannelPtr,
request_sub: MessageSubscription<BlockOrder>,
slot_request_sub: MessageSubscription<SlotRequest>,
block_sub: MessageSubscription<BlockInfo>,
slots_sub: MessageSubscription<Slot>,
jobsman: ProtocolJobsManagerPtr,
state: ValidatorStatePtr,
p2p: P2pPtr,
consensus_mode: bool,
}
impl ProtocolSync {
pub async fn init(
channel: ChannelPtr,
state: ValidatorStatePtr,
p2p: P2pPtr,
consensus_mode: bool,
) -> Result<ProtocolBasePtr> {
let msg_subsystem = channel.message_subsystem();
msg_subsystem.add_dispatch::<BlockOrder>().await;
msg_subsystem.add_dispatch::<SlotRequest>().await;
msg_subsystem.add_dispatch::<BlockInfo>().await;
msg_subsystem.add_dispatch::<Slot>().await;
let request_sub = channel.subscribe_msg::<BlockOrder>().await?;
let slot_request_sub = channel.subscribe_msg::<SlotRequest>().await?;
let block_sub = channel.subscribe_msg::<BlockInfo>().await?;
let slots_sub = channel.subscribe_msg::<Slot>().await?;
Ok(Arc::new(Self {
channel: channel.clone(),
request_sub,
slot_request_sub,
block_sub,
slots_sub,
jobsman: ProtocolJobsManager::new("SyncProtocol", channel),
state,
p2p,
consensus_mode,
}))
}
async fn handle_receive_request(self: Arc<Self>) -> Result<()> {
debug!(
target: "consensus::protocol_sync::handle_receive_request()",
"START"
);
loop {
let order = match self.request_sub.receive().await {
Ok(v) => v,
Err(e) => {
debug!(
target: "consensus::protocol_sync::handle_receive_request()",
"recv fail: {}",
e
);
continue
}
};
debug!(
target: "consensus::protocol_sync::handle_receive_request()",
"received {:?}",
order
);
// Extra validations can be added here
/*
let key = order.slot;
let blocks = match self.state.read().await.blockchain.get_blocks_after(key, BATCH) {
Ok(v) => v,
Err(e) => {
error!(
target: "consensus::protocol_sync::handle_receive_request()",
"get_blocks_after fail: {}",
e
);
continue
}
};
debug!(
target: "consensus::protocol_sync::handle_receive_request()",
"Found {} blocks",
blocks.len()
);
*/
let blocks = vec![BlockInfo::default()];
let response = BlockResponse { blocks };
if let Err(e) = self.channel.send(&response).await {
error!(
target: "consensus::protocol_sync::handle_receive_request()",
"channel send fail: {}",
e
)
};
}
}
async fn handle_receive_block(self: Arc<Self>) -> Result<()> {
debug!(target: "consensus::protocol_sync::handle_receive_block()", "START");
let _exclude_list = [self.channel.address()];
loop {
let info = match self.block_sub.receive().await {
Ok(v) => v,
Err(e) => {
debug!(
target: "consensus::protocol_sync::handle_receive_block()",
"recv fail: {}",
e
);
continue
}
};
// Check if node has finished syncing its blockchain
if !self.state.read().await.synced {
debug!(
target: "consensus::protocol_sync::handle_receive_block()",
"Node still syncing blockchain, skipping..."
);
continue
}
// Check if node started participating in consensus.
// Consensus-mode enabled nodes have already performed these steps,
// during proposal finalization. They still listen to this sub,
// in case they go out of sync and become a non-consensus node.
if self.consensus_mode {
let lock = self.state.read().await;
let current = lock.consensus.time_keeper.current_slot();
let participating = lock.consensus.participating;
if let Some(slot) = participating {
if current >= slot {
debug!(
target: "consensus::protocol_sync::handle_receive_block()",
"node runs in consensus mode, skipping..."
);
continue
}
}
}
info!(
target: "consensus::protocol_sync::handle_receive_block()",
"Received block: {}",
info.blockhash()
);
debug!(
target: "consensus::protocol_sync::handle_receive_block()",
"Processing received block"
);
/*
let info_copy = (*info).clone();
match self.state.write().await.receive_finalized_block(info_copy.clone()).await {
Ok(v) => {
if v {
debug!(
target: "consensus::protocol_sync::handle_receive_block()",
"block processed successfully, broadcasting..."
);
self.p2p.broadcast_with_exclude(&info_copy, &exclude_list).await;
}
}
Err(e) => {
debug!(
target: "consensus::protocol_sync::handle_receive_block()",
"error processing finalized block: {}",
e
);
}
};
*/
}
}
async fn handle_receive_slot_request(self: Arc<Self>) -> Result<()> {
debug!(
target: "consensus::protocol_sync::handle_receive_slot_request()",
"START"
);
loop {
let request = match self.slot_request_sub.receive().await {
Ok(v) => v,
Err(e) => {
debug!(
target: "consensus::protocol_sync::handle_receive_slot_request()",
"recv fail: {}",
e
);
continue
}
};
debug!(
target: "consensus::protocol_sync::handle_receive_slot_request()",
"received {:?}",
request
);
// Extra validations can be added here
let key = request.slot;
let slots = match self.state.read().await.blockchain.get_slots_after(key, BATCH) {
Ok(v) => v,
Err(e) => {
error!(
target: "consensus::protocol_sync::handle_receive_slot_request()",
"get_slots_after fail: {}",
e
);
continue
}
};
debug!(
target: "consensus::protocol_sync::handle_receive_slot_request()",
"Found {} slots",
slots.len()
);
let response = SlotResponse { slots };
if let Err(e) = self.channel.send(&response).await {
error!(
target: "consensus::protocol_sync::handle_receive_slot_request()",
"channel send fail: {}",
e
)
};
}
}
async fn handle_receive_slot(self: Arc<Self>) -> Result<()> {
debug!(
target: "consensus::protocol_sync::handle_receive_slot()",
"START"
);
let exclude_list = vec![self.channel.address().clone()];
loop {
let slot = match self.slots_sub.receive().await {
Ok(v) => v,
Err(e) => {
debug!(
target: "consensus::protocol_sync::handle_receive_slot()",
"recv fail: {}",
e
);
continue
}
};
// Check if node has finished syncing its blockchain
if !self.state.read().await.synced {
debug!(
target: "consensus::protocol_sync::handle_receive_slot()",
"Node still syncing blockchain, skipping..."
);
continue
}
// Check if node started participating in consensus.
// Consensus-mode enabled nodes have already performed these steps,
// during proposal finalization. They still listen to this sub,
// in case they go out of sync and become a non-consensus node.
if self.consensus_mode {
let lock = self.state.read().await;
let current = lock.consensus.time_keeper.current_slot();
let participating = lock.consensus.participating;
if let Some(slot) = participating {
if current >= slot {
debug!(
target: "consensus::protocol_sync::handle_receive_slot()",
"node runs in consensus mode, skipping..."
);
continue
}
}
}
info!(
target: "consensus::protocol_sync::handle_receive_slot()",
"Received slot: {}",
slot.id
);
debug!(
target: "consensus::protocol_sync::handle_receive_slot()",
"Processing received slot"
);
let slot_copy = (*slot).clone();
match self.state.write().await.receive_finalized_slots(slot_copy.clone()).await {
Ok(v) => {
if v {
debug!(
target: "consensus::protocol_sync::handle_receive_slot()",
"slot processed successfully, broadcasting..."
);
self.p2p.broadcast_with_exclude(&slot_copy, &exclude_list).await;
}
}
Err(e) => {
debug!(
target: "consensus::protocol_sync::handle_receive_slot()",
"error processing finalized slot: {}",
e
);
}
};
}
}
}
#[async_trait]
impl ProtocolBase for ProtocolSync {
async fn start(self: Arc<Self>, executor: Arc<Executor<'_>>) -> Result<()> {
debug!(target: "consensus::protocol_sync::start()", "START");
self.jobsman.clone().start(executor.clone());
self.jobsman.clone().spawn(self.clone().handle_receive_request(), executor.clone()).await;
self.jobsman
.clone()
.spawn(self.clone().handle_receive_slot_request(), executor.clone())
.await;
self.jobsman.clone().spawn(self.clone().handle_receive_block(), executor.clone()).await;
self.jobsman.clone().spawn(self.clone().handle_receive_slot(), executor.clone()).await;
debug!(target: "consensus::protocol_sync::start()", "END");
Ok(())
}
fn name(&self) -> &'static str {
"ProtocolSync"
}
}

View File

@@ -1,206 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::sync::Arc;
use async_trait::async_trait;
use log::{debug, error};
use smol::Executor;
use crate::{
consensus::{
state::{ConsensusRequest, ConsensusResponse, ConsensusSyncRequest, ConsensusSyncResponse},
ValidatorStatePtr,
},
net::{
ChannelPtr, MessageSubscription, P2pPtr, ProtocolBase, ProtocolBasePtr,
ProtocolJobsManager, ProtocolJobsManagerPtr,
},
Result,
};
pub struct ProtocolSyncConsensus {
channel: ChannelPtr,
request_sub: MessageSubscription<ConsensusRequest>,
sync_request_sub: MessageSubscription<ConsensusSyncRequest>,
jobsman: ProtocolJobsManagerPtr,
state: ValidatorStatePtr,
}
impl ProtocolSyncConsensus {
pub async fn init(
channel: ChannelPtr,
state: ValidatorStatePtr,
_p2p: P2pPtr,
) -> Result<ProtocolBasePtr> {
let msg_subsystem = channel.message_subsystem();
msg_subsystem.add_dispatch::<ConsensusRequest>().await;
msg_subsystem.add_dispatch::<ConsensusSyncRequest>().await;
let request_sub = channel.subscribe_msg::<ConsensusRequest>().await?;
let sync_request_sub = channel.subscribe_msg::<ConsensusSyncRequest>().await?;
Ok(Arc::new(Self {
channel: channel.clone(),
request_sub,
sync_request_sub,
jobsman: ProtocolJobsManager::new("SyncConsensusProtocol", channel),
state,
}))
}
async fn handle_receive_request(self: Arc<Self>) -> Result<()> {
debug!(
target: "consensus::protocol_sync_consensus::handle_receive_request()",
"START"
);
loop {
let req = match self.request_sub.receive().await {
Ok(v) => v,
Err(e) => {
debug!(
target: "consensus::protocol_sync_consensus::handle_receive_request()",
"recv fail: {}",
e
);
continue
}
};
debug!(
target: "consensus::protocol_sync_consensus::handle_receive_request()",
"received {:?}",
req
);
// Extra validations can be added here.
let lock = self.state.read().await;
let bootstrap_slot = lock.consensus.bootstrap_slot;
let current_slot = lock.consensus.time_keeper.current_slot();
let mut forks = vec![];
for fork in &lock.consensus.forks {
forks.push(fork.clone().into());
}
let pending_txs = match lock.blockchain.get_pending_txs() {
Ok(v) => v,
Err(e) => {
debug!(
target: "consensus::protocol_sync_consensus::handle_receive_request()",
"Failed querying pending txs store: {}",
e
);
vec![]
}
};
let slots = lock.consensus.slots.clone();
let mut f_history = vec![];
for f in &lock.consensus.f_history {
let f_str = format!("{:}", f);
f_history.push(f_str);
}
let mut err_history = vec![];
for err in &lock.consensus.err_history {
let err_str = format!("{:}", err);
err_history.push(err_str);
}
let nullifiers = lock.consensus.nullifiers.clone();
let response = ConsensusResponse {
bootstrap_slot,
current_slot,
forks,
pending_txs,
slots,
f_history,
err_history,
nullifiers,
};
if let Err(e) = self.channel.send(&response).await {
error!(
target: "consensus::protocol_sync_consensus::handle_receive_request()",
"channel send fail: {}",
e
);
};
}
}
async fn handle_receive_sync_request(self: Arc<Self>) -> Result<()> {
debug!(
target: "consensus::protocol_sync_consensus::handle_receive_sync_request()",
"START"
);
loop {
let req = match self.sync_request_sub.receive().await {
Ok(v) => v,
Err(e) => {
debug!(
target: "consensus::protocol_sync_consensus::handle_receive_sync_request()",
"recv fail: {}",
e
);
continue
}
};
debug!(
target: "consensus::protocol_sync_consensus::handle_receive_sync_request()",
"received {:?}",
req
);
// Extra validations can be added here.
let lock = self.state.read().await;
let bootstrap_slot = lock.consensus.bootstrap_slot;
let proposing = lock.consensus.proposing;
let is_empty = lock.consensus.slots_is_empty();
let response = ConsensusSyncResponse { bootstrap_slot, proposing, is_empty };
if let Err(e) = self.channel.send(&response).await {
error!(
target: "consensus::protocol_sync_consensus::handle_receive_sync_request()",
"channel send fail: {}",
e
);
};
}
}
}
#[async_trait]
impl ProtocolBase for ProtocolSyncConsensus {
async fn start(self: Arc<Self>, executor: Arc<Executor<'_>>) -> Result<()> {
debug!(
target: "consensus::protocol_sync_consensus::start()",
"START"
);
self.jobsman.clone().start(executor.clone());
self.jobsman.clone().spawn(self.clone().handle_receive_request(), executor.clone()).await;
self.jobsman
.clone()
.spawn(self.clone().handle_receive_sync_request(), executor.clone())
.await;
debug!(
target: "consensus::protocol_sync_consensus::start()",
"END"
);
Ok(())
}
fn name(&self) -> &'static str {
"ProtocolSyncConsensus"
}
}

View File

@@ -1,122 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::sync::Arc;
use async_trait::async_trait;
use log::debug;
use smol::Executor;
use url::Url;
use crate::{
consensus::ValidatorStatePtr,
impl_p2p_message,
net::{
ChannelPtr, Message, MessageSubscription, P2pPtr, ProtocolBase, ProtocolBasePtr,
ProtocolJobsManager, ProtocolJobsManagerPtr,
},
tx::Transaction,
Result,
};
impl_p2p_message!(Transaction, "tx");
pub struct ProtocolTx {
tx_sub: MessageSubscription<Transaction>,
jobsman: ProtocolJobsManagerPtr,
state: ValidatorStatePtr,
p2p: P2pPtr,
channel_address: Url,
}
impl ProtocolTx {
pub async fn init(
channel: ChannelPtr,
state: ValidatorStatePtr,
p2p: P2pPtr,
) -> Result<ProtocolBasePtr> {
debug!(
target: "consensus::protocol_tx::init()",
"Adding ProtocolTx to the protocol registry"
);
let msg_subsystem = channel.message_subsystem();
msg_subsystem.add_dispatch::<Transaction>().await;
let tx_sub = channel.subscribe_msg::<Transaction>().await?;
Ok(Arc::new(Self {
tx_sub,
jobsman: ProtocolJobsManager::new("TxProtocol", channel.clone()),
state,
p2p,
channel_address: channel.address().clone(),
}))
}
async fn handle_receive_tx(self: Arc<Self>) -> Result<()> {
debug!(
target: "consensus::protocol_tx::handle_receive_tx()",
"START"
);
let exclude_list = vec![self.channel_address.clone()];
loop {
let tx = match self.tx_sub.receive().await {
Ok(v) => v,
Err(e) => {
debug!(
target: "consensus::protocol_tx::handle_receive_tx()",
"recv fail: {}",
e
);
continue
}
};
// Check if node has finished syncing its blockchain
if !self.state.read().await.synced {
debug!(
target: "consensus::protocol_tx::handle_receive_tx()",
"Node still syncing blockchain, skipping..."
);
continue
}
let tx_copy = (*tx).clone();
// Nodes use the unconfirmed_txs vector as a seen_txs pool.
if self.state.write().await.append_tx(tx_copy.clone()).await {
self.p2p.broadcast_with_exclude(&tx_copy, &exclude_list).await;
}
}
}
}
#[async_trait]
impl ProtocolBase for ProtocolTx {
async fn start(self: Arc<Self>, executor: Arc<Executor<'_>>) -> Result<()> {
debug!(target: "consensus::protocol_tx::start()", "START");
self.jobsman.clone().start(executor.clone());
self.jobsman.clone().spawn(self.clone().handle_receive_tx(), executor.clone()).await;
debug!(target: "consensus::protocol_tx::start()", "END");
Ok(())
}
fn name(&self) -> &'static str {
"ProtocolTx"
}
}

View File

@@ -1,95 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use crypto_api_chachapoly::ChachaPolyIetf;
use darkfi_sdk::{
crypto::{
diffie_hellman::{kdf_sapling, sapling_ka_agree},
keypair::PublicKey,
SecretKey,
},
pasta::pallas,
};
use darkfi_serial::{async_trait, Decodable, Encodable, SerialDecodable, SerialEncodable};
use rand::rngs::OsRng;
use crate::Error;
/// A transferred lead coin is split into two receipt (rcpt) coins:
/// the first is the transferred recipient coin,
/// the second is the change returned to the sender, or to a different address.
#[derive(Debug, Clone, Copy, Eq, PartialEq, SerialEncodable, SerialDecodable)]
pub struct TxRcpt {
/// rcpt coin nonce
pub rho: pallas::Base,
/// rcpt coin commitment opening
pub opening: pallas::Scalar,
/// rcpt coin value
pub value: u64,
}
pub const PLAINTEXT_SIZE: usize = 32 + 32 + 8;
pub const AEAD_TAG_SIZE: usize = 16;
pub const CIPHER_SIZE: usize = PLAINTEXT_SIZE + AEAD_TAG_SIZE;
impl TxRcpt {
/// Encrypt the receipt coin to the recipient's public key
pub fn encrypt(&self, public: &PublicKey) -> EncryptedTxRcpt {
let ephem_secret = SecretKey::random(&mut OsRng);
let ephem_public = PublicKey::from_secret(ephem_secret);
let shared_secret = sapling_ka_agree(&ephem_secret, public);
let key = kdf_sapling(&shared_secret, &ephem_public);
let mut input = Vec::new();
self.encode(&mut input).unwrap();
let mut ciphertext = [0u8; CIPHER_SIZE];
assert_eq!(
ChachaPolyIetf::aead_cipher()
.seal_to(&mut ciphertext, &input, &[], key.as_ref(), &[0u8; 12])
.unwrap(),
CIPHER_SIZE
);
EncryptedTxRcpt { ciphertext, ephem_public }
}
}
#[derive(Debug, Clone, PartialEq, Eq, SerialEncodable, SerialDecodable)]
pub struct EncryptedTxRcpt {
ciphertext: [u8; CIPHER_SIZE],
ephem_public: PublicKey,
}
impl EncryptedTxRcpt {
pub fn decrypt(&self, secret: &SecretKey) -> TxRcpt {
let shared_secret = sapling_ka_agree(secret, &self.ephem_public);
let key = kdf_sapling(&shared_secret, &self.ephem_public);
let mut plaintext = [0; CIPHER_SIZE];
assert_eq!(
ChachaPolyIetf::aead_cipher()
.open_to(&mut plaintext, &self.ciphertext, &[], key.as_ref(), &[0u8; 12])
.map_err(|_| Error::TxRcptDecryptionError)
.unwrap(),
PLAINTEXT_SIZE
);
TxRcpt::decode(&plaintext[..]).unwrap()
}
}
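A minimal usage sketch of the encrypt/decrypt round trip (hypothetical, assuming only the types and helpers already imported at the top of this file):

use darkfi_sdk::{
    crypto::{keypair::PublicKey, SecretKey},
    pasta::pallas,
};
use rand::rngs::OsRng;

// Hypothetical round trip: encrypt a receipt coin to a recipient key, then decrypt it.
fn rcpt_roundtrip() {
    let recipient_secret = SecretKey::random(&mut OsRng);
    let recipient_public = PublicKey::from_secret(recipient_secret);

    let rcpt = TxRcpt {
        rho: pallas::Base::from(42),
        opening: pallas::Scalar::from(7),
        value: 1000,
    };

    let encrypted = rcpt.encrypt(&recipient_public);
    assert_eq!(encrypted.decrypt(&recipient_secret), rcpt);
}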

View File

@@ -1,898 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_sdk::{
blockchain::{PidOutput, PreviousSlot, Slot},
crypto::MerkleTree,
pasta::{group::ff::PrimeField, pallas},
};
use darkfi_serial::{async_trait, deserialize, serialize, SerialDecodable, SerialEncodable};
use log::info;
use rand::{thread_rng, Rng};
use super::{
constants,
lead_coin::{LeadCoin, LeadCoinSecrets},
utils::fbig2base,
Block, BlockProposal, Float10,
};
use crate::{
blockchain::Blockchain,
impl_p2p_message,
net::Message,
tx::Transaction,
util::time::{TimeKeeper, Timestamp},
wallet::WalletPtr,
Error, Result,
};
use std::{
fs::File,
io::{prelude::*, BufWriter},
};
/// This struct represents the information required by the consensus algorithm
pub struct ConsensusState {
/// Wallet interface
pub wallet: WalletPtr,
/// Canonical (finalized) blockchain
pub blockchain: Blockchain,
/// Network bootstrap timestamp
pub bootstrap_ts: Timestamp,
/// Helper structure to calculate time related operations
pub time_keeper: TimeKeeper,
/// Genesis block hash
pub genesis_block: blake3::Hash,
/// Total sum of initial staking coins
pub initial_distribution: u64,
/// Flag to enable single-node mode
pub single_node: bool,
/// Slot the network was bootstrapped
pub bootstrap_slot: u64,
/// Participating start slot
pub participating: Option<u64>,
/// Node is able to propose proposals
pub proposing: bool,
/// Last slot node check for finalization
pub checked_finalization: u64,
/// Fork chains containing block proposals
pub forks: Vec<Fork>,
/// Current epoch
pub epoch: u64,
/// Hot/live slots
pub slots: Vec<Slot>,
/// Last slot leaders count
pub previous_leaders: u64,
/// Controller output history
pub f_history: Vec<Float10>,
/// Controller proportional error history
pub err_history: Vec<Float10>,
// TODO: Aren't these already in db after finalization?
/// Canonical competing coins
pub coins: Vec<LeadCoin>,
/// Canonical coin commitments tree
pub coins_tree: MerkleTree,
/// Canonical seen nullifiers from proposals
pub nullifiers: Vec<pallas::Base>,
}
impl ConsensusState {
pub fn new(
wallet: WalletPtr,
blockchain: Blockchain,
bootstrap_ts: Timestamp,
genesis_ts: Timestamp,
genesis_data: blake3::Hash,
initial_distribution: u64,
single_node: bool,
) -> Self {
let genesis_block = Block::genesis_block(genesis_ts, genesis_data).blockhash();
let time_keeper =
TimeKeeper::new(genesis_ts, constants::EPOCH_LENGTH as u64, constants::SLOT_TIME, 0);
Self {
wallet,
blockchain,
bootstrap_ts,
time_keeper,
genesis_block,
initial_distribution,
single_node,
bootstrap_slot: 0,
participating: None,
proposing: false,
checked_finalization: 0,
forks: vec![],
epoch: 0,
slots: vec![],
previous_leaders: 0,
f_history: vec![constants::FLOAT10_ZERO.clone()],
err_history: vec![constants::FLOAT10_ZERO.clone(), constants::FLOAT10_ZERO.clone()],
coins: vec![],
coins_tree: MerkleTree::new(constants::EPOCH_LENGTH * 100),
nullifiers: vec![],
}
}
/// Finds the last slot in which a proposal or block was generated.
pub fn last_slot(&self) -> Result<u64> {
let mut slot = 0;
for chain in &self.forks {
for state_checkpoint in &chain.sequence {
if state_checkpoint.proposal.block.header.slot > slot {
slot = state_checkpoint.proposal.block.header.slot;
}
}
}
// We return here in case proposals exist,
// so we don't query the sled database.
if slot > 0 {
return Ok(slot)
}
let (last_slot, _) = self.blockchain.last()?;
Ok(last_slot)
}
/// Set participating slot to next.
pub fn set_participating(&mut self) -> Result<()> {
self.participating = Some(self.time_keeper.current_slot() + 1);
Ok(())
}
/// Generate current slot
fn generate_slot(
&mut self,
fork_hashes: Vec<blake3::Hash>,
fork_previous_hashes: Vec<blake3::Hash>,
sigma1: pallas::Base,
sigma2: pallas::Base,
) {
let id = self.time_keeper.current_slot();
let previous = PreviousSlot::new(0, fork_hashes, fork_previous_hashes, 0.0);
let pid = PidOutput::new(0.0, 0.0, sigma1, sigma2);
let slot = Slot::new(id, previous, pid, self.get_last_eta(), 0, 0);
info!(target: "consensus::state", "generate_slot: {:?}", slot);
self.slots.push(slot);
}
// Initialize node lead coins and set current epoch and eta.
pub async fn init_coins(&mut self) -> Result<()> {
self.epoch = self.time_keeper.current_epoch();
self.coins = self.create_coins().await?;
self.update_forks_checkpoints();
Ok(())
}
/// Check if a new epoch has started and generate the current slot.
/// Returns a flag signifying whether the epoch has changed.
pub async fn epoch_changed(
&mut self,
fork_hashes: Vec<blake3::Hash>,
fork_previous_hashes: Vec<blake3::Hash>,
sigma1: pallas::Base,
sigma2: pallas::Base,
) -> Result<bool> {
self.generate_slot(fork_hashes, fork_previous_hashes, sigma1, sigma2);
let epoch = self.time_keeper.current_epoch();
if epoch <= self.epoch {
return Ok(false)
}
self.epoch = epoch;
Ok(true)
}
/// Return the 2-term target approximation sigma coefficients.
pub fn sigmas(&mut self) -> (pallas::Base, pallas::Base) {
let f = self.win_inv_prob_with_full_stake();
let total_stake = self.total_stake();
let total_sigma = Float10::try_from(total_stake).unwrap();
self.calc_sigmas(f, total_sigma)
}
fn calc_sigmas(&self, f: Float10, total_sigma: Float10) -> (pallas::Base, pallas::Base) {
info!(target: "consensus::state", "sigmas(): f: {}", f);
info!(target: "consensus::state", "sigmas(): total network stake: {:}", total_sigma);
let one = constants::FLOAT10_ONE.clone();
let neg_one = constants::FLOAT10_NEG_ONE.clone();
let two = constants::FLOAT10_TWO.clone();
let field_p = Float10::try_from(constants::P).unwrap();
let x = one - f;
let c = x.ln();
let neg_c = neg_one * c;
let sigma1_fbig = neg_c.clone() /
(total_sigma.clone() + constants::FLOAT10_EPSILON.clone()) *
field_p.clone();
info!(target: "consensus::state", "sigma1_fbig: {:}", sigma1_fbig);
let sigma1 = fbig2base(sigma1_fbig);
let sigma2_fbig = (neg_c / (total_sigma + constants::FLOAT10_EPSILON.clone()))
.powf(two.clone()) *
(field_p / two);
info!(target: "consensus::state", "sigma2_fbig: {:}", sigma2_fbig);
let sigma2 = fbig2base(sigma2_fbig);
(sigma1, sigma2)
}
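In closed form, calc_sigmas computes the magnitudes of the first- and second-order coefficients of the series expansion of the lottery target function T(v) = p·(1 − (1 − f)^{v/Σ}), where p is the field modulus constant, Σ the total network stake, and ε a small constant guarding against division by zero:

\[
c = \ln(1 - f), \qquad
\sigma_1 = \frac{-c}{\Sigma + \epsilon}\, p, \qquad
\sigma_2 = \left(\frac{-c}{\Sigma + \epsilon}\right)^{2} \frac{p}{2}
\]

How the two coefficients are combined into a per-coin winning target is handled by the lottery check itself (LeadCoin::is_leader), which is not part of this excerpt.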
/// Generate the node's competing coins for the provided sigmas.
/// NOTE: The strategy here is to have a single competing coin per slot.
// TODO: The DRK coin needs to be burned, and a consensus coin minted.
async fn create_coins(&mut self) -> Result<Vec<LeadCoin>> {
// TODO: cleanup LeadCoinSecrets, no need to keep a vector
let (seeds, epoch_secrets) = {
let mut rng = thread_rng();
let mut seeds: Vec<u64> = Vec::with_capacity(constants::EPOCH_LENGTH);
for _ in 0..constants::EPOCH_LENGTH {
seeds.push(rng.gen());
}
(seeds, LeadCoinSecrets::generate())
};
// LeadCoin matrix containing node competing coins.
let mut coins: Vec<LeadCoin> = Vec::with_capacity(constants::EPOCH_LENGTH);
// Retrieve coin from wallet
// NOTE: In the future this will be retrieved from the money contract.
// Execute the query and see if we find any rows
let query_str = format!("SELECT * FROM {}", constants::CONSENSUS_COIN_TABLE);
let wallet_conn = self.wallet.conn.lock().await;
let mut stmt = wallet_conn.prepare(&query_str)?;
let coin = stmt.query_row((), |row| {
let bytes: Vec<u8> = row.get(constants::CONSENSUS_COIN_COL)?;
let coin = deserialize(&bytes).unwrap();
Ok(coin)
});
stmt.finalize()?;
let coin = match coin {
Ok(c) => c,
Err(_) => {
// If no records are found, we generate a new coin and save it to the database
info!(target: "consensus::state", "create_coins(): No LeadCoin was found in DB, generating new one...");
// Temporarily, we compete with a fixed stake.
// This stake should be based on how many nodes we want to run, and
// their sum must equal the initial distribution's total coins.
//let stake = self.initial_distribution;
let c = LeadCoin::new(
0,
self.time_keeper.current_slot(),
epoch_secrets.secret_keys[0].inner(),
epoch_secrets.merkle_roots[0],
0,
epoch_secrets.merkle_paths[0].clone(),
pallas::Base::from(seeds[0]),
&mut self.coins_tree,
);
let query_str = format!(
"INSERT INTO {} ({}) VALUES (?1);",
constants::CONSENSUS_COIN_TABLE,
constants::CONSENSUS_COIN_COL
);
let mut stmt = wallet_conn.prepare(&query_str)?;
stmt.execute([serialize(&c)])?;
c
}
};
info!(target: "consensus::state", "create_coins(): Will use LeadCoin with value: {}", coin.value);
coins.push(coin);
Ok(coins)
}
/// Leadership reward, assuming a constant reward.
/// TODO (res): implement the reward mechanism in accordance with DRK/DARK token economics.
fn reward(&self) -> u64 {
constants::REWARD
}
/// Auxiliary function to calculate total slot rewards.
fn slot_rewards(&self) -> u64 {
// Retrieve existing blocks excluding genesis
let blocks = (self.blockchain.len() as u64) - 1;
// Retrieve longest fork length, to include those proposals in the calculation
let max_fork_length = self.longest_chain_length() as u64;
// Calculate rewarded slots
let rewarded_slots = blocks + max_fork_length;
rewarded_slots * self.reward()
}
/// Network total stake, assuming a constant reward.
/// Only used for fine-tuning. At the first slot of the genesis epoch (absolute index 0),
/// if no stake was distributed, the total stake would be 0.
/// To avoid division by zero, we assume the total stake at the first division is GENESIS_TOTAL_STAKE (1).
fn total_stake(&self) -> u64 {
let total_stake = self.slot_rewards() + self.initial_distribution;
if total_stake == 0 {
return constants::GENESIS_TOTAL_STAKE
}
total_stake
}
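Putting slot_rewards and total_stake together, with R the constant REWARD, N the number of blocks on the canonical chain (including genesis), L the longest fork length, and D₀ the initial distribution, the total stake used for fine-tuning is:

\[
\Sigma = R\,\bigl((N - 1) + L\bigr) + D_0
\]

falling back to GENESIS_TOTAL_STAKE (1) whenever that sum is zero.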
fn f_err(&mut self) -> Float10 {
info!(target: "consensus::state", "Previous leaders: {}", self.previous_leaders);
// Write counter to file
let mut count_str: String = self.previous_leaders.to_string();
count_str.push(',');
let f =
File::options().append(true).create(true).open(constants::LEADER_HISTORY_LOG).unwrap();
{
let mut writer = BufWriter::new(f);
let _ = writer.write(&count_str.into_bytes()).unwrap();
}
// Calculate feedback
let feedback = Float10::try_from(self.previous_leaders as i64).unwrap();
// Reset previous leaders counter
self.previous_leaders = 0;
let target = constants::FLOAT10_ONE.clone();
target - feedback
}
fn discrete_pid(&mut self) -> Float10 {
let k1 = constants::KP.clone() + constants::KI.clone() + constants::KD.clone();
let k2 = constants::FLOAT10_NEG_ONE.clone() * constants::KP.clone() +
constants::FLOAT10_NEG_TWO.clone() * constants::KD.clone();
let k3 = constants::KD.clone();
let f_len = self.f_history.len();
let err = self.f_err();
let err_len = self.err_history.len();
let ret = self.f_history[f_len - 1].clone() +
k1.clone() * err.clone() +
k2.clone() * self.err_history[err_len - 1].clone() +
k3.clone() * self.err_history[err_len - 2].clone();
info!(target: "consensus::state", "pid::f-1: {:}", self.f_history[f_len - 1].clone());
info!(target: "consensus::state", "pid::err: {:}", err);
info!(target: "consensus::state", "pid::err-1: {}", self.err_history[err_len - 1].clone());
info!(target: "consensus::state", "pid::err-2: {}", self.err_history[err_len - 2].clone());
info!(target: "consensus::state", "pid::k1: {}", k1);
info!(target: "consensus::state", "pid::k2: {}", k2);
info!(target: "consensus::state", "pid::k3: {}", k3);
self.err_history.push(err);
ret
}
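The update implemented above is the closed form of a discrete PID controller; with e_k the error produced by f_err (a target of one leader per slot minus the observed leader count), it reads:

\[
f_k = f_{k-1} + k_1 e_k + k_2 e_{k-1} + k_3 e_{k-2}, \qquad
k_1 = K_p + K_i + K_d, \quad
k_2 = -K_p - 2K_d, \quad
k_3 = K_d
\]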
/// The inverse probability of winning the lottery when holding all the stake.
/// Returns f.
fn win_inv_prob_with_full_stake(&mut self) -> Float10 {
let mut f = self.discrete_pid();
if f <= constants::FLOAT10_ZERO.clone() {
f = constants::MIN_F.clone()
} else if f >= constants::FLOAT10_ONE.clone() {
f = constants::MAX_F.clone()
}
// log f history
let file =
File::options().append(true).create(true).open(constants::F_HISTORY_LOG).unwrap();
{
let mut f_history = format!("{:}", f);
f_history.push(',');
let mut writer = BufWriter::new(file);
let _ = writer.write(&f_history.into_bytes()).unwrap();
}
self.f_history.push(f.clone());
f
}
/// Check that the participant/stakeholder coins win the slot lottery.
/// If the stakeholder has multiple competing winning coins, only the highest value
/// coin is selected, since the stakeholder can't give more than one proof per block/slot.
/// * 'sigma1', 'sigma2': slot sigmas
/// Returns: (won: bool, fork_index: i64, idx: usize) where idx is the winning coin's index
pub fn is_slot_leader(
&mut self,
sigma1: pallas::Base,
sigma2: pallas::Base,
) -> (bool, i64, usize) {
// Check if node can produce proposals
if !self.proposing {
return (false, 0, 0)
}
let fork_index = self.longest_chain_index();
let competing_coins = if fork_index == -1 {
self.coins.clone()
} else {
self.forks[fork_index as usize].sequence.last().unwrap().coins.clone()
};
// If on single-node mode, node always proposes by extending the
// single fork it holds.
if self.single_node {
return (true, fork_index, 0)
}
let mut won = false;
let mut highest_stake = 0;
let mut highest_stake_idx = 0;
let total_stake = self.total_stake();
for (winning_idx, coin) in competing_coins.iter().enumerate() {
info!(target: "consensus::state", "is_slot_leader: coin stake: {:?}", coin.value);
info!(target: "consensus::state", "is_slot_leader: total stake: {}", total_stake);
info!(target: "consensus::state", "is_slot_leader: relative stake: {}", (coin.value as f64) / total_stake as f64);
let first_winning = coin.is_leader(
sigma1,
sigma2,
self.get_last_eta(),
pallas::Base::from(self.time_keeper.current_slot()),
);
if first_winning && !won {
highest_stake_idx = winning_idx;
}
won |= first_winning;
if won && coin.value > highest_stake {
highest_stake = coin.value;
highest_stake_idx = winning_idx;
}
}
(won, fork_index, highest_stake_idx)
}
/// Finds the longest forkchain the node holds and
/// returns its index.
pub fn longest_chain_index(&self) -> i64 {
let mut length = 0;
let mut index = -1;
if !self.forks.is_empty() {
for (i, chain) in self.forks.iter().enumerate() {
if chain.sequence.len() > length {
length = chain.sequence.len();
index = i as i64;
}
}
}
index
}
/// Finds the length of the longest fork chain the node holds.
pub fn longest_chain_length(&self) -> usize {
let mut max = 0;
for fork in &self.forks {
if fork.sequence.len() > max {
max = fork.sequence.len();
}
}
max
}
/// Given a proposal, find the index of the fork chain it extends.
pub fn find_extended_chain_index(&mut self, proposal: &BlockProposal) -> Result<i64> {
// We iterate through all forks to find which fork to extend
let mut chain_index = -1;
let mut state_checkpoint_index = 0;
for (c_index, chain) in self.forks.iter().enumerate() {
// Traverse sequence in reverse
for (sc_index, state_checkpoint) in chain.sequence.iter().enumerate().rev() {
if proposal.block.header.previous == state_checkpoint.proposal.hash {
chain_index = c_index as i64;
state_checkpoint_index = sc_index;
break
}
}
if chain_index != -1 {
break
}
}
// If no fork was found, we check with canonical
if chain_index == -1 {
let (last_slot, last_block) = self.blockchain.last()?;
if proposal.block.header.previous != last_block ||
proposal.block.header.slot <= last_slot
{
info!(target: "consensus::state", "find_extended_chain_index(): Proposal doesn't extend any known chain");
return Ok(-2)
}
// Proposal extends canonical chain
return Ok(-1)
}
// Found fork chain
let chain = &self.forks[chain_index as usize];
// Proposal extends fork at last proposal
if state_checkpoint_index == (chain.sequence.len() - 1) {
return Ok(chain_index)
}
info!(target: "consensus::state", "find_extended_chain_index(): Proposal to fork a forkchain was received.");
let mut chain = self.forks[chain_index as usize].clone();
// We keep all proposals until the one it extends
chain.sequence.drain((state_checkpoint_index + 1)..);
self.forks.push(chain);
Ok(self.forks.len() as i64 - 1)
}
/// Search the chains we're holding for the given proposal.
pub fn proposal_exists(&self, input_proposal: &blake3::Hash) -> bool {
for chain in self.forks.iter() {
for state_checkpoint in chain.sequence.iter().rev() {
if input_proposal == &state_checkpoint.proposal.hash {
return true
}
}
}
false
}
/// Utility function to extract the leader selection lottery randomness (eta),
/// defined as the hash of the last block, converted to a pallas base field element.
pub fn get_last_eta(&self) -> pallas::Base {
let (_, hash) = self.blockchain.last().unwrap();
let mut bytes: [u8; 32] = *hash.as_bytes();
// Zero the two most significant bytes so the value fits within the base field
bytes[30] = 0;
bytes[31] = 0;
pallas::Base::from_repr(bytes).unwrap()
}
/// Auxiliary function to retrieve the slot with the provided slot UID.
pub fn get_slot(&self, id: u64) -> Result<Slot> {
// Check hot/live slots
for slot in self.slots.iter().rev() {
if slot.id == id {
return Ok(slot.clone())
}
}
// Check if slot is finalized
if let Ok(slots) = self.blockchain.get_slots_by_id(&[id]) {
if !slots.is_empty() {
if let Some(known_slot) = &slots[0] {
return Ok(known_slot.clone())
}
}
}
Err(Error::SlotNotFound(id))
}
/// Auxiliary function to check if the node has seen the current or previous slots.
/// This check ensures that either the slots exist in memory or the node has seen their finalization.
pub fn slots_is_empty(&self) -> bool {
let current_slot = self.time_keeper.current_slot();
if self.get_slot(current_slot).is_ok() {
return false
}
let previous_slot = current_slot - 1;
self.get_slot(previous_slot).is_err()
}
/// Auxiliary function to update all fork state checkpoints to the node's current canonical coin state.
/// Note: This function should only be invoked once, when the node's coins are created.
pub fn update_forks_checkpoints(&mut self) {
for fork in &mut self.forks {
for state_checkpoint in &mut fork.sequence {
state_checkpoint.coins = self.coins.clone();
state_checkpoint.coins_tree = self.coins_tree.clone();
}
}
}
/// Retrieve the current forks' last proposal hashes and their previous
/// hashes. If the node holds no fork, return the genesis block hash.
pub fn fork_hashes(&self) -> (Vec<blake3::Hash>, Vec<blake3::Hash>) {
let mut hashes = vec![];
let mut previous_hashes = vec![];
for fork in &self.forks {
let proposal = &fork.sequence.last().unwrap().proposal;
hashes.push(proposal.hash);
previous_hashes.push(proposal.block.header.previous);
}
if hashes.is_empty() {
hashes.push(self.genesis_block);
previous_hashes.push(self.genesis_block);
}
(hashes, previous_hashes)
}
/// Auxiliary function to reset the consensus state for a resync
pub fn reset(&mut self) {
self.participating = None;
self.proposing = false;
self.forks = vec![];
self.slots = vec![];
self.previous_leaders = 0;
self.f_history = vec![constants::FLOAT10_ZERO.clone()];
self.err_history = vec![constants::FLOAT10_ZERO.clone(), constants::FLOAT10_ZERO.clone()];
self.nullifiers = vec![];
}
}
/// Auxiliary structure used for consensus syncing.
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct ConsensusRequest {}
impl_p2p_message!(ConsensusRequest, "consensusrequest");
/// Auxiliary structure used for consensus syncing.
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct ConsensusResponse {
/// Slot the network was bootstrapped
pub bootstrap_slot: u64,
/// Current slot
pub current_slot: u64,
/// Hot/live data used by the consensus algorithm
pub forks: Vec<ForkInfo>,
/// Pending transactions
pub pending_txs: Vec<Transaction>,
/// Hot/live slots
pub slots: Vec<Slot>,
// TODO: When Float10 supports encoding/decoding this should be
// replaced by directly using Vec<Float10>
/// Controller output history
pub f_history: Vec<String>,
/// Controller proportional error history
pub err_history: Vec<String>,
/// Seen nullifiers from proposals
pub nullifiers: Vec<pallas::Base>,
}
impl_p2p_message!(ConsensusResponse, "consensusresponse");
/// Auxiliary structure used for consensus syncing.
#[derive(Debug, SerialEncodable, SerialDecodable)]
pub struct ConsensusSyncRequest {}
impl_p2p_message!(ConsensusSyncRequest, "consensussyncrequest");
/// Auxiliary structure used for consensus syncing.
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct ConsensusSyncResponse {
/// Node known bootstrap slot
pub bootstrap_slot: u64,
/// Node is able to propose proposals
pub proposing: bool,
/// Whether the node's hot/live slots are empty
pub is_empty: bool,
}
impl_p2p_message!(ConsensusSyncResponse, "consensussyncresponse");
impl_p2p_message!(Slot, "slot");
/// Auxiliary structure used for slots syncing
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct SlotRequest {
/// Slot UID
pub slot: u64,
}
impl_p2p_message!(SlotRequest, "slotrequest");
/// Auxiliary structure used for slots syncing
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct SlotResponse {
/// Response slots.
pub slots: Vec<Slot>,
}
impl_p2p_message!(SlotResponse, "slotresponse");
/// Auxiliary structure used to keep track of consensus state checkpoints.
#[derive(Debug, Clone)]
pub struct StateCheckpoint {
/// Block proposal
pub proposal: BlockProposal,
/// Node competing coins current state
pub coins: Vec<LeadCoin>,
/// Coin commitments tree current state
pub coins_tree: MerkleTree,
/// Seen nullifiers from proposals current state
pub nullifiers: Vec<pallas::Base>,
}
impl StateCheckpoint {
pub fn new(
proposal: BlockProposal,
coins: Vec<LeadCoin>,
coins_tree: MerkleTree,
nullifiers: Vec<pallas::Base>,
) -> Self {
Self { proposal, coins, coins_tree, nullifiers }
}
}
/// Auxiliary structure used for forked consensus state checkpoints syncing
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct StateCheckpointInfo {
/// Block proposal
pub proposal: BlockProposal,
/// Seen nullifiers from proposals current state
pub nullifiers: Vec<pallas::Base>,
}
impl From<StateCheckpoint> for StateCheckpointInfo {
fn from(state_checkpoint: StateCheckpoint) -> Self {
Self { proposal: state_checkpoint.proposal, nullifiers: state_checkpoint.nullifiers }
}
}
impl From<StateCheckpointInfo> for StateCheckpoint {
fn from(state_checkpoint_info: StateCheckpointInfo) -> Self {
Self {
proposal: state_checkpoint_info.proposal,
coins: vec![],
coins_tree: MerkleTree::new(constants::EPOCH_LENGTH * 100),
nullifiers: state_checkpoint_info.nullifiers,
}
}
}
/// This struct represents a sequence of consensus state checkpoints.
#[derive(Debug, Clone)]
pub struct Fork {
pub genesis_block: blake3::Hash,
pub sequence: Vec<StateCheckpoint>,
}
impl Fork {
pub fn new(genesis_block: blake3::Hash, initial_state_checkpoint: StateCheckpoint) -> Self {
Self { genesis_block, sequence: vec![initial_state_checkpoint] }
}
/// Insertion of a valid state checkpoint.
pub fn add(&mut self, state_checkpoint: &StateCheckpoint) {
if self.check_state_checkpoint(state_checkpoint, self.sequence.last().unwrap()) {
self.sequence.push(state_checkpoint.clone());
}
}
/// A fork chain is considered valid when every state checkpoint is valid,
/// based on the `check_state_checkpoint` function
pub fn check_chain(&self) -> bool {
for (index, state_checkpoint) in self.sequence[1..].iter().enumerate() {
if !self.check_state_checkpoint(state_checkpoint, &self.sequence[index]) {
return false
}
}
true
}
/// A state checkpoint is considered valid when its proposal's parent hash is equal to the
/// hash of the previous checkpoint's proposal and its slot strictly increases over the previous one,
/// excluding the genesis block proposal.
pub fn check_state_checkpoint(
&self,
state_checkpoint: &StateCheckpoint,
previous: &StateCheckpoint,
) -> bool {
if state_checkpoint.proposal.block.header.previous == self.genesis_block {
info!(target: "consensus::state", "check_checkpoint(): Genesis block proposal provided.");
return false
}
if state_checkpoint.proposal.block.header.previous != previous.proposal.hash ||
state_checkpoint.proposal.block.header.slot <= previous.proposal.block.header.slot
{
info!(target: "consensus::state", "check_checkpoint(): Provided state checkpoint proposal is invalid.");
return false
}
// TODO: validate rest checkpoint info(like nullifiers)
true
}
}
/// Auxiliary structure used for forks syncing
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct ForkInfo {
pub genesis_block: blake3::Hash,
pub sequence: Vec<StateCheckpointInfo>,
}
impl From<Fork> for ForkInfo {
fn from(fork: Fork) -> Self {
let mut sequence = vec![];
for state_checkpoint in fork.sequence {
sequence.push(state_checkpoint.into());
}
Self { genesis_block: fork.genesis_block, sequence }
}
}
impl From<ForkInfo> for Fork {
fn from(fork_info: ForkInfo) -> Self {
let mut sequence = vec![];
for checkpoint in fork_info.sequence {
sequence.push(checkpoint.into());
}
Self { genesis_block: fork_info.genesis_block, sequence }
}
}
#[cfg(test)]
mod tests {
use crate::{
consensus::{
state::{Blockchain, ConsensusState},
utils::fbig2base,
Float10, TESTNET_BOOTSTRAP_TIMESTAMP, TESTNET_GENESIS_HASH_BYTES,
TESTNET_GENESIS_TIMESTAMP, TESTNET_INITIAL_DISTRIBUTION,
},
wallet::WalletDb,
};
#[test]
fn calc_sigmas_test() {
smol::block_on(async {
// Generate dummy state
let wallet = WalletDb::new(None, None).unwrap();
let sled_db = sled::Config::new().temporary(true).open().unwrap();
let blockchain = Blockchain::new(&sled_db).unwrap();
let state = ConsensusState::new(
wallet,
blockchain,
*TESTNET_BOOTSTRAP_TIMESTAMP,
*TESTNET_GENESIS_TIMESTAMP,
*TESTNET_GENESIS_HASH_BYTES,
*TESTNET_INITIAL_DISTRIBUTION,
true,
);
let precision_diff = Float10::try_from(
"10000000000000000000000000000000000000000000000000000000000000000000000000",
)
.unwrap();
let precision_diff_base = fbig2base(precision_diff);
let f = Float10::try_from("0.01").unwrap();
let total_stake = Float10::try_from("100").unwrap();
let (sigma1, sigma2) = state.calc_sigmas(f, total_stake);
let sigma1_rhs = Float10::try_from(
"2909373465034095801035568917399197865646520818579502832252119592405565440",
)
.unwrap();
let sigma1_rhs_base = fbig2base(sigma1_rhs);
let sigma2_rhs = Float10::try_from(
"9137556389643100714432609642916129738741963230846798778430644027392",
)
.unwrap();
let sigma2_rhs_base = fbig2base(sigma2_rhs);
let sigma1_delta = if sigma1_rhs_base > sigma1 {
sigma1_rhs_base - sigma1
} else {
sigma1 - sigma1_rhs_base
};
let sigma2_delta = if sigma2_rhs_base > sigma2 {
sigma2_rhs_base - sigma2
} else {
sigma2 - sigma2_rhs_base
};
// Note: test cases were generated by low-precision Python scripts.
//https://github.com/ertosns/lotterysim/blob/master/pallas_unittests.csv
assert!(sigma1_delta < precision_diff_base);
assert!(sigma2_delta < precision_diff_base);
});
}
}

View File

@@ -1,79 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_sdk::{
crypto::MerkleNode,
pasta::{arithmetic::CurveAffine, group::Curve, pallas},
};
use darkfi_serial::{async_trait, SerialDecodable, SerialEncodable};
use crate::{
zk::{proof::VerifyingKey, Proof},
Error, Result,
};
#[derive(Debug, Clone, SerialDecodable, SerialEncodable)]
pub struct TransferStx {
/// sender's coin, or coin1_commitment in zk
pub coin_commitment: pallas::Point,
/// sender's coin pk
pub coin_pk: pallas::Base,
/// sender's coin sk's root
pub coin_root_sk: MerkleNode,
/// coin3_commitment in zk
pub change_coin_commitment: pallas::Point,
/// coin4_commitment in zk
pub transfered_coin_commitment: pallas::Point,
/// nullifiers coin1_nullifier
pub nullifier: pallas::Base,
/// sk coin creation slot
pub slot: pallas::Base,
/// root to coin's commitments
pub root: MerkleNode,
/// transfer proof
pub proof: Proof,
}
impl TransferStx {
/// Verify the transfer proof.
pub fn verify(&self, vk: VerifyingKey) -> Result<()> {
if self.proof.verify(&vk, &self.public_inputs()).is_err() {
return Err(Error::TransferTxVerification)
}
Ok(())
}
/// Arrange the public inputs of the transfer proof
pub fn public_inputs(&self) -> Vec<pallas::Base> {
let cm1 = self.coin_commitment.to_affine().coordinates().unwrap();
let cm3 = self.change_coin_commitment.to_affine().coordinates().unwrap();
let cm4 = self.transfered_coin_commitment.to_affine().coordinates().unwrap();
vec![
self.coin_pk,
*cm1.x(),
*cm1.y(),
*cm3.x(),
*cm3.y(),
*cm4.x(),
*cm4.y(),
self.root.inner(),
self.coin_root_sk.inner(),
self.nullifier,
]
}
}

View File

@@ -1,123 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use crate::{
consensus::{
block::{BlockOrder, BlockResponse},
state::{SlotRequest, SlotResponse},
ValidatorStatePtr,
},
net, Result,
};
use log::{debug, info, warn};
/// Async task used for block syncing.
pub async fn block_sync_task(p2p: net::P2pPtr, state: ValidatorStatePtr) -> Result<()> {
info!(target: "consensus::block_sync", "Starting blockchain sync...");
// Getting a random connected channel to ask from peers
match p2p.random_channel().await {
Some(channel) => {
let msg_subsystem = channel.message_subsystem();
// Communication setup for slots
msg_subsystem.add_dispatch::<SlotResponse>().await;
let slot_response_sub = channel.subscribe_msg::<SlotResponse>().await?;
// Communication setup for blocks
msg_subsystem.add_dispatch::<BlockResponse>().await;
let block_response_sub = channel.subscribe_msg::<BlockResponse>().await?;
// Node loops until both slots and blocks have been synced
let mut slots_synced = false;
let mut blocks_synced = false;
loop {
// Node sends the last known slot of the canonical blockchain
// and loops until the response is the same slot (used to utilize batch requests).
let mut last = state.read().await.blockchain.last_slot()?;
info!(target: "consensus::block_sync", "Last known slot: {:?}", last.id);
loop {
// Node creates a `SlotRequest` and sends it
let request = SlotRequest { slot: last.id };
channel.send(&request).await?;
// Node stores response data.
let resp = slot_response_sub.receive().await?;
// Verify and store retrieved slots
debug!(target: "consensus::block_sync", "block_sync_task(): Processing received slots");
state.write().await.receive_slots(&resp.slots).await?;
let last_received = state.read().await.blockchain.last_slot()?;
info!(target: "consensus::block_sync", "Last received slot: {:?}", last_received.id);
if last.id == last_received.id {
break
}
blocks_synced = false;
last = last_received;
}
// We force a recheck of slots after blocks have been synced
if blocks_synced {
slots_synced = true;
}
// Node sends the last known block hash of the canonical blockchain
// and loops until the response is the same block (used to utilize
// batch requests).
let mut last = state.read().await.blockchain.last()?;
info!(target: "consensus::block_sync", "Last known block: {:?} - {:?}", last.0, last.1);
loop {
// Node creates a `BlockOrder` and sends it
let order = BlockOrder { slot: last.0, block: last.1 };
channel.send(&order).await?;
// Node stores response data.
let _resp = block_response_sub.receive().await?;
// Verify and store retrieved blocks
debug!(target: "consensus::block_sync", "block_sync_task(): Processing received blocks");
//state.write().await.receive_sync_blocks(&resp.blocks).await?;
let last_received = state.read().await.blockchain.last()?;
info!(target: "consensus::block_sync", "Last received block: {:?} - {:?}", last_received.0, last_received.1);
if last == last_received {
blocks_synced = true;
break
}
slots_synced = false;
last = last_received;
}
if slots_synced && blocks_synced {
break
}
}
}
None => warn!(target: "consensus::block_sync", "Node is not connected to other nodes"),
};
state.write().await.synced = true;
info!(target: "consensus::block_sync", "Blockchain synced!");
Ok(())
}

View File

@@ -1,160 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use log::{info, warn};
use crate::{
consensus::{
state::{ConsensusRequest, ConsensusResponse, ConsensusSyncRequest, ConsensusSyncResponse},
Float10, ValidatorStatePtr,
},
net::P2pPtr,
system::sleep,
Result,
};
/// async task used for consensus state syncing.
/// Returns a flag indicating that the node is not connected to other peers or that consensus hasn't started,
/// so it can immediately start proposing.
pub async fn consensus_sync_task(p2p: P2pPtr, state: ValidatorStatePtr) -> Result<bool> {
info!(target: "consensus::consensus_sync", "Starting consensus state sync...");
let current_slot = state.read().await.consensus.time_keeper.current_slot();
// Loop through connected channels
let channels = p2p.channels().await;
if channels.is_empty() {
warn!(target: "consensus::consensus_sync", "Node is not connected to other nodes");
let mut lock = state.write().await;
lock.consensus.bootstrap_slot = current_slot;
lock.consensus.init_coins().await?;
info!(target: "consensus::consensus_sync", "Consensus state synced!");
return Ok(true)
}
// Node iterates the channel peers to check if at least one peer has seen slots
let mut peer = None;
for channel in channels {
// Communication setup
let msg_subsystem = channel.message_subsystem();
msg_subsystem.add_dispatch::<ConsensusSyncResponse>().await;
let response_sub = channel.subscribe_msg::<ConsensusSyncResponse>().await?;
// Node creates a `ConsensusSyncRequest` and sends it
let request = ConsensusSyncRequest {};
channel.send(&request).await?;
// Node checks response
let response = response_sub.receive().await?;
if response.bootstrap_slot == current_slot {
warn!(target: "consensus::consensus_sync", "Network was just bootstraped, checking rest nodes");
continue
}
if !response.proposing {
warn!(target: "consensus::consensus_sync", "Node is not proposing, checking rest nodes");
continue
}
if response.is_empty {
warn!(target: "consensus::consensus_sync", "Node has not seen any slots, retrying...");
continue
}
// Keep peer to ask for consensus state
peer = Some(channel.clone());
break
}
// If no peer knows about any slots, that means that the network was bootstrapped or restarted
// and no node has started consensus.
if peer.is_none() {
warn!(target: "consensus::consensus_sync", "No node that has seen any slots was found, or network was just boostrapped.");
let mut lock = state.write().await;
lock.consensus.bootstrap_slot = current_slot;
lock.consensus.init_coins().await?;
info!(target: "consensus::consensus_sync", "Consensus state synced!");
return Ok(true)
}
let peer = peer.unwrap();
// Listen for next finalization
info!(target: "consensus::consensus_sync", "Waiting for next finalization...");
let subscriber = state.read().await.subscribers.get("blocks").unwrap().clone();
let subscription = subscriber.sub.subscribe().await;
subscription.receive().await;
subscription.unsubscribe().await;
// After finalization occurs, sync our consensus state.
// This ensures that the received state always consists of 1 fork with one proposal.
info!(target: "consensus::consensus_sync", "Finalization signal received, requesting consensus state...");
// Communication setup
let msg_subsystem = peer.message_subsystem();
msg_subsystem.add_dispatch::<ConsensusResponse>().await;
let response_sub = peer.subscribe_msg::<ConsensusResponse>().await?;
// Node creates a `ConsensusRequest` and sends it
peer.send(&ConsensusRequest {}).await?;
// Node verifies response came from a participating node.
// Extra validations can be added here.
let mut response = response_sub.receive().await?;
// Verify that peer has finished finalizing forks
loop {
if !response.forks.is_empty() {
warn!(target: "consensus::consensus_sync", "Peer has not finished finalization, retrying...");
sleep(1).await;
peer.send(&ConsensusRequest {}).await?;
response = response_sub.receive().await?;
continue
}
break
}
// Verify that the node has received all finalized blocks
loop {
if !state.read().await.blockchain.has_slot_order(response.current_slot)? {
warn!(target: "consensus::consensus_sync", "Node has not finished finalization, retrying...");
sleep(1).await;
continue
}
break
}
// Node stores response data.
let mut lock = state.write().await;
let mut forks = vec![];
for fork in &response.forks {
forks.push(fork.clone().into());
}
lock.consensus.bootstrap_slot = response.bootstrap_slot;
lock.consensus.forks = forks;
lock.append_pending_txs(&response.pending_txs).await;
lock.consensus.slots = response.slots.clone();
lock.consensus.previous_leaders = 1;
let mut f_history = vec![];
for f in &response.f_history {
let f_float = Float10::try_from(f.as_str()).unwrap();
f_history.push(f_float);
}
lock.consensus.f_history = f_history;
let mut err_history = vec![];
for err in &response.err_history {
let err_float = Float10::try_from(err.as_str()).unwrap();
err_history.push(err_float);
}
lock.consensus.err_history = err_history;
lock.consensus.nullifiers = response.nullifiers.clone();
lock.consensus.init_coins().await?;
info!(target: "consensus::consensus_sync", "Consensus state synced!");
Ok(false)
}

View File

@@ -1,28 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
// TODO: Handle ? with matches in these files. They should be robust.
mod block_sync;
pub use block_sync::block_sync_task;
mod consensus_sync;
pub use consensus_sync::consensus_sync_task;
mod proposal;
pub use proposal::proposal_task;

View File

@@ -1,335 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::sync::Arc;
use log::{debug, error, info, warn};
use super::consensus_sync_task;
use crate::{
consensus::{constants, ValidatorStatePtr},
net::P2pPtr,
system::sleep,
util::time::Timestamp,
Result,
};
/// async task used for participating in the consensus protocol
pub async fn proposal_task(
consensus_p2p: P2pPtr,
sync_p2p: P2pPtr,
state: ValidatorStatePtr,
ex: Arc<smol::Executor<'_>>,
) -> Result<()> {
// Check if network is configured to start in the future,
// otherwise wait for current or next slot finalization period for optimal sync conditions.
// NOTE: The network being configured to start in the future should always be the case
// when bootstrapping or restarting a network.
let current_ts = Timestamp::current_time();
let bootstrap_ts = state.read().await.consensus.bootstrap_ts;
if current_ts < bootstrap_ts {
let diff = bootstrap_ts.0 - current_ts.0;
info!(target: "consensus::proposal", "consensus: Waiting for network bootstrap: {} seconds", diff);
sleep(diff).await;
} else {
let mut sleep_time = state.read().await.consensus.time_keeper.next_n_slot_start(1);
let sync_offset = constants::FINAL_SYNC_DUR;
loop {
if sleep_time > sync_offset {
sleep_time -= sync_offset;
break
}
info!(target: "consensus::proposal", "consensus: Waiting for next slot ({:?})", sleep_time);
sleep(sleep_time).await;
sleep_time = state.read().await.consensus.time_keeper.next_n_slot_start(1);
}
info!(target: "consensus::proposal", "consensus: Waiting for finalization sync period ({:?})", sleep_time);
sleep(sleep_time).await;
}
let mut retries = 0;
// Sync loop
loop {
// Resetting consensus state, so node can still follow the finalized blocks by
// the sync p2p network/protocols
state.write().await.consensus.reset();
// Checking sync retries
if retries > constants::SYNC_MAX_RETRIES {
error!(target: "consensus::proposal", "consensus: Node reached max sync retries ({}) due to not being able to follow up with consensus processing.", constants::SYNC_MAX_RETRIES);
warn!(target: "consensus::proposal", "consensus: Terminating consensus participation.");
break
}
// Node syncs its consensus state
match consensus_sync_task(consensus_p2p.clone(), state.clone()).await {
Ok(p) => {
// Check if node is not connected to other nodes and can
// start proposing immediately.
if p {
info!(target: "consensus::proposal", "consensus: Node can start proposing!");
state.write().await.consensus.proposing = p;
}
}
Err(e) => {
error!(target: "consensus::proposal", "consensus: Failed syncing consensus state: {}. Quitting consensus.", e);
// TODO: Perhaps notify over a channel in order to
// stop consensus p2p protocols.
return Ok(())
}
};
// Node modifies its participating slot to next.
match state.write().await.consensus.set_participating() {
Ok(()) => {
info!(target: "consensus::proposal", "consensus: Node will start participating in the next slot")
}
Err(e) => {
error!(target: "consensus::proposal", "consensus: Failed to set participation slot: {}", e)
}
}
// Record epoch we start the consensus loop
let start_epoch = state.read().await.consensus.time_keeper.current_epoch();
// Start executing consensus
consensus_loop(consensus_p2p.clone(), sync_p2p.clone(), state.clone(), ex.clone()).await;
// Reset retries counter if more epochs have passed than sync retries duration
let break_epoch = state.read().await.consensus.time_keeper.current_epoch();
if (break_epoch - start_epoch) > constants::SYNC_RETRIES_DURATION {
retries = 0;
}
// Increase retries count on consensus loop break
retries += 1;
}
Ok(())
}
/// Consensus protocol loop
async fn consensus_loop(
consensus_p2p: P2pPtr,
sync_p2p: P2pPtr,
state: ValidatorStatePtr,
ex: Arc<smol::Executor<'_>>,
) {
// Note: when a node can start producing proposals is currently only enforced
// in code, where we verify that the hardware can keep up with consensus by
// counting how many consecutive slots the node successfully listened to and
// processed. Additionally, we check that each proposer coin's creation slot is
// greater than an epoch length. Later, this will be enforced via contract,
// where it will be explicit when a node can produce proposals,
// and after which slot they can be considered valid.
let mut listened_slots = 0;
let mut changed_status = false;
loop {
// Check if node can start proposing.
// This code ensures that we only change the status once
// and listened_slots doesn't increment further.
if listened_slots > constants::EPOCH_LENGTH {
if !changed_status {
info!(target: "consensus::proposal", "consensus: Node can start proposing!");
state.write().await.consensus.proposing = true;
changed_status = true;
}
} else {
listened_slots += 1;
}
// Node waits for and executes the consensus protocol propose period.
if propose_period(consensus_p2p.clone(), state.clone()).await {
// Node needs to resync
warn!(
target: "consensus::proposal",
"consensus: Node missed slot {} due to proposal processing, resyncing...",
state.read().await.consensus.time_keeper.current_slot()
);
break
}
// Node waits for and executes the consensus protocol finalization period.
if finalization_period(sync_p2p.clone(), state.clone(), ex.clone()).await {
// Node needs to resync
warn!(
target: "consensus::proposal",
"consensus: Node missed slot {} due to finalizated blocks processing, resyncing...",
state.read().await.consensus.time_keeper.current_slot()
);
break
}
}
}
/// async function to wait for and execute the consensus protocol propose period.
/// The propose period consists of 2 parts:
/// - Generate the current slot
/// - Check if we are the slot leader, and if so, generate and broadcast a proposal
/// Returns a flag in case the node needs to resync.
async fn propose_period(consensus_p2p: P2pPtr, state: ValidatorStatePtr) -> bool {
// Node sleeps until next slot
let seconds_next_slot = state.read().await.consensus.time_keeper.next_n_slot_start(1);
info!(target: "consensus::proposal", "consensus: Waiting for next slot ({} sec)", seconds_next_slot);
sleep(seconds_next_slot).await;
// Keep a record of slot to verify if next slot got skipped during processing
let processing_slot = state.read().await.consensus.time_keeper.current_slot();
// Retrieve current forks last and second to last hash
let (fork_hashes, fork_previous_hashes) = state.read().await.consensus.fork_hashes();
// Retrieve slot sigmas
let (sigma1, sigma2) = state.write().await.consensus.sigmas();
// Node checks if epoch has changed and generate slot
let epoch_changed = state
.write()
.await
.consensus
.epoch_changed(fork_hashes, fork_previous_hashes, sigma1, sigma2)
.await;
match epoch_changed {
Ok(changed) => {
if changed {
info!(target: "consensus::proposal", "consensus: New epoch started: {}", state.read().await.consensus.epoch);
}
}
Err(e) => {
error!(target: "consensus::proposal", "consensus: Epoch check failed: {}", e);
return false
}
};
// Node checks if it's the slot leader to generate a new proposal
// for that slot.
let (won, fork_index, coin_index) =
state.write().await.consensus.is_slot_leader(sigma1, sigma2);
let result = if won {
state.write().await.propose(processing_slot, fork_index, coin_index, sigma1, sigma2).await
} else {
Ok(None)
};
let (proposal, coin, derived_blind) = match result {
Ok(pair) => {
if pair.is_none() {
info!(target: "consensus::proposal", "consensus: Node is not the slot lead");
return false
}
pair.unwrap()
}
Err(e) => {
error!(target: "consensus::proposal", "consensus: Block proposal failed: {}", e);
return false
}
};
// Node checks if it missed finalization period due to proposal creation
let next_slot_start = state.read().await.consensus.time_keeper.next_n_slot_start(1);
if next_slot_start <= constants::FINAL_SYNC_DUR {
warn!(
target: "consensus::proposal",
"consensus: Node missed slot {} finalization period due to proposal creation, resyncing...",
state.read().await.consensus.time_keeper.current_slot()
);
return true
}
// Node stores the proposal and broadcast to rest nodes
info!(target: "consensus::proposal", "consensus: Node is the slot leader: Proposed block: {}", proposal);
debug!(target: "consensus::proposal", "consensus: Full proposal: {:?}", proposal);
match state
.write()
.await
.receive_proposal(&proposal, Some((coin_index, coin, derived_blind)))
.await
{
Ok(_) => {
// Here we don't have to check to broadcast, because the flag
// will always be true, since the node is able to produce proposals
info!(target: "consensus::proposal", "consensus: Block proposal saved successfully");
// Broadcast proposal to other consensus nodes
consensus_p2p.broadcast(&proposal).await;
}
Err(e) => {
error!(target: "consensus::proposal", "consensus: Block proposal save failed: {}", e);
}
}
// Verify node didn't skip next slot
processing_slot != state.read().await.consensus.time_keeper.current_slot()
}
/// async function to wait for and execute the consensus protocol finalization period.
/// Returns a flag in case the node needs to resync.
async fn finalization_period(
_sync_p2p: P2pPtr,
state: ValidatorStatePtr,
_ex: Arc<smol::Executor<'_>>,
) -> bool {
// Node sleeps until finalization sync period starts
let next_slot_start = state.read().await.consensus.time_keeper.next_n_slot_start(1);
if next_slot_start > constants::FINAL_SYNC_DUR {
let seconds_sync_period = next_slot_start - constants::FINAL_SYNC_DUR;
info!(target: "consensus::proposal", "consensus: Waiting for finalization sync period ({} sec)", seconds_sync_period);
sleep(seconds_sync_period).await;
} else {
warn!(
target: "consensus::proposal",
"consensus: Node missed slot {} finalization period due to proposals processing, resyncing...",
state.read().await.consensus.time_keeper.current_slot()
);
return true
}
// Keep a record of slot to verify if next slot got skipped during processing
let completed_slot = state.read().await.consensus.time_keeper.current_slot();
// Check if any forks can be finalized
/*
match state.write().await.chain_finalization().await {
Ok((to_broadcast_block, to_broadcast_slots)) => {
// Broadcasting in background
if !to_broadcast_block.is_empty() || !to_broadcast_slots.is_empty() {
ex.spawn(async move {
// Broadcast finalized blocks info, if any:
info!(target: "consensus::proposal", "consensus: Broadcasting finalized blocks");
for info in to_broadcast_block {
sync_p2p.broadcast(&info).await;
}
// Broadcast finalized slots, if any:
info!(target: "consensus::proposal", "consensus: Broadcasting finalized slots");
for slot in to_broadcast_slots {
sync_p2p.broadcast(slot).await;
info!(target: "consensus::proposal", "consensus: Broadcasted slot");
// TODO: You can give an error if you query P2P and check if there are any connected channels
}
})
.detach();
} else {
info!(target: "consensus::proposal", "consensus: No finalized blocks or slots to broadcast");
}
}
Err(e) => {
error!(target: "consensus::proposal", "consensus: Finalization check failed: {}", e);
}
}
*/
// Verify node didn't skip next slot
completed_slot != state.read().await.consensus.time_keeper.current_slot()
}

View File

@@ -1,28 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_serial::{async_trait, SerialDecodable, SerialEncodable};
use crate::consensus::{EncryptedTxRcpt, TransferStx};
/// transfer transaction
#[derive(Debug, Clone, SerialDecodable, SerialEncodable)]
pub struct Tx {
pub xfer: TransferStx,
pub cipher: EncryptedTxRcpt,
}

View File

@@ -1,118 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//! Type aliases used in the consensus codebase.
use std::ops::{Add, AddAssign, Div, Mul, Sub};
use dashu::{
base::Abs,
float::{round::mode::Zero, FBig, Repr},
};
use super::constants::RADIX_BITS;
const B: u64 = 10;
#[derive(Clone, PartialEq, PartialOrd, Debug)]
pub struct Float10(FBig<Zero, B>);
impl Float10 {
pub fn repr(&self) -> &Repr<B> {
self.0.repr()
}
pub fn abs(&self) -> Self {
Self(self.0.clone().abs())
}
pub fn powf(&self, exp: Self) -> Self {
Self(self.0.powf(&exp.0))
}
pub fn ln(&self) -> Self {
Self(self.0.ln())
}
}
impl Add for Float10 {
type Output = Self;
fn add(self, other: Self) -> Self {
Self(self.0 + other.0)
}
}
impl AddAssign for Float10 {
fn add_assign(&mut self, other: Self) {
*self = Self(self.0.clone() + other.0);
}
}
impl Sub for Float10 {
type Output = Self;
fn sub(self, other: Self) -> Self {
Self(self.0 - other.0)
}
}
impl Mul for Float10 {
type Output = Self;
fn mul(self, other: Self) -> Self {
Self(self.0 * other.0)
}
}
impl Div for Float10 {
type Output = Self;
fn div(self, other: Self) -> Self {
Self(self.0 / other.0)
}
}
impl std::fmt::Display for Float10 {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
impl TryFrom<&str> for Float10 {
type Error = crate::Error;
fn try_from(value: &str) -> Result<Self, Self::Error> {
Ok(Self(FBig::from_str_native(value)?.with_precision(RADIX_BITS).value()))
}
}
impl TryFrom<u64> for Float10 {
type Error = crate::Error;
fn try_from(value: u64) -> Result<Self, Self::Error> {
Ok(Self(FBig::from(value)))
}
}
impl TryFrom<i64> for Float10 {
type Error = crate::Error;
fn try_from(value: i64) -> Result<Self, Self::Error> {
Ok(Self(FBig::from(value)))
}
}
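// A minimal usage sketch for the Float10 wrapper above, assuming only the
// constructors and operator impls defined in this module (the exact values
// are illustrative):
#[cfg(test)]
mod float10_usage_example {
    use super::Float10;

    #[test]
    fn basic_arithmetic() {
        let sigma = Float10::try_from("0.5").unwrap();
        let value = Float10::try_from(1000u64).unwrap();
        // 0.5 * 1000 + 10 = 510
        let total = sigma * value + Float10::try_from("10.0").unwrap();
        assert_eq!(total, Float10::try_from("510").unwrap());
        // Display is implemented, so values can be logged directly
        println!("total = {}", total);
    }
}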

View File

@@ -1,111 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_sdk::pasta::{group::ff::PrimeField, pallas};
use dashu::integer::{IBig, Sign, UBig};
use log::debug;
use super::Float10;
pub fn fbig2ibig(f: Float10) -> IBig {
let rad = IBig::from(10);
let sig = f.repr().significand();
let exp = f.repr().exponent();
let val: IBig = if exp >= 0 {
sig.clone() * rad.pow(exp.unsigned_abs())
} else {
sig.clone() / rad.pow(exp.unsigned_abs())
};
val
}
/// note! negative values in the pallas field won't wrap, and won't
/// convert back to the same value.
pub fn fbig2base(f: Float10) -> pallas::Base {
debug!(target: "consensus::utils", "fbig -> base (f): {}", f);
let val: IBig = fbig2ibig(f);
let (sign, word) = val.as_sign_words();
let mut words: [u64; 4] = [0, 0, 0, 0];
words[..word.len()].copy_from_slice(word);
match sign {
Sign::Positive => pallas::Base::from_raw(words),
Sign::Negative => pallas::Base::from_raw(words).neg(),
}
}
/// note! only supports positive values and zero.
/// used for testing purposes on non-negative values at the moment.
pub fn base2ibig(base: pallas::Base) -> IBig {
//
let byts: [u8; 32] = base.to_repr();
let words: [u64; 4] = [
u64::from_le_bytes(byts[0..8].try_into().expect("")),
u64::from_le_bytes(byts[8..16].try_into().expect("")),
u64::from_le_bytes(byts[16..24].try_into().expect("")),
u64::from_le_bytes(byts[24..32].try_into().expect("")),
];
let uparts = UBig::from_words(&words);
IBig::from_parts(Sign::Positive, uparts)
}
#[cfg(test)]
mod tests {
use dashu::integer::IBig;
use crate::consensus::{
types::Float10,
utils::{base2ibig, fbig2base, fbig2ibig},
};
use darkfi_sdk::pasta::pallas;
#[test]
fn dashu_fbig2ibig() {
let f = Float10::try_from("234234223.000").unwrap();
let i: IBig = fbig2ibig(f);
let sig = IBig::from(234234223);
assert_eq!(i, sig);
}
#[test]
fn dashu_test_base2ibig() {
//
let fbig: Float10 = Float10::try_from(
"289480223093290488558927462521719769633630564819415607159546767643499676303",
)
.unwrap();
let ibig = fbig2ibig(fbig.clone());
let res_base: pallas::Base = fbig2base(fbig.clone());
let res_ibig: IBig = base2ibig(res_base);
assert_eq!(res_ibig, ibig);
}
#[test]
fn dashu_test2_base2ibig() {
// Assert that field wrapping for negative values won't hold during conversions.
let fbig: Float10 = Float10::try_from(
"-20065240046497827215558476051577517633529246907153511707181011345840062564.87",
)
.unwrap();
let ibig = fbig2ibig(fbig.clone());
let res_base: pallas::Base = fbig2base(fbig.clone());
let res_ibig: IBig = base2ibig(res_base);
assert_ne!(res_ibig, ibig);
}
}

File diff suppressed because it is too large

View File

@@ -1,56 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use async_trait::async_trait;
use darkfi_sdk::crypto::{Keypair, PublicKey, SecretKey};
use darkfi_serial::deserialize;
use log::debug;
use crate::{wallet::WalletDb, Result};
const CONSENSUS_KEYS_TABLE: &str = "consensus_keys";
const CONSENSUS_KEYS_COLUMN_IS_DEFAULT: &str = "is_default";
#[async_trait]
pub trait ConsensusWallet {
async fn get_default_keypair(&self) -> Result<Keypair>;
}
#[async_trait]
impl ConsensusWallet for WalletDb {
async fn get_default_keypair(&self) -> Result<Keypair> {
debug!(target: "consensus::wallet", "Returning default keypair");
let wallet_conn = self.conn.lock().await;
let mut stmt = wallet_conn.prepare(&format!(
"SELECT * FROM {} WHERE {} = 1",
CONSENSUS_KEYS_TABLE, CONSENSUS_KEYS_COLUMN_IS_DEFAULT
))?;
let (public, secret): (PublicKey, SecretKey) = stmt.query_row((), |row| {
let p_bytes: Vec<u8> = row.get("public")?;
let s_bytes: Vec<u8> = row.get("secret")?;
let public = deserialize(&p_bytes).unwrap();
let secret = deserialize(&s_bytes).unwrap();
Ok((public, secret))
})?;
stmt.finalize()?;
Ok(Keypair { secret, public })
}
}

View File

@@ -1,54 +0,0 @@
[package]
name = "darkfi_consensus_contract"
version = "0.4.1"
authors = ["Dyne.org foundation <foundation@dyne.org>"]
license = "AGPL-3.0-only"
edition = "2021"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
blake3 = "1.5.0"
darkfi-sdk = { path = "../../sdk" }
darkfi-serial = { path = "../../serial", features = ["derive", "crypto"] }
darkfi_money_contract = { path = "../money", features = ["no-entrypoint"] }
thiserror = "1.0.56"
# The following dependencies are used for the client API and
# probably shouldn't be in WASM
chacha20poly1305 = { version = "0.10.1", optional = true }
darkfi = { path = "../../../", features = ["zk", "rpc", "blockchain"], optional = true }
halo2_proofs = { version = "0.3.0", optional = true }
log = { version = "0.4.20", optional = true }
rand = { version = "0.8.5", optional = true }
# These are used just for the integration tests
[dev-dependencies]
smol = "1.3.0"
bs58 = "0.5.0"
darkfi = {path = "../../../", features = ["tx", "blockchain"]}
darkfi_money_contract = { path = "../money", features = ["client", "no-entrypoint"] }
darkfi-contract-test-harness = {path = "../test-harness"}
simplelog = "0.12.1"
sled = "0.34.7"
# We need to disable random by using the "custom" feature, which makes the
# crate a noop, so the wasm32-unknown-unknown target can be built.
[target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = { version = "0.2.8", features = ["custom"] }
[features]
default = []
no-entrypoint = []
client = [
"darkfi",
"darkfi-serial/async",
"darkfi_money_contract/client",
"darkfi_money_contract/no-entrypoint",
"rand",
"chacha20poly1305",
"log",
"halo2_proofs",
]

View File

@@ -1,75 +0,0 @@
.POSIX:
# Cargo binary
CARGO = cargo +nightly
# Compile target for system binaries
RUST_TARGET = $(shell rustc -Vv | grep '^host: ' | cut -d' ' -f2)
# Uncomment when doing musl static builds
#RUSTFLAGS = -C target-feature=+crt-static -C link-self-contained=yes
# wasm build target
WASM_TARGET = wasm32-unknown-unknown
# Cargo package name
PKGNAME = $(shell grep '^name = ' Cargo.toml | cut -d' ' -f3 | tr -d '"')
# wasm contract binary
WASM_BIN = $(PKGNAME:=.wasm)
# zkas compiler binary
ZKAS = ../../../zkas
# zkas circuits
PROOFS_SRC = $(shell find proof -type f -name '*.zk')
PROOFS_BIN = $(PROOFS_SRC:=.bin)
# wasm source files
WASM_SRC = \
Cargo.toml \
../../../Cargo.toml \
../../../src/sdk/Cargo.toml \
../../../src/serial/Cargo.toml \
$(shell find src -type f -name '*.rs') \
$(shell find ../../sdk -type f -name '*.rs') \
$(shell find ../../serial -type f -name '*.rs')
all: $(WASM_BIN)
$(PROOFS_BIN): $(ZKAS) $(PROOFS_SRC)
$(ZKAS) $(basename $@) -o $@
$(WASM_BIN): $(WASM_SRC) $(PROOFS_BIN)
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) build --target=$(WASM_TARGET) \
--release --package $(PKGNAME)
cp -f ../../../target/$(WASM_TARGET)/release/$@ $@
wasm-strip $@
test-stake-unstake: all
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) test --target=$(RUST_TARGET) \
--release --package $(PKGNAME) \
--features=no-entrypoint,client \
--test stake_unstake
test-genesis-stake-unstake: all
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) test --target=$(RUST_TARGET) \
--release --package $(PKGNAME) \
--features=no-entrypoint,client \
--test genesis_stake_unstake
test: test-genesis-stake-unstake test-stake-unstake
clippy: all
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) clippy --target=$(WASM_TARGET) \
--release --package $(PKGNAME)
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) clippy --target=$(RUST_TARGET) \
--release --package $(PKGNAME) \
--features=no-entrypoint,client --tests
clean:
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) clean --target=$(WASM_TARGET) \
--release --package $(PKGNAME)
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) clean --target=$(RUST_TARGET) \
--release --package $(PKGNAME)
rm -f $(PROOFS_BIN) $(WASM_BIN)
.PHONY: all test-genesis-stake-unstake test-stake-unstake test clippy clean

View File

@@ -1,66 +0,0 @@
k = 13;
field = "pallas";
constant "ConsensusBurn_V1" {
EcFixedPointShort VALUE_COMMIT_VALUE,
EcFixedPoint VALUE_COMMIT_RANDOM,
EcFixedPointBase NULLIFIER_K,
}
witness "ConsensusBurn_V1" {
# The value of this coin
Base value,
# The epoch this coin was minted on
Base epoch,
# Unique serial number corresponding to this coin
Base serial,
# Random blinding factor for value commitment
Scalar value_blind,
# Secret key used to derive nullifier and coins' public key
Base secret,
# Leaf position of the coin in the Merkle tree of coins
Uint32 leaf_pos,
# Merkle path to the coin
MerklePath path,
}
circuit "ConsensusBurn_V1" {
# Poseidon hash of the nullifier
nullifier = poseidon_hash(secret, serial);
constrain_instance(nullifier);
# Constrain the epoch this coin was minted on
constrain_instance(epoch);
# We derive coins' public key for the signature and
# constrain its coordinates:
pub = ec_mul_base(secret, NULLIFIER_K);
pub_x = ec_get_x(pub);
pub_y = ec_get_y(pub);
constrain_instance(pub_x);
constrain_instance(pub_y);
# Coin hash
C = poseidon_hash(
pub_x,
pub_y,
value,
epoch,
serial,
);
# Merkle root
root = merkle_root(leaf_pos, path, C);
constrain_instance(root);
# Pedersen commitment for coin's value
vcv = ec_mul_short(value, VALUE_COMMIT_VALUE);
vcr = ec_mul(value_blind, VALUE_COMMIT_RANDOM);
value_commit = ec_add(vcv, vcr);
# Since value_commit is a curve point, we fetch its coordinates
# and constrain them:
constrain_instance(ec_get_x(value_commit));
constrain_instance(ec_get_y(value_commit));
# At this point we've enforced all of our public inputs.
}

View File

@@ -1,48 +0,0 @@
k = 13;
field = "pallas";
constant "ConsensusMint_V1" {
EcFixedPointShort VALUE_COMMIT_VALUE,
EcFixedPoint VALUE_COMMIT_RANDOM,
}
witness "ConsensusMint_V1" {
# X coordinate for public key
Base pub_x,
# Y coordinate for public key
Base pub_y,
# The value of this coin
Base value,
# The epoch this coin was minted on
Base epoch,
# Unique serial number corresponding to this coin
Base serial,
# Random blinding factor for the value commitment
Scalar value_blind,
}
circuit "ConsensusMint_V1" {
# Constrain the epoch this coin was minted on
constrain_instance(epoch);
# Poseidon hash of the coin
C = poseidon_hash(
pub_x,
pub_y,
value,
epoch,
serial,
);
constrain_instance(C);
# Pedersen commitment for coin's value
vcv = ec_mul_short(value, VALUE_COMMIT_VALUE);
vcr = ec_mul(value_blind, VALUE_COMMIT_RANDOM);
value_commit = ec_add(vcv, vcr);
# Since the value commit is a curve point, we fetch its coordinates
# and constrain them:
constrain_instance(ec_get_x(value_commit));
constrain_instance(ec_get_y(value_commit));
# At this point we've enforced all of our public inputs.
}

View File

@@ -1,157 +0,0 @@
k = 13;
field = "pallas";
constant "ConsensusProposal_V1" {
EcFixedPointShort VALUE_COMMIT_VALUE,
EcFixedPoint VALUE_COMMIT_RANDOM,
EcFixedPointBase NULLIFIER_K,
}
witness "ConsensusProposal_V1" {
# Burnt coin secret key
Base input_secret_key,
# Unique serial number corresponding to the burnt coin
Base input_serial,
# The value of the burnt coin
Base input_value,
# The epoch the burnt coin was minted on
Base epoch,
# The reward value
Base reward,
# Random blinding factor for the value commitment
Scalar input_value_blind,
# Leaf position of the coin in the Merkle tree of coins
Uint32 leaf_pos,
# Merkle path to the coin
MerklePath path,
# Random blinding factor for the value commitment of the new coin
Scalar output_value_blind,
# Election seed y
Base mu_y,
# Election seed rho
Base mu_rho,
# Sigma1
Base sigma1,
# Sigma2
Base sigma2,
# Lottery headstart
Base headstart,
}
circuit "ConsensusProposal_V1" {
# Witnessed constants
ZERO = witness_base(0);
SERIAL_PREFIX = witness_base(2);
SEED_PREFIX = witness_base(3);
SECRET_PREFIX = witness_base(4);
# =============
# Burn old coin
# =============
# Poseidon hash of the nullifier
nullifier = poseidon_hash(input_secret_key, input_serial);
constrain_instance(nullifier);
# Constrain the epoch this coin was minted on.
# We use this as our timelock mechanism.
constrain_instance(epoch);
# We derive the coin's public key for the signature and
# VRF proof verification and constrain its coordinates:
input_pub = ec_mul_base(input_secret_key, NULLIFIER_K);
pub_x = ec_get_x(input_pub);
pub_y = ec_get_y(input_pub);
constrain_instance(pub_x);
constrain_instance(pub_y);
# Construct the burned coin
C = poseidon_hash(
pub_x,
pub_y,
input_value,
epoch,
input_serial,
);
# Merkle inclusion proof
root = merkle_root(leaf_pos, path, C);
constrain_instance(root);
# Pedersen commitment for burned coin's value
vcv = ec_mul_short(input_value, VALUE_COMMIT_VALUE);
vcr = ec_mul(input_value_blind, VALUE_COMMIT_RANDOM);
value_commit = ec_add(vcv, vcr);
# Since value_commit is a curve point, we fetch its coordinates
# and constrain them:
constrain_instance(ec_get_x(value_commit));
constrain_instance(ec_get_y(value_commit));
# =============
# Mint new coin
# =============
# Constrain reward value
constrain_instance(reward);
# Pedersen commitment for new coin's value (old value + reward)
output_value = base_add(input_value, reward);
nvcv = ec_mul_short(output_value, VALUE_COMMIT_VALUE);
nvcr = ec_mul(output_value_blind, VALUE_COMMIT_RANDOM);
output_value_commit = ec_add(nvcv, nvcr);
# Since the new value commit is also a curve point, we'll do the same
# coordinate dance:
constrain_instance(ec_get_x(output_value_commit));
constrain_instance(ec_get_y(output_value_commit));
# The serial of the new coin is derived from the old coin
output_serial = poseidon_hash(SERIAL_PREFIX, input_secret_key, input_serial);
# The secret key of the new coin is derived from old coin
output_secret_key = poseidon_hash(SECRET_PREFIX, input_secret_key);
output_pub = ec_mul_base(output_secret_key, NULLIFIER_K);
output_pub_x = ec_get_x(output_pub);
output_pub_y = ec_get_y(output_pub);
# Poseidon hash of the new coin
# In here we set the new epoch as ZERO, thus removing a
# potentially existing timelock.
output_coin = poseidon_hash(
output_pub_x,
output_pub_y,
output_value,
ZERO,
output_serial,
);
constrain_instance(output_coin);
# ============================
# Constrain lottery parameters
# ============================
# Coin y, constructed with the old serial for seeding:
seed = poseidon_hash(SEED_PREFIX, input_serial);
y = poseidon_hash(seed, mu_y);
constrain_instance(mu_y);
constrain_instance(y);
# Coin rho (seed):
rho = poseidon_hash(seed, mu_rho);
constrain_instance(mu_rho);
constrain_instance(rho);
# Calculate lottery target
term_1 = base_mul(sigma1, input_value);
term_2 = base_mul(sigma2, input_value);
shifted_term_2 = base_mul(term_2, input_value);
target = base_add(term_1, shifted_term_2);
shifted_target = base_add(target, headstart);
constrain_instance(sigma1);
constrain_instance(sigma2);
constrain_instance(headstart);
# Play lottery
less_than_strict(y, shifted_target);
# At this point we've enforced all of our public inputs.
}
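// A minimal host-side sketch of the lottery check enforced in-circuit above,
// mirroring the target computation used in `create_proposal_proof` below:
// the coin wins slot leadership when y < sigma1*v + sigma2*v^2 + headstart,
// with all values as Pallas base field elements.
use darkfi_sdk::pasta::pallas;

fn wins_slot_lottery(
    y: pallas::Base,
    value: pallas::Base,
    sigma1: pallas::Base,
    sigma2: pallas::Base,
    headstart: pallas::Base,
) -> bool {
    // Same expression as the in-circuit `less_than_strict(y, shifted_target)` gate
    let shifted_target = sigma1 * value + sigma2 * value * value + headstart;
    y < shifted_target
}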

View File

@@ -1,179 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//! This API is crufty. Please rework it into something nice to read and nice to use.
use darkfi::{
zk::{halo2::Value, Proof, ProvingKey, Witness, ZkCircuit},
zkas::ZkBinary,
Result,
};
use darkfi_money_contract::{client::ConsensusNote, model::Coin};
use darkfi_sdk::{
bridgetree,
bridgetree::Hashable,
crypto::{
pasta_prelude::*, pedersen_commitment_u64, poseidon_hash, MerkleNode, Nullifier, PublicKey,
SecretKey,
},
pasta::pallas,
};
use rand::rngs::OsRng;
pub struct ConsensusMintOutputInfo {
pub value: u64,
pub epoch: u64,
pub public_key: PublicKey,
pub value_blind: pallas::Scalar,
pub serial: pallas::Base,
}
pub struct ConsensusBurnInputInfo {
pub leaf_position: bridgetree::Position,
pub merkle_path: Vec<MerkleNode>,
pub secret: SecretKey,
pub note: ConsensusNote,
pub value_blind: pallas::Scalar,
}
pub struct ConsensusMintRevealed {
pub epoch: u64,
pub coin: Coin,
pub value_commit: pallas::Point,
}
impl ConsensusMintRevealed {
pub fn to_vec(&self) -> Vec<pallas::Base> {
let valcom_coords = self.value_commit.to_affine().coordinates().unwrap();
// NOTE: It's important to keep these in the same order
// as the `constrain_instance` calls in the zkas code.
vec![self.epoch.into(), self.coin.inner(), *valcom_coords.x(), *valcom_coords.y()]
}
}
/// Create a ZK proof for minting a coin in the Consensus coin state.
pub fn create_consensus_mint_proof(
zkbin: &ZkBinary,
pk: &ProvingKey,
output: &ConsensusMintOutputInfo,
) -> Result<(Proof, ConsensusMintRevealed)> {
let epoch_pallas = pallas::Base::from(output.epoch);
let value_pallas = pallas::Base::from(output.value);
let value_commit = pedersen_commitment_u64(output.value, output.value_blind);
let (pub_x, pub_y) = output.public_key.xy();
let coin = Coin::from(poseidon_hash([pub_x, pub_y, value_pallas, epoch_pallas, output.serial]));
let public_inputs = ConsensusMintRevealed { epoch: output.epoch, coin, value_commit };
let prover_witnesses = vec![
Witness::Base(Value::known(pub_x)),
Witness::Base(Value::known(pub_y)),
Witness::Base(Value::known(value_pallas)),
Witness::Base(Value::known(epoch_pallas)),
Witness::Base(Value::known(output.serial)),
Witness::Scalar(Value::known(output.value_blind)),
];
let circuit = ZkCircuit::new(prover_witnesses, zkbin);
let proof = Proof::create(pk, &[circuit], &public_inputs.to_vec(), &mut OsRng)?;
Ok((proof, public_inputs))
}
pub struct ConsensusBurnRevealed {
pub nullifier: Nullifier,
pub epoch: u64,
pub signature_public: PublicKey,
pub merkle_root: MerkleNode,
pub value_commit: pallas::Point,
}
impl ConsensusBurnRevealed {
pub fn to_vec(&self) -> Vec<pallas::Base> {
let valcom_coords = self.value_commit.to_affine().coordinates().unwrap();
let sigpub_coords = self.signature_public.inner().to_affine().coordinates().unwrap();
let epoch_pallas = pallas::Base::from(self.epoch);
// NOTE: It's important to keep these in the same order
// as the `constrain_instance` calls in the zkas code.
vec![
self.nullifier.inner(),
epoch_pallas,
*sigpub_coords.x(),
*sigpub_coords.y(),
self.merkle_root.inner(),
*valcom_coords.x(),
*valcom_coords.y(),
]
}
}
pub fn create_consensus_burn_proof(
zkbin: &ZkBinary,
pk: &ProvingKey,
input: &ConsensusBurnInputInfo,
) -> Result<(Proof, ConsensusBurnRevealed, SecretKey)> {
let nullifier = Nullifier::from(poseidon_hash([input.secret.inner(), input.note.serial]));
let epoch = input.note.epoch;
let epoch_pallas = pallas::Base::from(epoch);
let value_pallas = pallas::Base::from(input.note.value);
let value_commit = pedersen_commitment_u64(input.note.value, input.value_blind);
let public_key = PublicKey::from_secret(input.secret);
let (pub_x, pub_y) = public_key.xy();
let coin = poseidon_hash([pub_x, pub_y, value_pallas, epoch_pallas, input.note.serial]);
let merkle_root = {
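// Fold the authentication path from the leaf up to the root: at each level,
// the corresponding bit of the leaf position selects whether the running
// node is hashed as the left or the right child.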
let position: u64 = input.leaf_position.into();
let mut current = MerkleNode::from(coin);
for (level, sibling) in input.merkle_path.iter().enumerate() {
let level = level as u8;
current = if position & (1 << level) == 0 {
MerkleNode::combine(level.into(), &current, sibling)
} else {
MerkleNode::combine(level.into(), sibling, &current)
};
}
current
};
let public_inputs = ConsensusBurnRevealed {
nullifier,
epoch,
signature_public: public_key,
merkle_root,
value_commit,
};
let prover_witnesses = vec![
Witness::Base(Value::known(value_pallas)),
Witness::Base(Value::known(epoch_pallas)),
Witness::Base(Value::known(input.note.serial)),
Witness::Scalar(Value::known(input.value_blind)),
Witness::Base(Value::known(input.secret.inner())),
Witness::Uint32(Value::known(u64::from(input.leaf_position).try_into().unwrap())),
Witness::MerklePath(Value::known(input.merkle_path.clone().try_into().unwrap())),
];
let circuit = ZkCircuit::new(prover_witnesses, zkbin);
let proof = Proof::create(pk, &[circuit], &public_inputs.to_vec(), &mut OsRng)?;
Ok((proof, public_inputs, input.secret))
}

View File

@@ -1,143 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//! This API is crufty. Please rework it into something nice to read and nice to use.
use darkfi::{
zk::{Proof, ProvingKey},
zkas::ZkBinary,
Result,
};
use darkfi_money_contract::{
client::ConsensusNote,
model::{ClearInput, ConsensusOutput},
};
use darkfi_sdk::{
crypto::{note::AeadEncryptedNote, pasta_prelude::*, Keypair, PublicKey, DARK_TOKEN_ID},
pasta::pallas,
};
use log::{debug, info};
use rand::rngs::OsRng;
use crate::{
client::common::{create_consensus_mint_proof, ConsensusMintOutputInfo},
model::ConsensusGenesisStakeParamsV1,
};
pub struct ConsensusGenesisStakeCallDebris {
pub params: ConsensusGenesisStakeParamsV1,
pub proofs: Vec<Proof>,
}
/// Struct holding necessary information to build a `Consensus::GenesisStakeV1` contract call.
pub struct ConsensusGenesisStakeCallBuilder {
/// Signer keypair, this pubkey is in the clear input, and used to sign the tx.
pub keypair: Keypair,
/// Output pubkey, to whom the minted coin goes. The secret should be managed externally.
pub recipient: PublicKey,
/// Amount of tokens we want to mint and stake
pub amount: u64,
/// `ConsensusMint_V1` zkas circuit ZkBinary
pub mint_zkbin: ZkBinary,
/// Proving key for the `ConsensusMint_V1` zk circuit
pub mint_pk: ProvingKey,
}
impl ConsensusGenesisStakeCallBuilder {
pub fn build(&self) -> Result<ConsensusGenesisStakeCallDebris> {
// We just create the pedersen commitment blinds here. We simply
// enforce that the clear input and the anon output have the same
// commitments.
let value_blind = pallas::Scalar::random(&mut OsRng);
let token_blind = pallas::Base::random(&mut OsRng);
let reward_blind = pallas::Scalar::random(&mut OsRng);
// FIXME: The coin's serial number here is arbitrary, and allows grinding attacks.
let serial = pallas::Base::random(&mut OsRng);
self.build_with_params(value_blind, token_blind, reward_blind, serial)
}
pub fn build_with_params(
&self,
value_blind: pallas::Scalar,
token_blind: pallas::Base,
reward_blind: pallas::Scalar,
serial: pallas::Base,
) -> Result<ConsensusGenesisStakeCallDebris> {
debug!("Building Consensus::GenesisStakeV1 contract call");
let value = self.amount;
assert!(value != 0);
// In this call, we will build one clear input and one anonymous output.
// Only DARK_TOKEN_ID can be minted and staked on the genesis slot.
let token_id = *DARK_TOKEN_ID;
// With genesis, our epoch is 0.
let epoch = 0;
// Parameters for the clear input
let c_input = ClearInput {
value,
token_id,
value_blind,
token_blind,
signature_public: self.keypair.public,
};
// Parameters for the anonymous output
let output = ConsensusMintOutputInfo {
value,
epoch,
public_key: self.recipient,
value_blind,
serial,
};
info!("Creating genesis stake mint proof for output");
let (proof, public_inputs) =
create_consensus_mint_proof(&self.mint_zkbin, &self.mint_pk, &output)?;
// Encrypted note
let note = ConsensusNote {
serial,
value: output.value,
epoch,
value_blind,
reward: 0,
reward_blind,
};
let encrypted_note = AeadEncryptedNote::encrypt(&note, &self.recipient, &mut OsRng)?;
let output = ConsensusOutput {
value_commit: public_inputs.value_commit,
coin: public_inputs.coin,
note: encrypted_note,
};
// We now fill this with necessary stuff
let params = ConsensusGenesisStakeParamsV1 { input: c_input, output };
let proofs = vec![proof];
// Now we should have all the params and zk proof.
// We return it all and let the caller deal with it.
let debris = ConsensusGenesisStakeCallDebris { params, proofs };
Ok(debris)
}
}

View File

@@ -1,44 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//! This module implements the client-side API for this contract's interaction.
//! What we basically do here is implement an API that creates the necessary
//! structures and is able to export them to create a DarkFi transaction
//! object that can be broadcasted to the network.
//!
//! Note that this API does not involve any wallet interaction, but only takes
//! the necessary objects provided by the caller. This is intentional, so we
//! are able to abstract away any wallet interfaces to client implementations.
/// Common functions
pub(crate) mod common;
/// `Consensus::GenesisStakeV1` API
pub mod genesis_stake_v1;
/// `Consensus::StakeV1` API
pub mod stake_v1;
/// Proposal transaction building API.
pub mod proposal_v1;
/// `Consensus::UnstakeRequestV1` API
pub mod unstake_request_v1;
/// `Consensus::UnstakeV1` API
pub mod unstake_v1;
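// A minimal usage sketch for this client API, assuming the caller has already
// loaded the `ConsensusMint_V1` zkas binary and derived its proving key; the
// amount and recipient below are illustrative:
#[allow(dead_code)]
fn genesis_stake_call_sketch(
    mint_zkbin: darkfi::zkas::ZkBinary,
    mint_pk: darkfi::zk::ProvingKey,
) -> darkfi::Result<()> {
    use darkfi_sdk::crypto::Keypair;
    use rand::rngs::OsRng;

    let keypair = Keypair::random(&mut OsRng);
    let recipient = keypair.public;
    let builder = genesis_stake_v1::ConsensusGenesisStakeCallBuilder {
        keypair,
        recipient,
        amount: 1000,
        mint_zkbin,
        mint_pk,
    };
    // The returned debris holds the call params and ZK proofs, which the
    // caller assembles into a DarkFi transaction and broadcasts.
    let debris = builder.build()?;
    let _ = (debris.params, debris.proofs);
    Ok(())
}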

View File

@@ -1,359 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//! This API is crufty. Please rework it into something nice to read and nice to use.
use darkfi::{
error::Error::CoinIsNotSlotProducer,
zk::{halo2::Value, Proof, ProvingKey, Witness, ZkCircuit},
zkas::ZkBinary,
Result,
};
use darkfi_money_contract::{
client::{ConsensusNote, ConsensusOwnCoin},
model::{Coin, ConsensusInput, ConsensusOutput},
};
use darkfi_sdk::{
blockchain::Slot,
bridgetree::Hashable,
crypto::{
ecvrf::VrfProof, note::AeadEncryptedNote, pasta_prelude::*, pedersen_commitment_u64,
poseidon_hash, Keypair, MerkleNode, MerkleTree, Nullifier, PublicKey, SecretKey,
},
pasta::{group::ff::FromUniformBytes, pallas},
};
use log::{debug, error, info};
use rand::rngs::OsRng;
use crate::{
client::common::{ConsensusBurnInputInfo, ConsensusMintOutputInfo},
model::{
ConsensusProposalParamsV1, HEADSTART, MU_RHO_PREFIX, MU_Y_PREFIX, REWARD,
SECRET_KEY_PREFIX, SEED_PREFIX, SERIAL_PREFIX,
},
};
pub struct ConsensusProposalCallDebris {
/// Payload params
pub params: ConsensusProposalParamsV1,
/// ZK proofs
pub proofs: Vec<Proof>,
/// The new output keypair (used in the minted coin)
pub keypair: Keypair,
/// Secret key used to sign the transaction
pub signature_secret: SecretKey,
}
pub struct ConsensusProposalRevealed {
pub nullifier: Nullifier,
pub epoch: u64,
pub public_key: PublicKey,
pub merkle_root: MerkleNode,
pub input_value_commit: pallas::Point,
pub reward: u64,
pub output_value_commit: pallas::Point,
pub output_coin: Coin,
pub vrf_proof: VrfProof,
pub mu_y: pallas::Base,
pub y: pallas::Base,
pub mu_rho: pallas::Base,
pub rho: pallas::Base,
pub sigma1: pallas::Base,
pub sigma2: pallas::Base,
pub headstart: pallas::Base,
}
impl ConsensusProposalRevealed {
fn to_vec(&self) -> Vec<pallas::Base> {
let (pub_x, pub_y) = self.public_key.xy();
let input_value_coords = self.input_value_commit.to_affine().coordinates().unwrap();
let output_value_coords = self.output_value_commit.to_affine().coordinates().unwrap();
// NOTE: It's important to keep these in the same order
// as the `constrain_instance` calls in the zkas code.
vec![
self.nullifier.inner(),
pallas::Base::from(self.epoch),
pub_x,
pub_y,
self.merkle_root.inner(),
*input_value_coords.x(),
*input_value_coords.y(),
pallas::Base::from(self.reward),
*output_value_coords.x(),
*output_value_coords.y(),
self.output_coin.inner(),
self.mu_y,
self.y,
self.mu_rho,
self.rho,
self.sigma1,
self.sigma2,
self.headstart,
]
}
}
/// Struct holding necessary information to build a proposal transaction.
pub struct ConsensusProposalCallBuilder {
/// `ConsensusOwnCoin` we're given to use in this builder
pub owncoin: ConsensusOwnCoin,
/// Rewarded slot
pub slot: Slot,
/// Extending fork last proposal/block hash
pub fork_hash: blake3::Hash,
/// Extending fork second to last proposal/block hash
pub fork_previous_hash: blake3::Hash,
/// Merkle tree of coins used to create inclusion proofs
pub merkle_tree: MerkleTree,
/// `Proposal_V1` zkas circuit ZkBinary
pub proposal_zkbin: ZkBinary,
/// Proving key for the `Proposal_V1` zk circuit
pub proposal_pk: ProvingKey,
}
impl ConsensusProposalCallBuilder {
pub fn build(&self) -> Result<ConsensusProposalCallDebris> {
let input_value_blind = pallas::Scalar::random(&mut OsRng);
let output_reward_blind = pallas::Scalar::random(&mut OsRng);
self.build_with_params(input_value_blind, output_reward_blind)
}
pub fn build_with_params(
&self,
input_value_blind: pallas::Scalar,
output_reward_blind: pallas::Scalar,
) -> Result<ConsensusProposalCallDebris> {
info!("Building Consensus::ProposalBurnV1 contract call");
assert!(self.owncoin.note.value != 0);
debug!("Building Consensus::ProposalV1 anonymous input");
let merkle_path = self.merkle_tree.witness(self.owncoin.leaf_position, 0).unwrap();
let input = ConsensusBurnInputInfo {
leaf_position: self.owncoin.leaf_position,
merkle_path,
secret: self.owncoin.secret,
note: self.owncoin.note.clone(),
value_blind: input_value_blind,
};
debug!("Building Consensus::ProposalV1 anonymous output");
let output_value_blind = input.value_blind + output_reward_blind;
// The output's secret key is derived from the old secret key
let output_secret_key = poseidon_hash([SECRET_KEY_PREFIX, self.owncoin.secret.inner()]);
let output_keypair = Keypair::new(SecretKey::from(output_secret_key));
// The output's serial is derived from the old serial
let output_serial =
poseidon_hash([SERIAL_PREFIX, self.owncoin.secret.inner(), self.owncoin.note.serial]);
let output = ConsensusMintOutputInfo {
value: self.owncoin.note.value + REWARD,
epoch: 0, // We set the epoch as 0 here to eliminate a potential timelock
public_key: output_keypair.public,
value_blind: output_value_blind,
serial: output_serial,
};
info!("Building Consensus::ProposalV1 VRF proof");
let mut vrf_input = Vec::with_capacity(32 + blake3::OUT_LEN + 32);
vrf_input.extend_from_slice(&self.slot.last_nonce.to_repr());
vrf_input.extend_from_slice(self.fork_previous_hash.as_bytes());
vrf_input.extend_from_slice(&pallas::Base::from(self.slot.id).to_repr());
let vrf_proof = VrfProof::prove(input.secret, &vrf_input, &mut OsRng);
info!("Building Consensus::ProposalV1 ZK proof");
let (proof, public_inputs) = create_proposal_proof(
&self.proposal_zkbin,
&self.proposal_pk,
&input,
&output,
&self.slot,
&vrf_proof,
)?;
let tx_input = ConsensusInput {
epoch: input.note.epoch,
value_commit: public_inputs.input_value_commit,
nullifier: public_inputs.nullifier,
merkle_root: public_inputs.merkle_root,
signature_public: public_inputs.public_key,
};
// Output's encrypted note
let note = ConsensusNote {
serial: output.serial,
value: output.value,
epoch: output.epoch,
value_blind: output.value_blind,
reward: REWARD,
reward_blind: output_reward_blind,
};
let encrypted_note = AeadEncryptedNote::encrypt(&note, &output.public_key, &mut OsRng)?;
let tx_output = ConsensusOutput {
value_commit: public_inputs.output_value_commit,
coin: public_inputs.output_coin,
note: encrypted_note,
};
// Construct params
let params = ConsensusProposalParamsV1 {
input: tx_input,
output: tx_output,
reward: REWARD,
reward_blind: output_reward_blind,
fork_hash: self.fork_hash,
fork_previous_hash: self.fork_previous_hash,
vrf_proof,
y: public_inputs.y,
rho: public_inputs.rho,
};
// Construct debris
let debris = ConsensusProposalCallDebris {
params,
proofs: vec![proof],
keypair: output_keypair,
signature_secret: input.secret,
};
Ok(debris)
}
}
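// Illustrative usage sketch (added for this write-up, not part of the original
// file): how a wallet-side caller might drive `ConsensusProposalCallBuilder`.
// Only the builder and its debris come from this file; the surrounding wallet
// state (coin, slot, fork hashes, tree, circuit artifacts) is assumed here and
// therefore passed in as parameters.
#[allow(dead_code)]
fn make_proposal_call(
    staked_coin: ConsensusOwnCoin,
    slot: Slot,
    fork_hash: blake3::Hash,
    fork_previous_hash: blake3::Hash,
    merkle_tree: MerkleTree,
    proposal_zkbin: ZkBinary,
    proposal_pk: ProvingKey,
) -> Result<ConsensusProposalCallDebris> {
    let builder = ConsensusProposalCallBuilder {
        owncoin: staked_coin,
        slot,
        fork_hash,
        fork_previous_hash,
        merkle_tree,
        proposal_zkbin,
        proposal_pk,
    };
    // `debris.params` and `debris.proofs` become the contract call payload,
    // `debris.signature_secret` signs the transaction, and `debris.keypair`
    // owns the newly minted (rewarded) coin.
    builder.build()
}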
fn create_proposal_proof(
zkbin: &ZkBinary,
pk: &ProvingKey,
input: &ConsensusBurnInputInfo,
output: &ConsensusMintOutputInfo,
slot: &Slot,
vrf_proof: &VrfProof,
) -> Result<(Proof, ConsensusProposalRevealed)> {
// TODO: fork_hash to be used as part of rank constrain in the proof
// Calculate lottery parameters
let seed = poseidon_hash([SEED_PREFIX, input.note.serial]);
let mut eta = [0u8; 64];
eta[..blake3::OUT_LEN].copy_from_slice(vrf_proof.hash_output().as_bytes());
let eta = pallas::Base::from_uniform_bytes(&eta);
let mu_y = poseidon_hash([MU_Y_PREFIX, eta, pallas::Base::from(slot.id)]);
let y = poseidon_hash([seed, mu_y]);
let mu_rho = poseidon_hash([MU_RHO_PREFIX, eta, pallas::Base::from(slot.id)]);
let rho = poseidon_hash([seed, mu_rho]);
// Verify coin is the slot block producer
let value_pallas = pallas::Base::from(input.note.value);
let shifted_target =
slot.pid.sigma1 * value_pallas + slot.pid.sigma2 * value_pallas * value_pallas + HEADSTART;
if y >= shifted_target {
error!("MU_Y: {:?}", mu_y);
error!("Y: {:?}", y);
error!("TARGET: {:?}", shifted_target);
return Err(CoinIsNotSlotProducer)
}
// Derive the input's nullifier
let nullifier = Nullifier::from(poseidon_hash([input.secret.inner(), input.note.serial]));
// Create the value commitment for the input
let input_value_commit = pedersen_commitment_u64(input.note.value, input.value_blind);
// Merkle inclusion proof for the input
let public_key = PublicKey::from_secret(input.secret);
let (pub_x, pub_y) = public_key.xy();
let coin = poseidon_hash([
pub_x,
pub_y,
pallas::Base::from(input.note.value),
pallas::Base::from(input.note.epoch),
input.note.serial,
]);
let merkle_root = {
let position: u64 = input.leaf_position.into();
let mut current = MerkleNode::from(coin);
for (level, sibling) in input.merkle_path.iter().enumerate() {
let level = level as u8;
current = if position & (1 << level) == 0 {
MerkleNode::combine(level.into(), &current, sibling)
} else {
MerkleNode::combine(level.into(), sibling, &current)
};
}
current
};
// Derive the new output coin
let (output_x, output_y) = output.public_key.xy();
let output_coin = Coin::from(poseidon_hash([
output_x,
output_y,
pallas::Base::from(output.value),
pallas::Base::from(output.epoch),
output.serial,
]));
// Create the ZK proof
let public_inputs = ConsensusProposalRevealed {
nullifier,
epoch: input.note.epoch,
public_key,
merkle_root,
input_value_commit,
reward: REWARD,
output_value_commit: pedersen_commitment_u64(output.value, output.value_blind),
output_coin,
vrf_proof: *vrf_proof,
mu_y,
y,
mu_rho,
rho,
sigma1: slot.pid.sigma1,
sigma2: slot.pid.sigma2,
headstart: HEADSTART,
};
let prover_witnesses = vec![
Witness::Base(Value::known(input.secret.inner())),
Witness::Base(Value::known(input.note.serial)),
Witness::Base(Value::known(pallas::Base::from(input.note.value))),
Witness::Base(Value::known(pallas::Base::from(input.note.epoch))),
Witness::Base(Value::known(pallas::Base::from(REWARD))),
Witness::Scalar(Value::known(input.value_blind)),
Witness::Uint32(Value::known(u64::from(input.leaf_position).try_into().unwrap())),
Witness::MerklePath(Value::known(input.merkle_path.clone().try_into().unwrap())),
Witness::Scalar(Value::known(output.value_blind)),
Witness::Base(Value::known(public_inputs.mu_y)),
Witness::Base(Value::known(public_inputs.mu_rho)),
Witness::Base(Value::known(public_inputs.sigma1)),
Witness::Base(Value::known(public_inputs.sigma2)),
Witness::Base(Value::known(public_inputs.headstart)),
];
let circuit = ZkCircuit::new(prover_witnesses, zkbin);
let proof = Proof::create(pk, &[circuit], &public_inputs.to_vec(), &mut OsRng)?;
Ok((proof, public_inputs))
}
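// Illustrative lottery check (added for this write-up, not part of the original
// file): a staked coin may propose for a slot only if its derived `y` falls
// below the value-weighted target T = sigma1*v + sigma2*v^2 + HEADSTART. This
// mirrors the plain-text check in `create_proposal_proof` above; the ZK proof
// enforces the same inequality without revealing the coin's value.
#[allow(dead_code)]
fn wins_lottery(y: pallas::Base, coin_value: u64, slot: &Slot) -> bool {
    let v = pallas::Base::from(coin_value);
    let shifted_target = slot.pid.sigma1 * v + slot.pid.sigma2 * v * v + HEADSTART;
    y < shifted_target
}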

View File

@@ -1,117 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//! This API is crufty. Please rework it into something nice to read and nice to use.
use darkfi::{
zk::{Proof, ProvingKey},
zkas::ZkBinary,
Result,
};
use darkfi_money_contract::{
client::{ConsensusNote, OwnCoin},
model::{ConsensusOutput, ConsensusStakeParamsV1, Input},
};
use darkfi_sdk::{
crypto::{note::AeadEncryptedNote, pasta_prelude::*, PublicKey, SecretKey, DARK_TOKEN_ID},
pasta::pallas,
};
use log::{debug, info};
use rand::rngs::OsRng;
use crate::client::common::{create_consensus_mint_proof, ConsensusMintOutputInfo};
pub struct ConsensusStakeCallDebris {
pub params: ConsensusStakeParamsV1,
pub proofs: Vec<Proof>,
pub signature_secret: SecretKey,
}
/// Struct holding necessary information to build a `Consensus::StakeV1` contract call.
pub struct ConsensusStakeCallBuilder {
/// `OwnCoin` we're given to use in this builder
pub coin: OwnCoin,
/// Epoch in which the staked coin is minted
pub epoch: u64,
/// Blinding factor for value commitment
pub value_blind: pallas::Scalar,
/// The money `Input` (comes from the previous `Money::Stake` contract call)
pub money_input: Input,
/// `ConsensusMint_V1` zkas circuit ZkBinary
pub mint_zkbin: ZkBinary,
/// Proving key for the `ConsensusMint_V1` zk circuit
pub mint_pk: ProvingKey,
}
impl ConsensusStakeCallBuilder {
pub fn build(&self) -> Result<ConsensusStakeCallDebris> {
let serial = pallas::Base::random(&mut OsRng);
self.build_with_params(serial)
}
pub fn build_with_params(&self, serial: pallas::Base) -> Result<ConsensusStakeCallDebris> {
debug!("Building Consensus::StakeV1 contract call");
assert!(self.coin.note.value != 0);
assert!(self.coin.note.token_id == *DARK_TOKEN_ID);
debug!("Building anonymous output");
let public_key = PublicKey::from_secret(self.coin.secret);
let output = ConsensusMintOutputInfo {
value: self.coin.note.value,
epoch: self.epoch,
public_key,
value_blind: self.value_blind,
serial,
};
debug!("Finished building output");
info!("Creating stake mint proof for output");
let (proof, public_inputs) =
create_consensus_mint_proof(&self.mint_zkbin, &self.mint_pk, &output)?;
// Encrypted note
let note = ConsensusNote {
serial,
value: output.value,
epoch: self.epoch,
value_blind: self.value_blind,
reward: 0,
reward_blind: self.value_blind,
};
let encrypted_note = AeadEncryptedNote::encrypt(&note, &output.public_key, &mut OsRng)?;
let output = ConsensusOutput {
value_commit: public_inputs.value_commit,
coin: public_inputs.coin,
note: encrypted_note,
};
// We now fill this with necessary stuff
let params = ConsensusStakeParamsV1 { input: self.money_input.clone(), output };
let proofs = vec![proof];
// Now we should have all the params and zk proof.
// We return it all and let the caller deal with it.
let debris =
ConsensusStakeCallDebris { params, proofs, signature_secret: self.coin.secret };
Ok(debris)
}
}
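// Illustrative usage sketch (added for this write-up, not part of the original
// file): the stake call builder is expected to be driven right after the
// matching `Money::StakeV1` call has been built, reusing that call's input and
// value blind so the two value commitments match. Everything outside the
// builder is assumed and passed in as parameters.
#[allow(dead_code)]
fn make_stake_call(
    coin: OwnCoin,
    epoch: u64,
    value_blind: pallas::Scalar,
    money_input: Input,
    mint_zkbin: ZkBinary,
    mint_pk: ProvingKey,
) -> Result<ConsensusStakeCallDebris> {
    let builder =
        ConsensusStakeCallBuilder { coin, epoch, value_blind, money_input, mint_zkbin, mint_pk };
    // `debris.params` and `debris.proofs` form the Consensus::StakeV1 payload;
    // `debris.signature_secret` is the coin's secret used to sign the transaction.
    builder.build()
}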

View File

@@ -1,165 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//! This API is crufty. Please rework it into something nice to read and nice to use.
use darkfi::{
zk::{Proof, ProvingKey},
zkas::ZkBinary,
Result,
};
use darkfi_money_contract::{
client::{ConsensusNote, ConsensusOwnCoin},
model::{ConsensusInput, ConsensusOutput, ConsensusUnstakeReqParamsV1},
};
use darkfi_sdk::{
crypto::{
note::AeadEncryptedNote, pasta_prelude::*, poseidon_hash, Keypair, MerkleTree, SecretKey,
},
pasta::pallas,
};
use log::{debug, info};
use rand::rngs::OsRng;
use crate::{
client::common::{
create_consensus_burn_proof, create_consensus_mint_proof, ConsensusBurnInputInfo,
ConsensusMintOutputInfo,
},
model::{SECRET_KEY_PREFIX, SERIAL_PREFIX},
};
pub struct ConsensusUnstakeRequestCallDebris {
/// Payload params
pub params: ConsensusUnstakeReqParamsV1,
/// ZK proofs
pub proofs: Vec<Proof>,
/// The new output keypair (used in the minted coin)
pub keypair: Keypair,
/// Secret key used to sign the transaction
pub signature_secret: SecretKey,
}
/// Struct holding necessary information to build a `Consensus::UnstakeRequestV1` contract call.
pub struct ConsensusUnstakeRequestCallBuilder {
/// `ConsensusOwnCoin` we're given to use in this builder
pub owncoin: ConsensusOwnCoin,
/// Epoch in which the unstaked coin is minted
pub epoch: u64,
/// Merkle tree of coins used to create inclusion proofs
pub tree: MerkleTree,
/// `ConsensusBurn_V1` zkas circuit ZkBinary
pub burn_zkbin: ZkBinary,
/// Proving key for the `ConsensusBurn_V1` zk circuit
pub burn_pk: ProvingKey,
/// `ConsensusMint_V1` zkas circuit ZkBinary
pub mint_zkbin: ZkBinary,
/// Proving key for the `ConsensusMint_V1` zk circuit
pub mint_pk: ProvingKey,
}
impl ConsensusUnstakeRequestCallBuilder {
pub fn build(&self) -> Result<ConsensusUnstakeRequestCallDebris> {
let input_value_blind = pallas::Scalar::random(&mut OsRng);
self.build_with_params(input_value_blind)
}
pub fn build_with_params(
&self,
input_value_blind: pallas::Scalar,
) -> Result<ConsensusUnstakeRequestCallDebris> {
info!("Building Consensus::UnstakeRequestV1 contract call");
assert!(self.owncoin.note.value != 0);
debug!("Building Consensus::UnstakeRequestV1 anonymous input");
let merkle_path = self.tree.witness(self.owncoin.leaf_position, 0).unwrap();
let input = ConsensusBurnInputInfo {
leaf_position: self.owncoin.leaf_position,
merkle_path,
secret: self.owncoin.secret,
note: self.owncoin.note.clone(),
value_blind: input_value_blind,
};
debug!("Building Consensus::UnstakeRequestV1 anonymous output");
// The output's secret key is derived from the old secret key
let output_secret_key = poseidon_hash([SECRET_KEY_PREFIX, self.owncoin.secret.inner()]);
let output_keypair = Keypair::new(SecretKey::from(output_secret_key));
let output_serial =
poseidon_hash([SERIAL_PREFIX, self.owncoin.secret.inner(), self.owncoin.note.serial]);
let output = ConsensusMintOutputInfo {
value: self.owncoin.note.value,
epoch: self.epoch,
public_key: output_keypair.public,
value_blind: input.value_blind,
serial: output_serial,
};
info!("Building Consensus::UnstakeRequestV1 Burn ZK proof");
let (burn_proof, public_inputs, signature_secret) =
create_consensus_burn_proof(&self.burn_zkbin, &self.burn_pk, &input)?;
let tx_input = ConsensusInput {
epoch: self.owncoin.note.epoch,
value_commit: public_inputs.value_commit,
nullifier: public_inputs.nullifier,
merkle_root: public_inputs.merkle_root,
signature_public: public_inputs.signature_public,
};
info!("Building Consensus::UnstakeRequestV1 Mint ZK proof");
let (mint_proof, public_inputs) =
create_consensus_mint_proof(&self.mint_zkbin, &self.mint_pk, &output)?;
// Encrypted note
let note = ConsensusNote {
serial: output_serial,
value: output.value,
epoch: output.epoch,
value_blind: input.value_blind,
reward: 0,
reward_blind: pallas::Scalar::ZERO,
};
let encrypted_note = AeadEncryptedNote::encrypt(&note, &output.public_key, &mut OsRng)?;
let tx_output = ConsensusOutput {
value_commit: public_inputs.value_commit,
coin: public_inputs.coin,
note: encrypted_note,
};
// We now fill this with necessary stuff
let params = ConsensusUnstakeReqParamsV1 { input: tx_input, output: tx_output };
// Construct debris
let debris = ConsensusUnstakeRequestCallDebris {
params,
proofs: vec![burn_proof, mint_proof],
keypair: output_keypair,
signature_secret,
};
Ok(debris)
}
}
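// Illustrative helper (added for this write-up, not part of the original file):
// the output coin's keypair and serial are derived deterministically from the
// burnt coin's secret, mirroring the derivation in `build_with_params` above,
// so a wallet can re-derive them later instead of persisting the new keypair.
#[allow(dead_code)]
fn derive_output_secrets(
    old_secret: SecretKey,
    old_serial: pallas::Base,
) -> (Keypair, pallas::Base) {
    let output_secret_key = poseidon_hash([SECRET_KEY_PREFIX, old_secret.inner()]);
    let output_keypair = Keypair::new(SecretKey::from(output_secret_key));
    let output_serial = poseidon_hash([SERIAL_PREFIX, old_secret.inner(), old_serial]);
    (output_keypair, output_serial)
}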

View File

@@ -1,111 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//! This API is crufty. Please rework it into something nice to read and nice to use.
use darkfi::{
zk::{Proof, ProvingKey},
zkas::ZkBinary,
Result,
};
use darkfi_money_contract::{
client::ConsensusOwnCoin,
model::{ConsensusInput, ConsensusUnstakeParamsV1},
};
use darkfi_sdk::{
crypto::{pasta_prelude::*, MerkleTree, SecretKey},
pasta::pallas,
};
use log::{debug, info};
use rand::rngs::OsRng;
use crate::client::common::{create_consensus_burn_proof, ConsensusBurnInputInfo};
pub struct ConsensusUnstakeCallDebris {
/// Payload params
pub params: ConsensusUnstakeParamsV1,
/// ZK proofs
pub proofs: Vec<Proof>,
/// Secret key used to sign the transaction
pub signature_secret: SecretKey,
/// Value blind to be used in the `Money::UnstakeV1` call
pub value_blind: pallas::Scalar,
}
/// Struct holding necessary information to build a `Consensus::UnstakeV1` contract call.
pub struct ConsensusUnstakeCallBuilder {
/// `ConsensusOwnCoin` we're given to use in this builder
pub owncoin: ConsensusOwnCoin,
/// Merkle tree of coins used to create inclusion proofs
pub tree: MerkleTree,
/// `ConsensusBurn_V1` zkas circuit ZkBinary
pub burn_zkbin: ZkBinary,
/// Proving key for the `ConsensusBurn_V1` zk circuit
pub burn_pk: ProvingKey,
}
impl ConsensusUnstakeCallBuilder {
pub fn build(&self) -> Result<ConsensusUnstakeCallDebris> {
let input_value_blind = pallas::Scalar::random(&mut OsRng);
self.build_with_params(input_value_blind)
}
pub fn build_with_params(
&self,
input_value_blind: pallas::Scalar,
) -> Result<ConsensusUnstakeCallDebris> {
info!("Building Consensus::UnstakeV1 contract call");
assert!(self.owncoin.note.value != 0);
debug!("Building Consensus::UnstakeV1 anonymous input");
let merkle_path = self.tree.witness(self.owncoin.leaf_position, 0).unwrap();
let input = ConsensusBurnInputInfo {
leaf_position: self.owncoin.leaf_position,
merkle_path,
secret: self.owncoin.secret,
note: self.owncoin.note.clone(),
value_blind: input_value_blind,
};
info!("Building Consensus::UnstakeV1 Burn ZK proof");
let (proof, public_inputs, signature_secret) =
create_consensus_burn_proof(&self.burn_zkbin, &self.burn_pk, &input)?;
let tx_input = ConsensusInput {
epoch: self.owncoin.note.epoch,
value_commit: public_inputs.value_commit,
nullifier: public_inputs.nullifier,
merkle_root: public_inputs.merkle_root,
signature_public: public_inputs.signature_public,
};
// We now fill this with necessary stuff
let params = ConsensusUnstakeParamsV1 { input: tx_input };
// Construct debris
let debris = ConsensusUnstakeCallDebris {
params,
proofs: vec![proof],
signature_secret,
value_blind: input.value_blind,
};
Ok(debris)
}
}
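// Illustrative usage sketch (added for this write-up, not part of the original
// file): the unstake burn is built first, and the returned `value_blind` is
// then reused by the follow-up `Money::UnstakeV1` call so the minted money
// coin commits to the same value. Everything outside the builder is assumed.
#[allow(dead_code)]
fn make_unstake_call(
    owncoin: ConsensusOwnCoin,
    tree: MerkleTree,
    burn_zkbin: ZkBinary,
    burn_pk: ProvingKey,
) -> Result<ConsensusUnstakeCallDebris> {
    let builder = ConsensusUnstakeCallBuilder { owncoin, tree, burn_zkbin, burn_pk };
    // `debris.value_blind` must be carried over to the Money::UnstakeV1 builder.
    builder.build()
}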

View File

@@ -1,264 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_money_contract::{
model::{ConsensusStakeUpdateV1, ConsensusUnstakeUpdateV1},
CONSENSUS_CONTRACT_DB_VERSION, CONSENSUS_CONTRACT_INFO_TREE,
CONSENSUS_CONTRACT_NULLIFIERS_TREE, CONSENSUS_CONTRACT_STAKED_COINS_TREE,
CONSENSUS_CONTRACT_STAKED_COIN_MERKLE_TREE, CONSENSUS_CONTRACT_STAKED_COIN_ROOTS_TREE,
CONSENSUS_CONTRACT_UNSTAKED_COINS_TREE, CONSENSUS_CONTRACT_UNSTAKED_COIN_MERKLE_TREE,
CONSENSUS_CONTRACT_UNSTAKED_COIN_ROOTS_TREE,
};
use darkfi_sdk::{
crypto::{ContractId, MerkleTree},
dark_tree::DarkLeaf,
db::{db_init, db_lookup, db_set, zkas_db_set},
error::{ContractError, ContractResult},
msg,
util::set_return_data,
ContractCall,
};
use darkfi_serial::{deserialize, serialize, Encodable, WriteExt};
use crate::{
model::{ConsensusGenesisStakeUpdateV1, ConsensusProposalUpdateV1},
ConsensusFunction,
};
/// `Consensus::GenesisStake` functions
mod genesis_stake_v1;
use genesis_stake_v1::{
consensus_genesis_stake_get_metadata_v1, consensus_genesis_stake_process_instruction_v1,
consensus_genesis_stake_process_update_v1,
};
/// `Consensus::Stake` functions
mod stake_v1;
use stake_v1::{
consensus_stake_get_metadata_v1, consensus_stake_process_instruction_v1,
consensus_stake_process_update_v1,
};
/// `Consensus::ProposalV1` functions
mod proposal_v1;
use proposal_v1::{
consensus_proposal_get_metadata_v1, consensus_proposal_process_instruction_v1,
consensus_proposal_process_update_v1,
};
/// `Consensus::UnstakeRequest` functions
mod unstake_request_v1;
use unstake_request_v1::{
consensus_unstake_request_get_metadata_v1, consensus_unstake_request_process_instruction_v1,
consensus_unstake_request_process_update_v1,
};
/// `Consensus::Unstake` functions
mod unstake_v1;
use unstake_v1::{
consensus_unstake_get_metadata_v1, consensus_unstake_process_instruction_v1,
consensus_unstake_process_update_v1,
};
darkfi_sdk::define_contract!(
init: init_contract,
exec: process_instruction,
apply: process_update,
metadata: get_metadata
);
/// This entrypoint function runs when the contract is (re)deployed and initialized.
/// We use this function to initialize all the necessary databases and prepare them
/// with initial data if necessary. This is also the place where we bundle the zkas
/// circuits that are to be used with functions provided by the contract.
fn init_contract(cid: ContractId, _ix: &[u8]) -> ContractResult {
// zkas circuits can simply be embedded in the wasm and set up by using
// respective db functions. The special `zkas db` operations exist in
// order to be able to verify the circuits being bundled and enforcing
// a specific tree inside sled, and also creation of VerifyingKey.
let consensus_mint_v1_bincode = include_bytes!("../proof/consensus_mint_v1.zk.bin");
let consensus_burn_v1_bincode = include_bytes!("../proof/consensus_burn_v1.zk.bin");
let consensus_proposal_v1_bincode = include_bytes!("../proof/consensus_proposal_v1.zk.bin");
// For that, we use `zkas_db_set` and pass in the bincode.
zkas_db_set(&consensus_mint_v1_bincode[..])?;
zkas_db_set(&consensus_burn_v1_bincode[..])?;
zkas_db_set(&consensus_proposal_v1_bincode[..])?;
// Set up a database tree to hold Merkle roots of all staked coins
// k=MerkleNode, v=[]
if db_lookup(cid, CONSENSUS_CONTRACT_STAKED_COIN_ROOTS_TREE).is_err() {
db_init(cid, CONSENSUS_CONTRACT_STAKED_COIN_ROOTS_TREE)?;
}
// Set up a database tree to hold all staked coins ever seen
// k=Coin, v=[]
if db_lookup(cid, CONSENSUS_CONTRACT_STAKED_COINS_TREE).is_err() {
db_init(cid, CONSENSUS_CONTRACT_STAKED_COINS_TREE)?;
}
// Set up a database tree to hold nullifiers of all spent coins
// k=Nullifier, v=[]
if db_lookup(cid, CONSENSUS_CONTRACT_NULLIFIERS_TREE).is_err() {
db_init(cid, CONSENSUS_CONTRACT_NULLIFIERS_TREE)?;
}
// Set up a database tree to hold Merkle roots of all unstaked coins
// k=MerkleNode, v=[]
if db_lookup(cid, CONSENSUS_CONTRACT_UNSTAKED_COIN_ROOTS_TREE).is_err() {
db_init(cid, CONSENSUS_CONTRACT_UNSTAKED_COIN_ROOTS_TREE)?;
}
// Set up a database tree to hold all unstaked coins ever seen
// k=Coin, v=[]
if db_lookup(cid, CONSENSUS_CONTRACT_UNSTAKED_COINS_TREE).is_err() {
db_init(cid, CONSENSUS_CONTRACT_UNSTAKED_COINS_TREE)?;
}
// Set up a database tree for arbitrary data
let info_db = match db_lookup(cid, CONSENSUS_CONTRACT_INFO_TREE) {
Ok(v) => v,
Err(_) => {
let info_db = db_init(cid, CONSENSUS_CONTRACT_INFO_TREE)?;
// Create the Merkle tree for staked and unstaked coins.
// We can simply reuse the same empty tree twice.
let coin_tree = MerkleTree::new(100);
let mut coin_tree_data = vec![];
coin_tree_data.write_u32(0)?;
coin_tree.encode(&mut coin_tree_data)?;
db_set(info_db, CONSENSUS_CONTRACT_STAKED_COIN_MERKLE_TREE, &coin_tree_data)?;
db_set(info_db, CONSENSUS_CONTRACT_UNSTAKED_COIN_MERKLE_TREE, &coin_tree_data)?;
info_db
}
};
// Update db version
db_set(info_db, CONSENSUS_CONTRACT_DB_VERSION, &serialize(&env!("CARGO_PKG_VERSION")))?;
Ok(())
}
/// This function is used by the wasm VM's host to fetch the necessary metadata
/// for verifying signatures and zk proofs. The payload given here are all the
/// contract calls in the transaction.
fn get_metadata(cid: ContractId, ix: &[u8]) -> ContractResult {
let (call_idx, calls): (u32, Vec<DarkLeaf<ContractCall>>) = deserialize(ix)?;
if call_idx >= calls.len() as u32 {
msg!("Error: call_idx >= calls.len()");
return Err(ContractError::Internal)
}
match ConsensusFunction::try_from(calls[call_idx as usize].data.data[0])? {
ConsensusFunction::GenesisStakeV1 => {
// We pass everything into the correct function, and it will return
// the metadata for us, which we can then copy into the host with
// the `set_return_data` function. On the host, this metadata will
// be used to do external verification (zk proofs, and signatures).
let metadata = consensus_genesis_stake_get_metadata_v1(cid, call_idx, calls)?;
Ok(set_return_data(&metadata)?)
}
ConsensusFunction::StakeV1 => {
let metadata = consensus_stake_get_metadata_v1(cid, call_idx, calls)?;
Ok(set_return_data(&metadata)?)
}
ConsensusFunction::ProposalV1 => {
let metadata = consensus_proposal_get_metadata_v1(cid, call_idx, calls)?;
Ok(set_return_data(&metadata)?)
}
ConsensusFunction::UnstakeRequestV1 => {
let metadata = consensus_unstake_request_get_metadata_v1(cid, call_idx, calls)?;
Ok(set_return_data(&metadata)?)
}
ConsensusFunction::UnstakeV1 => {
let metadata = consensus_unstake_get_metadata_v1(cid, call_idx, calls)?;
Ok(set_return_data(&metadata)?)
}
}
}
/// This function verifies a state transition and produces a state update
/// if everything is successful. This step should happen **after** the host
/// has successfully verified the metadata from `get_metadata()`.
fn process_instruction(cid: ContractId, ix: &[u8]) -> ContractResult {
let (call_idx, calls): (u32, Vec<DarkLeaf<ContractCall>>) = deserialize(ix)?;
if call_idx >= calls.len() as u32 {
msg!("Error: call_idx >= calls.len()");
return Err(ContractError::Internal)
}
match ConsensusFunction::try_from(calls[call_idx as usize].data.data[0])? {
ConsensusFunction::GenesisStakeV1 => {
// Again, we pass everything into the correct function.
// If it executes successfully, we'll get a state update
// which we can copy into the host using `set_return_data`.
// This update can then be written with `process_update()`
// if everything is in order.
let update_data = consensus_genesis_stake_process_instruction_v1(cid, call_idx, calls)?;
Ok(set_return_data(&update_data)?)
}
ConsensusFunction::StakeV1 => {
let update_data = consensus_stake_process_instruction_v1(cid, call_idx, calls)?;
Ok(set_return_data(&update_data)?)
}
ConsensusFunction::ProposalV1 => {
let update_data = consensus_proposal_process_instruction_v1(cid, call_idx, calls)?;
Ok(set_return_data(&update_data)?)
}
ConsensusFunction::UnstakeRequestV1 => {
let update_data =
consensus_unstake_request_process_instruction_v1(cid, call_idx, calls)?;
Ok(set_return_data(&update_data)?)
}
ConsensusFunction::UnstakeV1 => {
let update_data = consensus_unstake_process_instruction_v1(cid, call_idx, calls)?;
Ok(set_return_data(&update_data)?)
}
}
}
/// This function attempts to write a given state update provided the previous steps
/// of the contract call execution all were successful. It's the last in line, and
/// assumes that the transaction/call was successful. The payload given to the function
/// is the update data retrieved from `process_instruction()`.
fn process_update(cid: ContractId, update_data: &[u8]) -> ContractResult {
match ConsensusFunction::try_from(update_data[0])? {
ConsensusFunction::GenesisStakeV1 => {
let update: ConsensusGenesisStakeUpdateV1 = deserialize(&update_data[1..])?;
Ok(consensus_genesis_stake_process_update_v1(cid, update)?)
}
ConsensusFunction::StakeV1 => {
let update: ConsensusStakeUpdateV1 = deserialize(&update_data[1..])?;
Ok(consensus_stake_process_update_v1(cid, update)?)
}
ConsensusFunction::ProposalV1 => {
let update: ConsensusProposalUpdateV1 = deserialize(&update_data[1..])?;
Ok(consensus_proposal_process_update_v1(cid, update)?)
}
ConsensusFunction::UnstakeRequestV1 => {
let update: ConsensusProposalUpdateV1 = deserialize(&update_data[1..])?;
Ok(consensus_unstake_request_process_update_v1(cid, update)?)
}
ConsensusFunction::UnstakeV1 => {
let update: ConsensusUnstakeUpdateV1 = deserialize(&update_data[1..])?;
Ok(consensus_unstake_process_update_v1(cid, update)?)
}
}
}
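// Illustrative framing sketch (added for this write-up, not part of the
// original file): every update returned by `process_instruction` is prefixed
// with the function discriminant byte, which `process_update` reads back to
// select the matching deserializer. Both sides must agree on this framing.
#[allow(dead_code)]
fn frame_update<T: Encodable>(
    function: ConsensusFunction,
    update: &T,
) -> Result<Vec<u8>, ContractError> {
    let mut update_data = vec![];
    update_data.write_u8(function as u8)?;
    update.encode(&mut update_data)?;
    Ok(update_data)
}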

View File

@@ -1,164 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_money_contract::{
error::MoneyError, CONSENSUS_CONTRACT_INFO_TREE, CONSENSUS_CONTRACT_STAKED_COINS_TREE,
CONSENSUS_CONTRACT_STAKED_COIN_LATEST_COIN_ROOT, CONSENSUS_CONTRACT_STAKED_COIN_MERKLE_TREE,
CONSENSUS_CONTRACT_STAKED_COIN_ROOTS_TREE, CONSENSUS_CONTRACT_UNSTAKED_COINS_TREE,
CONSENSUS_CONTRACT_ZKAS_MINT_NS_V1,
};
use darkfi_sdk::{
crypto::{pasta_prelude::*, pedersen_commitment_u64, ContractId, MerkleNode, DARK_TOKEN_ID},
dark_tree::DarkLeaf,
db::{db_contains_key, db_lookup, db_set},
error::{ContractError, ContractResult},
merkle_add, msg,
pasta::pallas,
util::get_verifying_slot,
ContractCall,
};
use darkfi_serial::{deserialize, serialize, Encodable, WriteExt};
use crate::{
model::{ConsensusGenesisStakeParamsV1, ConsensusGenesisStakeUpdateV1},
ConsensusFunction,
};
/// `get_metadata` function for `Consensus::GenesisStakeV1`
///
/// Here we gather the signature pubkey from the clear input in order
/// to verify the transaction, and we extract the necessary public inputs
/// that go into the `ConsensusMint_V1` proof verification.
pub(crate) fn consensus_genesis_stake_get_metadata_v1(
_cid: ContractId,
call_idx: u32,
calls: Vec<DarkLeaf<ContractCall>>,
) -> Result<Vec<u8>, ContractError> {
let self_ = &calls[call_idx as usize].data;
let params: ConsensusGenesisStakeParamsV1 = deserialize(&self_.data[1..])?;
// Public inputs for the ZK proofs we have to verify
let mut zk_public_inputs: Vec<(String, Vec<pallas::Base>)> = vec![];
// Public keys for the transaction signatures we have to verify
let signature_pubkeys = vec![params.input.signature_public];
// Genesis stake only happens on epoch 0
let epoch = pallas::Base::ZERO;
// Grab the pedersen commitment from the anonymous output
let value_coords = params.output.value_commit.to_affine().coordinates().unwrap();
zk_public_inputs.push((
CONSENSUS_CONTRACT_ZKAS_MINT_NS_V1.to_string(),
vec![epoch, params.output.coin.inner(), *value_coords.x(), *value_coords.y()],
));
// Serialize everything gathered and return it
let mut metadata = vec![];
zk_public_inputs.encode(&mut metadata)?;
signature_pubkeys.encode(&mut metadata)?;
Ok(metadata)
}
/// `process_instruction` function for `Consensus::GenesisStakeV1`
pub(crate) fn consensus_genesis_stake_process_instruction_v1(
cid: ContractId,
call_idx: u32,
calls: Vec<DarkLeaf<ContractCall>>,
) -> Result<Vec<u8>, ContractError> {
let self_ = &calls[call_idx as usize].data;
let params: ConsensusGenesisStakeParamsV1 = deserialize(&self_.data[1..])?;
// Verify this contract call is verified on the genesis slot (0).
let verifying_slot = get_verifying_slot();
if verifying_slot != 0 {
msg!(
"[ConsensusGenesisStakeV1] Error: Call is executed for slot {}, not genesis",
verifying_slot
);
return Err(MoneyError::GenesisCallNonGenesisSlot.into())
}
// Only DARK_TOKEN_ID can be minted and staked on genesis slot.
if params.input.token_id != *DARK_TOKEN_ID {
msg!("[ConsensusGenesisStakeV1] Error: Clear input used non-native token");
return Err(MoneyError::TransferClearInputNonNativeToken.into())
}
// Access the necessary databases where there is information to
// validate this state transition.
let staked_coins_db = db_lookup(cid, CONSENSUS_CONTRACT_STAKED_COINS_TREE)?;
let unstaked_coins_db = db_lookup(cid, CONSENSUS_CONTRACT_UNSTAKED_COINS_TREE)?;
// Check that the coin from the output hasn't existed before.
let coin = serialize(&params.output.coin);
if db_contains_key(staked_coins_db, &coin)? {
msg!("[ConsensusGenesisStakeV1] Error: Output coin was already seen in the set of staked coins");
return Err(MoneyError::DuplicateCoin.into())
}
// Check that the coin from the output hasn't existed before in unstake set.
if db_contains_key(unstaked_coins_db, &coin)? {
msg!("[ConsensusGenesisStakeV1] Error: Output coin was already seen in the set of unstaked coins");
return Err(MoneyError::DuplicateCoin.into())
}
// Verify that the value commitments match. In here we just confirm
// that the clear input and the anon output have the same commitment.
if pedersen_commitment_u64(params.input.value, params.input.value_blind) !=
params.output.value_commit
{
msg!("[ConsensusGenesisStakeV1] Error: Value commitment mismatch");
return Err(MoneyError::ValueMismatch.into())
}
// Create a state update.
let update = ConsensusGenesisStakeUpdateV1 { coin: params.output.coin };
let mut update_data = vec![];
update_data.write_u8(ConsensusFunction::GenesisStakeV1 as u8)?;
update.encode(&mut update_data)?;
Ok(update_data)
}
/// `process_update` function for `Consensus::GenesisStakeV1`
pub(crate) fn consensus_genesis_stake_process_update_v1(
cid: ContractId,
update: ConsensusGenesisStakeUpdateV1,
) -> ContractResult {
// Grab all necessary db handles for where we want to write
let info_db = db_lookup(cid, CONSENSUS_CONTRACT_INFO_TREE)?;
let staked_coins_db = db_lookup(cid, CONSENSUS_CONTRACT_STAKED_COINS_TREE)?;
let staked_coin_roots_db = db_lookup(cid, CONSENSUS_CONTRACT_STAKED_COIN_ROOTS_TREE)?;
msg!("[ConsensusGenesisStakeV1] Adding new coin to the set");
db_set(staked_coins_db, &serialize(&update.coin), &[])?;
msg!("[ConsensusGenesisStakeV1] Adding new coin to the Merkle tree");
let coins: Vec<_> = vec![MerkleNode::from(update.coin.inner())];
merkle_add(
info_db,
staked_coin_roots_db,
CONSENSUS_CONTRACT_STAKED_COIN_LATEST_COIN_ROOT,
CONSENSUS_CONTRACT_STAKED_COIN_MERKLE_TREE,
&coins,
)?;
Ok(())
}

View File

@@ -1,249 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_money_contract::{
error::MoneyError, CONSENSUS_CONTRACT_INFO_TREE, CONSENSUS_CONTRACT_NULLIFIERS_TREE,
CONSENSUS_CONTRACT_STAKED_COINS_TREE, CONSENSUS_CONTRACT_STAKED_COIN_LATEST_COIN_ROOT,
CONSENSUS_CONTRACT_STAKED_COIN_MERKLE_TREE, CONSENSUS_CONTRACT_STAKED_COIN_ROOTS_TREE,
CONSENSUS_CONTRACT_ZKAS_PROPOSAL_NS_V1,
};
use darkfi_sdk::{
blockchain::Slot,
crypto::{pasta_prelude::*, pedersen_commitment_u64, poseidon_hash, ContractId, MerkleNode},
dark_tree::DarkLeaf,
db::{db_contains_key, db_lookup, db_set},
error::{ContractError, ContractResult},
merkle_add, msg,
pasta::{group::ff::FromUniformBytes, pallas},
util::{get_slot, get_verifying_slot, get_verifying_slot_epoch},
ContractCall,
};
use darkfi_serial::{deserialize, serialize, Encodable, WriteExt};
use crate::{
error::ConsensusError,
model::{
ConsensusProposalParamsV1, ConsensusProposalUpdateV1, GRACE_PERIOD, HEADSTART,
MU_RHO_PREFIX, MU_Y_PREFIX, REWARD, REWARD_PALLAS,
},
ConsensusFunction,
};
/// `get_metadata` function for `Consensus::ProposalV1`
pub(crate) fn consensus_proposal_get_metadata_v1(
_cid: ContractId,
call_idx: u32,
calls: Vec<DarkLeaf<ContractCall>>,
) -> Result<Vec<u8>, ContractError> {
let self_ = &calls[call_idx as usize].data;
let params: ConsensusProposalParamsV1 = deserialize(&self_.data[1..])?;
// Public inputs for the ZK proofs we have to verify
let mut zk_public_inputs: Vec<(String, Vec<pallas::Base>)> = vec![];
// Public keys for the transaction signatures we have to verify.
// The transaction should be signed with the same key that is used for
// the VRF proof, and also constrained in ZK by enforcing its derivation.
let signature_pubkeys = vec![params.input.signature_public];
// Grab the public key coordinates for the burnt coin
let (pub_x, pub_y) = &params.input.signature_public.xy();
// Grab the pedersen commitment for the burnt value
let input_value_coords = &params.input.value_commit.to_affine().coordinates().unwrap();
// Grab the pedersen commitment for the minted value
let output_value_coords = &params.output.value_commit.to_affine().coordinates().unwrap();
// Grab the slot to validate consensus params against
let v_slot = get_verifying_slot();
let Some(slot) = get_slot(v_slot)? else {
msg!("[ConsensusProposalV1] Error: Missing slot {} from db", v_slot);
return Err(ConsensusError::ProposalMissingSlot.into())
};
let slot: Slot = deserialize(&slot)?;
let slot_fp = pallas::Base::from(slot.id);
// Verify proposal extends a known fork
if !slot.previous.last_hashes.contains(&params.fork_hash) {
msg!("[ConsensusProposalV1] Error: Proposal extends unknown fork {}", params.fork_hash);
return Err(ConsensusError::ProposalExtendsUnknownFork.into())
}
// Verify the fork sequence is correct: the proposal's previous hash must be
// a known second-to-last hash of the verifying slot
if !slot.previous.second_to_last_hashes.contains(&params.fork_previous_hash) {
let fork_prev = &params.fork_previous_hash;
msg!("[ConsensusProposalV1] Error: Proposal previous hash {} is not part of a known fork sequence", fork_prev);
return Err(ConsensusError::ProposalExtendsUnknownFork.into())
}
// Construct VRF input
let mut vrf_input = Vec::with_capacity(32 + blake3::OUT_LEN + 32);
vrf_input.extend_from_slice(&slot.last_nonce.to_repr());
vrf_input.extend_from_slice(params.fork_previous_hash.as_bytes());
vrf_input.extend_from_slice(&slot_fp.to_repr());
// Verify VRF proof
if !params.vrf_proof.verify(params.input.signature_public, &vrf_input) {
msg!("[ConsensusProposalV1] Error: VRF proof couldn't be verified");
return Err(ConsensusError::ProposalErroneousVrfProof.into())
}
// Construct eta
let mut eta = [0u8; 64];
eta[..blake3::OUT_LEN].copy_from_slice(params.vrf_proof.hash_output().as_bytes());
let eta = pallas::Base::from_uniform_bytes(&eta);
// Calculate election seeds
let mu_y = poseidon_hash([MU_Y_PREFIX, eta, slot_fp]);
let mu_rho = poseidon_hash([MU_RHO_PREFIX, eta, slot_fp]);
// Grab sigmas from slot
let (sigma1, sigma2) = (slot.pid.sigma1, slot.pid.sigma2);
zk_public_inputs.push((
CONSENSUS_CONTRACT_ZKAS_PROPOSAL_NS_V1.to_string(),
vec![
params.input.nullifier.inner(),
pallas::Base::from(params.input.epoch),
*pub_x,
*pub_y,
params.input.merkle_root.inner(),
*input_value_coords.x(),
*input_value_coords.y(),
REWARD_PALLAS,
*output_value_coords.x(),
*output_value_coords.y(),
params.output.coin.inner(),
mu_y,
params.y,
mu_rho,
params.rho,
sigma1,
sigma2,
HEADSTART,
],
));
// Serialize everything gathered and return it
let mut metadata = vec![];
zk_public_inputs.encode(&mut metadata)?;
signature_pubkeys.encode(&mut metadata)?;
Ok(metadata)
}
/// `process_instruction` function for `Consensus::ProposalV1`
pub(crate) fn consensus_proposal_process_instruction_v1(
cid: ContractId,
call_idx: u32,
calls: Vec<DarkLeaf<ContractCall>>,
) -> Result<Vec<u8>, ContractError> {
let self_ = &calls[call_idx as usize].data;
let params: ConsensusProposalParamsV1 = deserialize(&self_.data[1..])?;
let input = &params.input;
let output = &params.output;
// Access the necessary databases where there is information to
// validate this state transition.
let nullifiers_db = db_lookup(cid, CONSENSUS_CONTRACT_NULLIFIERS_TREE)?;
let staked_coins_db = db_lookup(cid, CONSENSUS_CONTRACT_STAKED_COINS_TREE)?;
let staked_coin_roots_db = db_lookup(cid, CONSENSUS_CONTRACT_STAKED_COIN_ROOTS_TREE)?;
// ===================================
// Perform the actual state transition
// ===================================
msg!("[ConsensusProposalV1] Validating anonymous input");
// The coin has passed through the grace period and is allowed to propose.
// Important to note is that the epoch has to be enforced through ZK. The
// only time when the epoch of the burned coin can be zero is when there
// was a previous winning proposal, or the coin was minted at genesis.
if input.epoch != 0 && get_verifying_slot_epoch() - input.epoch <= GRACE_PERIOD {
msg!("[ConsensusProposalV1] Error: Coin is not allowed to make proposals yet");
return Err(ConsensusError::CoinStillInGracePeriod.into())
}
// The Merkle root is used to know whether this is a coin that
// existed in a previous state.
if !db_contains_key(staked_coin_roots_db, &serialize(&input.merkle_root))? {
msg!("[ConsensusProposalV1] Error: Merkle root not found in previous state");
return Err(MoneyError::TransferMerkleRootNotFound.into())
}
// The nullifier should not already exist. It is the double-spend protection.
if db_contains_key(nullifiers_db, &serialize(&input.nullifier))? {
msg!("[ConsensusProposalV1] Error: Duplicate nullifier found");
return Err(MoneyError::DuplicateNullifier.into())
}
// Verify value commits match between burnt and mint inputs.
// Here we check that input+reward == output
let mut valcom_total = pallas::Point::identity();
valcom_total += input.value_commit;
valcom_total += pedersen_commitment_u64(REWARD, params.reward_blind);
valcom_total -= output.value_commit;
if valcom_total != pallas::Point::identity() {
msg!("[ConsensusProposalV1] Error: Value commitments do not result in identity");
return Err(MoneyError::ValueMismatch.into())
}
// Newly created coin for this call is in the output. Here we check that
// it hasn't existed before.
if db_contains_key(staked_coins_db, &serialize(&output.coin))? {
msg!("[ConsensusProposalV1] Error: Duplicate coin found in output");
return Err(MoneyError::DuplicateCoin.into())
}
// At this point the state transition has passed, so we create a state update
let update = ConsensusProposalUpdateV1 { nullifier: input.nullifier, coin: output.coin };
let mut update_data = vec![];
update_data.write_u8(ConsensusFunction::ProposalV1 as u8)?;
update.encode(&mut update_data)?;
Ok(update_data)
}
/// `process_update` function for `Consensus::ProposalV1`
pub(crate) fn consensus_proposal_process_update_v1(
cid: ContractId,
update: ConsensusProposalUpdateV1,
) -> ContractResult {
// Grab all necessary db handles for where we want to write
let info_db = db_lookup(cid, CONSENSUS_CONTRACT_INFO_TREE)?;
let nullifiers_db = db_lookup(cid, CONSENSUS_CONTRACT_NULLIFIERS_TREE)?;
let staked_coins_db = db_lookup(cid, CONSENSUS_CONTRACT_STAKED_COINS_TREE)?;
let staked_coin_roots_db = db_lookup(cid, CONSENSUS_CONTRACT_STAKED_COIN_ROOTS_TREE)?;
msg!("[ConsensusProposalV1] Adding new nullifier to the set");
db_set(nullifiers_db, &serialize(&update.nullifier), &[])?;
msg!("[ConsensusProposalV1] Adding new coin to the staked coin set");
db_set(staked_coins_db, &serialize(&update.coin), &[])?;
msg!("[ConsensusProposalV1] Adding new coin to the Merkle tree");
let coins: Vec<_> = vec![MerkleNode::from(update.coin.inner())];
merkle_add(
info_db,
staked_coin_roots_db,
CONSENSUS_CONTRACT_STAKED_COIN_LATEST_COIN_ROOT,
CONSENSUS_CONTRACT_STAKED_COIN_MERKLE_TREE,
&coins,
)?;
Ok(())
}
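// Illustrative balance sketch (added for this write-up, not part of the
// original file): the value-commitment check above relies on Pedersen
// commitments being additively homomorphic. If the client picks
// output_blind = input_blind + reward_blind (as the proposal builder does),
// then commit(input) + commit(REWARD) - commit(output) is the identity point.
#[allow(dead_code)]
fn commitments_balance(
    input_value: u64,
    input_blind: pallas::Scalar,
    reward_blind: pallas::Scalar,
) -> bool {
    let output_blind = input_blind + reward_blind;
    let input = pedersen_commitment_u64(input_value, input_blind);
    let reward = pedersen_commitment_u64(REWARD, reward_blind);
    let output = pedersen_commitment_u64(input_value + REWARD, output_blind);
    input + reward - output == pallas::Point::identity()
}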

View File

@@ -1,202 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_money_contract::{
error::MoneyError,
model::{ConsensusStakeParamsV1, ConsensusStakeUpdateV1, MoneyStakeParamsV1},
MoneyFunction, CONSENSUS_CONTRACT_INFO_TREE, CONSENSUS_CONTRACT_STAKED_COINS_TREE,
CONSENSUS_CONTRACT_STAKED_COIN_LATEST_COIN_ROOT, CONSENSUS_CONTRACT_STAKED_COIN_MERKLE_TREE,
CONSENSUS_CONTRACT_STAKED_COIN_ROOTS_TREE, CONSENSUS_CONTRACT_UNSTAKED_COINS_TREE,
CONSENSUS_CONTRACT_ZKAS_MINT_NS_V1, MONEY_CONTRACT_COIN_ROOTS_TREE,
MONEY_CONTRACT_NULLIFIERS_TREE,
};
use darkfi_sdk::{
crypto::{pasta_prelude::*, ContractId, MerkleNode, PublicKey, MONEY_CONTRACT_ID},
dark_tree::DarkLeaf,
db::{db_contains_key, db_lookup, db_set},
error::{ContractError, ContractResult},
merkle_add, msg,
pasta::pallas,
util::get_verifying_slot_epoch,
ContractCall,
};
use darkfi_serial::{deserialize, serialize, Encodable, WriteExt};
use crate::ConsensusFunction;
/// `get_metadata` function for `Consensus::StakeV1`
pub(crate) fn consensus_stake_get_metadata_v1(
_cid: ContractId,
call_idx: u32,
calls: Vec<DarkLeaf<ContractCall>>,
) -> Result<Vec<u8>, ContractError> {
let self_ = &calls[call_idx as usize].data;
let params: ConsensusStakeParamsV1 = deserialize(&self_.data[1..])?;
// Public inputs for the ZK proofs we have to verify
let mut zk_public_inputs: Vec<(String, Vec<pallas::Base>)> = vec![];
// We are already verifying this input's signature through `Money::Stake`,
// so it's redundant to verify it here again. However, it's important to
// compare the input with the child `Money::Stake` call's input during the
// state transition to ensure they're the same.
let signature_pubkeys: Vec<PublicKey> = vec![];
// Grab the minting epoch of the verifying slot
let epoch = get_verifying_slot_epoch();
// Grab the pedersen commitment from the anonymous output
let output = &params.output;
let value_coords = output.value_commit.to_affine().coordinates().unwrap();
zk_public_inputs.push((
CONSENSUS_CONTRACT_ZKAS_MINT_NS_V1.to_string(),
vec![epoch.into(), output.coin.inner(), *value_coords.x(), *value_coords.y()],
));
// Serialize everything gathered and return it
let mut metadata = vec![];
zk_public_inputs.encode(&mut metadata)?;
signature_pubkeys.encode(&mut metadata)?;
Ok(metadata)
}
/// `process_instruction` function for `Consensus::StakeV1`
pub(crate) fn consensus_stake_process_instruction_v1(
cid: ContractId,
call_idx: u32,
calls: Vec<DarkLeaf<ContractCall>>,
) -> Result<Vec<u8>, ContractError> {
let self_ = &calls[call_idx as usize];
let params: ConsensusStakeParamsV1 = deserialize(&self_.data.data[1..])?;
// Check child call is money contract
if call_idx == 0 {
msg!("[ConsensusStakeV1] Error: child_call_idx will be out of bounds");
return Err(MoneyError::CallIdxOutOfBounds.into())
}
let child_call_indexes = &self_.children_indexes;
if child_call_indexes.len() != 1 {
msg!("[ConsensusStakeV1] Error: child_call_idx is missing");
return Err(MoneyError::StakeChildCallNotMoneyContract.into())
}
let child_call_idx = child_call_indexes[0];
// Verify child call corresponds to Money::StakeV1
let child = &calls[child_call_idx].data;
if child.contract_id.inner() != MONEY_CONTRACT_ID.inner() {
msg!("[ConsensusStakeV1] Error: Child contract call is not money contract");
return Err(MoneyError::StakeChildCallNotMoneyContract.into())
}
if child.data[0] != MoneyFunction::StakeV1 as u8 {
msg!("[ConsensusStakeV1] Error: Child call function mismatch");
return Err(MoneyError::ChildCallFunctionMismatch.into())
}
// Verify that the child call's input is the same as this one's
let child_params: MoneyStakeParamsV1 = deserialize(&child.data[1..])?;
let child_input = &child_params.input;
if child_input != &params.input {
msg!("[ConsensusStakeV1] Error: Child call input mismatch");
return Err(MoneyError::ChildCallInputMismatch.into())
}
// Access the necessary databases where there is information to
// validate this state transition.
let consensus_coins_db = db_lookup(cid, CONSENSUS_CONTRACT_STAKED_COINS_TREE)?;
let consensus_unstaked_coins_db = db_lookup(cid, CONSENSUS_CONTRACT_UNSTAKED_COINS_TREE)?;
let money_nullifiers_db = db_lookup(*MONEY_CONTRACT_ID, MONEY_CONTRACT_NULLIFIERS_TREE)?;
let money_coin_roots_db = db_lookup(*MONEY_CONTRACT_ID, MONEY_CONTRACT_COIN_ROOTS_TREE)?;
// ===================================
// Perform the actual state transition
// ===================================
msg!("[ConsensusStakeV1] Validating anonymous output");
let input = &params.input;
let output = &params.output;
// Verify value commitments match
if output.value_commit != input.value_commit {
msg!("[ConsensusStakeV1] Error: Value commitments do not match");
return Err(MoneyError::ValueMismatch.into())
}
// The Merkle root is used to know whether this is a coin that
// existed in a previous state.
if !db_contains_key(money_coin_roots_db, &serialize(&input.merkle_root))? {
msg!("[ConsensusStakeV1] Error: Merkle root not found in previous state");
return Err(MoneyError::TransferMerkleRootNotFound.into())
}
// The input's nullifier must already exist in the Money contract's nullifier
// set, i.e. the money coin was burned by the child `Money::StakeV1` call.
// This is the double-mint protection.
if !db_contains_key(money_nullifiers_db, &serialize(&input.nullifier))? {
msg!("[ConsensusStakeV1] Error: Missing nullifier");
return Err(MoneyError::StakeMissingNullifier.into())
}
// Newly created coin for this call is in the output. Here we gather it,
// and we also check that it hasn't existed before.
let coin = serialize(&output.coin);
if db_contains_key(consensus_coins_db, &coin)? {
msg!("[ConsensusStakeV1] Error: Duplicate coin found in output");
return Err(MoneyError::DuplicateCoin.into())
}
// Check that the coin hasn't existed before in unstake set.
if db_contains_key(consensus_unstaked_coins_db, &coin)? {
msg!("[ConsensusStakeV1] Error: Unstaked coin found in output");
return Err(MoneyError::DuplicateCoin.into())
}
// Create a state update.
let update = ConsensusStakeUpdateV1 { coin: output.coin };
let mut update_data = vec![];
update_data.write_u8(ConsensusFunction::StakeV1 as u8)?;
update.encode(&mut update_data)?;
Ok(update_data)
}
/// `process_update` function for `Consensus::StakeV1`
pub(crate) fn consensus_stake_process_update_v1(
cid: ContractId,
update: ConsensusStakeUpdateV1,
) -> ContractResult {
// Grab all necessary db handles for where we want to write
let info_db = db_lookup(cid, CONSENSUS_CONTRACT_INFO_TREE)?;
let staked_coins_db = db_lookup(cid, CONSENSUS_CONTRACT_STAKED_COINS_TREE)?;
let staked_coin_roots_db = db_lookup(cid, CONSENSUS_CONTRACT_STAKED_COIN_ROOTS_TREE)?;
msg!("[ConsensusStakeV1] Adding new coin to the set");
db_set(staked_coins_db, &serialize(&update.coin), &[])?;
msg!("[ConsensusStakeV1] Adding new coin to the Merkle tree");
let coins: Vec<_> = vec![MerkleNode::from(update.coin.inner())];
merkle_add(
info_db,
staked_coin_roots_db,
CONSENSUS_CONTRACT_STAKED_COIN_LATEST_COIN_ROOT,
CONSENSUS_CONTRACT_STAKED_COIN_MERKLE_TREE,
&coins,
)?;
Ok(())
}

View File

@@ -1,193 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_money_contract::{
error::MoneyError, model::ConsensusUnstakeReqParamsV1, CONSENSUS_CONTRACT_INFO_TREE,
CONSENSUS_CONTRACT_NULLIFIERS_TREE, CONSENSUS_CONTRACT_STAKED_COIN_ROOTS_TREE,
CONSENSUS_CONTRACT_UNSTAKED_COINS_TREE, CONSENSUS_CONTRACT_UNSTAKED_COIN_LATEST_COIN_ROOT,
CONSENSUS_CONTRACT_UNSTAKED_COIN_MERKLE_TREE, CONSENSUS_CONTRACT_UNSTAKED_COIN_ROOTS_TREE,
CONSENSUS_CONTRACT_ZKAS_BURN_NS_V1, CONSENSUS_CONTRACT_ZKAS_MINT_NS_V1,
};
use darkfi_sdk::{
crypto::{pasta_prelude::*, ContractId, MerkleNode},
dark_tree::DarkLeaf,
db::{db_contains_key, db_lookup, db_set},
error::{ContractError, ContractResult},
merkle_add, msg,
pasta::pallas,
util::get_verifying_slot_epoch,
ContractCall,
};
use darkfi_serial::{deserialize, serialize, Encodable, WriteExt};
use crate::{
error::ConsensusError,
model::{ConsensusProposalUpdateV1, GRACE_PERIOD},
ConsensusFunction,
};
/// `get_metadata` function for `Consensus::UnstakeRequestV1`
pub(crate) fn consensus_unstake_request_get_metadata_v1(
_cid: ContractId,
call_idx: u32,
calls: Vec<DarkLeaf<ContractCall>>,
) -> Result<Vec<u8>, ContractError> {
let self_ = &calls[call_idx as usize].data;
let params: ConsensusUnstakeReqParamsV1 = deserialize(&self_.data[1..])?;
let input = &params.input;
let output = &params.output;
// Public inputs for the ZK proofs we have to verify
let mut zk_public_inputs: Vec<(String, Vec<pallas::Base>)> = vec![];
// Public keys for the transaction signatures we have to verify
let signature_pubkeys = vec![input.signature_public];
// Grab the pedersen commitments and signature pubkeys from the
// anonymous input
let value_coords = input.value_commit.to_affine().coordinates().unwrap();
let (sig_x, sig_y) = input.signature_public.xy();
// It is very important that these are in the same order as the
// `constrain_instance` calls in the zkas code.
// Otherwise verification will fail.
zk_public_inputs.push((
CONSENSUS_CONTRACT_ZKAS_BURN_NS_V1.to_string(),
vec![
input.nullifier.inner(),
input.epoch.into(),
sig_x,
sig_y,
input.merkle_root.inner(),
*value_coords.x(),
*value_coords.y(),
],
));
// Grab the minting epoch of the verifying slot
let epoch = get_verifying_slot_epoch();
// Grab the pedersen commitment from the anonymous output
let value_coords = output.value_commit.to_affine().coordinates().unwrap();
zk_public_inputs.push((
CONSENSUS_CONTRACT_ZKAS_MINT_NS_V1.to_string(),
vec![epoch.into(), output.coin.inner(), *value_coords.x(), *value_coords.y()],
));
// Serialize everything gathered and return it
let mut metadata = vec![];
zk_public_inputs.encode(&mut metadata)?;
signature_pubkeys.encode(&mut metadata)?;
Ok(metadata)
}
/// `process_instruction` function for `Consensus::UnstakeRequestV1`
pub(crate) fn consensus_unstake_request_process_instruction_v1(
cid: ContractId,
call_idx: u32,
calls: Vec<DarkLeaf<ContractCall>>,
) -> Result<Vec<u8>, ContractError> {
let self_ = &calls[call_idx as usize].data;
let params: ConsensusUnstakeReqParamsV1 = deserialize(&self_.data[1..])?;
let input = &params.input;
let output = &params.output;
// Access the necessary databases where there is information to
// validate this state transition.
let nullifiers_db = db_lookup(cid, CONSENSUS_CONTRACT_NULLIFIERS_TREE)?;
let unstaked_coins_db = db_lookup(cid, CONSENSUS_CONTRACT_UNSTAKED_COINS_TREE)?;
let staked_coins_roots_db = db_lookup(cid, CONSENSUS_CONTRACT_STAKED_COIN_ROOTS_TREE)?;
// ===================================
// Perform the actual state transition
// ===================================
msg!("[ConsensusUnstakeRequestV1] Validating anonymous input");
// The coin has passed through the grace period and is allowed to request unstake.
if input.epoch != 0 && get_verifying_slot_epoch() - input.epoch <= GRACE_PERIOD {
msg!("[ConsensusUnstakeRequestV1] Error: Coin is not allowed to request unstake yet");
return Err(ConsensusError::CoinStillInGracePeriod.into())
}
// The Merkle root is used to know whether this is a coin that
// existed in a previous state.
if !db_contains_key(staked_coins_roots_db, &serialize(&input.merkle_root))? {
msg!("[ConsensusUnstakeRequestV1] Error: Merkle root not found in previous state");
return Err(MoneyError::TransferMerkleRootNotFound.into())
}
// The nullifier should not already exist. It is the double-spend protection.
if db_contains_key(nullifiers_db, &serialize(&input.nullifier))? {
msg!("[ConsensusUnstakeRequestV1] Error: Duplicate nullifier found");
return Err(MoneyError::DuplicateNullifier.into())
}
msg!("[ConsensusUnstakeRequestV1] Validating anonymous output");
// Verify value commits match
if output.value_commit != input.value_commit {
msg!("[ConsensusUnstakeRequestV1] Error: Value commitments do not match");
return Err(MoneyError::ValueMismatch.into())
}
// Newly created coin for this call is in the output. Here we gather it,
// and we also check that it hasn't existed before.
if db_contains_key(unstaked_coins_db, &serialize(&output.coin))? {
msg!("[ConsensusUnstakeRequestV1] Error: Duplicate coin found in output");
return Err(MoneyError::DuplicateCoin.into())
}
// At this point the state transition has passed, so we create a state update
let update = ConsensusProposalUpdateV1 { nullifier: input.nullifier, coin: output.coin };
let mut update_data = vec![];
update_data.write_u8(ConsensusFunction::UnstakeRequestV1 as u8)?;
update.encode(&mut update_data)?;
Ok(update_data)
}
/// `process_update` function for `Consensus::UnstakeRequestV1`
pub(crate) fn consensus_unstake_request_process_update_v1(
cid: ContractId,
update: ConsensusProposalUpdateV1,
) -> ContractResult {
// Grab all necessary db handles for where we want to write
let info_db = db_lookup(cid, CONSENSUS_CONTRACT_INFO_TREE)?;
let nullifiers_db = db_lookup(cid, CONSENSUS_CONTRACT_NULLIFIERS_TREE)?;
let unstaked_coins_db = db_lookup(cid, CONSENSUS_CONTRACT_UNSTAKED_COINS_TREE)?;
let unstaked_coin_roots_db = db_lookup(cid, CONSENSUS_CONTRACT_UNSTAKED_COIN_ROOTS_TREE)?;
msg!("[ConsensusUnstakeRequestV1] Adding new nullifier to the set");
db_set(nullifiers_db, &serialize(&update.nullifier), &[])?;
msg!("[ConsensusUnstakeRequestV1] Adding new coin to the unstaked coins set");
db_set(unstaked_coins_db, &serialize(&update.coin), &[])?;
msg!("[ConsensusUnstakeRequestV1] Adding new coin to the unstaked coins Merkle tree");
let coins: Vec<_> = vec![MerkleNode::from(update.coin.inner())];
merkle_add(
info_db,
unstaked_coin_roots_db,
CONSENSUS_CONTRACT_UNSTAKED_COIN_LATEST_COIN_ROOT,
CONSENSUS_CONTRACT_UNSTAKED_COIN_MERKLE_TREE,
&coins,
)?;
Ok(())
}
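// A minimal decoding sketch (hypothetical, not part of the original file): it
// mirrors how the contract entrypoint consumes the bytes produced above, reading
// the leading function byte and deserializing the remainder into the update
// struct handled by `consensus_unstake_request_process_update_v1`.
#[allow(dead_code)]
fn decode_unstake_request_update(
    data: &[u8],
) -> Result<ConsensusProposalUpdateV1, ContractError> {
    // The first byte must select the UnstakeRequestV1 function
    if data.first() != Some(&(ConsensusFunction::UnstakeRequestV1 as u8)) {
        return Err(ContractError::InvalidFunction)
    }
    // The rest of the payload is the serialized state update
    Ok(deserialize(&data[1..])?)
}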

View File

@@ -1,173 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_money_contract::{
error::MoneyError,
model::{ConsensusUnstakeParamsV1, ConsensusUnstakeUpdateV1, MoneyUnstakeParamsV1},
MoneyFunction, CONSENSUS_CONTRACT_NULLIFIERS_TREE, CONSENSUS_CONTRACT_UNSTAKED_COIN_ROOTS_TREE,
CONSENSUS_CONTRACT_ZKAS_BURN_NS_V1,
};
use darkfi_sdk::{
crypto::{pasta_prelude::*, ContractId, MONEY_CONTRACT_ID},
dark_tree::DarkLeaf,
db::{db_contains_key, db_lookup, db_set},
error::{ContractError, ContractResult},
msg,
pasta::pallas,
util::get_verifying_slot_epoch,
ContractCall,
};
use darkfi_serial::{deserialize, serialize, Encodable, WriteExt};
use crate::{error::ConsensusError, model::GRACE_PERIOD, ConsensusFunction};
/// `get_metadata` function for `Consensus::UnstakeV1`
pub(crate) fn consensus_unstake_get_metadata_v1(
_cid: ContractId,
call_idx: u32,
calls: Vec<DarkLeaf<ContractCall>>,
) -> Result<Vec<u8>, ContractError> {
let self_ = &calls[call_idx as usize].data;
let params: ConsensusUnstakeParamsV1 = deserialize(&self_.data[1..])?;
let input = &params.input;
// Public inputs for the ZK proofs we have to verify
let mut zk_public_inputs: Vec<(String, Vec<pallas::Base>)> = vec![];
// Public keys for the transaction signatures we have to verify
let signature_pubkeys = vec![input.signature_public];
// Grab the pedersen commitment and signature pubkey coordinates from the
// anonymous input
let value_coords = input.value_commit.to_affine().coordinates().unwrap();
let (sig_x, sig_y) = input.signature_public.xy();
// It is very important that these are in the same order as the
// `constrain_instance` calls in the zkas code.
// Otherwise verification will fail.
zk_public_inputs.push((
CONSENSUS_CONTRACT_ZKAS_BURN_NS_V1.to_string(),
vec![
input.nullifier.inner(),
pallas::Base::from(input.epoch),
sig_x,
sig_y,
input.merkle_root.inner(),
*value_coords.x(),
*value_coords.y(),
],
));
// Serialize everything gathered and return it
let mut metadata = vec![];
zk_public_inputs.encode(&mut metadata)?;
signature_pubkeys.encode(&mut metadata)?;
Ok(metadata)
}
/// `process_instruction` function for `Consensus::UnstakeV1`
pub(crate) fn consensus_unstake_process_instruction_v1(
cid: ContractId,
call_idx: u32,
calls: Vec<DarkLeaf<ContractCall>>,
) -> Result<Vec<u8>, ContractError> {
let self_ = &calls[call_idx as usize];
let params: ConsensusUnstakeParamsV1 = deserialize(&self_.data.data[1..])?;
let input = &params.input;
// Access the necessary databases where there is information to
// validate this state transition.
let nullifiers_db = db_lookup(cid, CONSENSUS_CONTRACT_NULLIFIERS_TREE)?;
let unstaked_coin_roots_db = db_lookup(cid, CONSENSUS_CONTRACT_UNSTAKED_COIN_ROOTS_TREE)?;
// ===================================
// Perform the actual state transition
// ===================================
// Check parent call is money contract
let parent_call_idx = self_.parent_index;
if parent_call_idx.is_none() {
msg!("[ConsensusUnstakeV1] Error: parent_call_idx is missing");
return Err(MoneyError::UnstakeParentCallNotMoneyContract.into())
}
let parent_call_idx = parent_call_idx.unwrap();
if parent_call_idx >= calls.len() {
msg!("[ConsensusUnstakeV1] Error: parent_call_idx out of bounds");
return Err(MoneyError::CallIdxOutOfBounds.into())
}
let parent = &calls[parent_call_idx].data;
if parent.contract_id.inner() != MONEY_CONTRACT_ID.inner() {
msg!("[ConsensusUnstakeV1] Error: Parent contract call is not money contract");
return Err(MoneyError::UnstakeParentCallNotMoneyContract.into())
}
// Verify parent call corresponds to Money::UnstakeV1
if parent.data[0] != MoneyFunction::UnstakeV1 as u8 {
msg!("[ConsensusUnstakeV1] Error: Parent call function mismatch");
return Err(MoneyError::ParentCallFunctionMismatch.into())
}
// Verify the parent call's input is the same as this call's input
let parent_params: MoneyUnstakeParamsV1 = deserialize(&parent.data[1..])?;
if input != &parent_params.input {
msg!("[ConsensusUnstakeV1] Error: Parent call input mismatch");
return Err(MoneyError::ParentCallInputMismatch.into())
}
msg!("[ConsensusUnstakeV1] Validating anonymous input");
// Verify the coin has passed through the grace period and is allowed to get unstaked.
if get_verifying_slot_epoch() - input.epoch <= GRACE_PERIOD {
msg!("[ConsensusUnstakeV1] Error: Coin is not allowed to get unstaked yet");
return Err(ConsensusError::CoinStillInGracePeriod.into())
}
// The Merkle root is used to know whether this is an unstaked coin that
// existed in a previous state.
if !db_contains_key(unstaked_coin_roots_db, &serialize(&input.merkle_root))? {
msg!("[ConsensusUnstakeV1] Error: Merkle root not found in previous state");
return Err(MoneyError::TransferMerkleRootNotFound.into())
}
// The nullifiers should not already exist. It is the double-spend protection.
if db_contains_key(nullifiers_db, &serialize(&input.nullifier))? {
msg!("[ConsensusUnstakeV1] Error: Duplicate nullifier found");
return Err(MoneyError::DuplicateNullifier.into())
}
// At this point the state transition has passed, so we create a state update
let update = ConsensusUnstakeUpdateV1 { nullifier: input.nullifier };
let mut update_data = vec![];
update_data.write_u8(ConsensusFunction::UnstakeV1 as u8)?;
update.encode(&mut update_data)?;
Ok(update_data)
}
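// A condensed sketch (hypothetical helper, not from the original file) of the
// parent-call checks above: this call must be the direct child of a
// `Money::UnstakeV1` call made by the money contract.
#[allow(dead_code)]
fn parent_is_money_unstake(
    self_: &DarkLeaf<ContractCall>,
    calls: &[DarkLeaf<ContractCall>],
) -> bool {
    match self_.parent_index {
        Some(idx) if idx < calls.len() => {
            let parent = &calls[idx].data;
            // Parent must be the money contract calling its UnstakeV1 function
            parent.contract_id.inner() == MONEY_CONTRACT_ID.inner() &&
                parent.data.first() == Some(&(MoneyFunction::UnstakeV1 as u8))
        }
        _ => false,
    }
}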
/// `process_update` function for `Consensus::UnstakeV1`
pub(crate) fn consensus_unstake_process_update_v1(
cid: ContractId,
update: ConsensusUnstakeUpdateV1,
) -> ContractResult {
// Grab all necessary db handles for where we want to write
let nullifiers_db = db_lookup(cid, CONSENSUS_CONTRACT_NULLIFIERS_TREE)?;
msg!("[ConsensusUnstakeV1] Adding new nullifier to the set");
db_set(nullifiers_db, &serialize(&update.nullifier), &[])?;
Ok(())
}

View File

@@ -1,49 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_sdk::error::ContractError;
#[derive(Debug, Clone, thiserror::Error)]
pub enum ConsensusError {
#[error("Missing slot from db")]
ProposalMissingSlot,
#[error("Proposal extends unknown fork")]
ProposalExtendsUnknownFork,
#[error("Eta VRF proof couldn't be verified")]
ProposalErroneousVrfProof,
#[error("Coin is still in grace period")]
CoinStillInGracePeriod,
#[error("Coin doesn't exist in unstake set")]
CoinNotInUnstakeSet,
}
impl From<ConsensusError> for ContractError {
fn from(e: ConsensusError) -> Self {
match e {
ConsensusError::ProposalMissingSlot => Self::Custom(1),
ConsensusError::ProposalExtendsUnknownFork => Self::Custom(2),
ConsensusError::ProposalErroneousVrfProof => Self::Custom(3),
ConsensusError::CoinStillInGracePeriod => Self::Custom(4),
ConsensusError::CoinNotInUnstakeSet => Self::Custom(5),
}
}
}
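// A minimal sanity check (a sketch, not part of the original file): the
// grace-period error surfaces to callers as `ContractError::Custom(4)`,
// matching the table in the `From` impl above.
#[cfg(test)]
mod error_code_sketch {
    use super::*;

    #[test]
    fn grace_period_error_code() {
        assert!(matches!(
            ContractError::from(ConsensusError::CoinStillInGracePeriod),
            ContractError::Custom(4)
        ));
    }
}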

View File

@@ -1,61 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//! Smart contract implementing staking, unstaking and evolving
//! of consensus tokens.
use darkfi_sdk::error::ContractError;
/// Functions available in the contract
#[repr(u8)]
pub enum ConsensusFunction {
GenesisStakeV1 = 0x00,
StakeV1 = 0x01,
ProposalV1 = 0x02,
UnstakeRequestV1 = 0x03,
UnstakeV1 = 0x04,
}
impl TryFrom<u8> for ConsensusFunction {
type Error = ContractError;
fn try_from(b: u8) -> core::result::Result<Self, Self::Error> {
match b {
0x00 => Ok(Self::GenesisStakeV1),
0x01 => Ok(Self::StakeV1),
0x02 => Ok(Self::ProposalV1),
0x03 => Ok(Self::UnstakeRequestV1),
0x04 => Ok(Self::UnstakeV1),
_ => Err(ContractError::InvalidFunction),
}
}
}
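// Illustrative sketch (hypothetical helper, not part of the original file):
// contract entrypoints read the leading byte of a call's payload and dispatch
// on the `ConsensusFunction` decoded above.
#[allow(dead_code)]
fn select_function(payload: &[u8]) -> Result<ConsensusFunction, ContractError> {
    match payload.first() {
        Some(&byte) => ConsensusFunction::try_from(byte),
        // An empty payload cannot carry a function selector
        None => Err(ContractError::InvalidFunction),
    }
}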
/// Internal contract errors
pub mod error;
/// Call parameters definitions
pub mod model;
#[cfg(not(feature = "no-entrypoint"))]
/// WASM entrypoint functions
pub mod entrypoint;
#[cfg(feature = "client")]
/// Client API for interaction with this smart contract
pub mod client;

View File

@@ -1,138 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_money_contract::model::{ClearInput, Coin, ConsensusInput, ConsensusOutput, Output};
use darkfi_sdk::{
crypto::{ecvrf::VrfProof, Nullifier},
pasta::pallas,
};
use darkfi_serial::{SerialDecodable, SerialEncodable};
#[cfg(feature = "client")]
use darkfi_serial::async_trait;
/// Parameters for `Consensus::GenesisStake`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// ANCHOR: ConsensusGenesisStakeParams
pub struct ConsensusGenesisStakeParamsV1 {
/// Clear input
pub input: ClearInput,
/// Anonymous output
pub output: ConsensusOutput,
}
// ANCHOR_END: ConsensusGenesisStakeParams
/// State update for `Consensus::GenesisStake`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// ANCHOR: ConsensusGenesisStakeUpdate
pub struct ConsensusGenesisStakeUpdateV1 {
/// The newly minted coin
pub coin: Coin,
}
// ANCHOR_END: ConsensusGenesisStakeUpdate
/// Parameters for `Consensus::Proposal`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// ANCHOR: ConsensusProposalParams
pub struct ConsensusProposalParamsV1 {
/// Anonymous input
pub input: ConsensusInput,
/// Anonymous output
pub output: ConsensusOutput,
/// Reward value
pub reward: u64,
/// Revealed blinding factor for reward value
pub reward_blind: pallas::Scalar,
/// Extending fork last proposal/block hash
pub fork_hash: blake3::Hash,
/// Extending fork second to last proposal/block hash
pub fork_previous_hash: blake3::Hash,
/// VRF proof for eta calculation
pub vrf_proof: VrfProof,
/// Coin y
pub y: pallas::Base,
/// Lottery rho used
pub rho: pallas::Base,
}
// ANCHOR_END: ConsensusProposalParams
/// State update for `Consensus::Proposal`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// ANCHOR: ConsensusProposalUpdate
pub struct ConsensusProposalUpdateV1 {
/// Revealed nullifier
pub nullifier: Nullifier,
/// The newly minted coin
pub coin: Coin,
}
// ANCHOR_END: ConsensusProposalUpdate
/// Parameters for `Consensus::UnstakeRequest`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// ANCHOR: ConsensusUnstakeRequestParams
pub struct ConsensusUnstakeRequestParamsV1 {
/// Burnt token revealed info
pub input: ConsensusInput,
/// Anonymous output
pub output: Output,
}
// ANCHOR_END: ConsensusUnstakeRequestParams
// ======================================================================
// Consensus parameters configuration
// NOTE: In case of changes, always verify that the `pallas::Base` consts
// are correct using the `pallas_constants` tool in `script/research/`.
// ======================================================================
/// Number of slots in one epoch
pub const EPOCH_LENGTH: u64 = 10;
/// Slot time in seconds
pub const SLOT_TIME: u64 = 90;
/// Stake/Unstake timelock length, denominated in epochs
pub const GRACE_PERIOD: u64 = calculate_grace_period();
/// Configured block reward (1 DRK == 1 * 10^8)
pub const REWARD: u64 = 100_000_000;
/// Reward `pallas::Base`, calculated by: pallas::Base::from(REWARD)
pub const REWARD_PALLAS: pallas::Base = pallas::Base::from_raw([100000000, 0, 0, 0]);
/// Serial prefix, calculated by: pallas::Base::from(2)
pub const SERIAL_PREFIX: pallas::Base = pallas::Base::from_raw([2, 0, 0, 0]);
/// Seed prefix, calculated by: pallas::Base::from(3)
pub const SEED_PREFIX: pallas::Base = pallas::Base::from_raw([3, 0, 0, 0]);
/// Secret key prefix, calculated by: pallas::Base::from(4)
pub const SECRET_KEY_PREFIX: pallas::Base = pallas::Base::from_raw([4, 0, 0, 0]);
/// Election seed y prefix, calculated by: pallas::Base::from(22)
pub const MU_Y_PREFIX: pallas::Base = pallas::Base::from_raw([22, 0, 0, 0]);
/// Election seed rho prefix, calculated by: pallas::Base::from(5)
pub const MU_RHO_PREFIX: pallas::Base = pallas::Base::from_raw([5, 0, 0, 0]);
/// Lottery headstart, calculated by: darkfi::consensus::LeadCoin::headstart()
pub const HEADSTART: pallas::Base = pallas::Base::from_raw([
11731824086999220879,
11830614503713258191,
737869762948382064,
46116860184273879,
]);
/// Auxiliary function to calculate the grace (locked) period, denominated
/// in epochs.
#[inline]
pub const fn calculate_grace_period() -> u64 {
// Grace period days target
const GRACE_PERIOD_DAYS: u64 = 2;
// 86400 seconds in a day
(86400 * GRACE_PERIOD_DAYS) / (SLOT_TIME * EPOCH_LENGTH)
}
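// Worked example (a sketch, not part of the original file): with the constants
// above, one epoch lasts SLOT_TIME * EPOCH_LENGTH = 900 seconds, so the
// two-day grace period is (86400 * 2) / 900 = 192 epochs, i.e. 1920 slots.
#[cfg(test)]
mod grace_period_sketch {
    use super::*;

    #[test]
    fn grace_period_is_192_epochs() {
        assert_eq!(calculate_grace_period(), 192);
        assert_eq!(GRACE_PERIOD * EPOCH_LENGTH * SLOT_TIME, 86400 * 2);
    }
}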

View File

@@ -1,146 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//! Integration test of consensus genesis staking and unstaking for Alice.
//!
//! We first stake some native tokens for Alice on the genesis slot, and then
//! she can propose and unstake them a couple of times.
//!
//! With this test, we want to confirm the consensus contract state
//! transitions work for a single party and are able to be verified.
use darkfi::Result;
use log::info;
use darkfi_consensus_contract::model::{calculate_grace_period, EPOCH_LENGTH};
use darkfi_contract_test_harness::{init_logger, Holder, TestHarness, TxAction};
#[test]
fn consensus_contract_genesis_stake_unstake() -> Result<()> {
smol::block_on(async {
init_logger();
// Holders this test will use
const HOLDERS: [Holder; 2] = [Holder::Faucet, Holder::Alice];
// Some numbers we want to assert
const ALICE_INITIAL: u64 = 1000;
// Slot to verify against
let mut current_slot = 0;
// Initialize harness
let mut th =
TestHarness::new(&["money".to_string(), "consensus".to_string()], false).await?;
// Now Alice can create a genesis stake transaction to mint
// some staked coins
info!(target: "consensus", "[Alice] =========================");
info!(target: "consensus", "[Alice] Building genesis stake tx");
info!(target: "consensus", "[Alice] =========================");
let (genesis_stake_tx, genesis_stake_params) =
th.genesis_stake(&Holder::Alice, ALICE_INITIAL)?;
// We are going to use Alice's genesis stake transaction to
// test some malicious cases.
info!(target: "consensus", "[Malicious] ===================================");
info!(target: "consensus", "[Malicious] Checking duplicate genesis stake tx");
info!(target: "consensus", "[Malicious] ===================================");
th.execute_erroneous_txs(
TxAction::ConsensusGenesisStake,
&Holder::Alice,
&[genesis_stake_tx.clone(), genesis_stake_tx.clone()],
current_slot,
1,
)
.await?;
info!(target: "consensus", "[Malicious] =============================================");
info!(target: "consensus", "[Malicious] Checking genesis stake tx not on genesis slot");
info!(target: "consensus", "[Malicious] =============================================");
th.execute_erroneous_txs(
TxAction::ConsensusGenesisStake,
&Holder::Alice,
&[genesis_stake_tx.clone()],
current_slot + 1,
1,
)
.await?;
for holder in &HOLDERS {
info!(target: "consensus", "[{holder:?}] ================================");
info!(target: "consensus", "[{holder:?}] Executing Alice genesis stake tx");
info!(target: "consensus", "[{holder:?}] ================================");
th.execute_genesis_stake_tx(
holder,
&genesis_stake_tx,
&genesis_stake_params,
current_slot,
)
.await?;
}
th.assert_trees(&HOLDERS);
// Gather new staked owncoin
let alice_staked_oc =
th.gather_consensus_staked_owncoin(&Holder::Alice, &genesis_stake_params.output, None)?;
// Verify values match
assert!(ALICE_INITIAL == alice_staked_oc.note.value);
// We progress one slot past genesis and generate it
current_slot += 1;
let slot = th.generate_slot(current_slot).await?;
// With Alice's current coin value she can become the slot proposer,
// so she creates a proposal transaction to burn her staked coin,
// reward herself and mint the new coin.
let alice_rewarded_staked_oc = th
.execute_proposal(&HOLDERS, &Holder::Alice, current_slot, slot, &alice_staked_oc)
.await?;
// We progress after grace period
current_slot += calculate_grace_period() * EPOCH_LENGTH;
th.generate_slot(current_slot).await?;
// Alice can request that her owncoin be unstaked
let alice_unstake_request_oc = th
.execute_unstake_request(
&HOLDERS,
&Holder::Alice,
current_slot,
&alice_rewarded_staked_oc,
)
.await?;
// We progress after grace period
current_slot += (calculate_grace_period() * EPOCH_LENGTH) + EPOCH_LENGTH;
// Now Alice can unstake her owncoin
th.execute_unstake(&HOLDERS, &Holder::Alice, current_slot, &alice_unstake_request_oc)
.await?;
// Statistics
th.statistics();
// Thanks for reading
Ok(())
})
}

View File

@@ -1,226 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//! Integration test of consensus staking and unstaking for Alice.
//!
//! We first airdrop Alice some native tokens, and then she can stake,
//! propose and unstake them a couple of times.
//! The following malicious cases are also tested:
//! 1. Repeat staking coin
//! 2. Proposal before grace period
//! 3. Unstaking before grace period
//! 4. Repeat requesting unstaking coin
//! 5. Repeat unstaking coin
//! 6. Use unstaked coin in proposal
//!
//! With this test, we want to confirm the consensus contract state
//! transitions work for a single party and are able to be verified.
use darkfi::Result;
use log::info;
use darkfi_consensus_contract::model::{calculate_grace_period, EPOCH_LENGTH};
use darkfi_contract_test_harness::{init_logger, Holder, TestHarness, TxAction};
use darkfi_sdk::pasta::pallas;
#[test]
fn consensus_contract_stake_unstake() -> Result<()> {
smol::block_on(async {
init_logger();
// Holders this test will use
const HOLDERS: [Holder; 2] = [Holder::Faucet, Holder::Alice];
// Some numbers we want to assert
const ALICE_AIRDROP: u64 = 1000;
// Slot to verify against
let mut current_slot = 1;
// Initialize harness
let mut th =
TestHarness::new(&["money".to_string(), "consensus".to_string()], false).await?;
// Now Alice can airdrop some native tokens to herself
let alice_oc =
th.execute_airdrop(&HOLDERS, &Holder::Alice, ALICE_AIRDROP, current_slot).await?;
// Now Alice can stake her owncoin
let alice_staked_oc =
th.execute_stake(&HOLDERS, &Holder::Alice, current_slot, &alice_oc, 181).await?;
// We progress after grace period
current_slot += (calculate_grace_period() * EPOCH_LENGTH) + EPOCH_LENGTH;
let slot = th.generate_slot(current_slot).await?;
// With Alice's current coin value she can become the slot proposer,
// so she creates a proposal transaction to burn her staked coin,
// reward herself and mint the new coin.
let alice_rewarded_staked_oc = th
.execute_proposal(&HOLDERS, &Holder::Alice, current_slot, slot, &alice_staked_oc)
.await?;
// We progress one slot
current_slot += 1;
th.generate_slot(current_slot).await?;
// Alice can request that her owncoin be unstaked
let alice_unstake_request_oc = th
.execute_unstake_request(
&HOLDERS,
&Holder::Alice,
current_slot,
&alice_rewarded_staked_oc,
)
.await?;
// We progress after grace period
current_slot += (calculate_grace_period() * EPOCH_LENGTH) + EPOCH_LENGTH;
// Now Alice can unstake her owncoin
let alice_unstaked_oc = th
.execute_unstake(&HOLDERS, &Holder::Alice, current_slot, &alice_unstake_request_oc)
.await?;
// Now Alice can stake her unstaked owncoin again to try some malicious cases
let alice_staked_oc = th
.execute_stake(&HOLDERS, &Holder::Alice, current_slot, &alice_unstaked_oc, 148)
.await?;
// Alice tries to stake her coin again
info!(target: "consensus", "[Malicious] ===========================");
info!(target: "consensus", "[Malicious] Checking staking coin again");
info!(target: "consensus", "[Malicious] ===========================");
let (stake_tx, _, _) = th
.stake(&Holder::Alice, current_slot, &alice_unstaked_oc, pallas::Base::from(148))
.await?;
th.execute_erroneous_txs(
TxAction::ConsensusStake,
&Holder::Alice,
&[stake_tx],
current_slot,
1,
)
.await?;
// We progress one slot
current_slot += 1;
let slot = th.generate_slot(current_slot).await?;
// Since Alice didn't wait for the grace period to pass, her proposal should fail
info!(target: "consensus", "[Malicious] =====================================");
info!(target: "consensus", "[Malicious] Checking proposal before grace period");
info!(target: "consensus", "[Malicious] =====================================");
let (proposal_tx, _, _, _) = th.proposal(&Holder::Alice, slot, &alice_staked_oc).await?;
th.execute_erroneous_proposal_tx(&Holder::Alice, &proposal_tx, current_slot).await?;
// nor should she be able to unstake the coin
info!(target: "consensus", "[Malicious] ======================================");
info!(target: "consensus", "[Malicious] Checking unstaking before grace period");
info!(target: "consensus", "[Malicious] ======================================");
let (unstake_request_tx, _, _, _) =
th.unstake_request(&Holder::Alice, current_slot, &alice_staked_oc).await?;
th.execute_erroneous_txs(
TxAction::ConsensusUnstakeRequest,
&Holder::Alice,
&[unstake_request_tx],
current_slot,
1,
)
.await?;
// We progress after grace period
current_slot += (calculate_grace_period() * EPOCH_LENGTH) + EPOCH_LENGTH;
// Alice can request that her owncoin be unstaked
let alice_unstake_request_oc = th
.execute_unstake_request(&HOLDERS, &Holder::Alice, current_slot, &alice_staked_oc)
.await?;
info!(target: "consensus", "[Malicious] =====================================");
info!(target: "consensus", "[Malicious] Checking request unstaking coin again");
info!(target: "consensus", "[Malicious] =====================================");
let (unstake_request_tx, _, _, _) =
th.unstake_request(&Holder::Alice, current_slot, &alice_staked_oc).await?;
th.execute_erroneous_txs(
TxAction::ConsensusUnstakeRequest,
&Holder::Alice,
&[unstake_request_tx],
current_slot,
1,
)
.await?;
// We progress after grace period
current_slot += (calculate_grace_period() * EPOCH_LENGTH) + EPOCH_LENGTH;
// Now Alice can unstake her owncoin
let alice_unstaked_oc = th
.execute_unstake(&HOLDERS, &Holder::Alice, current_slot, &alice_unstake_request_oc)
.await?;
info!(target: "consensus", "[Malicious] =============================");
info!(target: "consensus", "[Malicious] Checking unstaking coin again");
info!(target: "consensus", "[Malicious] =============================");
let (unstake_tx, _, _) = th.unstake(&Holder::Alice, &alice_unstake_request_oc)?;
th.execute_erroneous_txs(
TxAction::ConsensusUnstake,
&Holder::Alice,
&[unstake_tx],
current_slot,
1,
)
.await?;
// Now Alice can stake her unstaked owncoin again
let alice_staked_oc = th
.execute_stake(&HOLDERS, &Holder::Alice, current_slot, &alice_unstaked_oc, 86)
.await?;
// We progress after grace period
current_slot += (calculate_grace_period() * EPOCH_LENGTH) + EPOCH_LENGTH;
// Alice can request that her owncoin be unstaked
let alice_unstake_request_oc = th
.execute_unstake_request(&HOLDERS, &Holder::Alice, current_slot, &alice_staked_oc)
.await?;
// Now we will test whether the unstaked coin can be reused in a proposal
current_slot += 1;
let slot = th.generate_slot(current_slot).await?;
info!(target: "consensus", "[Malicious] ========================================");
info!(target: "consensus", "[Malicious] Checking using unstaked coin in proposal");
info!(target: "consensus", "[Malicious] ========================================");
let (proposal_tx, _, _, _) = th.proposal(&Holder::Alice, slot, &alice_staked_oc).await?;
th.execute_erroneous_proposal_tx(&Holder::Alice, &proposal_tx, current_slot).await?;
// We progress after grace period
current_slot += (calculate_grace_period() * EPOCH_LENGTH) + EPOCH_LENGTH;
// Now Alice can unstake her owncoin
th.execute_unstake(&HOLDERS, &Holder::Alice, current_slot, &alice_unstake_request_oc)
.await?;
// Statistics
th.statistics();
// Thanks for reading
Ok(())
})
}

View File

@@ -28,7 +28,7 @@
use darkfi_sdk::{
bridgetree,
crypto::{pasta_prelude::*, Nullifier, SecretKey, TokenId, DARK_TOKEN_ID},
crypto::{Nullifier, SecretKey, TokenId},
pasta::pallas,
};
use darkfi_serial::{async_trait, SerialDecodable, SerialEncodable};
@@ -53,12 +53,6 @@ pub mod token_mint_v1;
/// `Money::TokenFreezeV1` API
pub mod token_freeze_v1;
/// `Money::StakeV1` API
pub mod stake_v1;
/// `Money::UnstakeV1` API
pub mod unstake_v1;
/// `Money::PoWRewardV1` API
pub mod pow_reward_v1;
@@ -145,65 +139,6 @@ pub struct OwnCoin {
pub leaf_position: bridgetree::Position,
}
/// `ConsensusNote` holds the inner attributes of a `Coin`.
#[derive(Debug, Clone, Eq, PartialEq, SerialEncodable, SerialDecodable)]
pub struct ConsensusNote {
/// Serial number of the coin, used for the nullifier
pub serial: pallas::Base,
/// Value of the coin
pub value: u64,
/// Epoch the coin was minted
pub epoch: u64,
/// Blinding factor for the value pedersen commitment
pub value_blind: pallas::Scalar,
/// Value of the reward
pub reward: u64,
/// Blinding factor for the reward value pedersen commitment
pub reward_blind: pallas::Scalar,
}
impl From<ConsensusNote> for MoneyNote {
fn from(consensus_note: ConsensusNote) -> Self {
MoneyNote {
serial: consensus_note.serial,
value: consensus_note.value,
token_id: *DARK_TOKEN_ID,
spend_hook: pallas::Base::ZERO,
user_data: pallas::Base::ZERO,
value_blind: consensus_note.value_blind,
token_blind: pallas::Base::ZERO,
memo: vec![],
}
}
}
/// `ConsensusOwnCoin` is a representation of `Coin` with its respective metadata.
#[derive(Debug, Clone, Eq, PartialEq, SerialEncodable, SerialDecodable)]
pub struct ConsensusOwnCoin {
/// The coin hash
pub coin: Coin,
/// The attached `ConsensusNote`
pub note: ConsensusNote,
/// Coin's secret key
pub secret: SecretKey,
/// Coin's nullifier
pub nullifier: Nullifier,
/// Coin's leaf position in the Merkle tree of coins
pub leaf_position: bridgetree::Position,
}
impl From<ConsensusOwnCoin> for OwnCoin {
fn from(consensus_own_coin: ConsensusOwnCoin) -> Self {
OwnCoin {
coin: consensus_own_coin.coin,
note: consensus_own_coin.note.into(),
secret: consensus_own_coin.secret,
nullifier: consensus_own_coin.nullifier,
leaf_position: consensus_own_coin.leaf_position,
}
}
}
pub fn compute_remainder_blind(
clear_inputs: &[crate::model::ClearInput],
input_blinds: &[pallas::Scalar],

View File

@@ -1,225 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//! This API is crufty. Please rework it into something nice to read and nice to use.
use darkfi::{
zk::{halo2::Value, Proof, ProvingKey, Witness, ZkCircuit},
zkas::ZkBinary,
Result,
};
use darkfi_sdk::{
bridgetree,
bridgetree::Hashable,
crypto::{
pasta_prelude::*, pedersen_commitment_u64, poseidon_hash, MerkleNode, MerkleTree,
Nullifier, PublicKey, SecretKey, DARK_TOKEN_ID,
},
pasta::pallas,
};
use log::{debug, info};
use rand::rngs::OsRng;
use crate::{
client::{MoneyNote, OwnCoin},
model::{CoinAttributes, Input, MoneyStakeParamsV1, NullifierAttributes},
};
pub struct MoneyStakeCallDebris {
pub params: MoneyStakeParamsV1,
pub proofs: Vec<Proof>,
pub signature_secret: SecretKey,
pub value_blind: pallas::Scalar,
}
pub struct MoneyStakeBurnRevealed {
pub value_commit: pallas::Point,
pub token_commit: pallas::Base,
pub nullifier: Nullifier,
pub merkle_root: MerkleNode,
pub spend_hook: pallas::Base,
pub user_data_enc: pallas::Base,
pub signature_public: PublicKey,
}
impl MoneyStakeBurnRevealed {
pub fn to_vec(&self) -> Vec<pallas::Base> {
let valcom_coords = self.value_commit.to_affine().coordinates().unwrap();
let sigpub_coords = self.signature_public.inner().to_affine().coordinates().unwrap();
// NOTE: It's important to keep these in the same order
// as the `constrain_instance` calls in the zkas code.
vec![
self.nullifier.inner(),
*valcom_coords.x(),
*valcom_coords.y(),
self.token_commit,
self.merkle_root.inner(),
self.user_data_enc,
pallas::Base::ZERO, // We force spend_hook==0 here
*sigpub_coords.x(),
*sigpub_coords.y(),
]
}
}
pub struct TransactionBuilderInputInfo {
pub leaf_position: bridgetree::Position,
pub merkle_path: Vec<MerkleNode>,
pub secret: SecretKey,
pub note: MoneyNote,
}
/// Struct holding necessary information to build a `Money::StakeV1` contract call.
pub struct MoneyStakeCallBuilder {
/// `OwnCoin` we're given to use in this builder
pub coin: OwnCoin,
/// Merkle tree of coins used to create inclusion proofs
pub tree: MerkleTree,
/// `Burn_V1` zkas circuit ZkBinary
pub burn_zkbin: ZkBinary,
/// Proving key for the `Burn_V1` zk circuit
pub burn_pk: ProvingKey,
}
impl MoneyStakeCallBuilder {
pub fn build(&self) -> Result<MoneyStakeCallDebris> {
info!("Building Money::StakeV1 contract call");
assert!(self.coin.note.value != 0);
assert!(self.coin.note.token_id == *DARK_TOKEN_ID);
debug!("Building Money::StakeV1 anonymous input");
let leaf_position = self.coin.leaf_position;
let merkle_path = self.tree.witness(leaf_position, 0).unwrap();
let input = TransactionBuilderInputInfo {
leaf_position,
merkle_path,
secret: self.coin.secret,
note: self.coin.note.clone(),
};
// Create new random blinds and an ephemeral signature key
let value_blind = pallas::Scalar::random(&mut OsRng);
let token_blind = pallas::Base::random(&mut OsRng);
let signature_secret = SecretKey::random(&mut OsRng);
let user_data_blind = pallas::Base::random(&mut OsRng);
info!("Building Money::Stake V1 Burn ZK proof");
let (proof, public_inputs) = create_stake_burn_proof(
&self.burn_zkbin,
&self.burn_pk,
&input,
value_blind,
token_blind,
user_data_blind,
signature_secret,
)?;
let input = Input {
value_commit: public_inputs.value_commit,
token_commit: public_inputs.token_commit,
nullifier: public_inputs.nullifier,
merkle_root: public_inputs.merkle_root,
spend_hook: public_inputs.spend_hook,
user_data_enc: public_inputs.user_data_enc,
signature_public: public_inputs.signature_public,
};
// We now fill this with necessary stuff
let params = MoneyStakeParamsV1 { token_blind, input };
let proofs = vec![proof];
// Now we should have all the params, zk proof, signature secret and token blind.
// We return it all and let the caller deal with it.
let debris = MoneyStakeCallDebris { params, proofs, signature_secret, value_blind };
Ok(debris)
}
}
pub fn create_stake_burn_proof(
zkbin: &ZkBinary,
pk: &ProvingKey,
input: &TransactionBuilderInputInfo,
value_blind: pallas::Scalar,
token_blind: pallas::Base,
user_data_blind: pallas::Base,
signature_secret: SecretKey,
) -> Result<(Proof, MoneyStakeBurnRevealed)> {
let public_key = PublicKey::from_secret(input.secret);
let signature_public = PublicKey::from_secret(signature_secret);
let coin = CoinAttributes {
public_key,
value: input.note.value,
token_id: input.note.token_id,
serial: input.note.serial,
spend_hook: input.note.spend_hook,
user_data: input.note.user_data,
}
.to_coin();
let nullifier = NullifierAttributes { secret_key: input.secret, coin }.to_nullifier();
let merkle_root = {
let position: u64 = input.leaf_position.into();
let mut current = MerkleNode::from(coin.inner());
for (level, sibling) in input.merkle_path.iter().enumerate() {
let level = level as u8;
current = if position & (1 << level) == 0 {
MerkleNode::combine(level.into(), &current, sibling)
} else {
MerkleNode::combine(level.into(), sibling, &current)
};
}
current
};
let user_data_enc = poseidon_hash([input.note.user_data, user_data_blind]);
let value_commit = pedersen_commitment_u64(input.note.value, value_blind);
let token_commit = poseidon_hash([input.note.token_id.inner(), token_blind]);
let public_inputs = MoneyStakeBurnRevealed {
value_commit,
token_commit,
nullifier,
merkle_root,
spend_hook: input.note.spend_hook,
user_data_enc,
signature_public,
};
let prover_witnesses = vec![
Witness::Base(Value::known(pallas::Base::from(input.note.value))),
Witness::Base(Value::known(input.note.token_id.inner())),
Witness::Scalar(Value::known(value_blind)),
Witness::Base(Value::known(token_blind)),
Witness::Base(Value::known(input.note.serial)),
Witness::Base(Value::known(input.note.spend_hook)),
Witness::Base(Value::known(input.note.user_data)),
Witness::Base(Value::known(user_data_blind)),
Witness::Base(Value::known(input.secret.inner())),
Witness::Uint32(Value::known(u64::from(input.leaf_position).try_into().unwrap())),
Witness::MerklePath(Value::known(input.merkle_path.clone().try_into().unwrap())),
Witness::Base(Value::known(signature_secret.inner())),
];
let circuit = ZkCircuit::new(prover_witnesses, zkbin);
let proof = Proof::create(pk, &[circuit], &public_inputs.to_vec(), &mut OsRng)?;
Ok((proof, public_inputs))
}

View File

@@ -1,199 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//! This API is crufty. Please rework it into something nice to read and nice to use.
use darkfi::{
zk::{halo2::Value, Proof, ProvingKey, Witness, ZkCircuit},
zkas::ZkBinary,
Result,
};
use darkfi_sdk::{
crypto::{
note::AeadEncryptedNote, pasta_prelude::*, pedersen_commitment_u64, poseidon_hash,
MerkleNode, Nullifier, PublicKey, TokenId, DARK_TOKEN_ID,
},
pasta::pallas,
};
use log::{debug, info};
use rand::rngs::OsRng;
use crate::{
client::{ConsensusOwnCoin, MoneyNote},
model::{Coin, ConsensusInput, MoneyUnstakeParamsV1, Output},
};
pub struct MoneyUnstakeCallDebris {
pub params: MoneyUnstakeParamsV1,
pub proofs: Vec<Proof>,
}
pub struct MoneyMintRevealed {
pub coin: Coin,
pub value_commit: pallas::Point,
pub token_commit: pallas::Base,
}
impl MoneyMintRevealed {
pub fn to_vec(&self) -> Vec<pallas::Base> {
let valcom_coords = self.value_commit.to_affine().coordinates().unwrap();
// NOTE: It's important to keep these in the same order
// as the `constrain_instance` calls in the zkas code.
vec![self.coin.inner(), *valcom_coords.x(), *valcom_coords.y(), self.token_commit]
}
}
pub struct TransactionBuilderOutputInfo {
pub value: u64,
pub token_id: TokenId,
pub public_key: PublicKey,
}
/// Struct holding necessary information to build a `Money::UnstakeV1` contract call.
pub struct MoneyUnstakeCallBuilder {
/// `ConsensusOwnCoin` we're given to use in this builder
pub owncoin: ConsensusOwnCoin,
/// Recipient pubkey of the minted output
pub recipient: PublicKey,
/// Blinding factor for value commitment
pub value_blind: pallas::Scalar,
/// Revealed nullifier
pub nullifier: Nullifier,
/// Revealed Merkle root
pub merkle_root: MerkleNode,
/// Signature public key used in the input
pub signature_public: PublicKey,
/// `Mint_V1` zkas circuit ZkBinary
pub mint_zkbin: ZkBinary,
/// Proving key for the `Mint_V1` zk circuit
pub mint_pk: ProvingKey,
}
impl MoneyUnstakeCallBuilder {
pub fn build(&self) -> Result<MoneyUnstakeCallDebris> {
info!("Building Money::UnstakeV1 contract call");
assert!(self.owncoin.note.value != 0);
debug!("Building Money::UnstakeV1 anonymous output");
let output = TransactionBuilderOutputInfo {
value: self.owncoin.note.value,
token_id: *DARK_TOKEN_ID,
public_key: self.recipient,
};
let serial = pallas::Base::random(&mut OsRng);
let spend_hook = pallas::Base::ZERO;
let user_data_enc = pallas::Base::random(&mut OsRng);
let token_blind = pallas::Base::ZERO;
info!("Building Money::UnstakeV1 Mint ZK proof");
let (proof, public_inputs) = create_unstake_mint_proof(
&self.mint_zkbin,
&self.mint_pk,
&output,
self.value_blind,
token_blind,
serial,
spend_hook,
user_data_enc,
)?;
// Encrypted note
let note = MoneyNote {
serial,
value: output.value,
token_id: output.token_id,
spend_hook,
user_data: user_data_enc,
value_blind: self.value_blind,
token_blind,
memo: vec![],
};
let encrypted_note = AeadEncryptedNote::encrypt(&note, &output.public_key, &mut OsRng)?;
let tx_output = Output {
value_commit: public_inputs.value_commit,
token_commit: public_inputs.token_commit,
coin: public_inputs.coin,
note: encrypted_note,
};
let tx_input = ConsensusInput {
epoch: self.owncoin.note.epoch,
value_commit: public_inputs.value_commit,
nullifier: self.nullifier,
merkle_root: self.merkle_root,
signature_public: self.signature_public,
};
// We now fill this with necessary stuff
let params = MoneyUnstakeParamsV1 { input: tx_input, output: tx_output };
// Now we should have all the params and zk proof.
// We return it all and let the caller deal with it.
let debris = MoneyUnstakeCallDebris { params, proofs: vec![proof] };
Ok(debris)
}
}
#[allow(clippy::too_many_arguments)]
pub fn create_unstake_mint_proof(
zkbin: &ZkBinary,
pk: &ProvingKey,
output: &TransactionBuilderOutputInfo,
value_blind: pallas::Scalar,
token_blind: pallas::Base,
serial: pallas::Base,
spend_hook: pallas::Base,
user_data: pallas::Base,
) -> Result<(Proof, MoneyMintRevealed)> {
let value_commit = pedersen_commitment_u64(output.value, value_blind);
let token_commit = poseidon_hash([output.token_id.inner(), token_blind]);
let (pub_x, pub_y) = output.public_key.xy();
let coin = Coin::from(poseidon_hash([
pub_x,
pub_y,
pallas::Base::from(output.value),
output.token_id.inner(),
serial,
spend_hook,
user_data,
]));
let public_inputs = MoneyMintRevealed { coin, value_commit, token_commit };
let prover_witnesses = vec![
Witness::Base(Value::known(pub_x)),
Witness::Base(Value::known(pub_y)),
Witness::Base(Value::known(pallas::Base::from(output.value))),
Witness::Base(Value::known(output.token_id.inner())),
Witness::Base(Value::known(serial)),
Witness::Base(Value::known(spend_hook)),
Witness::Base(Value::known(user_data)),
Witness::Scalar(Value::known(value_blind)),
Witness::Base(Value::known(token_blind)),
];
let circuit = ZkCircuit::new(prover_witnesses, zkbin);
let proof = Proof::create(pk, &[circuit], &public_inputs.to_vec(), &mut OsRng)?;
Ok((proof, public_inputs))
}

View File

@@ -29,9 +29,8 @@ use darkfi_serial::{deserialize, serialize, Encodable, WriteExt};
use crate::{
model::{
MoneyFeeUpdateV1, MoneyGenesisMintUpdateV1, MoneyPoWRewardUpdateV1, MoneyStakeUpdateV1,
MoneyFeeUpdateV1, MoneyGenesisMintUpdateV1, MoneyPoWRewardUpdateV1,
MoneyTokenFreezeUpdateV1, MoneyTokenMintUpdateV1, MoneyTransferUpdateV1,
MoneyUnstakeUpdateV1,
},
MoneyFunction, MONEY_CONTRACT_COINS_TREE, MONEY_CONTRACT_COIN_MERKLE_TREE,
MONEY_CONTRACT_COIN_ROOTS_TREE, MONEY_CONTRACT_DB_VERSION, MONEY_CONTRACT_FAUCET_PUBKEYS,
@@ -80,19 +79,6 @@ use token_freeze_v1::{
money_token_freeze_process_update_v1,
};
/// `Money::Stake` functions
mod stake_v1;
use stake_v1::{
money_stake_get_metadata_v1, money_stake_process_instruction_v1, money_stake_process_update_v1,
};
/// `Money::Unstake` functions
mod unstake_v1;
use unstake_v1::{
money_unstake_get_metadata_v1, money_unstake_process_instruction_v1,
money_unstake_process_update_v1,
};
/// `Money::PoWReward` functions
mod pow_reward_v1;
use pow_reward_v1::{
@@ -209,8 +195,6 @@ fn get_metadata(cid: ContractId, ix: &[u8]) -> ContractResult {
MoneyFunction::GenesisMintV1 => money_genesis_mint_get_metadata_v1(cid, call_idx, calls)?,
MoneyFunction::TokenMintV1 => money_token_mint_get_metadata_v1(cid, call_idx, calls)?,
MoneyFunction::TokenFreezeV1 => money_token_freeze_get_metadata_v1(cid, call_idx, calls)?,
MoneyFunction::StakeV1 => money_stake_get_metadata_v1(cid, call_idx, calls)?,
MoneyFunction::UnstakeV1 => money_unstake_get_metadata_v1(cid, call_idx, calls)?,
MoneyFunction::PoWRewardV1 => money_pow_reward_get_metadata_v1(cid, call_idx, calls)?,
};
@@ -245,8 +229,6 @@ fn process_instruction(cid: ContractId, ix: &[u8]) -> ContractResult {
MoneyFunction::TokenFreezeV1 => {
money_token_freeze_process_instruction_v1(cid, call_idx, calls)?
}
MoneyFunction::StakeV1 => money_stake_process_instruction_v1(cid, call_idx, calls)?,
MoneyFunction::UnstakeV1 => money_unstake_process_instruction_v1(cid, call_idx, calls)?,
MoneyFunction::PoWRewardV1 => {
money_pow_reward_process_instruction_v1(cid, call_idx, calls)?
}
@@ -293,16 +275,6 @@ fn process_update(cid: ContractId, update_data: &[u8]) -> ContractResult {
Ok(money_token_freeze_process_update_v1(cid, update)?)
}
MoneyFunction::StakeV1 => {
let update: MoneyStakeUpdateV1 = deserialize(&update_data[1..])?;
Ok(money_stake_process_update_v1(cid, update)?)
}
MoneyFunction::UnstakeV1 => {
let update: MoneyUnstakeUpdateV1 = deserialize(&update_data[1..])?;
Ok(money_unstake_process_update_v1(cid, update)?)
}
MoneyFunction::PoWRewardV1 => {
let update: MoneyPoWRewardUpdateV1 = deserialize(&update_data[1..])?;
Ok(money_pow_reward_process_update_v1(cid, update)?)

View File

@@ -1,183 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_sdk::{
crypto::{pasta_prelude::*, poseidon_hash, ContractId, CONSENSUS_CONTRACT_ID, DARK_TOKEN_ID},
dark_tree::DarkLeaf,
db::{db_contains_key, db_lookup, db_set},
error::{ContractError, ContractResult},
msg,
pasta::pallas,
ContractCall,
};
use darkfi_serial::{deserialize, serialize, Encodable, WriteExt};
use crate::{
error::MoneyError,
model::{ConsensusStakeParamsV1, MoneyStakeParamsV1, MoneyStakeUpdateV1},
MoneyFunction, MONEY_CONTRACT_COIN_ROOTS_TREE, MONEY_CONTRACT_NULLIFIERS_TREE,
MONEY_CONTRACT_ZKAS_BURN_NS_V1,
};
/// `get_metadata` function for `Money::StakeV1`
pub(crate) fn money_stake_get_metadata_v1(
_cid: ContractId,
call_idx: u32,
calls: Vec<DarkLeaf<ContractCall>>,
) -> Result<Vec<u8>, ContractError> {
let self_ = &calls[call_idx as usize].data;
let params: MoneyStakeParamsV1 = deserialize(&self_.data[1..])?;
let input = &params.input;
// Public inputs for the ZK proofs we have to verify
let mut zk_public_inputs: Vec<(String, Vec<pallas::Base>)> = vec![];
// Public keys for the transaction signatures we have to verify
let signature_pubkeys = vec![input.signature_public];
// Grab the pedersen commitments and signature pubkeys from the
// anonymous input
let value_coords = input.value_commit.to_affine().coordinates().unwrap();
let (sig_x, sig_y) = input.signature_public.xy();
// It is very important that these are in the same order as the
// `constrain_instance` calls in the zkas code.
// Otherwise verification will fail.
zk_public_inputs.push((
MONEY_CONTRACT_ZKAS_BURN_NS_V1.to_string(),
vec![
input.nullifier.inner(),
*value_coords.x(),
*value_coords.y(),
input.token_commit,
input.merkle_root.inner(),
input.user_data_enc,
pallas::Base::ZERO, // We enforce spend_hook==0
sig_x,
sig_y,
],
));
// Serialize everything gathered and return it
let mut metadata = vec![];
zk_public_inputs.encode(&mut metadata)?;
signature_pubkeys.encode(&mut metadata)?;
Ok(metadata)
}
/// `process_instruction` function for `Money::StakeV1`
pub(crate) fn money_stake_process_instruction_v1(
cid: ContractId,
call_idx: u32,
calls: Vec<DarkLeaf<ContractCall>>,
) -> Result<Vec<u8>, ContractError> {
let self_ = &calls[call_idx as usize];
let params: MoneyStakeParamsV1 = deserialize(&self_.data.data[1..])?;
// Access the necessary databases where there is information to
// validate this state transition.
let nullifiers_db = db_lookup(cid, MONEY_CONTRACT_NULLIFIERS_TREE)?;
let coin_roots_db = db_lookup(cid, MONEY_CONTRACT_COIN_ROOTS_TREE)?;
// ===================================
// Perform the actual state transition
// ===================================
msg!("[MoneyStakeV1] Validating anonymous input");
let input = &params.input;
// Spend hook should be zero so there's no protocol holding the tokens back.
if input.spend_hook != pallas::Base::ZERO {
msg!("[MoneyStakeV1] Error: Input has a non-zero spend hook set");
return Err(MoneyError::SpendHookNonZero.into())
}
// Only native token can be staked
if input.token_commit != poseidon_hash([DARK_TOKEN_ID.inner(), params.token_blind]) {
msg!("[MoneyStakeV1] Error: Input used non-native token");
return Err(MoneyError::StakeInputNonNativeToken.into())
}
// The Merkle root is used to know whether this is a coin that
// existed in a previous state.
if !db_contains_key(coin_roots_db, &serialize(&input.merkle_root))? {
msg!("[MoneyStakeV1] Error: Merkle root not found in previous state");
return Err(MoneyError::TransferMerkleRootNotFound.into())
}
// The nullifiers should not already exist. It is the double-spend protection.
if db_contains_key(nullifiers_db, &serialize(&input.nullifier))? {
msg!("[MoneyStakeV1] Error: Duplicate nullifier found");
return Err(MoneyError::DuplicateNullifier.into())
}
// Check parent call is consensus contract
let parent_call_idx = self_.parent_index;
if parent_call_idx.is_none() {
msg!("[MoneyStakeV1] Error: parent_call_idx is missing");
return Err(MoneyError::StakeParentCallNotConsensusContract.into())
}
let parent_call_idx = parent_call_idx.unwrap();
if parent_call_idx >= calls.len() {
msg!("[MoneyStakeV1] Error: next_call_idx out of bounds");
return Err(MoneyError::CallIdxOutOfBounds.into())
}
// Verify parent call corresponds to Consensus::StakeV1 (0x01)
let parent = &calls[parent_call_idx].data;
if parent.contract_id.inner() != CONSENSUS_CONTRACT_ID.inner() {
msg!("[MoneyStakeV1] Error: Parent contract call is not consensus contract");
return Err(MoneyError::StakeParentCallNotConsensusContract.into())
}
if parent.data[0] != 0x01 {
msg!("[MoneyStakeV1] Error: Parent call function mismatch");
return Err(MoneyError::ParentCallFunctionMismatch.into())
}
// Verify the parent call's ConsensusInput is the same as this call's input
let parent_params: ConsensusStakeParamsV1 = deserialize(&parent.data[1..])?;
if input != &parent_params.input {
msg!("[MoneyStakeV1] Error: Parent call input mismatch");
return Err(MoneyError::ParentCallInputMismatch.into())
}
// At this point the state transition has passed, so we create a state update
let update = MoneyStakeUpdateV1 { nullifier: input.nullifier };
let mut update_data = vec![];
update_data.write_u8(MoneyFunction::StakeV1 as u8)?;
update.encode(&mut update_data)?;
// and return it
Ok(update_data)
}
/// `process_update` function for `Money::StakeV1`
pub(crate) fn money_stake_process_update_v1(
cid: ContractId,
update: MoneyStakeUpdateV1,
) -> ContractResult {
// Grab all necessary db handles for where we want to write
let nullifiers_db = db_lookup(cid, MONEY_CONTRACT_NULLIFIERS_TREE)?;
msg!("[MoneyStakeV1] Adding new nullifier to the set");
db_set(nullifiers_db, &serialize(&update.nullifier), &[])?;
Ok(())
}

View File

@@ -1,199 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use darkfi_sdk::{
crypto::{
pasta_prelude::*, poseidon_hash, ContractId, MerkleNode, PublicKey, CONSENSUS_CONTRACT_ID,
DARK_TOKEN_ID,
},
dark_tree::DarkLeaf,
db::{db_contains_key, db_lookup, db_set},
error::{ContractError, ContractResult},
merkle_add, msg,
pasta::pallas,
ContractCall,
};
use darkfi_serial::{deserialize, serialize, Encodable, WriteExt};
use crate::{
error::MoneyError,
model::{ConsensusUnstakeParamsV1, MoneyUnstakeParamsV1, MoneyUnstakeUpdateV1},
MoneyFunction, CONSENSUS_CONTRACT_NULLIFIERS_TREE, CONSENSUS_CONTRACT_UNSTAKED_COIN_ROOTS_TREE,
MONEY_CONTRACT_COINS_TREE, MONEY_CONTRACT_COIN_MERKLE_TREE, MONEY_CONTRACT_COIN_ROOTS_TREE,
MONEY_CONTRACT_INFO_TREE, MONEY_CONTRACT_LATEST_COIN_ROOT, MONEY_CONTRACT_ZKAS_MINT_NS_V1,
};
/// `get_metadata` function for `Money::UnstakeV1`
pub(crate) fn money_unstake_get_metadata_v1(
_cid: ContractId,
call_idx: u32,
calls: Vec<DarkLeaf<ContractCall>>,
) -> Result<Vec<u8>, ContractError> {
let self_ = &calls[call_idx as usize].data;
let params: MoneyUnstakeParamsV1 = deserialize(&self_.data[1..])?;
// Public inputs for the ZK proofs we have to verify
let mut zk_public_inputs: Vec<(String, Vec<pallas::Base>)> = vec![];
// We don't have to verify any signatures here, since they're already
// in the previous contract call (Consensus::UnstakeV1)
let signature_pubkeys: Vec<PublicKey> = vec![];
// Grab the pedersen commitment from the anonymous output
let value_coords = params.output.value_commit.to_affine().coordinates().unwrap();
zk_public_inputs.push((
MONEY_CONTRACT_ZKAS_MINT_NS_V1.to_string(),
vec![
params.output.coin.inner(),
*value_coords.x(),
*value_coords.y(),
params.output.token_commit,
],
));
// Serialize everything gathered and return it
let mut metadata = vec![];
zk_public_inputs.encode(&mut metadata)?;
signature_pubkeys.encode(&mut metadata)?;
Ok(metadata)
}
/// `process_instruction` function for `Money::UnstakeV1`
pub(crate) fn money_unstake_process_instruction_v1(
cid: ContractId,
call_idx: u32,
calls: Vec<DarkLeaf<ContractCall>>,
) -> Result<Vec<u8>, ContractError> {
let self_ = &calls[call_idx as usize];
let params: MoneyUnstakeParamsV1 = deserialize(&self_.data.data[1..])?;
let input = &params.input;
let output = &params.output;
// Access the necessary databases where there is information to
// validate this state transition.
let money_coins_db = db_lookup(cid, MONEY_CONTRACT_COINS_TREE)?;
let consensus_nullifiers_db =
db_lookup(*CONSENSUS_CONTRACT_ID, CONSENSUS_CONTRACT_NULLIFIERS_TREE)?;
let consensus_unstaked_coin_roots_db =
db_lookup(*CONSENSUS_CONTRACT_ID, CONSENSUS_CONTRACT_UNSTAKED_COIN_ROOTS_TREE)?;
// ===================================
// Perform the actual state transition
// ===================================
// Check child call is consensus contract
if call_idx == 0 {
msg!("[MoneyUnstakeV1] Error: child_call_idx will be out of bounds");
return Err(MoneyError::CallIdxOutOfBounds.into())
}
let child_call_indexes = &self_.children_indexes;
if child_call_indexes.len() != 1 {
msg!("[MoneyUnstakeV1] Error: child_call_idx is missing");
return Err(MoneyError::UnstakeChildCallNotConsensusContract.into())
}
let child_call_idx = child_call_indexes[0];
let child = &calls[child_call_idx].data;
if child.contract_id.inner() != CONSENSUS_CONTRACT_ID.inner() {
msg!("[MoneyUnstakeV1] Error: Child contract call is not consensus contract");
return Err(MoneyError::UnstakeChildCallNotConsensusContract.into())
}
// Verify child call corresponds to Consensus::UnstakeV1 (0x04)
if child.data[0] != 0x04 {
msg!("[MoneyUnstakeV1] Error: Child call function mismatch");
return Err(MoneyError::ChildCallFunctionMismatch.into())
}
// Verify the child call's input is the same as this call's input
let child_params: ConsensusUnstakeParamsV1 = deserialize(&child.data[1..])?;
let child_input = &child_params.input;
if child_input != input {
msg!("[MoneyUnstakeV1] Error: Child call input mismatch");
return Err(MoneyError::ChildCallInputMismatch.into())
}
msg!("[MoneyUnstakeV1] Validating anonymous output");
// Only native token can be minted here.
// Since consensus coins don't have token commitments, we use zero as
// the token blind for the token commitment of the newly minted token
if output.token_commit != poseidon_hash([DARK_TOKEN_ID.inner(), pallas::Base::ZERO]) {
msg!("[MoneyUnstakeV1] Error: Input used non-native token");
return Err(MoneyError::StakeInputNonNativeToken.into())
}
// Verify value commits match
if output.value_commit != input.value_commit {
msg!("[MoneyUnstakeV1] Error: Value commitments do not match");
return Err(MoneyError::ValueMismatch.into())
}
// The Merkle root is used to know whether this is a coin that
// existed in a previous state.
if !db_contains_key(consensus_unstaked_coin_roots_db, &serialize(&input.merkle_root))? {
msg!("[MoneyUnstakeV1] Error: Merkle root not found in previous state");
return Err(MoneyError::TransferMerkleRootNotFound.into())
}
// The nullifiers should already exist in the Consensus nullifier set
if !db_contains_key(consensus_nullifiers_db, &serialize(&input.nullifier))? {
msg!("[MoneyUnstakeV1] Error: Nullifier not found in Consensus nullifier set");
return Err(MoneyError::MissingNullifier.into())
}
// Newly created coin for this call is in the output. Here we gather it,
// and we also check that it hasn't existed before.
if db_contains_key(money_coins_db, &serialize(&output.coin))? {
msg!("[MoneyUnstakeV1] Error: Duplicate coin found in output");
return Err(MoneyError::DuplicateCoin.into())
}
// Create a state update.
let update = MoneyUnstakeUpdateV1 { coin: output.coin };
let mut update_data = vec![];
update_data.write_u8(MoneyFunction::UnstakeV1 as u8)?;
update.encode(&mut update_data)?;
Ok(update_data)
}
/// `process_update` function for `Money::UnstakeV1`
pub(crate) fn money_unstake_process_update_v1(
cid: ContractId,
update: MoneyUnstakeUpdateV1,
) -> ContractResult {
// Grab all necessary db handles for where we want to write
let info_db = db_lookup(cid, MONEY_CONTRACT_INFO_TREE)?;
let coins_db = db_lookup(cid, MONEY_CONTRACT_COINS_TREE)?;
let coin_roots_db = db_lookup(cid, MONEY_CONTRACT_COIN_ROOTS_TREE)?;
msg!("[MoneyUnstakeV1] Adding new coin to the set");
db_set(coins_db, &serialize(&update.coin), &[])?;
msg!("[MoneyUnstakeV1] Adding new coin to the Merkle tree");
let coins: Vec<_> = vec![MerkleNode::from(update.coin.inner())];
merkle_add(
info_db,
coin_roots_db,
MONEY_CONTRACT_LATEST_COIN_ROOT,
MONEY_CONTRACT_COIN_MERKLE_TREE,
&coins,
)?;
Ok(())
}
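// In this contract layout, state writes (db_set, merkle_add) are confined to the
// process_update step; get_metadata and process_instruction above only read state
// and hand the runtime the data it needs for verification.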

View File

@@ -76,30 +76,6 @@ pub enum MoneyError {
#[error("Token mint is frozen")]
TokenMintFrozen,
#[error("Input used non-native token")]
StakeInputNonNativeToken,
#[error("Missing spend hook")]
StakeMissingSpendHook,
#[error("Missing nullifier")]
StakeMissingNullifier,
#[error("Parent contract call is not consensus contract")]
StakeParentCallNotConsensusContract,
#[error("Child contract call is not money contract")]
StakeChildCallNotMoneyContract,
#[error("Spend hook is not consensus contract")]
UnstakeSpendHookNotConsensusContract,
#[error("Parent contract call is not money contract")]
UnstakeParentCallNotMoneyContract,
#[error("Child contract call is not consensus contract")]
UnstakeChildCallNotConsensusContract,
#[error("Parent call function mismatch")]
ParentCallFunctionMismatch,
@@ -165,28 +141,20 @@ impl From<MoneyError> for ContractError {
MoneyError::SwapMerkleRootNotFound => Self::Custom(16),
MoneyError::TokenIdDoesNotDeriveFromMint => Self::Custom(17),
MoneyError::TokenMintFrozen => Self::Custom(18),
MoneyError::StakeInputNonNativeToken => Self::Custom(19),
MoneyError::StakeMissingSpendHook => Self::Custom(20),
MoneyError::StakeMissingNullifier => Self::Custom(21),
MoneyError::StakeParentCallNotConsensusContract => Self::Custom(22),
MoneyError::StakeChildCallNotMoneyContract => Self::Custom(23),
MoneyError::UnstakeSpendHookNotConsensusContract => Self::Custom(24),
MoneyError::UnstakeParentCallNotMoneyContract => Self::Custom(25),
MoneyError::UnstakeChildCallNotConsensusContract => Self::Custom(26),
MoneyError::ParentCallFunctionMismatch => Self::Custom(27),
MoneyError::ParentCallInputMismatch => Self::Custom(28),
MoneyError::ChildCallFunctionMismatch => Self::Custom(29),
MoneyError::ChildCallInputMismatch => Self::Custom(30),
MoneyError::GenesisCallNonGenesisBlock => Self::Custom(31),
MoneyError::GenesisCallNonGenesisSlot => Self::Custom(32),
MoneyError::MissingNullifier => Self::Custom(33),
MoneyError::PoWRewardCallAfterCutoffBlockHeight => Self::Custom(34),
MoneyError::PoWRewardMissingSlot => Self::Custom(35),
MoneyError::PoWRewardExtendsUnknownFork => Self::Custom(36),
MoneyError::PoWRewardErroneousVrfProof => Self::Custom(37),
MoneyError::FeeMissingInputs => Self::Custom(38),
MoneyError::InsufficientFee => Self::Custom(39),
MoneyError::CoinMerkleRootNotFound => Self::Custom(40),
MoneyError::ParentCallFunctionMismatch => Self::Custom(19),
MoneyError::ParentCallInputMismatch => Self::Custom(20),
MoneyError::ChildCallFunctionMismatch => Self::Custom(21),
MoneyError::ChildCallInputMismatch => Self::Custom(22),
MoneyError::GenesisCallNonGenesisBlock => Self::Custom(23),
MoneyError::GenesisCallNonGenesisSlot => Self::Custom(24),
MoneyError::MissingNullifier => Self::Custom(25),
MoneyError::PoWRewardCallAfterCutoffBlockHeight => Self::Custom(26),
MoneyError::PoWRewardMissingSlot => Self::Custom(27),
MoneyError::PoWRewardExtendsUnknownFork => Self::Custom(28),
MoneyError::PoWRewardErroneousVrfProof => Self::Custom(29),
MoneyError::FeeMissingInputs => Self::Custom(30),
MoneyError::InsufficientFee => Self::Custom(31),
MoneyError::CoinMerkleRootNotFound => Self::Custom(32),
}
}
}
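// NOTE: dropping the Stake/Unstake variants shifts every later Custom(_) code
// (e.g. MissingNullifier moves from 33 to 25), so anything that matched on the
// old numeric error codes has to be updated together with this change.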

View File

@@ -31,9 +31,7 @@ pub enum MoneyFunction {
OtcSwapV1 = 0x03,
TokenMintV1 = 0x04,
TokenFreezeV1 = 0x05,
StakeV1 = 0x06,
UnstakeV1 = 0x07,
PoWRewardV1 = 0x08,
PoWRewardV1 = 0x06,
}
// ANCHOR_END: money-function
@@ -48,9 +46,7 @@ impl TryFrom<u8> for MoneyFunction {
0x03 => Ok(Self::OtcSwapV1),
0x04 => Ok(Self::TokenMintV1),
0x05 => Ok(Self::TokenFreezeV1),
0x06 => Ok(Self::StakeV1),
0x07 => Ok(Self::UnstakeV1),
0x08 => Ok(Self::PoWRewardV1),
0x06 => Ok(Self::PoWRewardV1),
_ => Err(ContractError::InvalidFunction),
}
}
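// The discriminant above is written as the first byte of ContractCall::data when a
// call is built (see the test harness below, e.g. `vec![MoneyFunction::UnstakeV1 as u8]`
// followed by the encoded params); it is the byte process_instruction dispatches on
// and the one child-call checks such as `child.data[0] != 0x04` inspect.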
@@ -94,29 +90,3 @@ pub const MONEY_CONTRACT_ZKAS_BURN_NS_V1: &str = "Burn_V1";
pub const MONEY_CONTRACT_ZKAS_TOKEN_MINT_NS_V1: &str = "TokenMint_V1";
/// zkas token freeze circuit namespace
pub const MONEY_CONTRACT_ZKAS_TOKEN_FRZ_NS_V1: &str = "TokenFreeze_V1";
// These are the different sled trees that will be created
// for the consensus contract.
// We keep them here so we can reference them both in `Money`
// and `Consensus` contracts.
pub const CONSENSUS_CONTRACT_INFO_TREE: &str = "consensus_info";
pub const CONSENSUS_CONTRACT_NULLIFIERS_TREE: &str = "consensus_nullifiers";
pub const CONSENSUS_CONTRACT_STAKED_COINS_TREE: &str = "consensus_staked_coins";
pub const CONSENSUS_CONTRACT_UNSTAKED_COINS_TREE: &str = "consensus_unstaked_coins";
pub const CONSENSUS_CONTRACT_STAKED_COIN_ROOTS_TREE: &str = "consensus_staked_coin_roots";
pub const CONSENSUS_CONTRACT_UNSTAKED_COIN_ROOTS_TREE: &str = "consensus_unstaked_coin_roots";
// These are keys inside the consensus info tree
pub const CONSENSUS_CONTRACT_DB_VERSION: &[u8] = b"db_version";
pub const CONSENSUS_CONTRACT_STAKED_COIN_MERKLE_TREE: &[u8] = b"consensus_staked_coin_tree";
pub const CONSENSUS_CONTRACT_STAKED_COIN_LATEST_COIN_ROOT: &[u8] = b"consensus_staked_last_root";
pub const CONSENSUS_CONTRACT_UNSTAKED_COIN_MERKLE_TREE: &[u8] = b"consensus_unstaked_coin_tree";
pub const CONSENSUS_CONTRACT_UNSTAKED_COIN_LATEST_COIN_ROOT: &[u8] =
b"consensus_unstaked_last_root";
/// zkas consensus mint circuit namespace
pub const CONSENSUS_CONTRACT_ZKAS_MINT_NS_V1: &str = "ConsensusMint_V1";
/// zkas consensus burn circuit namespace
pub const CONSENSUS_CONTRACT_ZKAS_BURN_NS_V1: &str = "ConsensusBurn_V1";
/// zkas proposal circuit namespace
pub const CONSENSUS_CONTRACT_ZKAS_PROPOSAL_NS_V1: &str = "ConsensusProposal_V1";
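// NOTE: Money entrypoints (e.g. Money::UnstakeV1 above) only ever read these trees
// via db_lookup(*CONSENSUS_CONTRACT_ID, ...); keeping the names in this crate lets
// both contracts agree on them without a circular crate dependency.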

View File

@@ -163,32 +163,6 @@ pub struct Output {
}
// ANCHOR_END: money-output
/// Anonymous input for consensus contract calls
#[derive(Clone, Debug, PartialEq, SerialEncodable, SerialDecodable)]
pub struct ConsensusInput {
/// Epoch the coin was minted
pub epoch: u64,
/// Pedersen commitment for the staked coin's value
pub value_commit: pallas::Point,
/// Revealed nullifier
pub nullifier: Nullifier,
/// Revealed Merkle root
pub merkle_root: MerkleNode,
/// Public key for the signature
pub signature_public: PublicKey,
}
/// A consensus contract call's anonymous output
#[derive(Clone, Debug, PartialEq, SerialEncodable, SerialDecodable)]
pub struct ConsensusOutput {
/// Pedersen commitment for the output's value
pub value_commit: pallas::Point,
/// Minted coin
pub coin: Coin,
/// AEAD encrypted note
pub note: AeadEncryptedNote,
}
/// Parameters for `Money::Fee`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
pub struct MoneyFeeParamsV1 {
@@ -304,90 +278,3 @@ pub struct MoneyPoWRewardUpdateV1 {
/// The newly minted coin
pub coin: Coin,
}
/// Parameters for `Money::Stake`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// ANCHOR: MoneyStakeParams
pub struct MoneyStakeParamsV1 {
/// Blinding factor for `token_id`
pub token_blind: pallas::Base,
/// Anonymous input
pub input: Input,
}
// ANCHOR_END: MoneyStakeParams
/// State update for `Money::Stake`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// ANCHOR: MoneyStakeUpdate
pub struct MoneyStakeUpdateV1 {
/// Revealed nullifier
pub nullifier: Nullifier,
}
// ANCHOR_END: MoneyStakeUpdate
/// Parameters for `Money::Unstake`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// ANCHOR: MoneyUnstakeParams
pub struct MoneyUnstakeParamsV1 {
/// Burnt token revealed info
pub input: ConsensusInput,
/// Anonymous output
pub output: Output,
}
// ANCHOR_END: MoneyUnstakeParams
/// State update for `Money::Unstake`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// ANCHOR: MoneyUnstakeUpdate
pub struct MoneyUnstakeUpdateV1 {
/// The newly minted coin
pub coin: Coin,
}
// ANCHOR_END: MoneyUnstakeUpdate
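// A typical call payload for these params (sketch, mirroring the test harness
// further down; `money_unstake_params` is an illustrative variable):
//
//     let mut data = vec![MoneyFunction::UnstakeV1 as u8];
//     money_unstake_params.encode(&mut data)?;
//     let call = ContractCall { contract_id: *MONEY_CONTRACT_ID, data };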
/// Parameters for `Consensus::Stake`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// ANCHOR: ConsensusStakeParams
pub struct ConsensusStakeParamsV1 {
/// Burnt token revealed info
pub input: Input,
/// Anonymous output
pub output: ConsensusOutput,
}
// ANCHOR_END: ConsensusStakeParams
/// State update for `Consensus::Stake`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// ANCHOR: ConsensusStakeUpdate
pub struct ConsensusStakeUpdateV1 {
/// The newly minted coin
pub coin: Coin,
}
// ANCHOR_END: ConsensusStakeUpdate
/// Parameters for `Consensus::UnstakeRequest`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// ANCHOR: ConsensusUnstakeReqParams
pub struct ConsensusUnstakeReqParamsV1 {
pub input: ConsensusInput,
pub output: ConsensusOutput,
}
// ANCHOR_END: ConsensusUnstakeReqParams
/// Parameters for `Consensus::Unstake`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// ANCHOR: ConsensusUnstakeParams
pub struct ConsensusUnstakeParamsV1 {
/// Anonymous input
pub input: ConsensusInput,
}
// ANCHOR_END: ConsensusUnstakeParams
/// State update for `Consensus::Unstake`
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// ANCHOR: ConsensusUnstakeUpdate
pub struct ConsensusUnstakeUpdateV1 {
/// Revealed nullifier
pub nullifier: Nullifier,
}
// ANCHOR_END: ConsensusUnstakeUpdate

View File

@@ -11,7 +11,6 @@ darkfi-sdk = {path = "../../../src/sdk"}
darkfi-serial = {path = "../../../src/serial", features = ["crypto"]}
darkfi_dao_contract = {path = "../dao", features = ["client", "no-entrypoint"]}
darkfi_money_contract = {path = "../money", features = ["client", "no-entrypoint"]}
darkfi_consensus_contract = {path = "../consensus", features = ["client", "no-entrypoint"]}
darkfi_deployooor_contract = {path = "../deployooor", features = ["client", "no-entrypoint"]}
num-bigint = "0.4.4"

View File

@@ -1,119 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::time::Instant;
use darkfi::{
tx::{ContractCallLeaf, Transaction, TransactionBuilder},
zk::halo2::Field,
Result,
};
use darkfi_consensus_contract::{
client::genesis_stake_v1::ConsensusGenesisStakeCallBuilder,
model::ConsensusGenesisStakeParamsV1, ConsensusFunction,
};
use darkfi_money_contract::CONSENSUS_CONTRACT_ZKAS_MINT_NS_V1;
use darkfi_sdk::{
crypto::{MerkleNode, CONSENSUS_CONTRACT_ID},
pasta::pallas,
ContractCall,
};
use darkfi_serial::{serialize, Encodable};
use rand::rngs::OsRng;
use super::{Holder, TestHarness, TxAction};
impl TestHarness {
pub fn genesis_stake(
&mut self,
holder: &Holder,
amount: u64,
) -> Result<(Transaction, ConsensusGenesisStakeParamsV1)> {
let wallet = self.holders.get(holder).unwrap();
let (mint_pk, mint_zkbin) =
self.proving_keys.get(&CONSENSUS_CONTRACT_ZKAS_MINT_NS_V1.to_string()).unwrap();
let tx_action_benchmark =
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusGenesisStake).unwrap();
let timer = Instant::now();
// Building Consensus::GenesisStake params
let genesis_stake_call_debris = ConsensusGenesisStakeCallBuilder {
keypair: wallet.keypair,
recipient: wallet.keypair.public,
amount,
mint_zkbin: mint_zkbin.clone(),
mint_pk: mint_pk.clone(),
}
.build_with_params(
pallas::Scalar::random(&mut OsRng),
pallas::Base::random(&mut OsRng),
pallas::Scalar::random(&mut OsRng),
pallas::Base::from(62),
)?;
let (genesis_stake_params, genesis_stake_proofs) =
(genesis_stake_call_debris.params, genesis_stake_call_debris.proofs);
// Building genesis stake tx
let mut data = vec![ConsensusFunction::GenesisStakeV1 as u8];
genesis_stake_params.encode(&mut data)?;
let contract_call = ContractCall { contract_id: *CONSENSUS_CONTRACT_ID, data };
let mut genesis_stake_tx_builder = TransactionBuilder::new(
ContractCallLeaf { call: contract_call, proofs: genesis_stake_proofs },
vec![],
)?;
let mut genesis_stake_tx = genesis_stake_tx_builder.build()?;
let sigs = genesis_stake_tx.create_sigs(&mut OsRng, &[wallet.keypair.secret])?;
genesis_stake_tx.signatures = vec![sigs];
tx_action_benchmark.creation_times.push(timer.elapsed());
// Calculate transaction sizes
let encoded: Vec<u8> = serialize(&genesis_stake_tx);
let size = std::mem::size_of_val(&*encoded);
tx_action_benchmark.sizes.push(size);
let base58 = bs58::encode(&encoded).into_string();
let size = std::mem::size_of_val(&*base58);
tx_action_benchmark.broadcasted_sizes.push(size);
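// size_of_val(&*encoded) measures the dereferenced byte slice, i.e. the serialized
// transaction length; the base58 variant approximates the size of the broadcast,
// text-encoded form.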
Ok((genesis_stake_tx, genesis_stake_params))
}
pub async fn execute_genesis_stake_tx(
&mut self,
holder: &Holder,
tx: &Transaction,
params: &ConsensusGenesisStakeParamsV1,
slot: u64,
) -> Result<()> {
let wallet = self.holders.get_mut(holder).unwrap();
let tx_action_benchmark =
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusGenesisStake).unwrap();
let timer = Instant::now();
wallet.validator.add_transactions(&[tx.clone()], slot, true).await?;
wallet.consensus_staked_merkle_tree.append(MerkleNode::from(params.output.coin.inner()));
tx_action_benchmark.verify_times.push(timer.elapsed());
Ok(())
}
}

View File

@@ -1,179 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::time::Instant;
use darkfi::{
tx::{ContractCallLeaf, Transaction, TransactionBuilder},
Result,
};
use darkfi_consensus_contract::{
client::proposal_v1::ConsensusProposalCallBuilder,
model::{ConsensusProposalParamsV1, REWARD},
ConsensusFunction,
};
use darkfi_money_contract::{client::ConsensusOwnCoin, CONSENSUS_CONTRACT_ZKAS_PROPOSAL_NS_V1};
use darkfi_sdk::{
blockchain::Slot,
crypto::{MerkleNode, SecretKey, CONSENSUS_CONTRACT_ID},
ContractCall,
};
use darkfi_serial::{serialize, Encodable};
use log::info;
use rand::rngs::OsRng;
use super::{Holder, TestHarness, TxAction};
impl TestHarness {
pub async fn proposal(
&mut self,
holder: &Holder,
slot: Slot,
staked_oc: &ConsensusOwnCoin,
) -> Result<(Transaction, ConsensusProposalParamsV1, SecretKey, SecretKey)> {
let wallet = self.holders.get(holder).unwrap();
let (proposal_pk, proposal_zkbin) =
self.proving_keys.get(&CONSENSUS_CONTRACT_ZKAS_PROPOSAL_NS_V1.to_string()).unwrap();
let tx_action_benchmark =
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusProposal).unwrap();
let timer = Instant::now();
// Proposals always extend genesis block
let fork_hash = self.genesis_block.hash()?;
// Building Consensus::Propose params
let proposal_call_debris = ConsensusProposalCallBuilder {
owncoin: staked_oc.clone(),
slot,
fork_hash,
fork_previous_hash: fork_hash,
merkle_tree: wallet.consensus_staked_merkle_tree.clone(),
proposal_zkbin: proposal_zkbin.clone(),
proposal_pk: proposal_pk.clone(),
}
.build()?;
let (params, proofs, output_keypair, signature_secret_key) = (
proposal_call_debris.params,
proposal_call_debris.proofs,
proposal_call_debris.keypair,
proposal_call_debris.signature_secret,
);
let mut data = vec![ConsensusFunction::ProposalV1 as u8];
params.encode(&mut data)?;
let call = ContractCall { contract_id: *CONSENSUS_CONTRACT_ID, data };
let mut tx_builder = TransactionBuilder::new(ContractCallLeaf { call, proofs }, vec![])?;
let mut tx = tx_builder.build()?;
let sigs = tx.create_sigs(&mut OsRng, &[signature_secret_key])?;
tx.signatures = vec![sigs];
tx_action_benchmark.creation_times.push(timer.elapsed());
// Calculate transaction sizes
let encoded: Vec<u8> = serialize(&tx);
let size = std::mem::size_of_val(&*encoded);
tx_action_benchmark.sizes.push(size);
let base58 = bs58::encode(&encoded).into_string();
let size = std::mem::size_of_val(&*base58);
tx_action_benchmark.broadcasted_sizes.push(size);
Ok((tx, params, signature_secret_key, output_keypair.secret))
}
pub async fn execute_proposal_tx(
&mut self,
holder: &Holder,
tx: &Transaction,
params: &ConsensusProposalParamsV1,
slot: u64,
) -> Result<()> {
let wallet = self.holders.get_mut(holder).unwrap();
let tx_action_benchmark =
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusProposal).unwrap();
let timer = Instant::now();
wallet.validator.add_test_producer_transaction(tx, slot, 2, true).await?;
wallet.consensus_staked_merkle_tree.append(MerkleNode::from(params.output.coin.inner()));
tx_action_benchmark.verify_times.push(timer.elapsed());
Ok(())
}
// Execute a proposal transaction and gather rewarded coin
pub async fn execute_proposal(
&mut self,
holders: &[Holder],
holder: &Holder,
current_slot: u64,
slot: Slot,
staked_oc: &ConsensusOwnCoin,
) -> Result<ConsensusOwnCoin> {
info!(target: "consensus", "[{holder:?}] ====================");
info!(target: "consensus", "[{holder:?}] Building proposal tx");
info!(target: "consensus", "[{holder:?}] ====================");
let (
proposal_tx,
proposal_params,
_proposal_signing_secret_key,
proposal_decryption_secret_key,
) = self.proposal(holder, slot, staked_oc).await?;
for h in holders {
info!(target: "consensus", "[{h:?}] ================================");
info!(target: "consensus", "[{h:?}] Executing {holder:?} proposal tx");
info!(target: "consensus", "[{h:?}] ================================");
self.execute_proposal_tx(h, &proposal_tx, &proposal_params, current_slot).await?;
}
self.assert_trees(holders);
// Gather new staked owncoin which includes the reward
let rewarded_staked_oc = self.gather_consensus_staked_owncoin(
holder,
&proposal_params.output,
Some(proposal_decryption_secret_key),
)?;
// Verify values match
assert!((staked_oc.note.value + REWARD) == rewarded_staked_oc.note.value);
Ok(rewarded_staked_oc)
}
pub async fn execute_erroneous_proposal_tx(
&mut self,
holder: &Holder,
tx: &Transaction,
slot: u64,
) -> Result<()> {
let wallet = self.holders.get_mut(holder).unwrap();
let tx_action_benchmark =
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusProposal).unwrap();
let timer = Instant::now();
assert!(wallet.validator.add_test_producer_transaction(tx, slot, 2, true).await.is_err());
tx_action_benchmark.verify_times.push(timer.elapsed());
Ok(())
}
}

View File

@@ -1,195 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::time::Instant;
use darkfi::{
tx::{ContractCallLeaf, Transaction, TransactionBuilder},
Result,
};
use darkfi_consensus_contract::{client::stake_v1::ConsensusStakeCallBuilder, ConsensusFunction};
use darkfi_money_contract::{
client::{stake_v1::MoneyStakeCallBuilder, ConsensusOwnCoin, OwnCoin},
model::ConsensusStakeParamsV1,
MoneyFunction, CONSENSUS_CONTRACT_ZKAS_MINT_NS_V1, MONEY_CONTRACT_ZKAS_BURN_NS_V1,
};
use darkfi_sdk::{
crypto::{MerkleNode, SecretKey, CONSENSUS_CONTRACT_ID, MONEY_CONTRACT_ID},
dark_tree::DarkTree,
pasta::pallas,
ContractCall,
};
use darkfi_serial::{serialize, Encodable};
use log::info;
use rand::rngs::OsRng;
use super::{Holder, TestHarness, TxAction};
impl TestHarness {
pub async fn stake(
&mut self,
holder: &Holder,
slot: u64,
owncoin: &OwnCoin,
serial: pallas::Base,
) -> Result<(Transaction, ConsensusStakeParamsV1, SecretKey)> {
let wallet = self.holders.get(holder).unwrap();
let (mint_pk, mint_zkbin) =
self.proving_keys.get(&CONSENSUS_CONTRACT_ZKAS_MINT_NS_V1.to_string()).unwrap();
let (burn_pk, burn_zkbin) =
self.proving_keys.get(&MONEY_CONTRACT_ZKAS_BURN_NS_V1.to_string()).unwrap();
let tx_action_benchmark =
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusStake).unwrap();
let epoch = wallet.validator.consensus.time_keeper.slot_epoch(slot);
let timer = Instant::now();
// Building Money::Stake params
let money_stake_call_debris = MoneyStakeCallBuilder {
coin: owncoin.clone(),
tree: wallet.money_merkle_tree.clone(),
burn_zkbin: burn_zkbin.clone(),
burn_pk: burn_pk.clone(),
}
.build()?;
let (
money_stake_params,
money_stake_proofs,
money_stake_secret_key,
money_stake_value_blind,
) = (
money_stake_call_debris.params,
money_stake_call_debris.proofs,
money_stake_call_debris.signature_secret,
money_stake_call_debris.value_blind,
);
// Building Consensus::Stake params
let consensus_stake_call_debris = ConsensusStakeCallBuilder {
coin: owncoin.clone(),
epoch,
value_blind: money_stake_value_blind,
money_input: money_stake_params.input.clone(),
mint_zkbin: mint_zkbin.clone(),
mint_pk: mint_pk.clone(),
}
.build_with_params(serial)?;
let (consensus_stake_params, consensus_stake_proofs, consensus_stake_secret_key) = (
consensus_stake_call_debris.params,
consensus_stake_call_debris.proofs,
consensus_stake_call_debris.signature_secret,
);
// Building stake tx
let mut data = vec![MoneyFunction::StakeV1 as u8];
money_stake_params.encode(&mut data)?;
let money_call = ContractCall { contract_id: *MONEY_CONTRACT_ID, data };
let mut data = vec![ConsensusFunction::StakeV1 as u8];
consensus_stake_params.encode(&mut data)?;
let consensus_call = ContractCall { contract_id: *CONSENSUS_CONTRACT_ID, data };
let mut stake_tx_builder = TransactionBuilder::new(
ContractCallLeaf { call: consensus_call, proofs: consensus_stake_proofs },
vec![DarkTree::new(
ContractCallLeaf { call: money_call, proofs: money_stake_proofs },
vec![],
None,
None,
)],
)?;
let mut stake_tx = stake_tx_builder.build()?;
let money_sigs = stake_tx.create_sigs(&mut OsRng, &[money_stake_secret_key])?;
let consensus_sigs = stake_tx.create_sigs(&mut OsRng, &[consensus_stake_secret_key])?;
stake_tx.signatures = vec![money_sigs, consensus_sigs];
tx_action_benchmark.creation_times.push(timer.elapsed());
// Calculate transaction sizes
let encoded: Vec<u8> = serialize(&stake_tx);
let size = std::mem::size_of_val(&*encoded);
tx_action_benchmark.sizes.push(size);
let base58 = bs58::encode(&encoded).into_string();
let size = std::mem::size_of_val(&*base58);
tx_action_benchmark.broadcasted_sizes.push(size);
Ok((stake_tx, consensus_stake_params, consensus_stake_secret_key))
}
pub async fn execute_stake_tx(
&mut self,
holder: &Holder,
tx: &Transaction,
params: &ConsensusStakeParamsV1,
slot: u64,
) -> Result<()> {
let wallet = self.holders.get_mut(holder).unwrap();
let tx_action_benchmark =
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusStake).unwrap();
let timer = Instant::now();
wallet.validator.add_transactions(&[tx.clone()], slot, true).await?;
wallet.consensus_staked_merkle_tree.append(MerkleNode::from(params.output.coin.inner()));
tx_action_benchmark.verify_times.push(timer.elapsed());
Ok(())
}
// Execute a stake transaction and gather the coin
pub async fn execute_stake(
&mut self,
holders: &[Holder],
holder: &Holder,
current_slot: u64,
oc: &OwnCoin,
serial: u64,
) -> Result<ConsensusOwnCoin> {
info!(target: "consensus", "[{holder:?}] =================");
info!(target: "consensus", "[{holder:?}] Building stake tx");
info!(target: "consensus", "[{holder:?}] =================");
let (stake_tx, stake_params, stake_secret_key) =
self.stake(holder, current_slot, oc, pallas::Base::from(serial)).await?;
for h in holders {
info!(target: "consensus", "[{h:?}] =============================");
info!(target: "consensus", "[{h:?}] Executing {holder:?} stake tx");
info!(target: "consensus", "[{h:?}] =============================");
self.execute_stake_tx(h, &stake_tx, &stake_params, current_slot).await?;
}
self.assert_trees(holders);
// Gather new staked owncoin
let staked_oc = self.gather_consensus_staked_owncoin(
holder,
&stake_params.output,
Some(stake_secret_key),
)?;
// Verify values match
assert!(oc.note.value == staked_oc.note.value);
Ok(staked_oc)
}
}

View File

@@ -1,181 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::time::Instant;
use darkfi::{
tx::{ContractCallLeaf, Transaction, TransactionBuilder},
Result,
};
use darkfi_consensus_contract::{
client::unstake_v1::ConsensusUnstakeCallBuilder, ConsensusFunction,
};
use darkfi_money_contract::{
client::{unstake_v1::MoneyUnstakeCallBuilder, ConsensusOwnCoin, OwnCoin},
model::MoneyUnstakeParamsV1,
MoneyFunction, CONSENSUS_CONTRACT_ZKAS_BURN_NS_V1, MONEY_CONTRACT_ZKAS_MINT_NS_V1,
};
use darkfi_sdk::{
crypto::{MerkleNode, SecretKey, CONSENSUS_CONTRACT_ID, MONEY_CONTRACT_ID},
dark_tree::DarkTree,
ContractCall,
};
use darkfi_serial::{serialize, Encodable};
use log::info;
use rand::rngs::OsRng;
use super::{Holder, TestHarness, TxAction};
impl TestHarness {
pub fn unstake(
&mut self,
holder: &Holder,
staked_oc: &ConsensusOwnCoin,
) -> Result<(Transaction, MoneyUnstakeParamsV1, SecretKey)> {
let wallet = self.holders.get(holder).unwrap();
let (burn_pk, burn_zkbin) =
self.proving_keys.get(&CONSENSUS_CONTRACT_ZKAS_BURN_NS_V1.to_string()).unwrap();
let (mint_pk, mint_zkbin) =
self.proving_keys.get(&MONEY_CONTRACT_ZKAS_MINT_NS_V1.to_string()).unwrap();
let tx_action_benchmark =
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusUnstake).unwrap();
let timer = Instant::now();
// Building Consensus::Unstake params
let consensus_unstake_call_debris = ConsensusUnstakeCallBuilder {
owncoin: staked_oc.clone(),
tree: wallet.consensus_unstaked_merkle_tree.clone(),
burn_zkbin: burn_zkbin.clone(),
burn_pk: burn_pk.clone(),
}
.build()?;
let (
consensus_unstake_params,
consensus_unstake_proofs,
consensus_unstake_secret_key,
consensus_unstake_value_blind,
) = (
consensus_unstake_call_debris.params,
consensus_unstake_call_debris.proofs,
consensus_unstake_call_debris.signature_secret,
consensus_unstake_call_debris.value_blind,
);
// Building Money::Unstake params
let money_unstake_call_debris = MoneyUnstakeCallBuilder {
owncoin: staked_oc.clone(),
recipient: wallet.keypair.public,
value_blind: consensus_unstake_value_blind,
nullifier: consensus_unstake_params.input.nullifier,
merkle_root: consensus_unstake_params.input.merkle_root,
signature_public: consensus_unstake_params.input.signature_public,
mint_zkbin: mint_zkbin.clone(),
mint_pk: mint_pk.clone(),
}
.build()?;
let (money_unstake_params, money_unstake_proofs) =
(money_unstake_call_debris.params, money_unstake_call_debris.proofs);
// Building unstake tx
let mut data = vec![ConsensusFunction::UnstakeV1 as u8];
consensus_unstake_params.encode(&mut data)?;
let consensus_call = ContractCall { contract_id: *CONSENSUS_CONTRACT_ID, data };
let mut data = vec![MoneyFunction::UnstakeV1 as u8];
money_unstake_params.encode(&mut data)?;
let money_call = ContractCall { contract_id: *MONEY_CONTRACT_ID, data };
let mut unstake_tx_builder = TransactionBuilder::new(
ContractCallLeaf { call: money_call, proofs: money_unstake_proofs },
vec![DarkTree::new(
ContractCallLeaf { call: consensus_call, proofs: consensus_unstake_proofs },
vec![],
None,
None,
)],
)?;
let mut unstake_tx = unstake_tx_builder.build()?;
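// Money::UnstakeV1 exposes no signature pubkeys in its get_metadata (the consensus
// child call carries the signature), so both signature slots below are filled with
// the consensus secret; judging from that metadata, only the consensus leg's
// signature is actually checked against a key.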
let consensus_sigs = unstake_tx.create_sigs(&mut OsRng, &[consensus_unstake_secret_key])?;
let money_sigs = unstake_tx.create_sigs(&mut OsRng, &[consensus_unstake_secret_key])?;
unstake_tx.signatures = vec![consensus_sigs, money_sigs];
tx_action_benchmark.creation_times.push(timer.elapsed());
// Calculate transaction sizes
let encoded: Vec<u8> = serialize(&unstake_tx);
let size = std::mem::size_of_val(&*encoded);
tx_action_benchmark.sizes.push(size);
let base58 = bs58::encode(&encoded).into_string();
let size = std::mem::size_of_val(&*base58);
tx_action_benchmark.broadcasted_sizes.push(size);
Ok((unstake_tx, money_unstake_params, consensus_unstake_secret_key))
}
pub async fn execute_unstake_tx(
&mut self,
holder: &Holder,
tx: &Transaction,
params: &MoneyUnstakeParamsV1,
slot: u64,
) -> Result<()> {
let wallet = self.holders.get_mut(holder).unwrap();
let tx_action_benchmark =
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusUnstake).unwrap();
let timer = Instant::now();
wallet.validator.add_transactions(&[tx.clone()], slot, true).await?;
wallet.money_merkle_tree.append(MerkleNode::from(params.output.coin.inner()));
tx_action_benchmark.verify_times.push(timer.elapsed());
Ok(())
}
// Execute an unstake transaction and gather unstaked coin
pub async fn execute_unstake(
&mut self,
holders: &[Holder],
holder: &Holder,
current_slot: u64,
unstake_request_oc: &ConsensusOwnCoin,
) -> Result<OwnCoin> {
info!(target: "consensus", "[{holder:?}] ===================");
info!(target: "consensus", "[{holder:?}] Building unstake tx");
info!(target: "consensus", "[{holder:?}] ===================");
let (unstake_tx, unstake_params, _) = self.unstake(holder, unstake_request_oc)?;
for h in holders {
info!(target: "consensus", "[{h:?}] ===============================");
info!(target: "consensus", "[{h:?}] Executing {holder:?} unstake tx");
info!(target: "consensus", "[{h:?}] ===============================");
self.execute_unstake_tx(h, &unstake_tx, &unstake_params, current_slot).await?;
}
self.assert_trees(holders);
// Gather new unstaked owncoin
let unstaked_oc = self.gather_owncoin(holder, &unstake_params.output, None)?;
// Verify values match
assert!(unstake_request_oc.note.value == unstaked_oc.note.value);
Ok(unstaked_oc)
}
}

View File

@@ -1,182 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2024 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
use std::time::Instant;
use darkfi::{
tx::{ContractCallLeaf, Transaction, TransactionBuilder},
Result,
};
use darkfi_consensus_contract::{
client::unstake_request_v1::ConsensusUnstakeRequestCallBuilder, ConsensusFunction,
};
use darkfi_money_contract::{
client::ConsensusOwnCoin, model::ConsensusUnstakeReqParamsV1,
CONSENSUS_CONTRACT_ZKAS_BURN_NS_V1, CONSENSUS_CONTRACT_ZKAS_MINT_NS_V1,
};
use darkfi_sdk::{
crypto::{MerkleNode, SecretKey, CONSENSUS_CONTRACT_ID},
ContractCall,
};
use darkfi_serial::{serialize, Encodable};
use log::info;
use rand::rngs::OsRng;
use super::{Holder, TestHarness, TxAction};
impl TestHarness {
pub async fn unstake_request(
&mut self,
holder: &Holder,
slot: u64,
staked_oc: &ConsensusOwnCoin,
) -> Result<(Transaction, ConsensusUnstakeReqParamsV1, SecretKey, SecretKey)> {
let wallet = self.holders.get(holder).unwrap();
let (burn_pk, burn_zkbin) =
self.proving_keys.get(&CONSENSUS_CONTRACT_ZKAS_BURN_NS_V1.to_string()).unwrap();
let (mint_pk, mint_zkbin) =
self.proving_keys.get(&CONSENSUS_CONTRACT_ZKAS_MINT_NS_V1.to_string()).unwrap();
let tx_action_benchmark =
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusUnstakeRequest).unwrap();
let epoch = wallet.validator.consensus.time_keeper.slot_epoch(slot);
let timer = Instant::now();
// Building Consensus::UnstakeRequest params
let unstake_request_call_debris = ConsensusUnstakeRequestCallBuilder {
owncoin: staked_oc.clone(),
epoch,
tree: wallet.consensus_staked_merkle_tree.clone(),
burn_zkbin: burn_zkbin.clone(),
burn_pk: burn_pk.clone(),
mint_zkbin: mint_zkbin.clone(),
mint_pk: mint_pk.clone(),
}
.build()?;
let (
unstake_request_params,
unstake_request_proofs,
unstake_request_output_keypair,
unstake_request_signature_secret_key,
) = (
unstake_request_call_debris.params,
unstake_request_call_debris.proofs,
unstake_request_call_debris.keypair,
unstake_request_call_debris.signature_secret,
);
// Building unstake request tx
let mut data = vec![ConsensusFunction::UnstakeRequestV1 as u8];
unstake_request_params.encode(&mut data)?;
let call = ContractCall { contract_id: *CONSENSUS_CONTRACT_ID, data };
let mut unstake_request_builder = TransactionBuilder::new(
ContractCallLeaf { call, proofs: unstake_request_proofs },
vec![],
)?;
let mut unstake_request_tx = unstake_request_builder.build()?;
let sigs =
unstake_request_tx.create_sigs(&mut OsRng, &[unstake_request_signature_secret_key])?;
unstake_request_tx.signatures = vec![sigs];
tx_action_benchmark.creation_times.push(timer.elapsed());
// Calculate transaction sizes
let encoded: Vec<u8> = serialize(&unstake_request_tx);
let size = std::mem::size_of_val(&*encoded);
tx_action_benchmark.sizes.push(size);
let base58 = bs58::encode(&encoded).into_string();
let size = std::mem::size_of_val(&*base58);
tx_action_benchmark.broadcasted_sizes.push(size);
Ok((
unstake_request_tx,
unstake_request_params,
unstake_request_output_keypair.secret,
unstake_request_signature_secret_key,
))
}
pub async fn execute_unstake_request_tx(
&mut self,
holder: &Holder,
tx: &Transaction,
params: &ConsensusUnstakeReqParamsV1,
slot: u64,
) -> Result<()> {
let wallet = self.holders.get_mut(holder).unwrap();
let tx_action_benchmark =
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusUnstakeRequest).unwrap();
let timer = Instant::now();
wallet.validator.add_transactions(&[tx.clone()], slot, true).await?;
wallet.consensus_unstaked_merkle_tree.append(MerkleNode::from(params.output.coin.inner()));
tx_action_benchmark.verify_times.push(timer.elapsed());
Ok(())
}
// Execute an unstake request transaction and gather requested unstaked coin
pub async fn execute_unstake_request(
&mut self,
holders: &[Holder],
holder: &Holder,
current_slot: u64,
rewarded_staked_oc: &ConsensusOwnCoin,
) -> Result<ConsensusOwnCoin> {
info!(target: "consensus", "[{holder:?}] ===========================");
info!(target: "consensus", "[{holder:?}] Building unstake request tx");
info!(target: "consensus", "[{holder:?}] ===========================");
let (
unstake_request_tx,
unstake_request_params,
unstake_request_output_secret_key,
_unstake_request_signature_secret_key,
) = self.unstake_request(holder, current_slot, rewarded_staked_oc).await?;
for h in holders {
info!(target: "consensus", "[{h:?}] ==================================");
info!(target: "consensus", "[{h:?}] Executing {holder:?} unstake request tx");
info!(target: "consensus", "[{h:?}] ==================================");
self.execute_unstake_request_tx(
h,
&unstake_request_tx,
&unstake_request_params,
current_slot,
)
.await?;
}
self.assert_trees(holders);
// Gather new unstake request owncoin
let unstake_request_oc = self.gather_consensus_unstaked_owncoin(
holder,
&unstake_request_params.output,
Some(unstake_request_output_secret_key),
)?;
// Verify values match
assert!(rewarded_staked_oc.note.value == unstake_request_oc.note.value);
Ok(unstake_request_oc)
}
}

Some files were not shown because too many files have changed in this diff.