Merge branch 'master' of github.com:darkrenaissance/darkfi

This commit is contained in:
lunar-mining
2022-04-27 09:30:33 +02:00
34 changed files with 952 additions and 398 deletions

View File

@@ -57,7 +57,7 @@ jobs:
- name: Install dependencies (Linux)
run: |
sudo apt update
sudo apt -y install build-essential clang libclang-dev llvm-dev libudev-dev pkg-config
sudo apt -y install jq build-essential clang libclang-dev llvm-dev libudev-dev pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install dependencies (macOS)

7
Cargo.lock generated
View File

@@ -1188,6 +1188,7 @@ dependencies = [
"async-executor",
"async-std",
"async-trait",
"blake3",
"chrono",
"ctrlc-async",
"darkfi",
@@ -1970,8 +1971,7 @@ dependencies = [
[[package]]
name = "halo2_gadgets"
version = "0.1.0-beta.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7524b798b8b3689a198cd87ee1d22fe3ca007a51d35c4093f32d75c0efc30abe"
source = "git+https://github.com/parazyd/halo2?branch=clone-impls-keys#a6d7785ddc86e897b917d8450055d0d6b2494b5d"
dependencies = [
"arrayvec 0.7.2",
"bitvec 0.22.3",
@@ -1990,8 +1990,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.1.0-beta.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0240b05b791cccfd6451b010b19711280e63b87f495bd84df0103f35c9139e7"
source = "git+https://github.com/parazyd/halo2?branch=clone-impls-keys#a6d7785ddc86e897b917d8450055d0d6b2494b5d"
dependencies = [
"backtrace",
"blake2b_simd 1.0.0",

View File

@@ -114,8 +114,11 @@ blake2b_simd = {version = "1.0.0", optional = true}
pasta_curves = {version = "0.3.1", optional = true}
crypto_api_chachapoly = {version = "0.5.0", optional = true}
incrementalmerkletree = {version = "0.3.0-beta.2", optional = true}
halo2_proofs = {version = "0.1.0-beta.4", features = ["dev-graph", "gadget-traces", "sanity-checks"], optional = true}
halo2_gadgets = {version = "0.1.0-beta.3", features = ["dev-graph", "test-dependencies"], optional = true}
#halo2_proofs = {version = "0.1.0-beta.4", features = ["dev-graph", "gadget-traces", "sanity-checks"], optional = true}
#halo2_gadgets = {version = "0.1.0-beta.3", features = ["dev-graph", "test-dependencies"], optional = true}
halo2_proofs = {git = "https://github.com/parazyd/halo2", branch = "clone-impls-keys", features = ["dev-graph", "gadget-traces", "sanity-checks"], optional = true}
halo2_gadgets = {git = "https://github.com/parazyd/halo2", branch = "clone-impls-keys", features = ["dev-graph", "test-dependencies"], optional = true}
# Smart contract runtime
drk-sdk = {path = "src/sdk", optional = true}
@@ -191,6 +194,7 @@ blockchain = [
"crypto",
"tx",
"net",
"node",
"util",
]

View File

@@ -30,13 +30,13 @@ $(BINS): token_lists $(BINDEPS)
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) build --all-features --release --package $@
cp -f target/release/$@ $@
check:
check: token_lists
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) hack check --release --feature-powerset --all
fix:
fix: token_lists
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) clippy --release --all-features --fix --allow-dirty --all
clippy:
clippy: token_lists
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) clippy --release --all-features --all
# zkas source files which we want to compile for tests
@@ -46,7 +46,7 @@ VM_BIN = $(VM_SRC:=.bin)
$(VM_BIN): zkas $(VM_SRC)
./zkas $(basename $@) -o $@
test: $(VM_BIN) test-tx
test: token_lists $(VM_BIN) test-tx
RUSTFLAGS="$(RUSTFLAGS)" $(CARGO) test --release --all-features --all
test-tx:

View File

@@ -13,6 +13,7 @@ async-channel = "1.6.1"
async-executor = "1.4.1"
async-std = "1.11.0"
async-trait = "0.1.53"
blake3 = "1.3.1"
chrono = "0.4.19"
ctrlc-async = {version = "3.2.2", default-features = false, features = ["async-std", "termination"]}
darkfi = {path = "../../", features = ["blockchain", "wallet", "rpc", "net", "node"]}

View File

@@ -13,6 +13,11 @@ pub enum RpcError {
KeypairNotFound = -32105,
InvalidKeypair = -32106,
UnknownSlot = -32107,
TxBuildFail = -32108,
NetworkNameError = -32109,
ParseError = -32110,
TxBroadcastFail = -32111,
NotYetSynced = -32112,
}
fn to_tuple(e: RpcError) -> (i64, String) {
@@ -24,6 +29,11 @@ fn to_tuple(e: RpcError) -> (i64, String) {
RpcError::KeypairNotFound => "Keypair not found",
RpcError::InvalidKeypair => "Invalid keypair",
RpcError::UnknownSlot => "Did not find slot",
RpcError::TxBuildFail => "Failed building transaction",
RpcError::NetworkNameError => "Unknown network name",
RpcError::ParseError => "Parse error",
RpcError::TxBroadcastFail => "Failed broadcasting transaction",
RpcError::NotYetSynced => "Blockchain not yet synced",
};
(e as i64, msg.to_string())

View File

@@ -1,11 +1,10 @@
use std::net::SocketAddr;
use std::{net::SocketAddr, str::FromStr};
use async_executor::Executor;
use async_std::sync::{Arc, Mutex};
use async_trait::async_trait;
use easy_parallel::Parallel;
use futures_lite::future;
use lazy_init::Lazy;
use log::{error, info};
use serde_derive::Deserialize;
use simplelog::{ColorChoice, TermLogger, TerminalMode};
@@ -14,9 +13,7 @@ use structopt_toml::StructOptToml;
use url::Url;
use darkfi::{
async_daemonize,
blockchain::{NullifierStore, RootStore},
cli_desc,
async_daemonize, cli_desc,
consensus::{
proto::{
ProtocolParticipant, ProtocolProposal, ProtocolSync, ProtocolSyncConsensus, ProtocolTx,
@@ -27,10 +24,14 @@ use darkfi::{
util::Timestamp,
ValidatorState, MAINNET_GENESIS_HASH_BYTES, TESTNET_GENESIS_HASH_BYTES,
},
crypto::token_list::{DrkTokenList, TokenList},
crypto::{
address::Address,
keypair::PublicKey,
token_list::{DrkTokenList, TokenList},
},
net,
net::P2pPtr,
node::{Client, State},
node::Client,
rpc::{
jsonrpc,
jsonrpc::{
@@ -44,7 +45,7 @@ use darkfi::{
expand_path,
path::get_config_path,
},
wallet::walletdb::{init_wallet, WalletPtr},
wallet::walletdb::init_wallet,
Error, Result,
};
@@ -126,6 +127,14 @@ struct Args {
/// Connect to seed for the syncing protocol (repeatable flag)
sync_seed: Vec<SocketAddr>,
#[structopt(long)]
/// Whitelisted cashier address (repeatable flag)
cashier_pub: Vec<String>,
#[structopt(long)]
/// Whitelisted faucet address (repeatable flag)
faucet_pub: Vec<String>,
#[structopt(short, parse(from_occurrences))]
/// Increase verbosity (-vvv supported)
verbose: u8,
@@ -137,11 +146,10 @@ struct Args {
pub struct Darkfid {
synced: Mutex<bool>, // AtomicBool is weird in Arc
client: Client,
consensus_p2p: Option<P2pPtr>,
sync_p2p: Option<P2pPtr>,
client: Arc<Client>,
validator_state: ValidatorStatePtr,
state: Arc<Mutex<State>>,
drk_tokenlist: DrkTokenList,
btc_tokenlist: TokenList,
eth_tokenlist: TokenList,
@@ -151,6 +159,7 @@ pub struct Darkfid {
// JSON-RPC methods
mod rpc_blockchain;
mod rpc_misc;
mod rpc_tx;
mod rpc_wallet;
#[async_trait]
@@ -165,6 +174,7 @@ impl RequestHandler for Darkfid {
match req.method.as_str() {
Some("ping") => return self.pong(req.id, params).await,
Some("blockchain.get_slot") => return self.get_slot(req.id, params).await,
Some("tx.transfer") => return self.transfer(req.id, params).await,
Some("wallet.keygen") => return self.keygen(req.id, params).await,
Some("wallet.get_key") => return self.get_key(req.id, params).await,
Some("wallet.export_keypair") => return self.export_keypair(req.id, params).await,
@@ -180,8 +190,6 @@ impl RequestHandler for Darkfid {
impl Darkfid {
pub async fn new(
db: &sled::Db,
wallet: WalletPtr,
validator_state: ValidatorStatePtr,
consensus_p2p: Option<P2pPtr>,
sync_p2p: Option<P2pPtr>,
@@ -195,30 +203,14 @@ impl Darkfid {
TokenList::new(include_bytes!("../../../contrib/token/solana_token_list.min.json"))?;
let drk_tokenlist = DrkTokenList::new(&sol_tokenlist, &eth_tokenlist, &btc_tokenlist)?;
// Initialize Client
let client = Client::new(wallet).await?;
let tree = client.get_tree().await?;
let merkle_roots = RootStore::new(db)?;
let nullifiers = NullifierStore::new(db)?;
// Initialize State
let state = Arc::new(Mutex::new(State {
tree,
merkle_roots,
nullifiers,
cashier_pubkeys: vec![],
faucet_pubkeys: vec![],
mint_vk: Lazy::new(),
burn_vk: Lazy::new(),
}));
let client = validator_state.read().await.client.clone();
Ok(Self {
synced: Mutex::new(false),
client,
consensus_p2p,
sync_p2p,
client,
validator_state,
state,
drk_tokenlist,
btc_tokenlist,
eth_tokenlist,
@@ -258,11 +250,35 @@ async fn realmain(args: Args, ex: Arc<Executor<'_>>) -> Result<()> {
};
// TODO: sqldb init cleanup
Client::new(wallet.clone()).await?;
let address = wallet.get_default_address().await?;
// Initialize Client
let client = Arc::new(Client::new(wallet).await?);
// Parse cashier addresses
let mut cashier_pubkeys = vec![];
for i in args.cashier_pub {
let addr = Address::from_str(&i)?;
let pk = PublicKey::try_from(addr)?;
cashier_pubkeys.push(pk);
}
// Parse faucet addresses
let mut faucet_pubkeys = vec![];
for i in args.faucet_pub {
let addr = Address::from_str(&i)?;
let pk = PublicKey::try_from(addr)?;
faucet_pubkeys.push(pk);
}
// Initialize validator state
let state = ValidatorState::new(&sled_db, address, genesis_ts, genesis_data)?;
let state = ValidatorState::new(
&sled_db,
genesis_ts,
genesis_data,
client,
cashier_pubkeys,
faucet_pubkeys,
)
.await?;
let sync_p2p = {
info!("Registering block sync P2P protocols...");
@@ -357,9 +373,7 @@ async fn realmain(args: Args, ex: Arc<Executor<'_>>) -> Result<()> {
};
// Initialize program state
let darkfid =
Darkfid::new(&sled_db, wallet, state.clone(), consensus_p2p.clone(), sync_p2p.clone())
.await?;
let darkfid = Darkfid::new(state.clone(), consensus_p2p.clone(), sync_p2p.clone()).await?;
let darkfid = Arc::new(darkfid);
// JSON-RPC server
@@ -383,7 +397,7 @@ async fn realmain(args: Args, ex: Arc<Executor<'_>>) -> Result<()> {
}
// Consensus protocol
if args.consensus {
if args.consensus && *darkfid.synced.lock().await {
info!("Starting consensus P2P network");
consensus_p2p.clone().unwrap().start(ex.clone()).await?;
let _ex = ex.clone();
@@ -397,6 +411,8 @@ async fn realmain(args: Args, ex: Arc<Executor<'_>>) -> Result<()> {
info!("Starting consensus protocol task");
ex.spawn(proposal_task(consensus_p2p.unwrap(), state)).detach();
} else {
info!("Not starting consensus P2P network");
}
// Wait for SIGINT

132
bin/darkfid2/src/rpc_tx.rs Normal file
View File

@@ -0,0 +1,132 @@
use std::str::FromStr;
use log::{error, warn};
use serde_json::{json, Value};
use darkfi::{
consensus::Tx,
crypto::{address::Address, keypair::PublicKey},
rpc::{
jsonrpc,
jsonrpc::{
ErrorCode::{
InternalError, InvalidAddressParam, InvalidAmountParam, InvalidParams,
InvalidTokenIdParam,
},
JsonResult,
},
},
util::{decode_base10, serial::serialize, NetworkName},
};
use super::Darkfid;
use crate::{server_error, RpcError};
impl Darkfid {
// RPCAPI:
// Transfer a given amount of some token to the given address.
// Returns a transaction ID upon success.
// --> {"jsonrpc": "2.0", "method": "tx.transfer", "params": ["darkfi" "gdrk", "1DarkFi...", 12.0], "id": 1}
// <-- {"jsonrpc": "2.0", "result": "txID...", "id": 1}
pub async fn transfer(&self, id: Value, params: &[Value]) -> JsonResult {
if params.len() != 4 ||
!params[0].is_string() ||
!params[1].is_string() ||
!params[2].is_string() ||
!params[3].is_f64()
{
return jsonrpc::error(InvalidParams, None, id).into()
}
let network = params[0].as_str().unwrap();
let token = params[1].as_str().unwrap();
let address = params[2].as_str().unwrap();
let amount = params[3].as_f64().unwrap();
if *self.synced.lock().await == false {
error!("transfer(): Blockchain is not yet synced");
return server_error(RpcError::NotYetSynced, id)
}
let address = match Address::from_str(address) {
Ok(v) => v,
Err(e) => {
error!("transfer(): Failed parsing address from string: {}", e);
return jsonrpc::error(InvalidAddressParam, None, id).into()
}
};
let pubkey = match PublicKey::try_from(address) {
Ok(v) => v,
Err(e) => {
error!("transfer(): Failed parsing PublicKey from Address: {}", e);
return server_error(RpcError::ParseError, id)
}
};
let amount = amount.to_string();
let amount = match decode_base10(&amount, 8, true) {
Ok(v) => v,
Err(e) => {
error!("transfer(): Failed parsing amount from string: {}", e);
return jsonrpc::error(InvalidAmountParam, None, id).into()
}
};
let amount: u64 = match amount.try_into() {
Ok(v) => v,
Err(e) => {
error!("transfer(): Failed converting biguint to u64: {}", e);
return jsonrpc::error(InternalError, None, id).into()
}
};
let network = match NetworkName::from_str(network) {
Ok(v) => v,
Err(e) => {
error!("transfer(): Failed parsing NetworkName: {}", e);
return server_error(RpcError::NetworkNameError, id)
}
};
let token_id = if let Some(token_id) =
self.drk_tokenlist.tokens[&network].get(&token.to_uppercase())
{
token_id
} else {
return jsonrpc::error(InvalidTokenIdParam, None, id).into()
};
let tx = match self
.client
.build_transaction(
pubkey,
amount,
*token_id,
false,
self.validator_state.read().await.state_machine.clone(),
)
.await
{
Ok(v) => v,
Err(e) => {
error!("transfer(): Failed building transaction: {}", e);
return server_error(RpcError::TxBuildFail, id)
}
};
if let Some(sync_p2p) = &self.sync_p2p {
match sync_p2p.broadcast(Tx(tx.clone())).await {
Ok(()) => {}
Err(e) => {
error!("transfer(): Failed broadcasting transaction: {}", e);
return server_error(RpcError::TxBroadcastFail, id)
}
}
} else {
warn!("No sync P2P network, not broadcasting transaction.");
}
let tx_hash = blake3::hash(&serialize(&tx)).to_hex().as_str().to_string();
jsonrpc::response(json!(tx_hash), id).into()
}
}

View File

@@ -6,7 +6,6 @@ use async_trait::async_trait;
use chrono::Utc;
use easy_parallel::Parallel;
use futures_lite::future;
use lazy_init::Lazy;
use log::{debug, error, info};
use num_bigint::BigUint;
use serde_derive::Deserialize;
@@ -17,18 +16,17 @@ use structopt_toml::StructOptToml;
use url::Url;
use darkfi::{
async_daemonize,
blockchain::{NullifierStore, RootStore},
cli_desc,
async_daemonize, cli_desc,
consensus::{
proto::{ProtocolSync, ProtocolTx},
task::block_sync_task,
Timestamp, Tx, ValidatorState, MAINNET_GENESIS_HASH_BYTES, TESTNET_GENESIS_HASH_BYTES,
Timestamp, Tx, ValidatorState, ValidatorStatePtr, MAINNET_GENESIS_HASH_BYTES,
TESTNET_GENESIS_HASH_BYTES,
},
crypto::{keypair::PublicKey, types::DrkTokenId},
crypto::{address::Address, keypair::PublicKey, types::DrkTokenId},
net,
net::P2pPtr,
node::{Client, State},
node::Client,
rpc::{
jsonrpc,
jsonrpc::{
@@ -44,7 +42,7 @@ use darkfi::{
serial::serialize,
sleep,
},
wallet::walletdb::{init_wallet, WalletPtr},
wallet::walletdb::init_wallet,
Error, Result,
};
@@ -102,6 +100,14 @@ struct Args {
/// Connect to peer for the syncing protocol (repeatable flag)
sync_peer: Vec<SocketAddr>,
#[structopt(long)]
/// Whitelisted cashier address (repeatable flag)
cashier_pub: Vec<String>,
#[structopt(long)]
/// Whitelisted faucet address (repeatable flag)
faucet_pub: Vec<String>,
#[structopt(long, default_value = "600")]
/// Airdrop timeout limit in seconds
airdrop_timeout: i64,
@@ -120,13 +126,13 @@ struct Args {
}
pub struct Faucetd {
synced: Mutex<bool>, // AtomicBool is weird in Arc
sync_p2p: P2pPtr,
client: Arc<Client>,
validator_state: ValidatorStatePtr,
airdrop_timeout: i64,
airdrop_limit: BigUint,
airdrop_map: Arc<Mutex<HashMap<String, i64>>>,
synced: Mutex<bool>, // AtomicBool is weird in Arc
client: Client,
p2p: P2pPtr,
state: Arc<Mutex<State>>,
airdrop_map: Arc<Mutex<HashMap<Address, i64>>>,
}
#[async_trait]
@@ -147,39 +153,21 @@ impl RequestHandler for Faucetd {
impl Faucetd {
pub async fn new(
db: &sled::Db,
wallet: WalletPtr,
p2p: P2pPtr,
validator_state: ValidatorStatePtr,
sync_p2p: P2pPtr,
timeout: i64,
limit: BigUint,
) -> Result<Self> {
// Initialize client
let client = Client::new(wallet).await?;
let tree = client.get_tree().await?;
let merkle_roots = RootStore::new(db)?;
let nullifiers = NullifierStore::new(db)?;
let kp = client.main_keypair.lock().await.public;
// Initialize state
let state = Arc::new(Mutex::new(State {
tree,
merkle_roots,
nullifiers,
cashier_pubkeys: vec![],
faucet_pubkeys: vec![kp],
mint_vk: Lazy::new(),
burn_vk: Lazy::new(),
}));
let client = validator_state.read().await.client.clone();
Ok(Self {
synced: Mutex::new(false),
sync_p2p,
client,
validator_state,
airdrop_timeout: timeout,
airdrop_limit: limit,
airdrop_map: Arc::new(Mutex::new(HashMap::new())),
synced: Mutex::new(false),
client,
p2p,
state,
})
}
@@ -193,39 +181,69 @@ impl Faucetd {
return jsonrpc::error(InvalidParams, None, id).into()
}
let pubkey = match PublicKey::from_str(params[0].as_str().unwrap()) {
if *self.synced.lock().await == false {
error!("airdrop(): Blockchain is not yet synced");
return jsonrpc::error(InternalError, None, id).into()
}
let address = match Address::from_str(params[0].as_str().unwrap()) {
Ok(v) => v,
Err(_) => return server_error(RpcError::ParseError, id),
Err(_) => {
error!("airdrop(): Failed parsing address from string");
return server_error(RpcError::ParseError, id)
}
};
let amount = match decode_base10(params[1].as_str().unwrap(), 8, true) {
let pubkey = match PublicKey::try_from(address) {
Ok(v) => v,
Err(_) => return server_error(RpcError::ParseError, id),
Err(_) => {
error!("airdrop(): Failed parsing PublicKey from Address");
return server_error(RpcError::ParseError, id)
}
};
let amount = params[1].as_f64().unwrap().to_string();
let amount = match decode_base10(&amount, 8, true) {
Ok(v) => v,
Err(_) => {
error!("airdrop(): Failed parsing amount from string");
return server_error(RpcError::ParseError, id)
}
};
if amount > self.airdrop_limit {
return server_error(RpcError::AmountExceedsLimit, id)
}
// Check if there was a previous airdrop and the timeout has passed.
let now = Utc::now().timestamp();
let map = self.airdrop_map.lock().await;
if let Some(last_airdrop) = map.get(params[1].as_str().unwrap()) {
if let Some(last_airdrop) = map.get(&address) {
if now - last_airdrop <= self.airdrop_timeout {
return server_error(RpcError::TimeLimitReached, id)
}
};
drop(map);
if *self.synced.lock().await == false {
error!("airdrop(): Blockchain is not yet synced");
return jsonrpc::error(InternalError, None, id).into()
}
// TODO: Token ID decision
// TODO: Rename this function to tx build
let token_id = DrkTokenId::from(1);
let amnt: u64 = match amount.try_into() {
Ok(v) => v,
Err(e) => {
error!("airdrop(): Failed converting biguint to u64: {}", e);
return jsonrpc::error(InternalError, None, id).into()
}
};
let tx = match self
.client
.send(pubkey, amount.try_into().unwrap(), DrkTokenId::from(1), true, self.state.clone())
.build_transaction(
pubkey,
amnt,
token_id,
true,
self.validator_state.read().await.state_machine.clone(),
)
.await
{
Ok(v) => v,
@@ -235,10 +253,8 @@ impl Faucetd {
}
};
let tx_hash = blake3::hash(&serialize(&tx)).to_hex().as_str().to_string();
// Broadcast transaction to the network.
match self.p2p.broadcast(Tx(tx)).await {
match self.sync_p2p.broadcast(Tx(tx.clone())).await {
Ok(()) => {}
Err(e) => {
error!("airdrop(): Failed broadcasting transaction: {}", e);
@@ -246,15 +262,17 @@ impl Faucetd {
}
}
// Add/Update this airdrop into the hashmap
let mut map = self.airdrop_map.lock().await;
map.insert(params[1].as_str().unwrap().to_string(), now);
map.insert(address, now);
drop(map);
let tx_hash = blake3::hash(&serialize(&tx)).to_hex().as_str().to_string();
jsonrpc::response(json!(tx_hash), id).into()
}
}
async fn prune_airdrop_map(map: Arc<Mutex<HashMap<String, i64>>>, timeout: i64) {
async fn prune_airdrop_map(map: Arc<Mutex<HashMap<Address, i64>>>, timeout: i64) {
loop {
sleep(timeout as u64).await;
debug!("Pruning airdrop map");
@@ -310,11 +328,35 @@ async fn realmain(args: Args, ex: Arc<Executor<'_>>) -> Result<()> {
};
// TODO: sqldb init cleanup
Client::new(wallet.clone()).await?;
let address = wallet.get_default_address().await?;
// Initialize client
let client = Arc::new(Client::new(wallet.clone()).await?);
// Parse cashier addresses
let mut cashier_pubkeys = vec![];
for i in args.cashier_pub {
let addr = Address::from_str(&i)?;
let pk = PublicKey::try_from(addr)?;
cashier_pubkeys.push(pk);
}
// Parse faucet addresses
let mut faucet_pubkeys = vec![wallet.get_default_keypair().await?.public];
for i in args.faucet_pub {
let addr = Address::from_str(&i)?;
let pk = PublicKey::try_from(addr)?;
faucet_pubkeys.push(pk);
}
// Initialize validator state
let state = ValidatorState::new(&sled_db, address, genesis_ts, genesis_data)?;
let state = ValidatorState::new(
&sled_db,
genesis_ts,
genesis_data,
client,
cashier_pubkeys,
faucet_pubkeys,
)
.await?;
// P2P network. The faucet doesn't participate in consensus, so we only
// build the sync protocol.
@@ -327,8 +369,8 @@ async fn realmain(args: Args, ex: Arc<Executor<'_>>) -> Result<()> {
..Default::default()
};
let p2p = net::P2p::new(network_settings).await;
let registry = p2p.protocol_registry();
let sync_p2p = net::P2p::new(network_settings).await;
let registry = sync_p2p.protocol_registry();
info!("Registering block sync P2P protocols...");
let _state = state.clone();
@@ -352,7 +394,7 @@ async fn realmain(args: Args, ex: Arc<Executor<'_>>) -> Result<()> {
// Initialize program state
let faucetd =
Faucetd::new(&sled_db, wallet, p2p.clone(), airdrop_timeout, airdrop_limit).await?;
Faucetd::new(state.clone(), sync_p2p.clone(), airdrop_timeout, airdrop_limit).await?;
let faucetd = Arc::new(faucetd);
// Task to periodically clean up the hashmap of airdrops.
@@ -363,9 +405,9 @@ async fn realmain(args: Args, ex: Arc<Executor<'_>>) -> Result<()> {
ex.spawn(listen_and_serve(args.rpc_listen, faucetd.clone())).detach();
info!("Starting sync P2P network");
p2p.clone().start(ex.clone()).await?;
sync_p2p.clone().start(ex.clone()).await?;
let _ex = ex.clone();
let _sync_p2p = p2p.clone();
let _sync_p2p = sync_p2p.clone();
ex.spawn(async move {
if let Err(e) = _sync_p2p.run(_ex).await {
error!("Failed starting sync P2P network: {}", e);
@@ -373,7 +415,7 @@ async fn realmain(args: Args, ex: Arc<Executor<'_>>) -> Result<()> {
})
.detach();
match block_sync_task(p2p.clone(), state.clone()).await {
match block_sync_task(sync_p2p.clone(), state.clone()).await {
Ok(()) => *faucetd.synced.lock().await = true,
Err(e) => error!("Failed syncing blockchain: {}", e),
}

View File

@@ -1,8 +1,8 @@
## JSON-RPC listen URL
#rpc_listen="127.0.0.1:8857"
#rpc_listen="127.0.0.1:11055"
## IRC listen URL
#irc_listen="127.0.0.1:8855"
#irc_listen="127.0.0.1:11066"
## Sets Datastore Path
#datastore="~/.config/ircd"
@@ -13,7 +13,7 @@
#inbound="127.0.0.1:11002"
## Connection slots
#outbound_connections=0
#outbound_connections=5
## P2P external address
#external_addr="127.0.0.1:11002"

View File

@@ -1,5 +1,5 @@
use async_std::net::{TcpListener, TcpStream};
use std::{net::SocketAddr, path::PathBuf, sync::Arc};
use std::{net::SocketAddr, sync::Arc};
use async_channel::Receiver;
use async_executor::Executor;
@@ -16,7 +16,7 @@ use darkfi::{
rpc::rpcserver::{listen_and_serve, RpcServerConfig},
util::{
cli::{log_config, spawn_config},
path::get_config_path,
path::{expand_path, get_config_path},
},
Error, Result,
};
@@ -103,7 +103,7 @@ async fn realmain(settings: Args, executor: Arc<Executor<'_>>) -> Result<()> {
let local_addr = listener.local_addr()?;
info!("Listening on {}", local_addr);
let datastore_path = PathBuf::from(&settings.datastore);
let datastore_path = expand_path(&settings.datastore)?;
let net_settings = settings.net;
//

View File

@@ -18,10 +18,10 @@ pub struct Args {
#[structopt(long)]
pub config: Option<String>,
/// JSON-RPC listen URL
#[structopt(long = "rpc", default_value = "127.0.0.1:8857")]
#[structopt(long = "rpc", default_value = "127.0.0.1:11055")]
pub rpc_listen: SocketAddr,
/// IRC listen URL
#[structopt(long = "irc", default_value = "127.0.0.1:8855")]
#[structopt(long = "irc", default_value = "127.0.0.1:11066")]
pub irc_listen: SocketAddr,
/// Sets Datastore Path
#[structopt(long, default_value = "~/.config/ircd")]

View File

@@ -1,6 +1,6 @@
use clap::{CommandFactory, Parser};
use log::error;
use prettytable::{cell, format, row, table, Cell, Row, Table};
use prettytable::{cell, format, row, table};
use serde_json::{json, Value};
use simplelog::{ColorChoice, TermLogger, TerminalMode};
@@ -18,181 +18,140 @@ mod util;
use crate::{
jsonrpc::{add, get_by_id, get_state, list, set_comment, set_state, update},
util::{
desc_in_editor, due_as_timestamp, get_comments, get_events, get_from_task, set_title,
timestamp_to_date, CliTau, CliTauSubCommands, TaskInfo, TauConfig, CONFIG_FILE_CONTENTS,
desc_in_editor, due_as_timestamp, get_comments, get_events, get_from_task, list_tasks,
set_title, timestamp_to_date, CliTau, CliTauSubCommands, TaskInfo, TauConfig,
CONFIG_FILE_CONTENTS,
},
};
async fn start(options: CliTau, config: TauConfig) -> Result<()> {
let rpc_addr = &format!("tcp://{}", &config.rpc_listen.url.clone());
let rpc_addr = &format!("tcp://{}", &config.rpc_listen.clone());
match options.command {
Some(CliTauSubCommands::Add { title, desc, assign, project, due, rank }) => {
let title = match title {
Some(t) => t,
None => set_title()?,
};
let desc = match desc {
Some(d) => Some(d),
None => desc_in_editor()?,
};
let assign: Vec<String> = match assign {
Some(a) => a.split(',').map(|s| s.into()).collect(),
None => vec![],
};
let project: Vec<String> = match project {
Some(p) => p.split(',').map(|s| s.into()).collect(),
None => vec![],
};
let due = match due {
Some(d) => due_as_timestamp(&d),
None => None,
};
let rank = rank.unwrap_or(0.0);
add(
rpc_addr,
json!([{"title": title, "desc": desc, "assign": assign, "project": project, "due": due, "rank": rank}]),
)
.await?;
}
Some(CliTauSubCommands::Update { id, key, value }) => {
let value = value.as_str().trim();
let updated_value: Value = match key.as_str() {
"due" => {
json!(due_as_timestamp(value))
}
"rank" => {
json!(value.parse::<f32>()?)
}
"project" | "assign" => {
json!(value.split(',').collect::<Vec<&str>>())
}
_ => {
json!(value)
}
};
update(rpc_addr, id, json!({ key: updated_value })).await?;
}
Some(CliTauSubCommands::SetState { id, state }) => {
if state.as_str() == "open" {
set_state(rpc_addr, id, state.trim()).await?;
} else if state.as_str() == "pause" {
set_state(rpc_addr, id, state.trim()).await?;
} else if state.as_str() == "stop" {
set_state(rpc_addr, id, state.trim()).await?;
} else {
error!("Task state could only be one of three states: open, pause or stop");
}
}
Some(CliTauSubCommands::GetState { id }) => {
let state = get_state(rpc_addr, id).await?;
println!("Task with id {} is: {}", id, state);
}
Some(CliTauSubCommands::SetComment { id, author, content }) => {
set_comment(rpc_addr, id, author.trim(), content.trim()).await?;
}
Some(CliTauSubCommands::GetComment { id }) => {
let rep = get_by_id(rpc_addr, id).await?;
let comments = get_comments(rep)?;
println!("Comments on Task with id {}:\n{}", id, comments);
}
Some(CliTauSubCommands::Get { id }) => {
let task = get_by_id(rpc_addr, id).await?;
let taskinfo: TaskInfo = serde_json::from_value(task.clone())?;
let current_state: String = serde_json::from_value(get_state(rpc_addr, id).await?)?;
let mut table = table!([Bd => "ref_id", &taskinfo.ref_id],
["id", &taskinfo.id.to_string()],
[Bd =>"title", &taskinfo.title],
["desc", &taskinfo.desc],
[Bd =>"assign", get_from_task(task.clone(), "assign")?],
["project", get_from_task(task.clone(), "project")?],
[Bd =>"due", timestamp_to_date(task["due"].clone(),"date")],
["rank", &taskinfo.rank.to_string()],
[Bd =>"created_at", timestamp_to_date(task["created_at"].clone(), "datetime")],
["current_state", &current_state],
[Bd => "comments", get_comments(task.clone())?]);
table.set_format(*format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR);
table.set_titles(row!["Name", "Value"]);
table.printstd();
let mut event_table = table!(["events", get_events(task.clone())?]);
event_table.set_format(*format::consts::FORMAT_NO_COLSEP);
event_table.printstd();
}
Some(CliTauSubCommands::List {}) | None => {
let rep = list(rpc_addr, json!([])).await?;
let mut table = Table::new();
table.set_format(*format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR);
table.set_titles(row!["ID", "Title", "Project", "Assigned", "Due", "Rank"]);
let mut tasks: Vec<Value> = serde_json::from_value(rep)?;
tasks.sort_by(|a, b| b["rank"].as_f64().partial_cmp(&a["rank"].as_f64()).unwrap());
let (max_rank, min_rank) = if !tasks.is_empty() {
(
serde_json::from_value(tasks[0]["rank"].clone())?,
serde_json::from_value(tasks[tasks.len() - 1]["rank"].clone())?,
)
} else {
(0.0, 0.0)
};
for task in tasks {
let events: Vec<Value> = serde_json::from_value(task["events"].clone())?;
let state = match events.last() {
Some(s) => s["action"].as_str().unwrap(),
None => "open",
if !options.filter.is_empty() {
let rep = list(rpc_addr, json!([])).await?;
list_tasks(rep, options.filter)?;
} else {
match options.command {
Some(CliTauSubCommands::Add { title, desc, assign, project, due, rank }) => {
let title = match title {
Some(t) => t,
None => set_title()?,
};
let rank = task["rank"].as_f64().unwrap_or(0.0) as f32;
let desc = match desc {
Some(d) => Some(d),
None => desc_in_editor()?,
};
let (max_style, min_style, mid_style, gen_style) = if state == "open" {
("bFC", "Fb", "Fc", "")
let assign: Vec<String> = match assign {
Some(a) => a.split(',').map(|s| s.into()).collect(),
None => vec![],
};
let project: Vec<String> = match project {
Some(p) => p.split(',').map(|s| s.into()).collect(),
None => vec![],
};
let due = match due {
Some(d) => due_as_timestamp(&d),
None => None,
};
let rank = rank.unwrap_or(0.0);
add(
rpc_addr,
json!([{"title": title, "desc": desc, "assign": assign, "project": project, "due": due, "rank": rank}]),
)
.await?;
}
Some(CliTauSubCommands::Update { id, key, value }) => {
let value = value.as_str().trim();
let updated_value: Value = match key.as_str() {
"due" => {
json!(due_as_timestamp(value))
}
"rank" => {
json!(value.parse::<f32>()?)
}
"project" | "assign" => {
json!(value.split(',').collect::<Vec<&str>>())
}
_ => {
json!(value)
}
};
update(rpc_addr, id, json!({ key: updated_value })).await?;
}
Some(CliTauSubCommands::SetState { id, state }) => {
if state.as_str() == "open" {
set_state(rpc_addr, id, state.trim()).await?;
} else if state.as_str() == "pause" {
set_state(rpc_addr, id, state.trim()).await?;
} else if state.as_str() == "stop" {
set_state(rpc_addr, id, state.trim()).await?;
} else {
("iFYBd", "iFYBd", "iFYBd", "iFYBd")
};
table.add_row(Row::new(vec![
Cell::new(&task["id"].to_string()).style_spec(gen_style),
Cell::new(task["title"].as_str().unwrap()).style_spec(gen_style),
Cell::new(&get_from_task(task.clone(), "project")?).style_spec(gen_style),
Cell::new(&get_from_task(task.clone(), "assign")?).style_spec(gen_style),
Cell::new(&timestamp_to_date(task["due"].clone(), "date"))
.style_spec(gen_style),
if rank == max_rank {
Cell::new(&rank.to_string()).style_spec(max_style)
} else if rank == min_rank {
Cell::new(&rank.to_string()).style_spec(min_style)
} else {
Cell::new(&rank.to_string()).style_spec(mid_style)
},
]));
error!("Task state could only be one of three states: open, pause or stop");
}
}
Some(CliTauSubCommands::GetState { id }) => {
let state = get_state(rpc_addr, id).await?;
println!("Task with id {} is: {}", id, state);
}
Some(CliTauSubCommands::SetComment { id, author, content }) => {
set_comment(rpc_addr, id, author.trim(), content.trim()).await?;
}
Some(CliTauSubCommands::GetComment { id }) => {
let rep = get_by_id(rpc_addr, id).await?;
let comments = get_comments(rep)?;
println!("Comments on Task with id {}:\n{}", id, comments);
}
Some(CliTauSubCommands::Get { id }) => {
let task = get_by_id(rpc_addr, id).await?;
let taskinfo: TaskInfo = serde_json::from_value(task.clone())?;
let current_state: String = serde_json::from_value(get_state(rpc_addr, id).await?)?;
let mut table = table!([Bd => "ref_id", &taskinfo.ref_id],
["id", &taskinfo.id.to_string()],
[Bd =>"title", &taskinfo.title],
["desc", &taskinfo.desc],
[Bd =>"assign", get_from_task(task.clone(), "assign")?],
["project", get_from_task(task.clone(), "project")?],
[Bd =>"due", timestamp_to_date(task["due"].clone(),"date")],
["rank", &taskinfo.rank.to_string()],
[Bd =>"created_at", timestamp_to_date(task["created_at"].clone(), "datetime")],
["current_state", &current_state],
[Bd => "comments", get_comments(task.clone())?]);
table.set_format(*format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR);
table.set_titles(row!["Name", "Value"]);
table.printstd();
let mut event_table = table!(["events", get_events(task.clone())?]);
event_table.set_format(*format::consts::FORMAT_NO_COLSEP);
event_table.printstd();
}
Some(CliTauSubCommands::List {}) | None => {
let rep = list(rpc_addr, json!([])).await?;
list_tasks(rep, vec![])?;
}
table.printstd();
}
}
Ok(())
}

View File

@@ -2,6 +2,7 @@ use std::{
env::{temp_dir, var},
fs::{self, File},
io::{self, Read, Write},
net::SocketAddr,
ops::Index,
process::Command,
};
@@ -9,17 +10,18 @@ use std::{
use chrono::{Datelike, Local, NaiveDate, NaiveDateTime};
use clap::{Parser, Subcommand};
use log::error;
use prettytable::{cell, format, row, Cell, Row, Table};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use darkfi::{util::cli::UrlConfig, Error, Result};
use darkfi::{Error, Result};
pub const CONFIG_FILE_CONTENTS: &[u8] = include_bytes!("../../taud_config.toml");
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TauConfig {
/// The address where taud should bind its RPC socket
pub rpc_listen: UrlConfig,
/// JSON-RPC listen URL
pub rpc_listen: SocketAddr,
}
#[derive(Subcommand)]
@@ -117,6 +119,9 @@ pub struct CliTau {
pub config: Option<String>,
#[clap(subcommand)]
pub command: Option<CliTauSubCommands>,
#[clap(multiple_values = true)]
/// Search criteria (zero or more)
pub filter: Vec<String>,
}
pub fn due_as_timestamp(due: &str) -> Option<i64> {
@@ -265,3 +270,162 @@ pub fn get_from_task(task: Value, value: &str) -> Result<String> {
}
Ok(result)
}
/// Sort tasks by descending rank and optionally filter them.
///
/// Recognized filters:
/// * `"all"` (or `None`) — keep every task
/// * `"open"` / `"pause"` — keep tasks whose latest event action matches
/// * `"month"` — keep tasks created in the current calendar month
/// * `"assign:<name>"` / `"project:<name>"` — keep tasks listing the value
/// * `"rank><f32>"` / `"rank<<f32>"` — keep tasks above/below the rank
///
/// Unrecognized filters keep all tasks. Malformed task JSON is treated
/// leniently (missing fields never panic), and a non-numeric rank filter
/// value yields an error instead of a panic.
fn sort_and_filter(tasks: Vec<Value>, filter: Option<String>) -> Result<Vec<Value>> {
    let filter = filter.unwrap_or_else(|| "all".to_string());

    // A task's state is the "action" of its most recent event; tasks with
    // no events are considered "open".
    let task_state = |task: &Value| -> String {
        task["events"]
            .as_array()
            .and_then(|events| events.last())
            .and_then(|event| event["action"].as_str())
            .unwrap_or("open")
            .to_string()
    };

    let mut filtered_tasks: Vec<Value> = match filter.as_str() {
        "all" => tasks,

        // Same logic for both states, parameterized by the filter string.
        "open" | "pause" => tasks.into_iter().filter(|task| task_state(task) == filter).collect(),

        "month" => tasks
            .into_iter()
            .filter(|task| {
                // Compare the creation month with the current local month.
                let date = task["created_at"].as_i64().unwrap_or(0);
                NaiveDateTime::from_timestamp(date, 0).month() == Local::today().month()
            })
            .collect(),

        _ if filter.contains("assign:") || filter.contains("project:") => {
            // "key:value" — key is the JSON array field to search.
            let mut parts = filter.split(':');
            let key = parts.next().unwrap_or("");
            let value = parts.next().unwrap_or("");
            tasks
                .into_iter()
                .filter(|task| {
                    task[key]
                        .as_array()
                        .map(|arr| arr.iter().filter_map(|s| s.as_str()).any(|x| x == value))
                        .unwrap_or(false)
                })
                .collect()
        }

        _ if filter.contains("rank>") || filter.contains("rank<") => {
            // "rank>3.5" or "rank<3.5"; '>' takes precedence if both appear.
            let greater = filter.contains('>');
            let sep = if greater { '>' } else { '<' };
            let mut parts = filter.split(sep);
            let key = parts.next().unwrap_or("");
            let value = parts.next().unwrap_or("").parse::<f32>()?;
            tasks
                .into_iter()
                .filter(|task| {
                    let rank = task[key].as_f64().unwrap_or(0.0) as f32;
                    if greater {
                        rank > value
                    } else {
                        rank < value
                    }
                })
                .collect()
        }

        _ => tasks,
    };

    // Highest rank first. Treat missing/NaN ranks as equal instead of
    // panicking on `partial_cmp` returning None.
    filtered_tasks.sort_by(|a, b| {
        b["rank"]
            .as_f64()
            .partial_cmp(&a["rank"].as_f64())
            .unwrap_or(std::cmp::Ordering::Equal)
    });

    Ok(filtered_tasks)
}
pub fn list_tasks(rep: Value, filter: Vec<String>) -> Result<()> {
let mut table = Table::new();
table.set_format(*format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR);
table.set_titles(row!["ID", "Title", "Project", "Assigned", "Due", "Rank"]);
let tasks: Vec<Value> = serde_json::from_value(rep)?;
// we match up to 3 filters to keep things simple and avoid using loops
let tasks = match filter.len() {
1 => sort_and_filter(tasks, Some(filter[0].clone()))?,
2 => {
let res = sort_and_filter(tasks, Some(filter[0].clone()))?;
sort_and_filter(res, Some(filter[1].clone()))?
}
3 => {
let res1 = sort_and_filter(tasks, Some(filter[0].clone()))?;
let res2 = sort_and_filter(res1, Some(filter[1].clone()))?;
sort_and_filter(res2, Some(filter[2].clone()))?
}
_ => sort_and_filter(tasks, None)?,
};
let (max_rank, min_rank) = if !tasks.is_empty() {
(
serde_json::from_value(tasks[0]["rank"].clone())?,
serde_json::from_value(tasks[tasks.len() - 1]["rank"].clone())?,
)
} else {
(0.0, 0.0)
};
for task in tasks {
let events: Vec<Value> = serde_json::from_value(task["events"].clone())?;
let state = match events.last() {
Some(s) => s["action"].as_str().unwrap(),
None => "open",
};
let rank = task["rank"].as_f64().unwrap_or(0.0) as f32;
let (max_style, min_style, mid_style, gen_style) = if state == "open" {
("bFC", "Fb", "Fc", "")
} else {
("iFYBd", "iFYBd", "iFYBd", "iFYBd")
};
table.add_row(Row::new(vec![
Cell::new(&task["id"].to_string()).style_spec(gen_style),
Cell::new(task["title"].as_str().unwrap()).style_spec(gen_style),
Cell::new(&get_from_task(task.clone(), "project")?).style_spec(gen_style),
Cell::new(&get_from_task(task.clone(), "assign")?).style_spec(gen_style),
Cell::new(&timestamp_to_date(task["due"].clone(), "date")).style_spec(gen_style),
if rank == max_rank {
Cell::new(&rank.to_string()).style_spec(max_style)
} else if rank == min_rank {
Cell::new(&rank.to_string()).style_spec(min_style)
} else {
Cell::new(&rank.to_string()).style_spec(mid_style)
},
]));
}
table.printstd();
Ok(())
}

View File

@@ -1,4 +1,7 @@
use std::path::{Path, PathBuf};
use std::{
fs, io,
path::{Path, PathBuf},
};
use chrono::{TimeZone, Utc};
use log::debug;
@@ -62,22 +65,61 @@ impl MonthTasks {
.map_err(TaudError::Darkfi)
}
pub fn load_or_create(date: &Timestamp, dataset_path: &Path) -> TaudResult<Self> {
/// Collect the paths of every file under `<dataset_path>/month`, sorted.
fn get_all(dataset_path: &Path) -> io::Result<Vec<PathBuf>> {
    debug!(target: "tau", "MonthTasks::get_all()");
    let mut paths = Vec::new();
    for entry in fs::read_dir(dataset_path.join("month"))? {
        paths.push(entry?.path());
    }
    paths.sort();
    Ok(paths)
}
/// Build an empty month-task list for `date`, persist it, and return it.
fn create(date: &Timestamp, dataset_path: &Path) -> TaudResult<Self> {
    debug!(target: "tau", "MonthTasks::create()");
    let mut month_tasks = Self::new(&[]);
    month_tasks.set_date(date);
    month_tasks.save(dataset_path)?;
    Ok(month_tasks)
}
pub fn load_or_create(date: Option<&Timestamp>, dataset_path: &Path) -> TaudResult<Self> {
debug!(target: "tau", "MonthTasks::load_or_create()");
match load::<Self>(&Self::get_path(date, dataset_path)) {
Ok(mt) => Ok(mt),
Err(_) => {
let mut mt = Self::new(&[]);
mt.set_date(date);
mt.save(dataset_path)?;
Ok(mt)
// if a date is given we load that date's month tasks
// if not, we load tasks from all months
match date {
Some(date) => match load::<Self>(&Self::get_path(date, dataset_path)) {
Ok(mt) => Ok(mt),
Err(_) => Self::create(date, dataset_path),
},
None => {
let path_all = match Self::get_all(dataset_path) {
Ok(t) => t,
Err(_) => vec![],
};
let mut loaded_mt = Self::new(&[]);
for path in path_all {
let mt = load::<Self>(&path)?;
loaded_mt.created_at = mt.created_at;
for tks in mt.task_tks {
if !loaded_mt.task_tks.contains(&tks) {
loaded_mt.task_tks.push(tks)
}
}
}
Ok(loaded_mt)
}
}
}
pub fn load_current_open_tasks(dataset_path: &Path) -> TaudResult<Vec<TaskInfo>> {
debug!(target: "tau", "MonthTasks::load_current_open_tasks()");
let mt = Self::load_or_create(&get_current_time(), dataset_path)?;
let mt = Self::load_or_create(None, dataset_path)?;
Ok(mt.objects(dataset_path)?.into_iter().filter(|t| t.get_state() != "stop").collect())
}
}
@@ -137,7 +179,7 @@ mod tests {
mt.save(&dataset_path)?;
let mt_load = MonthTasks::load_or_create(&get_current_time(), &dataset_path)?;
let mt_load = MonthTasks::load_or_create(Some(&get_current_time()), &dataset_path)?;
assert_eq!(mt, mt_load);
@@ -145,7 +187,7 @@ mod tests {
mt.save(&dataset_path)?;
let mt_load = MonthTasks::load_or_create(&get_current_time(), &dataset_path)?;
let mt_load = MonthTasks::load_or_create(Some(&get_current_time()), &dataset_path)?;
assert_eq!(mt, mt_load);
@@ -156,7 +198,7 @@ mod tests {
task.save(&dataset_path)?;
let mt_load = MonthTasks::load_or_create(&get_current_time(), &dataset_path)?;
let mt_load = MonthTasks::load_or_create(Some(&get_current_time()), &dataset_path)?;
assert!(mt_load.task_tks.contains(&task.ref_id));

View File

@@ -18,7 +18,7 @@ pub struct Args {
#[structopt(long)]
pub config: Option<String>,
/// JSON-RPC listen URL
#[structopt(long = "rpc", default_value = "127.0.0.1:8857")]
#[structopt(long = "rpc", default_value = "127.0.0.1:11055")]
pub rpc_listen: SocketAddr,
/// Sets Datastore Path
#[structopt(long, default_value = "~/.config/tau")]

View File

@@ -123,14 +123,14 @@ impl TaskInfo {
pub fn activate(&self, path: &Path) -> TaudResult<()> {
debug!(target: "tau", "TaskInfo::activate()");
let mut mt = MonthTasks::load_or_create(&self.created_at, path)?;
let mut mt = MonthTasks::load_or_create(Some(&self.created_at), path)?;
mt.add(&self.ref_id);
mt.save(path)
}
pub fn deactivate(&self, path: &Path) -> TaudResult<()> {
debug!(target: "tau", "TaskInfo::deactivate()");
let mut mt = MonthTasks::load_or_create(&self.created_at, path)?;
let mut mt = MonthTasks::load_or_create(Some(&self.created_at), path)?;
mt.remove(&self.ref_id);
mt.save(path)
}

View File

@@ -1,5 +1,5 @@
## JSON-RPC listen URL
#rpc_listen="127.0.0.1:8857"
#rpc_listen="127.0.0.1:11055"
## Sets Datastore Path
#datastore="~/.config/tau"

View File

@@ -4,13 +4,10 @@ DARKFID_JSONRPC = src/clients/darkfid_jsonrpc.md
all:
echo "# darkfid JSON-RPC API" > $(DARKFID_JSONRPC)
./build_jsonrpc.py ../bin/darkfid2/src/main.rs >> $(DARKFID_JSONRPC)
echo "## blockchain methods" >> $(DARKFID_JSONRPC)
./build_jsonrpc.py ../bin/darkfid2/src/rpc_blockchain.rs >> $(DARKFID_JSONRPC)
echo "## wallet methods" >> $(DARKFID_JSONRPC)
./build_jsonrpc.py ../bin/darkfid2/src/rpc_wallet.rs >> $(DARKFID_JSONRPC)
echo "## miscellaneous methods" >> $(DARKFID_JSONRPC)
./build_jsonrpc.py ../bin/darkfid2/src/rpc_misc.rs >> $(DARKFID_JSONRPC)
for i in blockchain tx wallet misc; do \
echo "## $$i methods" >> $(DARKFID_JSONRPC);\
./build_jsonrpc.py ../bin/darkfid2/src/rpc_$$i.rs >> $(DARKFID_JSONRPC);\
done
echo "# cashierd JSON-RPC API" > src/clients/cashierd_jsonrpc.md
./build_jsonrpc.py ../bin/cashierd/src/main.rs \

View File

@@ -8,6 +8,7 @@ use crate::{
const SLED_NULLIFIER_TREE: &[u8] = b"_nullifiers";
#[derive(Clone)]
pub struct NullifierStore(sled::Tree);
impl NullifierStore {

View File

@@ -8,6 +8,7 @@ use crate::{
const SLED_ROOTS_TREE: &[u8] = b"_merkleroots";
#[derive(Clone)]
pub struct RootStore(sled::Tree);
impl RootStore {

View File

@@ -1,14 +1,15 @@
use async_executor::Executor;
use async_std::sync::Arc;
use async_trait::async_trait;
use log::debug;
use log::{debug, warn};
use crate::{
consensus::{BlockProposal, ValidatorStatePtr},
consensus::{BlockProposal, ValidatorState, ValidatorStatePtr},
net::{
ChannelPtr, MessageSubscription, P2pPtr, ProtocolBase, ProtocolBasePtr,
ProtocolJobsManager, ProtocolJobsManagerPtr,
},
node::MemoryState,
Result,
};
@@ -47,6 +48,20 @@ impl ProtocolProposal {
debug!("ProtocolProposal::handle_receive_proposal() recv: {:?}", proposal);
let proposal_copy = (*proposal).clone();
debug!("handle_receive_proposal(): Starting state transition validation");
let canon_state_clone = self.state.read().await.state_machine.lock().await.clone();
let mem_state = MemoryState::new(canon_state_clone);
match ValidatorState::validate_state_transitions(mem_state, &proposal_copy.block.txs) {
Ok(_) => {
debug!("handle_receive_proposal(): State transition valid")
}
Err(e) => {
warn!("handle_receive_proposal(): State transition fail: {}", e);
continue
}
}
let vote = self.state.write().await.receive_proposal(&proposal_copy);
match vote {
Ok(v) => {

View File

@@ -1,17 +1,18 @@
use async_executor::Executor;
use async_std::sync::Arc;
use async_trait::async_trait;
use log::debug;
use log::{debug, error, warn};
use crate::{
consensus::{
block::{BlockInfo, BlockOrder, BlockResponse},
ValidatorStatePtr,
ValidatorState, ValidatorStatePtr,
},
net::{
ChannelPtr, MessageSubscription, P2pPtr, ProtocolBase, ProtocolBasePtr,
ProtocolJobsManager, ProtocolJobsManagerPtr,
},
node::MemoryState,
Result,
};
@@ -85,8 +86,32 @@ impl ProtocolSync {
if !self.consensus_mode {
let info_copy = (*info).clone();
if !self.state.read().await.blockchain.has_block(&info_copy)? {
debug!("handle_receive_block(): Starting state transition validation");
let canon_state_clone =
self.state.read().await.state_machine.lock().await.clone();
let mem_state = MemoryState::new(canon_state_clone);
let state_updates =
match ValidatorState::validate_state_transitions(mem_state, &info.txs) {
Ok(v) => v,
Err(e) => {
warn!("handle_receive_block(): State transition fail: {}", e);
continue
}
};
debug!("ProtocolSync::handle_receive_block(): All state transitions passed");
debug!("ProtocolSync::handle_receive_block(): Updating canon state machine");
match self.state.write().await.update_canon_state(state_updates, None).await {
Ok(()) => {}
Err(e) => {
error!("Failed updating canon state machine: {}", e);
continue
}
}
debug!("ProtocolSync::handle_receive_block(): Appending block to ledger");
self.state.write().await.blockchain.add(&[info_copy.clone()])?;
self.state.write().await.remove_txs(info_copy.txs.clone())?;
self.p2p.broadcast(info_copy).await?;
}

View File

@@ -1,14 +1,15 @@
use async_executor::Executor;
use async_std::sync::Arc;
use async_trait::async_trait;
use log::debug;
use log::{debug, warn};
use crate::{
consensus::{Tx, ValidatorStatePtr},
consensus::{Tx, ValidatorState, ValidatorStatePtr},
net::{
ChannelPtr, MessageSubscription, P2pPtr, ProtocolBase, ProtocolBasePtr,
ProtocolJobsManager, ProtocolJobsManagerPtr,
},
node::MemoryState,
Result,
};
@@ -48,6 +49,17 @@ impl ProtocolTx {
let tx_copy = (*tx).clone();
debug!("ProtocolTx::handle_receive_tx(): Starting state transition validation");
let canon_state_clone = self.state.read().await.state_machine.lock().await.clone();
let mem_state = MemoryState::new(canon_state_clone);
match ValidatorState::validate_state_transitions(mem_state, &[tx_copy.clone()]) {
Ok(_) => debug!("ProtocolTx::handle_receive_tx(): State transition valid"),
Err(e) => {
warn!("ProtocolTx::handle_receive_tx(): State transition fail: {}", e);
continue
}
}
// Nodes use unconfirmed_txs vector as seen_txs pool.
if self.state.write().await.append_tx(tx_copy.clone()) {
self.p2p.broadcast(tx_copy).await?;

View File

@@ -5,8 +5,9 @@ use std::{
time::Duration,
};
use async_std::sync::{Arc, RwLock};
use async_std::sync::{Arc, Mutex, RwLock};
use chrono::{NaiveDateTime, Utc};
use lazy_init::Lazy;
use log::{debug, error, info, warn};
use rand::rngs::OsRng;
@@ -22,6 +23,10 @@ use crate::{
schnorr::{SchnorrPublic, SchnorrSecret},
},
net,
node::{
state::{state_transition, StateUpdate},
Client, MemoryState, State,
},
util::serial::{serialize, Encodable, SerialDecodable, SerialEncodable},
Result,
};
@@ -107,6 +112,10 @@ pub struct ValidatorState {
pub consensus: ConsensusState,
/// Canonical (finalized) blockchain
pub blockchain: Blockchain,
/// Canonical state machine
pub state_machine: Arc<Mutex<State>>,
/// Client providing wallet access
pub client: Arc<Client>,
/// Pending transactions
pub unconfirmed_txs: Vec<Tx>,
/// Participation flag
@@ -115,11 +124,13 @@ pub struct ValidatorState {
impl ValidatorState {
// TODO: Clock sync
pub fn new(
pub async fn new(
db: &sled::Db, // <-- TODO: Avoid this with some wrapping, sled should only be in blockchain
address: Address,
genesis_ts: Timestamp,
genesis_data: blake3::Hash,
client: Arc<Client>,
cashier_pubkeys: Vec<PublicKey>,
faucet_pubkeys: Vec<PublicKey>,
) -> Result<ValidatorStatePtr> {
let secret = SecretKey::random(&mut OsRng);
let public = PublicKey::from_secret(secret);
@@ -128,12 +139,25 @@ impl ValidatorState {
let unconfirmed_txs = vec![];
let participating = false;
let address = client.wallet.get_default_address().await?;
let state_machine = Arc::new(Mutex::new(State {
tree: client.get_tree().await?,
merkle_roots: blockchain.merkle_roots.clone(),
nullifiers: blockchain.nullifiers.clone(),
cashier_pubkeys,
faucet_pubkeys,
mint_vk: Lazy::new(),
burn_vk: Lazy::new(),
}));
let state = Arc::new(RwLock::new(ValidatorState {
address,
secret,
public,
consensus,
blockchain,
state_machine,
client,
unconfirmed_txs,
participating,
}));
@@ -786,4 +810,52 @@ impl ValidatorState {
self.consensus = consensus;
Ok(())
}
// ==========================
// State transition functions
// ==========================
/// Validate state transitions for given transactions and state and
/// return a vector of [`StateUpdate`].
///
/// Each produced update is applied to the in-memory state before the next
/// transaction is checked, so later transactions are validated against the
/// effects of earlier ones. Returns the first validation error encountered.
pub fn validate_state_transitions(state: MemoryState, txs: &[Tx]) -> Result<Vec<StateUpdate>> {
    let mut ret = Vec::with_capacity(txs.len());
    // `state` is owned, so mutate it directly; the old `state.clone()`
    // was a needless full copy.
    let mut st = state;

    for (i, tx) in txs.iter().enumerate() {
        let update = match state_transition(&st, tx.0.clone()) {
            Ok(v) => v,
            Err(e) => {
                warn!("validate_state_transitions(): Failed for tx {}: {}", i, e);
                return Err(e.into())
            }
        };
        st.apply(update.clone());
        ret.push(update);
    }

    Ok(ret)
}
/// Apply a vector of [`StateUpdate`] to the canonical state.
pub async fn update_canon_state(
    &self,
    updates: Vec<StateUpdate>,
    notify: Option<async_channel::Sender<(PublicKey, u64)>>,
) -> Result<()> {
    // Collect the wallet's secret keys; each is handed to `apply` below.
    let keypairs = self.client.get_keypairs().await?;
    let secret_keys: Vec<SecretKey> = keypairs.iter().map(|kp| kp.secret).collect();

    debug!("update_canon_state(): Acquiring state machine lock");
    let mut machine = self.state_machine.lock().await;
    for update in updates {
        machine
            .apply(update, secret_keys.clone(), notify.clone(), self.client.wallet.clone())
            .await?;
    }
    // Release the lock explicitly before logging success.
    drop(machine);
    debug!("update_canon_state(): Dropped state machine lock");

    debug!("update_canon_state(): Successfully applied state updates");
    Ok(())
}
}

View File

@@ -1,11 +1,13 @@
use crate::{
consensus::{
block::{BlockOrder, BlockResponse},
ValidatorStatePtr,
ValidatorState, ValidatorStatePtr,
},
net, Result,
net,
node::MemoryState,
Result,
};
use log::{info, warn};
use log::{debug, info, warn};
/// async task used for block syncing.
pub async fn block_sync_task(p2p: net::P2pPtr, state: ValidatorStatePtr) -> Result<()> {
@@ -35,9 +37,31 @@ pub async fn block_sync_task(p2p: net::P2pPtr, state: ValidatorStatePtr) -> Resu
let order = BlockOrder { sl: last.0, block: last.1 };
channel.send(order).await?;
// Node stores response data. Extra validations can be added here.
let response = response_sub.receive().await?;
state.write().await.blockchain.add(&response.blocks)?;
// Node stores response data.
let resp = response_sub.receive().await?;
// Verify state transitions for all blocks and their respective transactions.
debug!("block_sync_task(): Starting state transition validations");
let mut canon_updates = vec![];
let canon_state_clone = state.read().await.state_machine.lock().await.clone();
let mut mem_state = MemoryState::new(canon_state_clone);
for block in &resp.blocks {
let mut state_updates =
ValidatorState::validate_state_transitions(mem_state.clone(), &block.txs)?;
for update in &state_updates {
mem_state.apply(update.clone());
}
canon_updates.append(&mut state_updates);
}
debug!("block_sync_task(): All state transitions passed");
debug!("block_sync_task(): Updating canon state");
state.write().await.update_canon_state(canon_updates, None).await?;
debug!("block_sync_task(): Appending blocks to ledger");
state.write().await.blockchain.add(&resp.blocks)?;
let last_received = state.read().await.blockchain.last()?.unwrap();
info!("Last received block: {:?} - {:?}", last_received.0, last_received.1);

View File

@@ -12,7 +12,7 @@ enum AddressType {
Payment = 0,
}
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd, Hash)]
pub struct Address(pub [u8; 37]);
impl Address {

View File

@@ -15,7 +15,7 @@ use crate::{
Result,
};
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct VerifyingKey {
pub params: Params<vesta::Affine>,
pub vk: plonk::VerifyingKey<vesta::Affine>,
@@ -29,7 +29,7 @@ impl VerifyingKey {
}
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct ProvingKey {
pub params: Params<vesta::Affine>,
pub pk: plonk::ProvingKey<vesta::Affine>,

View File

@@ -41,7 +41,6 @@ impl Default for Settings {
/// Defines the network settings.
#[derive(Clone, Debug, Deserialize, StructOpt, StructOptToml)]
#[serde(default)]
#[structopt()]
pub struct SettingsOpt {
/// P2P accept address
@@ -49,43 +48,45 @@ pub struct SettingsOpt {
pub inbound: Option<SocketAddr>,
/// Connection slots
#[structopt(long = "slots", default_value = "0")]
pub outbound_connections: u32,
#[structopt(long = "slots")]
pub outbound_connections: Option<u32>,
/// P2P external address
#[structopt(long)]
pub external_addr: Option<SocketAddr>,
/// Peer nodes to connect to
#[serde(default)]
#[structopt(long)]
pub peers: Vec<SocketAddr>,
/// Seed nodes to connect to
#[serde(default)]
#[structopt(long)]
pub seeds: Vec<SocketAddr>,
#[structopt(skip = 0 as u32)]
pub manual_attempt_limit: u32,
#[structopt(skip = 8 as u32)]
pub seed_query_timeout_seconds: u32,
#[structopt(skip = 10 as u32)]
pub connect_timeout_seconds: u32,
#[structopt(skip = 4 as u32)]
pub channel_handshake_seconds: u32,
#[structopt(skip = 10 as u32)]
pub channel_heartbeat_seconds: u32,
#[structopt(skip)]
pub manual_attempt_limit: Option<u32>,
#[structopt(skip)]
pub seed_query_timeout_seconds: Option<u32>,
#[structopt(skip)]
pub connect_timeout_seconds: Option<u32>,
#[structopt(skip)]
pub channel_handshake_seconds: Option<u32>,
#[structopt(skip)]
pub channel_heartbeat_seconds: Option<u32>,
}
impl Into<Settings> for SettingsOpt {
fn into(self) -> Settings {
Settings {
inbound: self.inbound,
outbound_connections: self.outbound_connections,
manual_attempt_limit: self.manual_attempt_limit,
seed_query_timeout_seconds: self.seed_query_timeout_seconds,
connect_timeout_seconds: self.connect_timeout_seconds,
channel_handshake_seconds: self.channel_handshake_seconds,
channel_heartbeat_seconds: self.channel_heartbeat_seconds,
outbound_connections: self.outbound_connections.unwrap_or(0),
manual_attempt_limit: self.manual_attempt_limit.unwrap_or(0),
seed_query_timeout_seconds: self.seed_query_timeout_seconds.unwrap_or(8),
connect_timeout_seconds: self.connect_timeout_seconds.unwrap_or(10),
channel_handshake_seconds: self.channel_handshake_seconds.unwrap_or(4),
channel_heartbeat_seconds: self.channel_heartbeat_seconds.unwrap_or(10),
external_addr: self.external_addr,
peers: self.peers,
seeds: self.seeds,

View File

@@ -8,7 +8,7 @@ use crate::{
crypto::{
address::Address,
coin::Coin,
keypair::{Keypair, PublicKey, SecretKey},
keypair::{Keypair, PublicKey},
merkle_node::MerkleNode,
proof::ProvingKey,
types::DrkTokenId,
@@ -31,7 +31,7 @@ use crate::{
/// This includes, receiving, broadcasting, and building.
pub struct Client {
pub main_keypair: Mutex<Keypair>,
wallet: WalletPtr,
pub wallet: WalletPtr,
mint_pk: Lazy<ProvingKey>,
burn_pk: Lazy<ProvingKey>,
}
@@ -149,10 +149,8 @@ impl Client {
Ok((tx, coins))
}
// TODO: This was changed so it does not broadcast transactions anymore.
// Instead, it returns the transaction itself, which is then able to be
// arbitrarily broadcasted. Rename the function from send() to a better name.
pub async fn send(
/// Build a transaction given the required parameters and state machine.
pub async fn build_transaction(
&self,
pubkey: PublicKey,
amount: u64,
@@ -167,6 +165,10 @@ impl Client {
return Err(ClientFailed::InvalidAmount(0))
}
if !self.wallet.token_id_exists(token_id).await? && !clear_input {
return Err(ClientFailed::NotEnoughValue(amount))
}
let (tx, coins) =
self.build_slab_from_tx(pubkey, amount, token_id, clear_input, state).await?;
for coin in coins.iter() {
@@ -179,48 +181,6 @@ impl Client {
Ok(tx)
}
pub async fn transfer(
&self,
token_id: DrkTokenId,
pubkey: PublicKey,
amount: u64,
state: Arc<Mutex<State>>,
) -> ClientResult<()> {
debug!("transfer(): Start transfer {}", amount);
if self.wallet.token_id_exists(token_id).await? {
self.send(pubkey, amount, token_id, false, state).await?;
debug!("transfer(): Finish transfer {}", amount);
return Ok(())
}
Err(ClientFailed::NotEnoughValue(amount))
}
// TODO: Should this function run on finalized blocks and iterate over its transactions?
async fn update_state(
secret_keys: Vec<SecretKey>,
tx: Transaction,
state: Arc<Mutex<State>>,
wallet: WalletPtr,
notify: Option<async_channel::Sender<(PublicKey, u64)>>,
) -> Result<()> {
debug!("update_state(): Begin state update");
debug!("update_state(): Acquiring state lock");
let update;
{
let state = &*state.lock().await;
update = state_transition(state, tx)?;
}
debug!("update_state(): Trying to apply the new state");
let mut state = state.lock().await;
state.apply(update, secret_keys, notify, wallet).await?;
drop(state);
debug!("update_state(): Successfully updated state");
Ok(())
}
pub async fn init_db(&self) -> Result<()> {
self.wallet.init_db().await
}

72
src/node/memorystate.rs Normal file
View File

@@ -0,0 +1,72 @@
use incrementalmerkletree::{bridgetree::BridgeTree, Frontier};
use log::debug;
use super::state::{ProgramState, State, StateUpdate};
use crate::crypto::{
keypair::PublicKey, merkle_node::MerkleNode, nullifier::Nullifier, proof::VerifyingKey,
};
/// In-memory state extension for state transition validations.
///
/// Wraps a snapshot of the canonical [`State`] and layers the effects of
/// candidate transactions on top of it, so validation never touches the
/// canonical machine.
#[derive(Clone)]
pub struct MemoryState {
    /// Snapshot of the canonical state this overlay was built from
    pub canon: State,
    /// The entire Merkle tree state (copied from `canon`)
    pub tree: BridgeTree<MerkleNode, 32>,
    /// Merkle roots produced by updates applied to this overlay
    /// (starts empty in `new()`; grows in `apply()`)
    pub nullifiers: Vec<Nullifier>,
}
impl ProgramState for MemoryState {
    // Key-validity checks delegate straight to the canonical snapshot.
    fn is_valid_cashier_public_key(&self, public: &PublicKey) -> bool {
        self.canon.is_valid_cashier_public_key(public)
    }
    fn is_valid_faucet_public_key(&self, public: &PublicKey) -> bool {
        self.canon.is_valid_faucet_public_key(public)
    }
    // Merkle roots and nullifiers consult the canonical snapshot first,
    // then the overlay's own additions made by `MemoryState::apply`.
    fn is_valid_merkle(&self, merkle_root: &MerkleNode) -> bool {
        self.canon.is_valid_merkle(merkle_root) || self.merkle_roots.contains(merkle_root)
    }
    fn nullifier_exists(&self, nullifier: &Nullifier) -> bool {
        self.canon.nullifier_exists(nullifier) || self.nullifiers.contains(nullifier)
    }
    // The verifying keys live on the canonical state; the overlay never
    // owns its own copies.
    fn mint_vk(&self) -> &VerifyingKey {
        self.canon.mint_vk()
    }
    fn burn_vk(&self) -> &VerifyingKey {
        self.canon.burn_vk()
    }
}
impl MemoryState {
    /// Create a fresh overlay over `canon_state`, copying its Merkle tree
    /// and starting with empty root/nullifier overlays.
    pub fn new(canon_state: State) -> Self {
        // Clone only the tree instead of the entire `State` (the old code
        // cloned the whole state just to move its tree out afterwards).
        let tree = canon_state.tree.clone();
        Self { canon: canon_state, tree, merkle_roots: vec![], nullifiers: vec![] }
    }

    /// Apply a `StateUpdate` to this overlay only; the canonical state is
    /// left untouched.
    pub fn apply(&mut self, update: StateUpdate) {
        debug!(target: "state_apply", "(in-memory) Extend nullifier set");
        // Copy the update's nullifiers in directly, without the old
        // intermediate `clone()` + `append()` pair.
        self.nullifiers.extend_from_slice(&update.nullifiers);

        debug!(target: "state_apply", "(in-memory) Update Merkle tree and witnesses");
        for coin in update.coins {
            let node = MerkleNode(coin.0);
            self.tree.append(&node);
            self.merkle_roots.push(self.tree.root());
        }

        debug!(target: "state_apply", "(in-memory) Finished apply() successfully.");
    }
}

View File

@@ -3,3 +3,6 @@ pub use client::Client;
pub mod state;
pub use state::State;
pub mod memorystate;
pub use memorystate::MemoryState;

View File

@@ -37,6 +37,7 @@ pub trait ProgramState {
/// A struct representing a state update.
/// This gets applied on top of an existing state.
#[derive(Clone)]
pub struct StateUpdate {
/// All nullifiers in a transaction
pub nullifiers: Vec<Nullifier>,
@@ -109,6 +110,7 @@ pub fn state_transition<S: ProgramState>(state: &S, tx: Transaction) -> VerifyRe
}
/// Struct holding the state which we can apply a [`StateUpdate`] onto.
#[derive(Clone)]
pub struct State {
/// The entire Merkle tree state
pub tree: BridgeTree<MerkleNode, 32>,

View File

@@ -146,7 +146,7 @@ impl WalletDb {
Ok(keypair)
}
async fn get_default_keypair(&self) -> Result<Keypair> {
pub async fn get_default_keypair(&self) -> Result<Keypair> {
debug!("Returning default keypair");
let mut conn = self.conn.acquire().await?;