diff --git a/Cargo.lock b/Cargo.lock index 4b5406361..e14cfa115 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3128,6 +3128,7 @@ name = "explorerd" version = "0.4.1" dependencies = [ "async-trait", + "blake3 1.8.2", "darkfi", "darkfi-sdk", "darkfi-serial", diff --git a/bin/darkfid/genesis_block_localnet b/bin/darkfid/genesis_block_localnet index 45ad1e249..a953cfc5f 100644 --- a/bin/darkfid/genesis_block_localnet +++ b/bin/darkfid/genesis_block_localnet @@ -1 +1 @@ -AYa7rEMKSzoYLxJbN6SG6cSGu/o02E70pmtKI+XwxiWxAAAAAKPqE2YAAAAAAAAAAAAAAAA7PNtvmfHqHEnjNnAS1ULkbCEM4uIYpCgNuv5kw2ETCAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AYa7rEMKSzoYLxJbN6SG6cSGu/o02E70pmtKI+XwxiWxAAAAAH0JGmgAAAAAAAAAAAAAAAA7PNtvmfHqHEnjNnAS1ULkbCEM4uIYpCgNuv5kw2ETCMPjhlzo1AEcT03pj4ZfNZRxh9AwSl4slZFTeM+H5fyGAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= diff --git a/bin/darkfid/genesis_block_mainnet b/bin/darkfid/genesis_block_mainnet index 45ad1e249..5a73429dc 100644 --- a/bin/darkfid/genesis_block_mainnet +++ b/bin/darkfid/genesis_block_mainnet @@ -1 +1 @@ -AYa7rEMKSzoYLxJbN6SG6cSGu/o02E70pmtKI+XwxiWxAAAAAKPqE2YAAAAAAAAAAAAAAAA7PNtvmfHqHEnjNnAS1ULkbCEM4uIYpCgNuv5kw2ETCAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AYa7rEMKSzoYLxJbN6SG6cSGu/o02E70pmtKI+XwxiWxAAAAAK0JGmgAAAAAAAAAAAAAAAA7PNtvmfHqHEnjNnAS1ULkbCEM4uIYpCgNuv5kw2ETCMPjhlzo1AEcT03pj4ZfNZRxh9AwSl4slZFTeM+H5fyGAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= diff --git a/bin/darkfid/genesis_block_testnet b/bin/darkfid/genesis_block_testnet index 45ad1e249..f219f1da4 100644 --- a/bin/darkfid/genesis_block_testnet +++ b/bin/darkfid/genesis_block_testnet @@ -1 +1 @@ -AYa7rEMKSzoYLxJbN6SG6cSGu/o02E70pmtKI+XwxiWxAAAAAKPqE2YAAAAAAAAAAAAAAAA7PNtvmfHqHEnjNnAS1ULkbCEM4uIYpCgNuv5kw2ETCAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AYa7rEMKSzoYLxJbN6SG6cSGu/o02E70pmtKI+XwxiWxAAAAAKEJGmgAAAAAAAAAAAAAAAA7PNtvmfHqHEnjNnAS1ULkbCEM4uIYpCgNuv5kw2ETCMPjhlzo1AEcT03pj4ZfNZRxh9AwSl4slZFTeM+H5fyGAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= diff --git a/bin/darkfid/src/proto/protocol_sync.rs b/bin/darkfid/src/proto/protocol_sync.rs index 7b4220305..d2fca2412 100644 --- a/bin/darkfid/src/proto/protocol_sync.rs +++ b/bin/darkfid/src/proto/protocol_sync.rs @@ -107,7 +107,7 @@ pub struct HeaderSyncResponse { impl_p2p_message!( HeaderSyncResponse, "headersyncresponse", - 1701, + 2341, 1, PROTOCOL_SYNC_METERING_CONFIGURATION ); @@ -221,7 +221,7 @@ pub struct ForkHeadersResponse { impl_p2p_message!( ForkHeadersResponse, "forkheadersresponse", - 1701, + 2341, 1, PROTOCOL_SYNC_METERING_CONFIGURATION ); diff --git a/bin/darkfid/src/task/consensus.rs b/bin/darkfid/src/task/consensus.rs index 3cdebe652..c3b257b3d 100644 --- a/bin/darkfid/src/task/consensus.rs +++ b/bin/darkfid/src/task/consensus.rs @@ -56,6 +56,11 @@ pub async fn consensus_init_task( config: ConsensusInitTaskConfig, ex: ExecutorPtr, ) -> Result<()> { + // Check current canonical blockchain for curruption + // TODO: create a restore method reverting each block backwards + // until its healthy again + node.validator.consensus.healthcheck().await?; + // Check if network is configured to start in the future. // NOTE: Always configure the network to start in the future when bootstrapping // or restarting it. 
diff --git a/bin/darkfid/src/task/miner.rs b/bin/darkfid/src/task/miner.rs index 4ecbd268a..b5fb555f7 100644 --- a/bin/darkfid/src/task/miner.rs +++ b/bin/darkfid/src/task/miner.rs @@ -17,7 +17,7 @@ */ use darkfi::{ - blockchain::{BlockInfo, Header}, + blockchain::{BlockInfo, Header, HeaderHash}, rpc::{jsonrpc::JsonNotification, util::JsonValue}, system::{ExecutorPtr, StoppableTask, Subscription}, tx::{ContractCallLeaf, Transaction, TransactionBuilder}, @@ -25,6 +25,7 @@ use darkfi::{ validator::{ consensus::{Fork, Proposal}, utils::best_fork_index, + verification::apply_producer_transaction, }, zk::{empty_witnesses, ProvingKey, ZkCircuit}, zkas::ZkBinary, @@ -34,7 +35,7 @@ use darkfi_money_contract::{ client::pow_reward_v1::PoWRewardCallBuilder, MoneyFunction, MONEY_CONTRACT_ZKAS_MINT_NS_V1, }; use darkfi_sdk::{ - crypto::{poseidon_hash, FuncId, PublicKey, SecretKey, MONEY_CONTRACT_ID}, + crypto::{poseidon_hash, FuncId, MerkleTree, PublicKey, SecretKey, MONEY_CONTRACT_ID}, pasta::pallas, ContractCall, }; @@ -160,12 +161,15 @@ pub async fn miner_task( }; drop(forks); + // Grab extended fork last proposal hash + let last_proposal_hash = extended_fork.last_proposal()?.hash; + // Start listenning for network proposals and mining next block for best fork. match smol::future::or( - listen_to_network(node, &extended_fork, &subscription, &sender), + listen_to_network(node, last_proposal_hash, &subscription, &sender), mine( node, - &extended_fork, + extended_fork, &mut secret, recipient_config, &zkbin, @@ -234,12 +238,10 @@ pub async fn miner_task( /// Async task to listen for incoming proposals and check if the best fork has changed. async fn listen_to_network( node: &DarkfiNodePtr, - extended_fork: &Fork, + last_proposal_hash: HeaderHash, subscription: &Subscription, sender: &Sender<()>, ) -> Result<()> { - // Grab extended fork last proposal hash - let last_proposal_hash = extended_fork.last_proposal()?.hash; loop { // Wait until a new proposal has been received subscription.receive().await; @@ -273,7 +275,7 @@ async fn listen_to_network( #[allow(clippy::too_many_arguments)] async fn mine( node: &DarkfiNodePtr, - extended_fork: &Fork, + extended_fork: Fork, secret: &mut SecretKey, recipient_config: &MinerRewardsRecipientConfig, zkbin: &ZkBinary, @@ -304,7 +306,7 @@ pub async fn wait_stop_signal(stop_signal: &Receiver<()>) -> Result<()> { /// Async task to generate and mine provided fork index next block. async fn mine_next_block( node: &DarkfiNodePtr, - extended_fork: &Fork, + mut extended_fork: Fork, secret: &mut SecretKey, recipient_config: &MinerRewardsRecipientConfig, zkbin: &ZkBinary, @@ -313,7 +315,7 @@ async fn mine_next_block( ) -> Result<()> { // Grab next target and block let (next_target, mut next_block) = generate_next_block( - extended_fork, + &mut extended_fork, secret, recipient_config, zkbin, @@ -354,7 +356,7 @@ async fn mine_next_block( /// Auxiliary function to generate next block in an atomic manner. 
async fn generate_next_block( - extended_fork: &Fork, + extended_fork: &mut Fork, secret: &mut SecretKey, recipient_config: &MinerRewardsRecipientConfig, zkbin: &ZkBinary, @@ -369,7 +371,7 @@ async fn generate_next_block( let next_block_height = last_proposal.block.header.height + 1; // Grab forks' unproposed transactions - let (mut txs, _, fees) = extended_fork + let (mut txs, _, fees, overlay) = extended_fork .unproposed_txs(&extended_fork.blockchain, next_block_height, block_target, verify_fees) .await?; @@ -382,10 +384,31 @@ async fn generate_next_block( // Generate reward transaction let tx = generate_transaction(next_block_height, fees, secret, recipient_config, zkbin, pk)?; + + // Apply producer transaction in the overlay + let _ = apply_producer_transaction( + &overlay, + next_block_height, + block_target, + &tx, + &mut MerkleTree::new(1), + ) + .await?; txs.push(tx); + // Grab the updated contracts states root + overlay.lock().unwrap().contracts.update_state_monotree(&mut extended_fork.state_monotree)?; + let Some(state_root) = extended_fork.state_monotree.get_headroot()? else { + return Err(Error::ContractsStatesRootNotFoundError); + }; + + // Drop new trees opened by the unproposed transactions overlay + overlay.lock().unwrap().overlay.lock().unwrap().purge_new_trees()?; + // Generate the new header - let header = Header::new(last_proposal.hash, next_block_height, Timestamp::current_time(), 0); + let mut header = + Header::new(last_proposal.hash, next_block_height, Timestamp::current_time(), 0); + header.state_root = state_root; // Generate the block let mut next_block = BlockInfo::new_empty(header); diff --git a/bin/darkfid/src/task/unknown_proposal.rs b/bin/darkfid/src/task/unknown_proposal.rs index 2b82cedd5..4b818dc41 100644 --- a/bin/darkfid/src/task/unknown_proposal.rs +++ b/bin/darkfid/src/task/unknown_proposal.rs @@ -374,6 +374,9 @@ async fn handle_reorg( peer_fork.overlay.lock().unwrap().overlay.lock().unwrap().add_diff(inverse_diff)?; } + // Rebuild fork contracts states monotree + peer_fork.compute_monotree()?; + // Retrieve the proposals of the hashes sequence, in batches info!(target: "darkfid::task::handle_reorg", "Peer sequence ranks higher than our current best fork, retrieving {} proposals from peer...", peer_header_hashes.len()); let mut batch = Vec::with_capacity(BATCH); @@ -425,7 +428,7 @@ async fn handle_reorg( // Verify proposal if let Err(e) = - verify_fork_proposal(&peer_fork, peer_proposal, validator.verify_fees).await + verify_fork_proposal(&mut peer_fork, peer_proposal, validator.verify_fees).await { error!(target: "darkfid::task::handle_reorg", "Verify fork proposal failed: {e}"); return Ok(()) @@ -446,7 +449,7 @@ async fn handle_reorg( } // Verify trigger proposal - if let Err(e) = verify_fork_proposal(&peer_fork, &proposal, validator.verify_fees).await { + if let Err(e) = verify_fork_proposal(&mut peer_fork, &proposal, validator.verify_fees).await { error!(target: "darkfid::task::handle_reorg", "Verify proposal failed: {e}"); return Ok(()) } diff --git a/bin/darkfid/src/tests/harness.rs b/bin/darkfid/src/tests/harness.rs index 6ea520700..1338bfff2 100644 --- a/bin/darkfid/src/tests/harness.rs +++ b/bin/darkfid/src/tests/harness.rs @@ -19,12 +19,17 @@ use std::{collections::HashMap, sync::Arc}; use darkfi::{ - blockchain::{BlockInfo, Header, HeaderHash}, + blockchain::{BlockInfo, Blockchain, BlockchainOverlay, Header, HeaderHash}, net::Settings, rpc::jsonrpc::JsonSubscriber, system::sleep, tx::{ContractCallLeaf, TransactionBuilder}, - 
validator::{consensus::Proposal, Validator, ValidatorConfig}, + validator::{ + consensus::{Fork, Proposal}, + utils::deploy_native_contracts, + verification::{apply_producer_transaction, verify_block}, + Validator, ValidatorConfig, + }, zk::{empty_witnesses, ProvingKey, ZkCircuit}, Result, }; @@ -33,7 +38,7 @@ use darkfi_money_contract::{ client::pow_reward_v1::PoWRewardCallBuilder, MoneyFunction, MONEY_CONTRACT_ZKAS_MINT_NS_V1, }; use darkfi_sdk::{ - crypto::{Keypair, MONEY_CONTRACT_ID}, + crypto::{Keypair, MerkleTree, MONEY_CONTRACT_ID}, ContractCall, }; use darkfi_serial::Encodable; @@ -78,6 +83,15 @@ impl Harness { // Append it again so its added to the merkle tree genesis_block.append_txs(vec![producer_tx]); + // Compute genesis contracts states monotree root + let (_, vks) = vks::get_cached_pks_and_vks()?; + let sled_db = sled::Config::new().temporary(true).open()?; + vks::inject(&sled_db, &vks)?; + let overlay = BlockchainOverlay::new(&Blockchain::new(&sled_db)?)?; + deploy_native_contracts(&overlay, config.pow_target).await?; + genesis_block.header.state_root = + overlay.lock().unwrap().contracts.get_state_monotree()?.get_headroot()?.unwrap(); + // Generate validators configuration // NOTE: we are not using consensus constants here so we // don't get circular dependencies. @@ -89,8 +103,7 @@ impl Harness { verify_fees, }; - // Generate validators using pregenerated vks - let (_, vks) = vks::get_cached_pks_and_vks()?; + // Generate validators let mut settings = Settings { localnet: true, inbound_connections: 3, ..Default::default() }; @@ -139,11 +152,13 @@ impl Harness { for (index, fork) in alice.iter().enumerate() { assert_eq!(fork.proposals.len(), fork_sizes[index]); assert_eq!(fork.diffs.len(), fork_sizes[index]); + assert!(fork.healthcheck().is_ok()); } for (index, fork) in bob.iter().enumerate() { assert_eq!(fork.proposals.len(), fork_sizes[index]); assert_eq!(fork.diffs.len(), fork_sizes[index]); + assert!(fork.healthcheck().is_ok()); } } @@ -166,18 +181,22 @@ impl Harness { Ok(()) } - pub async fn generate_next_block(&self, previous: &BlockInfo) -> Result { + pub async fn generate_next_block(&self, fork: &mut Fork) -> Result { + // Grab fork last block + let previous = fork.overlay.lock().unwrap().last_block()?; + // Next block info let block_height = previous.header.height + 1; let last_nonce = previous.header.nonce; // Generate a producer transaction let keypair = Keypair::default(); - let (zkbin, _) = self.alice.validator.blockchain.contracts.get_zkas( - &self.alice.validator.blockchain.sled_db, - &MONEY_CONTRACT_ID, - MONEY_CONTRACT_ZKAS_MINT_NS_V1, - )?; + let (zkbin, _) = fork + .overlay + .lock() + .unwrap() + .contracts + .get_zkas(&MONEY_CONTRACT_ID, MONEY_CONTRACT_ZKAS_MINT_NS_V1)?; let circuit = ZkCircuit::new(empty_witnesses(&zkbin)?, &zkbin); let pk = ProvingKey::build(zkbin.k, &circuit); @@ -216,9 +235,34 @@ impl Harness { // Add producer transaction to the block block.append_txs(vec![tx]); + // Compute block contracts states monotree root + let overlay = fork.overlay.lock().unwrap().full_clone()?; + let _ = apply_producer_transaction( + &overlay, + block.header.height, + fork.module.target, + block.txs.last().unwrap(), + &mut MerkleTree::new(1), + ) + .await?; + block.header.state_root = + overlay.lock().unwrap().contracts.get_state_monotree()?.get_headroot()?.unwrap(); + // Attach signature block.sign(&keypair.secret); + // Append new block to fork + verify_block( + &fork.overlay, + &fork.module, + &mut fork.state_monotree, + &block, + &previous, + 
self.alice.validator.verify_fees, ) .await?; fork.append_proposal(&Proposal::new(block.clone())).await?; + Ok(block) } } diff --git a/bin/darkfid/src/tests/metering.rs b/bin/darkfid/src/tests/metering.rs index 989ce5d81..86789696c 100644 --- a/bin/darkfid/src/tests/metering.rs +++ b/bin/darkfid/src/tests/metering.rs @@ -33,9 +33,9 @@ fn darkfid_protocols_metering() { const U32_LEN: usize = 4; const VARINT_LEN: usize = 1; const HEADER_HASH_LEN: usize = 32; - // Header = U8_LEN + HEADER_HASH_LEN + U32_LEN + U64_LEN + U64_LEN + (U8_LEN * 32) = - // 1 + 32 + 4 + 8 + 8 + (1 * 32) = 53 + 32 = 85 - const HEADER_LEN: usize = 85; + // Header = U8_LEN + HEADER_HASH_LEN + U32_LEN + U64_LEN + U64_LEN + (U8_LEN * 32) + STATE_HASH_LEN = + // 1 + 32 + 4 + 8 + 8 + (1 * 32) + 32 = 53 + 32 + 32 = 117 + const HEADER_LEN: usize = 117; // Generate a dummy `Header`. // Its bytes vector length is constant. @@ -103,7 +103,7 @@ fn darkfid_protocols_metering() { // Based on length size/type, this can add from 1(u8) to 8(u64) bytes. // Since `BATCH` is 20, its `VarInt` will be represented as a u8, // adding an extra byte. - // Length = (BATCH * HEADER_LEN) + VARINT_LEN = (20 * 85) + 1 = 1700 + 1 = 1701 + // Length = (BATCH * HEADER_LEN) + VARINT_LEN = (20 * 117) + 1 = 2340 + 1 = 2341 assert_eq!(serialize(&header_sync_response).len(), (BATCH * HEADER_LEN) + VARINT_LEN); // Protocol sync `SyncRequest` is limited by `BATCH` so it has a @@ -158,7 +158,7 @@ fn darkfid_protocols_metering() { // constant max bytes length limit. let fork_headers_response = ForkHeadersResponse { headers: vec![header; BATCH] }; // Don't forget the extra byte from `Vec` length. - // Length = (BATCH * HEADER_LEN) + VARINT_LEN = (20 * 85) + 1 = 1700 + 1 = 1701 + // Length = (BATCH * HEADER_LEN) + VARINT_LEN = (20 * 117) + 1 = 2340 + 1 = 2341 assert_eq!(serialize(&fork_headers_response).len(), (BATCH * HEADER_LEN) + VARINT_LEN); // Protocol sync `ForkProposalsRequest` is limited by `BATCH` so it has a diff --git a/bin/darkfid/src/tests/mod.rs b/bin/darkfid/src/tests/mod.rs index 2f4bb356e..86de5b055 100644 --- a/bin/darkfid/src/tests/mod.rs +++ b/bin/darkfid/src/tests/mod.rs @@ -19,7 +19,10 @@ use std::sync::Arc; use darkfi::{ - net::Settings, rpc::settings::RpcSettings, validator::utils::best_fork_index, Result, + net::Settings, + rpc::settings::RpcSettings, + validator::{consensus::Fork, utils::best_fork_index, verification::verify_block}, + Result, }; use darkfi_contract_test_harness::init_logger; use darkfi_sdk::num_traits::One; @@ -53,27 +56,42 @@ async fn sync_blocks_real(ex: Arc>) -> Result<()> { }; let th = Harness::new(config, true, &ex).await?; - // Retrieve genesis block - let genesis = th.alice.validator.blockchain.last_block()?; + // Generate a fork to create new blocks + let mut fork = th.alice.validator.consensus.forks.read().await[0].full_clone()?; // Generate next blocks - let block1 = th.generate_next_block(&genesis).await?; - let block2 = th.generate_next_block(&block1).await?; - let block3 = th.generate_next_block(&block2).await?; - let block4 = th.generate_next_block(&block3).await?; + let block1 = th.generate_next_block(&mut fork).await?; + let block2 = th.generate_next_block(&mut fork).await?; + let block3 = th.generate_next_block(&mut fork).await?; + let block4 = th.generate_next_block(&mut fork).await?; // Add them to nodes - th.add_blocks(&vec![block1, block2.clone(), block3.clone(), block4.clone()]).await?; + th.add_blocks(&vec![block1, block2.clone(), block3.clone(), block4]).await?; // Nodes must have one fork with 2
blocks th.validate_fork_chains(1, vec![2]).await; // Extend current fork sequence - let block5 = th.generate_next_block(&block4).await?; + let block5 = th.generate_next_block(&mut fork).await?; // Create a new fork extending canonical - let block6 = th.generate_next_block(&block3).await?; + fork = Fork::new( + th.alice.validator.consensus.blockchain.clone(), + th.alice.validator.consensus.module.read().await.clone(), + ) + .await?; + // Append block3 to fork and generate the next one + verify_block( + &fork.overlay, + &fork.module, + &mut fork.state_monotree, + &block3, + &block2, + th.alice.validator.verify_fees, + ) + .await?; + let block6 = th.generate_next_block(&mut fork).await?; // Add them to nodes - th.add_blocks(&vec![block5, block6.clone()]).await?; + th.add_blocks(&vec![block5, block6]).await?; // Grab current best fork index let forks = th.alice.validator.consensus.forks.read().await; @@ -91,7 +109,6 @@ async fn sync_blocks_real(ex: Arc>) -> Result<()> { // We are going to create a third node and try to sync from Bob let mut settings = Settings { localnet: true, inbound_connections: 3, ..Default::default() }; - let charlie_url = Url::parse("tcp+tls://127.0.0.1:18342")?; settings.inbound_addrs = vec![charlie_url]; let bob_url = th.bob.p2p_handler.p2p.settings().read().await.inbound_addrs[0].clone(); @@ -121,8 +138,7 @@ async fn sync_blocks_real(ex: Arc>) -> Result<()> { drop(charlie_forks); // Extend the small fork sequence and add it to nodes - let block7 = th.generate_next_block(&block6).await?; - th.add_blocks(&vec![block7.clone()]).await?; + th.add_blocks(&vec![th.generate_next_block(&mut fork).await?]).await?; // Nodes must have two forks with 2 blocks each th.validate_fork_chains(2, vec![2, 2]).await; @@ -148,11 +164,8 @@ async fn sync_blocks_real(ex: Arc>) -> Result<()> { // Since the don't know if the second fork was the best, // we extend it until it becomes best and a confirmation // occurred. 
- let mut fork_sequence = vec![block6, block7]; loop { - let proposal = th.generate_next_block(fork_sequence.last().unwrap()).await?; - th.add_blocks(&vec![proposal.clone()]).await?; - fork_sequence.push(proposal); + th.add_blocks(&vec![th.generate_next_block(&mut fork).await?]).await?; // Check if confirmation occured if th.alice.validator.blockchain.len() > 4 { break @@ -160,15 +173,15 @@ async fn sync_blocks_real(ex: Arc>) -> Result<()> { } // Nodes must have executed confirmation, so we validate their chains - th.validate_chains(4 + (fork_sequence.len() - 2)).await?; + th.validate_chains(4 + (fork.proposals.len() - 2)).await?; let bob = &th.bob.validator; let last = alice.blockchain.last()?.1; - assert_eq!(last, fork_sequence[fork_sequence.len() - 3].hash()); + assert_eq!(last, fork.proposals[fork.proposals.len() - 3]); assert_eq!(last, bob.blockchain.last()?.1); // Nodes must have one fork with 2 blocks th.validate_fork_chains(1, vec![2]).await; let last_proposal = alice.consensus.forks.read().await[0].proposals[1]; - assert_eq!(last_proposal, fork_sequence.last().unwrap().hash()); + assert_eq!(last_proposal, *fork.proposals.last().unwrap()); assert_eq!(last_proposal, bob.consensus.forks.read().await[0].proposals[1]); // Same for Charlie @@ -228,34 +241,6 @@ fn darkfid_programmatic_control() -> Result<()> { log::debug!(target: "darkfid_programmatic_control", "Logger initialized"); } - // Daemon configuration - let mut genesis_block = darkfi::blockchain::BlockInfo::default(); - let producer_tx = genesis_block.txs.pop().unwrap(); - genesis_block.append_txs(vec![producer_tx]); - let bootstrap = genesis_block.header.timestamp.inner(); - let config = darkfi::validator::ValidatorConfig { - confirmation_threshold: 1, - pow_target: 20, - pow_fixed_difficulty: Some(BigUint::one()), - genesis_block, - verify_fees: false, - }; - let consensus_config = crate::ConsensusInitTaskConfig { - skip_sync: true, - checkpoint_height: None, - checkpoint: None, - miner: false, - recipient: None, - spend_hook: None, - user_data: None, - bootstrap, - }; - let sled_db = sled_overlay::sled::Config::new().temporary(true).open()?; - let (_, vks) = darkfi_contract_test_harness::vks::get_cached_pks_and_vks()?; - darkfi_contract_test_harness::vks::inject(&sled_db, &vks)?; - let rpc_settings = - RpcSettings { listen: Url::parse("tcp://127.0.0.1:8240")?, ..RpcSettings::default() }; - // Create an executor and communication signals let ex = Arc::new(smol::Executor::new()); let (signal, shutdown) = smol::channel::unbounded::<()>(); @@ -263,6 +248,50 @@ fn darkfid_programmatic_control() -> Result<()> { easy_parallel::Parallel::new().each(0..1, |_| smol::block_on(ex.run(shutdown.recv()))).finish( || { smol::block_on(async { + // Daemon configuration + let mut genesis_block = darkfi::blockchain::BlockInfo::default(); + let producer_tx = genesis_block.txs.pop().unwrap(); + genesis_block.append_txs(vec![producer_tx]); + let sled_db = sled_overlay::sled::Config::new().temporary(true).open().unwrap(); + let (_, vks) = darkfi_contract_test_harness::vks::get_cached_pks_and_vks().unwrap(); + darkfi_contract_test_harness::vks::inject(&sled_db, &vks).unwrap(); + let overlay = darkfi::blockchain::BlockchainOverlay::new( + &darkfi::blockchain::Blockchain::new(&sled_db).unwrap(), + ) + .unwrap(); + darkfi::validator::utils::deploy_native_contracts(&overlay, 20).await.unwrap(); + genesis_block.header.state_root = overlay + .lock() + .unwrap() + .contracts + .get_state_monotree() + .unwrap() + .get_headroot() + .unwrap() + .unwrap(); + 
let bootstrap = genesis_block.header.timestamp.inner(); + let config = darkfi::validator::ValidatorConfig { + confirmation_threshold: 1, + pow_target: 20, + pow_fixed_difficulty: Some(BigUint::one()), + genesis_block, + verify_fees: false, + }; + let consensus_config = crate::ConsensusInitTaskConfig { + skip_sync: true, + checkpoint_height: None, + checkpoint: None, + miner: false, + recipient: None, + spend_hook: None, + user_data: None, + bootstrap, + }; + let rpc_settings = RpcSettings { + listen: Url::parse("tcp://127.0.0.1:8240").unwrap(), + ..RpcSettings::default() + }; + // Initialize a daemon let daemon = crate::Darkfid::init( &sled_db, diff --git a/bin/darkfid/src/tests/sync_forks.rs b/bin/darkfid/src/tests/sync_forks.rs index b1493779b..384263eab 100644 --- a/bin/darkfid/src/tests/sync_forks.rs +++ b/bin/darkfid/src/tests/sync_forks.rs @@ -42,22 +42,24 @@ async fn sync_forks_real(ex: Arc>) -> Result<()> { }; let th = Harness::new(config, true, &ex).await?; - // Retrieve genesis block and generate 3 forks - let genesis = th.alice.validator.blockchain.last_block()?; + // Generate 3 forks + let mut fork0 = th.alice.validator.consensus.forks.read().await[0].full_clone()?; + let mut fork1 = fork0.full_clone()?; + let mut fork2 = fork1.full_clone()?; - // Generate a fork with 3 blocks - let block1 = th.generate_next_block(&genesis).await?; - let block2 = th.generate_next_block(&block1).await?; - let block3 = th.generate_next_block(&block2).await?; - th.add_blocks(&vec![block1, block2, block3]).await?; + // Extend first fork with 3 blocks + th.add_blocks(&vec![ + th.generate_next_block(&mut fork0).await?, + th.generate_next_block(&mut fork0).await?, + th.generate_next_block(&mut fork0).await?, + ]) + .await?; - // Generate a fork with 1 block - let block4 = th.generate_next_block(&genesis).await?; - th.add_blocks(&vec![block4.clone()]).await?; + // Extend second fork with 1 block + th.add_blocks(&vec![th.generate_next_block(&mut fork1).await?]).await?; - // Generate a fork with 1 block - let block5 = th.generate_next_block(&genesis).await?; - th.add_blocks(&vec![block5.clone()]).await?; + // Extend third fork with 1 block + th.add_blocks(&vec![th.generate_next_block(&mut fork2).await?]).await?; // Check nodes have all the forks th.validate_fork_chains(3, vec![3, 1, 1]).await; @@ -82,11 +84,8 @@ async fn sync_forks_real(ex: Arc>) -> Result<()> { drop(charlie_forks); // Extend the small fork sequences and add it to nodes - let block6 = th.generate_next_block(&block4).await?; - th.add_blocks(&vec![block6]).await?; - - let block7 = th.generate_next_block(&block5).await?; - th.add_blocks(&vec![block7]).await?; + th.add_blocks(&vec![th.generate_next_block(&mut fork1).await?]).await?; + th.add_blocks(&vec![th.generate_next_block(&mut fork2).await?]).await?; // Check charlie has the correct forks let charlie_forks = charlie.validator.consensus.forks.read().await; diff --git a/bin/darkfid/src/tests/unproposed_txs.rs b/bin/darkfid/src/tests/unproposed_txs.rs index 23634ec95..d9d21b609 100644 --- a/bin/darkfid/src/tests/unproposed_txs.rs +++ b/bin/darkfid/src/tests/unproposed_txs.rs @@ -95,7 +95,7 @@ async fn simulate_unproposed_txs( let best_fork = &forks[best_fork_index(&forks)?]; // Retrieve unproposed transactions - let (tx, total_gas_used, _) = best_fork + let (tx, total_gas_used, _, _) = best_fork .unproposed_txs( &best_fork.clone().blockchain, current_block_height, diff --git a/bin/explorer/explorerd/Cargo.toml b/bin/explorer/explorerd/Cargo.toml index 6ff384755..bc6e26c30 100644 --- 
a/bin/explorer/explorerd/Cargo.toml +++ b/bin/explorer/explorerd/Cargo.toml @@ -34,6 +34,7 @@ structopt-toml = "0.5.1" sled-overlay = "0.1.7" # Misc +blake3 = "1.8.2" log = "0.4.27" lazy_static = "1.5.0" tar = "0.4.44" diff --git a/bin/explorer/explorerd/src/service/blocks.rs b/bin/explorer/explorerd/src/service/blocks.rs index 6d14e32f5..15444bad4 100644 --- a/bin/explorer/explorerd/src/service/blocks.rs +++ b/bin/explorer/explorerd/src/service/blocks.rs @@ -48,7 +48,9 @@ pub struct BlockRecord { /// The block's nonce. This value changes arbitrarily with mining. pub nonce: u64, /// Merkle tree root of the transactions hashes contained in this block - pub root: String, + pub transactions_root: String, + /// Contracts states Monotree(SMT) root this block commits to + pub state_root: String, /// Block producer signature pub signature: Signature, } @@ -63,7 +65,8 @@ impl BlockRecord { JsonValue::Number(self.height as f64), JsonValue::String(self.timestamp.to_string()), JsonValue::Number(self.nonce as f64), - JsonValue::String(self.root.clone()), + JsonValue::String(self.transactions_root.clone()), + JsonValue::String(self.state_root.clone()), JsonValue::String(format!("{:?}", self.signature)), ]) } @@ -78,7 +81,8 @@ impl From<&BlockInfo> for BlockRecord { height: block.header.height, timestamp: block.header.timestamp, nonce: block.header.nonce, - root: block.header.root.to_string(), + transactions_root: block.header.transactions_root.to_string(), + state_root: blake3::hash(&block.header.state_root).to_string(), signature: block.signature, } } diff --git a/bin/explorer/site/templates/block.html b/bin/explorer/site/templates/block.html index a53832b0b..299f25bec 100644 --- a/bin/explorer/site/templates/block.html +++ b/bin/explorer/site/templates/block.html @@ -37,8 +37,9 @@
 Height: {{ block[3] }}
 Timestamp: {{ block[4] }}
 Nonce: {{ block[5] }}
-Root: {{ block[6] }}
-Signature: {{ block[7] }}
+Transactions Root: {{ block[6] }}
+State Root: {{ block[7] }}
+Signature: {{ block[8] }}
  • diff --git a/doc/src/arch/consensus.md b/doc/src/arch/consensus.md index 7bd5d250a..291a51d3a 100644 --- a/doc/src/arch/consensus.md +++ b/doc/src/arch/consensus.md @@ -253,14 +253,15 @@ the tx in Bitcoin with the most outputs has 2501. ## Header -| Field | Type | Description | -|-------------|------------|-----------------------------------------------------| -| `version` | `u8` | Block version | -| `previous` | `[u8; 32]` | Previous block hash | -| `height` | `u32` | Block height | -| `timestamp` | `u64` | Block creation timestamp | -| `nonce` | `u64` | The block's nonce value | -| `tree_root` | `[u8; 32]` | Merkle tree root of the block's transactions hashes | +| Field | Type | Description | +|---------------------|------------|----------------------------------------------------------| +| `version` | `u8` | Block version | +| `previous` | `[u8; 32]` | Previous block hash | +| `height` | `u32` | Block height | +| `timestamp` | `u64` | Block creation timestamp | +| `nonce` | `u64` | The block's nonce value | +| `transactions_root` | `[u8; 32]` | Merkle tree root of the block's transactions hashes | +| `state_root` | `[u8; 32]` | Contracts states Monotree(SMT) root the block commits to | ## Block diff --git a/script/research/gg/src/main.rs b/script/research/gg/src/main.rs index 072108cdc..819f2826a 100644 --- a/script/research/gg/src/main.rs +++ b/script/research/gg/src/main.rs @@ -26,21 +26,24 @@ use std::{ use clap::{Parser, Subcommand}; use darkfi::{ - blockchain::{BlockInfo, Blockchain, BlockchainOverlay}, + blockchain::{block_store::append_tx_to_merkle_tree, BlockInfo, Blockchain, BlockchainOverlay}, cli_desc, - tx::{ContractCallLeaf, Transaction, TransactionBuilder}, + tx::{ContractCallLeaf, TransactionBuilder}, util::{encoding::base64, parse::decode_base10, path::expand_path, time::Timestamp}, - validator::{utils::deploy_native_contracts, verification::verify_genesis_block}, + validator::{ + utils::deploy_native_contracts, + verification::{apply_transaction, verify_genesis_block}, + }, zk::{empty_witnesses, ProvingKey, ZkCircuit}, zkas::ZkBinary, - Result, + Error, Result, }; use darkfi_contract_test_harness::vks; use darkfi_money_contract::{ client::genesis_mint_v1::GenesisMintCallBuilder, MoneyFunction, MONEY_CONTRACT_ZKAS_MINT_NS_V1, }; use darkfi_sdk::{ - crypto::{contract_id::MONEY_CONTRACT_ID, FuncId, PublicKey, SecretKey}, + crypto::{contract_id::MONEY_CONTRACT_ID, FuncId, MerkleTree, PublicKey, SecretKey}, pasta::{group::ff::PrimeField, pallas}, ContractCall, }; @@ -69,6 +72,10 @@ enum Subcmd { #[arg(short, long)] /// Genesis timestamp to use, instead of current one genesis_timestamp: Option, + + #[arg(short, long, default_value = "90")] + /// Configured PoW target + pow_target: u32, }, /// Read a Darkfi genesis block from stdin and verify it @@ -119,33 +126,52 @@ fn main() -> Result<()> { println!("{genesis_block:?}"); } - Subcmd::Generate { txs_folder, genesis_timestamp } => { + Subcmd::Generate { txs_folder, genesis_timestamp, pow_target } => { + // Generate the genesis block + let mut genesis_block = BlockInfo::default(); + + // Retrieve genesis producer transaction + let producer_tx = genesis_block.txs.pop().unwrap(); + + // Initialize a temporary sled database + let sled_db = sled::Config::new().temporary(true).open()?; + let (_, vks) = vks::get_cached_pks_and_vks()?; + vks::inject(&sled_db, &vks)?; + + // Create an overlay over whole blockchain + let blockchain = Blockchain::new(&sled_db)?; + let overlay = BlockchainOverlay::new(&blockchain)?; + 
deploy_native_contracts(&overlay, 0).await?; + // Grab genesis transactions from folder let txs_folder = expand_path(&txs_folder).unwrap(); - let mut genesis_txs: Vec = vec![]; + let mut tree = MerkleTree::new(1); for file in read_dir(txs_folder)? { let file = file?; let bytes = base64::decode(read_to_string(file.path())?.trim()).unwrap(); let tx = deserialize_async(&bytes).await?; - genesis_txs.push(tx); + apply_transaction(&overlay, 0, pow_target, &tx, &mut tree).await?; + genesis_block.txs.push(tx); } - // Generate the genesis block - let mut genesis_block = BlockInfo::default(); - // Update timestamp if one was provided if let Some(timestamp) = genesis_timestamp { genesis_block.header.timestamp = Timestamp::from_u64(timestamp); } - // Retrieve genesis producer transaction - let producer_tx = genesis_block.txs.pop().unwrap(); + // Append producer tx + append_tx_to_merkle_tree(&mut tree, &producer_tx); + genesis_block.txs.push(producer_tx); - // Append genesis transactions - if !genesis_txs.is_empty() { - genesis_block.append_txs(genesis_txs); - } - genesis_block.append_txs(vec![producer_tx]); + // Update the transactions root + genesis_block.header.transactions_root = tree.root(0).unwrap(); + + // Grab the updated contracts states root + let state_monotree = overlay.lock().unwrap().get_state_monotree()?; + let Some(state_root) = state_monotree.get_headroot()? else { + return Err(Error::ContractsStatesRootNotFoundError); + }; + genesis_block.header.state_root = state_root; // Write generated genesis block to stdin let encoded = base64::encode(&serialize_async(&genesis_block).await); diff --git a/src/blockchain/block_store.rs b/src/blockchain/block_store.rs index 3e1698213..3cae1cca1 100644 --- a/src/blockchain/block_store.rs +++ b/src/blockchain/block_store.rs @@ -128,7 +128,7 @@ impl BlockInfo { append_tx_to_merkle_tree(&mut tree, &tx); self.txs.push(tx); // Grab the tree root and store it in the header - self.header.root = tree.root(0).unwrap(); + self.header.transactions_root = tree.root(0).unwrap(); } /// Append a vector of transactions to the block. Also adds them to the @@ -147,7 +147,7 @@ impl BlockInfo { self.txs.push(tx); } // Grab the tree root and store it in the header - self.header.root = tree.root(0).unwrap(); + self.header.transactions_root = tree.root(0).unwrap(); } /// Sign block header using provided secret key diff --git a/src/blockchain/contract_store.rs b/src/blockchain/contract_store.rs index af685eaca..c1c44e8bd 100644 --- a/src/blockchain/contract_store.rs +++ b/src/blockchain/contract_store.rs @@ -18,7 +18,7 @@ r* This program is distributed in the hope that it will be useful, use std::{collections::BTreeMap, io::Cursor}; -use darkfi_sdk::crypto::ContractId; +use darkfi_sdk::{crypto::ContractId, monotree::Monotree}; use darkfi_serial::{deserialize, serialize}; use log::{debug, error}; use sled_overlay::{serial::parse_record, sled}; @@ -253,6 +253,38 @@ impl ContractStore { Ok(ret) } + + /// Generate a Monotree(SMT) containing all contracts states checksums. 
+ pub fn get_state_monotree(&self, db: &sled::Db) -> Result { + // Initialize the monotree + let mut root = None; + let mut tree = Monotree::new(); + + // Iterate over current contracts states records + // TODO: parallelize this with a threadpool + for state_record in self.state.iter().values() { + // Iterate over contract states pointers + let state_pointers: Vec<[u8; 32]> = deserialize(&state_record?)?; + for state_ptr in state_pointers { + // Grab the state tree + let state_tree = db.open_tree(state_ptr)?; + + // Compute its checksum + let mut hasher = blake3::Hasher::new(); + for record in state_tree.iter() { + let (key, value) = record?; + hasher.update(&key); + hasher.update(&value); + } + + // Insert record to monotree + root = tree.insert(root.as_ref(), &state_ptr, hasher.finalize().as_bytes())?; + tree.set_headroot(root.as_ref()); + } + } + + Ok(tree) + } } /// Overlay structure over a [`ContractStore`] instance. @@ -389,4 +421,76 @@ impl ContractStoreOverlay { Ok((zkbin, vk)) } + + /// Generate a Monotree(SMT) containing all contracts states checksums. + /// Be careful as this will open all states trees in the overlay. + pub fn get_state_monotree(&self) -> Result { + let mut lock = self.0.lock().unwrap(); + + // Grab all states pointers + let mut states_pointers = vec![]; + for state_record in lock.iter(SLED_CONTRACTS_TREE)? { + let state_pointers: Vec<[u8; 32]> = deserialize(&state_record?.1)?; + for state_ptr in state_pointers { + states_pointers.push(state_ptr); + } + } + + // Initialize the monotree + let mut root = None; + let mut tree = Monotree::new(); + + // Iterate over contract states pointers + // TODO: parallelize this with a threadpool + for state_ptr in states_pointers { + // Open the state tree in the overlay + lock.open_tree(&state_ptr, false)?; + + // Compute its checksum + let mut hasher = blake3::Hasher::new(); + for record in lock.iter(&state_ptr)? { + let (key, value) = record?; + hasher.update(&key); + hasher.update(&value); + } + + // Insert record to monotree + root = tree.insert(root.as_ref(), &state_ptr, hasher.finalize().as_bytes())?; + tree.set_headroot(root.as_ref()); + } + + Ok(tree) + } + + /// Compute all updated contracts states checksums and update their records + /// in the provided Monotree(SMT). + pub fn update_state_monotree(&self, tree: &mut Monotree) -> Result<()> { + let lock = self.0.lock().unwrap(); + + // Iterate over overlay's caches + // TODO: parallelize this with a threadpool + let mut root = tree.get_headroot()?; + for state_key in lock.state.caches.keys() { + // Check if that cache is a contract state one. + // Overlay protected trees are all the native/non-contract ones. + if lock.state.protected_tree_names.contains(state_key) { + continue + } + + // Iterate over state tree to compute its checksum + let mut hasher = blake3::Hasher::new(); + for record in lock.iter(state_key)?
{ + let (key, value) = record?; + hasher.update(&key); + hasher.update(&value); + } + + // Insert record to monotree + root = + tree.insert(root.as_ref(), &deserialize(state_key)?, hasher.finalize().as_bytes())?; + tree.set_headroot(root.as_ref()); + } + + Ok(()) + } } diff --git a/src/blockchain/header_store.rs b/src/blockchain/header_store.rs index 34ad0e9c4..799facf0b 100644 --- a/src/blockchain/header_store.rs +++ b/src/blockchain/header_store.rs @@ -22,6 +22,7 @@ use darkfi_sdk::{ blockchain::block_version, crypto::{MerkleNode, MerkleTree}, hex::decode_hex_arr, + monotree::{Hash as StateHash, EMPTY_HASH}, AsHex, }; #[cfg(feature = "async-serial")] @@ -83,14 +84,17 @@ pub struct Header { /// The block's nonce. This value changes arbitrarily with mining. pub nonce: u64, /// Merkle tree root of the transactions hashes contained in this block - pub root: MerkleNode, + pub transactions_root: MerkleNode, + /// Contracts states Monotree(SMT) root this block commits to + pub state_root: StateHash, } impl Header { pub fn new(previous: HeaderHash, height: u32, timestamp: Timestamp, nonce: u64) -> Self { let version = block_version(height); - let root = MerkleTree::new(1).root(0).unwrap(); - Self { version, previous, height, timestamp, nonce, root } + let transactions_root = MerkleTree::new(1).root(0).unwrap(); + let state_root = *EMPTY_HASH; + Self { version, previous, height, timestamp, nonce, transactions_root, state_root } } /// Compute the header's hash @@ -121,7 +125,7 @@ impl Default for Header { impl fmt::Display for Header { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let s = format!( - "{} {{\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n}}", + "{} {{\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n}}", "Header", "Hash", self.hash(), @@ -135,8 +139,10 @@ impl fmt::Display for Header { self.timestamp, "Nonce", self.nonce, - "Root", - self.root, + "Transactions Root", + self.transactions_root, + "State Root", + blake3::hash(&self.state_root), ); write!(f, "{}", s) diff --git a/src/blockchain/mod.rs b/src/blockchain/mod.rs index 57c03a8ff..75ea3467c 100644 --- a/src/blockchain/mod.rs +++ b/src/blockchain/mod.rs @@ -18,7 +18,7 @@ use std::sync::{Arc, Mutex}; -use darkfi_sdk::tx::TransactionHash; +use darkfi_sdk::{monotree::Monotree, tx::TransactionHash}; use log::debug; use sled_overlay::{sled, sled::Transactional}; @@ -218,6 +218,12 @@ impl Blockchain { self.blocks.get_last() } + /// Retrieve the last block header. + pub fn last_header(&self) -> Result
    { + let (_, hash) = self.last()?; + Ok(self.headers.get(&[hash], true)?[0].clone().unwrap()) + } + /// Retrieve the last block info. pub fn last_block(&self) -> Result { let (_, hash) = self.last()?; @@ -389,6 +395,11 @@ impl Blockchain { Ok(()) } + + /// Generate a Monotree(SMT) containing all contracts states checksums. + pub fn get_state_monotree(&self) -> Result { + self.contracts.get_state_monotree(&self.sled_db) + } } /// Atomic pointer to sled db overlay. @@ -584,4 +595,11 @@ impl BlockchainOverlay { Ok(Arc::new(Mutex::new(Self { overlay, headers, blocks, transactions, contracts }))) } + + /// Generate a Monotree(SMT) containing all contracts states checksums. + /// A clone is used so we are not affected by the opened trees during + /// checksum computing. + pub fn get_state_monotree(&self) -> Result { + self.full_clone()?.lock().unwrap().contracts.get_state_monotree() + } } diff --git a/src/contract/test-harness/src/lib.rs b/src/contract/test-harness/src/lib.rs index 6c04d7ed2..8f9b54822 100644 --- a/src/contract/test-harness/src/lib.rs +++ b/src/contract/test-harness/src/lib.rs @@ -22,11 +22,11 @@ use std::{ }; use darkfi::{ - blockchain::{BlockInfo, BlockchainOverlay}, + blockchain::{BlockInfo, Blockchain, BlockchainOverlay}, runtime::vm_runtime::Runtime, tx::Transaction, util::{pcg::Pcg32, time::Timestamp}, - validator::{Validator, ValidatorConfig, ValidatorPtr}, + validator::{utils::deploy_native_contracts, Validator, ValidatorConfig, ValidatorPtr}, zk::{empty_witnesses, halo2::Field, ProvingKey, ZkCircuit}, zkas::ZkBinary, Result, @@ -268,6 +268,14 @@ impl TestHarness { proving_keys.insert(namespace, (proving_key, zkbin)); } + // Compute genesis contracts states monotree root + let sled_db = sled::Config::new().temporary(true).open()?; + vks::inject(&sled_db, &vks)?; + let overlay = BlockchainOverlay::new(&Blockchain::new(&sled_db)?)?; + deploy_native_contracts(&overlay, 90).await?; + genesis_block.header.state_root = + overlay.lock().unwrap().contracts.get_state_monotree()?.get_headroot()?.unwrap(); + // Create `Wallet` instances let mut holders_map = HashMap::new(); for holder in holders { diff --git a/src/contract/test-harness/src/money_pow_reward.rs b/src/contract/test-harness/src/money_pow_reward.rs index 8fe7d161f..7dc0599d1 100644 --- a/src/contract/test-harness/src/money_pow_reward.rs +++ b/src/contract/test-harness/src/money_pow_reward.rs @@ -17,8 +17,9 @@ */ use darkfi::{ - blockchain::{BlockInfo, Header}, + blockchain::{BlockInfo, BlockchainOverlay, Header}, tx::{ContractCallLeaf, Transaction, TransactionBuilder}, + validator::verification::apply_producer_transaction, Result, }; use darkfi_money_contract::{ @@ -27,7 +28,7 @@ use darkfi_money_contract::{ MoneyFunction, MONEY_CONTRACT_ZKAS_MINT_NS_V1, }; use darkfi_sdk::{ - crypto::{contract_id::MONEY_CONTRACT_ID, MerkleNode}, + crypto::{contract_id::MONEY_CONTRACT_ID, MerkleNode, MerkleTree}, ContractCall, }; use darkfi_serial::AsyncEncodable; @@ -128,6 +129,19 @@ impl TestHarness { // Add producer transaction to the block block.append_txs(vec![tx]); + // Compute block contracts states monotree root + let overlay = BlockchainOverlay::new(&wallet.validator.blockchain)?; + let _ = apply_producer_transaction( + &overlay, + block.header.height, + wallet.validator.consensus.module.read().await.target, + block.txs.last().unwrap(), + &mut MerkleTree::new(1), + ) + .await?; + block.header.state_root = + overlay.lock().unwrap().contracts.get_state_monotree()?.get_headroot()?.unwrap(); + // Attach signature 
block.sign(&wallet.keypair.secret); diff --git a/src/error.rs b/src/error.rs index f6eab41d0..e78992b95 100644 --- a/src/error.rs +++ b/src/error.rs @@ -330,6 +330,12 @@ pub enum Error { #[error("Provided output hash is greater than current target")] PoWInvalidOutHash, + #[error("Contracts states monotree root missing")] + ContractsStatesRootNotFoundError, + + #[error("Contracts states monotree root mismatch: {0} - {1}")] + ContractsStatesRootError(String, String), + // =============== // Database errors // =============== diff --git a/src/validator/consensus.rs b/src/validator/consensus.rs index 523ecabd5..b05190e33 100644 --- a/src/validator/consensus.rs +++ b/src/validator/consensus.rs @@ -18,7 +18,7 @@ use std::collections::{HashMap, HashSet}; -use darkfi_sdk::{crypto::MerkleTree, tx::TransactionHash}; +use darkfi_sdk::{crypto::MerkleTree, monotree::Monotree, tx::TransactionHash}; use darkfi_serial::{async_trait, SerialDecodable, SerialEncodable}; use log::{debug, info, warn}; use num_bigint::BigUint; @@ -230,6 +230,9 @@ impl Consensus { fork.hashes_rank += hash_distance_sq; } + // Rebuild fork contracts states monotree + fork.compute_monotree()?; + // Drop forks lock drop(forks); @@ -638,6 +641,35 @@ impl Consensus { debug!(target: "validator::consensus::reset_pow_module", "PoW module reset successfully!"); Ok(()) } + + /// Auxiliary function to check current contracts states checksums + /// Monotree(SMT) validity in all active forks and canonical. + pub async fn healthcheck(&self) -> Result<()> { + // Grab a lock over current forks + let lock = self.forks.read().await; + + // Rebuild current canonical contract states checksums monotree + let state_monotree = self.blockchain.get_state_monotree()?; + + // Check that the root matches last block header state root + let Some(state_root) = state_monotree.get_headroot()? else { + return Err(Error::ContractsStatesRootNotFoundError); + }; + let last_block_state_root = self.blockchain.last_header()?.state_root; + if state_root != last_block_state_root { + return Err(Error::ContractsStatesRootError( + blake3::hash(&state_root).to_string(), + blake3::hash(&last_block_state_root).to_string(), + )); + } + + // Check each fork health + for fork in lock.iter() { + fork.healthcheck()?; + } + + Ok(()) + } } /// This struct represents a block proposal, used for consensus.
@@ -673,8 +705,10 @@ pub struct Fork { pub blockchain: Blockchain, /// Overlay cache over canonical Blockchain pub overlay: BlockchainOverlayPtr, - /// Current PoW module state, + /// Current PoW module state pub module: PoWModule, + /// Current contracts states checksums Monotree(SMT) + pub state_monotree: Monotree, /// Fork proposal hashes sequence pub proposals: Vec, /// Fork proposal overlay diffs sequence @@ -691,6 +725,8 @@ impl Fork { pub async fn new(blockchain: Blockchain, module: PoWModule) -> Result { let mempool = blockchain.get_pending_txs()?.iter().map(|tx| tx.hash()).collect(); let overlay = BlockchainOverlay::new(&blockchain)?; + // Build current contract states checksums monotree + let state_monotree = overlay.lock().unwrap().get_state_monotree()?; // Retrieve last block difficulty to access current ranks let last_difficulty = blockchain.last_block_difficulty()?; let targets_rank = last_difficulty.ranks.targets_rank; @@ -699,6 +735,7 @@ impl Fork { blockchain, overlay, module, + state_monotree, proposals: vec![], diffs: vec![], mempool, @@ -764,17 +801,23 @@ impl Fork { } /// Auxiliary function to retrieve unproposed valid transactions, - /// along with their total gas used and total paid fees. + /// along with their total gas used, total paid fees and the overlay + /// used to verify the transactions for further processing. + /// + /// Note: Always remember to purge new trees from the overlay if not needed. pub async fn unproposed_txs( &self, blockchain: &Blockchain, verifying_block_height: u32, block_target: u32, verify_fees: bool, - ) -> Result<(Vec, u64, u64)> { + ) -> Result<(Vec, u64, u64, BlockchainOverlayPtr)> { + // Clone forks' overlay + let overlay = self.overlay.lock().unwrap().full_clone()?; + // Check if our mempool is not empty if self.mempool.is_empty() { - return Ok((vec![], 0, 0)) + return Ok((vec![], 0, 0, overlay)) } // Transactions Merkle tree @@ -787,9 +830,6 @@ impl Fork { // Map of ZK proof verifying keys for the current transaction batch let mut vks: HashMap<[u8; 32], HashMap> = HashMap::new(); - // Clone forks' overlay - let overlay = self.overlay.lock().unwrap().full_clone()?; - // Grab all current proposals transactions hashes let proposals_txs = overlay.lock().unwrap().get_blocks_txs_hashes(&self.proposals)?; @@ -851,7 +891,7 @@ impl Fork { unproposed_txs.push(unproposed_tx); } - Ok((unproposed_txs, total_gas_used, total_gas_paid)) + Ok((unproposed_txs, total_gas_used, total_gas_paid, overlay)) } /// Auxiliary function to create a full clone using BlockchainOverlay::full_clone. @@ -861,6 +901,7 @@ impl Fork { let blockchain = self.blockchain.clone(); let overlay = self.overlay.lock().unwrap().full_clone()?; let module = self.module.clone(); + let state_monotree = self.state_monotree.clone(); let proposals = self.proposals.clone(); let diffs = self.diffs.clone(); let mempool = self.mempool.clone(); @@ -871,6 +912,7 @@ impl Fork { blockchain, overlay, module, + state_monotree, proposals, diffs, mempool, @@ -878,4 +920,46 @@ impl Fork { hashes_rank, }) } + + /// Build current contract states checksums monotree. + pub fn compute_monotree(&mut self) -> Result<()> { + self.state_monotree = self.overlay.lock().unwrap().get_state_monotree()?; + Ok(()) + } + + /// Auxiliary function to check current contracts states checksums + /// Monotree(SMT) validity. + /// + /// Note: This should be executed on fresh forks and/or when + /// a fork doesn't contain changes over the last appended + // proposal. 
+ pub fn healthcheck(&self) -> Result<()> { + // Rebuild current contract states checksums monotree + let state_monotree = self.overlay.lock().unwrap().get_state_monotree()?; + + // Check that it matches forks' tree + let Some(state_root) = state_monotree.get_headroot()? else { + return Err(Error::ContractsStatesRootNotFoundError); + }; + let Some(fork_state_root) = self.state_monotree.get_headroot()? else { + return Err(Error::ContractsStatesRootNotFoundError); + }; + if state_root != fork_state_root { + return Err(Error::ContractsStatesRootError( + blake3::hash(&state_root).to_string(), + blake3::hash(&fork_state_root).to_string(), + )); + } + + // Check that the root matches last block header state root + let last_block_state_root = self.last_proposal()?.block.header.state_root; + if state_root != last_block_state_root { + return Err(Error::ContractsStatesRootError( + blake3::hash(&state_root).to_string(), + blake3::hash(&last_block_state_root).to_string(), + )); + } + + Ok(()) + } } diff --git a/src/validator/mod.rs b/src/validator/mod.rs index 27167cc5b..85d8cc0c7 100644 --- a/src/validator/mod.rs +++ b/src/validator/mod.rs @@ -414,7 +414,7 @@ impl Validator { /// block hash matches the expected header one. /// Note: this function should only be used for blocks received using a /// checkpoint, since in that case we enforce the node to follow the sequence, - /// assuming they all its blocks are valid. Additionally, it will update + /// assuming all its blocks are valid. Additionally, it will update /// any forks to a single empty one, holding the updated module. pub async fn add_checkpoint_blocks( &self, @@ -437,6 +437,9 @@ impl Validator { // Grab current PoW module to validate each block let mut module = self.consensus.module.read().await.clone(); + // Grab current contracts states monotree to validate each block + let mut state_monotree = overlay.lock().unwrap().get_state_monotree()?; + // Keep track of all blocks transactions to remove them from pending txs store let mut removed_txs = vec![]; @@ -448,7 +451,15 @@ impl Validator { // Validate and insert each block for (index, block) in blocks.iter().enumerate() { // Verify block - match verify_checkpoint_block(&overlay, block, &headers[index], module.target).await { + match verify_checkpoint_block( + &overlay, + &mut state_monotree, + block, + &headers[index], + module.target, + ) + .await + { Ok(()) => { /* Do nothing */ } // Skip already existing block Err(Error::BlockAlreadyExists(_)) => continue, @@ -536,6 +547,9 @@ impl Validator { // Grab current PoW module to validate each block let mut module = self.consensus.module.read().await.clone(); + // Grab current contracts states monotree to validate each block + let mut state_monotree = overlay.lock().unwrap().get_state_monotree()?; + // Keep track of all blocks transactions to remove them from pending txs store let mut removed_txs = vec![]; @@ -547,7 +561,16 @@ impl Validator { // Validate and insert each block for block in blocks { // Verify block - match verify_block(&overlay, &module, block, previous, self.verify_fees).await { + match verify_block( + &overlay, + &module, + &mut state_monotree, + block, + previous, + self.verify_fees, + ) + .await + { Ok(()) => { /* Do nothing */ } // Skip already existing block Err(Error::BlockAlreadyExists(_)) => { @@ -755,10 +778,23 @@ impl Validator { // Create a PoW module to validate each block let mut module = PoWModule::new(blockchain, pow_target, pow_fixed_difficulty, None)?; + // Grab current contracts states monotree to validate each 
block + let mut state_monotree = overlay.lock().unwrap().get_state_monotree()?; + // Validate and insert each block for block in &blocks[1..] { // Verify block - if verify_block(&overlay, &module, block, previous, self.verify_fees).await.is_err() { + if verify_block( + &overlay, + &module, + &mut state_monotree, + block, + previous, + self.verify_fees, + ) + .await + .is_err() + { error!(target: "validator::validate_blockchain", "Erroneous block found in set"); overlay.lock().unwrap().overlay.lock().unwrap().purge_new_trees()?; return Err(Error::BlockIsInvalid(block.hash().as_string())) diff --git a/src/validator/verification.rs b/src/validator/verification.rs index 7f65537a7..51eaa1e1f 100644 --- a/src/validator/verification.rs +++ b/src/validator/verification.rs @@ -20,9 +20,13 @@ use std::collections::HashMap; use darkfi_sdk::{ blockchain::block_version, - crypto::{schnorr::SchnorrPublic, ContractId, MerkleTree, PublicKey}, + crypto::{ + schnorr::{SchnorrPublic, Signature}, + ContractId, MerkleTree, PublicKey, + }, dark_tree::dark_forest_leaf_vec_integrity_check, deploy::DeployParamsV1, + monotree::Monotree, pasta::pallas, }; use darkfi_serial::{deserialize_async, serialize_async, AsyncDecodable, AsyncEncodable}; @@ -100,11 +104,29 @@ pub async fn verify_genesis_block( // Append producer transaction to the tree and check tree matches header one append_tx_to_merkle_tree(&mut tree, producer_tx); - if tree.root(0).unwrap() != block.header.root { + if tree.root(0).unwrap() != block.header.transactions_root { error!(target: "validator::verification::verify_genesis_block", "Genesis Merkle tree is invalid"); return Err(Error::BlockIsInvalid(block_hash)) } + // Verify header contracts states root + let state_monotree = overlay.lock().unwrap().get_state_monotree()?; + let Some(state_root) = state_monotree.get_headroot()? else { + return Err(Error::ContractsStatesRootNotFoundError); + }; + if state_root != block.header.state_root { + return Err(Error::ContractsStatesRootError( + blake3::hash(&state_root).to_string(), + blake3::hash(&block.header.state_root).to_string(), + )); + } + + // Genesis producer signature must be the Signature::dummy() one(empty) + if block.signature != Signature::dummy() { + error!(target: "validator::verification::verify_genesis_block", "Genesis producer signature is not dummy one"); + return Err(Error::InvalidSignature) + } + // Insert block overlay.lock().unwrap().add_block(block)?; @@ -175,6 +197,7 @@ pub fn validate_blockchain( pub async fn verify_block( overlay: &BlockchainOverlayPtr, module: &PoWModule, + state_monotree: &mut Monotree, block: &BlockInfo, previous: &BlockInfo, verify_fees: bool, @@ -227,11 +250,23 @@ pub async fn verify_block( .await?; // Verify transactions merkle tree root matches header one - if tree.root(0).unwrap() != block.header.root { + if tree.root(0).unwrap() != block.header.transactions_root { error!(target: "validator::verification::verify_block", "Block Merkle tree root is invalid"); return Err(Error::BlockIsInvalid(block_hash.as_string())) } + // Update the provided contracts states monotree and verify header contracts states root + overlay.lock().unwrap().contracts.update_state_monotree(state_monotree)?; + let Some(state_root) = state_monotree.get_headroot()? 
else { + return Err(Error::ContractsStatesRootNotFoundError); + }; + if state_root != block.header.state_root { + return Err(Error::ContractsStatesRootError( + blake3::hash(&state_root).to_string(), + blake3::hash(&block.header.state_root).to_string(), + )); + } + // Verify producer signature verify_producer_signature(block, &public_key)?; @@ -245,6 +280,7 @@ pub async fn verify_block( /// Verify given checkpoint [`BlockInfo`], and apply it to the provided overlay. pub async fn verify_checkpoint_block( overlay: &BlockchainOverlayPtr, + state_monotree: &mut Monotree, block: &BlockInfo, header: &HeaderHash, block_target: u32, @@ -292,11 +328,23 @@ pub async fn verify_checkpoint_block( .await?; // Verify transactions merkle tree root matches header one - if tree.root(0).unwrap() != block.header.root { + if tree.root(0).unwrap() != block.header.transactions_root { error!(target: "validator::verification::verify_checkpoint_block", "Block Merkle tree root is invalid"); return Err(Error::BlockIsInvalid(block_hash.as_string())) } + // Update the provided contracts states monotree and verify header contracts states root + overlay.lock().unwrap().contracts.update_state_monotree(state_monotree)?; + let Some(state_root) = state_monotree.get_headroot()? else { + return Err(Error::ContractsStatesRootNotFoundError); + }; + if state_root != block.header.state_root { + return Err(Error::ContractsStatesRootError( + blake3::hash(&state_root).to_string(), + blake3::hash(&block.header.state_root).to_string(), + )); + } + // Verify producer signature verify_producer_signature(block, &public_key)?; @@ -458,7 +506,7 @@ pub async fn verify_producer_transaction( /// Apply given producer [`Transaction`] to the provided overlay, without formal verification. /// Returns transaction signature public key. Additionally, append its hash to the provided Merkle tree. -async fn apply_producer_transaction( +pub async fn apply_producer_transaction( overlay: &BlockchainOverlayPtr, verifying_block_height: u32, block_target: u32, @@ -803,7 +851,7 @@ pub async fn verify_transaction( /// Apply given [`Transaction`] to the provided overlay. /// Additionally, append its hash to the provided Merkle tree. -async fn apply_transaction( +pub async fn apply_transaction( overlay: &BlockchainOverlayPtr, verifying_block_height: u32, block_target: u32, @@ -1021,15 +1069,22 @@ pub async fn verify_proposal( } // Check if proposal extends any existing forks - let (fork, index) = consensus.find_extended_fork(proposal).await?; + let (mut fork, index) = consensus.find_extended_fork(proposal).await?; // Grab overlay last block let previous = fork.overlay.lock().unwrap().last_block()?; // Verify proposal block (2) - if verify_block(&fork.overlay, &fork.module, &proposal.block, &previous, verify_fees) - .await - .is_err() + if verify_block( + &fork.overlay, + &fork.module, + &mut fork.state_monotree, + &proposal.block, + &previous, + verify_fees, + ) + .await + .is_err() { error!(target: "validator::verification::verify_proposal", "Erroneous proposal block found"); fork.overlay.lock().unwrap().overlay.lock().unwrap().purge_new_trees()?; @@ -1046,7 +1101,7 @@ pub async fn verify_proposal( /// 2. Block is valid /// Additional validity rules can be applied. 
pub async fn verify_fork_proposal( - fork: &Fork, + fork: &mut Fork, proposal: &Proposal, verify_fees: bool, ) -> Result<()> { @@ -1064,9 +1119,16 @@ pub async fn verify_fork_proposal( let previous = fork.overlay.lock().unwrap().last_block()?; // Verify proposal block (2) - if verify_block(&fork.overlay, &fork.module, &proposal.block, &previous, verify_fees) - .await - .is_err() + if verify_block( + &fork.overlay, + &fork.module, + &mut fork.state_monotree, + &proposal.block, + &previous, + verify_fees, + ) + .await + .is_err() { error!(target: "validator::verification::verify_fork_proposal", "Erroneous proposal block found"); fork.overlay.lock().unwrap().overlay.lock().unwrap().purge_new_trees()?;