diff --git a/bin/darkfid2/src/rpc_blockchain.rs b/bin/darkfid2/src/rpc_blockchain.rs index 85bba853d..dc12ef22b 100644 --- a/bin/darkfid2/src/rpc_blockchain.rs +++ b/bin/darkfid2/src/rpc_blockchain.rs @@ -56,7 +56,7 @@ impl Darkfid { pub async fn merkle_roots(&self, id: Value, _params: &[Value]) -> JsonResult { let roots: Vec = match self.validator_state.read().await.blockchain.merkle_roots.get_all() { - Ok(v) => v.iter().map(|x| x.unwrap()).collect(), + Ok(v) => v, Err(e) => { error!("Failed getting merkle roots from rootstore: {}", e); return jsonrpc::error(InternalError, None, id).into() diff --git a/src/blockchain/blockstore.rs b/src/blockchain/blockstore.rs index cb07584ea..8e7d0cccc 100644 --- a/src/blockchain/blockstore.rs +++ b/src/blockchain/blockstore.rs @@ -1,8 +1,5 @@ -use log::debug; -use sled::Batch; - use crate::{ - consensus::{util::Timestamp, Block}, + consensus::{Block, Timestamp}, util::serial::{deserialize, serialize}, Error, Result, }; @@ -10,6 +7,9 @@ use crate::{ const SLED_BLOCK_TREE: &[u8] = b"_blocks"; const SLED_BLOCK_ORDER_TREE: &[u8] = b"_block_order"; +/// The `BlockStore` is a `sled` tree storing all the blockchain's blocks +/// where the key is the block's hash, and value is the serialized block. +#[derive(Clone)] pub struct BlockStore(sled::Tree); impl BlockStore { @@ -18,9 +18,10 @@ impl BlockStore { let tree = db.open_tree(SLED_BLOCK_TREE)?; let store = Self(tree); - // In case the store is empty, create the genesis block. + // In case the store is empty, initialize it with the genesis block. if store.0.is_empty() { - store.insert(&[Block::genesis_block(genesis_ts, genesis_data)])?; + let genesis_block = Block::genesis_block(genesis_ts, genesis_data); + store.insert(&[genesis_block])?; } Ok(store) @@ -29,12 +30,14 @@ impl BlockStore { /// Insert a slice of [`Block`] into the blockstore. With sled, the /// operation is done as a batch. 
/// The blocks are hashed with BLAKE3 and this blockhash is used as - /// the key, while value is the serialized block itself. + /// the key, while value is the serialized [`Block`] itself. + /// On success, the function returns the block hashes in the same order. pub fn insert(&self, blocks: &[Block]) -> Result> { let mut ret = Vec::with_capacity(blocks.len()); - let mut batch = Batch::default(); - for i in blocks { - let serialized = serialize(i); + let mut batch = sled::Batch::default(); + + for block in blocks { + let serialized = serialize(block); let blockhash = blake3::hash(&serialized); batch.insert(blockhash.as_bytes(), serialized); ret.push(blockhash); @@ -44,19 +47,26 @@ impl BlockStore { Ok(ret) } + /// Check if the blockstore contains a given blockhash. + pub fn contains(&self, blockhash: &blake3::Hash) -> Result { + Ok(self.0.contains_key(blockhash.as_bytes())?) + } + /// Fetch given blockhashes from the blockstore. - /// The resulting vector contains `Option` which is `Some` if the block - /// was found in the blockstore, and `None`, if it has not. + /// The resulting vector contains `Option`, which is `Some` if the block + /// was found in the blockstore, and otherwise it is `None`, if it has not. + /// The second parameter is a boolean which tells the function to fail in + /// case at least one block was not found. pub fn get(&self, blockhashes: &[blake3::Hash], strict: bool) -> Result>> { let mut ret = Vec::with_capacity(blockhashes.len()); - for i in blockhashes { - if let Some(found) = self.0.get(i.as_bytes())? { + for hash in blockhashes { + if let Some(found) = self.0.get(hash.as_bytes())? { let block = deserialize(&found)?; ret.push(Some(block)); } else { if strict { - let s = i.to_hex().as_str().to_string(); + let s = hash.to_hex().as_str().to_string(); return Err(Error::BlockNotFound(s)) } ret.push(None); @@ -66,50 +76,52 @@ impl BlockStore { Ok(ret) } - /// Check if the blockstore contains a given blockhash. 
- pub fn contains(&self, blockhash: blake3::Hash) -> Result { - Ok(self.0.contains_key(blockhash.as_bytes())?) - } - - /// Retrieve all blocks. + /// Retrieve all blocks from the blockstore in the form of a tuple + /// (`blockhash`, `block`). /// Be careful as this will try to load everything in memory. - pub fn get_all(&self) -> Result>> { + pub fn get_all(&self) -> Result> { let mut blocks = vec![]; + let iterator = self.0.into_iter().enumerate(); for (_, r) in iterator { let (k, v) = r.unwrap(); let hash_bytes: [u8; 32] = k.as_ref().try_into().unwrap(); let block = deserialize(&v)?; - blocks.push(Some((hash_bytes.into(), block))); + blocks.push((hash_bytes.into(), block)); } Ok(blocks) } } +/// The `BlockOrderStore` is a `sled` tree storing the order of the +/// blockchain's slots, where the key is the slot uid, and the value is +/// the block's hash. [`BlockStore`] can be queried with this hash. pub struct BlockOrderStore(sled::Tree); impl BlockOrderStore { - /// Opens a new or existing `BlockOderStore` on the given sled database. + /// Opens a new or existing `BlockOrderStore` on the given sled database. pub fn new(db: &sled::Db, genesis_ts: Timestamp, genesis_data: blake3::Hash) -> Result { let tree = db.open_tree(SLED_BLOCK_ORDER_TREE)?; let store = Self(tree); - // In case the store is empty, create the genesis block. + // In case the store is empty, initialize it with the genesis block. if store.0.is_empty() { - let block = Block::genesis_block(genesis_ts, genesis_data); - let blockhash = blake3::hash(&serialize(&block)); - store.insert(&[block.sl], &[blockhash])?; + let genesis_block = Block::genesis_block(genesis_ts, genesis_data); + let blockhash = blake3::hash(&serialize(&genesis_block)); + store.insert(&[genesis_block.sl], &[blockhash])?; } Ok(store) } - /// Insert a slice of slots and blockhashes into the store. - /// The block slot is used as the key, and the hash as value. + /// Insert a slice of slots and blockhashes into the store. 
With sled, the + /// operation is done as a batch. + /// The block slot is used as the key, and the blockhash is used as value. pub fn insert(&self, slots: &[u64], hashes: &[blake3::Hash]) -> Result<()> { assert_eq!(slots.len(), hashes.len()); - let mut batch = Batch::default(); + let mut batch = sled::Batch::default(); + for (i, sl) in slots.iter().enumerate() { batch.insert(&sl.to_be_bytes(), hashes[i].as_bytes()); } @@ -118,20 +130,27 @@ impl BlockOrderStore { Ok(()) } - /// Retrieve all hashes given slots. - pub fn get(&self, slots: &[u64], strict: bool) -> Result>> { - //let mut ret = Vec::with_capacity(slots.len()); - let mut ret = vec![]; + /// Check if the blockorderstore contains a given slot. + pub fn contains(&self, slot: u64) -> Result { + Ok(self.0.contains_key(slot.to_be_bytes())?) + } - for i in slots { - if let Some(found) = self.0.get(i.to_be_bytes())? { + /// Fetch given slots from the blockorderstore. + /// The resulting vector contains `Option`, which is `Some` if the slot + /// was found in the blockorderstore, and otherwise it is `None`, if it has not. + /// The second parameter is a boolean which tells the function to fail in + /// case at least one slot was not found. + pub fn get(&self, slots: &[u64], strict: bool) -> Result>> { + let mut ret = Vec::with_capacity(slots.len()); + + for slot in slots { + if let Some(found) = self.0.get(slot.to_be_bytes())? { + let hash_bytes: [u8; 32] = found.as_ref().try_into().unwrap(); let hash = blake3::Hash::from(hash_bytes); ret.push(Some(hash)); } else { if strict { - debug!("BlockOrderStore::get() Slot {} not found", i); - return Err(Error::SlotNotFound(*i)) + return Err(Error::SlotNotFound(*slot)) } ret.push(None); } @@ -140,7 +159,28 @@ impl BlockOrderStore { Ok(ret) } - /// Retrieve n hashes after given slot. + /// Retrieve all slots from the blockorderstore in the form of a tuple + /// (`slot`, `blockhash`). + /// Be careful as this will try to load everything in memory. 
+ pub fn get_all(&self) -> Result> { + let mut slots = vec![]; + + let iterator = self.0.into_iter().enumerate(); + for (_, r) in iterator { + let (k, v) = r.unwrap(); + let slot_bytes: [u8; 8] = k.as_ref().try_into().unwrap(); + let hash_bytes: [u8; 32] = v.as_ref().try_into().unwrap(); + let slot = u64::from_be_bytes(slot_bytes); + let hash = blake3::Hash::from(hash_bytes); + slots.push((slot, hash)); + } + + Ok(slots) + } + + /// Fetch n hashes after given slot. In the iteration, if a slot is not + /// found, the iteration stops and the function returns what it has found + /// so far in the `BlockOrderStore`. pub fn get_after(&self, slot: u64, n: u64) -> Result> { let mut ret = vec![]; @@ -153,42 +193,25 @@ impl BlockOrderStore { let block_hash = deserialize(&found.1)?; ret.push(block_hash); counter += 1; - } else { - break + continue } + break } Ok(ret) } - /// Retrieve the last block hash in the tree, based on the Ord - /// implementation for Vec. - pub fn get_last(&self) -> Result> { - if let Some(found) = self.0.last()? { - let slot_bytes: [u8; 8] = found.0.as_ref().try_into().unwrap(); - let hash_bytes: [u8; 32] = found.1.as_ref().try_into().unwrap(); - let slot = u64::from_be_bytes(slot_bytes); - let hash = blake3::Hash::from(hash_bytes); - return Ok(Some((slot, hash))) - } + /// Fetch the last block hash in the tree, based on the `Ord` + /// implementation for `Vec`. This should not be able to + /// fail because we initialize the store with the genesis block. + pub fn get_last(&self) -> Result<(u64, blake3::Hash)> { + let found = self.0.last()?.unwrap(); - Ok(None) - } + let slot_bytes: [u8; 8] = found.0.as_ref().try_into().unwrap(); + let hash_bytes: [u8; 32] = found.1.as_ref().try_into().unwrap(); + let slot = u64::from_be_bytes(slot_bytes); + let hash = blake3::Hash::from(hash_bytes); - /// Retrieve all block hashes. - /// Be careful as this will try to load everything in memory. 
- pub fn get_all(&self) -> Result>> { - let mut ret = vec![]; - let iterator = self.0.into_iter().enumerate(); - for (_, r) in iterator { - let (k, v) = r.unwrap(); - let slot_bytes: [u8; 8] = k.as_ref().try_into().unwrap(); - let hash_bytes: [u8; 32] = v.as_ref().try_into().unwrap(); - let slot = u64::from_be_bytes(slot_bytes); - let hash = blake3::Hash::from(hash_bytes); - ret.push(Some((slot, hash))); - } - - Ok(ret) + Ok((slot, hash)) } } diff --git a/src/blockchain/metadatastore.rs b/src/blockchain/metadatastore.rs index 93866040e..aa6ab3aec 100644 --- a/src/blockchain/metadatastore.rs +++ b/src/blockchain/metadatastore.rs @@ -1,5 +1,3 @@ -use sled::Batch; - use crate::{ consensus::{Block, StreamletMetadata, Timestamp}, util::serial::{deserialize, serialize}, @@ -8,14 +6,19 @@ use crate::{ const SLED_STREAMLET_METADATA_TREE: &[u8] = b"_streamlet_metadata"; +/// The `StreamletMetadataStore` is a `sled` tree storing all the blockchain's +/// blocks' metadata used by the Streamlet consensus protocol, where the key +/// is the block's hash, and the value is the serialized metadata. +#[derive(Clone)] pub struct StreamletMetadataStore(sled::Tree); impl StreamletMetadataStore { + /// Opens a new or existing `StreamletMetadataStore` on the given sled database. pub fn new(db: &sled::Db, genesis_ts: Timestamp, genesis_data: blake3::Hash) -> Result { let tree = db.open_tree(SLED_STREAMLET_METADATA_TREE)?; let store = Self(tree); - // In case the store is empty, add genesis metadata. + // In case the store is empty, initialize it with the genesis block. if store.0.is_empty() { let genesis_block = Block::genesis_block(genesis_ts, genesis_data); let genesis_hash = blake3::hash(&serialize(&genesis_block)); @@ -33,14 +36,14 @@ impl StreamletMetadataStore { Ok(store) } - /// Insert [`StreamletMetadata`] into the `MetadataStore`. - /// The blockhash for the metadata is used as the key, - /// where value is the serialized metadata. 
- pub fn insert(&self, blocks: &[blake3::Hash], metadatas: &[StreamletMetadata]) -> Result<()> { - assert_eq!(blocks.len(), metadatas.len()); - let mut batch = Batch::default(); + /// Insert a slice of blockhashes and respective metadata into the store. + /// With sled, the operation is done as a batch. + /// The block hash is used as the key, and the metadata is used as value. + pub fn insert(&self, hashes: &[blake3::Hash], metadatas: &[StreamletMetadata]) -> Result<()> { + assert_eq!(hashes.len(), metadatas.len()); + let mut batch = sled::Batch::default(); - for (i, hash) in blocks.iter().enumerate() { + for (i, hash) in hashes.iter().enumerate() { batch.insert(hash.as_bytes(), serialize(&metadatas[i])); } @@ -48,21 +51,30 @@ impl StreamletMetadataStore { Ok(()) } - /// Retrieve `StreamletMetadata` by given blockhashes. + /// Check if the metadata store contains a given block hash + pub fn contains(&self, hash: &blake3::Hash) -> Result { + Ok(self.0.contains_key(hash.as_bytes())?) + } + + /// Fetch given blockhashes from the store. The resulting vector contains + /// `Option`, which is `Some` if the metadata was found in the store, and + /// otherwise it is `None`, if it has not. The second parameter is a boolean + /// which tells the function to fail in case at least one blockhash was not + /// found. pub fn get( &self, - blockhashes: &[blake3::Hash], + hashes: &[blake3::Hash], strict: bool, ) -> Result>> { - let mut ret = Vec::with_capacity(blockhashes.len()); + let mut ret = Vec::with_capacity(hashes.len()); - for i in blockhashes { - if let Some(found) = self.0.get(i.as_bytes())? { + for hash in hashes { + if let Some(found) = self.0.get(hash.as_bytes())? 
{ let sm = deserialize(&found)?; ret.push(Some(sm)); } else { if strict { - let s = i.to_hex().as_str().to_string(); + let s = hash.to_hex().as_str().to_string(); return Err(Error::BlockMetadataNotFound(s)) } ret.push(None); @@ -72,18 +84,20 @@ impl StreamletMetadataStore { Ok(ret) } - /// Retrieve all `StreamletMetadata`. - /// Be careful as this will try to lead everything in memory. - pub fn get_all(&self) -> Result>> { - let mut metadata = vec![]; + /// Retrieve all metadata from the store in the form of a tuple + /// (`hash`, `metadata`). + /// Be careful as this will try to load everything in memory. + pub fn get_all(&self) -> Result> { + let mut ret = vec![]; + let iterator = self.0.into_iter().enumerate(); for (_, r) in iterator { let (k, v) = r.unwrap(); let hash_bytes: [u8; 32] = k.as_ref().try_into().unwrap(); let m = deserialize(&v)?; - metadata.push(Some((hash_bytes.into(), m))); + ret.push((hash_bytes.into(), m)); } - Ok(metadata) + Ok(ret) } } diff --git a/src/blockchain/mod.rs b/src/blockchain/mod.rs index 5053cf8b7..e92f3c616 100644 --- a/src/blockchain/mod.rs +++ b/src/blockchain/mod.rs @@ -3,7 +3,7 @@ use std::io; use log::debug; use crate::{ - consensus::{block::BlockInfo, util::Timestamp, Block, BlockProposal}, + consensus::{Block, BlockInfo, Timestamp}, impl_vec, util::serial::{Decodable, Encodable, ReadExt, VarInt, WriteExt}, Result, @@ -24,6 +24,7 @@ pub use rootstore::RootStore; pub mod txstore; pub use txstore::TxStore; +/// Structure holding all sled trees that comprise the concept of Blockchain. pub struct Blockchain { /// Blocks sled tree pub blocks: BlockStore, @@ -40,19 +41,24 @@ pub struct Blockchain { } impl Blockchain { + /// Instantiate a new `Blockchain` with the given `sled` database. 
pub fn new(db: &sled::Db, genesis_ts: Timestamp, genesis_data: blake3::Hash) -> Result { let blocks = BlockStore::new(db, genesis_ts, genesis_data)?; let order = BlockOrderStore::new(db, genesis_ts, genesis_data)?; - let transactions = TxStore::new(db)?; let streamlet_metadata = StreamletMetadataStore::new(db, genesis_ts, genesis_data)?; + let transactions = TxStore::new(db)?; let nullifiers = NullifierStore::new(db)?; let merkle_roots = RootStore::new(db)?; Ok(Self { blocks, order, transactions, streamlet_metadata, nullifiers, merkle_roots }) } - /// Batch insert [`BlockInfo`]s. - pub fn add(&mut self, blocks: &[BlockInfo]) -> Result> { + /// Insert a given slice of [`BlockInfo`] into the blockchain database. + /// This functions wraps all the logic of separating the block into specific + /// data that can be fed into the different trees of the database. + /// Upon success, the functions returns a vector of the block hashes that + /// were given and appended to the ledger. + pub fn add(&self, blocks: &[BlockInfo]) -> Result> { let mut ret = Vec::with_capacity(blocks.len()); for block in blocks { @@ -67,19 +73,35 @@ impl Blockchain { // Store block order self.order.insert(&[block.sl], &[blockhash[0]])?; - // Store Streamlet metadata + // Store streamlet metadata self.streamlet_metadata.insert(&[blockhash[0]], &[block.sm.clone()])?; + + // NOTE: The nullifiers and Merkle roots are applied in the state + // transition apply function. } Ok(ret) } - /// Retrieve blocks by given hashes. Fails if any of them are not found. - pub fn get_blocks_by_hash(&self, blockhashes: &[blake3::Hash]) -> Result> { - let mut ret = Vec::with_capacity(blockhashes.len()); + /// Check if the given [`BlockInfo`] is in the database and all trees. 
+ pub fn has_block(&self, block: &BlockInfo) -> Result { + let blockhash = match self.order.get(&[block.sl], true) { + Ok(v) => v[0].unwrap(), + Err(_) => return Ok(false), + }; - let blocks = self.blocks.get(blockhashes, true)?; - let metadata = self.streamlet_metadata.get(blockhashes, true)?; + // TODO: Check if we have all transactions + + // Check provided info produces the same hash + return Ok(blockhash == block.blockhash()) + } + + /// Retrieve [`BlockInfo`]s by given hashes. Fails if any of them are not found. + pub fn get_blocks_by_hash(&self, hashes: &[blake3::Hash]) -> Result> { + let mut ret = Vec::with_capacity(hashes.len()); + + let blocks = self.blocks.get(hashes, true)?; + let metadata = self.streamlet_metadata.get(hashes, true)?; for (i, block) in blocks.iter().enumerate() { let block = block.clone().unwrap(); @@ -95,7 +117,7 @@ impl Blockchain { Ok(ret) } - /// Retrieve blocks by given slots. + /// Retrieve [`BlockInfo`]s by given slots. Does not fail if any of them are not found. pub fn get_blocks_by_slot(&self, slots: &[u64]) -> Result> { debug!("get_blocks_by_slot(): {:?}", slots); let blockhashes = self.order.get(slots, false)?; @@ -108,35 +130,15 @@ impl Blockchain { self.get_blocks_by_hash(&hashes) } - /// Retrieve n blocks after start slot. + /// Retrieve n blocks after given start slot. pub fn get_blocks_after(&self, slot: u64, n: u64) -> Result> { - debug!("get_blocks_after(): {:?} - {:?}", slot, n); + debug!("get_blocks_after(): {} -> {}", slot, n); let hashes = self.order.get_after(slot, n)?; - self.get_blocks_by_hash(&hashes) } - /// Check if the given [`BlockInfo`] is in the database - pub fn has_block(&self, info: &BlockInfo) -> Result { - let hashes = match self.order.get(&[info.sl], true) { - Ok(v) => v, - Err(_) => return Ok(false), - }; - - if let Some(found) = &hashes[0] { - // Check provided info produces the same hash - // TODO: This BlockProposal::to_proposal_hash function should be in a better place. 
- let blockhash = - BlockProposal::to_proposal_hash(info.st, info.sl, &info.txs, &info.metadata); - - return Ok(&blockhash == found) - } - - Ok(false) - } - - /// Retrieve the last block slot and hash - pub fn last(&self) -> Result> { + /// Retrieve the last block slot and hash. + pub fn last(&self) -> Result<(u64, blake3::Hash)> { self.order.get_last() } } diff --git a/src/blockchain/nfstore.rs b/src/blockchain/nfstore.rs index 1c0f33975..3b4d1059f 100644 --- a/src/blockchain/nfstore.rs +++ b/src/blockchain/nfstore.rs @@ -1,5 +1,3 @@ -use sled::Batch; - use crate::{ crypto::nullifier::Nullifier, util::serial::{deserialize, serialize}, @@ -8,6 +6,10 @@ use crate::{ const SLED_NULLIFIER_TREE: &[u8] = b"_nullifiers"; +/// The `NullifierStore` is a `sled` tree storing all the nullifiers seen +/// in existing blocks. The key is the nullifier itself, while the value +/// is an empty vector that's not used. As a sidenote, perhaps we could +/// hold the transaction hash where the nullifier was seen in the value. #[derive(Clone)] pub struct NullifierStore(sled::Tree); @@ -18,31 +20,35 @@ impl NullifierStore { Ok(Self(tree)) } - /// Insert a slice of [`Nullifier`] into the nullifier store. - pub fn insert(&self, nullifiers: &[Nullifier]) -> Result<()> { - let mut batch = Batch::default(); - for i in nullifiers { - batch.insert(serialize(i), vec![] as Vec); + /// Insert a slice of [`Nullifier`] into the store. With sled, the + /// operation is done as a batch. The nullifier is used as a key, + /// while the value is an empty vector. + pub fn insert(&self, nfs: &[Nullifier]) -> Result<()> { + let mut batch = sled::Batch::default(); + + for nf in nfs { + batch.insert(serialize(nf), vec![] as Vec); } self.0.apply_batch(batch)?; Ok(()) } - /// Check whether given nullifier is in the database + /// Check if the nullifierstore contains a given nullifier. pub fn contains(&self, nullifier: &Nullifier) -> Result { Ok(self.0.contains_key(serialize(nullifier))?) 
} - /// Retrieve all nullifiers. + /// Retrieve all nullifiers from the store. /// Be careful as this will try to load everything in memory. - pub fn get_all(&self) -> Result>> { + pub fn get_all(&self) -> Result> { let mut nfs = vec![]; + let iterator = self.0.into_iter().enumerate(); for (_, r) in iterator { let (k, _) = r.unwrap(); let nullifier = deserialize(&k)?; - nfs.push(Some(nullifier)) + nfs.push(nullifier); } Ok(nfs) diff --git a/src/blockchain/rootstore.rs b/src/blockchain/rootstore.rs index bcb5aa02a..6b89e23de 100644 --- a/src/blockchain/rootstore.rs +++ b/src/blockchain/rootstore.rs @@ -1,5 +1,3 @@ -use sled::Batch; - use crate::{ crypto::merkle_node::MerkleNode, util::serial::{deserialize, serialize}, @@ -8,6 +6,9 @@ use crate::{ const SLED_ROOTS_TREE: &[u8] = b"_merkleroots"; +/// The `RootStore` is a `sled` tree storing all the Merkle roots seen +/// in existing blocks. The key is the Merkle root itself, while the value +/// is an empty vector that's not used. #[derive(Clone)] pub struct RootStore(sled::Tree); @@ -18,31 +19,35 @@ impl RootStore { Ok(Self(tree)) } - /// Insert a slice of [`MerkleNode`] on the given sled database. + /// Insert a slice of [`MerkleNode`] into the store. With sled, the + /// operation is done as a batch. The Merkle root is used as a key, + /// while the value is an empty vector. pub fn insert(&self, roots: &[MerkleNode]) -> Result<()> { - let mut batch = Batch::default(); - for i in roots { - batch.insert(serialize(i), vec![] as Vec); + let mut batch = sled::Batch::default(); + + for root in roots { + batch.insert(serialize(root), vec![] as Vec); } self.0.apply_batch(batch)?; Ok(()) } - /// Check whether given root is in the database + /// Check if the rootstore contains a given Merkle root. pub fn contains(&self, root: &MerkleNode) -> Result { Ok(self.0.contains_key(serialize(root))?) } - /// Retrieve all merkle roots. + /// Retrieve all Merkle roots from the store. 
/// Be careful as this will try to load everything in memory. - pub fn get_all(&self) -> Result>> { + pub fn get_all(&self) -> Result> { let mut roots = vec![]; + let iterator = self.0.into_iter().enumerate(); for (_, r) in iterator { let (k, _) = r.unwrap(); let root = deserialize(&k)?; - roots.push(Some(root)); + roots.push(root); } Ok(roots) diff --git a/src/blockchain/txstore.rs b/src/blockchain/txstore.rs index 6300e53e9..c7ac8edac 100644 --- a/src/blockchain/txstore.rs +++ b/src/blockchain/txstore.rs @@ -1,5 +1,3 @@ -use sled::Batch; - use crate::{ tx::Transaction, util::serial::{deserialize, serialize}, @@ -8,6 +6,10 @@ use crate::{ const SLED_TX_TREE: &[u8] = b"_transactions"; +/// The `TxStore` is a `sled` tree storing all the blockchain's +/// transactions where the key is the transaction hash, and the value is +/// the serialized transaction. +#[derive(Clone)] pub struct TxStore(sled::Tree); impl TxStore { @@ -19,13 +21,16 @@ impl TxStore { /// Insert a slice of [`Transaction`] into the txstore. With sled, the /// operation is done as a batch. - /// The transactions are hashed with BLAKE3 and this hash is - /// used as the key, while value is the serialized tx itself. - pub fn insert(&self, txs: &[Transaction]) -> Result> { - let mut ret = Vec::with_capacity(txs.len()); - let mut batch = Batch::default(); - for i in txs { - let serialized = serialize(i); + /// The transactions are hashed with BLAKE3 and this hash is used as + /// the key, while the value is the serialized [`Transaction`] itself. + /// On success, the function returns the transaction hashes in the same + /// order as the input transactions. 
+ pub fn insert(&self, transactions: &[Transaction]) -> Result> { + let mut ret = Vec::with_capacity(transactions.len()); + let mut batch = sled::Batch::default(); + + for tx in transactions { + let serialized = serialize(tx); let txhash = blake3::hash(&serialized); batch.insert(txhash.as_bytes(), serialized); ret.push(txhash); @@ -35,27 +40,26 @@ impl TxStore { Ok(ret) } - /// Check if the txstore contains a given transaction. - pub fn contains(&self, txid: blake3::Hash) -> Result { + /// Check if the txstore contains a given transaction hash. + pub fn contains(&self, txid: &blake3::Hash) -> Result { Ok(self.0.contains_key(txid.as_bytes())?) } - /// Fetch requested transactions from the txstore. The `strict` param - /// will make the function fail if a transaction has not been found. - pub fn get( - &self, - tx_hashes: &[blake3::Hash], - strict: bool, - ) -> Result>> { - let mut ret: Vec> = Vec::with_capacity(tx_hashes.len()); + /// Fetch given tx hashes from the txstore. + /// The resulting vector contains `Option`, which is `Some` if the tx + /// was found in the txstore, and otherwise it is `None`, if it has not. + /// The second parameter is a boolean which tells the function to fail in + /// case at least one transaction was not found. + pub fn get(&self, txids: &[blake3::Hash], strict: bool) -> Result>> { + let mut ret = Vec::with_capacity(txids.len()); - for i in tx_hashes { - if let Some(found) = self.0.get(i.as_bytes())? { + for txid in txids { + if let Some(found) = self.0.get(txid.as_bytes())? { + let tx = deserialize(&found)?; ret.push(Some(tx)); } else { if strict { - let s = i.to_hex().as_str().to_string(); + let s = txid.to_hex().as_str().to_string(); return Err(Error::TransactionNotFound(s)) } ret.push(None); @@ -65,16 +69,18 @@ impl TxStore { Ok(ret) } - /// Retrieve all transactions. + /// Retrieve all transactions from the txstore in the form of a tuple + /// (`tx_hash`, `tx`). /// Be careful as this will try to load everything in memory. 
- pub fn get_all(&self) -> Result>> { + pub fn get_all(&self) -> Result> { let mut txs = vec![]; + let iterator = self.0.into_iter().enumerate(); for (_, r) in iterator { let (k, v) = r.unwrap(); let hash_bytes: [u8; 32] = k.as_ref().try_into().unwrap(); let tx = deserialize(&v)?; - txs.push(Some((hash_bytes.into(), tx))); + txs.push((hash_bytes.into(), tx)); } Ok(txs) diff --git a/src/consensus/block.rs b/src/consensus/block.rs index 62e5b02b5..8328b755d 100644 --- a/src/consensus/block.rs +++ b/src/consensus/block.rs @@ -38,6 +38,11 @@ impl Block { Self::new(genesis_data, 0, vec![], metadata) } + + /// Calculate the block hash + pub fn blockhash(&self) -> blake3::Hash { + blake3::hash(&serialize(self)) + } } /// Auxiliary structure used for blockchain syncing. @@ -80,6 +85,19 @@ impl BlockInfo { ) -> Self { Self { st, sl, txs, metadata, sm } } + + /// Calculate the block hash + pub fn blockhash(&self) -> blake3::Hash { + let block: Block = self.clone().into(); + block.blockhash() + } +} + +impl From for Block { + fn from(b: BlockInfo) -> Self { + let txids = b.txs.iter().map(|x| blake3::hash(&serialize(x))).collect(); + Self { st: b.st, sl: b.sl, txs: txids, metadata: b.metadata } + } } impl net::Message for BlockInfo { diff --git a/src/consensus/proto/protocol_tx.rs b/src/consensus/proto/protocol_tx.rs index 57a0871eb..7c2463605 100644 --- a/src/consensus/proto/protocol_tx.rs +++ b/src/consensus/proto/protocol_tx.rs @@ -64,7 +64,7 @@ impl ProtocolTx { let tx_hash = blake3::hash(&serialize(&tx_copy)); let tx_in_txstore = - match self.state.read().await.blockchain.transactions.contains(tx_hash) { + match self.state.read().await.blockchain.transactions.contains(&tx_hash) { Ok(v) => v, Err(e) => { error!("handle_receive_tx(): Failed querying txstore: {}", e); diff --git a/src/consensus/state.rs b/src/consensus/state.rs index 29aff04a5..a8a557ab8 100644 --- a/src/consensus/state.rs +++ b/src/consensus/state.rs @@ -207,7 +207,7 @@ impl ValidatorState { return 
Ok(epoch) } - let (last_sl, _) = self.blockchain.last()?.unwrap(); + let (last_sl, _) = self.blockchain.last()?; Ok(last_sl) } @@ -327,7 +327,7 @@ impl ValidatorState { let hash = match longest_notarized_chain { Some(chain) => chain.proposals.last().unwrap().hash(), - None => self.blockchain.last()?.unwrap().1, + None => self.blockchain.last()?.1, }; Ok((hash, index)) @@ -476,7 +476,7 @@ impl ValidatorState { None => (), } - let (last_sl, last_block) = self.blockchain.last()?.unwrap(); + let (last_sl, last_block) = self.blockchain.last()?; if proposal.block.st != last_block || proposal.block.sl <= last_sl { debug!("find_extended_chain_index(): Proposal doesn't extend any known chain"); return Ok(-2) diff --git a/src/consensus/task/block_sync.rs b/src/consensus/task/block_sync.rs index de1203972..2251b944c 100644 --- a/src/consensus/task/block_sync.rs +++ b/src/consensus/task/block_sync.rs @@ -29,7 +29,7 @@ pub async fn block_sync_task(p2p: net::P2pPtr, state: ValidatorStatePtr) -> Resu // Node sends the last known block hash of the canonical blockchain // and loops until the response is the same block (used to utilize // batch requests). - let mut last = state.read().await.blockchain.last()?.unwrap(); + let mut last = state.read().await.blockchain.last()?; info!("Last known block: {:?} - {:?}", last.0, last.1); loop { @@ -63,7 +63,7 @@ pub async fn block_sync_task(p2p: net::P2pPtr, state: ValidatorStatePtr) -> Resu debug!("block_sync_task(): Appending blocks to ledger"); state.write().await.blockchain.add(&resp.blocks)?; - let last_received = state.read().await.blockchain.last()?.unwrap(); + let last_received = state.read().await.blockchain.last()?; info!("Last received block: {:?} - {:?}", last_received.0, last_received.1); if last == last_received {