diff --git a/bin/explorer/explorerd/src/config.rs b/bin/explorer/explorerd/src/config.rs
index 8b0cff3e6..bfe0f632a 100644
--- a/bin/explorer/explorerd/src/config.rs
+++ b/bin/explorer/explorerd/src/config.rs
@@ -94,7 +94,8 @@ impl Default for ExplorerConfig {
     }
 }
 
-/// Attempts to convert a [`PathBuff`] to an [`ExplorerConfig`] by loading and parsing from specified file path.
+/// Attempts to convert a [`PathBuf`] to an [`ExplorerConfig`] by
+/// loading and parsing from specified file path.
 impl TryFrom<&PathBuf> for ExplorerConfig {
     type Error = Error;
     fn try_from(path: &PathBuf) -> Result {
diff --git a/bin/explorer/explorerd/src/error.rs b/bin/explorer/explorerd/src/error.rs
index a2115f792..e728cc6b0 100644
--- a/bin/explorer/explorerd/src/error.rs
+++ b/bin/explorer/explorerd/src/error.rs
@@ -52,14 +52,15 @@ impl From for Error {
     }
 }
 
-/// Conversion from [`ExplorerdRpcError`] to [`RpcError`]
+/// Conversion from [`ExplorerdError`] to [`RpcError`]
 impl From for RpcError {
     fn from(err: ExplorerdError) -> Self {
         RpcError::ServerError(Arc::new(err))
     }
 }
 
-/// Helper function to convert `ExplorerdRpcError` into error code with corresponding error message.
+/// Helper function to convert `ExplorerdError` into error code with
+/// corresponding error message.
 pub fn to_error_code_message(e: &ExplorerdError) -> (i32, String) {
     match e {
         ExplorerdError::PingDarkfidFailed(_) => (ERROR_CODE_PING_DARKFID_FAILED, e.to_string()),
diff --git a/bin/explorer/explorerd/src/service/blocks.rs b/bin/explorer/explorerd/src/service/blocks.rs
index 604de05b6..3ca5418c6 100644
--- a/bin/explorer/explorerd/src/service/blocks.rs
+++ b/bin/explorer/explorerd/src/service/blocks.rs
@@ -118,9 +118,10 @@ impl ExplorerService {
 
     /// Adds the provided [`BlockInfo`] to the block explorer database.
     ///
-    /// This function processes each transaction in the block, calculating and updating the
-    /// latest [`GasMetrics`] for non-genesis blocks and for transactions that are not
-    /// PoW rewards. PoW reward transactions update the contract runtime state as required.
+    /// This function processes each transaction in the block, calculating
+    /// and updating the latest [`crate::store::metrics::GasMetrics`] for
+    /// non-genesis blocks and for transactions that are not PoW rewards.
+    /// PoW reward transactions update the contract runtime state as required.
     /// After processing all transactions, the block is permanently persisted to
     /// the explorer database.
     pub async fn put_block(&self, block: &BlockInfo) -> Result<()> {
@@ -295,17 +296,19 @@ impl ExplorerService {
         Ok(block_records)
     }
 
-    /// Resets the [`ExplorerDb::blockchain::blocks`] and [`ExplorerDb::blockchain::transactions`]
-    /// trees to a specified height by removing entries above the `reset_height`, returning a result
-    /// that indicates success or failure.
+    /// Resets the `blocks` and `transactions` trees to a specified
+    /// height by removing entries above the `reset_height`,
+    /// returning a result that indicates success or failure.
     ///
-    /// The function retrieves the last explorer block and iteratively rolls back entries
-    /// in the [`BlockStore::main`], [`BlockStore::order`], and [`BlockStore::difficulty`] trees
-    /// to the specified `reset_height`. It also resets the [`TxStore::main`] and
-    /// [`TxStore::location`] trees to reflect the transaction state at the given height.
+    /// The function retrieves the last explorer block and iteratively
+    /// rolls back entries in the `main`, `order`, and `difficulty`
+    /// trees to the specified `reset_height`. It also resets the `main`
+    /// and `location` trees to reflect the transaction state at
+    /// the given height.
     ///
-    /// This operation is performed atomically using a sled transaction applied across the affected sled
-    /// trees, ensuring consistency and avoiding partial updates.
+    /// This operation is performed atomically using a sled transaction
+    /// applied across the affected sled trees, ensuring consistency and
+    /// avoiding partial updates.
     pub fn reset_to_height(&self, reset_height: u32) -> Result<()> {
         let block_store = &self.db.blockchain.blocks;
         let tx_store = &self.db.blockchain.transactions;
diff --git a/bin/explorer/explorerd/src/store/contract_metadata.rs b/bin/explorer/explorerd/src/store/contract_metadata.rs
index fef0660c3..d65257304 100644
--- a/bin/explorer/explorerd/src/store/contract_metadata.rs
+++ b/bin/explorer/explorerd/src/store/contract_metadata.rs
@@ -62,7 +62,7 @@ pub struct ContractMetaStore {
     /// Pointer to the underlying sled database used by the store and its associated overlay.
     pub sled_db: sled::Db,
 
-    /// Primary sled tree for storing contract metadata, utilizing [`ContractId::to_string`] as keys
+    /// Primary sled tree for storing contract metadata, utilizing [`ContractId`] as keys
     /// and serialized [`ContractMetaData`] as values.
     pub main: sled::Tree,
 
diff --git a/bin/explorer/explorerd/src/store/metrics.rs b/bin/explorer/explorerd/src/store/metrics.rs
index 387fb2f94..b88a0e71a 100644
--- a/bin/explorer/explorerd/src/store/metrics.rs
+++ b/bin/explorer/explorerd/src/store/metrics.rs
@@ -247,7 +247,8 @@ pub struct MetricsStore {
 // Temporarily disable unused warnings until the store is integrated with the explorer
 #[allow(dead_code)]
 impl MetricsStore {
-    /// Creates a [`MetricsStore`] instance by opening the necessary trees in the provided sled database [`Db`]
+    /// Creates a [`MetricsStore`] instance by opening the necessary
+    /// trees in the provided sled database [`sled::Db`]
     pub fn new(db: &sled::Db) -> Result {
         let main = db.open_tree(SLED_GAS_METRICS_TREE)?;
         let tx_gas_data = db.open_tree(SLED_TX_GAS_DATA_TREE)?;
@@ -406,7 +407,8 @@ struct MetricsStoreOverlay {
 }
 
 impl MetricsStoreOverlay {
-    /// Instantiate a [`MetricsStoreOverlay`] over the provided [`SledDbPtr`] instance.
+    /// Instantiate a [`MetricsStoreOverlay`] over the provided
+    /// [`sled::Db`] instance.
     pub fn new(db: sled::Db) -> Result {
         // Create overlay pointer
         let overlay = Arc::new(Mutex::new(SledDbOverlay::new(&db, vec![])));
diff --git a/src/dht/tasks.rs b/src/dht/tasks.rs
index befc2c113..4770b8787 100644
--- a/src/dht/tasks.rs
+++ b/src/dht/tasks.rs
@@ -173,9 +173,9 @@ pub async fn dht_refinery_task(handler: Arc) -> Result<()> {
 /// If the bucket is already full, we ping the least recently seen node in the
 /// bucket: if successful it becomes the most recently seen node, if the ping
 /// fails we remove it and add the new node.
-/// [`Dht::update_node()`] increments a channel's usage count (in the direct
-/// session) and triggers this task. This task decrements the usage count
-/// using [`Dht::cleanup_channel()`].
+/// [`crate::dht::Dht::update_node()`] increments a channel's usage count
+/// (in the direct session) and triggers this task. This task decrements the
+/// usage count using [`crate::dht::Dht::cleanup_channel()`].
 pub async fn add_node_task(handler: Arc) -> Result<()> {
     let dht = handler.dht();
     loop {
diff --git a/src/net/channel.rs b/src/net/channel.rs
index 3ebf3f844..900b24f1d 100644
--- a/src/net/channel.rs
+++ b/src/net/channel.rs
@@ -105,8 +105,8 @@ pub struct Channel {
     pub version: OnceCell>,
     /// Channel debug info
     pub info: ChannelInfo,
-    /// Map holding a `MeteringQueue` for each [`Message`] to perform
-    /// rate limiting of propagation towards the stream.
+    /// Map holding a `MeteringQueue` for each [`crate::net::Message`]
+    /// to perform rate limiting of propagation towards the stream.
     metering_map: AsyncMutex>,
 }
 
diff --git a/src/net/hosts.rs b/src/net/hosts.rs
index 87d847dc8..1b249aac9 100644
--- a/src/net/hosts.rs
+++ b/src/net/hosts.rs
@@ -63,30 +63,30 @@ use crate::{
 /// utilities.
 ///
 /// `HostColor`:
-/// White: Hosts that have passed the `GreylistRefinery` successfully.
+/// > `White`: Hosts that have passed the `GreylistRefinery` successfully.
 ///
-/// Gold: Hosts we have been able to establish a connection to in `OutboundSession`.
+/// > `Gold`: Hosts we have been able to establish a connection to in `OutboundSession`.
 ///
-/// Grey: Recently received hosts that are checked by the `GreylistRefinery` and
-/// upgraded to the whitelist if valid. If they're inaccessible by the Refinery
-/// they will be deleted.
+/// > `Grey`: Recently received hosts that are checked by the `GreylistRefinery` and
+/// > upgraded to the whitelist if valid. If they're inaccessible by the Refinery
+/// > they will be deleted.
 ///
-/// Black: hostile hosts that are strictly avoided for the duration of the program.
+/// > `Black`: hostile hosts that are strictly avoided for the duration of the program.
 ///
-/// Dark: hosts that do not match our transports, but that we continue to share with
-/// other peers. We do not keep darklist entries that are older than one day.
-/// This is to avoid peers propagating nodes that may be faulty. We assume that
-/// within the one day period, the nodes will be picked up by peers that accept
-/// the transports and can refine them to remove inactive peers. Dark list hosts
-/// are otherwise ignored.
+/// > `Dark`: hosts that do not match our transports, but that we continue to share with
+/// > other peers. We do not keep darklist entries that are older than one day.
+/// > This is to avoid peers propagating nodes that may be faulty. We assume that
+/// > within the one day period, the nodes will be picked up by peers that accept
+/// > the transports and can refine them to remove inactive peers. Dark list hosts
+/// > are otherwise ignored.
 ///
 /// `HostState`: a set of mutually exclusive states that can be Insert, Refine, Connect, Suspend
 /// or Connected. The state is `None` when the corresponding host has been removed from the
 /// HostRegistry.
 ///
-/// TODO: Use HostState::Free `age` variable to implement a pruning logic that deletes peers from
-/// the registry once they have bypassed a certain age threshold.
-///
+//TODO: Use HostState::Free `age` variable to implement a pruning logic that deletes peers from
+//the registry once they have bypassed a certain age threshold.
+
 // An array containing all possible local host strings
 // TODO: This could perhaps be more exhaustive?
 pub const LOCAL_HOST_STRS: [&str; 2] = ["localhost", "localhost.localdomain"];
diff --git a/src/runtime/import/merkle.rs b/src/runtime/import/merkle.rs
index e15e9552f..ac62b8d76 100644
--- a/src/runtime/import/merkle.rs
+++ b/src/runtime/import/merkle.rs
@@ -33,7 +33,7 @@ use crate::runtime::vm_runtime::{ContractSection, Env};
 /// Adds data to merkle tree. The tree, database connection, and new data to add is
 /// read from `ptr` at offset specified by `len`.
 /// Returns `0` on success; otherwise, returns an error-code corresponding to a
-/// [`ContractError`] (defined in the SDK).
+/// [`darkfi_sdk::error::ContractError`] (defined in the SDK).
 /// See also the method `merkle_add` in `sdk/src/merkle.rs`.
 ///
 /// Permissions: update
diff --git a/src/runtime/import/util.rs b/src/runtime/import/util.rs
index 28d67589c..1c9702440 100644
--- a/src/runtime/import/util.rs
+++ b/src/runtime/import/util.rs
@@ -54,7 +54,7 @@ pub(crate) fn drk_log(mut ctx: FunctionEnvMut, ptr: WasmPtr, len: u32)
 /// The data will be read from `ptr` at a memory offset specified by `len`.
 ///
 /// Returns `SUCCESS` on success, otherwise returns an error code corresponding
-/// to a [`ContractError`].
+/// to a [`darkfi_sdk::error::ContractError`].
 ///
 /// Permissions: metadata, exec
 pub(crate) fn set_return_data(mut ctx: FunctionEnvMut, ptr: WasmPtr, len: u32) -> i64 {
diff --git a/src/sdk/src/wasm/merkle.rs b/src/sdk/src/wasm/merkle.rs
index 02435b67d..aaa325da4 100644
--- a/src/sdk/src/wasm/merkle.rs
+++ b/src/sdk/src/wasm/merkle.rs
@@ -40,12 +40,12 @@ use crate::{
 ///
 /// Inside `db_info` we store:
 ///
-/// * The [latest root hash:32] under `root_key`.
+/// * The \[latest root hash:32\] under `root_key`.
 /// * The incremental merkle tree under `tree_key`.
 ///
 /// Inside `db_roots` we store:
 ///
-/// * All [merkle root:32]s as keys. The value is the current [tx_hash:32][call_idx:1].
+/// * All \[merkle root:32\]s as keys. The value is the current \[tx_hash:32\]\[call_idx:1\].
 /// If no new values are added, then the root key is updated to the current (tx_hash, call_idx).
 pub fn merkle_add(
     db_info: DbHandle,
@@ -85,11 +85,11 @@ pub fn merkle_add(
 ///
 /// Inside `db_info` we store:
 ///
-/// * The [latest root hash:32] under `root_key`.
+/// * The \[latest root hash:32\] under `root_key`.
 ///
 /// Inside `db_roots` we store:
 ///
-/// * All [merkle root:32]s as keys. The value is the current [tx_hash:32][call_idx:1].
+/// * All \[merkle root:32\]s as keys. The value is the current \[tx_hash:32\]\[call_idx:1\].
 /// If no new values are added, then the root key is updated to the current (tx_hash, call_idx).
 pub fn sparse_merkle_insert_batch(
     db_info: DbHandle,
diff --git a/src/validator/mod.rs b/src/validator/mod.rs
index c197e4758..a39aac682 100644
--- a/src/validator/mod.rs
+++ b/src/validator/mod.rs
@@ -414,7 +414,7 @@ impl Validator {
     }
 
     /// Apply provided set of [`BlockInfo`] without doing formal verification.
-    /// A set of ['HeaderHash`] is also provided, to verify that the provided
+    /// A set of [`HeaderHash`] is also provided, to verify that the provided
     /// block hash matches the expected header one.
     /// Note: this function should only be used for blocks received using a
     /// checkpoint, since in that case we enforce the node to follow the sequence,