chore: fix rustdoc broken doc links and warnings

Author: oars
Date:   2026-01-02 22:22:38 +03:00
Parent: 6c0dafbceb
Commit: e641ac09f4
12 changed files with 52 additions and 45 deletions

View File

@@ -94,7 +94,8 @@ impl Default for ExplorerConfig {
     }
 }

-/// Attempts to convert a [`PathBuff`] to an [`ExplorerConfig`] by loading and parsing from specified file path.
+/// Attempts to convert a [`PathBuf`] to an [`ExplorerConfig`] by
+/// loading and parsing from specified file path.
 impl TryFrom<&PathBuf> for ExplorerConfig {
     type Error = Error;
     fn try_from(path: &PathBuf) -> Result<Self> {
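With this impl in place, a config loads directly from a path through the standard `TryFrom` machinery. A minimal usage sketch (the file name is hypothetical, and `Result` is assumed to be the crate's alias over its `Error` type):

    use std::path::PathBuf;

    fn load_config() -> Result<ExplorerConfig> {
        // Reads and parses the file at `path`; I/O and parse failures
        // surface through the crate's `Error` type.
        let path = PathBuf::from("explorerd_config.toml");
        ExplorerConfig::try_from(&path)
    }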

View File

@@ -52,14 +52,15 @@ impl From<ExplorerdError> for Error {
     }
 }

-/// Conversion from [`ExplorerdRpcError`] to [`RpcError`]
+/// Conversion from [`ExplorerdError`] to [`RpcError`]
 impl From<ExplorerdError> for RpcError {
     fn from(err: ExplorerdError) -> Self {
         RpcError::ServerError(Arc::new(err))
     }
 }

-/// Helper function to convert `ExplorerdRpcError` into error code with corresponding error message.
+/// Helper function to convert `ExplorerdError` into error code with
+/// corresponding error message.
 pub fn to_error_code_message(e: &ExplorerdError) -> (i32, String) {
     match e {
         ExplorerdError::PingDarkfidFailed(_) => (ERROR_CODE_PING_DARKFID_FAILED, e.to_string()),
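Because of the `From` impl above, explorer code can bubble an `ExplorerdError` into the RPC layer with `?`, while `to_error_code_message` supplies the JSON-RPC code/message pair. A sketch of how the two fit together:

    fn build_rpc_error(err: ExplorerdError) -> (i32, String, RpcError) {
        // Code and message for the JSON-RPC error object...
        let (code, message) = to_error_code_message(&err);
        // ...and the wrapped server error produced by the From impl.
        (code, message, RpcError::from(err))
    }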

View File

@@ -118,9 +118,10 @@ impl ExplorerService {

     /// Adds the provided [`BlockInfo`] to the block explorer database.
     ///
-    /// This function processes each transaction in the block, calculating and updating the
-    /// latest [`GasMetrics`] for non-genesis blocks and for transactions that are not
-    /// PoW rewards. PoW reward transactions update the contract runtime state as required.
+    /// This function processes each transaction in the block, calculating
+    /// and updating the latest [`crate::store::metrics::GasMetrics`] for
+    /// non-genesis blocks and for transactions that are not PoW rewards.
+    /// PoW reward transactions update the contract runtime state as required.
     /// After processing all transactions, the block is permanently persisted to
     /// the explorer database.
     pub async fn put_block(&self, block: &BlockInfo) -> Result<()> {
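This hunk shows the pattern behind most of the commit: rustdoc resolves intra-doc links against the names in scope at the documented item, so a bare [`GasMetrics`] trips a broken-link warning in any module that doesn't import the type, while a fully qualified path always resolves. A minimal illustration:

    // Without a `use` bringing `GasMetrics` into scope at this module, the
    // first link below trips `rustdoc::broken_intra_doc_links`; the fully
    // qualified form always resolves.

    /// Compare: [`GasMetrics`] (breaks when out of scope) vs
    /// [`crate::store::metrics::GasMetrics`] (always resolves).
    pub fn doc_link_example() {}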
@@ -295,17 +296,19 @@ impl ExplorerService {
         Ok(block_records)
     }

-    /// Resets the [`ExplorerDb::blockchain::blocks`] and [`ExplorerDb::blockchain::transactions`]
-    /// trees to a specified height by removing entries above the `reset_height`, returning a result
-    /// that indicates success or failure.
+    /// Resets the `blocks` and `transactions` trees to a specified
+    /// height by removing entries above the `reset_height`,
+    /// returning a result that indicates success or failure.
     ///
-    /// The function retrieves the last explorer block and iteratively rolls back entries
-    /// in the [`BlockStore::main`], [`BlockStore::order`], and [`BlockStore::difficulty`] trees
-    /// to the specified `reset_height`. It also resets the [`TxStore::main`] and
-    /// [`TxStore::location`] trees to reflect the transaction state at the given height.
+    /// The function retrieves the last explorer block and iteratively
+    /// rolls back entries in the `main`, `order`, and `difficulty`
+    /// trees to the specified `reset_height`. It also resets the `main`
+    /// and `location` trees to reflect the transaction state at
+    /// the given height.
     ///
-    /// This operation is performed atomically using a sled transaction applied across the affected sled
-    /// trees, ensuring consistency and avoiding partial updates.
+    /// This operation is performed atomically using a sled transaction
+    /// applied across the affected sled trees, ensuring consistency and
+    /// avoiding partial updates.
     pub fn reset_to_height(&self, reset_height: u32) -> Result<()> {
         let block_store = &self.db.blockchain.blocks;
         let tx_store = &self.db.blockchain.transactions;
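The atomic multi-tree rollback the doc describes maps onto sled's `Transactional` trait, which runs one closure over several trees and commits or aborts them together. A generic sketch, not the explorer's actual code (the big-endian `u32` key encoding is an assumption for illustration):

    use sled::Transactional;

    fn rollback_above(
        blocks: &sled::Tree,
        txs: &sled::Tree,
        reset_height: u32,
    ) -> sled::transaction::TransactionResult<()> {
        (blocks, txs).transaction(|(blocks, txs)| {
            // Walk heights above the reset point until no entry remains;
            // both removals happen in the same transaction, so a crash can
            // never leave one tree rolled back and the other not.
            for height in (reset_height + 1).. {
                if blocks.remove(&height.to_be_bytes()[..])?.is_none() {
                    break;
                }
                txs.remove(&height.to_be_bytes()[..])?;
            }
            Ok(())
        })
    }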

View File

@@ -62,7 +62,7 @@ pub struct ContractMetaStore {
     /// Pointer to the underlying sled database used by the store and its associated overlay.
     pub sled_db: sled::Db,

-    /// Primary sled tree for storing contract metadata, utilizing [`ContractId::to_string`] as keys
+    /// Primary sled tree for storing contract metadata, utilizing [`ContractId`] as keys
     /// and serialized [`ContractMetaData`] as values.
     pub main: sled::Tree,
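The field docs pin down the tree's key/value scheme, so writes look roughly like this sketch (`meta_bytes` stands in for a pre-serialized `ContractMetaData`; the encoding itself is not shown in the diff):

    fn store_metadata(
        store: &ContractMetaStore,
        id: &ContractId,
        meta_bytes: &[u8], // pre-serialized ContractMetaData
    ) -> sled::Result<()> {
        // Keys are the contract ID's string form; values the encoded metadata.
        store.main.insert(id.to_string().as_bytes(), meta_bytes)?;
        Ok(())
    }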

View File

@@ -247,7 +247,8 @@ pub struct MetricsStore {
 // Temporarily disable unused warnings until the store is integrated with the explorer
 #[allow(dead_code)]
 impl MetricsStore {
-    /// Creates a [`MetricsStore`] instance by opening the necessary trees in the provided sled database [`Db`]
+    /// Creates a [`MetricsStore`] instance by opening the necessary
+    /// trees in the provided sled database [`sled::Db`]
     pub fn new(db: &sled::Db) -> Result<Self> {
         let main = db.open_tree(SLED_GAS_METRICS_TREE)?;
         let tx_gas_data = db.open_tree(SLED_TX_GAS_DATA_TREE)?;
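Construction is then a two-liner on top of an opened database. A sketch (the on-disk path is hypothetical, and the crate's `Error` is assumed to convert from `sled::Error` so `?` works):

    fn open_metrics_store() -> Result<MetricsStore> {
        // Opens (or creates) the sled database, then the metrics trees inside it.
        let db = sled::open("explorerd/metrics.db")?;
        MetricsStore::new(&db)
    }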
@@ -406,7 +407,8 @@ struct MetricsStoreOverlay {
 }

 impl MetricsStoreOverlay {
-    /// Instantiate a [`MetricsStoreOverlay`] over the provided [`SledDbPtr`] instance.
+    /// Instantiate a [`MetricsStoreOverlay`] over the provided
+    /// [`sled::Db`] instance.
     pub fn new(db: sled::Db) -> Result<Self> {
         // Create overlay pointer
         let overlay = Arc::new(Mutex::new(SledDbOverlay::new(&db, vec![])));

View File

@@ -173,9 +173,9 @@ pub async fn dht_refinery_task<H: DhtHandler>(handler: Arc<H>) -> Result<()> {
 /// If the bucket is already full, we ping the least recently seen node in the
 /// bucket: if successful it becomes the most recently seen node, if the ping
 /// fails we remove it and add the new node.
-/// [`Dht::update_node()`] increments a channel's usage count (in the direct
-/// session) and triggers this task. This task decrements the usage count
-/// using [`Dht::cleanup_channel()`].
+/// [`crate::dht::Dht::update_node()`] increments a channel's usage count
+/// (in the direct session) and triggers this task. This task decrements the
+/// usage count using [`crate::dht::Dht::cleanup_channel()`].
 pub async fn add_node_task<H: DhtHandler>(handler: Arc<H>) -> Result<()> {
     let dht = handler.dht();
     loop {
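The ping-or-evict policy in the first paragraph is the classic Kademlia bucket rule. A self-contained sketch of just that rule (`u64` node IDs and the bucket size are stand-ins, not this crate's types):

    use std::collections::VecDeque;

    const BUCKET_SIZE: usize = 20; // assumed k parameter

    fn add_node(bucket: &mut VecDeque<u64>, new_node: u64, ping_ok: impl Fn(u64) -> bool) {
        if bucket.len() < BUCKET_SIZE {
            // Room left: the new node simply becomes the most recently seen.
            bucket.push_back(new_node);
            return;
        }
        // Full bucket: the front is the least recently seen node.
        let oldest = bucket.pop_front().expect("bucket is non-empty");
        if ping_ok(oldest) {
            // Still alive: it moves to the back; the new node is dropped.
            bucket.push_back(oldest);
        } else {
            // Unresponsive: evicted in favour of the new node.
            bucket.push_back(new_node);
        }
    }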

View File

@@ -105,8 +105,8 @@ pub struct Channel {
     pub version: OnceCell<Arc<VersionMessage>>,
     /// Channel debug info
     pub info: ChannelInfo,

-    /// Map holding a `MeteringQueue` for each [`Message`] to perform
-    /// rate limiting of propagation towards the stream.
+    /// Map holding a `MeteringQueue` for each [`crate::net::Message`]
+    /// to perform rate limiting of propagation towards the stream.
     metering_map: AsyncMutex<HashMap<String, MeteringQueue>>,
 }
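The `MeteringQueue` API itself isn't shown in this diff, but a per-message-name rate limiter of this shape typically keeps a timestamp queue per key. A generic sketch with hypothetical `limit`/`window` parameters:

    use std::collections::{HashMap, VecDeque};
    use std::time::{Duration, Instant};

    struct Meter {
        queues: HashMap<String, VecDeque<Instant>>,
        limit: usize,     // max sends per window (hypothetical)
        window: Duration, // metering window (hypothetical)
    }

    impl Meter {
        /// Returns true when another send of `msg_name` is currently allowed.
        fn allow(&mut self, msg_name: &str) -> bool {
            let now = Instant::now();
            let q = self.queues.entry(msg_name.to_string()).or_default();
            // Drop timestamps that have fallen out of the rate window.
            while q.front().is_some_and(|t| now.duration_since(*t) > self.window) {
                q.pop_front();
            }
            if q.len() < self.limit {
                q.push_back(now);
                true
            } else {
                false
            }
        }
    }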

View File

@@ -63,30 +63,30 @@ use crate::{
 /// utilities.
 ///
 /// `HostColor`:
-/// White: Hosts that have passed the `GreylistRefinery` successfully.
+/// > `White`: Hosts that have passed the `GreylistRefinery` successfully.
 ///
-/// Gold: Hosts we have been able to establish a connection to in `OutboundSession`.
+/// > `Gold`: Hosts we have been able to establish a connection to in `OutboundSession`.
 ///
-/// Grey: Recently received hosts that are checked by the `GreylistRefinery` and
-/// upgraded to the whitelist if valid. If they're inaccessible by the Refinery
-/// they will be deleted.
+/// > `Grey`: Recently received hosts that are checked by the `GreylistRefinery` and
+/// > upgraded to the whitelist if valid. If they're inaccessible by the Refinery
+/// > they will be deleted.
 ///
-/// Black: hostile hosts that are strictly avoided for the duration of the program.
+/// > `Black`: hostile hosts that are strictly avoided for the duration of the program.
 ///
-/// Dark: hosts that do not match our transports, but that we continue to share with
-/// other peers. We do not keep darklist entries that are older than one day.
-/// This is to avoid peers propagating nodes that may be faulty. We assume that
-/// within the one day period, the nodes will be picked up by peers that accept
-/// the transports and can refine them to remove inactive peers. Dark list hosts
-/// are otherwise ignored.
+/// > `Dark`: hosts that do not match our transports, but that we continue to share with
+/// > other peers. We do not keep darklist entries that are older than one day.
+/// > This is to avoid peers propagating nodes that may be faulty. We assume that
+/// > within the one day period, the nodes will be picked up by peers that accept
+/// > the transports and can refine them to remove inactive peers. Dark list hosts
+/// > are otherwise ignored.
 ///
 /// `HostState`: a set of mutually exclusive states that can be Insert, Refine, Connect, Suspend
 /// or Connected. The state is `None` when the corresponding host has been removed from the
 /// HostRegistry.
-///
-/// TODO: Use HostState::Free `age` variable to implement a pruning logic that deletes peers from
-/// the registry once they have bypassed a certain age threshold.
+//TODO: Use HostState::Free `age` variable to implement a pruning logic that deletes peers from
+//the registry once they have bypassed a certain age threshold.

 // An array containing all possible local host strings
 // TODO: This could perhaps be more exhaustive?
 pub const LOCAL_HOST_STRS: [&str; 2] = ["localhost", "localhost.localdomain"];
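As a reading aid, the color classes described above map naturally onto an enum of this shape; a sketch for orientation only, not necessarily the crate's actual definition:

    enum HostColor {
        /// Passed the GreylistRefinery successfully.
        White,
        /// Successfully connected to in OutboundSession.
        Gold,
        /// Recently received; refined to White if valid, else deleted.
        Grey,
        /// Hostile; strictly avoided for the program's lifetime.
        Black,
        /// Transport mismatch; still shared with peers, expired after a day.
        Dark,
    }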

View File

@@ -33,7 +33,7 @@ use crate::runtime::vm_runtime::{ContractSection, Env};
 /// Adds data to merkle tree. The tree, database connection, and new data to add is
 /// read from `ptr` at offset specified by `len`.
 /// Returns `0` on success; otherwise, returns an error-code corresponding to a
-/// [`ContractError`] (defined in the SDK).
+/// [`darkfi_sdk::error::ContractError`] (defined in the SDK).
 /// See also the method `merkle_add` in `sdk/src/merkle.rs`.
 ///
 /// Permissions: update
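These host functions share one calling convention: an `i64` status where zero means success and each failure maps to a code derived from the SDK's `ContractError`. A generic illustration with made-up variants and code values:

    #[derive(Debug)]
    enum HostError {
        InvalidPointer,      // hypothetical variant
        SerializationFailed, // hypothetical variant
    }

    fn status_code(res: Result<(), HostError>) -> i64 {
        match res {
            Ok(()) => 0, // success
            Err(HostError::InvalidPointer) => -1,
            Err(HostError::SerializationFailed) => -2,
        }
    }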

View File

@@ -54,7 +54,7 @@ pub(crate) fn drk_log(mut ctx: FunctionEnvMut<Env>, ptr: WasmPtr<u8>, len: u32)
 /// The data will be read from `ptr` at a memory offset specified by `len`.
 ///
 /// Returns `SUCCESS` on success, otherwise returns an error code corresponding
-/// to a [`ContractError`].
+/// to a [`darkfi_sdk::error::ContractError`].
 ///
 /// Permissions: metadata, exec
 pub(crate) fn set_return_data(mut ctx: FunctionEnvMut<Env>, ptr: WasmPtr<u8>, len: u32) -> i64 {

View File

@@ -40,12 +40,12 @@ use crate::{
 ///
 /// Inside `db_info` we store:
 ///
-/// * The [latest root hash:32] under `root_key`.
+/// * The \[latest root hash:32\] under `root_key`.
 /// * The incremental merkle tree under `tree_key`.
 ///
 /// Inside `db_roots` we store:
 ///
-/// * All [merkle root:32]s as keys. The value is the current [tx_hash:32][call_idx:1].
+/// * All \[merkle root:32\]s as keys. The value is the current \[tx_hash:32\]\[call_idx:1\].
 /// If no new values are added, then the root key is updated to the current (tx_hash, call_idx).
 pub fn merkle_add(
     db_info: DbHandle,
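This hunk and the next fix a different rustdoc complaint than the rest of the commit: square brackets in doc comments are parsed as intra-doc link syntax, so byte-layout notation like `[latest root hash:32]` triggers a broken-link warning. Escaping the brackets keeps the notation verbatim:

    // The escaped form renders as literal brackets instead of becoming an
    // (unresolvable) intra-doc link target.
    /// * The \[latest root hash:32\] under `root_key`.
    pub fn layout_doc_example() {}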
@@ -85,11 +85,11 @@ pub fn merkle_add(
 ///
 /// Inside `db_info` we store:
 ///
-/// * The [latest root hash:32] under `root_key`.
+/// * The \[latest root hash:32\] under `root_key`.
 ///
 /// Inside `db_roots` we store:
 ///
-/// * All [merkle root:32]s as keys. The value is the current [tx_hash:32][call_idx:1].
+/// * All \[merkle root:32\]s as keys. The value is the current \[tx_hash:32\]\[call_idx:1\].
 /// If no new values are added, then the root key is updated to the current (tx_hash, call_idx).
 pub fn sparse_merkle_insert_batch(
     db_info: DbHandle,

View File

@@ -414,7 +414,7 @@ impl Validator {
     }

     /// Apply provided set of [`BlockInfo`] without doing formal verification.
-    /// A set of ['HeaderHash`] is also provided, to verify that the provided
+    /// A set of [`HeaderHash`] is also provided, to verify that the provided
     /// block hash matches the expected header one.
     /// Note: this function should only be used for blocks received using a
     /// checkpoint, since in that case we enforce the node to follow the sequence,
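The hash check this doc describes amounts to pairing each applied block with its expected header hash. A minimal sketch (assuming `BlockInfo::hash()` yields the block's `HeaderHash` and that `HeaderHash` is comparable):

    fn sequence_matches(blocks: &[BlockInfo], expected: &[HeaderHash]) -> bool {
        // Every provided block must hash to the checkpoint's expected header
        // hash at the same position in the sequence.
        blocks.len() == expected.len()
            && blocks.iter().zip(expected).all(|(b, h)| &b.hash() == h)
    }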