validator: chore format comments to match style

This commit is contained in:
skoupidi
2026-02-11 16:01:14 +02:00
parent fb11efb45d
commit cc782bfea2
7 changed files with 213 additions and 143 deletions

View File

@@ -43,7 +43,8 @@ use crate::{
/// Gas limit for total block transactions(50 full transactions).
pub const BLOCK_GAS_LIMIT: u64 = GAS_LIMIT * MAX_TX_CALLS as u64 * 50;
/// This struct represents the information required by the consensus algorithm
/// This struct represents the information required by the consensus
/// algorithm.
pub struct Consensus {
/// Canonical (confirmed) blockchain
pub blockchain: Blockchain,
@@ -182,10 +183,11 @@ impl Consensus {
Ok(())
}
/// Given a proposal, find the fork chain it extends, and return its full clone.
/// If the proposal extends the fork not on its tail, a new fork is created and
/// we re-apply the proposals up to the extending one. If proposal extends canonical,
/// a new fork is created. Additionally, we return the fork index if a new fork
/// Given a proposal, find the fork chain it extends, and return
/// its full clone. If the proposal extends the fork not on its
/// tail, a new fork is created and we re-apply the proposals up to
/// the extending one. If proposal extends canonical, a new fork is
/// created. Additionally, we return the fork index if a new fork
/// was not created, so caller can replace the fork.
pub async fn find_extended_fork(&self, proposal: &Proposal) -> Result<(Fork, Option<usize>)> {
// Check if proposal extends any fork
@@ -252,12 +254,13 @@ impl Consensus {
/// Check if best fork proposals can be confirmed.
/// Consensus confirmation logic:
/// - If the current best fork has reached greater length than the security threshold,
/// and no other fork exist with same rank, first proposal(s) in that fork can be
/// appended to canonical/confirmed blockchain.
/// - If the current best fork has reached greater length than the
/// security threshold, and no other fork exists with the same rank,
/// first proposal(s) in that fork can be appended to
/// canonical/confirmed blockchain.
///
/// When best fork can be confirmed, first block(s) should be appended to canonical,
/// and forks should be rebuilt.
/// When best fork can be confirmed, first block(s) should be
/// appended to canonical, and forks should be rebuilt.
pub async fn confirmation(&self) -> Result<Option<usize>> {
debug!(target: "validator::consensus::confirmation", "Started confirmation check");
@@ -292,8 +295,8 @@ impl Consensus {
Ok(Some(index))
}
/// Auxiliary function to find the index of a fork containing the provided
/// header hash in its proposals.
/// Auxiliary function to find the index of a fork containing the
/// provided header hash in its proposals.
fn find_fork_by_header(&self, fork_header: &HeaderHash) -> Option<usize> {
for (index, fork) in self.forks.iter().enumerate() {
for p in fork.proposals.iter().rev() {
@@ -305,8 +308,8 @@ impl Consensus {
None
}
/// Auxiliary function to retrieve the fork header hash of provided height.
/// The fork is identified by the provided header hash.
/// Auxiliary function to retrieve the fork header hash of provided
/// height. The fork is identified by the provided header hash.
pub async fn get_fork_header_hash(
&self,
height: u32,
@@ -322,9 +325,9 @@ impl Consensus {
Ok(header)
}
/// Auxiliary function to retrieve the fork headers of provided hashes.
/// The fork is identified by the provided header hash. If fork doesn't
/// exists, an empty vector is returned.
/// Auxiliary function to retrieve the fork headers of provided
/// hashes. The fork is identified by the provided header hash. If
/// fork doesn't exist, an empty vector is returned.
pub async fn get_fork_headers(
&self,
headers: &[HeaderHash],
@@ -339,9 +342,9 @@ impl Consensus {
Ok(headers)
}
/// Auxiliary function to retrieve the fork proposals of provided hashes.
/// The fork is identified by the provided header hash. If fork doesn't
/// exists, an empty vector is returned.
/// Auxiliary function to retrieve the fork proposals of provided
/// hashes. The fork is identified by the provided header hash. If
/// fork doesn't exist, an empty vector is returned.
pub async fn get_fork_proposals(
&self,
headers: &[HeaderHash],
@@ -360,10 +363,11 @@ impl Consensus {
Ok(proposals)
}
/// Auxiliary function to retrieve a fork proposals, starting from provided tip.
/// If provided tip is too far behind, unknown, or fork doesn't exists, an empty
/// vector is returned. The fork is identified by the optional provided header hash.
/// If its `None`, we use our best fork.
/// Auxiliary function to retrieve a fork's proposals, starting from
/// provided tip. If provided tip is too far behind, unknown, or
/// fork doesn't exist, an empty vector is returned. The fork is
/// identified by the optional provided header hash. If it's `None`,
/// we use our best fork.
pub async fn get_fork_proposals_after(
&self,
tip: HeaderHash,
@@ -530,13 +534,15 @@ impl Consensus {
let mut iter = keep.iter();
self.forks.retain(|_| *iter.next().unwrap());
// Remove confirmed proposals txs from the unporposed txs sled tree
// Remove confirmed proposals txs from the unproposed txs sled
// tree.
self.blockchain.remove_pending_txs_hashes(&confirmed_txs_hashes)?;
Ok(())
}
/// Auxiliary function to fully purge current forks and leave only a new empty fork.
/// Auxiliary function to fully purge current forks and leave only
/// a new empty fork.
pub async fn purge_forks(&mut self) -> Result<()> {
debug!(target: "validator::consensus::purge_forks", "Purging current forks...");
self.forks = vec![Fork::new(self.blockchain.clone(), self.module.clone()).await?];
@@ -721,7 +727,8 @@ impl Fork {
})
}
/// Auxiliary function to append a proposal and update current fork rank.
/// Auxiliary function to append a proposal and update current fork
/// rank.
pub async fn append_proposal(&mut self, proposal: &Proposal) -> Result<()> {
// Grab next mine target and difficulty
let (next_target, next_difficulty) = self.module.next_mine_target_and_difficulty()?;
@@ -799,17 +806,20 @@ impl Fork {
let mut total_gas_used = 0_u64;
let mut total_gas_paid = 0_u64;
// Map of ZK proof verifying keys for the current transaction batch
// Map of ZK proof verifying keys for the current transaction
// batch.
let mut vks: HashMap<[u8; 32], HashMap<String, VerifyingKey>> = HashMap::new();
// Grab all current proposals transactions hashes
let proposals_txs = self.overlay.lock().unwrap().get_blocks_txs_hashes(&self.proposals)?;
// Iterate through all pending transactions in the forks' mempool
// Iterate through all pending transactions in the forks'
// mempool.
let mut unproposed_txs = vec![];
let mut erroneous_txs = vec![];
for tx in &self.mempool {
// If the hash is contained in the proposals transactions vec, skip it
// If the hash is contained in the proposals transactions
// vec, skip it.
if proposals_txs.contains(tx) {
continue
}
@@ -857,7 +867,8 @@ impl Fork {
// Calculate current accumulated gas usage
let accumulated_gas_usage = total_gas_used.saturating_add(tx_gas_used);
// Check gas limit - if accumulated gas used exceeds it, break out of loop
// Check gas limit - if accumulated gas used exceeds it,
// break out of loop.
if accumulated_gas_usage > BLOCK_GAS_LIMIT {
warn!(
target: "validator::consensus::unproposed_txs",

View File

@@ -21,7 +21,8 @@ use darkfi_serial::{async_trait, SerialDecodable, SerialEncodable};
use crate::zkas::{Opcode, VarType, ZkBinary};
/// Fixed fee for verifying Schnorr signatures using the Pallas elliptic curve
/// Fixed fee for verifying Schnorr signatures using the Pallas
/// elliptic curve.
pub const PALLAS_SCHNORR_SIGNATURE_FEE: u64 = 1000;
/// Calculate the gas use for verifying a given zkas circuit.
@@ -32,7 +33,8 @@ pub fn circuit_gas_use(zkbin: &ZkBinary) -> u64 {
// Constants each with a cost of 10
accumulator = accumulator.saturating_add(10u64.saturating_mul(zkbin.constants.len() as u64));
// Literals each with a cost of 10 (for now there's only 1 type of literal)
// Literals each with a cost of 10 (for now there's only 1 type of
// literal).
accumulator = accumulator.saturating_add(10u64.saturating_mul(zkbin.literals.len() as u64));
// Witnesses have cost by type
@@ -96,10 +98,11 @@ pub fn circuit_gas_use(zkbin: &ZkBinary) -> u64 {
accumulator
}
/// Auxiliary struct representing the full gas usage breakdown of a transaction.
/// Auxiliary struct representing the full gas usage breakdown of a
/// transaction.
///
/// This data is used for accounting of fees, providing details relating to
/// resource consumption across different transactions.
/// This data is used for accounting of fees, providing details
/// relating to resource consumption across different transactions.
#[derive(Default, Clone, Eq, PartialEq, SerialEncodable, SerialDecodable)]
pub struct GasData {
/// Wasm calls gas consumption
@@ -115,7 +118,8 @@ pub struct GasData {
}
impl GasData {
/// Calculates the total gas used by summing all individual gas usage fields.
/// Calculates the total gas used by summing all individual gas
/// usage fields.
pub fn total_gas_used(&self) -> u64 {
self.wasm
.saturating_add(self.zk_circuits)
@@ -124,7 +128,8 @@ impl GasData {
}
}
/// Implements custom debug trait to include [`GasData::total_gas_used`].
/// Implements custom debug trait to include
/// [`GasData::total_gas_used`].
impl std::fmt::Debug for GasData {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("GasData")

View File

@@ -101,7 +101,8 @@ impl Validator {
info!(target: "validator::new", "Initializing Blockchain");
let blockchain = Blockchain::new(db)?;
// Create an overlay over whole blockchain so we can write stuff
// Create an overlay over whole blockchain so we can write
// stuff.
let overlay = BlockchainOverlay::new(&blockchain)?;
// Deploy native wasm contracts
@@ -201,7 +202,8 @@ impl Validator {
let tx_vec = [tx.clone()];
let mut valid = false;
// Iterate over node forks to verify transaction validity in their overlays
// Iterate over node forks to verify transaction validity in
// their overlays.
for fork in self.consensus.forks.iter_mut() {
// Clone fork state
let fork_clone = fork.full_clone()?;
@@ -270,7 +272,8 @@ impl Validator {
let tx_vec = [tx.clone()];
let mut valid = false;
// Iterate over node forks to verify transaction validity in their overlays
// Iterate over node forks to verify transaction validity
// in their overlays.
for fork in self.consensus.forks.iter_mut() {
// Clone fork state
let fork_clone = fork.full_clone()?;
@@ -303,7 +306,8 @@ impl Validator {
fork.mempool.retain(|x| *x != tx_hash);
}
// Remove pending transaction if it's not valid for canonical or any fork
// Remove pending transaction if it's not valid for
// canonical or any fork.
if !valid {
removed_txs.push(tx)
}
@@ -418,10 +422,12 @@ impl Validator {
// Grab current PoW module to validate each block
let mut module = self.consensus.module.clone();
// Keep track of all blocks transactions to remove them from pending txs store
// Keep track of all blocks transactions to remove them from
// pending txs store.
let mut removed_txs = vec![];
// Keep track of all block database state diffs and their inverse
// Keep track of all block database state diffs and their
// inverse.
let mut diffs_heights = vec![];
let mut diffs = vec![];
let mut inverse_diffs = vec![];
@@ -520,10 +526,12 @@ impl Validator {
// Grab current PoW module to validate each block
let mut module = self.consensus.module.clone();
// Keep track of all blocks transactions to remove them from pending txs store
// Keep track of all blocks transactions to remove them from
// pending txs store.
let mut removed_txs = vec![];
// Keep track of all block database state diffs and their inverse
// Keep track of all block database state diffs and their
// inverse.
let mut diffs_heights = vec![];
let mut diffs = vec![];
let mut inverse_diffs = vec![];
@@ -603,7 +611,8 @@ impl Validator {
// Store the block diffs
self.blockchain.blocks.insert_state_inverse_diff(&diffs_heights, &inverse_diffs)?;
// Purge pending erroneous txs since canonical state has been changed
// Purge pending erroneous txs since canonical state has been
// changed.
self.blockchain.remove_pending_txs(&removed_txs)?;
self.purge_pending_txs().await?;
@@ -812,7 +821,8 @@ impl Validator {
self.consensus.best_current_fork().await
}
/// Auxiliary function to retrieve current best fork next block height.
/// Auxiliary function to retrieve current best fork next block
/// height.
pub async fn best_fork_next_block_height(&self) -> Result<u32> {
let index = best_fork_index(&self.consensus.forks)?;
let fork = &self.consensus.forks[index];
@@ -821,8 +831,8 @@ impl Validator {
Ok(next_block_height)
}
/// Auxiliary function to reset the validator blockchain and consensus states
/// to the provided block height.
/// Auxiliary function to reset the validator blockchain and
/// consensus states to the provided block height.
pub async fn reset_to_height(&mut self, height: u32) -> Result<()> {
info!(target: "validator::reset_to_height", "Resetting validator to height: {height}");
// Reset our database to provided height

View File

@@ -82,7 +82,8 @@ pub const RANDOMX_KEY_CHANGING_HEIGHT: u32 = 2048;
/// RandomX VM key change delay
pub const RANDOMX_KEY_CHANGE_DELAY: u32 = 64;
/// This struct represents the information required by the PoW algorithm
/// This struct represents the information required by the PoW
/// algorithm.
#[derive(Clone)]
pub struct PoWModule {
/// Genesis block timestamp
@@ -109,8 +110,9 @@ pub struct PoWModule {
}
impl PoWModule {
// Initialize a new `PowModule` for provided target over provided `Blockchain`.
// Optionally, a fixed difficulty can be set and/or initialize before some height.
/// Initialize a new `PowModule` for provided target over provided
/// `Blockchain`. Optionally, a fixed difficulty can be set and/or
/// initialized before some height.
pub fn new(
blockchain: Blockchain,
target: u32,
@@ -120,7 +122,8 @@ impl PoWModule {
// Retrieve genesis block timestamp
let genesis = blockchain.genesis_block()?.header.timestamp;
// Retrieving last BUF_SIZE difficulties from blockchain to build the buffers
// Retrieving last BUF_SIZE difficulties from blockchain to
// build the buffers.
let mut timestamps = RingBuffer::<Timestamp, BUF_SIZE>::new();
let mut difficulties = RingBuffer::<BigUint, BUF_SIZE>::new();
let mut cumulative_difficulty = BigUint::zero();
@@ -134,7 +137,8 @@ impl PoWModule {
cumulative_difficulty = difficulty.cumulative_difficulty;
}
// If a fixed difficulty has been set, assert its greater than zero
// If a fixed difficulty has been set, assert it's greater than
// zero.
if let Some(diff) = &fixed_difficulty {
assert!(diff > &BigUint::zero());
}
@@ -162,12 +166,13 @@ impl PoWModule {
})
}
/// Compute the next mining difficulty, based on current ring buffers.
/// If ring buffers contain 2 or less items, difficulty 1 is returned.
/// If a fixed difficulty has been set, this function will always
/// return that after first 2 difficulties.
/// Compute the next mining difficulty, based on current ring
/// buffers. If ring buffers contain 2 or less items, difficulty 1
/// is returned. If a fixed difficulty has been set, this function
/// will always return that after first 2 difficulties.
pub fn next_difficulty(&self) -> Result<BigUint> {
// Retrieve first DIFFICULTY_WINDOW timestamps from the ring buffer
// Retrieve first DIFFICULTY_WINDOW timestamps from the ring
// buffer.
let mut timestamps: Vec<Timestamp> =
self.timestamps.iter().take(DIFFICULTY_WINDOW).cloned().collect();
@@ -260,7 +265,8 @@ impl PoWModule {
Ok(self.verify_timestamp_by_median(timestamp))
}
/// Verify provided block timestamp is valid and matches certain criteria.
/// Verify provided block timestamp is valid and matches certain
/// criteria.
pub fn verify_timestamp_by_median(&self, timestamp: Timestamp) -> bool {
// Check timestamp is after genesis one
if timestamp <= self.genesis {
@@ -567,8 +573,8 @@ pub fn generate_mining_vms(
Ok(vms)
}
/// Mine provided header, based on provided PoW module next mine target,
/// using provided RandomX VMs setup.
/// Mine provided header, based on provided PoW module next mine
/// target, using provided RandomX VMs setup.
pub fn mine_block(
vms: &[Arc<RandomXVM>],
target: &BigUint,
@@ -640,7 +646,8 @@ pub fn mine_block(
let out_hash = BigUint::from_bytes_le(&out_hash);
if out_hash <= target {
found_header.store(true, Ordering::SeqCst);
thread_header.nonce = last_nonce; // Since out hash refers to previous run nonce
// Since out hash refers to previous run nonce
thread_header.nonce = last_nonce;
found_nonce.store(thread_header.nonce, Ordering::SeqCst);
debug!(target: "validator::pow::randomx_vms_mine", "[MINER] Thread #{t} found block header using nonce {}",
thread_header.nonce

View File

@@ -41,7 +41,8 @@ impl Default for RandomXFactory {
}
impl RandomXFactory {
/// Create a new RandomXFactory with the specified maximum number of VMs.
/// Create a new RandomXFactory with the specified maximum number
/// of VMs.
pub fn new(max_vms: usize) -> Self {
    // Start with no cached VMs; they are created lazily up to
    // `max_vms`.
    let vms = HashMap::new();
    Self { vms, max_vms }
}

View File

@@ -44,13 +44,14 @@ pub static MAX_32_BYTES: LazyLock<BigUint> = LazyLock::new(|| BigUint::from_byte
///
/// If overlay already contains the contracts, it will just open the
/// necessary db and trees, and give back what it has. This means that
/// on subsequent runs, our native contracts will already be in a deployed
/// state, so what we actually do here is a redeployment. This kind of
/// operation should only modify the contract's state in case it wasn't
/// deployed before (meaning the initial run). Otherwise, it shouldn't
/// touch anything, or just potentially update the db schemas or whatever
/// is necessary. This logic should be handled in the init function of
/// the actual contract, so make sure the native contracts handle this well.
/// on subsequent runs, our native contracts will already be in a
/// deployed state, so what we actually do here is a redeployment. This
/// kind of operation should only modify the contract's state in case
/// it wasn't deployed before (meaning the initial run). Otherwise, it
/// shouldn't touch anything, or just potentially update the db schemas
/// or whatever is necessary. This logic should be handled in the init
/// function of the actual contract, so make sure the native contracts
/// handle this well.
pub async fn deploy_native_contracts(
overlay: &BlockchainOverlayPtr,
block_target: u32,
@@ -148,11 +149,12 @@ pub fn header_rank(module: &mut PoWModule, header: &Header) -> Result<(BigUint,
Ok((difficulty, target_distance_sq, hash_distance_sq))
}
/// Compute a block's rank, assuming that its valid, based on provided mining target.
/// Compute a block's rank, assuming that it's valid, based on provided
/// mining target.
///
/// Block's rank is the tuple of its squared mining target distance from max 32 bytes int,
/// along with its squared RandomX hash number distance from max 32 bytes int.
/// Genesis block has rank (0, 0).
/// Block's rank is the tuple of its squared mining target distance
/// from max 32 bytes int, along with its squared RandomX hash number
/// distance from max 32 bytes int. Genesis block has rank (0, 0).
pub fn block_rank(block: &BlockInfo, target: &BigUint) -> Result<(BigUint, BigUint)> {
// Genesis block has rank 0
if block.header.height == 0 {
@@ -177,7 +179,8 @@ pub fn block_rank(block: &BlockInfo, target: &BigUint) -> Result<(BigUint, BigUi
Ok((target_distance_sq, hash_distance_sq))
}
/// Auxiliary function to calculate the middle value between provided u64 numbers
/// Auxiliary function to calculate the middle value between provided
/// u64 numbers.
pub fn get_mid(a: u64, b: u64) -> u64 {
    // Average computed as sum of halves plus the carry contributed
    // by the two discarded remainder bits, which avoids overflowing
    // u64 on large inputs.
    let halves = (a >> 1) + (b >> 1);
    let remainders = (a & 1) + (b & 1);
    halves + remainders / 2
}
@@ -198,9 +201,9 @@ pub fn median(mut v: Vec<u64>) -> u64 {
v[n]
}
/// Given a proposal, find the index of a fork chain it extends, along with the specific
/// extended proposal index. Additionally, check that proposal doesn't already exists in any
/// fork chain.
/// Given a proposal, find the index of a fork chain it extends, along
/// with the specific extended proposal index. Additionally, check that
/// proposal doesn't already exist in any fork chain.
pub fn find_extended_fork_index(forks: &[Fork], proposal: &Proposal) -> Result<(usize, usize)> {
// Grab provided proposal hash
let proposal_hash = proposal.hash;

View File

@@ -86,12 +86,14 @@ pub async fn verify_genesis_block(
_ => return Err(Error::BlockIsInvalid(block_hash)),
}
// Verify transactions vector contains at least one(producers) transaction
// Verify transactions vector contains at least one(producers)
// transaction.
if block.txs.is_empty() {
return Err(Error::BlockContainsNoTransactions(block_hash))
}
// Genesis producer transaction must be the Transaction::default() one(empty)
// Genesis producer transaction must be the Transaction::default()
// one(empty).
let producer_tx = block.txs.last().unwrap();
if producer_tx != &Transaction::default() {
error!(target: "validator::verification::verify_genesis_block", "Genesis producer transaction is not default one");
@@ -112,14 +114,16 @@ pub async fn verify_genesis_block(
return Err(e)
}
// Append producer transaction to the tree and check tree matches header one
// Append producer transaction to the tree and check tree matches
// header one.
append_tx_to_merkle_tree(&mut tree, producer_tx);
if tree.root(0).unwrap() != block.header.transactions_root {
error!(target: "validator::verification::verify_genesis_block", "Genesis Merkle tree is invalid");
return Err(Error::BlockIsInvalid(block_hash))
}
// Update the contracts states monotree and verify header contracts states root
// Update the contracts states monotree and verify header contracts
// states root.
let diff = overlay.lock().unwrap().overlay.lock().unwrap().diff(diffs)?;
let state_root = overlay.lock().unwrap().contracts.update_state_monotree(&diff)?;
if state_root != block.header.state_root {
@@ -129,7 +133,8 @@ pub async fn verify_genesis_block(
));
}
// Genesis producer signature must be the Signature::dummy() one(empty)
// Genesis producer signature must be the Signature::dummy()
// one(empty).
if block.signature != Signature::dummy() {
error!(target: "validator::verification::verify_genesis_block", "Genesis producer signature is not dummy one");
return Err(Error::InvalidSignature)
@@ -146,7 +151,8 @@ pub async fn verify_genesis_block(
///
/// A block is considered valid when the following rules apply:
/// 1. Block version is correct for its height
/// 2. Previous hash is equal to the hash of the provided previous block
/// 2. Previous hash is equal to the hash of the provided previous
/// block
/// 3. Block height increments previous block height by 1
/// 4. Timestamp is valid based on PoWModule validation
/// 5. Block header Proof of Work data are valid
@@ -242,7 +248,8 @@ pub async fn verify_block(
// Validate block, using its previous
validate_block(block, previous, module, is_new)?;
// Verify transactions vector contains at least one(producers) transaction
// Verify transactions vector contains at least one(producers)
// transaction.
if block.txs.is_empty() {
return Err(Error::BlockContainsNoTransactions(block_hash.as_string()))
}
@@ -283,7 +290,8 @@ pub async fn verify_block(
return Err(Error::BlockIsInvalid(block_hash.as_string()))
}
// Update the contracts states monotree and verify header contracts states root
// Update the contracts states monotree and verify header contracts
// states root.
let diff = overlay.lock().unwrap().overlay.lock().unwrap().diff(diffs)?;
let state_root = overlay.lock().unwrap().contracts.update_state_monotree(&diff)?;
if state_root != block.header.state_root {
@@ -329,7 +337,8 @@ pub async fn verify_checkpoint_block(
return Err(Error::BlockIsInvalid(block_hash.as_string()))
}
// Verify transactions vector contains at least one(producers) transaction
// Verify transactions vector contains at least one(producers)
// transaction.
if block.txs.is_empty() {
return Err(Error::BlockContainsNoTransactions(block_hash.as_string()))
}
@@ -363,7 +372,8 @@ pub async fn verify_checkpoint_block(
return Err(Error::BlockIsInvalid(block_hash.as_string()))
}
// Update the contracts states monotree and verify header contracts states root
// Update the contracts states monotree and verify header contracts
// states root.
let diff = overlay.lock().unwrap().overlay.lock().unwrap().diff(diffs)?;
let state_root = overlay.lock().unwrap().contracts.update_state_monotree(&diff)?;
if state_root != block.header.state_root {
@@ -383,8 +393,8 @@ pub async fn verify_checkpoint_block(
Ok(())
}
/// Verify block proposer signature, using the producer transaction signature as signing key
/// over blocks header hash.
/// Verify block proposer signature, using the producer transaction
/// signature as signing key over blocks header hash.
pub fn verify_producer_signature(block: &BlockInfo, public_key: &PublicKey) -> Result<()> {
if !public_key.verify(block.header.hash().inner(), &block.signature) {
warn!(target: "validator::verification::verify_producer_signature", "Proposer {public_key} signature could not be verified");
@@ -396,9 +406,9 @@ pub fn verify_producer_signature(block: &BlockInfo, public_key: &PublicKey) -> R
/// Verify provided producer [`Transaction`].
///
/// Verify WASM execution, signatures, and ZK proofs and apply it to the provided,
/// provided overlay. Returns transaction signature public key. Additionally,
/// append its hash to the provided Merkle tree.
/// Verify WASM execution, signatures, and ZK proofs and apply it to
/// the provided overlay. Returns transaction signature public key.
/// Additionally, append its hash to the provided Merkle tree.
pub async fn verify_producer_transaction(
overlay: &BlockchainOverlayPtr,
verifying_block_height: u32,
@@ -487,28 +497,32 @@ pub async fn verify_producer_transaction(
let signature_public_key = *sig_pub.last().unwrap();
sig_table.push(sig_pub);
// After getting the metadata, we run the "exec" function with the same runtime
// and the same payload. We keep the returned state update in a buffer, prefixed
// by the call function ID, enforcing the state update function in the contract.
// After getting the metadata, we run the "exec" function with the
// same runtime and the same payload. We keep the returned state
// update in a buffer, prefixed by the call function ID, enforcing
// the state update function in the contract.
debug!(target: "validator::verification::verify_producer_transaction", "Executing \"exec\" call");
let mut state_update = vec![call.data.data[0]];
state_update.append(&mut runtime.exec(&payload)?);
debug!(target: "validator::verification::verify_producer_transaction", "Successfully executed \"exec\" call");
// If that was successful, we apply the state update in the ephemeral overlay.
// If that was successful, we apply the state update in the
// ephemeral overlay.
debug!(target: "validator::verification::verify_producer_transaction", "Executing \"apply\" call");
runtime.apply(&state_update)?;
debug!(target: "validator::verification::verify_producer_transaction", "Successfully executed \"apply\" call");
// When we're done executing over the tx's contract call, we now move on with verification.
// First we verify the signatures as that's cheaper, and then finally we verify the ZK proofs.
// When we're done executing over the tx's contract call, we now
// move on with verification. First we verify the signatures as
// that's cheaper, and then finally we verify the ZK proofs.
debug!(target: "validator::verification::verify_producer_transaction", "Verifying signatures for transaction {tx_hash}");
if sig_table.len() != tx.signatures.len() {
error!(target: "validator::verification::verify_producer_transaction", "Incorrect number of signatures in tx {tx_hash}");
return Err(TxVerifyFailed::MissingSignatures.into())
}
// TODO: Go through the ZK circuits that have to be verified and account for the opcodes.
// TODO: Go through the ZK circuits that have to be verified and
// account for the opcodes.
if let Err(e) = tx.verify_sigs(sig_table) {
error!(target: "validator::verification::verify_producer_transaction", "Signature verification for tx {tx_hash} failed: {e}");
@@ -532,8 +546,9 @@ pub async fn verify_producer_transaction(
Ok(signature_public_key)
}
/// Apply given producer [`Transaction`] to the provided overlay, without formal verification.
/// Returns transaction signature public key. Additionally, append its hash to the provided Merkle tree.
/// Apply given producer [`Transaction`] to the provided overlay,
/// without formal verification. Returns transaction signature public
/// key. Additionally, append its hash to the provided Merkle tree.
pub async fn apply_producer_transaction(
overlay: &BlockchainOverlayPtr,
verifying_block_height: u32,
@@ -588,15 +603,17 @@ pub async fn apply_producer_transaction(
let signature_public_key = *sig_pub.last().unwrap();
// After getting the metadata, we run the "exec" function with the same runtime
// and the same payload. We keep the returned state update in a buffer, prefixed
// by the call function ID, enforcing the state update function in the contract.
// After getting the metadata, we run the "exec" function with the
// same runtime and the same payload. We keep the returned state
// update in a buffer, prefixed by the call function ID, enforcing
// the state update function in the contract.
debug!(target: "validator::verification::apply_producer_transaction", "Executing \"exec\" call");
let mut state_update = vec![call.data.data[0]];
state_update.append(&mut runtime.exec(&payload)?);
debug!(target: "validator::verification::apply_producer_transaction", "Successfully executed \"exec\" call");
// If that was successful, we apply the state update in the ephemeral overlay.
// If that was successful, we apply the state update in the
// ephemeral overlay.
debug!(target: "validator::verification::apply_producer_transaction", "Executing \"apply\" call");
runtime.apply(&state_update)?;
debug!(target: "validator::verification::apply_producer_transaction", "Successfully executed \"apply\" call");
@@ -609,9 +626,9 @@ pub async fn apply_producer_transaction(
Ok(signature_public_key)
}
/// Verify WASM execution, signatures, and ZK proofs for a given [`Transaction`],
/// and apply it to the provided overlay. Additionally, append its hash to the
/// provided Merkle tree.
/// Verify WASM execution, signatures, and ZK proofs for a given
/// [`Transaction`], and apply it to the provided overlay.
/// Additionally, append its hash to the provided Merkle tree.
pub async fn verify_transaction(
overlay: &BlockchainOverlayPtr,
verifying_block_height: u32,
@@ -647,7 +664,8 @@ pub async fn verify_transaction(
let mut fee_call_idx = 0;
if verify_fee {
// Verify that there is a single money fee call in the transaction
// Verify that there is a single money fee call in the
// transaction.
let mut found_fee = false;
for (call_idx, call) in tx.calls.iter().enumerate() {
if !call.data.is_money_fee() {
@@ -679,10 +697,12 @@ pub async fn verify_transaction(
let mut payload = vec![];
tx.calls.encode_async(&mut payload).await?;
// Define a buffer in case we want to use a different payload in a specific call
// Define a buffer in case we want to use a different payload in a
// specific call.
let mut _call_payload = vec![];
// We'll also take note of all the circuits in a Vec so we can calculate their verification cost.
// We'll also take note of all the circuits in a Vec so we can
// calculate their verification cost.
let mut circuits_to_verify = vec![];
// Iterate over all calls to get the metadata
@@ -737,15 +757,19 @@ pub async fn verify_transaction(
debug!(target: "validator::verification::verify_transaction", "Successfully executed \"metadata\" call");
// Here we'll look up verifying keys and insert them into the per-contract map.
// TODO: This vk map can potentially use a lot of RAM. Perhaps load keys on-demand at verification time?
// Here we'll look up verifying keys and insert them into the
// per-contract map.
// TODO: This vk map can potentially use a lot of RAM. Perhaps
// load keys on-demand at verification time?
debug!(target: "validator::verification::verify_transaction", "Performing VerifyingKey lookups from the sled db");
for (zkas_ns, _) in &zkp_pub {
let inner_vk_map = verifying_keys.get_mut(&call.data.contract_id.to_bytes()).unwrap();
// TODO: This will be a problem in case of ::deploy, unless we force a different
// namespace and disable updating existing circuit. Might be a smart idea to do
// so in order to have to care less about being able to verify historical txs.
// TODO: This will be a problem in case of ::deploy, unless
// we force a different namespace and disable updating
// existing circuit. Might be a smart idea to do so in
// order to have to care less about being able to verify
// historical txs.
if inner_vk_map.contains_key(zkas_ns.as_str()) {
continue
}
@@ -760,21 +784,23 @@ pub async fn verify_transaction(
zkp_table.push(zkp_pub);
sig_table.push(sig_pub);
// After getting the metadata, we run the "exec" function with the same runtime
// and the same payload. We keep the returned state update in a buffer, prefixed
// by the call function ID, enforcing the state update function in the contract.
// After getting the metadata, we run the "exec" function with
// the same runtime and the same payload. We keep the returned
// state update in a buffer, prefixed by the call function ID,
// enforcing the state update function in the contract.
debug!(target: "validator::verification::verify_transaction", "Executing \"exec\" call");
let mut state_update = vec![call.data.data[0]];
state_update.append(&mut runtime.exec(call_payload)?);
debug!(target: "validator::verification::verify_transaction", "Successfully executed \"exec\" call");
// If that was successful, we apply the state update in the ephemeral overlay.
// If that was successful, we apply the state update in the
// ephemeral overlay.
debug!(target: "validator::verification::verify_transaction", "Executing \"apply\" call");
runtime.apply(&state_update)?;
debug!(target: "validator::verification::verify_transaction", "Successfully executed \"apply\" call");
// If this call is supposed to deploy a new contract, we have to instantiate
// a new `Runtime` and run its deploy function.
// If this call is supposed to deploy a new contract, we have
// to instantiate a new `Runtime` and run its deploy function.
if call.data.is_deployment()
/* DeployV1 */
{
@@ -801,8 +827,8 @@ pub async fn verify_transaction(
gas_data.deployments = gas_data.deployments.saturating_add(deploy_gas_used);
}
// At this point we're done with the call and move on to the next one.
// Accumulate the WASM gas used.
// At this point we're done with the call and move on to the
// next one. Accumulate the WASM gas used.
let wasm_gas_used = runtime.gas_used();
debug!(target: "validator::verification::verify_transaction", "The gas used for WASM call {call:?} of transaction {tx_hash}: {wasm_gas_used}");
@@ -816,7 +842,8 @@ pub async fn verify_transaction(
.saturating_add(serialize_async(tx).await.len() as u64);
debug!(target: "validator::verification::verify_transaction", "The gas used for signature of transaction {tx_hash}: {}", gas_data.signatures);
// The ZK circuit fee is calculated using a function in validator/fees.rs
// The ZK circuit fee is calculated using a function in
// validator/fees.rs.
for zkbin in circuits_to_verify.iter() {
let zk_circuit_gas_used = circuit_gas_use(zkbin);
debug!(target: "validator::verification::verify_transaction", "The gas used for ZK circuit in namespace {} of transaction {tx_hash}: {zk_circuit_gas_used}", zkbin.namespace);
@@ -825,7 +852,8 @@ pub async fn verify_transaction(
gas_data.zk_circuits = gas_data.zk_circuits.saturating_add(zk_circuit_gas_used);
}
// Store the calculated total gas used to avoid recalculating it for subsequent uses
// Store the calculated total gas used to avoid recalculating it
// for subsequent uses.
let total_gas_used = gas_data.total_gas_used();
if verify_fee {
@@ -844,7 +872,8 @@ pub async fn verify_transaction(
// Compute the required fee for this transaction
let required_fee = compute_fee(&total_gas_used);
// Check that enough fee has been paid for the used gas in this transaction
// Check that enough fee has been paid for the used gas in this
// transaction.
if required_fee > fee {
error!(
target: "validator::verification::verify_transaction",
@@ -858,10 +887,10 @@ pub async fn verify_transaction(
gas_data.paid = fee;
}
// When we're done looping and executing over the tx's contract calls and
// (optionally) made sure that enough fee was paid, we now move on with
// verification. First we verify the transaction signatures and then we
// verify any accompanying ZK proofs.
// When we're done looping and executing over the tx's contract
// calls and (optionally) made sure that enough fee was paid, we
// now move on with verification. First we verify the transaction
// signatures and then we verify any accompanying ZK proofs.
debug!(target: "validator::verification::verify_transaction", "Verifying signatures for transaction {tx_hash}");
if sig_table.len() != tx.signatures.len() {
error!(
@@ -930,20 +959,22 @@ pub async fn apply_transaction(
idx as u8,
)?;
// Run the "exec" function. We keep the returned state update in a buffer, prefixed
// by the call function ID, enforcing the state update function in the contract.
// Run the "exec" function. We keep the returned state update
// in a buffer, prefixed by the call function ID, enforcing the
// state update function in the contract.
debug!(target: "validator::verification::apply_transaction", "Executing \"exec\" call");
let mut state_update = vec![call.data.data[0]];
state_update.append(&mut runtime.exec(&payload)?);
debug!(target: "validator::verification::apply_transaction", "Successfully executed \"exec\" call");
// If that was successful, we apply the state update in the ephemeral overlay
// If that was successful, we apply the state update in the
// ephemeral overlay.
debug!(target: "validator::verification::apply_transaction", "Executing \"apply\" call");
runtime.apply(&state_update)?;
debug!(target: "validator::verification::apply_transaction", "Successfully executed \"apply\" call");
// If this call is supposed to deploy a new contract, we have to instantiate
// a new `Runtime` and run its deploy function.
// If this call is supposed to deploy a new contract, we have
// to instantiate a new `Runtime` and run its deploy function.
if call.data.is_deployment()
/* DeployV1 */
{
@@ -1042,7 +1073,8 @@ pub async fn verify_transactions(
// Calculate current accumulated gas usage
let accumulated_gas_usage = total_gas_used.saturating_add(tx_gas_used);
// Check gas limit - if accumulated gas used exceeds it, break out of loop
// Check gas limit - if accumulated gas used exceeds it, break
// out of loop.
if accumulated_gas_usage > BLOCK_GAS_LIMIT {
warn!(
target: "validator::verification::verify_transactions",
@@ -1066,9 +1098,10 @@ pub async fn verify_transactions(
Ok((total_gas_used, total_gas_paid))
}
/// Apply given set of [`Transaction`] in sequence, without formal verification.
/// In case any of the transactions fail, they will be returned to the caller as an error.
/// Additionally, their hash is appended to the provided Merkle tree.
/// Apply given set of [`Transaction`] in sequence, without formal
/// verification. In case any of the transactions fail, they will be
/// returned to the caller as an error. Additionally, their hash is
/// appended to the provided Merkle tree.
async fn apply_transactions(
overlay: &BlockchainOverlayPtr,
verifying_block_height: u32,