Mirror of https://github.com/darkrenaissance/darkfi.git (synced 2026-01-10 07:08:05 -05:00)

contract/test-harness: integrate new validator
@@ -92,9 +92,7 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusGenesisStake).unwrap();
let timer = Instant::now();

let erroneous_txs =
wallet.state.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
assert!(erroneous_txs.is_empty());
wallet.validator.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
wallet.consensus_staked_merkle_tree.append(MerkleNode::from(params.output.coin.inner()));
tx_action_benchmark.verify_times.push(timer.elapsed());

@@ -113,7 +111,15 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusGenesisStake).unwrap();
let timer = Instant::now();

let erroneous_txs = wallet.state.read().await.verify_transactions(txs, slot, false).await?;
let erroneous_txs = wallet
.validator
.read()
.await
.verify_transactions(txs, slot, false)
.await
.err()
.unwrap()
.retrieve_erroneous_txs()?;
assert_eq!(erroneous_txs.len(), erroneous);
tx_action_benchmark.verify_times.push(timer.elapsed());

@@ -48,7 +48,7 @@ impl TestHarness {
let timer = Instant::now();

// Proposals always extend genesis block
let fork_hash = wallet.state.read().await.consensus.genesis_block;
let fork_hash = wallet.validator.read().await.consensus.genesis_block;

// Building Consensus::Propose params
let proposal_call_debris = ConsensusProposalCallBuilder {
@@ -103,9 +103,7 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusProposal).unwrap();
let timer = Instant::now();

let erroneous_txs =
wallet.state.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
assert!(erroneous_txs.is_empty());
wallet.validator.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
wallet.consensus_staked_merkle_tree.append(MerkleNode::from(params.output.coin.inner()));
tx_action_benchmark.verify_times.push(timer.elapsed());

@@ -124,7 +122,15 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusProposal).unwrap();
let timer = Instant::now();

let erroneous_txs = wallet.state.read().await.verify_transactions(txs, slot, false).await?;
let erroneous_txs = wallet
.validator
.read()
.await
.verify_transactions(txs, slot, false)
.await
.err()
.unwrap()
.retrieve_erroneous_txs()?;
assert_eq!(erroneous_txs.len(), erroneous);
tx_action_benchmark.verify_times.push(timer.elapsed());

@@ -47,7 +47,7 @@ impl TestHarness {
let (burn_pk, burn_zkbin) = self.proving_keys.get(&MONEY_CONTRACT_ZKAS_BURN_NS_V1).unwrap();
let tx_action_benchmark =
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusStake).unwrap();
let epoch = wallet.state.read().await.consensus.time_keeper.slot_epoch(slot);
let epoch = wallet.validator.read().await.consensus.time_keeper.slot_epoch(slot);
let timer = Instant::now();

// Building Money::Stake params
@@ -128,9 +128,7 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusStake).unwrap();
let timer = Instant::now();

let erroneous_txs =
wallet.state.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
assert!(erroneous_txs.is_empty());
wallet.validator.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
wallet.consensus_staked_merkle_tree.append(MerkleNode::from(params.output.coin.inner()));
tx_action_benchmark.verify_times.push(timer.elapsed());

@@ -125,9 +125,7 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusUnstake).unwrap();
let timer = Instant::now();

let erroneous_txs =
wallet.state.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
assert!(erroneous_txs.is_empty());
wallet.validator.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
wallet.money_merkle_tree.append(MerkleNode::from(params.output.coin.inner()));
tx_action_benchmark.verify_times.push(timer.elapsed());

@@ -49,7 +49,7 @@ impl TestHarness {
self.proving_keys.get(&CONSENSUS_CONTRACT_ZKAS_MINT_NS_V1).unwrap();
let tx_action_benchmark =
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusUnstakeRequest).unwrap();
let epoch = wallet.state.read().await.consensus.time_keeper.slot_epoch(slot);
let epoch = wallet.validator.read().await.consensus.time_keeper.slot_epoch(slot);
let timer = Instant::now();

// Building Consensus::Unstake params
@@ -116,9 +116,7 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusUnstakeRequest).unwrap();
let timer = Instant::now();

let erroneous_txs =
wallet.state.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
assert!(erroneous_txs.is_empty());
wallet.validator.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
wallet.consensus_unstaked_merkle_tree.append(MerkleNode::from(params.output.coin.inner()));
tx_action_benchmark.verify_times.push(timer.elapsed());

@@ -137,7 +135,15 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::ConsensusUnstakeRequest).unwrap();
let timer = Instant::now();

let erroneous_txs = wallet.state.read().await.verify_transactions(txs, slot, false).await?;
let erroneous_txs = wallet
.validator
.read()
.await
.verify_transactions(txs, slot, false)
.await
.err()
.unwrap()
.retrieve_erroneous_txs()?;
assert_eq!(erroneous_txs.len(), erroneous);
tx_action_benchmark.verify_times.push(timer.elapsed());

@@ -19,11 +19,10 @@
use std::collections::HashMap;

use darkfi::{
consensus::{
SlotCheckpoint, ValidatorState, ValidatorStatePtr, TESTNET_BOOTSTRAP_TIMESTAMP,
TESTNET_GENESIS_HASH_BYTES, TESTNET_GENESIS_TIMESTAMP, TESTNET_INITIAL_DISTRIBUTION,
},
consensus::{SlotCheckpoint, TESTNET_GENESIS_HASH_BYTES, TESTNET_GENESIS_TIMESTAMP},
runtime::vm_runtime::SMART_CONTRACT_ZKAS_DB_NAME,
util::time::TimeKeeper,
validator::{Validator, ValidatorConfig, ValidatorPtr},
wallet::{WalletDb, WalletPtr},
zk::{empty_witnesses, ProvingKey, ZkCircuit},
zkas::ZkBinary,
@@ -114,7 +113,7 @@ pub enum TxAction {
pub struct Wallet {
pub keypair: Keypair,
pub token_mint_authority: Keypair,
pub state: ValidatorStatePtr,
pub validator: ValidatorPtr,
pub money_merkle_tree: MerkleTree,
pub consensus_staked_merkle_tree: MerkleTree,
pub consensus_unstaked_merkle_tree: MerkleTree,
@@ -131,18 +130,13 @@ impl Wallet {
// Use pregenerated vks
vks::inject(&sled_db)?;

let state = ValidatorState::new(
&sled_db,
*TESTNET_BOOTSTRAP_TIMESTAMP,
*TESTNET_GENESIS_TIMESTAMP,
*TESTNET_GENESIS_HASH_BYTES,
*TESTNET_INITIAL_DISTRIBUTION,
wallet.clone(),
faucet_pubkeys.to_vec(),
false,
false,
)
.await?;
// Generate validator
// NOTE: we are not using consensus constants here so we
// don't get circular dependencies.
let time_keeper = TimeKeeper::new(*TESTNET_GENESIS_TIMESTAMP, 10, 90, 0);
let config =
ValidatorConfig::new(time_keeper, *TESTNET_GENESIS_HASH_BYTES, faucet_pubkeys.to_vec());
let validator = Validator::new(&sled_db, config).await?;

// Create necessary Merkle trees for tracking
let money_merkle_tree = MerkleTree::new(100);
@@ -157,7 +151,7 @@ impl Wallet {
Ok(Self {
keypair,
token_mint_authority,
state,
validator,
money_merkle_tree,
consensus_staked_merkle_tree,
consensus_unstaked_merkle_tree,
@@ -201,7 +195,7 @@ impl TestHarness {

// Get the zkas circuits and build proving keys
let mut proving_keys = HashMap::new();
let alice_sled = alice.state.read().await.blockchain.sled_db.clone();
let alice_sled = alice.validator.read().await.blockchain.sled_db.clone();

macro_rules! mkpk {
($db:expr, $ns:expr) => {
@@ -217,7 +211,7 @@ impl TestHarness {
}

if contracts.contains(&"money".to_string()) {
let db_handle = alice.state.read().await.blockchain.contracts.lookup(
let db_handle = alice.validator.read().await.blockchain.contracts.lookup(
&alice_sled,
&MONEY_CONTRACT_ID,
SMART_CONTRACT_ZKAS_DB_NAME,
@@ -229,7 +223,7 @@ impl TestHarness {
}

if contracts.contains(&"consensus".to_string()) {
let db_handle = alice.state.read().await.blockchain.contracts.lookup(
let db_handle = alice.validator.read().await.blockchain.contracts.lookup(
&alice_sled,
&CONSENSUS_CONTRACT_ID,
SMART_CONTRACT_ZKAS_DB_NAME,
@@ -240,7 +234,7 @@ impl TestHarness {
}

if contracts.contains(&"dao".to_string()) {
let db_handle = alice.state.read().await.blockchain.contracts.lookup(
let db_handle = alice.validator.read().await.blockchain.contracts.lookup(
&alice_sled,
&DAO_CONTRACT_ID,
SMART_CONTRACT_ZKAS_DB_NAME,
@@ -419,7 +413,7 @@ impl TestHarness {
pub async fn get_slot_checkpoint_by_slot(&self, slot: u64) -> Result<SlotCheckpoint> {
let faucet = self.holders.get(&Holder::Faucet).unwrap();
let slot_checkpoint =
faucet.state.read().await.blockchain.get_slot_checkpoints_by_slot(&[slot])?[0]
faucet.validator.read().await.blockchain.get_slot_checkpoints_by_slot(&[slot])?[0]
.clone()
.unwrap();

@@ -430,7 +424,7 @@ impl TestHarness {
// We grab the genesis slot to generate slot checkpoint
// using same consensus parameters
let faucet = self.holders.get(&Holder::Faucet).unwrap();
let genesis_block = faucet.state.read().await.consensus.genesis_block;
let genesis_block = faucet.validator.read().await.consensus.genesis_block;
let fork_hashes = vec![genesis_block];
let fork_previous_hashes = vec![genesis_block];
let genesis_slot = self.get_slot_checkpoint_by_slot(0).await?;
@@ -445,7 +439,12 @@ impl TestHarness {

// Store generated slot checkpoint
for wallet in self.holders.values() {
wallet.state.write().await.receive_slot_checkpoints(&[slot_checkpoint.clone()]).await?;
wallet
.validator
.write()
.await
.receive_slot_checkpoints(&[slot_checkpoint.clone()])
.await?;
}

Ok(slot_checkpoint)

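The hunk above replaces `ValidatorState::new` with the new `Validator`. A minimal sketch of that wiring in isolation, assuming a temporary sled database and an empty faucet list (both illustrative assumptions, not part of the diff):

    // Sketch only: condenses the Wallet::new hunk above.
    use darkfi::{
        consensus::{TESTNET_GENESIS_HASH_BYTES, TESTNET_GENESIS_TIMESTAMP},
        util::time::TimeKeeper,
        validator::{Validator, ValidatorConfig, ValidatorPtr},
    };
    use darkfi_sdk::crypto::PublicKey;

    // Assumption: a throwaway sled db and no whitelisted faucet keys.
    let sled_db = sled::Config::new().temporary(true).open()?;
    let faucet_pubkeys: Vec<PublicKey> = vec![];

    // Same constants as the hunk above (epoch length 10, slot time 90, starting slot 0).
    let time_keeper = TimeKeeper::new(*TESTNET_GENESIS_TIMESTAMP, 10, 90, 0);
    let config = ValidatorConfig::new(time_keeper, *TESTNET_GENESIS_HASH_BYTES, faucet_pubkeys);
    let validator: ValidatorPtr = Validator::new(&sled_db, config).await?;
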
@@ -101,9 +101,7 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::MoneyAirdrop).unwrap();
let timer = Instant::now();

let erroneous_txs =
wallet.state.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
assert!(erroneous_txs.is_empty());
wallet.validator.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
wallet.money_merkle_tree.append(MerkleNode::from(params.outputs[0].coin.inner()));
tx_action_benchmark.verify_times.push(timer.elapsed());

@@ -92,9 +92,7 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::MoneyGenesisMint).unwrap();
let timer = Instant::now();

let erroneous_txs =
wallet.state.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
assert!(erroneous_txs.is_empty());
wallet.validator.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
wallet.money_merkle_tree.append(MerkleNode::from(params.output.coin.inner()));
tx_action_benchmark.verify_times.push(timer.elapsed());

@@ -113,7 +111,15 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::MoneyGenesisMint).unwrap();
let timer = Instant::now();

let erroneous_txs = wallet.state.read().await.verify_transactions(txs, slot, false).await?;
let erroneous_txs = wallet
.validator
.read()
.await
.verify_transactions(txs, slot, false)
.await
.err()
.unwrap()
.retrieve_erroneous_txs()?;
assert_eq!(erroneous_txs.len(), erroneous);
tx_action_benchmark.verify_times.push(timer.elapsed());

@@ -165,9 +165,7 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::MoneyOtcSwap).unwrap();
let timer = Instant::now();

let erroneous_txs =
wallet.state.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
assert!(erroneous_txs.is_empty());
wallet.validator.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
if append {
for output in &params.outputs {
wallet.money_merkle_tree.append(MerkleNode::from(output.coin.inner()));

@@ -93,9 +93,7 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::MoneyTokenMint).unwrap();
let timer = Instant::now();

let erroneous_txs =
wallet.state.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
assert!(erroneous_txs.is_empty());
wallet.validator.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
wallet.money_merkle_tree.append(MerkleNode::from(params.output.coin.inner()));
tx_action_benchmark.verify_times.push(timer.elapsed());

@@ -153,9 +151,7 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::MoneyTokenFreeze).unwrap();
let timer = Instant::now();

let erroneous_txs =
wallet.state.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
assert!(erroneous_txs.is_empty());
wallet.validator.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
tx_action_benchmark.verify_times.push(timer.elapsed());

Ok(())

@@ -116,9 +116,7 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::MoneyTransfer).unwrap();
let timer = Instant::now();

let erroneous_txs =
wallet.state.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
assert!(erroneous_txs.is_empty());
wallet.validator.read().await.verify_transactions(&[tx.clone()], slot, true).await?;
if append {
for output in &params.outputs {
wallet.money_merkle_tree.append(MerkleNode::from(output.coin.inner()));
@@ -142,8 +140,7 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::MoneyTransfer).unwrap();
let timer = Instant::now();

let erroneous_txs = wallet.state.read().await.verify_transactions(txs, slot, true).await?;
assert!(erroneous_txs.is_empty());
wallet.validator.read().await.verify_transactions(txs, slot, true).await?;
if append {
for params in txs_params {
for output in &params.outputs {
@@ -167,9 +164,7 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::MoneyTransfer).unwrap();
let timer = Instant::now();

let erroneous_txs =
wallet.state.read().await.verify_transactions(&[tx.clone()], slot, false).await?;
assert!(erroneous_txs.is_empty());
wallet.validator.read().await.verify_transactions(&[tx.clone()], slot, false).await?;
tx_action_benchmark.verify_times.push(timer.elapsed());

Ok(())
@@ -187,7 +182,15 @@ impl TestHarness {
self.tx_action_benchmarks.get_mut(&TxAction::MoneyTransfer).unwrap();
let timer = Instant::now();

let erroneous_txs = wallet.state.read().await.verify_transactions(txs, slot, false).await?;
let erroneous_txs = wallet
.validator
.read()
.await
.verify_transactions(txs, slot, false)
.await
.err()
.unwrap()
.retrieve_erroneous_txs()?;
assert_eq!(erroneous_txs.len(), erroneous);
tx_action_benchmark.verify_times.push(timer.elapsed());

src/error.rs
@@ -424,6 +424,7 @@ pub enum Error {
#[error(transparent)]
ClientFailed(#[from] ClientFailed),

#[cfg(feature = "tx")]
#[error(transparent)]
TxVerifyFailed(#[from] TxVerifyFailed),

@@ -451,6 +452,20 @@ pub enum Error {
Custom(String),
}

#[cfg(feature = "tx")]
impl Error {
/// Auxiliary function to retrieve the vector of erroneous
/// transactions from a TxVerifyFailed error.
/// In any other case, we return the error itself.
pub fn retrieve_erroneous_txs(&self) -> Result<Vec<crate::tx::Transaction>> {
if let Self::TxVerifyFailed(TxVerifyFailed::ErroneousTxs(erroneous_txs)) = self {
return Ok(erroneous_txs.clone())
};

Err(self.clone())
}
}

#[cfg(feature = "tx")]
/// Transaction verification errors
#[derive(Debug, Clone, thiserror::Error)]

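The helper added here is what lets the test-harness hunks above collapse their negative-path checks. A minimal sketch of its use, assuming `wallet`, `txs`, `slot`, and `erroneous` exist as in those hunks:

    // Sketch only: on a failed batch, verify_transactions returns
    // Error::TxVerifyFailed(TxVerifyFailed::ErroneousTxs(..)); the helper clones the
    // offending transactions out so a test can assert on them.
    match wallet.validator.read().await.verify_transactions(txs, slot, false).await {
        Err(e) => assert_eq!(e.retrieve_erroneous_txs()?.len(), erroneous),
        Ok(()) => panic!("expected erroneous transactions"),
    }
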
@@ -17,7 +17,7 @@
*/

pub mod error;
pub use error::{ClientFailed, ClientResult, Error, Result, TxVerifyFailed};
pub use error::{ClientFailed, ClientResult, Error, Result};

#[cfg(feature = "blockchain")]
pub mod blockchain;

@@ -31,8 +31,9 @@ use log::{debug, error};
use rand::{CryptoRng, RngCore};

use crate::{
error::TxVerifyFailed,
zk::{proof::VerifyingKey, Proof},
Error, Result, TxVerifyFailed,
Error, Result,
};

macro_rules! zip {

@@ -1,39 +0,0 @@
/* This file is part of DarkFi (https://dark.fi)
*
* Copyright (C) 2020-2023 Dyne.org foundation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/

use crate::{blockchain::Blockchain, util::time::TimeKeeper};

/// This struct represents the information required by the consensus algorithm
pub struct Consensus {
/// Canonical (finalized) blockchain
pub blockchain: Blockchain,
/// Helper structure to calculate time related operations
pub time_keeper: TimeKeeper,
/// Genesis block hash
pub genesis_block: blake3::Hash,
}

impl Consensus {
pub fn new(
blockchain: Blockchain,
time_keeper: TimeKeeper,
genesis_block: blake3::Hash,
) -> Self {
Self { blockchain, time_keeper, genesis_block }
}
}

@@ -16,6 +16,24 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/

/// DarkFi consensus
pub mod consensus;
pub use consensus::Consensus;
use crate::{blockchain::Blockchain, util::time::TimeKeeper};

/// This struct represents the information required by the consensus algorithm
pub struct Consensus {
/// Canonical (finalized) blockchain
pub blockchain: Blockchain,
/// Helper structure to calculate time related operations
pub time_keeper: TimeKeeper,
/// Genesis block hash
pub genesis_block: blake3::Hash,
}

impl Consensus {
pub fn new(
blockchain: Blockchain,
time_keeper: TimeKeeper,
genesis_block: blake3::Hash,
) -> Self {
Self { blockchain, time_keeper, genesis_block }
}
}

@@ -16,9 +16,364 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/

/// DarkFi validator
pub mod validator;
pub use validator::{Validator, ValidatorConfig, ValidatorPtr};
use std::{collections::HashMap, io::Cursor};

use async_std::sync::{Arc, RwLock};
use darkfi_sdk::{
crypto::{PublicKey, CONSENSUS_CONTRACT_ID, DAO_CONTRACT_ID, MONEY_CONTRACT_ID},
pasta::pallas,
};
use darkfi_serial::{serialize, Decodable, Encodable, WriteExt};
use log::{debug, error, info, warn};

use crate::{
blockchain::{Blockchain, BlockchainOverlay, BlockchainOverlayPtr},
consensus::SlotCheckpoint,
error::TxVerifyFailed,
runtime::vm_runtime::Runtime,
tx::Transaction,
util::time::TimeKeeper,
zk::VerifyingKey,
Result,
};

/// DarkFi consensus
pub mod consensus;
use consensus::Consensus;

/// Configuration for initializing [`Validator`]
pub struct ValidatorConfig {
/// Helper structure to calculate time related operations
pub time_keeper: TimeKeeper,
/// Genesis block
pub genesis_block: blake3::Hash,
/// Whitelisted faucet pubkeys (testnet stuff)
pub faucet_pubkeys: Vec<PublicKey>,
}

impl ValidatorConfig {
pub fn new(
time_keeper: TimeKeeper,
genesis_block: blake3::Hash,
faucet_pubkeys: Vec<PublicKey>,
) -> Self {
Self { time_keeper, genesis_block, faucet_pubkeys }
}
}

/// Atomic pointer to validator.
pub type ValidatorPtr = Arc<RwLock<Validator>>;

/// This struct represents a DarkFi validator node.
pub struct Validator {
/// Canonical (finalized) blockchain
pub blockchain: Blockchain,
/// Hot/Live data used by the consensus algorithm
pub consensus: Consensus,
}

impl Validator {
pub async fn new(db: &sled::Db, config: ValidatorConfig) -> Result<ValidatorPtr> {
info!(target: "validator", "Initializing Validator");

info!(target: "validator", "Initializing Blockchain");
// TODO: Initialize chain, then check if its empty, so we can add the
// genesis block and its transactions
let blockchain = Blockchain::new(db, config.time_keeper.genesis_ts, config.genesis_block)?;

info!(target: "validator", "Initializing Consensus");
let consensus =
Consensus::new(blockchain.clone(), config.time_keeper, config.genesis_block);

// =====================
// NATIVE WASM CONTRACTS
// =====================
// This is the current place where native contracts are being deployed.
// When the `Blockchain` object is created, it doesn't care whether it
// already has the contract data or not. If there's existing data, it
// will just open the necessary db and trees, and give back what it has.
// This means that on subsequent runs our native contracts will already
// be in a deployed state, so what we actually do here is a redeployment.
// This kind of operation should only modify the contract's state in case
// it wasn't deployed before (meaning the initial run). Otherwise, it
// shouldn't touch anything, or just potentially update the db schemas or
// whatever is necessary. This logic should be handled in the init function
// of the actual contract, so make sure the native contracts handle this well.

// The faucet pubkeys are pubkeys which are allowed to create clear inputs
// in the Money contract.
let money_contract_deploy_payload = serialize(&config.faucet_pubkeys);

// The DAO contract uses an empty payload to deploy itself.
let dao_contract_deploy_payload = vec![];

// The Consensus contract uses an empty payload to deploy itself.
let consensus_contract_deploy_payload = vec![];

let native_contracts = vec![
(
"Money Contract",
*MONEY_CONTRACT_ID,
include_bytes!("../contract/money/money_contract.wasm").to_vec(),
money_contract_deploy_payload,
),
(
"DAO Contract",
*DAO_CONTRACT_ID,
include_bytes!("../contract/dao/dao_contract.wasm").to_vec(),
dao_contract_deploy_payload,
),
(
"Consensus Contract",
*CONSENSUS_CONTRACT_ID,
include_bytes!("../contract/consensus/consensus_contract.wasm").to_vec(),
consensus_contract_deploy_payload,
),
];

info!(target: "validator", "Deploying native WASM contracts");
let blockchain_overlay = BlockchainOverlay::new(&blockchain)?;

for nc in native_contracts {
info!(target: "validator", "Deploying {} with ContractID {}", nc.0, nc.1);

let mut runtime = Runtime::new(
&nc.2[..],
blockchain_overlay.clone(),
nc.1,
consensus.time_keeper.clone(),
)?;

runtime.deploy(&nc.3)?;

info!(target: "validator", "Successfully deployed {}", nc.0);
}

// Write the changes to the actual chain db
blockchain_overlay.lock().unwrap().overlay.lock().unwrap().apply()?;

info!(target: "validator", "Finished deployment of native WASM contracts");

// Create the actual state
let state = Arc::new(RwLock::new(Self { blockchain, consensus }));

Ok(state)
}

// ==========================
// State transition functions
// ==========================
// TODO TESTNET: Write down all cases below
// State transition checks should be happening in the following cases for a sync node:
// 1) When a finalized block is received
// 2) When a transaction is being broadcasted to us
// State transition checks should be happening in the following cases for a consensus participating node:
// 1) When a finalized block is received
// 2) When a transaction is being broadcasted to us
// ==========================

/// Append to canonical state received finalized slot checkpoints from block sync task.
// TODO: integrate this to receive_blocks, as slot checkpoints will be part of received block.
pub async fn receive_slot_checkpoints(
&mut self,
slot_checkpoints: &[SlotCheckpoint],
) -> Result<()> {
debug!(target: "validator", "receive_slot_checkpoints(): Appending slot checkpoints to ledger");
let current_slot = self.consensus.time_keeper.current_slot();
let mut filtered = vec![];
for slot_checkpoint in slot_checkpoints {
if slot_checkpoint.slot > current_slot {
warn!(target: "validator", "receive_slot_checkpoints(): Ignoring future slot checkpoint: {}", slot_checkpoint.slot);
continue
}
filtered.push(slot_checkpoint.clone());
}
self.blockchain.add_slot_checkpoints(&filtered[..])?;

Ok(())
}

/// Validate WASM execution, signatures, and ZK proofs for a given [`Transaction`].
async fn verify_transaction(
&self,
blockchain_overlay: BlockchainOverlayPtr,
tx: &Transaction,
time_keeper: &TimeKeeper,
verifying_keys: &mut HashMap<[u8; 32], HashMap<String, VerifyingKey>>,
) -> Result<()> {
let tx_hash = tx.hash();
debug!(target: "validator", "Validating transaction {}", tx_hash);

// Table of public inputs used for ZK proof verification
let mut zkp_table = vec![];
// Table of public keys used for signature verification
let mut sig_table = vec![];

// Iterate over all calls to get the metadata
for (idx, call) in tx.calls.iter().enumerate() {
debug!(target: "validator", "Executing contract call {}", idx);

// Write the actual payload data
let mut payload = vec![];
payload.write_u32(idx as u32)?; // Call index
tx.calls.encode(&mut payload)?; // Actual call data

debug!(target: "validator", "Instantiating WASM runtime");
let wasm = self.blockchain.wasm_bincode.get(call.contract_id)?;

let mut runtime = Runtime::new(
&wasm,
blockchain_overlay.clone(),
call.contract_id,
time_keeper.clone(),
)?;

debug!(target: "validator", "Executing \"metadata\" call");
let metadata = runtime.metadata(&payload)?;

// Decode the metadata retrieved from the execution
let mut decoder = Cursor::new(&metadata);

// The tuple is (zkasa_ns, public_inputs)
let zkp_pub: Vec<(String, Vec<pallas::Base>)> = Decodable::decode(&mut decoder)?;
let sig_pub: Vec<PublicKey> = Decodable::decode(&mut decoder)?;
// TODO: Make sure we've read all the bytes above.
debug!(target: "validator", "Successfully executed \"metadata\" call");

// Here we'll look up verifying keys and insert them into the per-contract map.
debug!(target: "validator", "Performing VerifyingKey lookups from the sled db");
for (zkas_ns, _) in &zkp_pub {
let inner_vk_map = verifying_keys.get_mut(&call.contract_id.to_bytes()).unwrap();

// TODO: This will be a problem in case of ::deploy, unless we force a different
// namespace and disable updating existing circuit. Might be a smart idea to do
// so in order to have to care less about being able to verify historical txs.
if inner_vk_map.contains_key(zkas_ns.as_str()) {
continue
}

let (_, vk) = self.blockchain.contracts.get_zkas(
&self.blockchain.sled_db,
&call.contract_id,
zkas_ns,
)?;

inner_vk_map.insert(zkas_ns.to_string(), vk);
}

zkp_table.push(zkp_pub);
sig_table.push(sig_pub);

// After getting the metadata, we run the "exec" function with the same runtime
// and the same payload.
debug!(target: "validator", "Executing \"exec\" call");
let state_update = runtime.exec(&payload)?;
debug!(target: "validator", "Successfully executed \"exec\" call");

// If that was successful, we apply the state update in the ephemeral overlay.
debug!(target: "validator", "Executing \"apply\" call");
runtime.apply(&state_update)?;
debug!(target: "validator", "Successfully executed \"apply\" call");

// At this point we're done with the call and move on to the next one.
}

// When we're done looping and executing over the tx's contract calls, we now
// move on with verification. First we verify the signatures as that's cheaper,
// and then finally we verify the ZK proofs.
debug!(target: "validator", "Verifying signatures for transaction {}", tx_hash);
if sig_table.len() != tx.signatures.len() {
error!(target: "validator", "Incorrect number of signatures in tx {}", tx_hash);
return Err(TxVerifyFailed::MissingSignatures.into())
}

// TODO: Go through the ZK circuits that have to be verified and account for the opcodes.

if let Err(e) = tx.verify_sigs(sig_table) {
error!(target: "validator", "Signature verification for tx {} failed: {}", tx_hash, e);
return Err(TxVerifyFailed::InvalidSignature.into())
}

debug!(target: "validator", "Signature verification successful");

debug!(target: "validator", "Verifying ZK proofs for transaction {}", tx_hash);
if let Err(e) = tx.verify_zkps(verifying_keys, zkp_table).await {
error!(target: "consensus::validator", "ZK proof verification for tx {} failed: {}", tx_hash, e);
return Err(TxVerifyFailed::InvalidZkProof.into())
}

debug!(target: "validator", "ZK proof verification successful");
debug!(target: "validator", "Transaction {} verified successfully", tx_hash);

Ok(())
}

/// Validate a set of [`Transaction`] in sequence and apply them if all are valid.
/// In case any of the transactions fail, they will be returned to the caller.
/// The function takes a boolean called `write` which tells it to actually write
/// the state transitions to the database.
pub async fn verify_transactions(
&self,
txs: &[Transaction],
verifying_slot: u64,
write: bool,
) -> Result<()> {
debug!(target: "validator", "Verifying {} transactions", txs.len());

debug!(target: "validator", "Instantiating BlockchainOverlay");
let blockchain_overlay = BlockchainOverlay::new(&self.blockchain)?;

// Tracker for failed txs
let mut erroneous_txs = vec![];

// Map of ZK proof verifying keys for the current transaction batch
let mut vks: HashMap<[u8; 32], HashMap<String, VerifyingKey>> = HashMap::new();

// Initialize the map
for tx in txs {
for call in &tx.calls {
vks.insert(call.contract_id.to_bytes(), HashMap::new());
}
}

// Generate a time keeper using transaction verifying slot
let time_keeper = TimeKeeper::new(
self.consensus.time_keeper.genesis_ts,
self.consensus.time_keeper.epoch_length,
self.consensus.time_keeper.slot_time,
verifying_slot,
);

// Iterate over transactions and attempt to verify them
for tx in txs {
blockchain_overlay.lock().unwrap().checkpoint();
if let Err(e) = self
.verify_transaction(blockchain_overlay.clone(), tx, &time_keeper, &mut vks)
.await
{
warn!(target: "validator", "Transaction verification failed: {}", e);
erroneous_txs.push(tx.clone());
// TODO: verify this works as expected
blockchain_overlay.lock().unwrap().revert_to_checkpoint()?;
}
}

let lock = blockchain_overlay.lock().unwrap();
let mut overlay = lock.overlay.lock().unwrap();
if !erroneous_txs.is_empty() {
warn!(target: "validator", "Erroneous transactions found in set");
overlay.purge_new_trees()?;
return Err(TxVerifyFailed::ErroneousTxs(erroneous_txs).into())
}

if !write {
debug!(target: "validator", "Skipping apply of state updates because write=false");
overlay.purge_new_trees()?;
return Ok(())
}

debug!(target: "validator", "Applying overlay changes");
overlay.apply()?;
Ok(())
}
}

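A brief sketch of how a caller might use the `write` flag of `verify_transactions` above; `validator`, `txs`, and `slot` are assumed to be in scope as in the test-harness code:

    // Sketch only. Dry run: transactions are validated against an ephemeral
    // BlockchainOverlay whose new trees are purged afterwards.
    validator.read().await.verify_transactions(txs, slot, false).await?;
    // Persist: the same validation, but the overlay changes are applied to the chain db.
    validator.read().await.verify_transactions(txs, slot, true).await?;
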
@@ -1,376 +0,0 @@
(Deleted file: the old standalone validator implementation. Its contents are line-for-line the code shown in the hunk above, preceded by the standard DarkFi GPL license header; the only other differences are the imports, where the old file pulled `Result, TxVerifyFailed` from the crate root together with `use super::consensus::Consensus;`, while the new module imports `error::TxVerifyFailed` and declares `pub mod consensus; use consensus::Consensus;`.)