chore: delete dead code (#705)

sinu.eth
2025-03-03 11:53:20 -08:00
committed by GitHub
parent c384a393bf
commit acabb7761b
11 changed files with 0 additions and 1298 deletions

View File

@@ -1,45 +0,0 @@
[package]
name = "tlsn-universal-hash"
authors = ["TLSNotary Team"]
description = "A crate which implements different hash functions for two-party computation"
keywords = ["tls", "mpc", "2pc", "hash"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.8-pre"
edition = "2021"
[features]
default = ["ghash", "ideal"]
ghash = []
ideal = ["dep:ghash_rc"]
[dependencies]
# tlsn
mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac", features = [
"ideal",
] }
mpz-fields = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
ghash_rc = { package = "ghash", version = "0.5", optional = true }
async-trait = { workspace = true }
thiserror = { workspace = true }
opaque-debug = { workspace = true }
tracing = { workspace = true }
derive_builder = { workspace = true }
[dev-dependencies]
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac", features = [
"test-utils",
] }
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac", features = [
"ideal",
] }
ghash_rc = { package = "ghash", version = "0.5" }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
rand_chacha = { workspace = true }
rand = { workspace = true }
generic-array = { workspace = true }

View File

@@ -1,150 +0,0 @@
use mpz_core::Block;
use mpz_fields::{gf2_128::Gf2_128, Field};
use tracing::instrument;
use super::{
compute_missing_mul_shares, compute_new_add_shares,
state::{Finalized, Init, Intermediate, State},
GhashError,
};
/// The core logic for the 2PC Ghash implementation.
///
/// `GhashCore` performs all the necessary computation.
#[derive(Debug)]
pub(crate) struct GhashCore<T: State = Init> {
/// Inner state.
state: T,
/// Maximum number of message blocks we want to authenticate.
max_block_count: usize,
}
impl GhashCore {
/// Creates a new `GhashCore`.
///
/// # Arguments
///
/// * `max_block_count` - Determines the maximum number of 128-bit message
/// blocks we want to authenticate. Panics if `max_block_count` is 0.
pub(crate) fn new(max_block_count: usize) -> Self {
assert!(max_block_count > 0);
Self {
state: Init,
max_block_count,
}
}
/// Transforms `self` into a `GhashCore<Intermediate>`, holding
/// multiplicative shares of powers of `H`.
///
/// Converts the multiplicative share of `H` into shares of the odd powers
/// `H`, `H^3`, `H^5`, ..., depending on `self.max_block_count`.
#[instrument(level = "trace", skip_all)]
pub(crate) fn compute_odd_mul_powers(self, mul_share: Gf2_128) -> GhashCore<Intermediate> {
let mut hashkey_powers = vec![mul_share];
compute_missing_mul_shares(&mut hashkey_powers, self.max_block_count);
GhashCore {
state: Intermediate {
odd_mul_shares: hashkey_powers,
cached_add_shares: vec![],
},
max_block_count: self.max_block_count,
}
}
}
impl GhashCore<Intermediate> {
/// Returns odd multiplicative shares of the hashkey.
///
/// Takes cached additive shares into account, so that multiplicative shares
/// for which an additive share already exists are not returned.
#[instrument(level = "trace", skip_all)]
pub(crate) fn odd_mul_shares(&self) -> Vec<Gf2_128> {
// If we already have some cached additive sharings, we do not need to compute
// new powers. So we compute an offset to ignore them. We divide by 2
// because `self.state.cached_add_shares` contains both even and odd powers,
// while `self.state.odd_mul_shares` only contains odd powers.
let offset = self.state.cached_add_shares.len() / 2;
self.state.odd_mul_shares[offset..].to_vec()
}
/// Adds new additive shares of the hashkey powers, also computing the even
/// ones, and transforms `self` into a `GhashCore<Finalized>`.
#[instrument(level = "trace", skip_all)]
pub(crate) fn add_new_add_shares(
mut self,
new_additive_odd_shares: &[Gf2_128],
) -> GhashCore<Finalized> {
compute_new_add_shares(new_additive_odd_shares, &mut self.state.cached_add_shares);
GhashCore {
state: Finalized {
add_shares: self.state.cached_add_shares,
odd_mul_shares: self.state.odd_mul_shares,
},
max_block_count: self.max_block_count,
}
}
}
impl GhashCore<Finalized> {
/// Returns the currently configured maximum message length in blocks.
pub(crate) fn get_max_blocks(&self) -> usize {
self.max_block_count
}
/// Generates the GHASH output.
///
/// Computes the 2PC additive share of the GHASH output.
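/// That is, an additive share of m_1 * H^n + m_2 * H^(n-1) + ... + m_n * H,
/// where m_i are the message blocks and n is the message length in blocks.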
#[instrument(level = "debug", skip_all, err)]
pub(crate) fn finalize(&self, message: &[Block]) -> Result<Block, GhashError> {
if message.len() > self.max_block_count {
return Err(GhashError::InvalidMessageLength);
}
let offset = self.state.add_shares.len() - message.len();
let output: Block = message
.iter()
.zip(self.state.add_shares.iter().rev().skip(offset))
.fold(Gf2_128::zero(), |acc, (block, share)| {
acc + Gf2_128::from(block.reverse_bits()) * *share
})
.into();
Ok(output.reverse_bits())
}
/// Changes the maximum hashkey power.
///
/// If we want to create a GHASH output for a new message that is longer
/// than the old one, we need to compute the missing shares of the
/// powers of `H`.
#[instrument(level = "debug", skip(self))]
pub(crate) fn change_max_hashkey(
self,
new_highest_hashkey_power: usize,
) -> GhashCore<Intermediate> {
let mut present_odd_mul_shares = self.state.odd_mul_shares;
compute_missing_mul_shares(&mut present_odd_mul_shares, new_highest_hashkey_power);
GhashCore {
state: Intermediate {
odd_mul_shares: present_odd_mul_shares,
cached_add_shares: self.state.add_shares,
},
max_block_count: new_highest_hashkey_power,
}
}
}
#[cfg(test)]
impl<T: State> GhashCore<T> {
pub(crate) fn state(&self) -> &T {
&self.state
}
}

View File

@@ -1,395 +0,0 @@
//! This module implements the AES-GCM's GHASH function in a secure two-party
//! computation (2PC) setting. The parties start with their secret XOR shares of
//! H (the GHASH key) and at the end each gets their XOR share of the GHASH
//! output. The method is described here: <https://tlsnotary.org/how_it_works#section4>.
//!
//! First we convert the XOR (additive) share of `H` into a multiplicative
//! share. This allows us to compute all the necessary powers `H^n` locally.
//! Note that only the odd multiplicative powers need to be computed, because
//! of free squaring. Each of these multiplicative shares is then converted
//! back into an additive share. The even additive shares can then be built
//! locally from the odd ones. This way, we can batch nearly all oblivious
//! transfers and reduce the round complexity of the protocol.
//!
//! In total, we need a single additive-to-multiplicative (A2M) conversion and
//! `n/2` multiplicative-to-additive (M2A) conversions, where `n` is the
//! number of message blocks. Finally, having additive shares of `H^n` for all
//! needed `n`, we can compute an additive share of the GHASH output.
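//!
//! For example, for a message of `n = 5` blocks the odd powers `H`, `H^3`
//! and `H^5` are needed: one A2M conversion for `H` itself and three M2A
//! conversions, while the even shares `H^2` and `H^4` follow locally by
//! free squaring.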
/// Contains the core logic for ghash.
mod core;
/// Contains the different states.
pub(crate) mod state;
pub(crate) use self::core::GhashCore;
use mpz_fields::{compute_product_repeated, gf2_128::Gf2_128};
use thiserror::Error;
use tracing::instrument;
#[derive(Debug, Error)]
pub(crate) enum GhashError {
#[error("Message too long")]
InvalidMessageLength,
}
/// Computes missing odd multiplicative shares of the hashkey powers.
///
/// Checks whether, given the number of `needed` shares, more odd
/// multiplicative shares are required and computes them. Note that we only
/// need odd multiplicative shares for the OT, because even additive shares
/// can be derived from odd additive shares, which we call free squaring.
///
/// # Arguments
///
/// * `present_odd_mul_shares` - Odd multiplicative shares already present.
/// * `needed` - How many powers we need in total, counting both odd and even
///   powers.
#[instrument(level = "trace", skip(present_odd_mul_shares))]
fn compute_missing_mul_shares(present_odd_mul_shares: &mut Vec<Gf2_128>, needed: usize) {
// Divide by 2 and round up.
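// E.g. needed = 5 -> 3 odd powers (H, H^3, H^5); needed = 4 -> 2 (H, H^3).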
let needed_odd_powers: usize = needed / 2 + (needed & 1);
let present_odd_len = present_odd_mul_shares.len();
if needed_odd_powers > present_odd_len {
let h_squared = present_odd_mul_shares[0] * present_odd_mul_shares[0];
compute_product_repeated(
present_odd_mul_shares,
h_squared,
needed_odd_powers - present_odd_len,
);
}
}
/// Computes new even (additive) shares from new odd (additive) shares and saves
/// both the new odd shares and the new even shares.
///
/// This function implements the derivation of even additive shares from odd
/// additive shares, which we refer to as free squaring. Every additive share of
/// an even power of `H` can be computed without an OT interaction by squaring
/// the corresponding additive share of an odd power of `H`, e.g. if we have a
/// share of H^3, we can derive the share of H^6 by doing (H^3)^2.
///
/// # Arguments
///
/// * `new_add_odd_shares` - New odd additive shares we got as a result of doing
/// an OT on odd multiplicative shares.
/// * `add_shares` - All additive shares (even and odd) we already have.
/// This is a mutable reference to cached_add_shares in
/// [crate::ghash::state::Intermediate].
#[instrument(level = "trace", skip_all)]
fn compute_new_add_shares(new_add_odd_shares: &[Gf2_128], add_shares: &mut Vec<Gf2_128>) {
for (odd_share, current_odd_power) in new_add_odd_shares
.iter()
.zip((add_shares.len() + 1..).step_by(2))
{
// `add_shares` always has an even number of shares, so we simply add the
// next odd share.
add_shares.push(*odd_share);
// Now we need to compute the next even share and add it.
// Note that the n-th index corresponds to the (n+1)-th power, e.g.
// add_shares[4] is the share of H^5.
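// E.g. when current_odd_power = 5, base_share = add_shares[2] (the share of
// H^3), and squaring it yields the share of H^6.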
let mut base_share = add_shares[current_odd_power / 2];
base_share = base_share * base_share;
add_shares.push(base_share);
}
}
#[cfg(test)]
mod tests {
use generic_array::GenericArray;
use ghash_rc::{
universal_hash::{KeyInit, UniversalHash},
GHash,
};
use mpz_core::Block;
use mpz_fields::{gf2_128::Gf2_128, Field};
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha12Rng;
use super::{
compute_missing_mul_shares, compute_new_add_shares, compute_product_repeated,
state::{Finalized, Intermediate},
GhashCore,
};
#[test]
fn test_ghash_product_sharing() {
let mut rng = ChaCha12Rng::from_seed([0; 32]);
// The Ghash key.
let h: Gf2_128 = rng.gen();
let message = Block::random_vec(&mut rng, 10);
let message_len = message.len();
let number_of_powers_needed: usize = message_len / 2 + (message_len & 1);
let (sender, receiver) = setup_ghash_to_intermediate_state(h, message_len);
let mut powers_h = vec![h];
compute_product_repeated(&mut powers_h, h * h, number_of_powers_needed);
// Length check.
assert_eq!(sender.state().odd_mul_shares.len(), number_of_powers_needed);
assert_eq!(
receiver.state().odd_mul_shares.len(),
number_of_powers_needed
);
// Product check.
for (k, (sender_share, receiver_share)) in std::iter::zip(
sender.state().odd_mul_shares.iter(),
receiver.state().odd_mul_shares.iter(),
)
.enumerate()
{
assert_eq!(*sender_share * *receiver_share, powers_h[k]);
}
}
#[test]
fn test_ghash_sum_sharing() {
let mut rng = ChaCha12Rng::from_seed([0; 32]);
// The Ghash key.
let h: Gf2_128 = rng.gen();
let message = Block::random_vec(&mut rng, 10);
let message_len = message.len();
let (sender, receiver) = setup_ghash_to_intermediate_state(h, message_len);
let (sender, receiver) = ghash_to_finalized(sender, receiver);
let mut powers_h = vec![h];
compute_product_repeated(&mut powers_h, h, message_len);
// Length check.
assert_eq!(
sender.state().add_shares.len(),
message_len + (message_len & 1)
);
assert_eq!(
receiver.state().add_shares.len(),
message_len + (message_len & 1)
);
// Sum check.
for (k, item) in powers_h.iter().enumerate().take(message_len) {
assert_eq!(
sender.state().add_shares[k] + receiver.state().add_shares[k],
*item
);
}
}
#[test]
fn test_ghash_output() {
let mut rng = ChaCha12Rng::from_seed([0; 32]);
// The Ghash key.
let h: Gf2_128 = rng.gen();
let message = Block::random_vec(&mut rng, 10);
let (sender, receiver) = setup_ghash_to_intermediate_state(h, message.len());
let (sender, receiver) = ghash_to_finalized(sender, receiver);
let output = sender.finalize(&message).unwrap() ^ receiver.finalize(&message).unwrap();
assert_eq!(
output,
ghash_reference_impl(h.to_inner().reverse_bits(), &message)
);
}
#[test]
fn test_ghash_change_message_short() {
let mut rng = ChaCha12Rng::from_seed([0; 32]);
// The Ghash key.
let h: Gf2_128 = rng.gen();
let message = Block::random_vec(&mut rng, 10);
let (sender, receiver) = setup_ghash_to_intermediate_state(h, message.len());
let (sender, receiver) = ghash_to_finalized(sender, receiver);
let message_short = Block::random_vec(&mut rng, 5);
let (sender, receiver) = (
sender.change_max_hashkey(message_short.len()),
receiver.change_max_hashkey(message_short.len()),
);
let (sender, receiver) = ghash_to_finalized(sender, receiver);
let output =
sender.finalize(&message_short).unwrap() ^ receiver.finalize(&message_short).unwrap();
assert_eq!(
output,
ghash_reference_impl(h.to_inner().reverse_bits(), &message_short)
);
}
#[test]
fn test_ghash_change_message_long() {
let mut rng = ChaCha12Rng::from_seed([0; 32]);
// The Ghash key.
let h: Gf2_128 = rng.gen();
let message = Block::random_vec(&mut rng, 10);
let (sender, receiver) = setup_ghash_to_intermediate_state(h, message.len());
let (sender, receiver) = ghash_to_finalized(sender, receiver);
let message_long = Block::random_vec(&mut rng, 20);
let (sender, receiver) = (
sender.change_max_hashkey(message_long.len()),
receiver.change_max_hashkey(message_long.len()),
);
let (sender, receiver) = ghash_to_finalized(sender, receiver);
let output =
sender.finalize(&message_long).unwrap() ^ receiver.finalize(&message_long).unwrap();
assert_eq!(
output,
ghash_reference_impl(h.to_inner().reverse_bits(), &message_long)
);
}
#[test]
fn test_compute_missing_mul_shares() {
let mut rng = ChaCha12Rng::from_seed([0; 32]);
let h: Gf2_128 = rng.gen();
let mut powers: Vec<Gf2_128> = vec![h];
compute_product_repeated(&mut powers, h * h, rng.gen_range(16..128));
let powers_len = powers.len();
let needed = rng.gen_range(1..256);
compute_missing_mul_shares(&mut powers, needed);
// Check length.
if needed / 2 + (needed & 1) <= powers_len {
assert_eq!(powers.len(), powers_len);
} else {
assert_eq!(powers.len(), needed / 2 + (needed & 1))
}
// Check shares.
let first = *powers.first().unwrap();
let factor = first * first;
let mut expected = first;
for share in powers.iter() {
assert_eq!(*share, expected);
expected = expected * factor;
}
}
#[test]
fn test_compute_new_add_shares() {
let mut rng = ChaCha12Rng::from_seed([0; 32]);
let new_add_odd_shares: Vec<Gf2_128> = gen_gf2_128_vec();
let mut add_shares: Vec<Gf2_128> = gen_gf2_128_vec();
// We have the invariant that len of add_shares is always even.
if add_shares.len() & 1 == 1 {
add_shares.push(rng.gen());
}
let original_len = add_shares.len();
compute_new_add_shares(&new_add_odd_shares, &mut add_shares);
// Check new length.
assert_eq!(
add_shares.len(),
original_len + 2 * new_add_odd_shares.len()
);
// Check odd shares.
for (k, l) in (original_len..add_shares.len())
.step_by(2)
.zip(0..original_len)
{
assert_eq!(add_shares[k], new_add_odd_shares[l]);
}
// Check even shares.
for k in (original_len + 1..add_shares.len()).step_by(2) {
assert_eq!(add_shares[k], add_shares[k / 2] * add_shares[k / 2]);
}
}
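// Free squaring relies on the fact that squaring is additive in GF(2^128):
// (a + b)^2 = a^2 + b^2, since the cross term 2ab vanishes in characteristic
// 2. A minimal illustrative sketch of this identity:
#[test]
fn test_free_squaring_identity() {
let mut rng = ChaCha12Rng::from_seed([0; 32]);
// Two additive shares of some power of H.
let a: Gf2_128 = rng.gen();
let b: Gf2_128 = rng.gen();
// Squaring each share locally yields an additive sharing of the square.
assert_eq!((a + b) * (a + b), a * a + b * b);
}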
fn gen_gf2_128_vec() -> Vec<Gf2_128> {
let mut rng = ChaCha12Rng::from_seed([0; 32]);
// Sample some message.
let message_len: usize = rng.gen_range(16..128);
let mut message: Vec<Gf2_128> = vec![Gf2_128::zero(); message_len];
message.iter_mut().for_each(|x| *x = rng.gen());
message
}
fn ghash_reference_impl(h: u128, message: &[Block]) -> Block {
let mut ghash = GHash::new(&h.to_be_bytes().into());
for el in message {
let block = GenericArray::clone_from_slice(el.to_bytes().as_slice());
ghash.update(&[block]);
}
let ghash_output = ghash.finalize();
Block::from(ghash_output)
}
fn setup_ghash_to_intermediate_state(
hashkey: Gf2_128,
max_hashkey_power: usize,
) -> (GhashCore<Intermediate>, GhashCore<Intermediate>) {
let mut rng = ChaCha12Rng::from_seed([0; 32]);
// Create a multiplicative sharing.
let h1_multiplicative: Gf2_128 = rng.gen();
let h2_multiplicative: Gf2_128 = hashkey * h1_multiplicative.inverse();
let sender = GhashCore::new(max_hashkey_power);
let receiver = GhashCore::new(max_hashkey_power);
let (sender, receiver) = (
sender.compute_odd_mul_powers(h1_multiplicative),
receiver.compute_odd_mul_powers(h2_multiplicative),
);
(sender, receiver)
}
fn ghash_to_finalized(
sender: GhashCore<Intermediate>,
receiver: GhashCore<Intermediate>,
) -> (GhashCore<Finalized>, GhashCore<Finalized>) {
let (add_shares_sender, add_shares_receiver) =
m2a(&sender.odd_mul_shares(), &receiver.odd_mul_shares());
let (sender, receiver) = (
sender.add_new_add_shares(&add_shares_sender),
receiver.add_new_add_shares(&add_shares_receiver),
);
(sender, receiver)
}
fn m2a(first: &[Gf2_128], second: &[Gf2_128]) -> (Vec<Gf2_128>, Vec<Gf2_128>) {
let mut rng = ChaCha12Rng::from_seed([0; 32]);
let mut first_out = vec![];
let mut second_out = vec![];
for (j, k) in first.iter().zip(second.iter()) {
let product = *j * *k;
let first_summand: Gf2_128 = rng.gen();
let second_summand: Gf2_128 = product + first_summand;
first_out.push(first_summand);
second_out.push(second_summand);
}
(first_out, second_out)
}
}

View File

@@ -1,52 +0,0 @@
use mpz_fields::gf2_128::Gf2_128;
mod sealed {
pub(crate) trait Sealed {}
impl Sealed for super::Init {}
impl Sealed for super::Intermediate {}
impl Sealed for super::Finalized {}
}
pub(crate) trait State: sealed::Sealed {}
impl State for Init {}
impl State for Intermediate {}
impl State for Finalized {}
/// Init state for Ghash protocol.
///
/// This is before any OT has taken place.
#[derive(Clone)]
pub(crate) struct Init;
opaque_debug::implement!(Init);
/// Intermediate state for Ghash protocol.
///
/// This is when the additive share has been converted into a multiplicative
/// share and all the needed powers have been computed.
#[derive(Clone)]
pub(crate) struct Intermediate {
pub(super) odd_mul_shares: Vec<Gf2_128>,
// A vec of all additive shares (even and odd) we already have.
// (In order to simplify the code) the n-th index of the vec corresponds to the additive share
// of the (n+1)-th power of H, e.g. the share of H^1 is located at the 0-th index of the vec.
// It always contains an even number of consecutive shares starting from the share of H^1 up to
// the share of H^(cached_add_shares.len()).
pub(super) cached_add_shares: Vec<Gf2_128>,
}
opaque_debug::implement!(Intermediate);
/// Final state for Ghash protocol.
///
/// This is when each party can compute a final share of the ghash output,
/// because both now have additive shares of all the powers of `H`.
#[derive(Clone)]
pub(crate) struct Finalized {
pub(super) odd_mul_shares: Vec<Gf2_128>,
pub(super) add_shares: Vec<Gf2_128>,
}
opaque_debug::implement!(Finalized);

View File

@@ -1,19 +0,0 @@
use derive_builder::Builder;
#[derive(Debug, Clone, Builder)]
/// Configuration struct for [Ghash](crate::ghash::Ghash).
pub struct GhashConfig {
/// Initial number of block shares to provision.
#[builder(default = "1026")]
pub initial_block_count: usize,
/// Maximum number of blocks supported.
#[builder(default = "1026")]
pub max_block_count: usize,
}
impl GhashConfig {
/// Creates a new builder for the [GhashConfig].
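///
/// A typical construction, using the setters generated by `derive_builder`
/// (shown here as a sketch):
///
/// ```ignore
/// let config = GhashConfig::builder()
///     .initial_block_count(64)
///     .max_block_count(1024)
///     .build()
///     .unwrap();
/// ```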
pub fn builder() -> GhashConfigBuilder {
GhashConfigBuilder::default()
}
}

View File

@@ -1,183 +0,0 @@
//! Ideal GHASH functionality.
use async_trait::async_trait;
use ghash_rc::{
universal_hash::{KeyInit, UniversalHash as UniversalHashReference},
GHash,
};
use mpz_common::{
ideal::{ideal_f2p, Alice, Bob},
Context,
};
use crate::{UniversalHash, UniversalHashError};
/// An ideal GHASH functionality.
#[derive(Debug)]
pub struct IdealGhash<Ctx> {
role: Role,
context: Ctx,
}
#[derive(Debug)]
enum Role {
Alice(Alice<GHash>),
Bob(Bob<GHash>),
}
#[async_trait]
impl<Ctx: Context> UniversalHash for IdealGhash<Ctx> {
async fn set_key(&mut self, key: Vec<u8>) -> Result<(), UniversalHashError> {
match &mut self.role {
Role::Alice(alice) => {
alice
.call(
&mut self.context,
key,
|ghash, alice_key, bob_key: Vec<u8>| {
let key = alice_key
.into_iter()
.zip(bob_key)
.map(|(a, b)| a ^ b)
.collect::<Vec<_>>();
*ghash = GHash::new_from_slice(&key).unwrap();
((), ())
},
)
.await
}
Role::Bob(bob) => {
bob.call(
&mut self.context,
key,
|ghash, alice_key: Vec<u8>, bob_key| {
let key = alice_key
.into_iter()
.zip(bob_key)
.map(|(a, b)| a ^ b)
.collect::<Vec<_>>();
*ghash = GHash::new_from_slice(&key).unwrap();
((), ())
},
)
.await
}
}
Ok(())
}
async fn setup(&mut self) -> Result<(), UniversalHashError> {
Ok(())
}
async fn preprocess(&mut self) -> Result<(), UniversalHashError> {
Ok(())
}
async fn finalize(&mut self, input: Vec<u8>) -> Result<Vec<u8>, UniversalHashError> {
Ok(match &mut self.role {
Role::Alice(alice) => {
alice
.call(
&mut self.context,
input,
|ghash, alice_input, bob_input: Vec<u8>| {
assert_eq!(&alice_input, &bob_input);
let mut ghash = ghash.clone();
ghash.update_padded(&alice_input);
let output = ghash.finalize().to_vec();
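// Trivial additive sharing of the ideal output: Bob's share is all
// zeros, so Alice's share equals the full GHASH output.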
let output_bob = vec![0; output.len()];
let output_alice: Vec<u8> = output
.iter()
.zip(output_bob.iter().copied())
.map(|(o, b)| o ^ b)
.collect();
(output_alice, output_bob)
},
)
.await
}
Role::Bob(bob) => {
bob.call(
&mut self.context,
input,
|ghash, alice_input: Vec<u8>, bob_input| {
assert_eq!(&alice_input, &bob_input);
let mut ghash = ghash.clone();
ghash.update_padded(&alice_input);
let output = ghash.finalize();
let output_bob = vec![0; output.len()];
let output_alice: Vec<u8> = output
.iter()
.zip(output_bob.iter().copied())
.map(|(o, b)| o ^ b)
.collect();
(output_alice, output_bob)
},
)
.await
}
})
}
}
/// Creates an ideal GHASH pair.
pub fn ideal_ghash<Ctx: Context>(
context_alice: Ctx,
context_bob: Ctx,
) -> (IdealGhash<Ctx>, IdealGhash<Ctx>) {
let (alice, bob) = ideal_f2p(GHash::new_from_slice(&[0u8; 16]).unwrap());
(
IdealGhash {
role: Role::Alice(alice),
context: context_alice,
},
IdealGhash {
role: Role::Bob(bob),
context: context_bob,
},
)
}
#[cfg(test)]
mod tests {
use super::*;
use mpz_common::executor::test_st_executor;
#[tokio::test]
async fn test_ideal_ghash() {
let (ctx_a, ctx_b) = test_st_executor(8);
let (mut alice, mut bob) = ideal_ghash(ctx_a, ctx_b);
let alice_key = vec![42u8; 16];
let bob_key = vec![69u8; 16];
let key = alice_key
.iter()
.zip(bob_key.iter())
.map(|(a, b)| a ^ b)
.collect::<Vec<_>>();
tokio::try_join!(
alice.set_key(alice_key.clone()),
bob.set_key(bob_key.clone())
)
.unwrap();
let input = vec![1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let (output_a, output_b) =
tokio::try_join!(alice.finalize(input.clone()), bob.finalize(input.clone())).unwrap();
let mut ghash = GHash::new_from_slice(&key).unwrap();
ghash.update_padded(&input);
let expected_output = ghash.finalize();
let output: Vec<u8> = output_a.iter().zip(output_b).map(|(a, b)| a ^ b).collect();
assert_eq!(output, expected_output.to_vec());
}
}

View File

@@ -1,35 +0,0 @@
use mpz_share_conversion::{
mock::{mock_converter_pair, MockConverterReceiver, MockConverterSender},
Gf2_128, ReceiverConfig, SenderConfig,
};
use super::{Ghash, GhashConfig};
/// Creates a Ghash sender/receiver pair for testing purposes.
pub fn mock_ghash_pair(
sender_config: GhashConfig,
receiver_config: GhashConfig,
) -> (
Ghash<MockConverterSender<Gf2_128>>,
Ghash<MockConverterReceiver<Gf2_128>>,
) {
let (sender, receiver) = mock_converter_pair::<Gf2_128>(
SenderConfig::builder()
.id(format!("{}/converter", sender_config.id))
.record()
.build()
.unwrap(),
ReceiverConfig::builder()
.id(format!("{}/converter", receiver_config.id))
.record()
.build()
.unwrap(),
);
let (sender, receiver) = (
Ghash::new(sender_config, sender),
Ghash::new(receiver_config, receiver),
);
(sender, receiver)
}

View File

@@ -1,362 +0,0 @@
use crate::{
ghash::ghash_core::{
state::{Finalized, Intermediate},
GhashCore,
},
UniversalHash, UniversalHashError,
};
use async_trait::async_trait;
use mpz_common::{Context, Preprocess};
use mpz_core::Block;
use mpz_fields::gf2_128::Gf2_128;
use mpz_share_conversion::{ShareConversionError, ShareConvert};
use std::fmt::Debug;
use tracing::instrument;
mod config;
#[cfg(feature = "ideal")]
pub(crate) mod ideal;
pub use config::{GhashConfig, GhashConfigBuilder, GhashConfigBuilderError};
#[derive(Debug)]
enum State {
Init,
Ready { core: GhashCore<Finalized> },
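// Placeholder state while `finalize` temporarily takes ownership of the
// core; it only persists if an error occurs before the state is restored.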
Error,
}
/// This is the common instance used by both sender and receiver.
///
/// It is an async wrapper which mostly uses [GhashCore] for computation.
pub struct Ghash<C, Ctx> {
state: State,
config: GhashConfig,
converter: C,
context: Ctx,
}
impl<C, Ctx> Ghash<C, Ctx>
where
C: ShareConvert<Ctx, Gf2_128>,
{
/// Creates a new instance.
///
/// # Arguments
///
/// * `config` - The configuration for this Ghash instance.
/// * `converter` - An instance which allows converting multiplicative shares
///   into additive shares and vice versa.
/// * `context` - The context.
pub fn new(config: GhashConfig, converter: C, context: Ctx) -> Self {
Self {
state: State::Init,
config,
converter,
context,
}
}
/// Computes all the additive shares of the hashkey powers.
///
/// We need this when the max block count changes.
#[instrument(level = "debug", skip_all, err)]
async fn compute_add_shares(
&mut self,
core: GhashCore<Intermediate>,
) -> Result<GhashCore<Finalized>, UniversalHashError> {
let odd_mul_shares = core.odd_mul_shares();
let add_shares = self
.converter
.to_additive(&mut self.context, odd_mul_shares)
.await?;
let core = core.add_new_add_shares(&add_shares);
Ok(core)
}
}
impl<C, Ctx> Debug for Ghash<C, Ctx> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Ghash")
.field("state", &self.state)
.field("config", &self.config)
.field("converter", &"{{ .. }}".to_string())
.finish()
}
}
#[async_trait]
impl<Ctx, C> UniversalHash for Ghash<C, Ctx>
where
C: Preprocess<Ctx, Error = ShareConversionError> + ShareConvert<Ctx, Gf2_128> + Send,
{
#[instrument(level = "info", fields(thread = %self.context.id()), skip_all, err)]
async fn set_key(&mut self, key: Vec<u8>) -> Result<(), UniversalHashError> {
if key.len() != 16 {
return Err(UniversalHashError::KeyLengthError(16, key.len()));
}
if !matches!(&self.state, State::Init) {
return Err(UniversalHashError::InvalidState(
"Key already set".to_string(),
));
}
let mut h_additive = [0u8; 16];
h_additive.copy_from_slice(key.as_slice());
// GHASH reflects the bits of the key.
let h_additive = Gf2_128::new(u128::from_be_bytes(h_additive).reverse_bits());
let h_multiplicative = self
.converter
.to_multiplicative(&mut self.context, vec![h_additive])
.await?;
let core = GhashCore::new(self.config.initial_block_count);
let core = core.compute_odd_mul_powers(h_multiplicative[0]);
let core = self.compute_add_shares(core).await?;
self.state = State::Ready { core };
Ok(())
}
#[instrument(level = "debug", fields(thread = %self.context.id()), skip_all, err)]
async fn setup(&mut self) -> Result<(), UniversalHashError> {
// Because of the free squaring trick we only need `max_block_count / 2`
// M2As, plus one extra A2M conversion at the beginning. Both M2A and A2M
// each require a single OLE.
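// For example, with the default `max_block_count` of 1026 this allocates
// 1026 / 2 + 1 = 514 OLEs.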
let ole_count = self.config.max_block_count / 2 + 1;
self.converter.alloc(ole_count);
Ok(())
}
#[instrument(level = "debug", fields(thread = %self.context.id()), skip_all, err)]
async fn preprocess(&mut self) -> Result<(), UniversalHashError> {
self.converter.preprocess(&mut self.context).await?;
Ok(())
}
#[instrument(level = "debug", fields(thread = %self.context.id()), skip_all, err)]
async fn finalize(&mut self, mut input: Vec<u8>) -> Result<Vec<u8>, UniversalHashError> {
// Divide by block length and round up.
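// E.g. a 126-byte input maps to 8 blocks.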
let block_count = input.len() / 16 + (input.len() % 16 != 0) as usize;
if block_count > self.config.max_block_count {
return Err(UniversalHashError::InputLengthError(input.len()));
}
let state = std::mem::replace(&mut self.state, State::Error);
// Calling finalize before the key has been set is a fatal error.
let State::Ready { core } = state else {
return Err(UniversalHashError::InvalidState("Key not set".to_string()));
};
// Compute new shares if the block count increased.
let core = if block_count > core.get_max_blocks() {
self.compute_add_shares(core.change_max_hashkey(block_count))
.await?
} else {
core
};
// Pad input to a multiple of 16 bytes.
input.resize(block_count * 16, 0);
// Convert input to blocks.
let blocks = input
.chunks_exact(16)
.map(|chunk| {
let mut block = [0u8; 16];
block.copy_from_slice(chunk);
Block::from(block)
})
.collect::<Vec<Block>>();
let tag = core
.finalize(&blocks)
.expect("Input length should be valid");
// Reinsert state.
self.state = State::Ready { core };
Ok(tag.to_bytes().to_vec())
}
}
#[cfg(test)]
mod tests {
use crate::{
ghash::{Ghash, GhashConfig},
UniversalHash,
};
use ghash_rc::{
universal_hash::{KeyInit, UniversalHash as UniversalHashReference},
GHash as GhashReference,
};
use mpz_common::{executor::test_st_executor, Context};
use mpz_share_conversion::ideal::{ideal_share_converter, IdealShareConverter};
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha12Rng;
fn create_pair<Ctx: Context>(
block_count: usize,
context_alice: Ctx,
context_bob: Ctx,
) -> (
Ghash<IdealShareConverter, Ctx>,
Ghash<IdealShareConverter, Ctx>,
) {
let (convert_a, convert_b) = ideal_share_converter();
let config = GhashConfig::builder()
.initial_block_count(block_count)
.build()
.unwrap();
(
Ghash::new(config.clone(), convert_a, context_alice),
Ghash::new(config, convert_b, context_bob),
)
}
#[tokio::test]
async fn test_ghash_output() {
let (ctx_a, ctx_b) = test_st_executor(8);
let mut rng = ChaCha12Rng::from_seed([0; 32]);
let h: u128 = rng.gen();
let sender_key: u128 = rng.gen();
let receiver_key: u128 = h ^ sender_key;
let message: Vec<u8> = (0..128).map(|_| rng.gen()).collect();
let (mut sender, mut receiver) = create_pair(1, ctx_a, ctx_b);
tokio::try_join!(
sender.set_key(sender_key.to_be_bytes().to_vec()),
receiver.set_key(receiver_key.to_be_bytes().to_vec())
)
.unwrap();
let (sender_share, receiver_share) = tokio::try_join!(
sender.finalize(message.clone()),
receiver.finalize(message.clone())
)
.unwrap();
let tag = sender_share
.iter()
.zip(receiver_share.iter())
.map(|(a, b)| a ^ b)
.collect::<Vec<u8>>();
assert_eq!(tag, ghash_reference_impl(h, &message));
}
#[tokio::test]
async fn test_ghash_output_padded() {
let (ctx_a, ctx_b) = test_st_executor(8);
let mut rng = ChaCha12Rng::from_seed([0; 32]);
let h: u128 = rng.gen();
let sender_key: u128 = rng.gen();
let receiver_key: u128 = h ^ sender_key;
// Message length is not a multiple of the block length.
let message: Vec<u8> = (0..126).map(|_| rng.gen()).collect();
let (mut sender, mut receiver) = create_pair(1, ctx_a, ctx_b);
tokio::try_join!(
sender.set_key(sender_key.to_be_bytes().to_vec()),
receiver.set_key(receiver_key.to_be_bytes().to_vec())
)
.unwrap();
let (sender_share, receiver_share) = tokio::try_join!(
sender.finalize(message.clone()),
receiver.finalize(message.clone())
)
.unwrap();
let tag = sender_share
.iter()
.zip(receiver_share.iter())
.map(|(a, b)| a ^ b)
.collect::<Vec<u8>>();
assert_eq!(tag, ghash_reference_impl(h, &message));
}
#[tokio::test]
async fn test_ghash_long_message() {
let (ctx_a, ctx_b) = test_st_executor(8);
let mut rng = ChaCha12Rng::from_seed([0; 32]);
let h: u128 = rng.gen();
let sender_key: u128 = rng.gen();
let receiver_key: u128 = h ^ sender_key;
let short_message: Vec<u8> = (0..128).map(|_| rng.gen()).collect();
// A longer message.
let long_message: Vec<u8> = (0..192).map(|_| rng.gen()).collect();
// Create and setup sender and receiver for short message length.
let (mut sender, mut receiver) = create_pair(1, ctx_a, ctx_b);
tokio::try_join!(
sender.set_key(sender_key.to_be_bytes().to_vec()),
receiver.set_key(receiver_key.to_be_bytes().to_vec())
)
.unwrap();
// Compute the shares for the short message.
tokio::try_join!(
sender.finalize(short_message.clone()),
receiver.finalize(short_message.clone())
)
.unwrap();
// Now compute the shares for the longer message.
let (sender_share, receiver_share) = tokio::try_join!(
sender.finalize(long_message.clone()),
receiver.finalize(long_message.clone())
)
.unwrap();
let tag = sender_share
.iter()
.zip(receiver_share.iter())
.map(|(a, b)| a ^ b)
.collect::<Vec<u8>>();
assert_eq!(tag, ghash_reference_impl(h, &long_message));
// We should still be able to generate a Ghash output for the shorter message.
let (sender_share, receiver_share) = tokio::try_join!(
sender.finalize(short_message.clone()),
receiver.finalize(short_message.clone())
)
.unwrap();
let tag = sender_share
.iter()
.zip(receiver_share.iter())
.map(|(a, b)| a ^ b)
.collect::<Vec<u8>>();
assert_eq!(tag, ghash_reference_impl(h, &short_message));
}
fn ghash_reference_impl(h: u128, message: &[u8]) -> Vec<u8> {
let mut ghash = GhashReference::new(&h.to_be_bytes().into());
ghash.update_padded(message);
let mac = ghash.finalize();
mac.to_vec()
}
}

View File

@@ -1,6 +0,0 @@
mod ghash_core;
mod ghash_inner;
#[cfg(feature = "ideal")]
pub use ghash_inner::ideal::{ideal_ghash, IdealGhash};
pub use ghash_inner::{Ghash, GhashConfig, GhashConfigBuilder, GhashConfigBuilderError};

View File

@@ -1,50 +0,0 @@
//! A library for computing different kinds of hash functions in a 2PC setting.
#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]
#![forbid(unsafe_code)]
/// This module implements [UniversalHash] for Ghash.
#[cfg(feature = "ghash")]
pub mod ghash;
use async_trait::async_trait;
/// Errors for [UniversalHash].
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
pub enum UniversalHashError {
#[error("Invalid state: {0}")]
InvalidState(String),
#[error("Invalid key length, expected {0}, got {1}")]
KeyLengthError(usize, usize),
#[error("Invalid input length: {0}")]
InputLengthError(usize),
#[error(transparent)]
ShareConversionError(#[from] mpz_share_conversion::ShareConversionError),
}
#[async_trait]
/// A trait supporting different kinds of hash functions.
pub trait UniversalHash: Send {
/// Sets the key for the hash function.
///
/// # Arguments
///
/// * `key` - Key to use for the hash function.
async fn set_key(&mut self, key: Vec<u8>) -> Result<(), UniversalHashError>;
/// Performs any necessary one-time setup.
async fn setup(&mut self) -> Result<(), UniversalHashError>;
/// Preprocesses the hash function.
async fn preprocess(&mut self) -> Result<(), UniversalHashError>;
/// Computes the hash of the input, padding it to the block size if needed.
///
/// # Arguments
///
/// * `input` - Input to hash.
async fn finalize(&mut self, input: Vec<u8>) -> Result<Vec<u8>, UniversalHashError>;
}
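// A minimal driver sketch showing one plausible call order for the trait.
// The ordering of `setup` and `preprocess` relative to `set_key` is assumed
// from the `Ghash` implementation above; the helper itself is illustrative.
#[allow(dead_code)]
async fn hash_with<H: UniversalHash>(
hasher: &mut H,
key: Vec<u8>,
input: Vec<u8>,
) -> Result<Vec<u8>, UniversalHashError> {
// One-time setup, e.g. allocating protocol resources.
hasher.setup().await?;
// Optional offline preprocessing.
hasher.preprocess().await?;
// Set this party's share of the key.
hasher.set_key(key).await?;
// Compute this party's share of the hash over `input`.
hasher.finalize(input).await
}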
}